diff --git a/dask_kubernetes/conftest.py b/dask_kubernetes/conftest.py
index f6df4a852..6e82ce0c2 100644
--- a/dask_kubernetes/conftest.py
+++ b/dask_kubernetes/conftest.py
@@ -26,7 +26,9 @@ def kopf_runner(k8s_cluster):
 @pytest.fixture(scope="session")
 def docker_image():
     image_name = "dask-kubernetes:dev"
-    subprocess.run(["docker", "build", "-t", image_name, "./ci/"], check=True)
+    project_root = pathlib.Path(__file__).parent.parent
+    ci_dir = project_root / "ci"
+    subprocess.run(["docker", "build", "-t", image_name, str(ci_dir)], check=True)
     return image_name
 
 
diff --git a/dask_kubernetes/operator/deployment/helm/chartpress.yaml b/dask_kubernetes/operator/deployment/helm/chartpress.yaml
index 797d1b961..2e6c7dd62 100644
--- a/dask_kubernetes/operator/deployment/helm/chartpress.yaml
+++ b/dask_kubernetes/operator/deployment/helm/chartpress.yaml
@@ -1,6 +1,6 @@
 # This is configuration for chartpress, a CLI for Helm chart management.
 #
-# chartpress is used to test, package, and publish the dask-gateway Helm chart
+# chartpress is used to test, package, and publish the dask-kubernetes-operator Helm chart
 # to the gh-pages based Helm chart repository at https://helm.dask.org and
 # https://github.com/dask/helm-chart respectively. Note that a Helm chart
 # repository is just a website that can serve a "index.yaml" file pointing to
diff --git a/dask_kubernetes/operator/deployment/helm/dask-kubernetes-operator/Chart.yaml b/dask_kubernetes/operator/deployment/helm/dask-kubernetes-operator/Chart.yaml
index 0666e2cb2..6ce3eb04e 100644
--- a/dask_kubernetes/operator/deployment/helm/dask-kubernetes-operator/Chart.yaml
+++ b/dask_kubernetes/operator/deployment/helm/dask-kubernetes-operator/Chart.yaml
@@ -3,7 +3,7 @@ name: dask-kubernetes-operator
 description: A helm chart for managing the deployment of the dask kubernetes operator and CRDs
 type: application
 version: 0.0.1-set.by.chartpress
-appVersion: "2022.4.1"
+appVersion: "2022.7.0"
 home: https://kubernetes.dask.org/
 sources:
   - https://github.com/dask/dask-kubernetes/
diff --git a/dask_kubernetes/operator/deployment/tests/test_smoke_helm.py b/dask_kubernetes/operator/deployment/tests/test_smoke_helm.py
new file mode 100644
index 000000000..8fbb1e8a8
--- /dev/null
+++ b/dask_kubernetes/operator/deployment/tests/test_smoke_helm.py
@@ -0,0 +1,101 @@
+import subprocess
+import uuid
+from pathlib import Path
+
+import pytest
+
+from dask_kubernetes.operator import KubeCluster
+
+
+@pytest.fixture(scope="module")
+def operator_image(k8s_cluster):
+    image_name = "dask-kubernetes-operator:dev"
+    project_root = Path(__file__).parent.parent.parent.parent.parent
+    dockerfile = Path(__file__).parent.parent / "Dockerfile"
+    subprocess.run(
+        ["docker", "build", "-t", image_name, "-f", dockerfile, str(project_root)],
+        check=True,
+    )
+    k8s_cluster.load_docker_image(image_name)
+    return image_name
+
+
+@pytest.fixture
+def install_cluster_role_helm_chart(k8s_cluster, operator_image, ns):
+    this_dir = Path(__file__).parent
+    helm_chart_dir = this_dir.parent / "helm" / "dask-kubernetes-operator"
+    release_name = f"pytest-smoke-operator-{str(uuid.uuid4())[:8]}"
+    docker_image_name = operator_image.split(":")[0]
+    docker_image_tag = operator_image.split(":")[1]
+    subprocess.run(
+        [
+            "helm",
+            "install",
+            "-n",
+            ns,
+            release_name,
+            str(helm_chart_dir),
+            f"--set=image.name={docker_image_name}",
+            f"--set=image.tag={docker_image_tag}",
+            "--wait",
+        ],
+        check=True,
+    )
+    yield
+    subprocess.run(
+        ["helm", "uninstall", "-n", ns, release_name],
+        check=True,
+    )
+
+
+@pytest.fixture
+def install_role_helm_chart(k8s_cluster, operator_image, ns):
+    this_dir = Path(__file__).parent
+    helm_chart_dir = this_dir.parent / "helm" / "dask-kubernetes-operator"
+    release_name = f"pytest-smoke-operator-{str(uuid.uuid4())[:8]}"
+    docker_image_name = operator_image.split(":")[0]
+    docker_image_tag = operator_image.split(":")[1]
+    subprocess.run(
+        [
+            "helm",
+            "install",
+            "-n",
+            ns,
+            release_name,
+            str(helm_chart_dir),
+            f"--set=image.name={docker_image_name}",
+            f"--set=image.tag={docker_image_tag}",
+            "--set=rbac.create=false",
+            "--wait",
+        ],
+        check=True,
+    )
+    yield
+    subprocess.run(
+        ["helm", "uninstall", "-n", ns, release_name],
+        check=True,
+    )
+
+
+@pytest.mark.timeout(180)
+def test_smoke_helm_deployment_cluster_role(install_cluster_role_helm_chart, ns):
+    with KubeCluster(
+        name="pytest-smoke-cluster",
+        namespace=ns,
+        shutdown_on_close=True,
+    ) as cluster:
+        cluster.scale(2)
+        client = cluster.get_client()
+        client.wait_for_workers(2, timeout=120)
+
+
+@pytest.mark.timeout(180)
+def test_smoke_helm_deployment_role(install_role_helm_chart, ns):
+    with KubeCluster(
+        name="pytest-smoke-cluster",
+        namespace=ns,
+        shutdown_on_close=True,
+    ) as cluster:
+        cluster.scale(2)
+        client = cluster.get_client()
+        client.wait_for_workers(2, timeout=120)