diff --git a/.env.example b/.env.example
index 68bab62c..892ecb2b 100644
--- a/.env.example
+++ b/.env.example
@@ -13,7 +13,6 @@ BUILD_BUILDID = '001'
 # Azure ML Workspace Variables
 WORKSPACE_NAME = 'aml-workspace'
 EXPERIMENT_NAME = ''
-SCRIPT_FOLDER = './'
 
 # AML Compute Cluster Config
 AML_COMPUTE_CLUSTER_NAME = 'train-cluster'
@@ -32,6 +31,7 @@ EVALUATE_SCRIPT_PATH = 'evaluate/evaluate_model.py'
 REGISTER_SCRIPT_PATH = 'register/register_model.py'
 SOURCES_DIR_TRAIN = 'diabetes_regression'
 DATASET_NAME = 'diabetes_ds'
+SCORE_SCRIPT = 'scoring/score.py'
 
 # Optional. Used by a training pipeline with R on Databricks
 DB_CLUSTER_ID = ''
diff --git a/.pipelines/azdo-abtest-pipeline.yml b/.pipelines/azdo-abtest-pipeline.yml
index f532c33c..d6728d81 100644
--- a/.pipelines/azdo-abtest-pipeline.yml
+++ b/.pipelines/azdo-abtest-pipeline.yml
@@ -28,7 +28,7 @@ variables:
 - name: 'greenReleaseName'
   value: 'model-green'
 - name: 'SCORE_SCRIPT'
-  value: 'scoreA.py'
+  value: 'scoring/scoreA.py'
 
 stages:
 - stage: 'Building'
diff --git a/.pipelines/diabetes_regression-ci-image.yml b/.pipelines/diabetes_regression-ci-image.yml
index a3ab2937..63041db5 100644
--- a/.pipelines/diabetes_regression-ci-image.yml
+++ b/.pipelines/diabetes_regression-ci-image.yml
@@ -27,7 +27,7 @@ container: mlops
 variables:
 - group: devopsforai-aml-vg
 - name: 'SCORE_SCRIPT'
-  value: 'scoreB.py'
+  value: 'scoring/scoreB.py'
 
 steps:
 - task: AzureCLI@1
diff --git a/.pipelines/diabetes_regression-variables.yml b/.pipelines/diabetes_regression-variables.yml
index ef27bf5a..af32282e 100644
--- a/.pipelines/diabetes_regression-variables.yml
+++ b/.pipelines/diabetes_regression-variables.yml
@@ -1,8 +1,32 @@
 # Pipeline template that defines common runtime environment variables.
 variables:
-  # Azure ML Workspace Variables
+
+  # Source Config
+  # The directory containing the scripts for training, evaluating, and registering the model
+  - name: SOURCES_DIR_TRAIN
+    value: diabetes_regression
+  # The path to the model training script under SOURCES_DIR_TRAIN
+  - name: TRAIN_SCRIPT_PATH
+    value: training/train.py
+  # The path to the model evaluation script under SOURCES_DIR_TRAIN
+  - name: EVALUATE_SCRIPT_PATH
+    value: evaluate/evaluate_model.py
+  # The path to the model registration script under SOURCES_DIR_TRAIN
+  - name: REGISTER_SCRIPT_PATH
+    value: register/register_model.py
+  # The path to the model scoring script relative to SOURCES_DIR_TRAIN
+  - name: SCORE_SCRIPT
+    value: scoring/score.py
+
+  # Azure ML Variables
   - name: EXPERIMENT_NAME
     value: mlopspython
+  - name: DATASET_NAME
+    value: diabetes_ds
+  - name: TRAINING_PIPELINE_NAME
+    value: "diabetes-Training-Pipeline"
+  - name: MODEL_NAME
+    value: diabetes_regression_model.pkl
 
   # AML Compute Cluster Config
   - name: AML_COMPUTE_CLUSTER_CPU_SKU
@@ -16,43 +40,23 @@ variables:
   - name: AML_CLUSTER_PRIORITY
     value: lowpriority
 
-  # Training Config
-  - name: BUILD_TRAIN_SCRIPT
-    value: diabetes_regression_build_train_pipeline.py
-  - name: TRAIN_SCRIPT_PATH
-    value: training/train.py
-  - name: MODEL_NAME
-    value: sklearn_regression_model.pkl
-  - name: MODEL_VERSION
-    value: "1"
-
-  # AML Pipeline Config
-  - name: TRAINING_PIPELINE_NAME
-    value: "diabetes-Training-Pipeline"
-  - name: MODEL_PATH
-    value: ""
-  - name: EVALUATE_SCRIPT_PATH
-    value: evaluate/evaluate_model.py
-  - name: REGISTER_SCRIPT_PATH
-    value: register/register_model.py
-  - name: SOURCES_DIR_TRAIN
-    value: diabetes_regression
+  # The name for the (docker/webapp) scoring image
   - name: IMAGE_NAME
     value: "diabetestrained"
+  # Optional. Used by a training pipeline with R on Databricks
   - name: DB_CLUSTER_ID
     value: ""
-  - name: SCORE_SCRIPT
-    value: score.py
-  - name: DATASET_NAME
-    value: diabetes_ds
 
+  # These are the default values set in ml_service\util\env_variables.py. Uncomment and override if desired.
+  # Set to false to disable the evaluation step in the ML pipeline and register the newly trained model unconditionally.
   # - name: RUN_EVALUATION
   #   value: "true"
+  # Set to false to register the model regardless of the outcome of the evaluation step in the ML pipeline.
   # - name: ALLOW_RUN_CANCEL
   #   value: "true"
 
-  # For debugging deployment issues. Specify a build id with the MODEL_BUILD_ID pipeline variable at queue time
-  # to skip training and deploy a model registered by a previous build.
+  # For debugging deployment issues. Specify a build id with the MODEL_BUILD_ID pipeline variable at queue time
+  # to skip training and deploy a model registered by a previous build.
   - name: modelbuildid
     value: $[coalesce(variables['MODEL_BUILD_ID'], variables['Build.BuildId'])]
\ No newline at end of file
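For readers scanning the variable changes above: SCORE_SCRIPT is now a path relative to SOURCES_DIR_TRAIN rather than a bare file name. Below is a minimal sketch of how the two values compose and are split back into a folder and a file name, mirroring the ml_service/util/create_scoring_image.py change further down; the literal values are just the defaults from the template above.

```python
import os

# Default values from the variable template above; real runs may override them.
sources_dir_train = "diabetes_regression"
score_script = "scoring/score.py"  # now relative to SOURCES_DIR_TRAIN

# Same composition as in ml_service/util/create_scoring_image.py below.
full_path = os.path.join(".", sources_dir_train, score_script)
score_file = os.path.basename(full_path)      # 'score.py' -> used as execution_script
path_to_scoring = os.path.dirname(full_path)  # './diabetes_regression/scoring'

print(full_path, score_file, path_to_scoring)
```

Keeping the scoring script under the sources directory is what lets the A/B pipelines point SCORE_SCRIPT at scoring/scoreA.py or scoring/scoreB.py without any other configuration.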
diff --git a/docs/canary_ab_deployment.md b/docs/canary_ab_deployment.md
index fb2b38ec..7e5703b4 100644
--- a/docs/canary_ab_deployment.md
+++ b/docs/canary_ab_deployment.md
@@ -6,7 +6,7 @@ If your target deployment environment is a K8s cluster and you want to implement
 
 **Note:** It is assumed that you have an AKS instance and configured ***kubectl*** to communicate with the cluster.
 
-#### 1. Install Istio on a K8s cluster. 
+#### 1. Install Istio on a K8s cluster.
 
 This guidance uses [Istio](https://istio.io) service mesh implememtation to control traffic routing between model versions. The instruction on installing Istio is available [here](https://docs.microsoft.com/en-us/azure/aks/servicemesh-istio-install?pivots=client-operating-system-linux).
 
@@ -28,7 +28,6 @@ There are some extra variables that you need to setup in ***devopsforai-aml-vg**
 | K8S_AB_NAMESPACE | Namespace in a K8s cluster to deploy the model |
 | IMAGE_REPO_NAME | Image reposiory name (e.g. mlopspyciamlcr.azurecr.io)|
 
-
 #### 3. Configure a pipeline to build and deploy a scoring Image
 
 Import and run the [azdo-abtest-pipeline.yml](./.pipelines/azdo-abtest-pipeline.yml) multistage deployment pipeline.
@@ -37,7 +36,7 @@ The result of the pipeline will be a registered Docker image in the ACR reposito
 
 ![scoring image](./images/scoring_image.png)
 
-The pipeline creates Istio Gateway and VirtualService and deploys the scoring image to the Kubernetes cluster. 
+The pipeline creates Istio Gateway and VirtualService and deploys the scoring image to the Kubernetes cluster.
 
 ```bash
 kubectl get deployments --namespace abtesting
@@ -45,9 +44,9 @@ NAME          READY   UP-TO-DATE   AVAILABLE   AGE
 model-green   1/1     1            1           19h
 ```
 
-#### 4. Build a new Scoring Image.
+#### 4. Build a new Scoring Image
 
-Change value of the ***SCORE_SCRIPT*** variable in the [azdo-abtest-pipeline.yml](./.pipelines/azdo-abtest-pipeline.yml) to point to ***scoreA.py*** and merge it to the master branch.
+Change value of the ***SCORE_SCRIPT*** variable in the [azdo-abtest-pipeline.yml](./.pipelines/azdo-abtest-pipeline.yml) to point to ***scoring/scoreA.py*** and merge it to the master branch.
 
 **Note:** ***scoreA.py*** and ***scoreB.py*** files used in this tutorial are just mockups returning either "New Model A" or "New Model B" respectively. They are used to demonstrate the concept of testing two scoring images with different models or scoring code. In real life you would implement a scoring file similar to [score.py](./../code/scoring/score.py) (see [getting started](./getting_started.md)).
 
@@ -61,7 +60,6 @@ It will automatically trigger the pipeline and deploy a new scoring image with t
 | Blue_Green |0 |100 |Old green image is removed. The new blue image is copied as green.<br/>Blue and Green images are equal.<br/>All traffic (100%) is routed to the blue image.|
 | Green_100 |100 |0 |All traffic (100%) is routed to the green image.<br/>The blue image is removed|
 
-
 **Note:** The pipeline performs the rollout without any pausing. You may want to configure [Approvals and Checks](https://docs.microsoft.com/en-us/azure/devops/pipelines/process/approvals?view=azure-devops&tabs=check-pass) for the stages on your environment for better experience of the model testing. The environment ***abtestenv*** will be added automatically to your AzDo project after the first pipeline run.
 
 At each stage you can verify how the traffic is routed sending requests to $GATEWAY_IP/score with ***Postman*** or with ***curl***:
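The hunk above ends at the colon that introduces the doc's own request example. For reference, a check along these lines exercises the routed endpoint from Python; the payload shape is hypothetical and depends on the scoring script actually deployed, and GATEWAY_IP is assumed to already hold the Istio ingress gateway address.

```python
import json
import os
import urllib.request

# Assumes the Istio ingress gateway IP has been exported as GATEWAY_IP.
gateway_ip = os.environ["GATEWAY_IP"]

# Hypothetical payload; the real schema depends on the scoring script under test.
payload = json.dumps({"data": [[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]]}).encode("utf-8")

request = urllib.request.Request(
    url=f"http://{gateway_ip}/score",
    data=payload,
    headers={"Content-Type": "application/json"},
    method="POST",
)

with urllib.request.urlopen(request) as response:
    # With the mockup scoring scripts this returns "New Model A" or "New Model B",
    # in a ratio that reflects the current Istio traffic split.
    print(response.read().decode("utf-8"))
```

Repeating the request a number of times makes the configured blue/green traffic split visible in the responses.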
diff --git a/ml_service/util/create_scoring_image.py b/ml_service/util/create_scoring_image.py
index 41ff635c..0177ee72 100644
--- a/ml_service/util/create_scoring_image.py
+++ b/ml_service/util/create_scoring_image.py
@@ -28,14 +28,16 @@ sources_dir = e.sources_directory_train
 if (sources_dir is None):
     sources_dir = 'diabetes_regression'
-path_to_scoring = os.path.join(".", sources_dir, "scoring")
+score_script = os.path.join(".", sources_dir, e.score_script)
+score_file = os.path.basename(score_script)
+path_to_scoring = os.path.dirname(score_script)
 cwd = os.getcwd()
 # Copy conda_dependencies.yml into scoring as this method does not accept relative paths. # NOQA: E501
 shutil.copy(os.path.join(".", sources_dir, "conda_dependencies.yml"),
             path_to_scoring)
 os.chdir(path_to_scoring)
 
 image_config = ContainerImage.image_configuration(
-    execution_script=e.score_script,
+    execution_script=score_file,
     runtime="python",
     conda_file="conda_dependencies.yml",
     description="Image with ridge regression model",
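Whatever file SCORE_SCRIPT points to ends up packaged as the execution_script above. The scoreA.py/scoreB.py files referenced in the canary guide are mockups; a minimal sketch of what such a mockup could look like, assuming the standard Azure ML init()/run() entry points (the repo's real files may differ), is:

```python
import json


def init():
    # A real scoring script would load the registered model here, as the
    # repo's score.py does; the mockup has nothing to load.
    pass


def run(raw_data):
    # Ignore the input and return a fixed marker so that the Istio traffic
    # split between the two images is easy to observe from the responses.
    return json.dumps({"result": "New Model A"})
```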
diff --git a/ml_service/util/env_variables.py b/ml_service/util/env_variables.py
index 2386a5b3..1bd69529 100644
--- a/ml_service/util/env_variables.py
+++ b/ml_service/util/env_variables.py
@@ -36,7 +36,6 @@ def __init__(self):
         self._experiment_name = os.environ.get("EXPERIMENT_NAME")
         self._model_version = os.environ.get('MODEL_VERSION')
         self._image_name = os.environ.get('IMAGE_NAME')
-        self._model_path = os.environ.get('MODEL_PATH')
         self._db_cluster_id = os.environ.get("DB_CLUSTER_ID")
         self._score_script = os.environ.get("SCORE_SCRIPT")
         self._collection_uri = os.environ.get("SYSTEM_COLLECTIONURI")
@@ -134,10 +133,6 @@ def model_version(self):
     def image_name(self):
         return self._image_name
 
-    @property
-    def model_path(self):
-        return self._model_path
-
     @property
     def score_script(self):
         return self._score_script
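After this change, downstream scripts read the scoring path through the Env wrapper instead of a MODEL_PATH setting. A minimal usage sketch, assuming the Env class is importable from the repo's ml_service/util package and that SCORE_SCRIPT is supplied via .env or the pipeline variable template:

```python
import os

from ml_service.util.env_variables import Env

# Normally provided by .env or the pipeline variable template shown above.
os.environ.setdefault("SCORE_SCRIPT", "scoring/score.py")

e = Env()
print(e.score_script)  # 'scoring/score.py'
# e.model_path no longer exists; the scoring location is derived from
# SOURCES_DIR_TRAIN + SCORE_SCRIPT instead (see create_scoring_image.py above).
```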