From dbad9772ba80636f274cecf017ad95f7c7043ade Mon Sep 17 00:00:00 2001 From: JS Date: Wed, 9 Oct 2019 13:17:20 -0400 Subject: [PATCH] Staging (#365) * README updates (#358) * Updating environment.yml file in master (#323) * readme updates * mv media to scenarios folder * fixes * Update README.md * simplification of language, removing redundancy * added target audience section * Update SETUP.md * Update README.md * Update environment.yml * Update SETUP.md * env-update (#359) * Hyperdrive notebook updates (#356) All tests are passing (except for unrelated AML deployment notebooks)
---
 README.md                                     | 100 ++-----
 SETUP.md                                      | 121 ++++++++
 scenarios/README.md                           |  45 +++
 scenarios/classification/README.md            |  28 --
 ...exploring_hyperparameters_on_azureml.ipynb | 274 ++++++++++++------
 .../detection/media/hyperdrive_cluster.jpg    | Bin 0 -> 65763 bytes
 {media => scenarios/media}/figures.pptx       | Bin
 {media => scenarios/media}/intro_ic_vis.jpg   | Bin
 {media => scenarios/media}/intro_is_vis.jpg   | Bin
 {media => scenarios/media}/intro_iseg_vis.jpg | Bin
 {media => scenarios/media}/intro_od_vis.jpg   | Bin
 tests/smoke/test_azureml_notebooks.py         |  46 ++-
 12 files changed, 402 insertions(+), 212 deletions(-)
 create mode 100644 SETUP.md
 create mode 100644 scenarios/detection/media/hyperdrive_cluster.jpg
 rename {media => scenarios/media}/figures.pptx (100%)
 rename {media => scenarios/media}/intro_ic_vis.jpg (100%)
 rename {media => scenarios/media}/intro_is_vis.jpg (100%)
 rename {media => scenarios/media}/intro_iseg_vis.jpg (100%)
 rename {media => scenarios/media}/intro_od_vis.jpg (100%)

diff --git a/README.md b/README.md
index 86fffd6..42fbc02 100644
--- a/README.md
+++ b/README.md
@@ -1,20 +1,26 @@
 # Computer Vision

-In recent years, we see an extra-ordinary growth in Computer Vision, with applications in face recognition, image understanding, search, drones, mapping, semi-autonomous and autonomous vehicles. Key essence to many of these applications are visual recognition tasks such as image classification, object detection and image similarity. Researchers have been applying newer deep learning methods to achieve state-of-the-art(SOTA) results on these challenging visual recognition tasks.
+In recent years, we've seen extraordinary growth in Computer Vision, with applications in face recognition, image understanding, search, drones, mapping, semi-autonomous and autonomous vehicles. Key to many of these applications are visual recognition tasks such as image classification, object detection, and image similarity.

-This repository provides examples and best practice guidelines for building computer vision systems. The focus of the repository is on state-of-the-art methods that are popular among researchers and practitioners working on problems involving image recognition, object detection and image similarity.
-
-These examples are provided as Jupyter notebooks and common utility functions. All examples use PyTorch as the deep learning library.
-
-
-## Overview
-
-The goal of this repository is to accelerate the development of computer vision applications. Rather than creating implementions from scratch, the focus is on providing examples and links to existing state-of-the-art libraries. In addition, having worked in this space for many years, we aim to answer common questions, point out frequently observed pitfalls, and show how to use the cloud for training and deployment.
+This repository provides examples and best practice guidelines for building computer vision systems.
The goal of this repository is to build a comprehensive set of tools and examples that leverage recent advances in Computer Vision algorithms, neural architectures, and the operationalization of such systems. Rather than creating implementations from scratch, we draw from existing state-of-the-art libraries and build additional utility around loading image data, optimizing and evaluating models, and scaling up to the cloud. In addition, having worked in this space for many years, we aim to answer common questions, point out frequently observed pitfalls, and show how to use the cloud for training and deployment. We hope that these examples and utilities can significantly reduce the “time to market” by simplifying the experience of going from the definition of a business problem to the development of a working solution. In addition, the example notebooks serve as guidelines and showcase best practices and usage of the tools.
+These examples are provided as [Jupyter notebooks](scenarios) and common [utility functions](utils_cv). All examples use PyTorch as the underlying deep learning library.
+
+## Target Audience
+
+Our target audience for this repository includes data scientists and machine learning engineers with varying levels of Computer Vision knowledge, as our content is source-code-only and targets custom machine learning modelling. The utilities and examples provided are intended to be solution accelerators for real-world vision problems.
+
+## Get Started
+
+To get started, navigate to the [Setup Guide](SETUP.md), which lists
+instructions on how to set up the compute environment and dependencies needed to run the
+notebooks in this repo. Once your environment is set up, navigate to the
+[Scenarios](scenarios) folder and start exploring the notebooks.
+
 ## Scenarios

-The following is a summary of commonly used Computer Vision scenarios that are covered in this repository. For each of these scenarios, we give you the tools to effectively build your own model. This includes tasks such as fine-tuning your own model on your own data, to more complex tasks such as hard-negative mining and even model deployment. See all supported scenarios [here](scenarios).
+The following is a summary of commonly used Computer Vision scenarios that are covered in this repository. For each of these scenarios, we give you the tools to effectively build your own model. This ranges from simple tasks, such as fine-tuning your own model on your own data, to more complex tasks such as hard-negative mining and even model deployment; a minimal training sketch is shown further below. See all supported scenarios [here](scenarios).

 | Scenario | Description |
 | -------- | ----------- |
@@ -22,50 +28,7 @@ The following is a summary of commonly used Computer Vision scenarios that are c
 | [Similarity](scenarios/similarity) | Image Similarity is a way to compute a similarity score given a pair of images. Given an image, it allows you to identify the most similar image in a given dataset. |
 | [Detection](scenarios/detection) | Object Detection is a supervised machine learning technique that allows you to detect the bounding box of an object within an image. |

-## Getting Started
-To get started:
-
-1. (Optional) Create an Azure Data Science Virtual Machine with e.g. a V100 GPU ([instructions](https://docs.microsoft.com/en-us/azure/machine-learning/data-science-virtual-machine/provision-deep-learning-dsvm), [price table](https://azure.microsoft.com/en-us/pricing/details/virtual-machines/windows/)).
-1. Install Anaconda with Python >= 3.6. 
[Miniconda](https://conda.io/miniconda.html). This step can be skipped if working on a Data Science Virtual Machine.
-1. Clone the repository
-    ```
-    git clone https://github.com/Microsoft/ComputerVision
-    ```
-1. Install the conda environment, you'll find the `environment.yml` file in the root directory. To build the conda environment:
-    > If you are using Windows, remove `- pycocotools>=2.0` from the `environment.yaml`
-    ```
-    conda env create -f environment.yml
-    ```
-1. Activate the conda environment and register it with Jupyter:
-    ```
-    conda activate cv
-    python -m ipykernel install --user --name cv --display-name "Python (cv)"
-    ```
-    If you would like to use [JupyterLab](https://jupyterlab.readthedocs.io/en/stable/), install `jupyter-webrtc` widget:
-    ```
-    jupyter labextension install jupyter-webrtc
-    ```
-    > If you are using Windows run at this point:
-    > - `pip install Cython`
-    > - `pip install git+https://github.com/philferriere/cocoapi.git#egg=pycocotools^&subdirectory=PythonAPI`
-1. Start the Jupyter notebook server
-    ```
-    jupyter notebook
-    ```
-1. At this point, you should be able to run the [notebooks](#scenarios) in this repo.
-
-As an alternative to the steps above, and if one wants to install only
-the 'utils_cv' library (without creating a new conda environment),
-this can be done by running
-
-```bash
-pip install git+https://github.com/microsoft/ComputerVision.git@master#egg=utils_cv
-```
-
-or by downloading the repo and then running `pip install .` in the
-root directory.
-
-## Introduction
+## Computer Vision on Azure

 Note that for certain computer vision problems, you may not need to build your own models. Instead, pre-built or easily customizable solutions exist which do not require any custom coding or machine learning expertise. We strongly recommend evaluating if these can sufficiently solve your problem. If these solutions are not applicable, or the accuracy of these solutions is not sufficient, then resorting to more complex and time-consuming custom approaches may be necessary.

@@ -77,8 +40,6 @@ are a set of pre-trained REST APIs which can be called for image tagging, face r

 - [Custom Vision](https://azure.microsoft.com/en-us/services/cognitive-services/custom-vision-service/) is a SaaS service to train and deploy a model as a REST API given a user-provided training set. All steps including image upload, annotation, and model deployment can be performed using either the UI or a Python SDK. Training image classification or object detection models can be achieved with minimal machine learning expertise. Custom Vision offers more flexibility than using the pre-trained Cognitive Services APIs, but requires the user to bring and annotate their own data.

-## Build Your Own Computer Vision Model
-
 If you need to train your own model, the following services and links provide additional information that is likely useful.

 - [Azure Machine Learning service (AzureML)](https://azure.microsoft.com/en-us/services/machine-learning-service/)
@@ -87,27 +48,6 @@ is a service that helps users accelerate the training and deploying of machine l

 - [Azure AI Reference architectures](https://docs.microsoft.com/en-us/azure/architecture/reference-architectures/ai/training-python-models)
provide a set of examples (backed by code) of how to build common AI-oriented workloads that leverage multiple cloud components. While not computer vision specific, these reference architectures cover several machine learning workloads such as model deployment or batch scoring.
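+
+To give a feel for what training with this repository looks like, the minimal sketch below fine-tunes an object detection model on a small sample dataset using the `utils_cv` utilities (this is the training sketch referenced in the Scenarios section above). The calls mirror the detection notebooks in this repo, but treat the exact module paths and the `DetectionLearner` wrapper as assumptions and refer to the notebooks for the authoritative usage:
+
+```python
+from utils_cv.common.data import unzip_url
+from utils_cv.detection.data import Urls
+from utils_cv.detection.dataset import DetectionDataset
+from utils_cv.detection.model import DetectionLearner, get_pretrained_fasterrcnn
+
+# Download and unzip a small sample dataset, then split it into train/test sets
+data_path = unzip_url(Urls.fridge_objects_path, exist_ok=True)
+data = DetectionDataset(data_path, train_pct=0.75)
+
+# Load a pre-trained Faster R-CNN model (+1 for the background class)
+model = get_pretrained_fasterrcnn(num_classes=len(data.labels) + 1)
+
+# Fine-tune on the sample data and report average precision after each epoch.
+# NOTE: DetectionLearner is shown as used in the detection notebooks; its API may differ slightly.
+detector = DetectionLearner(data, model)
+detector.fit(10, lr=0.001)
+print(f"Average precision after each epoch: {detector.ap}")
+```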
-
-## Computer Vision Domains
-
-Most applications in computer vision (CV) fall into one of these 4 categories:
-
-- **Image classification**: Given an input image, predict what object is present in the image. This is typically the easiest CV problem to solve, however classification requires objects to be reasonably large in the image.
-
-       Image classification visualization
-
-- **Object Detection**: Given an input image, identify and locate which objects are present (using rectangular coordinates). Object detection can find small objects in an image. Compared to image classification, both model training and manually annotating images is more time-consuming in object detection, since both the label and location are required.
-
-       Object detect visualization
-
-- **Image Similarity** Given an input image, find all similar objects in images from a reference dataset. Here, rather than predicting a label and/or rectangle, the task is to sort through a reference dataset to find objects similar to that found in the query image.
-
-       Image similarity visualization
-
-- **Image Segmentation** Given an input image, assign a label to every pixel (e.g., background, bottle, hand, sky, etc.). In practice, this problem is less common in industry, in large part due to time required to label the ground truth segmentation required in order to train a solution.
-
-       Image segmentation visualization
-
 ## Build Status

 ### VM Testing

@@ -133,11 +73,5 @@
 ## Contributing
 This project welcomes contributions and suggestions. Please see our [contribution guidelines](CONTRIBUTING.md).

-## Data/Telemetry
-The Azure Machine Learning image classification notebooks ([20_azure_workspace_setup](classification/notebooks/20_azure_workspace_setup.ipynb), [21_deployment_on_azure_container_instances](classification/notebooks/21_deployment_on_azure_container_instances.ipynb), [22_deployment_on_azure_kubernetes_service](classification/notebooks/22_deployment_on_azure_kubernetes_service.ipynb), [23_aci_aks_web_service_testing](classification/notebooks/23_aci_aks_web_service_testing.ipynb), and [24_exploring_hyperparameters_on_azureml](classification/notebooks/24_exploring_hyperparameters_on_azureml.ipynb)) collect browser usage data and send it to Microsoft to help improve our products and services. Read Microsoft's [privacy statement to learn more](https://privacy.microsoft.com/en-US/privacystatement).
-To opt out of tracking, please go to the raw `.ipynb` files and remove the following line of code (the URL will be slightly different depending on the file):
-```sh
- "![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/ComputerVision/classification/notebooks/21_deployment_on_azure_container_instances.png)"
-```
diff --git a/SETUP.md b/SETUP.md
new file mode 100644
index 0000000..07fc8d3
--- /dev/null
+++ b/SETUP.md
@@ -0,0 +1,121 @@
+# Setup Guide
+
+This document describes how to set up all the dependencies needed to run the notebooks
+in this repository.
+
+Many computer vision scenarios are extremely computationally heavy. Training a
+model often requires a machine with a GPU; on a CPU alone it would simply be too slow.
+We recommend using the GPU-enabled [Azure Data Science Virtual Machine (DSVM)](https://azure.microsoft.com/en-us/services/virtual-machines/data-science-virtual-machines/) since it comes pre-loaded with many of the prerequisites needed to efficiently do computer vision.
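+
+As a quick sanity check that a GPU is actually visible to PyTorch (the deep learning library used throughout this repository), you can run the short snippet below. This is only an illustrative check, not part of the setup steps, and the device name in the comment is just an example of what a V100-equipped VM might report:
+
+```python
+import torch
+
+# True if PyTorch was built with CUDA support and can see a GPU
+print(torch.cuda.is_available())
+
+if torch.cuda.is_available():
+    # Name of the first visible GPU, e.g. "Tesla V100-PCIE-16GB"
+    print(torch.cuda.get_device_name(0))
+```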
+
+To scale up or to operationalize your models, we recommend setting up [Azure
+ML](https://docs.microsoft.com/en-us/azure/machine-learning/). Our notebooks
+provide instructions on how to use it.
+
+
+## Table of Contents
+
+1. [Compute Environments](#compute-environments)
+1. [System Requirements](#system-requirements)
+1. [Installation](#installation)
+1. [Tunneling](#tunneling)
+
+## Compute Environments
+
+Most computer vision scenarios require a GPU, especially if you're training a
+custom model. We recommend using a virtual machine to run the notebooks on,
+specifically one with a powerful GPU. NVIDIA's Tesla V100 is a
+good choice and is available in most Azure regions.
+
+The easiest way to get started is to use the [Azure Data Science Virtual Machine (DSVM)](https://azure.microsoft.com/en-us/services/virtual-machines/data-science-virtual-machines/). This VM comes installed with all the system requirements needed to run the notebooks in this repository. If you choose this option, you can skip the [System Requirements](#system-requirements) step in this guide, as those requirements come pre-installed on the DSVM.
+
+Here are some ways you can create the DSVM:
+
+__Provision a Data Science VM with the Azure Portal or CLI__
+
+You can spin up a Data Science VM directly using the Azure portal. To do so, follow
+[this](https://docs.microsoft.com/en-us/azure/machine-learning/data-science-virtual-machine/dsvm-ubuntu-intro)
+link, which shows you how to provision your Data Science VM through the portal.
+
+You can alternatively use the Azure command line (CLI). Follow
+[this](https://docs.microsoft.com/en-us/cli/azure/azure-cli-vm-tutorial?view=azure-cli-latest)
+link to learn more about the Azure CLI and how it can be used to provision
+resources.
+
+__Virtual Machine Builder__
+
+One easy way to create your DSVM is to use the [VM Builder](../contrib/vm_builder) tool located inside of the 'contrib' folder in the root directory of the repo. Simply run `python contrib/vm_builder/vm_builder.py` at the root level of the repo and this tool will preconfigure your virtual machine with the appropriate settings for working with this repository.
+
+## System Requirements
+
+__Requirements__
+
+* A machine running Linux (e.g. Ubuntu >= 16.04 LTS) or Windows
+* Miniconda or Anaconda with Python version >= 3.6.
+   * This is pre-installed on the Azure DSVM, so the following steps can be run directly. To set up your local machine, [Miniconda](https://docs.conda.io/en/latest/miniconda.html) is a quick way to get started.
+   * It is recommended to update conda to the latest version: `conda update -n base -c defaults conda`
+
+> NOTE: For Image Classification, Windows is up to 10x slower in training than Linux. You can set `num_workers=0`, but even then it will be up to 2x slower.
+
+> NOTE: For Object Detection, Windows is about 20% slower in training, but about the same speed for inference.
+
+__Dependencies__
+
+Make sure you have CUDA Toolkit version 9.0 or above installed on your machine. You can run the command below in your terminal to check.
+
+```
+nvcc --version
+```
+
+If you don't have CUDA Toolkit, or don't have the right version, please download it from here: [CUDA Toolkit](https://developer.nvidia.com/cuda-toolkit)
+
+## Installation
+
+To install the repo and its dependencies perform the following steps:
+
+1. Install Anaconda with Python >= 3.6, e.g. [Miniconda](https://conda.io/miniconda.html). This step can be skipped if working on a Data Science Virtual Machine.
+1. 
Clone the repository
+    ```
+    git clone https://github.com/Microsoft/ComputerVision
+    ```
+1. Install the conda environment; you'll find the `environment.yml` file in the root directory. To build the conda environment:
+    ```
+    conda env create -f environment.yml
+    ```
+1. Activate the conda environment and register it with Jupyter:
+    ```
+    conda activate cv
+    python -m ipykernel install --user --name cv --display-name "Python (cv)"
+    ```
+1. Start the Jupyter notebook server
+    ```
+    jupyter notebook
+    ```
+1. At this point, you should be able to run the [notebooks](#scenarios) in this repo.
+
+__pip install__
+
+As an alternative to the steps above, and if you only want to install
+the 'utils_cv' library (without creating a new conda environment),
+this can be done using pip install:
+
+```bash
+pip install git+https://github.com/microsoft/ComputerVision.git@master#egg=utils_cv
+```
+
+> NOTE: if you install this repo using this method, you will not have the notebooks loaded by default.
+
+## Tunneling
+
+If your compute environment is on a VM in the cloud, you can open a tunnel from your VM to your local machine using the following command:
+```
+$ ssh -L <local_port>:<remote_address>:<remote_port> <username>@<server-ip>
+```
+
+For example, if I want to run `jupyter notebook --port 8888` on my VM and
+view the notebooks in my local browser on `localhost:9999`, I
+would ssh into my VM using the following command:
+```
+$ ssh -L 9999:localhost:8888 <username>@<server-ip>
+```
+
+This command will allow your local machine's port 9999 to access your remote
+machine's port 8888.
diff --git a/scenarios/README.md b/scenarios/README.md
index e69de29..b64d240 100644
--- a/scenarios/README.md
+++ b/scenarios/README.md
@@ -0,0 +1,45 @@
+# Overview
+
+| Scenario | Description |
+| -------- | ----------- |
+| [Classification](classification) | Image Classification is a supervised machine learning technique that allows you to learn and predict the category of a given image. |
+| [Similarity](similarity) | Image Similarity is a way to compute a similarity score given a pair of images. Given an image, it allows you to identify the most similar image in a given dataset. |
+| [Detection](detection) | Object Detection is a supervised machine learning technique that allows you to detect the bounding box of an object within an image. |
+
+# Scenarios
+
+While the field of Computer Vision is growing rapidly, the majority of vision applications fall into one of these 4 categories:
+
+- **Image classification**: Given an input image, predict what object is present in the image. This is typically the easiest CV problem to solve; however, classification requires objects to be reasonably large in the image.
+
+       Image classification visualization
+
+- **Object Detection**: Given an input image, identify and locate which objects are present (using rectangular coordinates). Object detection can find small objects in an image. Compared to image classification, both model training and manually annotating images are more time-consuming in object detection, since both the label and location are required.
+
+       Object detection visualization
+
+- **Image Similarity**: Given an input image, find all similar objects in images from a reference dataset. Here, rather than predicting a label and/or rectangle, the task is to sort through a reference dataset to find objects similar to that found in the query image.
+
+       Image similarity visualization
+
+- **Image Segmentation**: Given an input image, assign a label to every pixel (e.g., background, bottle, hand, sky, etc.). In practice, this problem is less common in industry, in large part due to the time required to label the ground-truth segmentations needed to train a solution.
+
+       Image segmentation visualization
+
+# Data/Telemetry
+
+The following notebooks collect browser usage data and send it to Microsoft to help improve our products and services:
+- [classification/20_azure_workspace_setup](classification/20_azure_workspace_setup.ipynb)
+- [classification/21_deployment_on_azure_container_instances](classification/21_deployment_on_azure_container_instances.ipynb)
+- [classification/22_deployment_on_azure_kubernetes_service](classification/22_deployment_on_azure_kubernetes_service.ipynb)
+- [classification/23_aci_aks_web_service_testing](classification/23_aci_aks_web_service_testing.ipynb)
+- [classification/24_exploring_hyperparameters_on_azureml](classification/24_exploring_hyperparameters_on_azureml.ipynb)
+- [detection/11_exploring_hyperparameters_on_azureml](detection/11_exploring_hyperparameters_on_azureml.ipynb)
+
+Read Microsoft's [privacy statement to learn more](https://privacy.microsoft.com/en-US/privacystatement).
+
+To opt out of tracking, please go to the raw `.ipynb` files and remove the following line of code (the URL will be slightly different depending on the file):
+
+```sh
+ "![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/ComputerVision/classification/notebooks/21_deployment_on_azure_container_instances.png)"
+```
diff --git a/scenarios/classification/README.md b/scenarios/classification/README.md
index e83744c..263be6d 100644
--- a/scenarios/classification/README.md
+++ b/scenarios/classification/README.md
@@ -33,34 +33,6 @@ We have also found that some browsers do not render Jupyter widgets correctly. I
 | [23_aci_aks_web_service_testing.ipynb](23_aci_aks_web_service_testing.ipynb)| Tests the deployed models on either ACI or AKS. |
 | [24_exploring_hyperparameters_on_azureml.ipynb](24_exploring_hyperparameters_on_azureml.ipynb)| Performs highly parallel parameter sweeping using AzureML's HyperDrive. |

-
-## Using a Virtual Machine
-
-You may want to use a virtual machine to run the notebooks. Doing so will give you a lot more flexibility -- whether it is using a GPU enabled machine or simply working in Linux.
-
-__Data Science Virtual Machine Builder__
-
-One easy way to create your VM is to use the 'create_dsvm.py' tool located inside of the 'tools' folder in the root directory of the repo. Simply run `python tools/create_dsvm.py` at the root level of the repo. This tool preconfigures your virtual machine with the appropriate settings for working with this repository.
-
-__Using the Azure Portal or CLI__
-
-You can also spin up a VM directly using the Azure portal. For this repository,
-you will want to create a Data Science Virtual Machine (DSVM). To do so, follow
-[this](https://docs.microsoft.com/en-us/azure/machine-learning/data-science-virtual-machine/dsvm-ubuntu-intro)
-link that shows you how to provision your VM through the portal.
-
-You can alternatively use the Azure command line (CLI) as well. Follow
-[this](https://docs.microsoft.com/en-us/cli/azure/azure-cli-vm-tutorial?view=azure-cli-latest)
-link to learn more about the Azure CLI and how it can be used to provision
-resources.
-
-Once your virtual machine has been created, ssh and tunnel into the machine, then run the "Getting started" steps inside of it. The 'create_dsvm' tool will show you how to properly perform the tunneling too. If you created your virtual machine using the portal or the CLI, you can tunnel your jupyter notebook ports using the following command:
-```
-$ssh -L local_port:remote_address:remote_port username@server.com
-```
-
-
-
 ## Azure-enhanced notebooks

 Azure products and services are used in certain notebooks to enhance the efficiency of developing classification systems at scale.
diff --git a/scenarios/detection/11_exploring_hyperparameters_on_azureml.ipynb b/scenarios/detection/11_exploring_hyperparameters_on_azureml.ipynb
index 100e293..d0b98e0 100644
--- a/scenarios/detection/11_exploring_hyperparameters_on_azureml.ipynb
+++ b/scenarios/detection/11_exploring_hyperparameters_on_azureml.ipynb
@@ -41,7 +41,9 @@
   {
    "cell_type": "code",
    "execution_count": 1,
-   "metadata": {},
+   "metadata": {
+    "collapsed": true
+   },
    "outputs": [],
    "source": [
     "import os\n",
@@ -49,6 +51,7 @@
     "from distutils.dir_util import copy_tree\n",
     "import numpy as np\n",
     "import scrapbook as sb\n",
+    "import uuid\n",
     "\n",
     "import azureml.core\n",
     "from azureml.core import Workspace, Experiment\n",
@@ -62,6 +65,7 @@
     "import azureml.widgets as widgets\n",
     "\n",
     "sys.path.append(\"../../\")\n",
+    "from utils_cv.common.azureml import get_or_create_workspace\n",
     "from utils_cv.common.data import unzip_url\n",
     "from utils_cv.detection.data import Urls"
    ]
@@ -76,7 +80,9 @@
   {
    "cell_type": "code",
    "execution_count": 2,
-   "metadata": {},
+   "metadata": {
+    "collapsed": true
+   },
    "outputs": [],
    "source": [
     "%reload_ext autoreload\n",
@@ -93,8 +99,9 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 34,
+   "execution_count": 3,
    "metadata": {
+    "collapsed": true,
    "tags": [
     "parameters"
    ]
@@ -108,15 +115,18 @@
    "workspace_region = \"YOUR_WORKSPACE_REGION\" #Possible values eastus, eastus2, etc.\n",
    "\n",
    "# Choose a size for our cluster and the maximum number of nodes\n",
-   "VM_SIZE = \"STANDARD_NC6\" #\"STANDARD_NC6\", STANDARD_NC6S_V3\"\n",
-   "MAX_NODES = 10\n",
+   "VM_SIZE = \"STANDARD_NC6\"  # or \"STANDARD_NC6S_V3\"\n",
+   "MAX_NODES = 8\n",
    "\n",
    "# Hyperparameter grid search space\n",
-   "IM_MAX_SIZES = [100,200] #Default is 1333 pixels, defining small values here to speed up training\n",
-   "LEARNING_RATES = np.linspace(1e-2, 1e-5, 4).tolist()\n",
+   "IM_MAX_SIZES = [600]  # Default is 1333 pixels; using a smaller value here speeds up training\n",
+   "LEARNING_RATES = [1e-4, 3e-4, 1e-3, 3e-3, 1e-2]\n",
    "\n",
    "# Image data\n",
-   "DATA_PATH = unzip_url(Urls.fridge_objects_path, exist_ok=True)"
+   "DATA_PATH = unzip_url(Urls.fridge_objects_path, exist_ok=True)\n",
+   "\n",
+   "# Path to utils_cv library\n",
+   "UTILS_DIR = os.path.join('..', '..', 'utils_cv')"
   ]
  },
@@ -132,22 +142,23 @@
  {
   "cell_type": "code",
   "execution_count": null,
-  "metadata": {},
+  "metadata": {
+   "collapsed": true
+  },
   "outputs": [],
   "source": [
-   "from utils_cv.common.azureml import get_or_create_workspace\n",
-   "\n",
    "ws = get_or_create_workspace(\n",
-   "    subscription_id,\n",
-   "    resource_group,\n",
-   "    workspace_name,\n",
-   "    workspace_region)\n",
+   "    subscription_id, resource_group, workspace_name, workspace_region\n",
+   ")\n",
    "\n",
    "# Print the workspace attributes\n",
-   "print('Workspace name: ' + ws.name, \n",
-   "      'Workspace region: ' + ws.location, \n",
-   "      'Subscription id: ' + ws.subscription_id, \n",
-   "      'Resource group: ' + ws.resource_group, sep = '\n')"
+ "print(\n", + " \"Workspace name: \" + ws.name,\n", + " \"Workspace region: \" + ws.location,\n", + " \"Subscription id: \" + ws.subscription_id,\n", + " \"Resource group: \" + ws.resource_group,\n", + " sep=\"\\n\",\n", + ")" ] }, { @@ -169,8 +180,12 @@ "name": "stdout", "output_type": "stream", "text": [ - "Found existing compute target.\n", - "{'currentNodeCount': 0, 'targetNodeCount': 0, 'nodeStateCounts': {'preparingNodeCount': 0, 'runningNodeCount': 0, 'idleNodeCount': 0, 'unusableNodeCount': 0, 'leavingNodeCount': 0, 'preemptedNodeCount': 0}, 'allocationState': 'Steady', 'allocationStateTransitionTime': '2019-08-30T16:15:49.268000+00:00', 'errors': None, 'creationTime': '2019-08-30T14:31:48.860219+00:00', 'modifiedTime': '2019-08-30T14:32:04.865042+00:00', 'provisioningState': 'Succeeded', 'provisioningStateTransitionTime': None, 'scaleSettings': {'minNodeCount': 0, 'maxNodeCount': 10, 'nodeIdleTimeBeforeScaleDown': 'PT120S'}, 'vmPriority': 'Dedicated', 'vmSize': 'STANDARD_NC6'}\n" + "Creating a new compute target...\n", + "Creating\n", + "Succeeded\n", + "AmlCompute wait for completion finished\n", + "Minimum number of nodes requested have been provisioned\n", + "{'currentNodeCount': 0, 'targetNodeCount': 0, 'nodeStateCounts': {'preparingNodeCount': 0, 'runningNodeCount': 0, 'idleNodeCount': 0, 'unusableNodeCount': 0, 'leavingNodeCount': 0, 'preemptedNodeCount': 0}, 'allocationState': 'Steady', 'allocationStateTransitionTime': '2019-09-30T18:20:25.067000+00:00', 'errors': None, 'creationTime': '2019-09-30T18:18:06.217384+00:00', 'modifiedTime': '2019-09-30T18:20:38.458332+00:00', 'provisioningState': 'Succeeded', 'provisioningStateTransitionTime': None, 'scaleSettings': {'minNodeCount': 0, 'maxNodeCount': 8, 'nodeIdleTimeBeforeScaleDown': 'PT120S'}, 'vmPriority': 'Dedicated', 'vmSize': 'STANDARD_NC6'}\n" ] } ], @@ -180,23 +195,31 @@ "try:\n", " # Retrieve if a compute target with the same cluster name already exists\n", " compute_target = ComputeTarget(workspace=ws, name=CLUSTER_NAME)\n", - " print('Found existing compute target.')\n", - " \n", + " print(\"Found existing compute target.\")\n", + "\n", "except ComputeTargetException:\n", " # If it doesn't already exist, we create a new one with the name provided\n", - " print('Creating a new compute target...')\n", - " compute_config = AmlCompute.provisioning_configuration(vm_size=VM_SIZE,\n", - " min_nodes=0,\n", - " max_nodes=MAX_NODES)\n", + " print(\"Creating a new compute target...\")\n", + " compute_config = AmlCompute.provisioning_configuration(\n", + " vm_size=VM_SIZE, min_nodes=0, max_nodes=MAX_NODES\n", + " )\n", "\n", " # create the cluster\n", " compute_target = ComputeTarget.create(ws, CLUSTER_NAME, compute_config)\n", " compute_target.wait_for_completion(show_output=True)\n", "\n", - "# we can use get_status() to get a detailed status for the current cluster. \n", + "# we can use get_status() to get a detailed status for the current cluster.\n", "print(compute_target.get_status().serialize())" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The compute cluster and its status can be seen in the portal. 
For example, in the screenshot below, it is automatically resizing (eventually to 0 nodes) to adjust to the number of open runs:\n",
    "<img src=\"media/hyperdrive_cluster.jpg\" alt=\"Compute cluster\">"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
@@ -209,6 +232,7 @@
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
+   "collapsed": true,
    "scrolled": true
   },
   "outputs": [],
@@ -216,12 +240,10 @@
    "# Retrieving default datastore that got automatically created when we setup a workspace\n",
    "ds = ws.get_default_datastore()\n",
    "\n",
-   "# We now upload the data to the 'data' folder on the Azure portal\n",
+   "# We now upload the data to a unique subfolder to avoid accidentally training/evaluating on older images as well.\n",
+   "data_subfolder = str(uuid.uuid4())\n",
    "ds.upload(\n",
-   "    src_dir=DATA_PATH,\n",
-   "    target_path='data',\n",
-   "    overwrite=True, # overwrite data if it already exists on the Azure blob storage\n",
-   "    show_progress=True\n",
+   "    src_dir=DATA_PATH, target_path=data_subfolder, overwrite=False, show_progress=True\n",
    ")"
   ]
  },
@@ -240,26 +262,28 @@
  },
  {
   "cell_type": "code",
-  "execution_count": 17,
-  "metadata": {},
+  "execution_count": 7,
+  "metadata": {
+   "collapsed": true
+  },
   "outputs": [],
   "source": [
    "# Create a folder for the training script and copy the utils_cv library into that folder\n",
    "script_folder = os.path.join(os.getcwd(), \"hyperdrive\")\n",
    "os.makedirs(script_folder, exist_ok=True)\n",
-   "_ = copy_tree(os.path.join('..', '..', 'utils_cv'), os.path.join(script_folder, 'utils_cv'))"
+   "_ = copy_tree(UTILS_DIR, os.path.join(script_folder, 'utils_cv'))"
   ]
  },
  {
   "cell_type": "code",
-  "execution_count": 35,
+  "execution_count": 8,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Overwriting C:\\Users\\pabuehle\\Desktop\\ComputerVision\\scenarios\\detection\\hyperdrive/train.py\n"
     ]
    }
   ],
@@ -282,12 +306,12 @@
    "from utils_cv.common.gpu import which_processor\n",
    "which_processor()\n",
    "\n",
-   "\n",
    "# Parse arguments passed by Hyperdrive\n",
    "parser = argparse.ArgumentParser()\n",
    "parser.add_argument('--data-folder', type=str, dest='data_dir')\n",
-   "parser.add_argument('--epochs', type=int, dest='epochs', default=10)\n",
-   "parser.add_argument('--batch_size', type=int, dest='batch_size', default=1)\n",
+   "parser.add_argument('--data-subfolder', type=str, dest='data_subfolder')\n",
+   "parser.add_argument('--epochs', type=int, dest='epochs', default=20) \n",
+   "parser.add_argument('--batch_size', type=int, dest='batch_size', default=2)\n",
    "parser.add_argument('--learning_rate', type=float, dest='learning_rate', default=1e-4)\n",
    "parser.add_argument('--min_size', type=int, dest='min_size', default=800)\n",
    "parser.add_argument('--max_size', type=int, dest='max_size', default=1333)\n",
@@ -303,9 +327,10 @@
    "params = vars(args)\n",
    "print(f\"params = {params}\")\n",
    "\n",
-   "# Getting training and validation data\n",
-   "path = os.path.join(params['data_dir'], \"data\")\n",
-   "data = DetectionDataset(path, train_pct=0.5, batch_size = params[\"batch_size\"])\n",
+   "# Get training and validation data\n",
+   "data_path = os.path.join(params['data_dir'], params[\"data_subfolder\"])\n",
+   "print(f\"data_path={data_path}\")\n",
+   "data = DetectionDataset(data_path, train_pct=0.5, batch_size = params[\"batch_size\"])\n",
    "print(\n",
    "    f\"Training dataset: {len(data.train_ds)} | Training DataLoader: {data.train_dl} \n \\
    Testing dataset: {len(data.test_ds)} | 
Testing DataLoader: {data.test_dl}\"\n", @@ -313,7 +338,7 @@ "\n", "# Get model\n", "model = get_pretrained_fasterrcnn(\n", - " num_classes = len(data.labels),\n", + " num_classes = len(data.labels)+1,\n", " min_size = params[\"min_size\"],\n", " max_size = params[\"max_size\"],\n", " rpn_pre_nms_top_n_train = params[\"rpn_pre_nms_top_n_train\"],\n", @@ -331,9 +356,12 @@ "detector.fit(params[\"epochs\"], lr=params[\"learning_rate\"], print_freq=30)\n", "print(f\"Average precision after each epoch: {detector.ap}\")\n", "\n", + "# Get accuracy on test set at IOU=0.5:0.95\n", + "acc = float(detector.ap[-1])\n", + "\n", "# Add log entries\n", "run = Run.get_context()\n", - "run.log(\"accuracy\", float(detector.ap[-1])) # Logging our primary metric 'accuracy'\n", + "run.log(\"accuracy\", float(acc)) # Logging our primary metric 'accuracy'\n", "run.log(\"data_dir\", params[\"data_dir\"])\n", "run.log(\"epochs\", params[\"epochs\"])\n", "run.log(\"batch_size\", params[\"batch_size\"])\n", @@ -362,11 +390,13 @@ }, { "cell_type": "code", - "execution_count": 36, - "metadata": {}, + "execution_count": 9, + "metadata": { + "collapsed": true + }, "outputs": [], "source": [ - "exp = Experiment(workspace=ws, name='hyperparameter-tuning')" + "exp = Experiment(workspace=ws, name=\"hyperparameter-tuning\")" ] }, { @@ -382,15 +412,15 @@ }, { "cell_type": "code", - "execution_count": 37, - "metadata": {}, + "execution_count": 10, + "metadata": { + "collapsed": true + }, "outputs": [], "source": [ "# Grid-search\n", - "param_sampling = GridParameterSampling( {\n", - " '--learning_rate': choice(LEARNING_RATES),\n", - " '--max_size': choice(IM_MAX_SIZES)\n", - " }\n", + "param_sampling = GridParameterSampling(\n", + " {\"--learning_rate\": choice(LEARNING_RATES), \"--max_size\": choice(IM_MAX_SIZES)}\n", ")" ] }, @@ -404,21 +434,28 @@ }, { "cell_type": "code", - "execution_count": 38, - "metadata": {}, + "execution_count": 11, + "metadata": { + "collapsed": true + }, "outputs": [], "source": [ - "script_params = {\n", - " '--data-folder': ds.as_mount()\n", - "}\n", + "script_params = {\"--data-folder\": ds.as_mount(), \"--data-subfolder\": data_subfolder}\n", "\n", - "est = Estimator(source_directory=script_folder,\n", - " script_params=script_params,\n", - " compute_target=compute_target,\n", - " entry_script='train.py',\n", - " use_gpu=True,\n", - " pip_packages=['nvidia-ml-py3','fastai'],\n", - " conda_packages=['scikit-learn', 'pycocotools>=2.0','torchvision==0.3','cudatoolkit==9.0'])" + "est = Estimator(\n", + " source_directory=script_folder,\n", + " script_params=script_params,\n", + " compute_target=compute_target,\n", + " entry_script=\"train.py\",\n", + " use_gpu=True,\n", + " pip_packages=[\"nvidia-ml-py3\", \"fastai\"],\n", + " conda_packages=[\n", + " \"scikit-learn\",\n", + " \"pycocotools>=2.0\",\n", + " \"torchvision==0.3\",\n", + " \"cudatoolkit==9.0\",\n", + " ],\n", + ")" ] }, { @@ -430,18 +467,20 @@ }, { "cell_type": "code", - "execution_count": 39, - "metadata": {}, + "execution_count": 12, + "metadata": { + "collapsed": true + }, "outputs": [], "source": [ "hyperdrive_run_config = HyperDriveConfig(\n", " estimator=est,\n", " hyperparameter_sampling=param_sampling,\n", - " policy=None, # Do not use any early termination \n", - " primary_metric_name='accuracy',\n", + " policy=None, # Do not use any early termination\n", + " primary_metric_name=\"accuracy\",\n", " primary_metric_goal=PrimaryMetricGoal.MAXIMIZE,\n", - " max_total_runs=None, # Set to none to run all possible grid parameter 
combinations,\n",
-    "    max_concurrent_runs=MAX_NODES\n",
+    "    max_total_runs=None,  # Set to None to run all possible grid parameter combinations\n",
+    "    max_concurrent_runs=MAX_NODES,\n",
     ")"
   ]
  },
@@ -460,14 +499,14 @@
  },
  {
   "cell_type": "code",
-  "execution_count": 40,
+  "execution_count": 13,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
-     "Url to hyperdrive run on the Azure portal: https://mlworkspace.azure.ai/portal/subscriptions/2ad17db4-e26d-4c9e-999e-adae9182530c/resourceGroups/pabuehle_delme2_hyperdrive/providers/Microsoft.MachineLearningServices/workspaces/pabuehle_ws/experiments/hyperparameter-tuning/runs/hyperparameter-tuning_1567193416225\n"
+     "Url to hyperdrive run on the Azure portal: https://mlworkspace.azure.ai/portal/subscriptions/989b90f7-da4f-41f9-84c9-44848802052d/resourceGroups/pabuehle_delme2_hyperdrive/providers/Microsoft.MachineLearningServices/workspaces/pabuehle_ws/experiments/hyperparameter-tuning/runs/hyperparameter-tuning_1569867670036119\n"
     ]
    }
   ],
@@ -478,13 +517,13 @@
  },
  {
   "cell_type": "code",
-  "execution_count": 41,
+  "execution_count": 14,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
-      "model_id": "c80070535f744b8aab68560b31aa38fe",
+      "model_id": "0f08d8354768463788969180b5d031ab",
       "version_major": 2,
       "version_minor": 0
      },
@@ -502,27 +541,27 @@
  },
  {
   "cell_type": "code",
-  "execution_count": 33,
+  "execution_count": 17,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "{'runId': 'hyperparameter-tuning_1569867670036119',\n",
       " 'target': 'gpu-cluster',\n",
       " 'status': 'Completed',\n",
       " 'startTimeUtc': '2019-09-30T18:21:10.209419Z',\n",
       " 'endTimeUtc': '2019-09-30T18:55:14.128089Z',\n",
       " 'properties': {'primary_metric_config': '{\"name\": \"accuracy\", \"goal\": \"maximize\"}',\n",
       "  'runTemplate': 'HyperDrive',\n",
       "  'azureml.runsource': 'hyperdrive',\n",
       "  'platform': 'AML',\n",
       "  'baggage': 'eyJvaWQiOiAiNWFlYTJmMzAtZjQxZC00ZDA0LWJiOGUtOWU0NGUyZWQzZGQ2IiwgInRpZCI6ICI3MmY5ODhiZi04NmYxLTQxYWYtOTFhYi0yZDdjZDAxMWRiNDciLCAidW5hbWUiOiAiMDRiMDc3OTUtOGRkYi00NjFhLWJiZWUtMDJmOWUxYmY3YjQ2In0',\n",
       "  'ContentSnapshotId': '0218d18a-3557-4fdf-8c29-8d43297621ed'},\n",
       " 'logFiles': {'azureml-logs/hyperdrive.txt': 'https://pabuehlestorage579709b90.blob.core.windows.net/azureml/ExperimentRun/dcid.hyperparameter-tuning_1569867670036119/azureml-logs/hyperdrive.txt?sv=2018-11-09&sr=b&sig=PCMArksPFcTc1rk1DMhFP6wvoZbhrpmnZbDCV8uInWw%3D&st=2019-09-30T18%3A45%3A14Z&se=2019-10-01T02%3A55%3A14Z&sp=r'}}"
      ]
     },
-    "execution_count": 33,
+    "execution_count": 17,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
@@ -541,7 +580,7 @@
    "```\n",
    "We can also cancel the run with \n",
    "```python \n",
-   "hyperdrive_run_config.cancel().\n",
+   "hyperdrive_run.cancel()\n",
    "```\n",
    "\n",
    "Once all the child-runs are finished, we can get the best run and the metrics."
@@ -549,22 +588,22 @@ }, { "cell_type": "code", - "execution_count": 42, + "execution_count": 18, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "* Best Run Id:hyperparameter-tuning_1567193416225_4\n", + "* Best Run Id:hyperparameter-tuning_1569867670036119_4\n", "Run(Experiment: hyperparameter-tuning,\n", - "Id: hyperparameter-tuning_1567193416225_4,\n", + "Id: hyperparameter-tuning_1569867670036119_4,\n", "Type: azureml.scriptrun,\n", "Status: Completed)\n", "\n", "* Best hyperparameters:\n", - "{'--data-folder': '$AZUREML_DATAREFERENCE_workspaceblobstore', '--learning_rate': '0.01', '--max_size': '200'}\n", - "Accuracy = 0.8988979153074632\n", + "{'--data-folder': '$AZUREML_DATAREFERENCE_workspaceblobstore', '--data-subfolder': '01679d79-1c47-49b8-88c3-d657f36b0c0f', '--learning_rate': '0.01', '--max_size': '600'}\n", + "Accuracy = 0.8918015856432082\n", "Learning Rate = 0.01\n" ] } @@ -573,7 +612,7 @@ "# Get best run and print out metrics\n", "best_run = hyperdrive_run.get_best_run_by_primary_metric()\n", "best_run_metrics = best_run.get_metrics()\n", - "parameter_values = best_run.get_details()['runDefinition']['arguments']\n", + "parameter_values = best_run.get_details()[\"runDefinition\"][\"arguments\"]\n", "best_parameters = dict(zip(parameter_values[::2], parameter_values[1::2]))\n", "\n", "print(f\"* Best Run Id:{best_run.id}\")\n", @@ -581,7 +620,50 @@ "print(\"\\n* Best hyperparameters:\")\n", "print(best_parameters)\n", "print(f\"Accuracy = {best_run_metrics['accuracy']}\")\n", - "print(\"Learning Rate =\", best_run_metrics['learning_rate'])" + "print(\"Learning Rate =\", best_run_metrics[\"learning_rate\"])" + ] + }, + { + "cell_type": "code", + "execution_count": 19, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "[{'run_id': 'hyperparameter-tuning_1569867670036119_4',\n", + " 'hyperparameters': '{\"--learning_rate\": 0.01, \"--max_size\": 600}',\n", + " 'best_primary_metric': 0.8918015856432082,\n", + " 'status': 'Completed'},\n", + " {'run_id': 'hyperparameter-tuning_1569867670036119_3',\n", + " 'hyperparameters': '{\"--learning_rate\": 0.003, \"--max_size\": 600}',\n", + " 'best_primary_metric': 0.8760658534573615,\n", + " 'status': 'Completed'},\n", + " {'run_id': 'hyperparameter-tuning_1569867670036119_2',\n", + " 'hyperparameters': '{\"--learning_rate\": 0.001, \"--max_size\": 600}',\n", + " 'best_primary_metric': 0.8282478586888209,\n", + " 'status': 'Completed'},\n", + " {'run_id': 'hyperparameter-tuning_1569867670036119_1',\n", + " 'hyperparameters': '{\"--learning_rate\": 0.0003, \"--max_size\": 600}',\n", + " 'best_primary_metric': 0.7405032357605712,\n", + " 'status': 'Completed'},\n", + " {'run_id': 'hyperparameter-tuning_1569867670036119_0',\n", + " 'hyperparameters': '{\"--learning_rate\": 0.0001, \"--max_size\": 600}',\n", + " 'best_primary_metric': 0.47537724312149304,\n", + " 'status': 'Completed'},\n", + " {'run_id': 'hyperparameter-tuning_1569867670036119_preparation',\n", + " 'hyperparameters': None,\n", + " 'best_primary_metric': None,\n", + " 'status': 'Completed'}]" + ] + }, + "execution_count": 19, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "hyperdrive_run.get_children_sorted_by_primary_metric()" ] }, { @@ -596,11 +678,13 @@ { "cell_type": "code", "execution_count": null, - "metadata": {}, + "metadata": { + "collapsed": true + }, "outputs": [], "source": [ "# Log some outputs using scrapbook which are used during testing to verify correct notebook 
execution\n",
-    "sb.glue(\"best_accuracy\", best_run_metrics['accuracy'])"
+    "sb.glue(\"best_accuracy\", best_run_metrics[\"accuracy\"])"
   ]
  }
 ],
diff --git a/scenarios/detection/media/hyperdrive_cluster.jpg b/scenarios/detection/media/hyperdrive_cluster.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..a6e2aa1529a4eba91e11a9e2c71d8cecf1b015b7
GIT binary patch
literal 65763
z={dQm^B;wjpHety>L~oR9(Ju#Y^%^$ z2ynsr6KTPJ^}Tw$MKnpMbJ}>Ccwzn^hxBf1gZ-veFo~$ar&B^Hq?Q-@d{B6+5l5B$ zBD10Xz40S^&@&}v z%KAFuWnva-s%z83t=4ny?-WVg4Ri$EiLqiNyI9!;te(%cvW98q8s?Cd)uf(sDl-tm zgy$FM&Cn_r&h@1yNIH&#!BBkc0*48w8x({Mt z8d?>c0W#E&5M{l4mOtCUq&a*YqjS3Dl(E2BztgGSF9^R-(9CO^)WJ)d>0Fl_H*ru9 zbZ3JvTJ>t*h|m=n0GYai5j*t!vO9EBuQ;-@3tl?6)1{s=l46_z1}jW9%#m9*P&aAn z;8CuDN?(!~kKEP;rmb>~NP#T**Mf?c!0v z9&>&;>dbdiR}~+sWWD$3Uf{v*5mmaR=~YrbH&<#le=-K0k%uF?Jmg98wVot{cd?)< z#XTC-Yus@r55$Tzb-&6P+%bPmj)9tslky)ClEHci)8MMa_Xk zC`(wrE^YfJ>kM0kdV2V=qved^A&3l*k;|NvNdanYM#pEewM_ovx?i1gn>Qw_IgTDI zBib&3N{p3ED=FH_ZAzXtn%JA>vwu*WbMN-8Da~gx(-cVd$eoN6_kJ9I8hcIm6|&3Cj<8Q{ReAzZ!RyGa=3b4|Zhn=6bpYY|?$ezhGybmPpJgAbU ztt(mMJkJk9W6J5x+R_9f#Yk(sYNlu{GqZ9-_AUPT=&>AQy!kc*;{NiMSEO|(uewb( z(N91WITtXd^bb%-P z=k%z;&~B=5h0`5k$X(oWy5e64s&u3+2EG+Zc{kuS8PX_3XC`2T@UT59z!%QwiEmp| zpXLT?vabp-N;gC{$9Rb)&(%q6)2y_Ze6cO|bxXjrwfT1Jsh5-wDJX#R9lNqKB;UOh z{INj#$0-_rNMN!JP?8q6|6-lN5?O_)^WV zp9&K5NnPAleJ&a*wPKO<0)*NiXX%V*nMkQSn2YNGy9uR-11ha zZK29(K!oH%uE2}(jb7!CskXMegAc`VZx}zoNpCp{KUw`zGw(A|?I_;Bn-(?NRc&ta zK*2nJQgLkJfqu^9*|qfvB6QKvUgKSf?0Xn+7FZ3z6{^C0be%F_eG znwj0_$It82L7+mbKX8QpWFFbDJ$33;eQR^elb|CHEWmf_lCF7saF6BT5IB)E?ezxN zr%5i`lc)@{zbnY|pONpF!r0VaU*|_(9V5C5hw@lPc)$kwhdy>S+Nrf5^QY#!3{mdU zIzf^Z(##$=AMS#le*&5z#ovT6Jle9K9#;IewEZsIT%yLn{hX09q>k&mdU9t=+$@&VpvKBD@!yZg1H z!nxW`L(%ki!gOMP=eK`YoLhx;d1Z>cV_|yu>Ybshdk=kR8L2AxSnf{;f%*ksCb?W^ zgvL4hqjuOqv{5`|fCeuOrAih;pa~^Jd-JKK zDLtJj5+qBDe}(UUZ9{3TP5dz{XvBsBMFcZr)h8kP> zpuei68k4_wIQpntCtJ@w2`P8fJa`!eJ#t7qp=HY?@190g+hnnU zR?mv35)%6C5g*$=^5Ci5{AWkZt#Wq+=>!<3ajU!ZV@_%To;@WVJDP4`Oq?=IE|s`K zZn$REWqokvZFvYim(x|4%S|#q4nmFa?{;ec9rMJlb+J<@OPNf?K^?0@?czVI_!dQg-E)A=#tdmkl^>PWto-hkt_ zNpR%ZTkyQ+>!Y#dXmfXx)}S)=Fv4MhjZfi|MQNe1-am>b?>QlDOFO z6fp#nP_VI)DUwT#pSi{W=qH8@fa-rbI|7F!o9aO}Hk&RhOHg~V&e-b+up*Q2uch%% zVhZ-=;*WWqwJO>&)No2m-e>8gSR;n(x@I@;i3IZJU0ITMX3pU4o-vX+rc;NJ_fvfP%HZWH5dej6)Z>M8Dl9d5Js z;wF#aDNqniLF_o?CXXK`25M>lZ8H8fFaHNq@ac;qOGvJ0Nx2~xH z#QrEt%_KO_QK2S9-tq%96rvdLWUv=>2h{fZw~d8&c6OzAmQS0|y|QwL&^QDOAxr>3 zgjkS;KW#j_4^n;yO76AfAF&lLFrp!iO)X-UALSvhxhemX!(kkQ6^{GlLuL`aeKdP* zQ%fE78jr3SZ;(%wP#@9Vb&xKs!kL(Lg|FnPbe52NS|)ql0DfE`TJO(Y9@Ibi2gTVP z(5bRKwJ~QTVpvmFc|Bgw$S(15{n3i)LNRK-Q?Ztuj~!>93kBlrAkVf=R7`R1nbn_e z0{;KlO#bf#K@&^9n{5mYxV2@Se5f2@Zk%Y7GNQt#Yawzb7J!v=GBo#SKXr|h@4^k} zSn9KjzHCMH`?zJ|%#HQy9^ltDpdOMtOJV6^3#n0V7i|*7EEd>`f8-$b{gu>;OJZeR zXJ$K+1d1K9jCTUuv~ujHt@t71-*yGRb%e3$|JY3~I<8wnw6m>&yq}US7!*mwsVG>W zDKMePzN^+7n%TZ?(udQSKh_%9tQt*NW1pVjNI5_Kk7=obJJD=A~nJb2v z6c#5!%QA#4NgNJQ1k56wb#C`cy&*)>C6N8N0y+lQO6=nIJQiTG0$VUn|9tf;HOwEe z>A6MuaH8%Eq(Jk+0(@E3l$K{?@1gC-bm}2*o#_7d$VNGqF2%K8q8)M-Vg@jP=YH^q zCj$Sr5&o@{gBb5_E)pc0E*JC;lwXtCEN7J*A6CHcI1Kl4%Y`^tzo{t@q6B6wWgLj` zi30d8Ko!dwfC>Qr`2zP3Bh*0{*ZY1L3>MjePEosVo6X`G{v2-goJ{gvFFo!ENs3}))E6pT>LNi9H;CG z@c#F;hnU{)WGk&K8L8?)R3Vcner)I331V z!B8k3vd=1w@`;i4CA=}CBwSOYFnYidtEwX~3l^aDcXF)%Ga;_uaMb@>Z2Pb8 z{+H4wUs^?Zal_uFeuvxL=aAAz zi2Q$Rra{UujHW#M5AF3_UEAhm!0^TeYcpyJcFyV z@V<1UfMX-!vR?<;@l3r}fbRI)<^OUg-bzFKH0+r4@lv_*Gj$bcWP!X9J^-jtK)3L-pyvKy<%4 zbFC?+==NL^rPT0lAcEl5+}tnssr06!Thq?Pdf1ys>8N*ROI)AeU%R;sN^=*meY|LnxJ8lfpo1O1q8q_pugXdMV&q{* zh5(K2Kr9QGY7PoHEHtyY^9PTUBZHE7fySapSpZX ztWDm$WXn7r8eQfY?wJ4p34B{Chitbywd7oGp>`1V*);H8W32e)OLHDYlz2-F*JgfO z-ke0GO-}UL=@i|Rim}I8%m121^t_Ou@vSmQj22RB{u-Z9dvXj8*|D2O0jL*hy}gbz zq|nw~H}j3*oQVyb9#*MHkxfPA1(iIqcTgPrz9w|!%EmVTitLeH+fZ?TVCZZy26tco z6Y$GT4jRiMze_nO%96E~?fmyOks?ify;ZF~D+a})J|nZ#hhY7r7q40j4ig$4%RANk z>fLw1eac;JX|}!#Ge$JlkJ-E%6YnBh73>wZalLFF)E$glntg!VLa2JO_LLg|g_Gc# zFoqRv>ZLw(7#JnV(H5$JArAf()L17M7bjq$#+bmKka=setch6`)5F1DRNW&9HFS96 
zoVNx@)1s|%{W*@QdLL47^O8P#FRtK)oWFP%@&4U)^V#bMr)#yvzPcgtxpygfKS1j3 z60L~16Ld5%aUJF{z<`V`r~xackNoUue8)7~r!>xsi4)c=rg*{GkoVQ~f(xEO_ew%K zBstzS?PG9_mMH2w2MG+3@2J$!J`y|wG^P5Y$%1Vm{cuf$Dl2wsw3F_Vv* z7((QB(j9D0A6(5N(uVqpO*^1Wt1a)`egal-R#3Xiz3fe4OPVC&rQuvN&Zzf^U_YJ{ zYV%N9&u`7%JnGl|NFH1vt?e~vy4qH~BIk1q54b8_>VT*uAJIlnedOoxOg@H$m1wtZ z4Ic&C#!Lwjj0QOw)%SIP0RsSIF{hHRZ3p%IZP!bymd!>J?8CdECP8Cr>vXIsYtLrt z*3^TOFCjWl(8`M)d|PO!9iE^yakp)JyEn~V7J*tFoHOj=2)aWd56#IiS1)3dJ7s*J zCEMnA=It8N@f-2fP1F;#$>j*Hzc_M1U6Vka`B)xGfKVDZ4?i?qF>G;O4D2y3?Lf-J zUx(1%lAR5K{^Qt4YIP@C8wARnDvPW3F7gerB~KsQ!MeR1ON?f z=aC$w1iF5vt5%qrOJ9#o=@ICIbAnzT@CRp~k{D`%>4?7gfF579upmibvI+)8HO7fyCV;_l0b>u=exywOS9`Y<$ zA&scGwU&bI2MZD{FdBcnaTT-pLgX|;TozqbzvCq*xZh~m*m7Ex-z*EMV+_jLKGS=T z7}P_k^DZiWTme_y2#MM*2eUkW)8eKg)f;!)+FG_; z+YiMOLwmy(mwCt#v*7WAx@T~o!4=!|G(1|teQ1ALN1AJbi}n~kYNLlS5XjD1?Q9$a zO&hXAfW953Q!&T)bzEL`G^GU45tf}O`uR%c9)DC`?9+yY?vj5vSI3KPm=mxS4CK5zVbjq^Dgac3A@b>K1r7IV z%}bJxZIRabqSj2P+E$abeXlC}6UgiN1I4LElu4L_(U_8tW2Krw^b8b|jdjfdyAsg^ z+(|O!oCgZRTR3|$hiM1=(p!p^gnV8U(-jpJ+vuwgX>X>0mQ&gu$N@%Vu&Y;`h(S}r zP}3H;%6j+K@7vmZuH`MU5;y^COL&{p%D1{YjoXlFe z=l{K8Y5!RT?*BIbFN<|!nWJb7#Ik3kh^rq~KgYq_mcR4U3csb2LYYcIZn{jMbf?-- zKrJSe0Ir#H-yVM$S7(O|kJvx6LrW^F3_1eL8`5}oY=RT0kc;8rS4BP53(wMX&i(7Rn~UQ*l;N^Gn-_e?1>#C~>PWYL*`i6$7_&=8AzNq$cAd|4!0 zEX~h^)fNnre3N9@#1iK&)T3Gm!fC4YY#}8&cH=FM_$m)~q3P-^LP0=?Yu zZf|@`i%J@A|i`Rom%NravtQMOQB%4O6An`IM9(QfYM`Ld=QUND9eS zr3hx$4LuU)d&Aue-0|~DrzSu=Oi{g;uY0f>(c7dp^C7E^9cdNteDJcV2dziuMv8{` z$6h0J^m)@!ce!Pre$?xf2EFnsHaK#U9j;>^;Xk8QuWN;h*Ea^E$>mK+QE#nc3FzAb zg?(=U?O)lx28~VXBx-|QM6R3Az7J<@JP}W{U@Bq37mUVM(b_<|38*6vwa^xWFw+1a z$PVNy-<++ea=QXzL9(Ez%<+ht`|2tmLzblZ)58GU`LpZo+6^O{ZW*-pCz%o3^N9P& z#+h0D_(m+a@3&OoJENhG1eOq@yjft@9j2{$_Q zU)S;kvEF)(-Y~?jEU>)U(v?3j3=fo{9F#yr=X-=febT`StL1O_1P?d>gpI^vP$m~* zQ|}o2nD;T14CKodt?if*jdOzwnywH*MDOLD=DILX;d_Mi&%8LSsZ01OHiihT%$JW9 z`C#51iKNt+mS&Y#@FPuY53T9AlM#Oug=@)`JxO=pbm=>YAYVa=;P%4VQ)O|JF*vK0 z01yX`>g5BH2klM%TSX&d@y139ES`6BmDn!IkoH`Ywj2q}wRyFsxAs#{QZ+IXnjfmI z8JOz^@Ss@#Cxy_zIju#%e-Ih9_x%S6(m>IHUr0KQXMa%x`0j##a>GAff7Aw5-u@S# zNm{qxCJ??H8u6B8M%CaX)z5=mr7(h78)x_Sf6=N3NFYN%)X|Vz&KaKWn;@+ zLSSz>Mu5WB;^o!1Ae#4|_ChGmRl;ui)srkYtixiTSvh;J{PaA>Q_p>*F}oN$Ge8N2cR>jcTucF8osTUU%H>~Ve(SN zMS)>XinH9dz|hd3*P(PpjyqUh54QwB8W`Z_I$G4mtE}!`KLFY1&-9U6qq2mu##qIK zFbDRu=Ml8Jm~uCSw3{-aT6A356Cand<&-F8Q?ef~dVB+T+`lZfQC*1;9_1Jn9^quX zTqbrwbH>1MowCY408D~zNu4F$eJ=m5yIY)IS}If{PJDP0DQdTtjNl;_MKl(#8y4;w zm)tyPUrN>KrbTqJ0C!J0Mdx&4sF0$;>*<0yo+WmVJx5;FjXXl<*O3>T#eaZ?1>FNi z3v((h1$$w{e5ZNaHSz2y1;@;hJ5X^fZEdrklbXV`199wgTmkiSpcIyf*y0<&D!|+} zU?SsmpxQrHirv#>@=m?vQ7-9atYo+6SGqSA=sLDmqgGu-Y%5Q0Ez6{g+fYI6l!X)z z{#)teNygO+F%Dj2S}6v*po3DEvRc!RqH@T{kY(ZxXC#?J`kKv7@pDJ>V4^JC2kBh4 z*Y%kb`UUl~Vppv3{0g&`=o0%(>V_*S)Ep|dw&GsIuTUpRd8A#+Bx`CE2H~r5;64M} zn~u0gnTXEn`?5BS-Gb5UZ!LK0TSlbmc-{mA6Rib?TNGY^}+Tc0`@|8 zIvn#q8clP1n_CL=mI;BGsgFJOa}-ti-mw`Z^h{eT8CpZg!}K17IYSP^h1p9>SO@vN zPvgXtsuMR4*|tFkSbV#elCO$R#6oSpUr8jFjIVarEXJ{g%X{GtX#okmNp5*qphy2K z8zmPFH&u-JP;AeOVkMAJGzPZ^>0xDn)VLS-+4WA=x1wK3yxY*j^q`9)JrK`&iNO?F z)SXN6lZ`T)r08ytN1Ti_y#ZD9smvp`BF96|8iHzgLw)1u4Y(PLs_RllAYwH<7;#qf z=y|e>&uC|>f7O$(VQG2#c9{Mu){UtLJm?A^ujX)hDDcugp7Z_+RX=K+j@YjqHhju6 zf5t?UkAm|Ec8ZJ?mV#~QEab$myp!%02H8m+$)4UlgPvhA|4ZA6XIU%9vz8~sPu0_+ zohIrXH$6L_;Fyv0mi5gyv+oj1sue9S%uA|l<3-0u_YeZGiDnxbm~tcvHPhD5^}T>1 zPXj7@8BcM;nM$lZbH_F_+OvG6~Jx4M_;|1qmu{c zqq_rPSR#m5@GMu#l&u33gM)jw!RHRgT7K=~eys!ikrMck`zL)yTEpTt#Yc}(d$eL= z9rYGG3tCP3a!#M!Bu!Cld#^HHNs})U^ct4u+GCcI&l%9Xb^9fthYzzmJ}(6W={xot zwaVxe^BMJ%xdLZ7)&UZnc|yr;M+UUvp~LdlSDa{ja4vvbAqn_CR*`!~0hC9W0KD0w zRD}{*C*BZGKhtGy70Sd*^+-lw%= 
[base85-encoded binary image data omitted]

diff --git a/media/figures.pptx b/scenarios/media/figures.pptx
similarity index 100%
rename from media/figures.pptx
rename to scenarios/media/figures.pptx
diff --git a/media/intro_ic_vis.jpg b/scenarios/media/intro_ic_vis.jpg
similarity index 100%
rename from media/intro_ic_vis.jpg
rename to scenarios/media/intro_ic_vis.jpg
diff --git a/media/intro_is_vis.jpg b/scenarios/media/intro_is_vis.jpg
similarity index 100%
rename from media/intro_is_vis.jpg
rename to scenarios/media/intro_is_vis.jpg
diff --git a/media/intro_iseg_vis.jpg b/scenarios/media/intro_iseg_vis.jpg
similarity index 100%
rename from media/intro_iseg_vis.jpg
rename to scenarios/media/intro_iseg_vis.jpg
diff --git a/media/intro_od_vis.jpg b/scenarios/media/intro_od_vis.jpg
similarity index 100%
rename from media/intro_od_vis.jpg
rename to scenarios/media/intro_od_vis.jpg
"output.ipynb" +# ----- Image classification ---------------------------------------------------------- + + @pytest.mark.azuremlnotebooks -def test_20_notebook_run( +def test_ic_20_notebook_run( classification_notebooks, subscription_id, resource_group, @@ -35,7 +39,7 @@ def test_20_notebook_run( @pytest.mark.azuremlnotebooks -def test_21_notebook_run( +def test_ic_21_notebook_run( classification_notebooks, subscription_id, resource_group, @@ -60,7 +64,7 @@ def test_21_notebook_run( @pytest.mark.azuremlnotebooks -def test_22_notebook_run( +def test_ic_22_notebook_run( classification_notebooks, subscription_id, resource_group, @@ -85,7 +89,7 @@ def test_22_notebook_run( @pytest.mark.azuremlnotebooks -def test_23_notebook_run( +def test_ic_23_notebook_run( classification_notebooks, subscription_id, resource_group, @@ -108,7 +112,7 @@ def test_23_notebook_run( @pytest.mark.azuremlnotebooks -def test_24_notebook_run( +def test_ic_24_notebook_run( classification_notebooks, subscription_id, resource_group, @@ -135,4 +139,34 @@ def test_24_notebook_run( ) -# TODO add test for hyperparam object detection notebook +# # ----- Object detection ---------------------------------------------------------- + + +@pytest.mark.azuremlnotebooks +def test_od_11_notebook_run( + detection_notebooks, + subscription_id, + resource_group, + workspace_name, + workspace_region, +): + notebook_path = detection_notebooks["11"] + pm.execute_notebook( + notebook_path, + OUTPUT_NOTEBOOK, + parameters=dict( + PM_VERSION=pm.__version__, + subscription_id=subscription_id, + resource_group=resource_group, + workspace_name=workspace_name, + workspace_region=workspace_region, + MAX_NODES=3, + IM_MAX_SIZES=[200], + LEARNING_RATES=[1e-5, 3e-3], + UTILS_DIR="utils_cv", + ), + kernel_name=KERNEL_NAME, + ) + + nb_output = sb.read_notebook(OUTPUT_NOTEBOOK) + assert nb_output.scraps["best_accuracy"].data > 0.70