diff --git a/.flake8 b/.flake8
index f836d49..4d579d9 100644
--- a/.flake8
+++ b/.flake8
@@ -7,6 +7,10 @@
 # E501 Line too long (82 > 79 characters)
 # W503 Line break occurred before a binary operator
 # F403 'from module import *' used; unable to detect undefined names
+# F405 '' may be undefined, or defined from star imports
+# E402 module level import not at top of file
+# E731 do not assign a lambda expression, use a def
+# F821 undefined name 'get_ipython' --> from generated python files using nbconvert
-ignore = E203, E266, E501, W503, F403
-max-line-length = 79
\ No newline at end of file
+ignore = E203, E266, E501, W503, F403, F405, E402, E731, F821
+max-line-length = 79
diff --git a/.gitignore b/.gitignore
index ec91eda..8ee0749 100644
--- a/.gitignore
+++ b/.gitignore
@@ -114,3 +114,6 @@ image_classification/data/*
 
 # don't save .swp files
 *.swp
+
+# don't save .csv files
+*.csv
diff --git a/image_classification/environment.yml b/image_classification/environment.yml
index 0b055a4..921c43a 100644
--- a/image_classification/environment.yml
+++ b/image_classification/environment.yml
@@ -35,6 +35,7 @@ dependencies:
   - azureml-sdk[notebooks,contrib]==1.0.10
   - azure-storage>=0.36.0
   - black>=18.6b4
+  - lxml>=4.3.2
   - torchvision
   - memory-profiler>=0.54.0
   - nvidia-ml-py3>=7.352.0
diff --git a/image_classification/notebooks/00_webcam.ipynb b/image_classification/notebooks/00_webcam.ipynb
index 54379e1..e74699f 100644
--- a/image_classification/notebooks/00_webcam.ipynb
+++ b/image_classification/notebooks/00_webcam.ipynb
@@ -54,14 +54,17 @@
    "source": [
     "import sys\n",
     "sys.path.append(\"../\")\n",
-    "import io, time, urllib.request\n",
+    "import io\n",
+    "import time\n",
+    "import urllib.request\n",
     "import fastai\n",
     "from fastai.vision import *\n",
     "from ipywebrtc import CameraStream, ImageRecorder\n",
     "from ipywidgets import HBox, Label, Layout, Widget\n",
     "from torch.cuda import get_device_name\n",
     "from utils_ic.constants import IMAGENET_IM_SIZE\n",
-    "from utils_ic.datasets import imagenet_labels, data_path\n",
+    "from utils_ic.datasets import imagenet_labels\n",
+    "from utils_ic.common import data_path\n",
     "from utils_ic.imagenet_models import model_to_learner\n",
     "\n",
     "\n",
diff --git a/image_classification/notebooks/01_training_introduction.ipynb b/image_classification/notebooks/01_training_introduction.ipynb
index 5597f30..715a0ef 100644
--- a/image_classification/notebooks/01_training_introduction.ipynb
+++ b/image_classification/notebooks/01_training_introduction.ipynb
@@ -76,9 +76,9 @@
    "import sys\n",
    "sys.path.append(\"../\")\n",
    "from pathlib import Path\n",
-   "from utils_ic.datasets import Urls, unzip_url, data_path\n",
+   "from utils_ic.datasets import Urls, unzip_url\n",
    "from fastai.vision import *\n",
-   "from fastai.metrics import error_rate, accuracy"
+   "from fastai.metrics import accuracy"
   ]
  },
  {
diff --git a/image_classification/notebooks/11_exploring_hyperparameters.ipynb b/image_classification/notebooks/11_exploring_hyperparameters.ipynb
new file mode 100644
index 0000000..d5cefcd
--- /dev/null
+++ b/image_classification/notebooks/11_exploring_hyperparameters.ipynb
@@ -0,0 +1,1019 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Testing different Hyperparameters"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Let's say we want to learn more about __how different learning rates and different image sizes affect our model's accuracy when restricted to 10 epochs__, and we want to build an experiment to test \n",
+    "out these hyperparameters.\n",
+    "\n",
+    "In this notebook, we'll walk through how we use our Parameter Sweeper module with the following:\n",
+    "\n",
+    "- use Python to perform this experiment\n",
+    "- use the CLI to perform this experiment\n",
+    "- evaluate the results using Pandas"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "---"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 1,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import sys\n",
+    "sys.path.append(\"../\")\n",
+    "import os\n",
+    "\n",
+    "from utils_ic.common import ic_root_path\n",
+    "from utils_ic.datasets import unzip_url, Urls\n",
+    "from utils_ic.parameter_sweeper import *"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Let's download some data that we want to test on. To use the Parameter Sweeper tool for single label classification, we'll need to make sure that the data is stored such that images are sorted into their classes inside of a subfolder. In this notebook, we'll use the Fridge Objects dataset provided in `utils_ic.datasets.Urls`, which is stored in the correct format."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 2,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "input_data = unzip_url(Urls.fridge_objects_path, exist_ok=True)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Using Python"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "We start by creating the Parameter Sweeper object:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 3,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "sweeper = ParameterSweeper()"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Before we start testing, it's a good idea to see what the default parameters are. We can use the property `parameters` to easily see those default values."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 4,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "OrderedDict([('learning_rate', [0.0001]),\n",
+       "             ('epochs', [15]),\n",
+       "             ('batch_size', [16]),\n",
+       "             ('im_size', [299]),\n",
+       "             ('architecture',\n",
+       "              [<function resnet18>]),\n",
+       "             ('transform', [True]),\n",
+       "             ('dropout', [0.5]),\n",
+       "             ('weight_decay', [0.01]),\n",
+       "             ('training_schedule',\n",
+       "              [<TrainingSchedule.head_first_then_body>]),\n",
+       "             ('discriminative_lr', [False]),\n",
+       "             ('one_cycle_policy', [True])])"
+      ]
+     },
+     "execution_count": 4,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "sweeper.parameters"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Now that we know the defaults, we can pass it the parameters we want to test. \n",
+    "\n",
+    "In this notebook, we want to see the effect of different learning rates across different image sizes using only 10 epochs (the default number of epochs is 15). To do so, we run the `update_parameters` function as follows:\n",
+    "\n",
+    "```python\n",
+    "sweeper.update_parameters(learning_rate=[1e-3, 1e-4, 1e-5], im_size=[299, 499], epochs=[10])\n",
+    "```\n",
+    "\n",
+    "Notice that all parameters must be passed in as a list, including single values such as the number of epochs.\n",
+    "\n",
+    "These parameters will be used to calculate the number of permutations to run. In this case, we've passed in three options for learning rates, two for image sizes, and one for the number of epochs. This will result in 3 x 2 x 1 total permutations (in other words, 6 permutations).\n",
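+    "\n",
+    "As a quick sanity check, we can count the permutations ourselves. This is just a sketch of the arithmetic; `itertools.product` here mirrors the counting, not the sweeper's internals:\n",
+    "\n",
+    "```python\n",
+    "from itertools import product\n",
+    "\n",
+    "lrs, sizes, epochs = [1e-3, 1e-4, 1e-5], [299, 499], [10]\n",
+    "assert len(list(product(lrs, sizes, epochs))) == 6\n",
+    "```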
" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [], + "source": [ + "sweeper.update_parameters(learning_rate=[1e-3, 1e-4, 1e-5], im_size=[299, 499], epochs=[10])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now that we have our parameters defined, we call the `run()` function with the dataset to test on. \n", + "\n", + "We can also optionally pass in:\n", + "- the number of repetitions to run each permutation (default is 3)\n", + "- whether or not we want the training to stop early if the metric (accuracy) doesn't improve by 0.01 (1%) over 3 epochs (default is False)\n", + "\n", + "The `run` function returns a multi-index dataframe which we can work with right away." + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "this Learner object self-destroyed - it still exists, but no longer usable\n" + ] + }, + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
durationaccuracy
0PARAMETERS [learning_rate: 0.0001]|[epochs: 10]|[batch_size: 16]|[im_size: 299]|[arch: resnet18]|[transforms: True]|[dropout: 0.5]|[weight_decay: 0.01]|[training_schedule: head_first_then_body]|[discriminative_lr: False]|[one_cycle_policy: True]fridgeObjects16.6308600.818182
PARAMETERS [learning_rate: 0.0001]|[epochs: 10]|[batch_size: 16]|[im_size: 499]|[arch: resnet18]|[transforms: True]|[dropout: 0.5]|[weight_decay: 0.01]|[training_schedule: head_first_then_body]|[discriminative_lr: False]|[one_cycle_policy: True]fridgeObjects25.9823700.954545
PARAMETERS [learning_rate: 0.001]|[epochs: 10]|[batch_size: 16]|[im_size: 299]|[arch: resnet18]|[transforms: True]|[dropout: 0.5]|[weight_decay: 0.01]|[training_schedule: head_first_then_body]|[discriminative_lr: False]|[one_cycle_policy: True]fridgeObjects21.5507760.954545
PARAMETERS [learning_rate: 0.001]|[epochs: 10]|[batch_size: 16]|[im_size: 499]|[arch: resnet18]|[transforms: True]|[dropout: 0.5]|[weight_decay: 0.01]|[training_schedule: head_first_then_body]|[discriminative_lr: False]|[one_cycle_policy: True]fridgeObjects27.2244950.795455
PARAMETERS [learning_rate: 1e-05]|[epochs: 10]|[batch_size: 16]|[im_size: 299]|[arch: resnet18]|[transforms: True]|[dropout: 0.5]|[weight_decay: 0.01]|[training_schedule: head_first_then_body]|[discriminative_lr: False]|[one_cycle_policy: True]fridgeObjects16.7992830.340909
PARAMETERS [learning_rate: 1e-05]|[epochs: 10]|[batch_size: 16]|[im_size: 499]|[arch: resnet18]|[transforms: True]|[dropout: 0.5]|[weight_decay: 0.01]|[training_schedule: head_first_then_body]|[discriminative_lr: False]|[one_cycle_policy: True]fridgeObjects26.2790590.363636
1PARAMETERS [learning_rate: 0.0001]|[epochs: 10]|[batch_size: 16]|[im_size: 299]|[arch: resnet18]|[transforms: True]|[dropout: 0.5]|[weight_decay: 0.01]|[training_schedule: head_first_then_body]|[discriminative_lr: False]|[one_cycle_policy: True]fridgeObjects16.7838180.840909
PARAMETERS [learning_rate: 0.0001]|[epochs: 10]|[batch_size: 16]|[im_size: 499]|[arch: resnet18]|[transforms: True]|[dropout: 0.5]|[weight_decay: 0.01]|[training_schedule: head_first_then_body]|[discriminative_lr: False]|[one_cycle_policy: True]fridgeObjects26.0381970.818182
PARAMETERS [learning_rate: 0.001]|[epochs: 10]|[batch_size: 16]|[im_size: 299]|[arch: resnet18]|[transforms: True]|[dropout: 0.5]|[weight_decay: 0.01]|[training_schedule: head_first_then_body]|[discriminative_lr: False]|[one_cycle_policy: True]fridgeObjects16.7700780.863636
PARAMETERS [learning_rate: 0.001]|[epochs: 10]|[batch_size: 16]|[im_size: 499]|[arch: resnet18]|[transforms: True]|[dropout: 0.5]|[weight_decay: 0.01]|[training_schedule: head_first_then_body]|[discriminative_lr: False]|[one_cycle_policy: True]fridgeObjects26.1361800.886364
PARAMETERS [learning_rate: 1e-05]|[epochs: 10]|[batch_size: 16]|[im_size: 299]|[arch: resnet18]|[transforms: True]|[dropout: 0.5]|[weight_decay: 0.01]|[training_schedule: head_first_then_body]|[discriminative_lr: False]|[one_cycle_policy: True]fridgeObjects16.8745320.363636
PARAMETERS [learning_rate: 1e-05]|[epochs: 10]|[batch_size: 16]|[im_size: 499]|[arch: resnet18]|[transforms: True]|[dropout: 0.5]|[weight_decay: 0.01]|[training_schedule: head_first_then_body]|[discriminative_lr: False]|[one_cycle_policy: True]fridgeObjects26.5392960.363636
2PARAMETERS [learning_rate: 0.0001]|[epochs: 10]|[batch_size: 16]|[im_size: 299]|[arch: resnet18]|[transforms: True]|[dropout: 0.5]|[weight_decay: 0.01]|[training_schedule: head_first_then_body]|[discriminative_lr: False]|[one_cycle_policy: True]fridgeObjects16.9561390.909091
PARAMETERS [learning_rate: 0.0001]|[epochs: 10]|[batch_size: 16]|[im_size: 499]|[arch: resnet18]|[transforms: True]|[dropout: 0.5]|[weight_decay: 0.01]|[training_schedule: head_first_then_body]|[discriminative_lr: False]|[one_cycle_policy: True]fridgeObjects26.3146720.931818
PARAMETERS [learning_rate: 0.001]|[epochs: 10]|[batch_size: 16]|[im_size: 299]|[arch: resnet18]|[transforms: True]|[dropout: 0.5]|[weight_decay: 0.01]|[training_schedule: head_first_then_body]|[discriminative_lr: False]|[one_cycle_policy: True]fridgeObjects16.8849890.909091
PARAMETERS [learning_rate: 0.001]|[epochs: 10]|[batch_size: 16]|[im_size: 499]|[arch: resnet18]|[transforms: True]|[dropout: 0.5]|[weight_decay: 0.01]|[training_schedule: head_first_then_body]|[discriminative_lr: False]|[one_cycle_policy: True]fridgeObjects25.9475700.931818
PARAMETERS [learning_rate: 1e-05]|[epochs: 10]|[batch_size: 16]|[im_size: 299]|[arch: resnet18]|[transforms: True]|[dropout: 0.5]|[weight_decay: 0.01]|[training_schedule: head_first_then_body]|[discriminative_lr: False]|[one_cycle_policy: True]fridgeObjects17.1157050.409091
PARAMETERS [learning_rate: 1e-05]|[epochs: 10]|[batch_size: 16]|[im_size: 499]|[arch: resnet18]|[transforms: True]|[dropout: 0.5]|[weight_decay: 0.01]|[training_schedule: head_first_then_body]|[discriminative_lr: False]|[one_cycle_policy: True]fridgeObjects26.2033810.500000
\n", + "
" + ], + "text/plain": [ + " duration \\\n", + "0 PARAMETERS [learning_rate: 0.0001]|[epochs: 10]... fridgeObjects 16.630860 \n", + " PARAMETERS [learning_rate: 0.0001]|[epochs: 10]... fridgeObjects 25.982370 \n", + " PARAMETERS [learning_rate: 0.001]|[epochs: 10]|... fridgeObjects 21.550776 \n", + " PARAMETERS [learning_rate: 0.001]|[epochs: 10]|... fridgeObjects 27.224495 \n", + " PARAMETERS [learning_rate: 1e-05]|[epochs: 10]|... fridgeObjects 16.799283 \n", + " PARAMETERS [learning_rate: 1e-05]|[epochs: 10]|... fridgeObjects 26.279059 \n", + "1 PARAMETERS [learning_rate: 0.0001]|[epochs: 10]... fridgeObjects 16.783818 \n", + " PARAMETERS [learning_rate: 0.0001]|[epochs: 10]... fridgeObjects 26.038197 \n", + " PARAMETERS [learning_rate: 0.001]|[epochs: 10]|... fridgeObjects 16.770078 \n", + " PARAMETERS [learning_rate: 0.001]|[epochs: 10]|... fridgeObjects 26.136180 \n", + " PARAMETERS [learning_rate: 1e-05]|[epochs: 10]|... fridgeObjects 16.874532 \n", + " PARAMETERS [learning_rate: 1e-05]|[epochs: 10]|... fridgeObjects 26.539296 \n", + "2 PARAMETERS [learning_rate: 0.0001]|[epochs: 10]... fridgeObjects 16.956139 \n", + " PARAMETERS [learning_rate: 0.0001]|[epochs: 10]... fridgeObjects 26.314672 \n", + " PARAMETERS [learning_rate: 0.001]|[epochs: 10]|... fridgeObjects 16.884989 \n", + " PARAMETERS [learning_rate: 0.001]|[epochs: 10]|... fridgeObjects 25.947570 \n", + " PARAMETERS [learning_rate: 1e-05]|[epochs: 10]|... fridgeObjects 17.115705 \n", + " PARAMETERS [learning_rate: 1e-05]|[epochs: 10]|... fridgeObjects 26.203381 \n", + "\n", + " accuracy \n", + "0 PARAMETERS [learning_rate: 0.0001]|[epochs: 10]... fridgeObjects 0.818182 \n", + " PARAMETERS [learning_rate: 0.0001]|[epochs: 10]... fridgeObjects 0.954545 \n", + " PARAMETERS [learning_rate: 0.001]|[epochs: 10]|... fridgeObjects 0.954545 \n", + " PARAMETERS [learning_rate: 0.001]|[epochs: 10]|... fridgeObjects 0.795455 \n", + " PARAMETERS [learning_rate: 1e-05]|[epochs: 10]|... fridgeObjects 0.340909 \n", + " PARAMETERS [learning_rate: 1e-05]|[epochs: 10]|... fridgeObjects 0.363636 \n", + "1 PARAMETERS [learning_rate: 0.0001]|[epochs: 10]... fridgeObjects 0.840909 \n", + " PARAMETERS [learning_rate: 0.0001]|[epochs: 10]... fridgeObjects 0.818182 \n", + " PARAMETERS [learning_rate: 0.001]|[epochs: 10]|... fridgeObjects 0.863636 \n", + " PARAMETERS [learning_rate: 0.001]|[epochs: 10]|... fridgeObjects 0.886364 \n", + " PARAMETERS [learning_rate: 1e-05]|[epochs: 10]|... fridgeObjects 0.363636 \n", + " PARAMETERS [learning_rate: 1e-05]|[epochs: 10]|... fridgeObjects 0.363636 \n", + "2 PARAMETERS [learning_rate: 0.0001]|[epochs: 10]... fridgeObjects 0.909091 \n", + " PARAMETERS [learning_rate: 0.0001]|[epochs: 10]... fridgeObjects 0.931818 \n", + " PARAMETERS [learning_rate: 0.001]|[epochs: 10]|... fridgeObjects 0.909091 \n", + " PARAMETERS [learning_rate: 0.001]|[epochs: 10]|... fridgeObjects 0.931818 \n", + " PARAMETERS [learning_rate: 1e-05]|[epochs: 10]|... fridgeObjects 0.409091 \n", + " PARAMETERS [learning_rate: 1e-05]|[epochs: 10]|... fridgeObjects 0.500000 " + ] + }, + "execution_count": 6, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "df = sweeper.run(datasets=[input_data], reps=3); df" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Using the CLI" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Instead of using python to run this experiment, we may want to test from the CLI. We can do so by using the `scripts/benchmark.py` file. 
\n", + "\n", + "First we move up to the `/image_classification` directory." + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [], + "source": [ + "os.chdir(ic_root_path())" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "To reproduce the same test (different learning rates across different image sizes using only 8 epochs), and the same settings (3 repetitions, and no early_stopping) we can run the following:\n", + "\n", + "```sh\n", + "python scripts/sweep.py \n", + " --learning-rates 1e-3 1e-4 1e-5\n", + " --im-size 99 299\n", + " --epochs 5 \n", + " --repeat 3 \n", + " --no-early-stopping \n", + " --inputs \n", + " --output lr_bs_test.csv\n", + "```\n", + "\n", + "Additionally, we've added an output parameter, which will automatically dump our dataframe into a csv file.\n", + "\n", + "To simplify the command, we can use the acryonyms of the params. We can also remove `--no-early-stopping` as that is the default behavior.\n", + "\n", + "```sh\n", + "python scripts/sweep.py -lr 1e-3 1e-4 1e-5 -is 99 299 -e 5 -i -o lr_bs_test.csv\n", + "```" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [], + "source": [ + "# use {sys.executable} instead of just running `python` to ensure the command is executed using the environment cvbp\n", + "!{sys.executable} scripts/sweep.py -lr 1e-3 1e-4 1e-5 -is 99 299 -e 5 -i {input_data} -o data/lr_bs_test.csv" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Once the script completes, load the csv into a dataframe to explore it's contents. We'll want to specify `index_col=[0, 1, 2]` since it is a multi-index dataframe.\n", + "\n", + "```python\n", + "df = pd.read_csv(\"data/lr_bs_test.csv\", index_col=[0, 1, 2])\n", + "```" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "HINT: You can learn more about how to use the script with the `--help` flag." + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "usage: sweep.py [-h] [--learning-rate LEARNING_RATES [LEARNING_RATES ...]]\r\n", + " [--epoch EPOCHS [EPOCHS ...]]\r\n", + " [--batch-size BATCH_SIZES [BATCH_SIZES ...]]\r\n", + " [--im-size IM_SIZES [IM_SIZES ...]]\r\n", + " [--architecture {squeezenet1_1,resnet18,resnet34,resnet50} [{squeezenet1_1,resnet18,resnet34,resnet50} ...]]\r\n", + " [--transform TRANSFORMS [TRANSFORMS ...]]\r\n", + " [--dropout DROPOUTS [DROPOUTS ...]]\r\n", + " [--weight-decay WEIGHT_DECAYS [WEIGHT_DECAYS ...]]\r\n", + " [--training-schedule {head_only,body_only,head_first_then_body} [{head_only,body_only,head_first_then_body} ...]]\r\n", + " [--discriminative-lr {True,False} [{True,False} ...]]\r\n", + " [--one-cycle-policy ONE_CYCLE_POLICIES [ONE_CYCLE_POLICIES ...]]\r\n", + " (--inputs INPUTS [INPUTS ...] | --benchmark)\r\n", + " [--early-stopping] [--repeat REPEAT] [--output OUTPUT]\r\n", + " [--clean-up]\r\n", + "\r\n", + "This script is used to benchmark the different hyperparameters when it comes to doing image classification.\r\n", + "\r\n", + "This script will run all permutations of the parameters that are passed in.\r\n", + "\r\n", + "This script will run these tests on datasets provided in this repo. 
It will\r\n", + "create a temporary data directory, and delete it at the end.\r\n", + "\r\n", + "This script uses accuracy as the evaluation metric.\r\n", + "\r\n", + "Use [-W ignore] to ignore warning messages when running the script.\r\n", + "\r\n", + "optional arguments:\r\n", + " -h, --help show this help message and exit\r\n", + " --learning-rate LEARNING_RATES [LEARNING_RATES ...], -lr LEARNING_RATES [LEARNING_RATES ...]\r\n", + " Learning rate - recommended options: [1e-3, 1e-4, 1e-5] \r\n", + " --epoch EPOCHS [EPOCHS ...], -e EPOCHS [EPOCHS ...]\r\n", + " Epochs - recommended options: [3, 5, 10, 15]\r\n", + " --batch-size BATCH_SIZES [BATCH_SIZES ...], -bs BATCH_SIZES [BATCH_SIZES ...]\r\n", + " Batch sizes - recommended options: [8, 16, 32, 64]\r\n", + " --im-size IM_SIZES [IM_SIZES ...], -is IM_SIZES [IM_SIZES ...]\r\n", + " Image sizes - recommended options: [299, 499]\r\n", + " --architecture {squeezenet1_1,resnet18,resnet34,resnet50} [{squeezenet1_1,resnet18,resnet34,resnet50} ...], -a {squeezenet1_1,resnet18,resnet34,resnet50} [{squeezenet1_1,resnet18,resnet34,resnet50} ...]\r\n", + " Choose an architecture.\r\n", + " --transform TRANSFORMS [TRANSFORMS ...], -t TRANSFORMS [TRANSFORMS ...]\r\n", + " Tranform (data augmentation) - options: [True, False]\r\n", + " --dropout DROPOUTS [DROPOUTS ...], -d DROPOUTS [DROPOUTS ...]\r\n", + " Dropout - recommended options: [0.5]\r\n", + " --weight-decay WEIGHT_DECAYS [WEIGHT_DECAYS ...], -wd WEIGHT_DECAYS [WEIGHT_DECAYS ...]\r\n", + " Weight decay - recommended options: [0.01]\r\n", + " --training-schedule {head_only,body_only,head_first_then_body} [{head_only,body_only,head_first_then_body} ...], -ts {head_only,body_only,head_first_then_body} [{head_only,body_only,head_first_then_body} ...]\r\n", + " Choose a training schedule\r\n", + " --discriminative-lr {True,False} [{True,False} ...], -dl {True,False} [{True,False} ...]\r\n", + " Discriminative learning rate - options: [True, False]. To use discriminative learning rates, training schedule must not be 'head_only'\r\n", + " --one-cycle-policy ONE_CYCLE_POLICIES [ONE_CYCLE_POLICIES ...], -ocp ONE_CYCLE_POLICIES [ONE_CYCLE_POLICIES ...]\r\n", + " one cycle policy - options: [True, False]\r\n", + " --inputs INPUTS [INPUTS ...], -i INPUTS [INPUTS ...]\r\n", + " A list of data paths to run the tests on. The datasets must be structured so that each class is in a separate folder. <--benchmark> must be False\r\n", + " --benchmark Whether or not to use curated benchmark datasets to test. <--input> must be empty\r\n", + " --early-stopping Stop training early if possible\r\n", + " --repeat REPEAT, -r REPEAT\r\n", + " The number of times to repeat each permutation\r\n", + " --output OUTPUT, -o OUTPUT\r\n", + " The path of the output file.\r\n", + " --clean-up Remove input data or temporary data after the run. 
WARNING: If this flag is set, it will permanently remove input data.\r\n",
+      "\r\n",
+      "Example usage:\r\n",
+      "OrderedDict([('learning_rate', [0.0001]), ('epochs', [15]), ('batch_size', [16]), ('im_size', [299]), ('architecture', [<function resnet18>]), ('transform', [True]), ('dropout', [0.5]), ('weight_decay', [0.01]), ('training_schedule', [<TrainingSchedule.head_first_then_body>]), ('discriminative_lr', [False]), ('one_cycle_policy', [True])])\r\n",
+      "\r\n",
+      "# Test the effect of 3 learning rates on 3 batch sizes\r\n",
+      "$ python benchmark.py -lr 1e-3 1e-4 1e-5 -bs 8 16 32 -o learning_rate_batch_size.csv\r\n",
+      "\r\n",
+      "# Test the effect of one cycle policy without using discriminative learning rates over 5 runs\r\n",
+      "$ python benchmark.py -dl False -ocp True False -r 5 -o ocp_dl.csv\r\n",
+      "\r\n",
+      "# Test different architectures and image sizes\r\n",
+      "$ python benchmark.py -a squeezenet1_1 resenet18 resnet50 -is 299 499 -o arch_im_sizes.csv\r\n",
+      "\r\n",
+      "# Test different training schedules over 3 runs\r\n",
+      "$ python benchmark.py -ts body_only head_first_then_body -r 3 -o training_schedule.csv\r\n",
+      "\r\n",
+      "---\r\n",
+      "\r\n",
+      "To view results, we recommend using pandas dataframes:\r\n",
+      "\r\n",
+      "```\r\n",
+      "import pandas as pd\r\n",
+      "df = pd.read_csv(\"results.csv\", index_col=[0, 1, 2])\r\n",
+      "```\r\n"
+     ]
+    }
+   ],
+   "source": [
+    "!{sys.executable} scripts/sweep.py --help"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "---"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Visualizing our results"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "When we read in our multi-index dataframe, index 0 represents the run number, index 1 represents a single permutation of parameters, and index 2 represents the dataset."
+   ]
+  },
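+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "For example, the levels can be sliced directly (a sketch, assuming `df` is the dataframe returned by `sweeper.run` above):\n",
+    "\n",
+    "```python\n",
+    "df.loc[0]                                      # all permutations from run number 0\n",
+    "df.loc[(0, slice(None), \"fridgeObjects\"), :]   # one run, restricted to one dataset\n",
+    "```"
+   ]
+  },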
\n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
durationaccuracy
0P: [learning_rate: 0.0001] [im_size: 299]fridgeObjects16.6308600.818182
P: [learning_rate: 0.0001] [im_size: 499]fridgeObjects25.9823700.954545
P: [learning_rate: 0.001] [im_size: 299]fridgeObjects21.5507760.954545
P: [learning_rate: 0.001] [im_size: 499]fridgeObjects27.2244950.795455
P: [learning_rate: 1e-05] [im_size: 299]fridgeObjects16.7992830.340909
P: [learning_rate: 1e-05] [im_size: 499]fridgeObjects26.2790590.363636
1P: [learning_rate: 0.0001] [im_size: 299]fridgeObjects16.7838180.840909
P: [learning_rate: 0.0001] [im_size: 499]fridgeObjects26.0381970.818182
P: [learning_rate: 0.001] [im_size: 299]fridgeObjects16.7700780.863636
P: [learning_rate: 0.001] [im_size: 499]fridgeObjects26.1361800.886364
P: [learning_rate: 1e-05] [im_size: 299]fridgeObjects16.8745320.363636
P: [learning_rate: 1e-05] [im_size: 499]fridgeObjects26.5392960.363636
2P: [learning_rate: 0.0001] [im_size: 299]fridgeObjects16.9561390.909091
P: [learning_rate: 0.0001] [im_size: 499]fridgeObjects26.3146720.931818
P: [learning_rate: 0.001] [im_size: 299]fridgeObjects16.8849890.909091
P: [learning_rate: 0.001] [im_size: 499]fridgeObjects25.9475700.931818
P: [learning_rate: 1e-05] [im_size: 299]fridgeObjects17.1157050.409091
P: [learning_rate: 1e-05] [im_size: 499]fridgeObjects26.2033810.500000
\n", + "
" + ], + "text/plain": [ + " duration accuracy\n", + "0 P: [learning_rate: 0.0001] [im_size: 299] fridgeObjects 16.630860 0.818182\n", + " P: [learning_rate: 0.0001] [im_size: 499] fridgeObjects 25.982370 0.954545\n", + " P: [learning_rate: 0.001] [im_size: 299] fridgeObjects 21.550776 0.954545\n", + " P: [learning_rate: 0.001] [im_size: 499] fridgeObjects 27.224495 0.795455\n", + " P: [learning_rate: 1e-05] [im_size: 299] fridgeObjects 16.799283 0.340909\n", + " P: [learning_rate: 1e-05] [im_size: 499] fridgeObjects 26.279059 0.363636\n", + "1 P: [learning_rate: 0.0001] [im_size: 299] fridgeObjects 16.783818 0.840909\n", + " P: [learning_rate: 0.0001] [im_size: 499] fridgeObjects 26.038197 0.818182\n", + " P: [learning_rate: 0.001] [im_size: 299] fridgeObjects 16.770078 0.863636\n", + " P: [learning_rate: 0.001] [im_size: 499] fridgeObjects 26.136180 0.886364\n", + " P: [learning_rate: 1e-05] [im_size: 299] fridgeObjects 16.874532 0.363636\n", + " P: [learning_rate: 1e-05] [im_size: 499] fridgeObjects 26.539296 0.363636\n", + "2 P: [learning_rate: 0.0001] [im_size: 299] fridgeObjects 16.956139 0.909091\n", + " P: [learning_rate: 0.0001] [im_size: 499] fridgeObjects 26.314672 0.931818\n", + " P: [learning_rate: 0.001] [im_size: 299] fridgeObjects 16.884989 0.909091\n", + " P: [learning_rate: 0.001] [im_size: 499] fridgeObjects 25.947570 0.931818\n", + " P: [learning_rate: 1e-05] [im_size: 299] fridgeObjects 17.115705 0.409091\n", + " P: [learning_rate: 1e-05] [im_size: 499] fridgeObjects 26.203381 0.500000" + ] + }, + "execution_count": 15, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "df = clean_df(df); df " + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Since we've run our benchmarking over 3 repetitions, we may want to just look at the averages across the different __run numbers__." + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
P: [learning_rate: 0.0001] [im_size: 299]P: [learning_rate: 0.0001] [im_size: 499]P: [learning_rate: 0.001] [im_size: 299]P: [learning_rate: 0.001] [im_size: 499]P: [learning_rate: 1e-05] [im_size: 299]P: [learning_rate: 1e-05] [im_size: 499]
fridgeObjectsfridgeObjectsfridgeObjectsfridgeObjectsfridgeObjectsfridgeObjects
duration16.79027226.11174618.40194826.43608216.92984026.340579
accuracy0.8560610.9015150.9090910.8712120.3712120.409091
\n", + "
" + ], + "text/plain": [ + " P: [learning_rate: 0.0001] [im_size: 299] \\\n", + " fridgeObjects \n", + "duration 16.790272 \n", + "accuracy 0.856061 \n", + "\n", + " P: [learning_rate: 0.0001] [im_size: 499] \\\n", + " fridgeObjects \n", + "duration 26.111746 \n", + "accuracy 0.901515 \n", + "\n", + " P: [learning_rate: 0.001] [im_size: 299] \\\n", + " fridgeObjects \n", + "duration 18.401948 \n", + "accuracy 0.909091 \n", + "\n", + " P: [learning_rate: 0.001] [im_size: 499] \\\n", + " fridgeObjects \n", + "duration 26.436082 \n", + "accuracy 0.871212 \n", + "\n", + " P: [learning_rate: 1e-05] [im_size: 299] \\\n", + " fridgeObjects \n", + "duration 16.929840 \n", + "accuracy 0.371212 \n", + "\n", + " P: [learning_rate: 1e-05] [im_size: 499] \n", + " fridgeObjects \n", + "duration 26.340579 \n", + "accuracy 0.409091 " + ] + }, + "execution_count": 16, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "df.mean(level=(1,2)).T" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Additionally, we may want simply to see which set of hyperparameters perform the best across the different __datasets__. We can do that by averaging the results of the different datasets. (The results of this step will look similar to the above since we're only passing in one dataset)." + ] + }, + { + "cell_type": "code", + "execution_count": 17, + "metadata": { + "scrolled": true + }, + "outputs": [ + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
P: [learning_rate: 0.0001] [im_size: 299]P: [learning_rate: 0.0001] [im_size: 499]P: [learning_rate: 0.001] [im_size: 299]P: [learning_rate: 0.001] [im_size: 499]P: [learning_rate: 1e-05] [im_size: 299]P: [learning_rate: 1e-05] [im_size: 499]
duration16.79027226.11174618.40194826.43608216.92984026.340579
accuracy0.8560610.9015150.9090910.8712120.3712120.409091
\n", + "
" + ], + "text/plain": [ + " P: [learning_rate: 0.0001] [im_size: 299] \\\n", + "duration 16.790272 \n", + "accuracy 0.856061 \n", + "\n", + " P: [learning_rate: 0.0001] [im_size: 499] \\\n", + "duration 26.111746 \n", + "accuracy 0.901515 \n", + "\n", + " P: [learning_rate: 0.001] [im_size: 299] \\\n", + "duration 18.401948 \n", + "accuracy 0.909091 \n", + "\n", + " P: [learning_rate: 0.001] [im_size: 499] \\\n", + "duration 26.436082 \n", + "accuracy 0.871212 \n", + "\n", + " P: [learning_rate: 1e-05] [im_size: 299] \\\n", + "duration 16.929840 \n", + "accuracy 0.371212 \n", + "\n", + " P: [learning_rate: 1e-05] [im_size: 499] \n", + "duration 26.340579 \n", + "accuracy 0.409091 " + ] + }, + "execution_count": 17, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "df.mean(level=(1)).T" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "To make it easier to see which permutation did the best, we can plot the results using the `plot_df` helper function. This plot will help us easily see which parameters offer the highest accuracies." + ] + }, + { + "cell_type": "code", + "execution_count": 18, + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAtQAAAKhCAYAAABwwjmCAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDMuMC4yLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvOIA7rQAAIABJREFUeJzs3XuclWW9///XB0GtNNEv0AbRcBfbgEFACdBCKUINTd2IBlpiYOy2tfN8aJt5aP/S1FQ0D3lKUpOyg5CSpKBpGeGgaCh5yEMghoAHzPHA4fP7Yy2mAYbTrFlrMfB6Ph7zmLkP131/7rmzea+L677uyEwkSZIkNU2rahcgSZIktWQGakmSJKkEBmpJkiSpBAZqSZIkqQQGakmSJKkEBmpJkiSpBAZqSdqERMRvI2JUmY69f0TcWY5jN7eIuDki/m8D9psRET0qUZMkrY2BWpKAiHgxIt6JiLci4o2IeDgivhYRZfv/yYg4NyJubbguMz+fmePLdMrvAReW6djVcglwfrWLkLRlM1BL0r98ITO3Bz5KIXieAdzYlANFROvmLKxUEfFJYIfMnF7tWprZJOAzEdGx2oVI2nIZqCVpNZn5ZmZOAr4IjIqIGoCIeCAijlu5X0QcGxF/aLCcEfH1iHgWeLa4blxEzI2IJRExMyIGFtcfCPwv8MWI+GdEPL76OSKiVUR8OyJeiohXI+InEbFDcVuX4vlGRcTfI2JRRJy1jsv6PPD7BrVGRFxWPO6bEfFEg+vcJiIuKR53QURcGxEfaND20IiYVbymvxWvhYjoFBGTIuK1iHguIr7aoM25EfHz4jW8FRFPRkTfBtv7RMSjxW0/A7ZtsK1dRNxV/JeD1yLioZX/cpCZ7wIzgf035N5KUjkYqCVpLTJzBjAPGLgRzQ4D+gPdi8uPAL2BnYCfAndExLaZeQ+FIRg/y8ztMrNXI8c6tvj1GeDfge2AH662z6eB3YHBwHciotta6uoJPN1geX9gX+A/gLYUPjwsLm77fnF9b+DjwM7AdwAioh/wE+C0Yrt9gReL7W6n8PvqBAwHvhcRgxuc8xBgQrHdpJXXEhFbA3cCtxR/T3cAhzdod0rxuO2Bj1D4IJINts8BGvv9SVJFGKglad3mUwh5G+qCzHwtM98ByMxbM3NxZi7LzB8A21AIwBviaODSzHw+M/8JfAsYsdpwkvMy853MfBx4nLUHy7bAWw2WlwLbA58AIjPnZOYrERHAV4GTitfxFoXgP6LYbgxwU2bem5krMvPlzPxrROxCIdyfkZnvZuYs4Abgyw3O+YfMnJyZyymE55W1DgDaAJdn5tLM/AWFDyINa+0IfLS4/aHMbBio3ypenyRVhYFaktZtZ+C1jdh/bsOFiDglIuYUh1W8AewAtNvAY3UCXmqw/BLQmkIv7Ur/aPBzHYVe7Ma8TiFAA5CZ0yj0EF8FLIiI6yLiwxR6gT8IzCwOsXgDuKe4HmAX4G9rqXVlAG9Y787rqHXb4oeDTsDLq4Xkhtd9MfAc8LuIeD4izlzt3NsDb6zluiWp7AzUkrQWxQf5dgZWjpN+m0LYXOnfGmlWHwqL46XPAI4EdszMtsCbQKy+71rMp/CA5Eq7AsuABRt4CQ09QWEYx78KzbwiM/cCehS3nQYsAt4BemRm2+LXDpm5MqjPBT62llp3iojtG6zbFXh5A2p7Bdi52DvesO3KOt/KzFMy89+BLwAnrzaUpBuF3nlJqgoDtSStJiI+HBEHUxjve2tm/qW4aRYwLCI+GBEfpzD8YV22pxCAFwKtI+I7wIcbbF8AdFnH1Hy3AydFxG4RsR3/GnO9rAmXNRnYb+VCRHwyIvpHRBsKHxTeBZZn5grgeuCyiOhQ3HfniDig2PRG4CsRMbj40OTOEfGJzJwLPAxcEBHbRsQeFH4/t21AbX+i8Hv6ZkS0johhQL8GtR4cER8vBu4lwPLiFxGxDbAXcG8TfieS1CwM1JL0L7+JiLco9MKeBVwKfKXB9suA9ykE4fGsPyxOAX4LPENhCMO7rDok5I7i98UR8Wgj7W+iMNb4QeCFYvv/2YjrqZeZjwJvRkT/4qoPUwjOrxdrW0xhTmco9Ko/B0yPiCXAfRTHfRcf1PwKhd/FmxRmDlnZiz4S6EKht/rXwDmZud6gm5nvA8MoPID5OoUHJH/VYJeuxRr+SSF8X52ZDxS3HQI8kJnzN+gXIUllEKsOWZMkba4iYn/g+Mw8rNq1NJeI+DMwJjNnV7sWSVsuA7UkSZJUAod8SJIkSSUwUEuSJEklMFBLkiRJJTBQS5IkSSVovf5dNi3t2rXL
Ll26VLsMSZIkbeZmzpy5KDPbr2+/Fheou3TpQm1tbbXLkCRJ0mYuIl7akP0c8iFJkiSVwEAtSZIklcBALUmSJJXAQC1JkiSVwEAtSZIklaAigToito2IGRHxeEQ8GRHnFdfvFhF/johnI+JnEbF1JeqRJEmSmkuleqjfAz6bmb2A3sCBETEA+D5wWWZ2BV4HxlSoHkmSJKlZVCRQZ8E/i4ttil8JfBb4RXH9eOCwStQjSZIkNZeKjaGOiK0iYhbwKnAv8DfgjcxcVtxlHrBzpeqRJEmSmkPFAnVmLs/M3kBnoB/QrbHdGmsbEWMjojYiahcuXFjOMiVJkqSNUvFZPjLzDeABYADQNiJWvv68MzB/LW2uy8y+mdm3ffv1vk5dkiRJqphKzfLRPiLaFn/+APA5YA5wPzC8uNsoYGIl6pEkSZKaS+v179IsOgLjI2IrCiH+55l5V0Q8BUyIiP8DHgNurFA9kiRJUrOoSKDOzCeAPo2sf57CeGpJkiSpRfJNiZIkSVIJDNSSJElSCQzUkqRN0ty5c/nMZz5Dt27d6NGjB+PGjavfduWVV7L77rvTo0cPTj/99LUeY/ny5fTp04eDDz64EiVL2kJV6qFEqcWaO3cuxxxzDP/4xz9o1aoVY8eO5YQTTgAKf9R/+MMf0rp1aw466CAuuuiiVdq+++677Lvvvrz33nssW7aM4cOHc95551XjMqQWp3Xr1vzgBz9gzz335K233mKvvfZiyJAhLFiwgIkTJ/LEE0+wzTbb8Oqrr671GOPGjaNbt24sWbKkgpVL2tLYQy2tx8o/6nPmzGH69OlcddVVPPXUU9x///31f9SffPJJTj311DXabrPNNkybNo3HH3+cWbNmcc899zB9+vQqXIXU8nTs2JE999wTgO23355u3brx8ssvc80113DmmWeyzTbbANChQ4dG28+bN4+7776b4447rmI1619K/ReG0aNH06FDB2pqaipVstRkBmppPUr5ox4RbLfddgAsXbqUpUuXEhGVK17aTLz44os89thj9O/fn2eeeYaHHnqI/v37s99++/HII4802ubEE0/koosuolUr/9RVQymdEQDHHnss99xzT4WrlprG/5eRNkJT/qgvX76c3r1706FDB4YMGUL//v0rXLXUsv3zn//k8MMP5/LLL+fDH/4wy5Yt4/XXX2f69OlcfPHFHHnkkWTmKm3uuusuOnTowF577VWlqlXqvzDsu+++7LTTThWrVyqFgVraQE35ow6w1VZbMWvWLObNm8eMGTOYPXt2FaqXWqalS5dy+OGHc/TRRzNs2DAAOnfuzLBhw4gI+vXrR6tWrVi0aNEq7f74xz8yadIkunTpwogRI5g2bRpf+tKXqnEJommdEVJLYqCWNkBT/6g31LZtWwYNGuQ/YUobKDMZM2YM3bp14+STT65ff9hhhzFt2jQAnnnmGd5//33atWu3StsLLriAefPm8eKLLzJhwgQ++9nPcuutt1a0fhU0tTNCakkM1NJ6lPJHfeHChbzxxhsAvPPOO9x333184hOfqFzxUgv2xz/+kVtuuYVp06bRu3dvevfuzeTJkxk9ejTPP/88NTU1jBgxgvHjxxMRzJ8/n6FDh1a7bDXQHJ0RUkvgtHnSeqz8o96zZ0969+4NwPe+9z1Gjx7N6NGjqampYeutt17lj/pxxx3H5MmTeeWVVxg1ahTLly9nxYoVHHnkkc6HK22gT3/602vtuWyst7lTp05Mnjx5jfWDBg1i0KBBzV2e1mN9nRGDBg1aa2eE1NJES/tnlr59+2ZtbW21y5AkSevwhz/8gYEDB9KzZ8/6mVa+973v8bnPfY7Ro0cza9Ystt56ay655BI++9nPrtIZATBy5EgeeOABFi1axEc+8hHOO+88xowZU81L0hYoImZmZt/17megliRJkta0oYHaMdSSJElSCQzUkiRJUgkM1JIkSVIJDNSSJElSCZw2T5u1LmfeXe0SyubFCw+qdgnSWm3O/+2B//1JWpU91JIkSVIJ7KGWJEn1/NcFaePZQy1JkiSVwEAtSZIklaAigToidomI+yNiTkQ8GREnFNefGxEvR8Ss4tfQStQjSZIkNZdKjaFeBpySmY9GxPbAzIi4t7jtssy8pEJ1SJIkSc2qIoE6M18BXin+/FZEzAF2rsS5JUmSpHKq+BjqiOgC9AH+XFz1jYh4IiJuiogdK12PJEmSVIqKBuqI2A74JXBiZi4BrgE+BvSm0IP9g7W0GxsRtRFRu3DhworVK0mSJK1PxQJ1RLShEKZvy8xfAWTmgsxcnpkrgOuBfo21zczrMrNvZvZt3759pUqWJEmS1qtSs3wEcCMwJzMvbbC+Y4Pd/hOYXYl6JEmSpOZSqVk+PgV8GfhLRMwqrvtfYGRE9AYSeBH4rwrVI0mSJDWLSs3y8QcgGtk0uRLnlyRJksrFNyVKkiRJJTBQS5IkSSUwUEuSJEklMFBLkiRJJTBQS5IkSSUwUEuSJEklMFBLkiRJJTBQS5IkSSUwUEuSJEklMFBLkiRJJTBQS5IkSSUwUEuSJEklMFBLkiRJJTBQS5IkSSUwUEuSJEklMFBLkiRJJTBQS5IkSSUwUEuSJEklMFBLkiRJJTBQS5IkSSUwUEuSJEklMFBLkiRpFaNHj6ZDhw7U1NTUr5s1axYDBgygd+/e9O3blxkzZqy1/ZIlS9h55535xje+UYlyq64igToidomI+yNiTkQ8GREnFNfvFBH3RsSzxe87VqIeSZIkrd2xxx7LPffcs8q6008/nXPOOYdZs2Zx/vnnc/rpp6+1/dlnn81+++1X7jI3GZXqoV4GnJKZ3YABwNcjojtwJjA1M7sCU4vLkiRJqqJ9992XnXbaaZV1EcGSJUsAePPNN+nUqVOjbWfOnMmCBQvYf//9y17npqJ1JU6Sma8ArxR/fisi5gA7A4cCg4q7jQceAM6oRE2SJEnacJdffjkHHHAAp556KitWrODhhx9eY58VK1ZwyimncMsttzB16tQqVFkdFR9DHRFdgD7An4GPFMP2ytDdodL1SJIkaf2uueYaLrvsMubOnctll13GmDFj1tjn6quvZujQoeyyyy5VqLB6KtJDvVJEbAf8EjgxM5dExIa2GwuMBdh1113LV6AkSZIaNX78eMaNGwfAEUccwXHHHbfGPn/605946KGHuPrqq/nnP//J+++/z3bbbceFF15Y6XIrqmKBOiLaUAjTt2Xmr4qrF0REx8x8JSI6Aq821jYzrwOuA+jbt29WpGBJkiTV69SpE7///e8ZNGgQ06ZNo2vXrmvsc9ttt9X/fPPNN1NbW7vZh2moUKCOQlf0jcCczLy0waZJwCjgwuL3iZWoR5IkSWs3cuRIHnjgARYtWkTnzp0577zzuP766znhhBNYtmwZ2267Lddddx0AtbW1XHvttdxwww1Vrrp6KtVD/Sngy8BfImJWcd3/UgjSP4+IMcDfgSMqVI8kSZLW4vbbb290/cyZM9dY17dv30bD9LHHHsuxxx7b3KVtkiryUGJm/iEzIzP3yMzexa/Jmbk4MwdnZtf
i99cqUU81NDZBOsCVV17J7rvvTo8ePdY6n+O4ceOoqamhR48eXH755ZUoV5IkSRvINyVWSGMTpN9///1MnDiRJ554gieffJJTTz11jXazZ8/m+uuvZ8aMGTz++OPcddddPPvss5UqW2rxSvkwe9lll9GjRw9qamoYOXIk7777biVKliS1MAbqCmlsgvRrrrmGM888k2222QaADh3WnDVwzpw5DBgwgA9+8IO0bt2a/fbbj1//+tcVqVnaHDT1w+zLL7/MFVdcQW1tLbNnz2b58uVMmDChUmVLklqQjQ7UEfGhiGhV/Pk/IuKQ4gwe2kjPPPMMDz30EP3792e//fbjkUceWWOfmpoaHnzwQRYvXkxdXR2TJ09m7ty5VahWapma+mEWYNmyZbzzzjssW7aMurq6tb4VTJK0ZWtKD/WDwLYRsTOF14V/Bbi5OYvaUixbtozXX3+d6dOnc/HFF3PkkUeSueqsgN26deOMM85gyJAhHHjggfTq1YvWrSs6fbi02dmQD7M777wzp556KrvuuisdO3Zkhx122KJeoytJ2nBNCdSRmXXAMODKzPxPoHvzlrVl6Ny5M8OGDSMi6NevH61atWLRokVr7DdmzBgeffRRHnzwQXbaaadG532UtOE25MPs66+/zsSJE3nhhReYP38+b7/9NrfeemuVKpYkbcqa0tUZEbE3cDSw8p2Tdpk2wWGHHca0adMYNGgQzzzzDO+//z7t2rVbY79XX32VDh068Pe//51f/epX/OlPf6pCtdLmY20fZtu3b1+/z3333cduu+1Wv27YsGE8/PDDfOlLX6pW2ZK0Xl3OvLvaJZTVixceVO0SGtWUHuoTgW8Bv87MJyPi34H7m7eszc/IkSPZe++9efrpp+ncuTM33ngjo0eP5vnnn6empoYRI0Ywfvx4IoL58+czdOjQ+raHH3443bt35wtf+AJXXXUVO+64YxWvRGr5Vn6YBdb6YXbXXXdl+vTp1NXVkZlMnTqVbt26VaNcSdImbqN7ljPz98DvGyw/D3yzOYvaHK1tgvTG/gm5U6dOTJ48uX75oYceKltd0uausbd9jR49mtGjR1NTU8PWW2+9yofZ4447jsmTJ9O/f3+GDx/OnnvuSevWrenTpw9jx46t9uVIkjZBGxyoI+I3QK5te2Ye0iwVSVIzKuXD7Hnnncd5551XttokSZuHjemhvqT4fRjwb8DKv0YjgRebsSZJkiSpxdjgQF0c6kFEfDcz922w6TcR8WCzVyZJkiS1AE15KLF98UFEACJiN6D9OvaXJEmSNltNme7uJOCBiHi+uNwF+K9mq0iSJElqQZoyy8c9EdEV+ERx1V8z873mLUuSJElqGZr6Qpa9KPRMtwZ6RQSZ+ZNmq2oT4gTpkiRJWpeNDtQRcQvwMWAWsLy4OoHNMlBLqg4/zEqSWoqm9FD3Bbpn5lrnpJYkSZK2FE2Z5WM2hXmoJUmSpC1eU3qo2wFPRcQMoP5hRN+UKEmSpC1RUwL1uc1dhCRJktRSNWXavN9HxEeATxZXzcjMV5u3LEmSJKll2Ogx1BFxJDADOAI4EvhzRAxv7sIkSZKklqApDyWeBXwyM0dl5jFAP+DsdTWIiJsi4tWImN1g3bkR8XJEzCp+DW1CLZIkSVJVNSVQt1ptiMfiDTjOzcCBjay/LDN7F78mN6EWSZIkqaqa8lDiPRExBbi9uPxF4LfrapCZD0ZElyacS5IkSdqkbXQPdWaeBvwI2APoBVyXmac38fzfiIgnikNCdmziMSRJkqSqacpDibsBkzPz5Mw8iUKPdZcmnPsaCq8w7w28AvxgHeccGxG1EVG7cOHCJpxKkiRJKo+mjKG+A1jRYHl5cd1GycwFmbk8M1cA11N4uHFt+16XmX0zs2/79u03umBJkiSpXJoSqFtn5vsrF4o/b72xB4mIjg0W/5PCK80lSZKkFqUpDyUujIhDMnMSQEQcCixaV4OIuB0YBLSLiHnAOcCgiOgNJPAi8F9NqEWSJEmqqqYE6q8Bt0XEVRTC8DzgmHU1yMyRjay+sQnnliRJkjYpTXn1+N+AARGxHRCZ+VbzlyVJkiS1DE2Z5eMjEXEjcEdmvhUR3SNiTBlqkyRJkjZ5TXko8WZgCtCpuPwMcGJzFSRJkiS1JE0J1O0y8+cUp87LzGUUps6TJEmStjhNCdRvR8T/o/BAIhExAHizWauSJEmSWoimzPJxMjAJ+FhE/BFoDwxv1qokSZKkFqIpPdQfAz4P7ENhLPWzNC2YS5IkSS1eUwL12Zm5BNgR+BxwHXBNs1YlSZIktRBNCdQrH0A8CLg2MyfShFePS5IkSZuDpgTqlyPiR8CRwOSI2KaJx5EkSZJavKYE4SMpjJ0+MDPfAHYCTmvWqiRJkqQWoimvHq8DftVg+RXgleYsSpIkSWopHKohSZIklcBALUmSJJXAQC1JkiSVwEAtSZIklcBALUmSJJXAQC1JkiSVwEAtSZIklcBALUmSJJXAQC1JkiSVwEAtSZIklcBALUmSJJWgYoE6Im6KiFcjYnaDdTtFxL0R8Wzx+46VqkeSJElqDpXsob4ZOHC1dWcCUzOzKzC1uCxJkiS1GBUL1Jn5IPDaaqsPBcYXfx4PHFapeiRJkqTmUO0x1B/JzFcAit87NLZTRIyNiNqIqF24cGFFC5QkSZLWpdqBeoNk5nWZ2Tcz+7Zv377a5UiSJEn1qh2oF0RER4Di91erXI8kSZK0UaodqCcBo4o/jwImVrEWSZIkaaNVctq824E/AbtHxLyIGANcCAyJiGeBIcVlSZIkqcVoXakTZebItWwaXKkaJEmSpOZW7SEfkiRJUotmoJYkSZJKYKCWJEmSSmCgliRJkkpgoJYkSZJKYKCWJEmSSmCgliRJkkpgoJYkSZJKYKCWJEmSSmCgliRJkkpgoJYkSZJKYKCWJEmSSmCgliRJkkpgoJYkSZJKYKCWJEmSSmCgliRJkkpgoJYkSZJKYKCWJEmSSmCgliRJkkpgoJYkSZJKYKCWJEmSSmCgliRJkkrQutoFAETEi8BbwHJgWWb2rW5FkiRJ0obZJAJ10Wcyc1G1i5AkSZI2hkM+JEmSpBJsKoE6gd9FxMyIGLv6xogYGxG1EVG7cOHCKpQnSZIkNW5TCdSfysw9gc8DX4+IfRtuzMzrMrNvZvZt3759dSqUJEmSGrFJBOrMnF/8/irwa6BfdSuSJEmSNkzVA3VEfCgitl/5M7A/MLu6VUmSJEkbZlOY5eMjwK8jAgr1/DQz76luSZIkSdKGqXqgzszngV7VrkOSJElqiqoP+ZAkSZJaMgO1JEmSVAIDtSRJklQCA7UkSZJUAgO1JEmSVAIDtSRJklQCA7UkSZJUAgO1JEmSVAIDtSRJklQCA7UkSZJUAgO1JEmSVAIDtSRJklQCA7UkSZJUAgO1JEmSVAIDtSRJklQCA7UkSZJUAgO1JEmSVAIDtSRJklQCA7UkSZJUAgO1JEmSVAIDtSRJklQCA7UkSZJUgqoH6og4MCKejojnIuLMatcjSZIkbYyqBuqI2Aq4Cvg80B0YGRHdq1
mTJEmStDGq3UPdD3guM5/PzPeBCcChVa5JkiRJ2mCRmdU7ecRw4MDMPK64/GWgf2Z+Y7X9xgJji4u7A09XtNDK2hX4e7WLUJN471o271/L5v1rubx3Ldvmfv8+mpnt17dT60pUsg7RyLo1En5mXgdcV/5yqi8iFmZm32rXoY3nvWvZvH8tm/ev5fLetWzev4JqD/mYB+zSYLkzML9KtWwq3qh2AWoy713L5v1r2bx/LZf3rmXz/lH9QP0I0DUidouIrYERwKQq11Rtb1a7ADWZ965l8/61bN6/lst717J5/6jykI/MXBYR3wCmAFsBN2Xmk9WsaROwRQxt2Ux571o271/L5v1rubx3LZv3jyo/lChJkiS1dNUe8iFJkiS1aAZqSZIkqQQGakmSJKkEBmpJkiSpBAZqSZIkqQQGakmSJKkEBmpJkiSpBAZqSZIkqQQGakmSJKkEBmpJkiSpBAZqSZIkqQQGakmSJKkEBmpJkiSpBAZqSZIkqQQGakmSJKkEBmpJkiSpBAZqSZIkqQQGakmSJKkEBmpJUpNExO0RcVgT2m0TEX+NiA7lqEuSKs1ALUnNLCIeiIjXI2KbatdSLhGxB9ALmFhc7hURT0bEoog4qcF+bSLizxGxy8p1mfkecBNwRqXrlqRyMFBLUjOKiC7AQCCBQyp87tYVPN1/AbdlZhaXLwBOpRCyvx0R/1ZcfzLwy8ycu1r7nwKjNucPHZK2HAZqSWpexwDTgZuBUQ03RMQHIuIHEfFSRLwZEX+IiA8Ut306Ih6OiDciYm5EHFtc/0BEHNfgGMdGxB8aLGdEfD0ingWeLa4bVzzGkoiYGREDG+y/VUT8b0T8LSLeKm7fJSKuiogfrFbvbyLixLVc5+eB3zdY3g2YlpkvF+vYNSJ2BQ4HLlu9cWbOA14HBqzrlylJLYGBWpKa1zHAbcWvAyLiIw22XQLsBewD7AScDqwoBs/fAlcC7YHewKyNOOdhQH+ge3H5keIxdqLQE3xHRGxb3HYyMBIYCnwYGA3UAeOBkRHRCiAi2gGDgdtXP1lEfIhCgH66werZwP4R0RnoAvwNuAI4PTOXrqXuORR6tCWpRTNQS1IziYhPAx8Ffp6ZMymEyqOK21pRCK8nZObLmbk8Mx8ujic+GrgvM2/PzKWZuTgzNyZQX5CZr2XmOwCZeWvxGMsy8wfANsDuxX2PA76dmU9nwePFfWcAb1II0QAjgAcyc0Ej52tb/P5Wg3WnAv8NTAJOAj5V3P58REyMiN9HxBGrHeetBseSpBbLQC1JzWcU8LvMXFRc/in/GvbRDtiWQshe3S5rWb+hVhmfHBGnRMSc4rCSN4Adiudf37nGA18q/vwl4Ja17PdG8fv2K1dk5kuZOTQz96TwoOL5FEL2JcDPKIwnvzQidmpwnO0bHEuSWiwDtSQ1g+JY6COB/SLiHxHxDwo9tb0iohewCHgX+FgjzeeuZT3A28AHGyz/WyP7rHwwkOJ46TOKteyYmW0p9DzHBpzrVuDQYr3dgDsb2ykz36YQyv9jLcf5DnBDsXe7J1CbmW8C84CPN9ivG/D4Wo4hSS2GgVqSmsdhwHIK45h7F7+6AQ8Bx2TmCgpTxV0aEZ2KDwfuXZzl4jbgcxFxZES0joj/FxG9i8edBQyLiA9GxMeBMeupY3tgGbAQaB0R36EwVnpXuD/5AAAgAElEQVSlG4DvRkTXKNgjIv4f1D8o+AiFnulfrhxCshaTgf1WXxkR3YFBwDXFVS8Any2OJe8K/L24384UxnhPX8/1SNImz0AtSc1jFPDjzPx7Zv5j5RfwQ+Do4pR2pwJ/oRBaXwO+D7TKzL9TeEjwlOL6WfzrYb3LgPeBBRSGZNy2njqmUHjA8RngJQq94g2HhFwK/Bz4HbAEuBH4QIPt4yn0Kq9tuMdK1xWvK1ZbfxWFceLLi8vfAr4JPAl8r/g7gcLY8vHFMeSS1KLFv6YQlSRt6SJiXwpDP7oUe9XXte9PKTyA2ejQkHW024bCUI99M/PVJhcrSZsIA7UkCSi81RCYADyemedXux5Jaikc8iFJIiK6UZhxoyNweZXLkaQWxR5qSZIkqQT2UEuSJEklMFBLkiRJJWhd7QI2Vrt27bJLly7VLkOSJEmbuZkzZy7KzPbr26/FBeouXbpQW1tb7TIkSZK0mYuIlzZkP4d8SJIkSSUwUEuSJEklMFBLkiRJJTBQS5IkSSUwUEuSJEklMFBLkiRJJTBQS5IkSSUwUEuSJEklMFBLkiRJJTBQS5IkSSUwUEuSJEklKFugjoibIuLViJi9lu0REVdExHMR8URE7FmuWiRJkqRyKWcP9c3AgevY/nmga/FrLHBNGWuRJEmSyqJsgTozHwReW8cuhwI/yYLpQNuI6FiueiRJkqRyqOYY6p2BuQ2W5xXXrSEixkZEbUTULly4sCLFSZIkSRuimoE6GlmXje2YmddlZt/M7Nu+ffsylyVJkiRtuGoG6nnALg2WOwPzq1SLJEnSFmXcuHHU1NTQo0cPLr/8cgBee+01hgwZQteuXRkyZAivv/56o23POOMMampqqKmp4Wc/+1n9+h/+8Id8/OMfJyJYtGhR/fpf/vKX9OjRg4EDB7J48WIA/va3vzFixIgyXmHlVDNQTwKOKc72MQB4MzNfqWI9kiRJW4TZs2dz/fXXM2PGDB5//HHuuusunn32WS688EIGDx7Ms88+y+DBg7nwwgvXaHv33Xfz6KOPMmvWLP785z9z8cUXs2TJEgA+9alPcd999/HRj350lTY/+MEPmD59Oscccww//elPAfj2t7/Nd7/73fJfbAWUc9q824E/AbtHxLyIGBMRX4uIrxV3mQw8DzwHXA8cX65aJElS8yulh/PAAw+kbdu2HHzwwausP/bYY9ltt93o3bs3vXv3ZtasWcDm38NZaXPmzGHAgAF88IMfpHXr1uy33378+te/ZuLEiYwaNQqAUaNGceedd67R9qmnnmK//fajdevWfOhDH6JXr17cc889APTp04cuXbqs0aZVq1a899571NXV0aZNGx566CE6duxI165dy3qdlVLOWT5GZmbHzGyTmZ0z88bMvDYzry1uz8z8emZ+LDN7ZmZtuWqRJEnNq5QeToDTTjuNW265pdFtF198MbNmzWLWrFn07t0b2Px7OCutpqaGBx98kMWLF1NXV8fkyZOZO3cuCxYsoGPHwqRrHTt25NVXX12jba9evfjtb39LXV0dixYt4v7772fu3Llr7NfQOeecwwEHHMB9993HyJEj+b//+z/OPvvsslxbNbSudgGSJKnladjDCazSw/nAAw8AhR7OQYMG8f3vf3+N9oMHD67fb0M07OHcZpttNrsezkrr1q0bZ5xxBkOGDGG77bajV69etG69YbFw//3355FHHmGfffahffv27L333uttO2TIEIYMGQLA+PHjGTp0KE8//TSXXHIJO+64I+PGjav/31JL5KvHJUnSRiulh3N9zjrrLPbYYw9OOukk3nvvPWDz7+GshjFjxvDoo4/y4IMPstNOO9G1a1c+8pGP8MorhUfaXnnlF
Tp06NBo27POOotZs2Zx7733kpkb/MGmrq6O8ePHc/zxx/Otb32Lm266ib322ovbbrut2a6rGgzUkiRpozXs4TzwwAM3qodzXS644AL++te/8sgjj/Daa6/V924PGTKEmTNn8pvf/IY777yzvodz+PDhfPWrX6Wurq7kc29pVn7Y+fvf/86vfvUrRo4cySGHHML48eOBQk/yoYceuka75cuX149jf+KJJ3jiiSfYf//9N+icF110ESeccAJt2rThnXfeISJo1apVi79/BmpJUtVcdtll9OjRg5qaGkaOHMm7777LwIED6x9I69SpE4cddlijbdf2UNvRRx/N7rvvTk1NDaNHj2bp0qWAD7WVQyk9nGvTsWNHIoJtttmGr3zlK8yYMWOV7ZtrD2c1HH744XTv3p0vfOELXHXVVey4446ceeaZ3HvvvXTt2pV7772XM888E4Da2lqOO+44AJYuXcrAgQPp3r07Y8eO5dZbb63/MHXFFVfQuXNn5s2bxx577FHfBmD+/PnU1tbWh/RTTjmFAQMGMH78eI466qgKX30zy8wW9bXXXnulJKnlmzdvXnbp0iXr6uoyM/OII47IH//4x6vsM2zYsBw/fnyj7e+7776cNGlSHnTQQausv/vuu3PFihW5YsWKHDFiRF599dWZmbn33nvnkiVL8rrrrssrrrgiMzNHjBiRzzzzTDNf2ZZjwYIFmZn50ksv5e67756vvfZannrqqXnBBRdkZuYFF1yQp5122lrb33///Wvcv/nz52dm5ooVK/KEE07IM844Y5Xt55xzTt55552Zmdm/f/9csmRJ/uhHP8rLL7+82a5LWgmozQ3Ipz6UKEmqmmXLlvHOO+/Qpk0b6urq6NSpU/22t956i2nTpvHjH/+40bZre6ht6NCh9T/369ePefPmAT7UVg6HH344ixcvpk2bNqv0cB555JHceOON7Lrrrtxxxx1AoYfz2muv5YYbbgBg4MCB/PWvf+Wf//wnnTt35sYbb+SAAw7g6KOPZuHChWQmvXv35tprr60/38oeznPPPRf4Vw9n27ZtG53eTaqYDUndm9KXPdSSVnfppZdm9+7ds0ePHjlixIh85513ctSoUdmlS5fs1atX9urVKx977LFG27700ks5ZMiQ/MQnPpHdunXLF154ITMzr7zyyvzYxz6WQC5cuLB+/1/84hfZvXv3/PSnP52LFi3KzMznnnsuv/jFL5b9OjdHl19+eX7oQx/Kdu3a5VFHHbXKtvHjx+fhhx++zvaN9XCu9P7772efPn3ywQcfzMzM3/3ud7nnnnvmwQcfnG+88Ubuv//++dprrzXPhUjaLLGBPdSOoZbUor388stcccUV1NbWMnv2bJYvX86ECROAxueyXd0xxxzDaaedxpw5c5gxY0b9eM8t9W1flfT6668zceJEXnjhBebPn8/bb7/NrbfeWr/99ttvZ+TIkU0+/vHHH8++++7LwIEDAR9qk1Q+BmpJLd7KYQPLli1bY9jAujz11FMsW7asfm7U7bbbrn4e1C31bV+VdN9997HbbrvRvn172rRpw7Bhw3j44YcBWLx4MTNmzOCggw5q0rHPO+88Fi5cyKWXXrrGNh9qk9TcDNSSWrSdd96ZU089lV133ZWOHTuyww471E/f1Nhctg0988wztG3blmHDhtGnTx9OO+00li9fvs7zORdu89l1112ZPn06dXV1ZCZTp06lW7duANxxxx0cfPDBbLvttht93BtuuIEpU6Zw++2306rVmn/mNtdpuyRVj4FaUou2tmEDa5vLtqFly5bx0EMPcckll/DII4/w/PPPc/PNN6/zfA4baD79+/dn+PDh7LnnnvTs2ZMVK1YwduxYACZMmLDGcI+G03ZB4aG2I444gqlTp9K5c2emTJkCwNe+9jUWLFjA3nvvTe/evTn//PPr22zW03ZJqpoojLduOfr27Zu1tbXVLkPSJuKOO+7gnnvu4cYbbwTgJz/5CdOnT+fqq6+u3+eBBx7gkksu4a677lql7fTp0znzzDPrZ4q45ZZbmD59OldddVX9Pl26dKG2tpZ27dqt0rauro6DDz6YKVOmsP/++zNx4kR++tOfstVWW/HVr361TFcrSetx7g7VrqC8zn2zoqeLiJmZ2Xd9+zltnqQWreGwgQ984ANMnTqVvn378sorr9CxY0cykzvvvJOampo12n7yk5/k9ddfZ+HChbRv355p06bRt+96/38TcNiANmMGMmmjOeRDUou2tmEDRx99ND179qRnz54sWrSIb3/728Cqwwa22morLrnkEgYPHkzPnj3JzPre5S32bV+SpI3mkA9JkvQv9lC3bN6/ZrWhQz7soZYkSZJKYKCWJEmSSmCgliRJkkrgLB+SpObnOE5JWxB7qCVJkqQSGKglSZKkEjjkQ9KmySEDkqQWwh5qSZIkqQQGakmSJKkEBmpJkiSpBAZqSZIkqQQGakmSJKkEZQ3UEXFgRDwdEc9FxJmNbN81Iu6PiMci4omIGFrOeiRJkqTmVrZAHRFbAVcBnwe6AyMjovtqu30b+Hlm9gFGAFeXqx5JkiSpHMrZQ90PeC4zn8/M94EJwKGr7ZPAh4s/7wDML2M9kiRJUrMr54tddgbmNlieB/RfbZ9zgd9FxP8AHwI+V8Z6JEmSpGZXzh7qaGRdrrY8Erg5MzsDQ4FbImKNmiJibETURkTtwoULy1CqJEmS1DTlDNTzgF0aLHdmzSEdY4CfA2Tmn4BtgXarHygzr8vMvpnZt3379mUqV5IkSdp45QzUjwBdI2K3iNiawkOHk1bb5+/AYICI6EYhUNsFLUmSpBajbIE6M5cB3wCmAHMozObxZEScHxGHFHc7BfhqRDwO3A4cm5mrDwuRJEmSNlnlfCiRzJwMTF5t3Xca/PwU8Kly1iBJkiSVk29KlCRJkkpgoJYkSZJKYKCWJEmSSmCgliRJkkpgoJYkSZJKYKCWJEmSSmCgliRJkkpgoJYkSZJKYKCWJEmSSmCgliRJkkpgoJYkSZJKYKCWJEmSSmCgliRJkkpgoJYkSZJKYKCWJEmSSmCgliRJkkpgoJYkSZJKYKCWJEmSSmCgliRJkkpgoJYkSZJKYKCWJEmSSmCgliRJkkpgoJYkSZJKYKCWJEmSSmCgliRJkkpgoJYkSZJKYKCWJEmSSlDWQB0RB0bE0xHxXEScuZZ9joyIpyLiyYj4aTnrkSRJkppb63IdOCK2Aq4ChgDzgEciYlJmPtVgn67At4BPZebrEdGhXPVIkiRJ5VDOHup+wHOZ+Xxmvg9MAA5dbZ+vAldl5usAmflqGeuRJEmSml05A/XOwNwGy/OK6xr6D+A/IuKPETE9Ig4sYz2SJElSsyvbkA8gGlmXjZy/KzAI6Aw8FBE1mfnGKgeKGAuMBdh1112bv1JJkiSpicrZQz0P2KXBcmdgfiP7TMzMpZn5AvA0hYC9isy8LjP7Zmbf9u3bl61gSZIkaWOVM1A/AnSNiN0iYmtgBDBptX3uBD4DEBHtKAwBeb6MNUmSJEnNqmyBOjOXAd8ApgBzgJ9n5pMRcX5E
HFLcbQqwOCKeAu4HTsvMxeWqSZIkSWpu5RxDTWZOBiavtu47DX5O4OTilyRJktTi+KZESZIkqQQGakmSJKkEGxWoI2JAREwrzht9WLmKkiRJklqKdY6hjoh/y8x/NFh1MnAIhTmmH6YwS4ckSZK0xVrfQ4nXRsRM4OLMfBd4AzgKWAEsKXdxkiRJ0qZunUM+MvMwYBZwV0R8GTiRQpj+IOCQD0mSJG3x1juGOjN/AxwAtAV+BTydmVdk5sJyFydJkiRt6tYZqCPikIj4AzANmE3hbYf/GRG3R8THKlGgJEmStClb3xjq/wP2Bj4ATM7MfsDJEdEV+P8oBGxJkiRpi7W+QP0mhdD8AeDVlSsz81kM05IkSdJ6x1D/J4UHEJdRmN1DkiRJUgPr7KHOzEXAlRWqRZIkSWpxfPW4JEmSVAIDtSRJklQCA7UkSZJUAgO1VLR8+XL69OnDwQcfDMALL7xA//796dq1K1/84hd5//3312jz/vvv85WvfIWePXvSq1cvHnjggfptZ511FrvssgvbbbfdKm2uvPJKampqGDp0aP0x//CHP3DyySeX7+IkSVLZGKilonHjxtGtW7f65TPOOIOTTjqJZ599lh133JEbb7xxjTbXX389AH/5y1+49957OeWUU1ixYgUAX/jCF5gxY8YabW644QaeeOIJ+vTpw5QpU8hMvvvd73L22WeX6cokSVI5GaglYN68edx9990cd9xxAGQm06ZNY/jw4QCMGjWKO++8c412Tz31FIMHDwagQ4cOtG3bltraWgAGDBhAx44dGz3f0qVLqauro02bNtxyyy0MHTqUHXfcsRyXJkmSysxALQEnnngiF110Ea1aFf6TWLx4MW3btqV168LMkp07d+bll19eo12vXr2YOHEiy5Yt44UXXmDmzJnMnTt3nec69dRTGTBgAAsXLuRTn/oU48eP5/jjj2/+i5IkSRVhoNYW76677qJDhw7stdde9esyc439ImKNdaNHj6Zz58707duXE088kX322ac+hK/Nl7/8ZR577DFuvfVWLr30Ur75zW/y29/+luHDh3PSSSfVDxmRJEktw/pePS5t9v74xz8yadIkJk+ezLvvvsuSJUs48cQTeeONN1i2bBmtW7dm3rx5dOrUaY22rVu35rLLLqtf3meffejatesGnXf+/Pk88sgjnHPOOfTr148//elPnHXWWUydOpUhQ4Y02/VJkqTysodaW7wLLriAefPm8eKLLzJhwgQ++9nPctttt/GZz3yGX/ziFwCMHz+eQw89dI22dXV1vP322wDce++9tG7dmu7du2/Qec8++2y++93vAvDOO+8QEbRq1Yq6urpmujJJklQJBupm8u6779KvXz969epFjx49OOeccwAYOHAgvXv3pnfv3nTq1InDDjus0fYHHnggbdu2rZ+ybaWjjz6a3XffnZqaGkaPHs3SpUsB+OUvf0mPHj0YOHAgixcvBuBvf/sbI0aMKONVblm+//3vc+mll/Lxj3+cxYsXM2bMGAAmTZrEd77zHQBeffVV9txzT7p168b3v/99brnllvr2p59+Op07d6auro7OnTtz7rnn1m977LHHAOjTpw8AY8aMoWfPnjz66KMceOCBFbpCSZLUHKKxsaKbsr59++bKWRQ2JZnJ22+/zXbbbcfSpUv59Kc/zbhx4xgwYED9PocffjiHHnooxxxzzBrtp06dSl1dHT/60Y+466676tdPnjyZz3/+8wAcddRR7Lvvvvz3f/83++yzD1OmTGHChAm8++67/M///A8jR47k/PPP3+AhB9Im7dwdql1BeZ37ZrUrKC/vX8vlvWvZvH/NKiJmZmbf9e1nD3UziYj6F3gsXbqUpUuXrvIQ21tvvcW0adPW2kM9ePBgtt9++zXWDx06lIggIujXrx/z5s0DoFWrVrz33nv1U6899NBDdOzY0TAtSZJUYT6U2IyWL1/OXnvtxXPPPcfXv/51+vfvX7/t17/+NYMHD+bDH/5wk469dOlSbrnlFsaNGwfAOeecwwEHHECnTp249dZbOfLII5kwYUKzXIckSZI2nD3UzWirrbZi1qxZzJs3jxkzZjB79uz6bbfffjsjR45s8rGPP/549t13XwYOHAjAkCFDmDlzJr/5zW+48847GTp0KE8//TTDhw/nq1/9qg+2SZIkVYiBugzatm3LoEGDuOeee4DCS0JmzJjBQQcd1KTjnXfeeSxcuJBLL710jW11dXX1Lwb51re+xU033cRee+3FbbfdVtI1SJIkacOUNVBHxIER8XREPBcRZ65jv+ERkRGx3kHfm6qFCxfyxhtvAIUp0O677z4+8YlPAHDHHXdw8MEHs+222270cW+44QamTJnC7bffXv8Wv4YuuugiTjjhBNq0aePUa5IkSVVQtkAdEVsBVwGfB7oDIyNijQl6I2J74JvAn8tVSyW88sorfOYzn2GPPfbgk5/8JEOGDKmfAm/ChAlrDPeora3luOOOq18eOHAgRxxxBFOnTqVz585MmTIFgK997WssWLCAvffem969e3P++efXt5k/fz61tbX18yOfcsopDBgwgPHjx3PUUUeV+5IlSZJEGafNi4i9gXMz84Di8rcAMvOC1fa7HLgPOBU4NTPXOSfepjptnjZRm/P0QU791LJ5/1q2zfn+ee9aNu9fs9oUps3bGZjbYHlecV29iOgD7JKZd7EOETE2ImojonbhwoXNX6kkSZLUROUM1NHIuvru8IhoBVwGnLK+A2XmdZnZNzP7tm/fvhlLlCRJkkpTzkA9D9ilwXJnYH6D5e2BGuCBiHgRGABMaskPJkqSJGnLU85A/QjQNSJ2i4itgRHApJUbM/PNzGyXmV0yswswHThkfWOoJUmSpE1J2QJ1Zi4DvgFMAeYAP8/MJyPi/Ig4pFznlSRJkiqprK8ez8zJwOTV1n1nLfsOKmctTebTspIkSVoH35QoSfr/27v3aNvLut7j7497g+JdES1FEAQNTFQyLDNvmZcKLfOWaZocrVGi5jmWqRVSp8zMY9nNCyhZaWIeRcPwpEEXM7nIRW6KhGSi4QVFDGHD9/wxfxs2e88995xrstYznznfrzH2WPP3mz823zG++7PWs37zeZ6fJGkODqglSZKkOTigliRJkubggFqSJEmagwNqSZIkaQ4OqCVJkqQ5OKCWJEmS5uCAWpIkSZqDA2pJkiRpDg6oJUmSpDk4oJYkSZLm4IBakiRJmoMDakmSJGkODqglSZKkOTigliRJkubggFqSJEmagwNqSZIkaQ4OqCVJkqQ5OKCWJEmS5uCAWpIkSZqDA2pJkiRpDg6oJUmSpDk4oJYkSZLm4IBakiRJmoMDakmSJGkODqglSZKkOTigliRJkuawrgPqJI9PcmGSi5K8fMz7L01yXpKzk3wkyb7rWY8kSZJ0c1u3AXWSTcAfA08ADgZ+KsnB2132SeDBVXUI8B7gtetVjyRJkrQe1vMO9WHARVV1cVVdA7wLeNK2F1TVP1TVt4bDjwN7r2M9kiRJ0s1uPQfU9wD+Y5vjzw/nduYI4EPj3kjygiS
nJTnt8ssvvxlLlCRJkuazngPqjDlXYy9MngU8GPi9ce9X1Zur6sFV9eC99trrZixRkiRJms/mdfy7Pw/cc5vjvYEvbH9RkscArwQeUVXfXsd6JEmSpJvdet6hPhU4MMl+SXYHngGcsO0FSR4EvAl4YlX91zrWIkmSJK2LdRtQV9UW4IXAScD5wLur6twkRyd54nDZ7wG3BY5PcmaSE3by10mSJEkLaT2nfFBVJwInbnfu17d5/Zj1/P9LkiRJ680nJUqSJElzcEAtSZIkzcEBtSRJkjQHB9SSJEnSHBxQS5IkSXNwQC1JkiTNwQG1JEmSNAcH1JIkSdIcHFBLkiRJc3BALUmSJM3BAbUkSZI0BwfUkiRJ0hwcUEuSJElzcEAtSZIkzcEBtSRJkjQHB9SSJEnSHBxQS5IkSXNwQC1JkiTNwQG1JEmSNAcH1JIkSdIcHFBLkiRJc3BALUmSJM3BAbUkSZI0BwfUkiRJ0hwcUEuSJElzcEAtSZIkzcEBtSRJkjSHdR1QJ3l8kguTXJTk5WPev2WSvx7e/7ck91rPeiRJkqSb27oNqJNsAv4YeAJwMPBTSQ7e7rIjgK9V1QHA/wF+d73qkSRJktbDet6hPgy4qKourqprgHcBT9rumicBxw2v3wP8UJKsY02SJEnSzWo9B9T3AP5jm+PPD+fGXlNVW4CvA3uuY02SJEnSzWrzOv7d4+401xquIckLgBcMh99McuGctS2yfYBLN+z/9mo/ELgZ2bu+2b++2b9+2bu+LXv/9p3movUcUH8euOc2x3sDX9jJNZ9Pshm4A/DV7f+iqnoz8OZ1qnOhJLm8qh7cug7Nzt71zf71zf71y971zf6NrOeUj1OBA5Psl2R34BnACdtdcwLwnOH1U4CPVtUOd6hXzBWtC9Ca2bu+2b++2b9+2bu+2T/W8Q51VW1J8kLgJGATcGxVnZvkaOC0qjoBOAZ4R5KLGN2ZfsZ61dORr7cuQGtm7/pm//pm//pl7/pm/1jfKR9U1YnAidud+/VtXl8NPHU9a+jQSkxtWVL2rm/2r2/2r1/2rm/2D4gzLCRJkqS189HjkiRJ0hwcUEuSJElzWNc51Nq5JC+d4rKrqupN616MZpbkyVNcdvWwjkALxv5JbSQ5dIrLrq2qc9a9GM0kyZ2nuOz6qlrJXT+cQ91IksuAP2X8w222+umqus8GlaQZJPkK8H4m9+/hVXXvDSpJM7B//UryjV1dAlzm987FlORKRtvqTsreflV1r42pSNNKcjWj54lM6t2mqtpng0paKN6hbucdVXX0pAuS3GajitHMPlRVz5t0QZK/2KhiNDP716/PVtWDJl2Q5JMbVYxmdmpVPXrSBUk+ulHFaCbnm72d8w61JKkbSfavqovnvUbSbJLcatjueK5rlpUD6oaS3Bv4CUaPX98CfAZ4Z1W5SXoHktwWeDw37d+Hq+r6poVpKvavf0nuBGypqitb16LZJHkw22Svqi5oXJKmlGQvYG9Gvfv3qvpm45IWgrt8NJLkxcCfAbcCvhfYg9E3l39N8siGpWkKSZ4G/AOjAdkLgcOAZwNnJrl/y9q0a/avX0nunuTPk3wd+DJwbpJLkxyVZLfW9WmyJI9IchrwGuBY4OeAY5KcnOSebavTJEkOTvL3wL8C/wa8FTgnyduT3KFtde15h7qRJOcAD6yq65LcGjixqh6ZZB/g/buap6S2kpwNfF9VfSvJXYC/rKrHJTkE+LOqemjjEjWB/evXML/26Ko6edit5QeBVwG/Cty1ql7QtEBNNMyxfWxVXZ5kP+D1VfUTSX4YeFlVPbZxidqJJB8HnlNVFyY5DPjFqnpOkucDj6uqpzQusSnvULe1dVHoLYHbAVTVpYB3WRZfgP8eXl8F3BWgqs4Gbt+qKE3N/vVrz6o6GaCq3stoN5arqupVwMObVqZpbKqqy4fXlwL7AlTV/wPu0Z0/DbsAAB7cSURBVKwqTWOPqroQoKo+Adx/eP0W4OCWhS0Cd/lo563AqcNvfA8HfhdumJv01ZaFaSonAn+X5BTgCcDxcMM+nZO2FNJisH/9ujzJs4CPAj8JXAKQJHiTqAenJTkG+AjwJOBkgOGT2k0N69KufTbJrzHq3ZOBMwGGqVYrP550ykdDSe4HHAR8ygUZ/UnyI4x+Kz9ruLtCklsAu1XVt5sWp12yf30apsW9jlHvzmQ0TeCyJHsCj6yqv2laoCYaBl/PZ8gecOww9XEPRlN2Pte0QO1UkjsCr+DG3r2mqq4c5k8fVFUfb1pgYw6oG3O1bP/caaBv9k+SNC8/HmvE1bJ9c6eBvtm/viV5VJI/SvL+JH+T5DVJDmhdl3YtyW2THJ3kU0m+nuTyJB9P8tzWtWmyJLdI8rNJPpjkrCSnJ3mXO5ONOKBu51hGK2QPAB4GXFBV+wH/AhzTtDJN4y8YfVR5B+CpwN8wmr6zGfjjloVpKvavU0leA/wM8HHgWuBi4LPA8Ume2rI2TeUvGfXs8cCrgT9ktGXlo5L8dsvCtEvHMFpE+hpG247+7XDuVUmObFnYInDKRyNJzqqqB2xzfEZVHTq8Pq+qVn7F7CIb07/Tq+p7htcXVNV3tatOu2L/+pXknKq6//B6M3BKVf3AMHXnn6rqu9tWqEnGZO/UqvreYf3CeWZvcSU5u6oO2eb441X1fUluCZxZVQc1LK8571C389kkv5bkoUleh6tle3N5kmcNUweOxJ0GemP/+nX9sBsLwN0Zdoaoqq/hDi09uCrJwwCSHM6wq9XwhFL7t9iuzegJzyQ5FLgGYFjEvfJ3Z/3B0c7zGO09/Qrg28CLh/O3ZvRxphbb84AnAh8GHsLoaXsAd2b0gAktNvvXr98GPpnkw8A/A78JNyzwPqtlYZrKzwOvT3IF8CvAkXBD/5xutdheBvxDkk8zmib3Mrihdx9sWdgicMqHJKkrwx3q/YGLquqK1vVIq2L4FG/Pqvpy61oWjXeoG0myKcnPJfnNJA/d7r1XtapLazf81q5OJHlckiOS7Lvd+ee1qknTqaqvVtVpWwfTLmbrW0aPk1cfbgfssBNZkkPGXLtSvEPdSJK3Mpre8QlGK5xPqaqXDu/dsEBRiynJlYzmjG075+/WwLeAqiofX73AhgHYw4AzgMOBN1TVG4f3zN8CS/KH259i9D30zwGq6kUbXpSmluTs7U8B9wG2PtJ65QdmiyrJ04A3AP8F7AY8t6pOHd5b+e+bLn5r57Ct3ziS/BHwJ0neC/wULszowdsZ/Zb+sqr6EkCSfx+2PtTiOxx4UFVtSXIU8FdJ9q+qX8L8LbonM3pc9Ye5sVfPAE5vVZBmcgnwDeC3gP9m1MN/YpRJLbZXAN8zPJn0MOAdSV5RVe/F75tO+Who960vqmpLVb2A0U4fHwVu26wqTaWqjgT+AHhnkhcNWz75cU8/NlfVFoBh2sDhwO2THM822dRCOojRw3geD/x9VR0HXFlVxw2vtcCq6omMFrS9GXhAVV0CXFtVn/Ox4wtvU1VdBlBVnwAeBbwyyYvw558D6oZOS/L4bU9U1dHA24B7NalIM6mq04
HHDIenALdqWI5m89kkj9h6UFXXVdURjD52Xum9VBddVV1ZVS8Bfh/4iyT/C3+WdaWq/i/wBOCRSU7AX2J7ceXWbfMAhsH1I4EnAfdrVdSicA61dDNI8p2MphCc2LoW7VqSPQCq6r/HvHePqvrPja9Ksxp2HPgF4Pur6lmt69HskjyAUf/+rHUtmmzo1VVVddF253cDnlZVf9mmssXggFqSJEmagx+TSZIkSXNwQC1JkiTNwQG1JEmSNAcH1AsgyRmTjrXY7F/fknxw0rEW17CH+E6PtdjMXr/M3o4cUC+A7Z8utOpPG+qN/eve83dxrMW1/cNcfLhLX8xev8zedtzlYwEk2Rc4sKr+ftjOa3NVXdm6Lk3H/vVt6Nk+VXVh61qkVWL2tEy8Q91YkucD7wHeNJzaG3hfu4o0C/vXtySHM3pC6d8Nxw8cHjShBZfkPkk+kuRTw/EhSV7Vui5Nx+z1y+yN54C6vV8EfgD4BkBVfQa4a9OKNAv717ejgMOAKwCq6kx8Umkv3gL8KnAtQFWdDTyjaUWaxVGYvV6ZvTEcULf37aq6ZutBks2A83D6Yf/6tqWqvt66CK3JravqE9ud29KkEq2F2euX2RvDAXV7pyR5BbBHkh8Gjgc+0LgmTc/+9e1TSZ4JbEpyYJI3Ah9rXZSm8uUk92b4BTbJU4DL2pakGZi9fpm9MVyU2FiSWwBHAI8FApxUVW9pW5WmZf/6luTWwCsZ9Q/gJOC3qurqdlVpGkn2B94MPBT4GvDvwLOq6pKWdWk6Zq9fZm88B9SNJXk28L5td4VI8mNV5X6cHbB/fUvyg8DHquq6bc4dWlXuJd6JJLcBbuHOOn0xe/0zezfllI/23gj8U5KDtjl3dKtiNDP717eTgI8muds2597aqhhNL8l1SV4DfGvrD3QfqtQVs9cpszeeA+r2/h14HvCeJE8dzqVhPZqN/evbhcDvAScneehwzv714VxGP8M+nOTOwzl71w+z1y+zN8bm1gWIqqozkjwCeGeShwCbWhelqdm/vlVVfTDJhcBfJzkWd2npxZaq+uUkT2P0KdHPYO96Yvb6ZfbG8A51e5cBVNWXgccx+kf53U0r0izsX98CN+wf/oPAw4FDmlakaW3t3buBpwFvA/ZvWpFmYfb6ZfbGcFGiJG0jyT5VdWnrOjRZku+pqtO3Ob498ONV9ecNy9IczF4fzN54TvloJMkbquolST7AmI9KquqJDcrSlOxf35L8clW9Nskf7uSSF21oQZpakkdX1UeBfZPsu93b32xRk6Zn9vpl9iZzQN3OO4avr2tahdbK/vXt/OHr6ROv0iJ6BPBR4PAx7xXw3o0tRzMye/0yexM45WOBJLkTcM+qOrt1LZqd/evb8JCe21bVN1rXIq0Ss6dl4KLExpKcnOT2w9YzZwFvS/L61nVpOvavb0n+aujfbYDzgAuTvKx1Xdq1JC8eepckb01yRpLH7vq/1CIwe/0ye+M5oG7vDsNv5U8G3lZV3wM8pnFNmp7969vBQ/9+HDgR2Ad4dtuSNKXnDb17LHBX4GeB17QtSTMwe/0ye2M4oG5vc5LvZLT1jI+r7o/969tuSXZj9EP9/VV1Le6n2outD5L4EUa/zJ6FD5foidnrl9kbwwF1e0czegTrRVV1apL9gc80rknTs399exNwCXAb4B+HlevO4+zD6Uk+zOiH+klJbgdc37gmTc/s9cvsjeGixAWX5Fer6nda16G1sX99SRJgU1VtGY6fU1XHNS5LYwwL2R4IXFxVVyTZE7jH1kXBSe5XVec2LVJTM3v9MHvjOaBecEnOqKpDW9ehtbF/fbN//bJ3fbN//VrV3jnlY/Gt/Lykztm/vtm/ftm7vtm/fq1k7xxQLz4/Quib/eub/euXveub/evXSvbOAfXiW8nf9JaI/eub/ZPaMHvqigPqxXd86wI0F/vXt39pXYDW7JrWBWguZq9fK5k9FyU2lmQ/4EjgXsDmreer6omtatL07F/fktwR+Bl27N+LWtWk6SU5hB17995mBWlqZq9vZm9Hm3d9idbZ+4BjgA/gPo49sn99OxH4OHAO9q8rSY4FDgHO5cbeFbDSP9Q7YvY6ZfbG8w51Y0n+raoe0roOrY3969uqbu+0DJKcV1UHt65Da2P2+mX2xnNA3ViSZwIHAh8Gvr31fFWd0awoTc3+9S3JLwHfZPTY+G3799VmRWkqSY4Bfr+qzmtdi2Zn9vpl9sZzykd79weeDTyam3508uhmFWkW9q9v1wC/B7ySG7d6KmD/ZhVpWscB/5rki4wGZAGqqg5pW5amZPb6ZfbG8A51Y0kuAA6pqpVcFds7+9e3JJ8FHlJVX25di2aT5CLgpWw3B7eqPtesKE3N7PXL7I3nHer2zgLuCPxX60K0Jvavb+cC32pdhNbk0qo6oXURWjOz1y+zN4YD6vbuBlyQ5FRuOo/Mbdf6YP/6dh1wZpJ/4Kb9c+uuxXdBkr9itMPOtr1b6Z0GOmL2+mX2xnBA3d5vtC5Ac7F/fXvf8Ef92YPRD/PHbnNu5bfu6ojZ65fZG8M51JIkSdIcvEPdSJJ/rqqHJbmSG1c4w42rZW/fqDRNwf71Lcm7q+ppSc7hpv0DYNVXqy+yJL9cVa9N8kbG984pAwvM7PXL7E3mgLqRqnrY8PV2rWvR7Oxf9148fP2xplVoLc4fvp7WtAqtldnrl9mbwCkfkiRJ0hxu0bqAVZVkl0/Sm+YatWH/+pbkgzfHNdp4SY66Oa5RG2avX2ZvMu9QN5Lkv4HPTLoEuENV7bNBJWkG9q9vSa4A/nHSJcD9qsqnti2YJJ8HXj/pEuD5VfVdG1SSZmD2+mX2JnMOdTvT/IO7bt2r0FrZv749aYprfPrlYnoLsKu1C2/ZiEK0JmavX2ZvAu9QS5IkSXNwDrUkSZI0BwfUkiRJ0hycQ91IkjtPcdn1VXXFuhejmdk/SZK0lXOoG0lyNfAFRqtid2aTu0QsJvvXtyTf2NUlwGVVdZ+NqEfTS3LCFJd9taqeu961aHZmr19mbzLvULdzflU9aNIFST65UcVoZvavb5+1f906CPgfE94P8McbVItmZ/b6ZfYm8A51I0luVVVXz3uN2rB/fUuyf1VdPO812nhJnlZV7573GrVh9vpl9iZzQL1Akty5qr7aug6tjf2TJGk1uctHI0letc3rg5N8Gjg9ySVJHtKwNE3B/i2vJOe0rkFrk+TNrWvQ2pm9fpk971A3k+SMqjp0eP23wB9V1YeSHAa8oaoe2rZCTWL/+pbkyTt7C/izqtprI+vR9CbssBPgrKraeyPr0WzMXr/M3mQuSlwMd6+qDwFU1SeS7NG6IM3E/vXnr4G/BMbdUbjVBtei2VwOfI6b7rBTw/Fdm1SkWZi9fpm9CRxQt7P/sAVNgL2T3LqqvjW8t1vDujQd+9e3s4HXVdWntn8jyWMa1KPpXQz8UFVduv0bSf6jQT2ajdnrl9mbwAF1O0/a7ngTQJK7AX+68eVoRvavby8BdrYf7k9sZCGa2RuAOwE7/FAHXrvBtWh2Zq9fZm8C51BLkiRJc/AOd
SNJ7gD8KvDjwNZFGP8FvB94jY+sXmz2r29JNgNHMLojdndG8wC/wKh/x1TVtQ3L0y4k+S5GnxLdgxt7d0JVnd+0MO2S2eub2ds5t81r593A14BHVtWeVbUn8Kjh3PFNK9M07F/f3gE8EDgK+BHgR4FXAw8A/qJdWdqVJL8CvIvR+oVPAKcOr9+Z5OUta9NUzF6nzN5kTvloJMmFVXXfWd/TYrB/fdtF/z5dVffZ6Jo0nWHP9/ttfyczye7AuVV1YJvKNA2z1y+zN5l3qNv5XJJfHhaxAaMFbcNvgCu/WrYD9q9vX0vy1CQ3fA9McoskT2f0KYMW1/WMpgps7zuH97TYzF6/zN4EzqFu5+nAy4FTkmzdv/FLwAnA05pVpWnZv749A/hd4E+SfI3Rx5Z3BD46vKfF9RLgI0k+w42/vO4DHAC8sFlVmpbZ65fZm8ApH5JWWpI9GX0v/HLrWjSd4e7mYYwWRgX4PHBqVV3XtDDNxOz1x+ztnHeoF0iSD1bVj7WuQ2tj//qy/Wr1JF8A3l9VF7StTFOobf5cv81XdcDsdc3s7YRzqBfLPVoXoLnYv05MWK3+LlerL7YkjwU+w467RHxmeE8LzOz1y+xN5pSPBZLk2Kp6Xus6tDb2rx+uVu9XkvOBJ1TVJdud3w84saoOalKYpmL2+mX2JvMO9QJIskeS+zoY65P965Kr1fu1mdG8ze39J7DbBtei2Zm9fpm9CZxD3ViSw4HXAbsD+yV5IHB0VT2xbWWahv3rlqvV+3UscGqSd3Fj7+7JaIeIY5pVpWmZvX6ZvQmc8tFYktOBRwMnV9WDhnNnV9UhbSvTNOxfv1yt3q8kB3HjoratvTuhqs5rWpimYvb6ZfZ2zjvU7W2pqq8naV2H1sb+9cvV6p2qqvOB81vXoTUze50yezvngLq9TyV5JrApyYHAi4CPNa5J07N/HRpWpP8JoxXr/zmc3hs4IMkvVNWHmxWnNUtyVFUd1boO7ZzZW05mzykfzSW5NfBKYOuWMycBv1VVV7erStOyf31ytfpySnJ4VX2gdR3aObO3nMyeA2pJK2hYEHVQVW3Z7vzuwHlVdUCbyqTlZva0rJzysYCSvLmqXtC6Dq2N/euCq9U7lWQzcATwE4y2XyvgC8D7gWO2399YC8fsdcrsTeYd6kaS3HlnbwFnVdXeG1mPZmP/+udq9T4leSdwBXAcN+6JuzfwHODOVfX0VrVpOmavT2ZvMgfUjSS5Dvgco28mW9VwfI+q2r1JYZqK/ZPaSHJhVd13J+99uqrus9E1SavA7E3mkxLbuRh4ZFXtt82f/atqP+BLrYvTLtm/JZXkqNY1aKKvJXnqsJcxMNrXOMnTga81rEtzMnsLz+xN4IC6nTcAd9rJe6/dyEK0JvZveZ3eugBN9AzgKcCXknw6yaeBLwJPHt5Tv8zeYjN7EzjlQ5LUpSR7Mvo59uXWtUirxOztyAG1pJXjanWpDbOnZeWAWtLKcbW61IbZ07JyQC1p5bhaXWrD7GlZuShR0ipytbrUhtnTUnJAvQCSnDHpWIvN/nXJ1epLwOx1yewtAbO3I6d8SFpprlaX2jB7WibeoV4ASfZN8pjh9R5Jbte6Jk3P/vWtqr7iD/Q+mb2+mb1+mb0dOaBuLMnzgfcAbxpO7Q28r11FmoX9k9owe1IbZm88B9Tt/SLwA8A3AKrqM8Bdm1akWdg/qQ2zJ7Vh9sZwQN3et6vqmq0Hw6b3Tmzvh/2T2jB7UhtmbwwH1O2dkuQVwB5Jfhg4HvhA45o0PfvXOVerd8vsdc7sdcvsjeEuH40Ne3EeATwWCHBSVb2lbVWalv2T2jB7UhtmbzwH1I0leXFV/cGuzmkx2b/+JdkXOLCq/j7JHsDmqrqydV2azOz1z+z1yeyN55SP9p4z5txzN7oIrZn965ir1btm9jpm9rpm9sbY3LqAVZXkp4BnAvslOWGbt24HfKVNVZqW/VsavwgcBvwbjFarJ1n51eqLzOwtDbPXGbM3mQPqdj4GXAbcBfj9bc5fCZzdpCLNwv4th29X1TVJAFerd8LsLQez1x+zN4FzqCWtrCSvBa4AfgY4EvgF4LyqemXTwqQlZ/a0bJxD3ViS70tyapJvJrkmyXVJvtG6Lk3H/nXv5cDlwDnAzwEn+gO9D2ave2avU2ZvPKd8tPdHwDMY7eP4YEa/rR/QtCLNwv717chhZfoNWz65Wr0bZq9vZq9fZm8M71AvgKq6CNhUVddV1duAR7WuSdOzf11ztXrHzF7XzF7HzN6OvEPd3reS7A6cOcwpuwy4TeOaND371yFXqy8Fs9chs7cUzN4YLkpsbNjY/kvA7sAvAXcA/mT47U8Lzv71aejbfsDvMJrLudWVwNlVtaVJYZqa2euT2euf2RvPAXVDSTYBx1XVs1rXotnZP6kNsye1YfZ2zjnUDVXVdcBew0cn6oz965+r1ftk9vpn9vpk9nbOOdTtXQL8yzCX7KqtJ6vq9c0q0iwuwf71zNXq/boEs9czs9evSzB7O3BA3d4Xhj+3YLQoQ32xf52rqouSbBruvLwtycda16SpmL3Omb1umb0xnEO94JK8saqObF2H1sb+LbYk/wg8Bngr8EVGq9WfW1UPaFqY5mb2FpvZW16rmj3nUC++H2hdgOZi/xbbsxl9H3who48u7wn8ZNOKdHMxe4vN7C2vlcyeUz4kraRhtfr/HlarXw28unFJ0kowe1pG3qGWtJJcrS61Yfa0jLxDvfjSugDNxf4ttktwtfqyMnuL7RLM3rJayew5oF58f9C6AM3F/i02V6svL7O32Mze8lrJ7LnLR2NJPgBs34SvA6cBb6qqqze+Kk3L/i23VV2t3gOzt9zM3uIye+M5h7q9i4FvAm8Z/nwD+BJwn+FYi83+LbeVXK3eCbO33Mze4jJ7Yzjlo70HVdXDtzn+QJJ/rKqHJzm3WVWalv2T2jB7UhtmbwzvULe3V5J9th4Mr+8yHF7TpiTNwP5JbZg9qQ2zN4Z3qNv7n8A/J/kso5Wx+wG/kOQ2wHFNK9M07N9yW8nV6p0we8vN7C0uszeGixIXQJJbAt/F6B/mBas6ob9X9m95JXluVb29dR0az+wtL7O32MzejhxQL4AkDwXuxTafGFTVnzcrSDOxf/1ytXrfzF6/zF7fzN6OnPLRWJJ3APcGzgSuG04XsNL/MHth/7p3MbAX8M7h+OncdLX6sxvVpV0we90ze50ye+N5h7qxJOcDB5eN6JL969vWlenjziU5t6ru16o2TWb2+mb2+mX2xnOXj/Y+BXxH6yK0Zvavb65W75fZ65vZ65fZG8MpH+3dBTgvySeAb289WVVPbFeSZmD/+uZq9X6Zvb6ZvX6ZvTGc8tFYkkeMO19Vp2x0LZqd/eufq9X7ZPb6Z/b6ZPbGc0AtaaW5Wl1qw+xpmTjlo5Ek/1xVD0tyJTfdOihAVdXtG5WmKdi/5eBq9f6YveVg9vpj9ibzDrWkleVqdakNs6dl4x3qBZBkE3A3bvqx16XtKtIs7F/Xtq5Wv6x1IZqd2eua2euY2duRA+rGkhwJ/AajDe2v
H04XcEizojQ1+9c9V6t3yux1z+x1yuyN55SPxpJcBDykqr7SuhbNzv71zdXq/TJ7fTN7/TJ743mHur3/AL7eugitmf3rmD+8u2b2Omb2umb2xnBA3d7FwMlJ/pabfuz1+nYlaQb2r0OuVl8KZq9DZm8pmL0xHFC3d+nwZ/fhj/pi/zpUVQ8bvt6udS1aM7PXIbO3FMzeGA6oGxpWyd62ql7WuhbNzv4tB1er98fsLQez1x+zt3MOqBuqquuSHNq6Dq2N/eufq9X7ZPb6Z/b6ZPZ2zl0+Gkvy+8CBwPHAVVvPV9V7mxWlqdm/vrlavV9mr29mr19mbzzvULd3Z+ArwKO3OVfASv/D7Ij965ur1ftl9vpm9vpl9sbwDrWklZXkGOC+gKvVpQ1k9rRsvEPdWJJbAUcA9wNutfV8VT2vWVGamv3rnqvVO2X2umf2OmX2xnNA3d47gAuAxwFHAz8NnN+0Is3C/nXK1erdM3udMnvdM3tjOOWjsSSfrKoHJTm7qg5JshtwUlU9epf/sZqzf31L8pGq+qHWdWh2Zq9vZq9fZm8871C3d+3w9Yok3w18EbhXu3I0I/vXtzOTnICr1Xtk9vpm9vpl9sZwQN3em5PcCfg14ATgtsCvty1JM7B/fXO1er/MXt/MXr/M3hhO+ZAkSZLm4B3qxpLcDfht4O5V9YQkBwPfX1XHNC5NU7B/fXO1er/MXt/MXr/M3ni3aF2AeDtwEnD34fjTwEuaVaNZvR3717N3AN/BaLX6KcDewJVNK9K03o7Z65nZ69fbMXs7cEDd3l2q6t3A9QBVtQW4rm1JmoH969sBVfVrwFVVdRzwo8D9G9ek6Zi9vpm9fpm9MRxQt3dVkj0ZLcYgyffh41h7Yv/6tv1q9TvgavVemL2+mb1+mb0xnEPd3ksZrZK9d5J/AfYCntK2JM3A/vXN1er9Mnt9M3v9MntjuMvHAkiyGbgvEODCqrp2F/+JFoj9k9owe1IbZm9HDqgbSfLkSe+7uf1is3/LwdXq/TF7y8Hs9cfsTeaAupEkb5vwdrl10GKzf8shyYeAtwGvrKoHDHddPllVLo5aUGZvOZi9/pi9yRxQS1pZSU6tqu9N8smqetBw7syqemDr2qRlZva0bNzlo5EkP3ZzXKM27N/ScLV6Z8ze0jB7nTF7k3mHupEk5wPPZDShf2feXlWHbFBJmoH9Ww5JDgXeCHw38CmG1epVdXbTwrRTZm85mL3+mL3JHFA3kuRkht/MJ/hqVf3kBpSjGdm/5eFq9b6YveVh9vpi9iZzQC1p5bhaXWrD7GlZ+WAXSavo8AnvFeAPdWl9mD0tJe9QS5IkSXNwlw9JK8fV6lIbZk/LyjvUCyDJd1TVF3d2rMVm//rjavXlYPb6Y/aWg9nbkQPqBZDkb6vqR3d2rMVm//rjavXlYPb6Y/aWg9nbkQNqSZIkaQ7u8tFIklsBPw8cAJwNHFtVW9pWpWnZP6kNsye1YfYm8w51I0n+GrgW+CfgCcDnqurFbavStOyf1IbZk9owe5M5oG4kyTlVdf/h9WbgE1V1aOOyNCX7J7Vh9qQ2zN5kbpvXzg2PWPUjky7ZvyWR5DsmHWvhmL0lYfa6Y/Ym8A51I0muA67aegjsAXxreF1VdftWtWnX7N/ycLV6X8ze8jB7fTF7kzmgliRJkubgLh+SVo6r1aU2zJ6WlXeoJa0cV6tLbZg9LSsH1JJWjqvVpTbMnpaVu3xIWkWuVpfaMHtaSt6hlrRyXK0utWH2tKwcUEuSJElzcMqHJEmSNAcH1JIkSdIcHFBLkiRJc3BALUmSJM3BAbUkSZI0BwfUkiRJ0hz+P2aG2ecP222EAAAAAElFTkSuQmCC\n", + "text/plain": [ + "
" + ] + }, + "metadata": { + "needs_background": "light" + }, + "output_type": "display_data" + } + ], + "source": [ + "plot_df(df.mean(level=(1)), sort_by=\"accuracy\")" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python (cvbp)", + "language": "python", + "name": "cvbp" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.6.8" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/image_classification/python/00_webcam.py b/image_classification/python/00_webcam.py index 46d2aa4..edb6056 100644 --- a/image_classification/python/00_webcam.py +++ b/image_classification/python/00_webcam.py @@ -2,42 +2,46 @@ # coding: utf-8 # Copyright (c) Microsoft Corporation. All rights reserved. -# +# # Licensed under the MIT License. # # WebCam Image Classification Quickstart Notebook -# +# #
-#
+#
 # Image classification is a classical problem in computer vision, that of determining whether or not the image data contains some specific object, feature, or activity. It is regarded as a mature research area
 # and currently the best models are based on [convolutional neural networks (CNNs)](https://en.wikipedia.org/wiki/Convolutional_neural_network). Such models, with weights trained on millions of images and hundreds of object classes in the [ImageNet dataset](http://www.image-net.org/), are available from major deep neural network frameworks such as [CNTK](https://www.microsoft.com/en-us/cognitive-toolkit/features/model-gallery/), [fast.ai](https://docs.fast.ai/vision.models.html#Computer-Vision-models-zoo), [Keras](https://keras.io/applications/), [PyTorch](https://pytorch.org/docs/stable/torchvision/models.html), and [TensorFlow](https://tfhub.dev/s?module-type=image-classification).
-#
-#
+#
+#
 # This notebook shows a simple example of how to load a pretrained model and run it on a webcam stream. Here, we use a [ResNet](https://arxiv.org/abs/1512.03385) model by utilizing the `fastai.vision` package.
-#
+#
 # > For more details about image classification tasks including transfer learning (aka fine-tuning), please see our [training introduction notebook](01_training_introduction.ipynb).

 # In[1]:

-get_ipython().run_line_magic('reload_ext', 'autoreload')
-get_ipython().run_line_magic('autoreload', '2')
-get_ipython().run_line_magic('matplotlib', 'inline')
+get_ipython().run_line_magic("reload_ext", "autoreload")
+get_ipython().run_line_magic("autoreload", "2")
+get_ipython().run_line_magic("matplotlib", "inline")

 # In[2]:

 import sys
+
 sys.path.append("../")
-import io, time, urllib.request
+import io
+import time
+import urllib.request
 import fastai
 from fastai.vision import *
 from ipywebrtc import CameraStream, ImageRecorder
 from ipywidgets import HBox, Label, Layout, Widget
 from torch.cuda import get_device_name
 from utils_ic.constants import IMAGENET_IM_SIZE
-from utils_ic.datasets import imagenet_labels, data_path
+from utils_ic.datasets import imagenet_labels
+from utils_ic.common import data_path
 from utils_ic.imagenet_models import model_to_learner

@@ -46,13 +50,13 @@ print(get_device_name(0))

 # ## 1. Load Pretrained Model
-#
+#
 # We use ResNet18, which is relatively small and fast compared to other CNN models. The [reported error rate](https://pytorch-zh.readthedocs.io/en/latest/torchvision/models.html) of the model on ImageNet is 30.24% for top-1 and 10.92% for top-5*.
-#
+#
 # The pretrained model expects input images normalized using mean = [0.485, 0.456, 0.406] and std = [0.229, 0.224, 0.225], which is defined in `fastai.vision.imagenet_stats`.
-#
+#
 # The output of the model is the probability distribution of the classes in ImageNet. To convert them into human-readable labels, we utilize the label json file used by [Keras](https://github.com/keras-team/keras/blob/master/keras/applications/imagenet_utils.py).
-#
+#
 # > \* top-n: *n* labels considered most probable by the model

 # In[3]:

@@ -66,12 +70,12 @@ print(f"{', '.join(labels[:5])}, ...")

 # In[4]:

-# Convert a pretrained imagenet model into Learner for prediction.
+# Convert a pretrained imagenet model into Learner for prediction.
 learn = model_to_learner(models.resnet18(pretrained=True), IMAGENET_IM_SIZE)

 # ## 2. Classify Images
-#
+#
 # ### 2.1 Image file
 # First, we prepare a coffee mug image to show an example of how to score a single image by using the model.
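 # Beyond the top-1 label printed in the cells below, the top-5 predictions
 # discussed above can be inspected as well. A minimal sketch, assuming the
 # `learn` and `labels` objects defined earlier and an image `im` such as the
 # example image prepared below (`torch.topk` is standard PyTorch):
 #
 # ```python
 # import torch
 #
 # _, _, probs = learn.predict(im)      # class probabilities over ImageNet
 # vals, idxs = torch.topk(probs, k=5)  # five most probable classes
 # for p, i in zip(vals, idxs):
 #     print(f"{labels[i]} ({p:.2f})")
 # ```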
@@ -82,7 +86,7 @@ learn = model_to_learner(models.resnet18(pretrained=True), IMAGENET_IM_SIZE)

 IM_URL = "https://cvbp.blob.core.windows.net/public/images/cvbp_cup.jpg"
 urllib.request.urlretrieve(IM_URL, os.path.join(data_path(), "example.jpg"))

-im = open_image(os.path.join(data_path(), "example.jpg"), convert_mode='RGB')
+im = open_image(os.path.join(data_path(), "example.jpg"), convert_mode="RGB")
 im

@@ -100,8 +104,8 @@ print(f"Took {time.time()-start_time} sec")

 # ### 2.2 WebCam Stream
-#
-# Now, let's use WebCam stream for image classification. We use `ipywebrtc` to start a webcam and get the video stream to the notebook's widget. For details about `ipywebrtc`, see [this link](https://ipywebrtc.readthedocs.io/en/latest/).
+#
+# Now, let's use the webcam stream for image classification. We use `ipywebrtc` to start a webcam and get the video stream to the notebook's widget. For details about `ipywebrtc`, see [this link](https://ipywebrtc.readthedocs.io/en/latest/).

 # In[7]:

@@ -109,38 +113,42 @@ print(f"Took {time.time()-start_time} sec")
 # Webcam
 w_cam = CameraStream(
     constraints={
-        'facing_mode': 'user',
-        'audio': False,
-        'video': { 'width': IMAGENET_IM_SIZE, 'height': IMAGENET_IM_SIZE }
+        "facing_mode": "user",
+        "audio": False,
+        "video": {"width": IMAGENET_IM_SIZE, "height": IMAGENET_IM_SIZE},
     },
-    layout=Layout(width=f'{IMAGENET_IM_SIZE}px')
+    layout=Layout(width=f"{IMAGENET_IM_SIZE}px"),
 )
 # Image recorder for taking a snapshot
-w_imrecorder = ImageRecorder(stream=w_cam, layout=Layout(padding='0 0 0 50px'))
+w_imrecorder = ImageRecorder(stream=w_cam, layout=Layout(padding="0 0 0 50px"))
 # Label widget to show our classification results
-w_label = Label(layout=Layout(padding='0 0 0 50px'))
+w_label = Label(layout=Layout(padding="0 0 0 50px"))
+

 def classify_frame(_):
     """ Classify an image snapshot by using a pretrained model """
     # Once capturing started, remove the capture widget since we don't need it anymore
-    if w_imrecorder.layout.display != 'none':
-        w_imrecorder.layout.display = 'none'
-
+    if w_imrecorder.layout.display != "none":
+        w_imrecorder.layout.display = "none"
+
     try:
-        im = open_image(io.BytesIO(w_imrecorder.image.value), convert_mode='RGB')
+        im = open_image(
+            io.BytesIO(w_imrecorder.image.value), convert_mode="RGB"
+        )
         _, ind, prob = learn.predict(im)
         # Show result label and confidence
         w_label.value = f"{labels[ind]} ({prob[ind]:.2f})"
     except OSError:
-        # If im_recorder doesn't have valid image data, skip it.
+        # If im_recorder doesn't have valid image data, skip it.
         pass
-
+
     # Taking the next snapshot programmatically
     w_imrecorder.recording = True

-# Register classify_frame as a callback. Will be called whenever image.value changes.
-w_imrecorder.image.observe(classify_frame, 'value')
+
+# Register classify_frame as a callback. Will be called whenever image.value changes.
+w_imrecorder.image.observe(classify_frame, "value")

 # In[8]:

@@ -156,20 +164,16 @@ HBox([w_cam, w_imrecorder, w_label])

 # # Webcam image classification example
 #
-#
+#
 #
-#
+#
 # In this notebook, we have shown a quickstart example of using a pretrained model to classify images. The model, however, is not able to predict object labels that are not part of ImageNet. From our [training introduction notebook](01_training_introduction.ipynb), you can learn how to fine-tune the model to address such problems.

 # In[9]:

-# Stop the model and webcam
+# Stop the model and webcam
 Widget.close_all()

 # In[ ]:

-
-
-
-
diff --git a/image_classification/python/01_training_introduction.py b/image_classification/python/01_training_introduction.py
index 3bd6f06..54dd554 100644
--- a/image_classification/python/01_training_introduction.py
+++ b/image_classification/python/01_training_introduction.py
@@ -34,9 +34,9 @@ import sys
 sys.path.append("../")
 from pathlib import Path
-from utils_ic.datasets import Urls, unzip_url, data_path
+from utils_ic.datasets import Urls, unzip_url
 from fastai.vision import *
-from fastai.metrics import error_rate, accuracy
+from fastai.metrics import accuracy

 # Set some parameters. We'll use the `unzip_url` helper function to download and unzip our data.
diff --git a/image_classification/python/11_exploring_hyperparameters.py b/image_classification/python/11_exploring_hyperparameters.py
new file mode 100644
index 0000000..d692f60
--- /dev/null
+++ b/image_classification/python/11_exploring_hyperparameters.py
@@ -0,0 +1,182 @@
+#!/usr/bin/env python
+# coding: utf-8
+
+# # Testing different Hyperparameters
+
+# Let's say we want to learn more about __how different learning rates and different image sizes affect our model's accuracy when restricted to 10 epochs__, and we want to build an experiment to test out these hyperparameters.
+#
+# In this notebook, we'll walk through how to use our Parameter Sweeper module, covering how to:
+#
+# - use Python to perform this experiment
+# - use the CLI to perform this experiment
+# - evaluate the results using Pandas
+
+# ---
+
+# In[1]:
+
+
+import sys
+
+sys.path.append("../")
+import os
+
+from utils_ic.common import ic_root_path
+from utils_ic.datasets import unzip_url, Urls
+from utils_ic.parameter_sweeper import *
+
+
+# Let's download some data that we want to test on. To use the Parameter Sweeper tool for single label classification, we'll need to make sure that the data is stored such that images are sorted into their classes inside of a subfolder. In this notebook, we'll use the Fridge Objects dataset provided in `utils_ic.datasets.Urls`, which is stored in the correct format.
+
+# In[2]:
+
+
+input_data = unzip_url(Urls.fridge_objects_path, exist_ok=True)
+
+
+# ## Using Python
+
+# We start by creating the Parameter Sweeper object:
+
+# In[3]:
+
+
+sweeper = ParameterSweeper()
+
+
+# Before we start testing, it's a good idea to see what the default parameters are. We can use the property `parameters` to easily see those default values.
+
+# In[4]:
+
+
+sweeper.parameters
+
+
+# Now that we know the defaults, we can pass it the parameters we want to test.
+#
+# In this notebook, we want to see the effect of different learning rates across different image sizes using only 10 epochs (the default number of epochs is 15). To do so, we would run the `update_parameters` function as follows:
+#
+# ```python
+# sweeper.update_parameters(learning_rate=[1e-3, 1e-4, 1e-5], im_size=[299, 499], epochs=[10])
+# ```
+#
+# Notice that all parameters must be passed in as a list, including single values such as the number of epochs.
+#
+# These parameters will be used to calculate the number of permutations to run. In this case, we've passed in three options for learning rates, two for image sizes, and one for number of epochs. This will result in 3 x 2 x 1 total permutations (in other words, 6 permutations).

+# In[5]:


+sweeper.update_parameters(
+    learning_rate=[1e-3, 1e-4, 1e-5], im_size=[299, 499], epochs=[10]
+)


+# Now that we have our parameters defined, we call the `run()` function with the dataset to test on.
+#
+# We can also optionally pass in:
+# - the number of repetitions to run each permutation (default is 3)
+# - whether or not we want the training to stop early if the metric (accuracy) doesn't improve by 0.01 (1%) over 3 epochs (default is False)
+#
+# The `run` function returns a multi-index dataframe which we can work with right away.

+# In[6]:


+df = sweeper.run(datasets=[input_data], reps=3)
+df


+# ## Using the CLI

+# Instead of using Python to run this experiment, we may want to test from the CLI. We can do so by using the `scripts/sweep.py` file.
+#
+# First we move up to the `/image_classification` directory.

+# In[7]:


+os.chdir(ic_root_path())


+# To run the same kind of test from the CLI (different learning rates across different image sizes), with the same settings (3 repetitions, no early stopping), we can run the following:
+#
+# ```sh
+# python scripts/sweep.py
+#    --learning-rate 1e-3 1e-4 1e-5
+#    --im-size 99 299
+#    --epoch 5
+#    --repeat 3
+#    --inputs
+#    --output lr_bs_test.csv
+# ```
+#
+# Additionally, we've added an output parameter, which will automatically dump our dataframe into a csv file.
+#
+# To simplify the command, we can use the acronyms of the params. Early stopping is off by default, so we do not need to pass a flag for it.
+#
+# ```sh
+# python scripts/sweep.py -lr 1e-3 1e-4 1e-5 -is 99 299 -e 5 -i -o lr_bs_test.csv
+# ```

+# In[8]:


+# use {sys.executable} instead of just running `python` to ensure the command is executed using the environment cvbp
+get_ipython().system(
+    "{sys.executable} scripts/sweep.py -lr 1e-3 1e-4 1e-5 -is 99 299 -e 5 -i {input_data} -o data/lr_bs_test.csv"
+)


+# Once the script completes, load the csv into a dataframe to explore its contents. We'll want to specify `index_col=[0, 1, 2]` since it is a multi-index dataframe.
+#
+# ```python
+# df = pd.read_csv("data/lr_bs_test.csv", index_col=[0, 1, 2])
+# ```

+# HINT: You can learn more about how to use the script with the `--help` flag.

+# In[14]:


+get_ipython().system("{sys.executable} scripts/sweep.py --help")


+# ---

+# ## Visualizing our results

+# When we read in our multi-index dataframe, index 0 represents the run number, index 1 represents a single permutation of parameters, and index 2 represents the dataset.

+# To see the results, show the df using the `clean_df` helper function. This will display all the hyperparameters in a nice, readable way.

+# In[15]:


+df = clean_df(df)
+df


+# Since we've run our benchmarking over 3 repetitions, we may want to just look at the averages across the different __run numbers__.

+# In[16]:


+df.mean(level=(1, 2)).T


+# Additionally, we may simply want to see which set of hyperparameters performs the best across the different __datasets__. We can do that by averaging the results of the different datasets. (The results of this step will look similar to the above since we're only passing in one dataset.)
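+# Since the dataframe is multi-indexed, individual slices can also be pulled
+# out before any averaging. A minimal sketch using pandas' `xs`, where the
+# level values shown are illustrative and depend on the sweep that was run:
+#
+# ```python
+# df.xs(0, level=0)                # results from run number 0 only
+# df.xs("fridgeObjects", level=2)  # results for a single dataset
+# ```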
+
+# In[17]:


+df.mean(level=(1)).T


+# To make it easier to see which permutation did the best, we can plot the results using the `plot_df` helper function. This plot will help us easily see which parameters offer the highest accuracies.

+# In[18]:


+plot_df(df.mean(level=(1)), sort_by="accuracy")
diff --git a/image_classification/scripts/empty.txt b/image_classification/scripts/empty.txt
deleted file mode 100644
index e69de29..0000000
diff --git a/image_classification/scripts/sweep.py b/image_classification/scripts/sweep.py
new file mode 100644
index 0000000..78b92b3
--- /dev/null
+++ b/image_classification/scripts/sweep.py
@@ -0,0 +1,262 @@
+import sys
+import os
+
+sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
+import argparse
+import time
+import shutil
+
+from utils_ic.parameter_sweeper import *
+from utils_ic.datasets import data_path
+from argparse import RawTextHelpFormatter, Namespace
+from pathlib import Path
+
+argparse_desc_msg = """
+This script is used to benchmark the different hyperparameters when it comes to doing image classification.
+
+This script will run all permutations of the parameters that are passed in.
+
+This script will either run these tests on:
+- an input dataset defined by --inputs
+- a set of benchmarking datasets defined by --benchmark, which will create a
+  temporary data directory with all benchmarking datasets loaded into it, and delete it at the end.
+
+This script uses accuracy as the evaluation metric.
+
+Use [-W ignore] to ignore warning messages when running the script.
+""".format
+
+argparse_epilog_msg = """
+Example usage:
+{default_params}
+
+# Test the effect of 3 learning rates on 3 batch sizes
+$ python sweep.py -lr 1e-3 1e-4 1e-5 -bs 8 16 32 -i -o learning_rate_batch_size.csv
+
+# Test the effect of one cycle policy without using discriminative learning rates over 5 runs
+$ python sweep.py -dl False -ocp True False -r 5 -i -o ocp_dl.csv
+
+# Test different architectures and image sizes
+$ python sweep.py -a squeezenet1_1 resnet18 resnet50 -is 299 499 -i -o arch_im_sizes.csv
+
+# Test different training schedules over 3 runs on the benchmark dataset
+$ python sweep.py -ts body_only head_first_then_body -r 3 --benchmark -o training_schedule.csv
+
+---
+
+To view results, we recommend using pandas dataframes:
+
+```
+import pandas as pd
+df = pd.read_csv("results.csv", index_col=[0, 1, 2])
+```
+
+""".format
+
+time_msg = """Total Time elapsed: {time} seconds.""".format
+
+output_msg = """Output has been saved to '{output_path}'.""".format
+
+
+def _str_to_bool(string: str) -> bool:
+    """ Convert string to bool. """
+    if string.lower() in ("yes", "true", "t", "y", "1"):
+        return True
+    elif string.lower() in ("no", "false", "f", "n", "0"):
+        return False
+    else:
+        raise argparse.ArgumentTypeError("Boolean value expected.")
+
+
+def _get_parser(default_params: Dict[str, List[Any]]) -> Namespace:
+    """ Get parser for this script. """
""" + parser = argparse.ArgumentParser( + description=argparse_desc_msg(), + epilog=argparse_epilog_msg(default_params=default_params), + formatter_class=RawTextHelpFormatter, + ) + parser.add_argument( + "--learning-rate", + "-lr", + dest="learning_rates", + nargs="+", + help="Learning rate - recommended options: [1e-3, 1e-4, 1e-5] ", + type=float, + ) + parser.add_argument( + "--epoch", + "-e", + dest="epochs", + nargs="+", + help="Epochs - recommended options: [3, 5, 10, 15]", + type=int, + ) + parser.add_argument( + "--batch-size", + "-bs", + dest="batch_sizes", + nargs="+", + help="Batch sizes - recommended options: [8, 16, 32, 64]", + type=int, + ) + parser.add_argument( + "--im-size", + "-is", + dest="im_sizes", + nargs="+", + help="Image sizes - recommended options: [299, 499]", + type=int, + ) + parser.add_argument( + "--architecture", + "-a", + dest="architectures", + nargs="+", + choices=["squeezenet1_1", "resnet18", "resnet34", "resnet50"], + help="Choose an architecture.", + type=str, + ) + parser.add_argument( + "--transform", + "-t", + dest="transforms", + nargs="+", + help="Tranform (data augmentation) - options: [True, False]", + type=_str_to_bool, + ) + parser.add_argument( + "--dropout", + "-d", + dest="dropouts", + nargs="+", + help="Dropout - recommended options: [0.5]", + type=float, + ) + parser.add_argument( + "--weight-decay", + "-wd", + dest="weight_decays", + nargs="+", + help="Weight decay - recommended options: [0.01]", + type=float, + ) + parser.add_argument( + "--training-schedule", + "-ts", + dest="training_schedules", + nargs="+", + choices=["head_only", "body_only", "head_first_then_body"], + help="Choose a training schedule", + type=str, + ) + parser.add_argument( + "--discriminative-lr", + "-dl", + dest="discriminative_lrs", + nargs="+", + help="Discriminative learning rate - options: [True, False]. To use discriminative learning rates, training schedule must not be 'head_only'", + choices=["True", "False"], + type=_str_to_bool, + ) + parser.add_argument( + "--one-cycle-policy", + "-ocp", + dest="one_cycle_policies", + nargs="+", + help="one cycle policy - options: [True, False]", + type=_str_to_bool, + ) + i_parser = parser.add_mutually_exclusive_group(required=True) + i_parser.add_argument( + "--inputs", + "-i", + dest="inputs", + nargs="+", + help="A list of data paths to run the tests on. The datasets must be structured so that each class is in a separate folder. <--benchmark> must be False", + type=str, + ) + i_parser.add_argument( + "--benchmark", + dest="benchmark", + action="store_true", + help="Whether or not to use curated benchmark datasets to test. <--input> must be empty", + ) + parser.add_argument( + "--early-stopping", + dest="early_stopping", + action="store_true", + help="Stop training early if possible", + ) + parser.add_argument( + "--repeat", + "-r", + dest="repeat", + help="The number of times to repeat each permutation", + type=int, + ) + parser.add_argument( + "--output", "-o", dest="output", help="The path of the output file." + ) + parser.set_defaults( + repeat=3, early_stopping=False, inputs=None, benchmark=False + ) + args = parser.parse_args() + + # if discriminative lr is on, we cannot have a 'head_only' + # training_schedule + if args.discriminative_lrs is not None and True in args.discriminative_lrs: + assert "head_only" not in args.training_schedules + + # get mapping of architecture enum: ex. 
"resnet34" --> + # Architecture.resnet34 -> models.resnet34 + if args.architectures is not None: + args.architectures = [Architecture[a] for a in args.architectures] + + # get mapping of training enum: ex. "head_only" --> + # TrainingSchedule.head_only --> 0 + if args.training_schedules is not None: + args.training_schedules = [ + TrainingSchedule[t] for t in args.training_schedules + ] + + return args + + +if __name__ == "__main__": + + start = time.time() + sweeper = ParameterSweeper() + args = _get_parser(sweeper.parameters) + + sweeper.update_parameters( + learning_rate=args.learning_rates, + epochs=args.epochs, + batch_size=args.batch_sizes, + im_size=args.im_sizes, + architecture=args.architectures, + transform=args.transforms, + dropout=args.dropouts, + weight_decay=args.weight_decays, + training_schedule=args.training_schedules, + discriminative_lr=args.discriminative_lrs, + one_cycle_policy=args.one_cycle_policies, + ) + + data = args.inputs + if not data: + data = Experiment.download_benchmark_datasets( + Path(data_path()) / "benchmark_data" + ) + + df = sweeper.run( + datasets=data, reps=args.repeat, early_stopping=args.early_stopping + ) + df.to_csv(args.output) + + if args.benchmark: + for path in args.inputs: + shutil.rmtree(path) + + end = time.time() + print(time_msg(time=round(end - start, 1))) + print(output_msg(output_path=os.path.realpath(args.output))) diff --git a/image_classification/tests/unit/constants.py b/image_classification/tests/unit/constants.py new file mode 100644 index 0000000..da5e417 --- /dev/null +++ b/image_classification/tests/unit/constants.py @@ -0,0 +1,3 @@ +from pathlib import Path + +TEMP_DIR = Path("tmp_data") diff --git a/image_classification/tests/unit/test_experiments.py b/image_classification/tests/unit/test_experiments.py new file mode 100644 index 0000000..e26aa8a --- /dev/null +++ b/image_classification/tests/unit/test_experiments.py @@ -0,0 +1,99 @@ +import os +import pytest +import shutil +import pandas as pd +from pathlib import Path +from utils_ic.datasets import Urls, unzip_url +from utils_ic.parameter_sweeper import * +from constants import TEMP_DIR + + +def cleanup_data(): + if os.path.exists(TEMP_DIR): + shutil.rmtree(TEMP_DIR) + + +@pytest.fixture(scope="module") +def setup_all_datasets(request): + """ Sets up all available datasets for testing on. """ + ParameterSweeper.download_benchmark_datasets(TEMP_DIR) + request.addfinalizer(cleanup_data) + + +@pytest.fixture(scope="module") +def setup_a_dataset(request): + """ Sets up a dataset for testing on. """ + os.makedirs(TEMP_DIR) + unzip_url(Urls.fridge_objects_path, TEMP_DIR, exist_ok=True) + request.addfinalizer(cleanup_data) + + +def _test_sweeper_run(df: pd.DataFrame, df_length: int): + """ Performs basic tests that all df should pass. + Args: + df (pd.DataFame): the df to check + df_length (int): to assert the len(df) == df_length + """ + # assert len + assert len(df) == df_length + # assert df is a multi-index dataframe + assert isinstance(df.index, pd.core.index.MultiIndex) + # assert clean_df works + df = clean_df(df) + assert isinstance(df.index, pd.core.index.MultiIndex) + # assert no error when calling plot_df function + plot_df(df) + + +def test_default_sweeper_single_dataset(setup_a_dataset): + """ Test default sweeper on a single dataset. 
""" + fridge_objects_path = TEMP_DIR / "fridgeObjects" + sweeper = ParameterSweeper() + df = sweeper.run([fridge_objects_path]) + _test_sweeper_run(df, df_length=3) + + # assert accuracy over 3 runs is > 85% + assert df.mean(level=(1))["accuracy"][0] > 0.85 + + +def test_default_sweeper_benchmark_dataset(setup_all_datasets): + """ + Test default sweeper on benchmark dataset. + WARNING: This test can take a while to execute since we run the sweeper + across all benchmark datasets. + """ + datasets = [Path(d) for d in os.scandir(TEMP_DIR) if os.path.isdir(d)] + sweeper = ParameterSweeper() + df = sweeper.run(datasets, reps=1) + _test_sweeper_run(df, df_length=len(datasets)) + + # assert min accuracy for each dataset + assert df.mean(level=(2)).loc["fridgeObjects", "accuracy"] > 0.85 + assert df.mean(level=(2)).loc["food101Subset", "accuracy"] > 0.75 + assert df.mean(level=(2)).loc["fashionTexture", "accuracy"] > 0.70 + assert df.mean(level=(2)).loc["flickrLogos32Subset", "accuracy"] > 0.75 + assert df.mean(level=(2)).loc["lettuce", "accuracy"] > 0.70 + assert df.mean(level=(2)).loc["recycle_v3", "accuracy"] > 0.85 + + +def test_update_parameters_01(setup_a_dataset): + """ Tests updating parameters. """ + fridge_objects_path = TEMP_DIR / "fridgeObjects" + sweeper = ParameterSweeper() + + # at this point there should only be 1 permutation of the default params + assert len(sweeper.permutations) == 1 + sweeper.update_parameters( + learning_rate=[1e-3, 1e-4, 1e-5], im_size=[299, 499], epochs=[5] + ) + # assert that there are not 6 permutations + assert len(sweeper.permutations) == 6 + df = sweeper.run([fridge_objects_path]) + _test_sweeper_run(df, df_length=18) + + +def test_update_parameters_02(setup_a_dataset): + """ Tests exception when updating parameters. 
""" + sweeper = ParameterSweeper() + with pytest.raises(Exception): + sweeper.update_parameters(bad_key=[1e-3, 1e-4, 1e-5]) diff --git a/image_classification/tests/unit/test_utils.py b/image_classification/tests/unit/test_utils.py index 60053a7..29c2834 100644 --- a/image_classification/tests/unit/test_utils.py +++ b/image_classification/tests/unit/test_utils.py @@ -21,7 +21,7 @@ def make_temp_data_dir(request): def _test_url_data(url: str, path: Union[Path, str], dir_name: str): - data_path = unzip_url(url, fpath=path, dest=path, overwrite=True) + data_path = unzip_url(url, fpath=path, dest=path, exist_ok=True) # assert zip file exists assert os.path.exists(os.path.join(path, f"{dir_name}.zip")) # assert unzipped file (titled {dir_name}) exists @@ -48,25 +48,6 @@ def test_unzip_url_abs_path(make_temp_data_dir): _test_url_data(Urls.recycle_path, abs_path, "recycle_v3") -def test_unzip_url_overwrite(make_temp_data_dir): - """ Test if overwrite is true and file exists """ - - # test overwrite=True - os.makedirs(TEMP_DIR / "fridgeObjects") - fridge_objects_path = unzip_url( - Urls.fridge_objects_path, TEMP_DIR, overwrite=True - ) - assert os.path.realpath(TEMP_DIR / "fridgeObjects") == os.path.realpath( - fridge_objects_path - ) - assert len(os.listdir(fridge_objects_path)) >= 0 - - # test file exists error when overwrite=False - os.makedirs(TEMP_DIR / "lettuce") - with pytest.raises(FileExistsError): - unzip_url(Urls.lettuce_path, TEMP_DIR, overwrite=False) - - def test_unzip_url_exist_ok(make_temp_data_dir): """ Test if exist_ok is true and (file exists, file does not exist) diff --git a/image_classification/utils_ic/common.py b/image_classification/utils_ic/common.py new file mode 100644 index 0000000..2df324f --- /dev/null +++ b/image_classification/utils_ic/common.py @@ -0,0 +1,14 @@ +import os +from pathlib import Path + + +def ic_root_path() -> Path: + """Get the image classification root path""" + return os.path.realpath(os.path.join(os.path.dirname(__file__), os.pardir)) + + +def data_path() -> Path: + """Get the data directory path""" + return os.path.realpath( + os.path.join(os.path.dirname(__file__), os.pardir, "data") + ) diff --git a/image_classification/utils_ic/datasets.py b/image_classification/utils_ic/datasets.py index 88199da..7fba84b 100644 --- a/image_classification/utils_ic/datasets.py +++ b/image_classification/utils_ic/datasets.py @@ -1,6 +1,6 @@ import os import requests -import shutil +from .common import data_path from pathlib import Path from typing import List, Union from urllib.parse import urljoin, urlparse @@ -19,6 +19,7 @@ class Urls: # datasets fridge_objects_path = urljoin(base, "fridgeObjects.zip") food_101_subset_path = urljoin(base, "food101Subset.zip") + fashion_texture_path = urljoin(base, "fashionTexture.zip") flickr_logos_32_subset_path = urljoin(base, "flickrLogos32Subset.zip") lettuce_path = urljoin(base, "lettuce.zip") recycle_path = urljoin(base, "recycle_v3.zip") @@ -38,15 +39,8 @@ def imagenet_labels() -> list: return [labels[str(k)][1] for k in range(len(labels))] -def data_path() -> Path: - """Get the data path""" - return os.path.realpath( - os.path.join(os.path.dirname(__file__), os.pardir, "data") - ) - - def _get_file_name(url: str) -> str: - """Get a file name based on url""" + """ Get a file name based on url. 
""" return urlparse(url).path.split("/")[-1] @@ -55,15 +49,21 @@ def unzip_url( fpath: Union[Path, str] = data_path(), dest: Union[Path, str] = data_path(), exist_ok: bool = False, - overwrite: bool = False, ) -> Path: - """ - Download file from URL to {fpath} and unzip to {dest}. + """ Download file from URL to {fpath} and unzip to {dest}. {fpath} and {dest} must be directories - Params: - exist_ok: if exist_ok, then skip if exists, otherwise throw error - overwrite: if overwrite, remove zipped file and unziped dir - Returns path of {dest} + + Args: + url (str): url to download from + fpath (Union[Path, str]): The location to save the url zip file to + dest (Union[Path, str]): The destination to unzip {fpath} + exist_ok (bool): if exist_ok, then skip if exists, otherwise throw error + + Raises: + FileExistsError: if file exists + + Returns: + Path of {dest} """ def _raise_file_exists_error(path: Union[Path, str]) -> None: @@ -78,16 +78,6 @@ def unzip_url( zip_file = Path(os.path.join(fpath, fname)) unzipped_dir = Path(os.path.join(fpath, fname_without_extension)) - if overwrite: - try: - os.remove(zip_file) - except OSError as e: - pass - try: - shutil.rmtree(unzipped_dir) - except OSError as e: - pass - # download zipfile if zipfile not exists if zip_file.is_file(): _raise_file_exists_error(zip_file) @@ -106,3 +96,20 @@ def unzip_url( z.close() return os.path.realpath(os.path.join(fpath, fname_without_extension)) + + +def unzip_urls( + urls: List[Url], dest: Union[Path, str] = data_path() +) -> List[Path]: + """ Download and unzip all datasets in Urls to dest """ + + # make dir if not exist + if not Path(dest).is_dir(): + os.makedirs(dest) + + # download all data urls + paths = list() + for url in urls: + paths.append(unzip_url(url, dest, exist_ok=True)) + + return paths diff --git a/image_classification/utils_ic/parameter_sweeper.py b/image_classification/utils_ic/parameter_sweeper.py new file mode 100644 index 0000000..993bd0c --- /dev/null +++ b/image_classification/utils_ic/parameter_sweeper.py @@ -0,0 +1,436 @@ +import itertools +import pandas as pd +import re +import time + +from utils_ic.datasets import Urls, data_path, unzip_urls +from collections import OrderedDict +from fastai.vision import * +from fastai.callbacks import EarlyStoppingCallback +from fastai.metrics import accuracy +from functools import partial +from matplotlib.axes import Axes +from typing import Union, List, Any, Dict +from pathlib import Path + +Time = float +parameter_flag = "PARAMETERS" + + +class TrainingSchedule(Enum): + head_only = ("head_only",) + body_only = ("body_only",) + head_first_then_body = "head_first_then_body" + + +class Architecture(Enum): + resnet18 = partial(models.resnet18) + resnet34 = partial(models.resnet34) + resnet50 = partial(models.resnet50) + squeezenet1_1 = partial(models.squeezenet1_1) + + +def clean_df(df: pd.DataFrame) -> pd.DataFrame: + """ + Cleans up experiment paramter strings in {df} by removing all experiment + parameters that held constant through each experiment. This method uses a + variable to search for strings. 
+    Args:
+        df (pd.DataFrame): dataframe to clean up
+    Return:
+        pd.DataFrame: df with renamed experiment parameter strings
+    """
+    text = df.to_html()
+    text = re.findall(fr">\s{{0,1}}{parameter_flag}\s{{0,1}}(.*?)</th>", text)
+
+    sets = [set(t.split("|")) for t in text]
+    intersection = sets[0].intersection(*sets)
+
+    html = df.to_html()
+    for i in intersection:
+        html = html.replace(i, "")
+    html = html.replace("PARAMETERS", "P:")
+    html = html.replace("|", " ")
+
+    return pd.read_html(html, index_col=[0, 1, 2])[0]
+
+
+def plot_df(
+    df: pd.DataFrame,
+    sort_by: str = "accuracy",
+    figsize: Tuple[int, int] = (12, 8),
+) -> None:
+    """
+    Visualize graph from {df}, which must contain columns "accuracy" and
+    "duration".
+    Args:
+        df (pd.DataFrame): the dataframe to visualize.
+        sort_by (str): whether to sort visualization by accuracy or duration.
+        figsize (Tuple[int, int]): as defined in matplotlib.
+    Raises:
+        ValueError: if {sort_by} is an invalid value.
+    """
+    if sort_by not in ("accuracy", "duration"):
+        raise ValueError(f"{sort_by} must equal 'accuracy' or 'duration'")
+
+    def add_value_labels(
+        ax: Axes, spacing: int = 5, percentage: bool = False
+    ) -> None:
+        """
+        Add labels to the end of each bar in a bar chart.
+        Args:
+            ax (Axes): The matplotlib object containing the axes of the plot to annotate.
+            spacing (int): The distance between the labels and the bars.
+            percentage (bool): if y-value is a percentage
+        """
+        for rect in ax.patches:
+            y_value = rect.get_height()
+            x_value = rect.get_x() + rect.get_width() / 2
+
+            label = (
+                "{:.2f}%".format(y_value * 100)
+                if percentage
+                else "{:.1f}".format(y_value)
+            )
+
+            ax.annotate(
+                label,
+                (x_value, y_value),
+                xytext=(0, spacing),  # Vertically shift label by `spacing`
+                textcoords="offset points",  # Interpret `xytext` as offset in points
+                ha="center",  # Horizontally center label
+                va="bottom",  # Vertically align label
+            )
+
+    top_accuracy = df["accuracy"].max()
+    top_duration = df["duration"].max()
+    ax1, ax2 = df.sort_values(by=sort_by).plot.bar(
+        rot=90, subplots=True, legend=False, figsize=figsize
+    )
+    ax1.set_title("Duration (seconds)")
+    ax2.set_title("Accuracy (%)")
+    ax1.set_ylabel("seconds")
+    ax2.set_ylabel("%")
+    ax1.set_ylim(top=top_duration * 1.2)
+    ax2.set_ylim(top=top_accuracy * 1.2)
+    add_value_labels(ax2, percentage=True)
+    add_value_labels(ax1)
+
+
+class ParameterSweeper:
+    """ Test different permutations of a set of parameters.
+
+    Attributes:
+        param_order : A fixed ordering of parameters (to match the ordering of <permutations>)
+        default_params : A dict of default parameters
+        params : The parameters to run experiments on
+    """
+
+    default_params = dict(
+        learning_rate=1e-4,
+        epoch=15,
+        batch_size=16,
+        im_size=299,
+        architecture=Architecture.resnet18,
+        transform=True,
+        dropout=0.5,
+        weight_decay=0.01,
+        training_schedule=TrainingSchedule.head_first_then_body,
+        discriminative_lr=False,
+        one_cycle_policy=True,
+    )
+
+    def __init__(self, **kwargs) -> None:
+        """
+        Initialize class with default params if kwargs is empty.
+        Otherwise, initialize params with kwargs.
+        """
+        self.params = OrderedDict(
+            learning_rate=[self.default_params.get("learning_rate")],
+            epochs=[self.default_params.get("epoch")],
+            batch_size=[self.default_params.get("batch_size")],
+            im_size=[self.default_params.get("im_size")],
+            architecture=[self.default_params.get("architecture")],
+            transform=[self.default_params.get("transform")],
+            dropout=[self.default_params.get("dropout")],
+            weight_decay=[self.default_params.get("weight_decay")],
+            training_schedule=[self.default_params.get("training_schedule")],
+            discriminative_lr=[self.default_params.get("discriminative_lr")],
+            one_cycle_policy=[self.default_params.get("one_cycle_policy")],
+        )
+
+        self.param_order = tuple(self.params.keys())
+        self.update_parameters(**kwargs)
+
+    @property
+    def parameters(self) -> Dict[str, Any]:
+        """ Returns parameters to test on if run() is called. """
+        return self.params
+
+    @property
+    def permutations(self) -> List[Tuple[Any]]:
+        """ Returns a list of all permutations, expressed in tuples. """
+        params = tuple([self.params[k] for k in self.param_order])
+        permutations = list(itertools.product(*params))
+        return permutations
+
+    @staticmethod
+    def _get_data_bunch(
+        path: Union[Path, str], transform: bool, im_size: int, bs: int
+    ) -> ImageDataBunch:
+        """
+        Create ImageDataBunch and return it. TODO in future version is to allow
+        users to pass in their own image bunch or their own Transformation
+        objects (instead of using fastai's <get_transforms>)
+
+        Args:
+            path (Union[Path, str]): path to data to create databunch with
+            transform (bool): a flag to set fastai default transformations (get_transforms())
+            im_size (int): image size of databunch
+            bs (int): batch size of databunch
+        Returns:
+            ImageDataBunch
+        """
+        path = path if type(path) is Path else Path(path)
+        tfms = get_transforms() if transform else None
+        return (
+            ImageList.from_folder(path)
+            .split_by_rand_pct(valid_pct=0.33)
+            .label_from_folder()
+            .transform(tfms=tfms, size=im_size)
+            .databunch(bs=bs)
+            .normalize(imagenet_stats)
+        )
+
+    @staticmethod
+    def _early_stopping_callback(
+        metric: str = "accuracy", min_delta: float = 0.01, patience: int = 3
+    ) -> partial:
+        """ Returns an early stopping callback. """
+        return partial(
+            EarlyStoppingCallback,
+            monitor=metric,
+            min_delta=min_delta,  # conservative
+            patience=patience,
+        )
+
+    @staticmethod
+    def _serialize_permutations(p: Tuple[Any]) -> str:
+        """ Serializes all parameters as a string that uses {parameter_flag}. """
+        p = iter(p)
+        return (
+            f"{parameter_flag} "
+            f"[learning_rate: {next(p)}]|[epochs: {next(p)}]|[batch_size: {next(p)}]|"
+            f"[im_size: {next(p)}]|[arch: {next(p).name}]|"
+            f"[transforms: {next(p)}]|[dropout: {next(p)}]|"
+            f"[weight_decay: {next(p)}]|[training_schedule: {next(p).name}]|"
+            f"[discriminative_lr: {next(p)}]|[one_cycle_policy: {next(p)}]"
+        )
+
+    @staticmethod
+    def _make_df_from_dict(
+        results: Dict[Any, Dict[Any, Dict[Any, Dict[Any, Any]]]]
+    ) -> pd.DataFrame:
+        """ Converts a 4-times-nested dictionary into a multi-index dataframe. """
+        return pd.DataFrame.from_dict(
+            {
+                (i, j, k): results[i][j][k]
+                for i in results.keys()
+                for j in results[i].keys()
+                for k in results[i][j].keys()
+            },
+            orient="index",
+        )
+
+    def _param_tuple_to_dict(self, params: Tuple[Any]) -> Dict[str, Any]:
+        """ Converts a tuple of parameters to a Dict. """
""" + return dict( + learning_rate=params[self.param_order.index("learning_rate")], + batch_size=params[self.param_order.index("batch_size")], + transform=params[self.param_order.index("transform")], + im_size=params[self.param_order.index("im_size")], + epochs=params[self.param_order.index("epochs")], + architecture=params[self.param_order.index("architecture")], + dropout=params[self.param_order.index("dropout")], + weight_decay=params[self.param_order.index("weight_decay")], + discriminative_lr=params[ + self.param_order.index("discriminative_lr") + ], + training_schedule=params[ + self.param_order.index("training_schedule") + ], + one_cycle_policy=params[ + self.param_order.index("one_cycle_policy") + ], + ) + + @classmethod + def download_benchmark_datasets( + cls, dest: Union[Path, str] = data_path() + ) -> List[Path]: + """ Download benchmark datasets to {dest}. """ + benchmark_urls = [ + Urls.fridge_objects_path, + Urls.fashion_texture_path, + Urls.flickr_logos_32_subset_path, + Urls.food_101_subset_path, + Urls.lettuce_path, + Urls.recycle_path, + ] + return unzip_urls(benchmark_urls, dest) + + def _learn( + self, data_path: Path, params: Tuple[Any], stop_early: bool + ) -> Tuple[Learner, Time]: + """ + Given a set of permutations, create a learner to train and validate on + the dataset. + Args: + data_path (Path): The location of the data to use + params (Tuple[Any]): The set of parameters to train and validate on + stop_early (bool): Whether or not to stop early if the evaluation + metric does not improve + Returns: + Tuple[Learner, Time]: Learn object from Fastai and the duration in + seconds it took. + """ + start = time.time() + params = self._param_tuple_to_dict(params) + + transform = params["transform"] + im_size = params["im_size"] + epochs = params["epochs"] + batch_size = params["batch_size"] + architecture = params["architecture"] + dropout = params["dropout"] + learning_rate = params["learning_rate"] + discriminative_lr = params["discriminative_lr"] + training_schedule = params["training_schedule"] + one_cycle_policy = params["one_cycle_policy"] + weight_decay = params["weight_decay"] + + data = self._get_data_bunch(data_path, transform, im_size, batch_size) + + callbacks = list() + if stop_early: + callbacks.append(_early_stopping_callback()) + + learn = cnn_learner( + data, + architecture.value, + metrics=accuracy, + ps=dropout, + callback_fns=callbacks, + ) + + head_learning_rate = learning_rate + body_learning_rate = ( + slice(learning_rate, 3e-3) if discriminative_lr else learning_rate + ) + + def fit( + learn: Learner, e: int, lr: Union[slice, float], wd=float + ) -> partial: + """ Returns a partial func for either fit_one_cycle or fit + depending on """ + return ( + partial(learn.fit_one_cycle, cyc_len=e, max_lr=lr, wd=wd) + if one_cycle_policy + else partial(learn.fit, epochs=e, lr=lr, wd=wd) + ) + + if training_schedule is TrainingSchedule.head_only: + if discriminative_lr: + raise Exception( + "Cannot run discriminative_lr if training schedule is head_only." 
+                )
+            else:
+                fit(learn, epochs, body_learning_rate, weight_decay)()
+
+        elif training_schedule is TrainingSchedule.body_only:
+            learn.unfreeze()
+            fit(learn, epochs, body_learning_rate, weight_decay)()
+
+        elif training_schedule is TrainingSchedule.head_first_then_body:
+            head_epochs = epochs // 4
+            fit(learn, head_epochs, head_learning_rate, weight_decay)()
+            learn.unfreeze()
+            fit(
+                learn, epochs - head_epochs, body_learning_rate, weight_decay
+            )()
+
+        end = time.time()
+        duration = end - start
+
+        return learn, duration
+
+    def update_parameters(self, **kwargs) -> None:
+        """ Update the class object's parameters.
+        If kwarg key is not in an existing param key, then raise exception.
+        If the kwarg value is None, pass.
+        Otherwise overwrite the corresponding self.params key.
+        """
+        for k, v in kwargs.items():
+            if k not in self.params.keys():
+                raise Exception(f"Parameter {k} is invalid.")
+            if v is None:
+                continue
+            self.params[k] = v
+
+    def run(
+        self, datasets: List[Path], reps: int = 3, early_stopping: bool = False
+    ) -> pd.DataFrame:
+        """ Performs the experiment.
+        Iterates through the number of specified <reps>, the list of permutations
+        as defined in this class, and the <datasets> to calculate evaluation
+        metrics and duration for each run.
+
+        WARNING: this method can take a long time depending on your experiment
+        definition.
+
+        Args:
+            datasets (List[Path]): A list of datasets to iterate over.
+            reps (int): The number of runs to loop over.
+            early_stopping (bool): Whether we want to perform early stopping.
+        Returns:
+            pd.DataFrame: a multi-index dataframe with the results stored in it.
+        """
+
+        res = dict()
+        for rep in range(reps):
+
+            res[rep] = dict()
+            for i, permutation in enumerate(self.permutations):
+                print(
+                    f"Running {i+1} of {len(self.permutations)} permutations. "
+                    f"Repeat {rep+1} of {reps}."
+                )
+
+                stringified_permutation = self._serialize_permutations(
+                    permutation
+                )
+                res[rep][stringified_permutation] = dict()
+                for dataset in datasets:
+
+                    data_name = os.path.basename(dataset)
+
+                    res[rep][stringified_permutation][data_name] = dict()
+
+                    learn, duration = self._learn(
+                        dataset, permutation, early_stopping
+                    )
+
+                    _, metric = learn.validate(
+                        learn.data.valid_dl, metrics=[accuracy]
+                    )
+
+                    res[rep][stringified_permutation][data_name][
+                        "duration"
+                    ] = duration
+                    res[rep][stringified_permutation][data_name][
+                        "accuracy"
+                    ] = float(metric)
+
+                    learn.destroy()
+
+        return self._make_df_from_dict(res)
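+
+
+# A minimal usage sketch of this module (illustrative only; `data_dir` is
+# assumed to point at a dataset folder such as an unzipped fridgeObjects):
+#
+# ```python
+# sweeper = ParameterSweeper(learning_rate=[1e-3, 1e-4], epochs=[5])
+# df = sweeper.run(datasets=[data_dir], reps=2)
+# df.mean(level=(1))  # average accuracy and duration per permutation
+# ```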