Remove dead code; move CI pipeline to minimum Python 3.8, which is required by pytorch-lightning.

This commit is contained in:
Chris Lovett 2023-04-04 19:30:15 -07:00
Parent 61a36ecf99
Commit 0b30fa121b
27 changed files with 73 additions and 1650 deletions

4
.github/workflows/build-publish-docs.yml (vendored)
View File

@@ -21,10 +21,10 @@ jobs:
        uses: actions/checkout@v3
        with:
          fetch-depth: 0
-      - name: Sets up Conda with Python 3.7
+      - name: Sets up Conda with Python 3.8
        uses: conda-incubator/setup-miniconda@v2
        with:
-          python-version: 3.7
+          python-version: 3.8
          activate-environment: archai
      - name: Installs the requirements
        shell: bash -l {0}

4
.github/workflows/build-release-pypi.yml (vendored)
View File

@@ -14,10 +14,10 @@ jobs:
        uses: actions/checkout@v3
        with:
          fetch-depth: 0
-      - name: Sets up Python 3.7
+      - name: Sets up Python 3.8
        uses: actions/setup-python@v4
        with:
-          python-version: 3.7
+          python-version: 3.8
      - name: Installs pypa/build
        shell: bash -l {0}
        run: |

4
.github/workflows/lint-run-unit-tests.yaml (vendored)
View File

@@ -19,9 +19,9 @@ jobs:
    name: Lints with `flake8` and run unit tests with `pytest`
    strategy:
      fail-fast: false
      matrix:
        platform: [ windows-latest, ubuntu-latest ]
-        python-version: ["3.7", "3.8", "3.9", "3.10"]
+        python-version: ["3.8", "3.9", "3.10"]
    runs-on: ${{ matrix.platform }}
    steps:
      - name: Pulls the repository

2
.github/workflows/run-notebook-tests.yml (vendored)
View File

@@ -21,7 +21,7 @@ jobs:
    strategy:
      fail-fast: false
      matrix:
-        python-version: ["3.7", "3.8", "3.9", "3.10"]
+        python-version: ["3.8", "3.9", "3.10"]
    steps:
      - name: Pulls the repository
        uses: actions/checkout@v3

4
.gitignore (vendored)
View File

@@ -160,4 +160,6 @@ dataroot/
temp_code/
mnist_test_run*/
logs/
output/
+snpe-2.5.0.4052.zip
+android-ndk-r25b-linux.zip

View File

@@ -37,7 +37,7 @@ To install Archai via PyPI, the following command can be executed:
pip install archai
```
-**Archai requires Python 3.7+ and PyTorch 1.7.0+ to function properly.**
+**Archai requires Python 3.8+ and PyTorch 1.7.0+ to function properly.**
For further information, please consult the [installation guide](https://microsoft.github.io/archai/getting_started/installation.html).
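A quick way to sanity-check an environment against those minimums (a minimal sketch, not part of the repository; assumes `torch` is installed):

```python
# Sketch: verify the documented minimums locally (assumes torch is importable).
import sys
import torch

assert sys.version_info >= (3, 8), "Archai requires Python 3.8+"
major, minor = (int(x) for x in torch.__version__.split("+")[0].split(".")[:2])
assert (major, minor) >= (1, 7), "Archai requires PyTorch 1.7.0+"
print("Environment OK:", sys.version.split()[0], torch.__version__)
```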

View File

@@ -1,63 +0,0 @@
# Qualcomm SNPE SDK requires ubuntu:18.04 and python 3.6!
FROM mcr.microsoft.com/azureml/openmpi3.1.2-ubuntu18.04:latest
ARG SNPE_SDK_ZIP
ARG SNPE_SDK_ROOT
ARG ANDROID_NDK_ZIP
ARG ANDROID_NDK_ROOT
ENV AZUREML_CONDA_ENVIRONMENT_PATH="/opt/miniconda/envs/snap"
RUN apt-get update && apt-get install -y build-essential libtool autoconf cmake unzip wget git curl python3 python3-dev python3-distutils python3-pip ffmpeg libsm6 libxext6 wget locales libjpeg-dev zlib1g zlib1g-dev libprotobuf-dev protobuf-compiler
# need cmake 3.22 to build latest version of onnx-simplifier on Ubuntu 18
RUN apt update && \
apt install -y software-properties-common lsb-release && \
apt clean all
RUN wget -O - https://apt.kitware.com/keys/kitware-archive-latest.asc 2>/dev/null | gpg --dearmor - | tee /etc/apt/trusted.gpg.d/kitware.gpg >/dev/null
RUN apt-add-repository "deb https://apt.kitware.com/ubuntu/ $(lsb_release -cs) main"
RUN apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 6AF7F09730B3F0A4
RUN apt update && apt install -y cmake
# build latest version of protobuf needed by onnx
RUN git clone https://github.com/protocolbuffers/protobuf.git
RUN cd protobuf && \
git checkout v3.16.0 && \
git submodule update --init --recursive && \
mkdir build_source && cd build_source && \
cmake ../cmake -Dprotobuf_BUILD_SHARED_LIBS=OFF -DCMAKE_INSTALL_PREFIX=/usr -DCMAKE_INSTALL_SYSCONFDIR=/etc -DCMAKE_POSITION_INDEPENDENT_CODE=ON -Dprotobuf_BUILD_TESTS=OFF -DCMAKE_BUILD_TYPE=Release && \
make -j$(nproc) && \
make install
# have to ensure default locale is utf-8 otherwise python bails with this error:
# UnicodeEncodeError: 'ascii' codec can't encode character '\xe7' in position 17: ordinal not in range(128)
RUN locale-gen en_US.UTF-8
WORKDIR /home/archai/snpe
COPY "${SNPE_SDK_ZIP}" .
RUN unzip "${SNPE_SDK_ZIP}"
WORKDIR /home/archai/ndk
COPY "${ANDROID_NDK_ZIP}" .
RUN unzip "${ANDROID_NDK_ZIP}"
WORKDIR /home/archai
COPY requirements.txt .
ENV SNPE_ROOT="/home/archai/snpe/$SNPE_SDK_ROOT"
ENV ANDROID_NDK_ROOT="/home/archai/ndk/$ANDROID_NDK_ROOT"
ENV PATH="$PATH:/home/archai:/home/archai/ndk/tools/${PLATFORM_TOOLS_ROOT}:/home/archai/snpe/${SNPE_SDK_ROOT}/bin/x86_64-linux-clang"
ENV LC_ALL="en_US.UTF-8"
ENV PYTHONPATH="/home/archai/snpe/$SNPE_SDK_ROOT/lib/python"
ENV LD_LIBRARY_PATH="${AZUREML_CONDA_ENVIRONMENT_PATH}/lib:${SNPE_ROOT}/lib/x86_64-linux-clang:${LD_LIBRARY_PATH}"
# initialize conda environment. (conda comes with mcr.microsoft.com/azureml)
RUN conda init bash
# this is how you activate a conda environment in Dockerfile
ENV PATH="${AZUREML_CONDA_ENVIRONMENT_PATH}/bin:$PATH"
# Create conda environment
RUN conda create -y -n snap python=3.6 pip=20.2.4
RUN pip install -r requirements.txt

View File

@@ -1,43 +0,0 @@
# Readme
This folder contains docker setup for creating a custom Azure ML docker container that can access
Qualcomm 888 dev kits via USB using the Android `adb` tool. This docker container is designed to run
in a local minikube cluster on the machine that has the Qualcomm 888 dev boards plugged in so that
these devices become available as an Azure ML Arc kubernetes compute cluster for use in [Azure ML
Pipelines](https://learn.microsoft.com/en-us/azure/machine-learning/tutorial-pipeline-python-sdk).
This way you can do a full Archai network search, Azure ML training of the models, SNPE
Quantization, and evaluation of inference times on the target Qualcomm 888 DSP hardware all in one
very cool Azure ML Pipeline.
First you will need to decide which Azure Subscription to use, install the
[Azure Command Line Interface](https://docs.microsoft.com/en-us/cli/azure/install-azure-cli-windows?tabs=azure-cli)
and run `az account set --subscription id` to make this subscription your default.
The setup script requires the following environment variables to be set beforehand:
- **SNPE_SDK** - points to a local zip file containing SNPE SDK version `snpe-1.64.0_3605.zip`
- **ANDROID_NDK** - points to a local zip file containing the Android NDK zip file version `android-ndk-r23b-linux.zip`
- **INPUT_TESTSET** - points to a local zip file containing 10,000 image test set from your dataset.
The [SNPE Readme](../snpe/readme.md) shows where to find those zip files.
After running this script you will see further instructions, first a docker command line in case you
want to build the docker image that runs in a kubernetes cluster.
## Dockerfile
This builds a docker image that you can run in an Azure Kubernetes cluster that will do SNPE model
quantization in the cloud. This frees up your Linux box that is managing Qualcomm devices and helps
you increase your Qualcomm device utilization.
The `Setup.ps1` script shows which docker commands to run to build the image, how to log in to your
Azure docker container registry, and how to tag your image for that registry and push it to Azure,
so you do not need to use the public docker.io container registry. You decide what version number
to attach to your image here, and the same version needs to be specified in the following
`quant.yaml`.
## minikube
Once the docker image is published you can configure your local Kubernetes cluster. If you are
using `minikube`, run `eval $(minikube -p minikube docker-env)` to point your local `docker`
commands at minikube's docker environment.

View File

@@ -1,22 +0,0 @@
importlib-metadata!=4.7.0,<6,>=3.7.0
packaging<22.0,>=20.0
cryptography<39,>=38.0.0
psutil
tqdm
pandas
scipy
numpy
ipykernel
sphinx
azure-storage-blob
azure-data-tables
azureml-mlflow
scikit-image
scikit-learn
scikit-build
opencv-contrib-python
onnx==1.11.0
onnxruntime
onnx-simplifier
pyyaml
matplotlib

View File

@@ -1,19 +0,0 @@
SNPE_SDK_ZIP=snpe-2.5.0.4052.zip
SNPE_SDK_ROOT=snpe-2.5.0.4052
ANDROID_NDK_ZIP=android-ndk-r23b-linux.zip
ANDROID_NDK_ROOT=android-ndk-r23b
set -e
if ! [ -f ${ANDROID_NDK_ZIP} ]; then
    curl -O --location "https://dl.google.com/android/repository/${ANDROID_NDK_ZIP}"
fi
if ! [ -f ${SNPE_SDK_ZIP} ]; then
    echo "Please download the ${SNPE_SDK_ZIP} from:"
    echo "https://developer.qualcomm.com/downloads/qualcomm-neural-processing-sdk-linux-v2050"
    echo "and place the file in this folder."
    exit 1
fi
docker build . --build-arg "SNPE_SDK_ZIP=${SNPE_SDK_ZIP}" --build-arg "SNPE_SDK_ROOT=${SNPE_SDK_ROOT}" --build-arg "ANDROID_NDK_ZIP=${ANDROID_NDK_ZIP}" --build-arg "ANDROID_NDK_ROOT=${ANDROID_NDK_ROOT}"

View File

@@ -57,7 +57,7 @@ ENV LD_LIBRARY_PATH="${AZUREML_CONDA_ENVIRONMENT_PATH}/lib:${SNPE_SDK_ROOT}/lib/
RUN wget https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh
RUN bash Miniconda3-latest-Linux-x86_64.sh -b -p ./miniconda3
RUN conda init bash
-RUN conda create -y -n snap37 python=3.7 pip=20.2.4
+RUN conda create -y -n snap37 python=3.8 pip=20.2.4
ENV PATH="${AZUREML_CONDA_ENVIRONMENT_PATH}/bin:$PATH"
RUN wget -O azcopy_v10.tar.gz https://aka.ms/downloadazcopy-v10-linux && tar -xf azcopy_v10.tar.gz --strip-components=1
@@ -65,22 +65,24 @@ RUN wget -O azcopy_v10.tar.gz https://aka.ms/downloadazcopy-v10-linux && tar -xf
# this echo is a trick to bypass docker build cache.
# simply change the echo string every time you want docker build to pull down new bits.
RUN echo '03/17/2023 04:30 PM' >/dev/null && git clone https://github.com/microsoft/archai.git
+RUN cd archai && git checkout task_segmentation
+RUN cd archai && pip install -e .[dev]
RUN echo "using this pip version: " && which pip
RUN echo "using this python version: " && which python
-RUN pushd /home/archai/image_segmentation/snpe && \
+RUN pushd /home/archai/archai/tasks/face_segmentation/snpe && \
    python --version && \
    pip install -r requirements.txt
# Install our olive2 branch
RUN echo '03/17/2023 04:20 PM' >/dev/null && git clone "https://github.com/microsoft/Olive.git"
-RUN cd Olive && && pip install -e .
+RUN cd Olive && pip install -e .
+RUN python -m olive.snpe.configure
RUN pip list
# This container starts running immediately so that the kubernetes cluster simply scales
-# automatically by how busy this run script is and we don't have to manually launch jobs.
+# automatically based on how busy this script is (see HorizontalPodAutoscaler in quant.yaml).
COPY run.sh /home/archai/run.sh
RUN ls -al /home/archai
RUN cat run.sh

View File

@@ -13,14 +13,11 @@ fi
python -m olive.snpe.configure
-pushd $SNPE_ROOT
-source $SNPE_ROOT/bin/envsetup.sh -o $SNPE_ROOT/python36-env/lib/python3.6/site-packages/onnx
-popd
pushd /home/archai/experiment
while true
do
-    python -u /home/archai/image_segmentation/snpe/azure/runner.py
+    python -u /home/archai/archai/tasks/face_segmentation/snpe/azure/runner.py
    if [ $? != 0 ]; then
        echo "Script returned an error code!"
    fi

View File

@@ -188,7 +188,7 @@ Write-Host "Test set url is $test_set_url"
# ================= Write out info/next steps ================
Write-Host ""
-Write-Host docker build --build-arg --build-arg "`"MODEL_STORAGE_CONNECTION_STRING=$conn_str`"" `
+Write-Host docker build --build-arg "`"MODEL_STORAGE_CONNECTION_STRING=$conn_str`"" `
    --build-arg "SNPE_SDK_ZIP=$snpe_sdk_zip" --build-arg "SNPE_SDK_ROOT=$snpe_root" `
    --build-arg "ANDROID_NDK_ZIP=$android_sdk_zip" --build-arg "ANDROID_NDK_ROOT=$android_ndk_root" `
    . --progress plain

File diff suppressed because one or more lines are too long

Binary file not shown.

View File

@@ -1 +0,0 @@
pip install -r requirements.txt --extra-index-url https://pkgs.dev.azure.com/azure-sdk/public/_packaging/azure-sdk-for-python/pypi/simple/

View File

@@ -1,218 +0,0 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# IOT Hub Management\n",
"\n",
"This notebook can be used to configure your IOT hub so it can talk to your Qualcom HDK 888 devices."
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
"import sys\n",
"sys.path += ['./scripts']"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"# Authentication package\n",
"from azure.identity import DefaultAzureCredential, InteractiveBrowserCredential\n",
"\n",
"try:\n",
" credential = DefaultAzureCredential()\n",
" # Check if given credential can get token successfully.\n",
" credential.get_token(\"https://management.azure.com/.default\")\n",
"except Exception as ex:\n",
" # Fall back to InteractiveBrowserCredential in case DefaultAzureCredential not work\n",
" credential = InteractiveBrowserCredential()"
]
},
{
"cell_type": "code",
"execution_count": 17,
"metadata": {},
"outputs": [],
"source": [
"# %pip install azure-mgmt-iothub\n",
"# %pip install azure-iot-device\n",
"# %pip install azure-iot-hub\n"
]
},
{
"cell_type": "code",
"execution_count": 33,
"metadata": {},
"outputs": [],
"source": [
"import json\n",
" \n",
"config = json.load(open(\".azureml/config.json\"))\n",
"\n",
"subscription = config['subscription_id']\n",
"iot_resource_group = config['iot_resource_group']\n",
"iot_hub_name = config['iot_hub_name']\n",
"location = config['location']\n",
"iot_hub_connection_string = config['iot_hub_connection_string']"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [],
"source": [
"\n",
"from azure.mgmt.iothub import IotHubClient\n",
"\n",
"iothub_client = IotHubClient(\n",
" credential,\n",
" subscription\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"MsrSnpeDeviceHub\n"
]
}
],
"source": [
"async_iot_hub = iothub_client.iot_hub_resource.begin_create_or_update(\n",
" iot_resource_group,\n",
" iot_hub_name,\n",
" {\n",
" 'location': location,\n",
" 'subscriptionid': subscription,\n",
" 'resourcegroup': iot_resource_group,\n",
" 'sku': {\n",
" 'name': 'S1',\n",
" 'capacity': 2\n",
" }\n",
" }\n",
")\n",
"\n",
"iot_hub = async_iot_hub.result() # Blocking wait for creation\n",
"\n",
"print(iot_hub.name)"
]
},
{
"cell_type": "code",
"execution_count": 14,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"['f7a32443']\n"
]
}
],
"source": [
"from utils import Adb\n",
"\n",
"adb = Adb()\n",
"devices = adb.get_devices()\n",
"\n",
"print(devices)"
]
},
{
"cell_type": "code",
"execution_count": 34,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"2492867a\n"
]
}
],
"source": [
"from azure.iot.hub import IoTHubRegistryManager\n",
"from azure.iot.hub.models import CloudToDeviceMethod, CloudToDeviceMethodResult, Twin\n",
"from azure.iot.device import IoTHubDeviceClient, MethodResponse\n",
"\n",
"iothub_registry_manager = IoTHubRegistryManager(iot_hub_connection_string)\n",
"device = iothub_registry_manager.get_twin('2492867a')\n",
"\n",
"print(device.device_id)"
]
},
{
"cell_type": "code",
"execution_count": 32,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"<azure.iot.hub.protocol.models.twin_py3.Twin at 0x249877eadd0>"
]
},
"execution_count": 32,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# update the device twin to communicate the status of the device.\n",
"iothub_registry_manager.update_twin(device.device_id, Twin(tags={'device_type': 'android', 'online': True}))"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "archai",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.9"
},
"orig_nbformat": 4,
"vscode": {
"interpreter": {
"hash": "6e17fa15dde6767600fffe17352775c2dcab3bfbf1fd9e12a03b13b1554fc994"
}
}
},
"nbformat": 4,
"nbformat_minor": 2
}

View File

@@ -1,597 +0,0 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": 43,
"metadata": {},
"outputs": [],
"source": [
"# Connect to Azure Machine Learning Workspace \n",
"import json\n",
"from azureml.core import Workspace\n",
"\n",
"ws = Workspace.from_config()\n",
"\n",
"# make sure we have a scripts dir for the code to run our jobs.\n",
"import os\n",
"scripts_dir = \"./scripts\"\n",
"os.makedirs(scripts_dir, exist_ok=True)\n",
"\n",
"config_file = \".azureml/config.json\"\n",
"config = json.load(open(config_file, 'r'))\n",
"\n",
"for required_key in ['subscription_id', 'resource_group', 'workspace_name', 'storage_account_key', 'storage_account_name']:\n",
" if not required_key in config:\n",
" print(f\"### Error: please add a {required_key} to {config_file}\")\n",
"\n",
"storage_account_key = config['storage_account_key'] \n",
"storage_account_name = config['storage_account_name']"
]
},
{
"cell_type": "code",
"execution_count": 44,
"metadata": {},
"outputs": [],
"source": [
"# Handle to the workspace\n",
"from azure.ai.ml import MLClient\n",
"\n",
"# Authentication package\n",
"from azure.identity import DefaultAzureCredential, InteractiveBrowserCredential\n",
"\n",
"try:\n",
" credential = DefaultAzureCredential()\n",
" # Check if given credential can get token successfully.\n",
" credential.get_token(\"https://management.azure.com/.default\")\n",
"except Exception as ex:\n",
" # Fall back to InteractiveBrowserCredential in case DefaultAzureCredential not work\n",
" credential = InteractiveBrowserCredential()"
]
},
{
"cell_type": "code",
"execution_count": 45,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"Found the config file in: .azureml\\config.json\n"
]
}
],
"source": [
"# Get a handle to the workspace\n",
"ml_client = MLClient.from_config(\n",
" credential=credential,\n",
" path='./.azureml/config.json'\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 46,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Dataset with name face_segmentation_dataset was registered to workspace, the dataset version is 1.0.1\n"
]
}
],
"source": [
"from azure.ai.ml.entities import Data\n",
"from azure.ai.ml.constants import AssetTypes\n",
"\n",
"face_data = Data(\n",
" name=\"face_segmentation_dataset\",\n",
" path=\"https://nasfacemodels.blob.core.windows.net/downloads/099000.zip\",\n",
" type=AssetTypes.URI_FILE,\n",
" description=\"Dataset for quantizing the model\",\n",
" tags={\"source_type\": \"web\", \"source\": \"azure\"},\n",
" version=\"1.0.1\",\n",
")\n",
"\n",
"face_data = ml_client.data.create_or_update(face_data)\n",
"print(\n",
" f\"Dataset with name {face_data.name} was registered to workspace, the dataset version is {face_data.version}\"\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 47,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"AzureBlobDatastore({'type': <DatastoreType.AZURE_BLOB: 'AzureBlob'>, 'name': 'nassnpe205test', 'description': 'Datastore pointing to our dataset container.', 'tags': {}, 'properties': {}, 'print_as_yaml': True, 'id': '/subscriptions/c8b7f913-60fb-4759-a310-fc5630e56f99/resourceGroups/snpe-aml-rg/providers/Microsoft.MachineLearningServices/workspaces/snpe-aml-workspace/datastores/nassnpe205test', 'Resource__source_path': None, 'base_path': 'd:\\\\git\\\\microsoft\\\\image_segmentation\\\\snpe\\\\notebooks\\\\quantize', 'creation_context': None, 'serialize': <msrest.serialization.Serializer object at 0x0000017746404D30>, 'credentials': {'type': 'account_key'}, 'container_name': 'models', 'account_name': 'nassnpe205test', 'endpoint': 'core.windows.net', 'protocol': 'https'})"
]
},
"execution_count": 47,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from azure.ai.ml.entities import AzureBlobDatastore\n",
"from azure.ai.ml.entities._credentials import AccountKeyConfiguration\n",
"\n",
"# Register the blob store container for storing our models\n",
"data_store = AzureBlobDatastore(\n",
" name=\"nassnpe205test\",\n",
" description=\"Datastore pointing to our dataset container.\",\n",
" account_name=storage_account_name,\n",
" container_name=\"models\",\n",
" credentials=AccountKeyConfiguration(\n",
" account_key=storage_account_key\n",
" ),\n",
")\n",
"\n",
"ml_client.create_or_update(data_store)"
]
},
{
"cell_type": "code",
"execution_count": 48,
"metadata": {},
"outputs": [],
"source": [
"# This is the path shown in the Data asset we created to point to this model...\n",
"subscription = ml_client.subscription_id\n",
"rg = ml_client.resource_group_name\n",
"ws_name = ml_client.workspace_name\n",
"\n",
"#datastore_path = f\"azureml://datastores/nasfacemodels/paths/Deci1\"\n",
"datastore_path = f\"azureml://datastores/nassnpe205test/paths/Deci2\"\n",
" \n",
"model_path = f\"{datastore_path}/deci_optimized_2.onnx\"\n",
"dlc_path = f\"{datastore_path}/model.dlc\"\n",
"quant_dlc_path = f\"{datastore_path}/model.quant.dlc\"\n",
"dlc_info_path = f\"{datastore_path}/model.info.txt\"\n",
"quant_dlc_info_path = f\"{datastore_path}/model.quant.info.txt\""
]
},
{
"cell_type": "code",
"execution_count": 49,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Found your Kubernetes cluster named snpe-compute, awesome!\n"
]
}
],
"source": [
"from azure.ai.ml.entities import AmlCompute\n",
"\n",
"snpe_cluster = \"snpe-compute\"\n",
"\n",
"try:\n",
" # let's see if the compute target already exists\n",
" cpu_cluster = ml_client.compute.get(snpe_cluster)\n",
" print(\n",
" f\"Found your Kubernetes cluster named {snpe_cluster}, awesome!\"\n",
" )\n",
"\n",
"except Exception:\n",
" print(f\"Computer cluster named {snpe_cluster} was not found ...\")"
]
},
{
"cell_type": "code",
"execution_count": 50,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Environment with name quantization is registered to workspace, the environment version is 1.3\n"
]
}
],
"source": [
"from azure.ai.ml.entities import Environment\n",
"\n",
"custom_env_name = \"quantization\"\n",
"\n",
"pipeline_job_env = Environment(\n",
" name=custom_env_name,\n",
" description=\"Custom environment for running the quantizer pipeline\", \n",
" image=\"snpecontainerregistry001.azurecr.io/snpe-2.5.0\",\n",
" version=\"1.3\",\n",
")\n",
"pipeline_job_env = ml_client.environments.create_or_update(pipeline_job_env)\n",
"\n",
"print(\n",
" f\"Environment with name {pipeline_job_env.name} is registered to workspace, the environment version is {pipeline_job_env.version}\"\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 51,
"metadata": {},
"outputs": [],
"source": [
"from azure.ai.ml import command\n",
"from azure.ai.ml import Input, Output\n",
"\n",
"data_prep_component = command(\n",
" name=\"data_prep\",\n",
" display_name=\"Data preparation for quantization\",\n",
" description=\"Unzips the dataset and converts to .bin format\",\n",
" inputs={\n",
" \"data\": Input(type=\"uri_folder\")\n",
" },\n",
" outputs= {\n",
" \"quant_data\": Output(type=\"uri_folder\", mode=\"rw_mount\")\n",
" },\n",
"\n",
" # The source folder of the component\n",
" code=scripts_dir,\n",
" command=\"\"\"python3 data_prep.py \\\n",
" --data ${{inputs.data}} \\\n",
" --output ${{outputs.quant_data}} \\\n",
" \"\"\",\n",
" environment=f\"{pipeline_job_env.name}:{pipeline_job_env.version}\",\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 52,
"metadata": {},
"outputs": [],
"source": [
"convert_component = command(\n",
" name=\"convert\",\n",
" display_name=\"Convert .onnx to .dlc\",\n",
" description=\"Converts the onnx model to dlc format\",\n",
" inputs={\n",
" \"model\": Input(type=\"uri_file\")\n",
" },\n",
" outputs= {\n",
" \"dlc\": Output(type=\"uri_file\", path=dlc_path, mode=\"rw_mount\")\n",
" },\n",
"\n",
" # The source folder of the component\n",
" code=scripts_dir,\n",
" command=\"\"\"python3 convert.py \\\n",
" --model ${{inputs.model}} \\\n",
" --output ${{outputs.dlc}} \\\n",
" \"\"\",\n",
" environment=f\"{pipeline_job_env.name}:{pipeline_job_env.version}\",\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 53,
"metadata": {},
"outputs": [],
"source": [
"quant_component = command(\n",
" name=\"quantize_model\",\n",
" display_name=\"Quantize the Model\",\n",
" description=\"Runs snpe-dlc-quant on the model using the prepared quantization dataset\",\n",
" inputs={\n",
" \"data\": Input(type=\"uri_folder\"),\n",
" \"list_file\": Input(type=\"string\"),\n",
" \"model\": Input(type=\"uri_folder\")\n",
" },\n",
" outputs= {\n",
" \"quant_model\": Output(type=\"uri_file\", path=quant_dlc_path, mode=\"rw_mount\")\n",
" },\n",
"\n",
" # The source folder of the component\n",
" code=scripts_dir,\n",
" command=\"\"\"python3 quantize.py \\\n",
" --data ${{inputs.data}} \\\n",
" --list_file ${{inputs.list_file}} \\\n",
" --model ${{inputs.model}} \\\n",
" --output ${{outputs.quant_model}} \\\n",
" \"\"\",\n",
" environment=f\"{pipeline_job_env.name}:{pipeline_job_env.version}\",\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 54,
"metadata": {},
"outputs": [],
"source": [
"def info_component(output_path):\n",
" return command(\n",
" name=\"model_info\",\n",
" display_name=\"Get model metrics\",\n",
" description=\"Runs snpe-dlc-info on the input .dlc model\",\n",
" inputs={\n",
" \"model\": Input(type=\"uri_folder\")\n",
" },\n",
" outputs= {\n",
" \"info\": Output(type=\"uri_file\", path=output_path, mode=\"rw_mount\")\n",
" },\n",
"\n",
" # The source folder of the component\n",
" code=scripts_dir,\n",
" command=\"\"\"python3 dlc_info.py \\\n",
" --model ${{inputs.model}} \\\n",
" --output ${{outputs.info}} \\\n",
" \"\"\",\n",
" environment=f\"{pipeline_job_env.name}:{pipeline_job_env.version}\",\n",
" )"
]
},
{
"cell_type": "code",
"execution_count": 55,
"metadata": {},
"outputs": [],
"source": [
"# the dsl decorator tells the sdk that we are defining an Azure ML pipeline\n",
"from azure.ai.ml import dsl, Input, Output\n",
"\n",
"@dsl.pipeline(\n",
" compute=snpe_cluster,\n",
" description=\"Quantization pipeline\",\n",
")\n",
"def quantization_pipeline(\n",
" pipeline_job_data_input,\n",
" model_input\n",
"):\n",
" # using data_prep_function like a python call with its own inputs\n",
" data_prep_job = data_prep_component(\n",
" data=pipeline_job_data_input\n",
" )\n",
"\n",
" # convert onnx to dlc\n",
" convert_job = convert_component(\n",
" model=model_input\n",
" )\n",
"\n",
" # get the dlc info on the converted model\n",
" info_job = info_component(dlc_info_path)(\n",
" model=convert_job.outputs.dlc\n",
" )\n",
"\n",
" # quantize the dlc model\n",
" quant_job = quant_component(\n",
" data=data_prep_job.outputs.quant_data,\n",
" list_file='input_list.txt',\n",
" model=convert_job.outputs.dlc\n",
" )\n",
"\n",
" # get the dlc info on quantized model\n",
" info_job = info_component(quant_dlc_info_path)(\n",
" model=quant_job.outputs.quant_model\n",
" )\n",
"\n",
" # a pipeline returns a dictionary of outputs\n",
" # keys will code for the pipeline output identifier\n",
" return {\n",
" \"pipeline_job_model\": convert_job.outputs.dlc,\n",
" \"pipeline_job_quant_model\": quant_job.outputs.quant_model,\n",
" \"pipeline_job_info\": info_job.outputs.info\n",
" }"
]
},
{
"cell_type": "code",
"execution_count": 60,
"metadata": {},
"outputs": [],
"source": [
"# Let's instantiate the pipeline with the parameters of our choice\n",
"pipeline = quantization_pipeline(\n",
" pipeline_job_data_input=Input(type=\"uri_file\", path=face_data.path),\n",
" model_input=Input(type=\"uri_file\", path=model_path)\n",
")\n",
"\n",
"# submit the pipeline job\n",
"pipeline_job = ml_client.jobs.create_or_update(\n",
" pipeline,\n",
" # Project's name\n",
" experiment_name=\"quantization_test_run\",\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 57,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"True"
]
},
"execution_count": 57,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"import webbrowser\n",
"# open the pipeline in web browser\n",
"webbrowser.open(pipeline_job.services[\"Studio\"].endpoint)"
]
},
{
"cell_type": "code",
"execution_count": 58,
"metadata": {},
"outputs": [],
"source": [
"ml_client.jobs.download(pipeline_job.name, output_name='pipeline_job_info')"
]
},
{
"cell_type": "code",
"execution_count": 59,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"name: strong_pasta_wgv176q8tw\n",
"display_name: quantization_pipeline\n",
"description: Quantization pipeline\n",
"type: pipeline\n",
"inputs:\n",
" pipeline_job_data_input:\n",
" mode: ro_mount\n",
" type: uri_file\n",
" path: azureml:https://nasfacemodels.blob.core.windows.net/downloads/099000.zip\n",
" model_input:\n",
" mode: ro_mount\n",
" type: uri_file\n",
" path: azureml://datastores/nassnpe205test/paths/Deci2/deci_optimized_2.onnx\n",
"outputs:\n",
" pipeline_job_model:\n",
" mode: rw_mount\n",
" type: uri_file\n",
" path: azureml://datastores/nassnpe205test/paths/Deci2/model.dlc\n",
" pipeline_job_quant_model:\n",
" mode: rw_mount\n",
" type: uri_file\n",
" path: azureml://datastores/nassnpe205test/paths/Deci2/model.quant.dlc\n",
" pipeline_job_info:\n",
" mode: rw_mount\n",
" type: uri_file\n",
" path: azureml://datastores/nassnpe205test/paths/Deci2/model.quant.info.txt\n",
"jobs:\n",
" data_prep_job:\n",
" type: command\n",
" inputs:\n",
" data:\n",
" path: ${{parent.inputs.pipeline_job_data_input}}\n",
" outputs:\n",
" quant_data:\n",
" mode: rw_mount\n",
" type: uri_folder\n",
" component: azureml:azureml_anonymous:27028d4f-0db8-49a6-a997-74422e5a037b\n",
" convert_job:\n",
" type: command\n",
" inputs:\n",
" model:\n",
" path: ${{parent.inputs.model_input}}\n",
" outputs:\n",
" dlc: ${{parent.outputs.pipeline_job_model}}\n",
" component: azureml:azureml_anonymous:d8fc98af-0f29-4b27-98ef-3993a109b122\n",
" info_job_1:\n",
" type: command\n",
" inputs:\n",
" model:\n",
" path: ${{parent.jobs.convert_job.outputs.dlc}}\n",
" outputs:\n",
" info:\n",
" mode: rw_mount\n",
" type: uri_file\n",
" path: azureml://datastores/nassnpe205test/paths/Deci2/model.info.txt\n",
" component: azureml:azureml_anonymous:d0525a94-b93d-4301-8cc3-1ed669cf4108\n",
" quant_job:\n",
" type: command\n",
" inputs:\n",
" list_file: input_list.txt\n",
" data:\n",
" path: ${{parent.jobs.data_prep_job.outputs.quant_data}}\n",
" model:\n",
" path: ${{parent.jobs.convert_job.outputs.dlc}}\n",
" outputs:\n",
" quant_model: ${{parent.outputs.pipeline_job_quant_model}}\n",
" component: azureml:azureml_anonymous:04f22241-0aea-45a9-949a-3204e076edc2\n",
" info_job:\n",
" type: command\n",
" inputs:\n",
" model:\n",
" path: ${{parent.jobs.quant_job.outputs.quant_model}}\n",
" outputs:\n",
" info: ${{parent.outputs.pipeline_job_info}}\n",
" component: azureml:azureml_anonymous:d0525a94-b93d-4301-8cc3-1ed669cf4108\n",
"services:\n",
" Tracking:\n",
" endpoint: azureml://westus2.api.azureml.ms/mlflow/v1.0/subscriptions/c8b7f913-60fb-4759-a310-fc5630e56f99/resourceGroups/snpe-aml-rg/providers/Microsoft.MachineLearningServices/workspaces/snpe-aml-workspace?\n",
" job_service_type: Tracking\n",
" Studio:\n",
" endpoint: https://ml.azure.com/runs/strong_pasta_wgv176q8tw?wsid=/subscriptions/c8b7f913-60fb-4759-a310-fc5630e56f99/resourcegroups/snpe-aml-rg/workspaces/snpe-aml-workspace&tid=72f988bf-86f1-41af-91ab-2d7cd011db47\n",
" job_service_type: Studio\n",
"compute: azureml:snpe-compute\n",
"status: Preparing\n",
"creation_context:\n",
" created_at: '2023-03-01T01:23:51.345924+00:00'\n",
" created_by: Chris Lovett\n",
" created_by_type: User\n",
"experiment_name: quantization_test_run\n",
"properties:\n",
" mlflow.source.git.repoURL: git@ssh.dev.azure.com:v3/msresearch/archai/image_segmentation\n",
" mlflow.source.git.branch: main\n",
" mlflow.source.git.commit: 6819790b2144d222d3f24870e213728eeebb11e9\n",
" azureml.git.dirty: 'True'\n",
" azureml.DevPlatv2: 'true'\n",
" azureml.runsource: azureml.PipelineRun\n",
" runSource: MFE\n",
" runType: HTTP\n",
" azureml.parameters: '{}'\n",
" azureml.continue_on_step_failure: 'False'\n",
" azureml.continue_on_failed_optional_input: 'True'\n",
" azureml.defaultComputeName: snpe-compute\n",
" azureml.defaultDataStoreName: workspaceblobstore\n",
" azureml.pipelineComponent: pipelinerun\n",
"id: azureml:/subscriptions/c8b7f913-60fb-4759-a310-fc5630e56f99/resourceGroups/snpe-aml-rg/providers/Microsoft.MachineLearningServices/workspaces/snpe-aml-workspace/jobs/strong_pasta_wgv176q8tw\n",
"\n"
]
}
],
"source": [
"print(pipeline_job)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "archai",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.9"
},
"orig_nbformat": 4,
"vscode": {
"interpreter": {
"hash": "799abcba35f70097d02fca042963180a03ec3451fe1b7671ac5d22383cd0232c"
}
}
},
"nbformat": 4,
"nbformat_minor": 2
}

View File

@@ -1 +0,0 @@
azure-ai-ml==1.5.0a20230215003

View File

@@ -1,87 +0,0 @@
from azure.iot.device import IoTHubDeviceClient, MethodResponse
from utils import Adb, spawn
import json
import platform
import time
from threading import Thread


class SnpeClient:
    def __init__(self, device, iot_hub_name, iot_resource_group):
        self.device = device
        self.iot_hub_name = iot_hub_name
        self.iot_resource_group = iot_resource_group
        self.adb = Adb()
        self.connected = False

    def connect(self):
        if platform.system() == 'Windows':
            az = 'az.cmd'
        else:
            az = 'az'
        rc, stdout, stderr = spawn([az, 'iot', 'hub', 'device-identity', 'connection-string', 'show',
                                    '--device-id', self.device, '--hub-name', self.iot_hub_name,
                                    '--resource-group', self.iot_resource_group])
        if rc != 0:
            print(stderr)
            self.connected = False
            return False
        config = json.loads(stdout)
        self.connection_string = config['connectionString']
        # Instantiate the client
        self.client = IoTHubDeviceClient.create_from_connection_string(self.connection_string)
        try:
            # Attach the handler to the client
            self.client.on_method_request_received = self.method_request_handler
            self.connected = True
            return True
        except Exception:
            # In the event of failure, clean up
            self.client.shutdown()
            self.connected = False
            return False

    def start_thread(self):
        self.stdin_thread = Thread(target=self.run, args=())
        self.stdin_thread.daemon = True
        self.stdin_thread.start()

    def run(self):
        print(f"running thread for device {self.device}...")
        while self.connected:
            time.sleep(1)

    # Define the handler for method requests
    def method_request_handler(self, method_request):
        if method_request.name == "ls":
            # run an adb command on the device.
            path = '/'
            if 'path' in method_request.payload:
                path = method_request.payload['path']
            print(f"device {self.device} is running ls {path}")
            result = self.adb.ls(self.device, path)
            # Create a method response indicating the method request was resolved
            resp_status = 200
            resp_payload = {"Response": result}
            method_response = MethodResponse(method_request.request_id, resp_status, resp_payload)
        elif method_request.name == "shutdown":
            print(f"device {self.device} is shutting down...")
            self.connected = False
            # Create a method response indicating the method request was resolved
            resp_status = 200
            resp_payload = {"Response": "Shutting down device " + self.device}
            method_response = MethodResponse(method_request.request_id, resp_status, resp_payload)
        else:
            # Create a method response indicating the method request was for an unknown method
            resp_status = 404
            resp_payload = {"Response": "Unknown method"}
            method_response = MethodResponse(method_request.request_id, resp_status, resp_payload)
        # Send the method response
        self.client.send_method_response(method_response)
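For context, the cloud side invokes these handlers as IoT Hub direct methods; a minimal sketch using the `azure-iot-hub` package that the IoT notebook above references (the connection string and device id are placeholders):

```python
# Sketch: invoke the "ls" handler above from the cloud side (placeholders marked).
from azure.iot.hub import IoTHubRegistryManager
from azure.iot.hub.models import CloudToDeviceMethod

registry = IoTHubRegistryManager("<iot-hub-connection-string>")  # placeholder
method = CloudToDeviceMethod(method_name="ls", payload={"path": "/data/local/tmp"})
response = registry.invoke_device_method("<device-id>", method)  # placeholder id
print(response.status, response.payload)
```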

View File

@@ -1,107 +0,0 @@
import os
import sys
import mlflow
import argparse
from onnxruntime import InferenceSession
from utils import spawn


def _get_input_layout(shape):
    # snpe-onnx-to-dlc supported input layouts are:
    # NCHW, NHWC, NFC, NCF, NTF, TNF, NF, NC, F, NONTRIVIAL
    # N = Batch, C = Channels, H = Height, W = Width, F = Feature, T = Time
    if shape[0] == 3:
        # then the RGB channel is first, so we are NCHW
        return 'NCHW'
    elif shape[-1] == 3:
        return 'NHWC'
    else:
        raise Exception(f"Cannot figure out input layout from shape: {shape}")


def main():
    """Main function of the script."""
    # input and output arguments
    parser = argparse.ArgumentParser()
    parser.add_argument("--model", type=str, help="path to onnx file to convert to .dlc")
    parser.add_argument("--output", type=str, help="path to resulting converted model")
    args = parser.parse_args()
    model = args.model
    output_path = args.output
    output_dir = os.path.dirname(output_path)
    print(" ".join(f"{k}={v}" for k, v in vars(args).items()))
    print("input model:", model)
    print("output:", output_path)
    if not model or not os.path.exists(model):
        raise Exception(f'### Error: no file found at: {model}')
    if output_dir == '':
        output_dir = '.'
    else:
        os.makedirs(output_dir, exist_ok=True)

    # Start Logging
    mlflow.start_run()
    print(f"Found mlflow tracking Uri: {mlflow.tracking.get_tracking_uri()}")
    sess = InferenceSession(model, providers=['CPUExecutionProvider'])
    if len(sess._sess.inputs_meta) > 1:
        raise Exception("Cannot handle models with more than one input")
    if len(sess._sess.outputs_meta) > 1:
        raise Exception("Cannot handle models with more than one output")
    input_meta = sess._sess.inputs_meta[0]
    output_meta = sess._sess.outputs_meta[0]

    print(f"==> Converting model {model} to .dlc...")
    shape = input_meta.shape
    if len(shape) == 4:
        shape = shape[1:]  # trim off batch dimension
    layout = _get_input_layout(shape)
    input_shape = ",".join([str(i) for i in input_meta.shape])
    output_dlc = output_path
    # snpe-onnx-to-dlc changes the model input to NHWC.
    dlc_shape = shape if layout == 'NHWC' else [shape[1], shape[2], shape[0]]
    mlflow.set_tag("input_shape", dlc_shape)
    command = ["snpe-onnx-to-dlc", "-i", model,
               "-d", input_meta.name, input_shape,
               "--input_layout", input_meta.name, layout,
               "--out_node", output_meta.name,
               "-o", output_dlc]
    print(" ".join(command))
    rc, stdout, stderr = spawn(command)
    print("shape: ", dlc_shape)
    print("stdout:")
    print("-------")
    print(stdout)
    print("")
    print("stderr:")
    print("-------")
    print(stderr)
    if "INFO_CONVERSION_SUCCESS" in stderr:
        print("==> Conversion successful!")
    else:
        print("==> Conversion failed!")
        sys.exit(1)
    # Stop Logging
    mlflow.end_run()


if __name__ == "__main__":
    main()
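The layout inference above keys off which dimension holds the three RGB channels. A minimal sanity sketch (assuming this script is importable as `convert`, a naming assumption):

```python
# Sketch: expected behavior of _get_input_layout (module name 'convert' assumed).
from convert import _get_input_layout

assert _get_input_layout([3, 256, 256]) == 'NCHW'  # channels first
assert _get_input_layout([256, 256, 3]) == 'NHWC'  # channels last
```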

View File

@@ -1,111 +0,0 @@
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import argparse
import cv2
import numpy as np
import glob
import os
import sys
import tqdm
from shutil import rmtree

DEVICE_WORKING_DIR = "/data/local/tmp"


class DataGenerator():
    def __init__(self, root, img_size, subset='quant', count=1000, transpose=None):
        self.img_size = img_size
        self.root = root
        self.subset = subset
        self.transpose = transpose
        all_seg_files = sorted(glob.glob(os.path.join(self.root, '*_seg.png')))
        if len(all_seg_files) == 0:
            print("### no *_seg.png files found in {}".format(self.root))
            sys.exit(1)
        # use the first 1000 images for quantization and the last 'count' images for test
        assert subset in ['quant', 'test']
        if subset == 'quant':
            self.seg_files = all_seg_files[0:1000]
        elif subset == 'test':
            self.seg_files = all_seg_files[len(all_seg_files) - count:]
        self.img_files = [s.replace("_seg.png", ".png") for s in self.seg_files]

    def __len__(self):
        return len(self.img_files)

    def __call__(self):
        num_imgs = len(self.img_files)
        assert num_imgs > 0
        indices = np.arange(num_imgs)
        for idx in indices:
            img_file = self.img_files[idx]
            img = cv2.imread(img_file)[..., ::-1]  # BGR to RGB
            img = cv2.resize(img, self.img_size, interpolation=cv2.INTER_LINEAR)
            if self.transpose:
                img = img.transpose(self.transpose)
            yield os.path.basename(img_file), (img / 255).astype(np.float32)


def create_dataset(src_root, dst_root, subset, shape, count, trans=None):
    print(f"Creating {subset} dataset of {count} images with input shape {shape}...")
    image_size = (shape[0], shape[1])
    device_working_dir = DEVICE_WORKING_DIR
    os.makedirs(dst_root, exist_ok=True)
    data_gen = DataGenerator(src_root, image_size, subset, count, trans)
    file_list = []
    with tqdm.tqdm(total=len(data_gen)) as pbar:
        for fname, img in data_gen():
            filename = fname.replace('.png', '.bin')
            path = os.path.join(dst_root, filename)
            file_list.append(filename)
            img.tofile(path)
            pbar.update(1)
    with open(os.path.join(dst_root, 'input_list.txt'), 'w') as f:
        for fname in file_list:
            f.write(fname)
            f.write('\n')
    with open(os.path.join(dst_root, 'input_list_for_device.txt'), 'w') as f:
        for fname in file_list:
            device_path = device_working_dir + '/data/test/' + os.path.basename(fname)
            f.write(device_path)
            f.write('\n')


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Create the quant and test datasets')
    parser.add_argument('--input', help='Location of the original input images ' +
                        '(default INPUT_DATASET environment variable)')
    parser.add_argument('--count', '-c', type=int, help='Number of images in the test dataset folder ' +
                        '(default 1000)', default=1000)
    parser.add_argument('--dim', '-d', type=int, help='New dimension for the images ' +
                        '(assumes square dimensions, default 256)', default=256)
    parser.add_argument('--transpose', '-t', help="Apply image transpose of '(2, 0, 1)'", action="store_true")
    args = parser.parse_args()
    dataset = args.input
    if not dataset:
        dataset = os.getenv("INPUT_DATASET")
    if not dataset:
        print("please provide --input or set your INPUT_DATASET environment variable")
        sys.exit(1)
    count = args.count
    dim = args.dim
    transpose = args.transpose
    if transpose:
        transpose = (2, 0, 1)
    else:
        transpose = None
    dst_root = 'data/quant'
    if os.path.isdir(dst_root):
        rmtree(dst_root)
    create_dataset(dataset, dst_root, 'quant', (dim, dim), count, transpose)
    create_dataset(dataset, dst_root, 'test', (dim, dim), count, transpose)

View File

@@ -1,51 +0,0 @@
import os
import argparse
import logging
import mlflow
import zipfile
from create_data import create_dataset


def main():
    """Main function of the script."""
    # input and output arguments
    parser = argparse.ArgumentParser()
    parser.add_argument("--data", type=str, help="path to input data (zip file)")
    parser.add_argument("--output", type=str, help="path to resulting quantization data")
    args = parser.parse_args()
    input_path = args.data
    data_dir = 'data'
    output_dir = args.output
    print(" ".join(f"{k}={v}" for k, v in vars(args).items()))
    print("input data:", input_path)
    print("output dir:", output_dir)
    if not input_path or not os.path.exists(input_path):
        raise Exception(f'### Error: no .zip file found at: {input_path}')
    os.makedirs(data_dir, exist_ok=True)
    os.makedirs(output_dir, exist_ok=True)

    # Start Logging
    mlflow.start_run()
    print('unzipping the data...')
    with zipfile.ZipFile(input_path, 'r') as zip_ref:
        zip_ref.extractall(data_dir)
    print('Converting the .png images to SNPE quantization .bin files...')
    create_dataset(data_dir, output_dir, 'quant', (256, 256), 1000)
    for name in os.listdir(output_dir):
        print(name)
    # Stop Logging
    mlflow.end_run()


if __name__ == "__main__":
    main()

View File

@@ -1,82 +0,0 @@
# This script connects local adb devices to Azure IOT hub and
# implements some method calls to control the device.
# This assumes you have the Android ADB tool installed, and
# the SNPE SDK in an SNPE_ROOT, and the Olive2 SDK.
from utils import Adb, spawn
import json
import platform
import time
from client import SnpeClient


class DeviceManager:
    def __init__(self):
        config = json.load(open(".azureml/config.json"))
        self.subscription = config['subscription_id']
        self.iot_resource_group = config['iot_resource_group']
        self.iot_hub_name = config['iot_hub_name']
        self.location = config['location']

    def register(self):
        if platform.system() == 'Windows':
            az = 'az.cmd'
        else:
            az = 'az'
        rc, stdout, stderr = spawn([az, 'iot', 'hub', 'connection-string', 'show',
                                    '--hub-name', self.iot_hub_name, '--resource-group', self.iot_resource_group])
        if rc != 0:
            raise Exception(stderr)
        config = json.loads(stdout)
        iot_hub_connection_string = config['connectionString']
        adb = Adb()
        devices = adb.get_devices()
        for device in devices:
            print(f"Checking Device Twin for android device {device}...")
            rc, stdout, stderr = spawn([az, 'iot', 'hub', 'device-identity', 'show',
                                        '--device-id', device,
                                        '--hub-name', self.iot_hub_name, '--resource-group', self.iot_resource_group])
            if rc != 0:
                if 'DeviceNotFound' in stderr:
                    print(f"Creating Device Twin for android device {device}...")
                    rc, stdout, stderr = spawn([az, 'iot', 'hub', 'device-identity', 'create',
                                                '--device-id', device,
                                                '--hub-name', self.iot_hub_name, '--resource-group', self.iot_resource_group])
                    if rc != 0:
                        raise Exception(stderr)
                else:
                    raise Exception(stderr)
        return devices


# Run the handler for each device attached to this local machine.
mgr = DeviceManager()
devices = mgr.register()
clients = []
for device in devices:
    client = SnpeClient(device, mgr.iot_hub_name, mgr.iot_resource_group)
    if client.connect():
        client.start_thread()
        clients.append(client)
while len(clients):
    for c in list(clients):
        if not c.connected:
            clients.remove(c)
    time.sleep(1)
print("all clients have terminated")

View File

@@ -1,74 +0,0 @@
import os
import sys
import argparse
import mlflow
from utils import spawn


def macs_to_float(macs):
    if macs.endswith('B'):
        return float(macs[:-1]) * 1e9
    elif macs.endswith('M'):
        return float(macs[:-1]) * 1e6
    elif macs.endswith('K'):
        return float(macs[:-1]) * 1e3
    else:
        return float(macs)


def main():
    # input and output arguments
    parser = argparse.ArgumentParser()
    parser.add_argument("--model", type=str, help="path to dlc model we need to get info about")
    parser.add_argument("--output", type=str, help="the output text file to write to")
    args = parser.parse_args()
    model_path = args.model
    output_path = args.output
    print("input model:", model_path)
    print("output path:", output_path)
    print("tracking url:", mlflow.tracking.get_tracking_uri())
    if not model_path or not os.path.exists(model_path):
        raise Exception(f'### Error: no input model found at: {model_path}')

    # Start Logging
    mlflow.start_run()
    rc, stdout, stderr = spawn(['snpe-dlc-info', '--input_dlc', model_path])
    print("stdout:")
    print("-------")
    print(stdout)
    print("")
    print("stderr:")
    print("-------")
    print(stderr)
    with open(output_path, 'w') as f:
        f.write(stdout)

    params_prefix = 'Total parameters'
    macs_prefix = 'Total MACs per inference'
    memory_prefix = 'Est. Steady-State Memory Needed to Run:'
    # Parse stdout to get the info we want to log as metrics
    for line in stdout.split('\n'):
        if line.startswith(params_prefix):
            params = line.split(':')[1].split('(')[0].strip()
            mlflow.log_metric(params_prefix, float(params))
        elif line.startswith(macs_prefix):
            macs = line.split(':')[1].split('(')[0].strip()
            mlflow.log_metric(macs_prefix, macs_to_float(macs))
        elif line.startswith(memory_prefix):
            mem = line.split(':')[1].strip().split(' ')[0].strip()
            mlflow.log_metric('Estimated Memory Needed to Run', float(mem))
    # Stop Logging
    mlflow.end_run()


if __name__ == "__main__":
    main()
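`macs_to_float` normalizes the suffixed counts that `snpe-dlc-info` prints; a minimal check of that parsing (the module name `dlc_info` is an assumption):

```python
# Sketch: the B/M/K suffix handling above (module name 'dlc_info' assumed).
from dlc_info import macs_to_float

assert macs_to_float('2B') == 2e9
assert macs_to_float('350M') == 350e6
assert macs_to_float('64K') == 64e3
assert macs_to_float('512') == 512.0
```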

View File

@@ -1,80 +0,0 @@
import os
import sys
import argparse
import mlflow
from utils import spawn


def main():
    """Main function of the script."""
    # input and output arguments
    parser = argparse.ArgumentParser()
    parser.add_argument("--data", type=str, help="path to quantization dataset folder")
    parser.add_argument("--model", type=str, help="path to model we need to quantize")
    parser.add_argument("--list_file", type=str, help="the name of the input_list.txt file")
    parser.add_argument("--output", type=str, help="place to write the quantized model")
    args = parser.parse_args()
    input_path = args.data
    list_file = args.list_file
    model_path = args.model
    output_path = args.output
    output_dir = os.path.dirname(output_path)
    if output_dir == '':
        output_dir = '.'
    print("input data:", input_path)
    print("input model:", model_path)
    print("output model:", output_path)
    if not input_path or not os.path.exists(input_path):
        raise Exception(f'### Error: no input data found at: {input_path}')
    if not model_path or not os.path.exists(model_path):
        raise Exception(f'### Error: no input model found at: {model_path}')
    if not output_path or not os.path.exists(output_dir):
        raise Exception(f'### Error: no output path found at: {output_dir}')
    os.makedirs(output_dir, exist_ok=True)

    # Start Logging
    mlflow.start_run()
    input_list = os.path.join(input_path, list_file)
    # snpe-dlc-quant needs full paths in the input list.
    with open(input_list, 'r') as f:
        lines = f.readlines()
    lines = [os.path.join(input_path, line) for line in lines]
    input_list = 'input_list.txt'  # not in the read-only input_path folder.
    with open(input_list, 'w') as f:
        f.writelines(lines)
    rc, stdout, stderr = spawn(['snpe-dlc-quant', '--input_dlc', model_path,
                                '--input_list', input_list,
                                '--output_dlc', output_path,
                                '--use_enhanced_quantizer'])
    print("stdout:")
    print("-------")
    print(stdout)
    print("")
    print("stderr:")
    print("-------")
    print(stderr)
    if "[INFO] Saved quantized dlc" in stderr:
        print("==> Quantization successful!")
    else:
        print("==> Quantization failed!")
        sys.exit(1)
    # Stop Logging
    mlflow.end_run()


if __name__ == "__main__":
    main()

View File

@@ -1,35 +0,0 @@
import subprocess


def spawn(cmd, env=None, cwd=None, check=False):
    out = subprocess.run(cmd, env=env, cwd=cwd, check=check,
                         stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    returncode = out.returncode
    stdout = out.stdout.decode("utf-8")
    stderr = out.stderr.decode("utf-8")
    return returncode, stdout, stderr


class Adb:
    def __init__(self):
        self.devices = []

    def get_devices(self):
        rc, stdout, stderr = spawn(["adb", "devices"])
        if rc != 0:
            raise Exception("Error: {}".format(stderr))
        result = []
        for line in stdout.splitlines():
            if not line.startswith("List of devices attached") and line:
                result.append(line.split('\t')[0])
        self.devices = result
        return result

    def ls(self, device, path):
        rc, stdout, stderr = spawn(["adb", "-s", device, "shell", "ls", path])
        if rc != 0:
            raise Exception("Error: {}".format(stderr))
        return stdout.splitlines()
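A short usage sketch for these helpers (assumes the Android `adb` tool is on the PATH and at least one device is attached):

```python
# Sketch: using spawn and Adb (assumes adb is installed and a device is attached).
from utils import spawn, Adb

rc, out, err = spawn(["adb", "version"])
print(out.strip() if rc == 0 else err)

adb = Adb()
for device in adb.get_devices():
    # list the device working dir used by the SNPE scripts in this repo
    print(device, adb.ls(device, "/data/local/tmp"))
```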