Merge pull request #256 from microsoft/pabuehle_integ_tests

Similarity notebook integration test

Commit 1293e29f04
@@ -1,4 +1,4 @@
-# Unit Test steps
+# Unit and integration test steps
 steps:

 - bash: |
@@ -14,8 +14,8 @@ steps:
 - bash: |
     source activate cv
     python -m ipykernel install --user --name cv --display-name "cv"
-    pytest --durations 100 tests/unit --junitxml=junit/test-unitttest.xml
-  displayName: 'Run Unit tests'
+    pytest --durations 100 tests --junitxml=junit/test-unitttest.xml
+  displayName: 'Run unit and (only on Linux GPU) integration tests'

 - bash: |
     echo Remove Conda Environment
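The renamed step works because every integration test added in this PR wraps its body in a linux_with_gpu() check (the helper added to utils_cv/common/gpu.py further down), so pointing pytest at the whole tests folder is safe on build agents without a Linux GPU: the guarded tests simply do nothing there. A minimal sketch of that guard pattern, with an illustrative fixture name:

import papermill as pm
import pytest
import scrapbook as sb

from utils_cv.common.gpu import linux_with_gpu


@pytest.mark.notebooks
def test_some_notebook_run(some_notebook_path):  # fixture name is illustrative
    # Only execute the (GPU-heavy) notebook on a Linux machine with a GPU;
    # elsewhere the test body is skipped and the test trivially passes.
    if linux_with_gpu():
        pm.execute_notebook(some_notebook_path, "output.ipynb", kernel_name="cv")
        nb_output = sb.read_notebook("output.ipynb")
        assert nb_output.scraps  # assert on values glued inside the notebook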
@@ -83,6 +83,7 @@
    "from fastai.vision import models, open_image\n",
    "from ipywebrtc import CameraStream, ImageRecorder\n",
    "from ipywidgets import HBox, Label, Layout, Widget\n",
+   "import scrapbook as sb\n",
    "\n",
    "from utils_cv.common.data import data_path\n",
    "from utils_cv.common.gpu import which_processor\n",
@@ -201,8 +202,8 @@
     "name": "stdout",
     "output_type": "stream",
     "text": [
-     "Predicted label: coffee_mug (conf = 0.68)\n",
-     "Took 0.05646324157714844 sec\n"
+     "Predicted label: coffee_mug (confidence = 0.68)\n",
+     "Took 0.02240896224975586 sec\n"
     ]
    }
   ],
@@ -211,7 +212,9 @@
    "\n",
    "# Use the model to predict the class label\n",
    "_, ind, prob = learn.predict(im)\n",
-   "print(f\"Predicted label: {labels[ind]} (conf = {prob[ind]:.2f})\")\n",
+   "predicted_label = labels[ind]\n",
+   "predicted_confidence = prob[ind]\n",
+   "print(f\"Predicted label: {predicted_label} (confidence = {predicted_confidence:.2f})\")\n",
    "\n",
    "# Show prediction time. Note the first prediction usually takes longer because of the model loading\n",
    "print(f\"Took {time.time()-start_time} sec\")"
@@ -327,6 +330,54 @@
    "# Stop the model and webcam\n",
    "Widget.close_all()"
   ]
  },
+ {
+  "cell_type": "code",
+  "execution_count": 10,
+  "metadata": {},
+  "outputs": [
+   {
+    "data": {
+     "application/scrapbook.scrap.text+json": {
+      "data": "coffee_mug",
+      "encoder": "text",
+      "name": "predicted_label",
+      "version": 1
+     }
+    },
+    "metadata": {
+     "scrapbook": {
+      "data": true,
+      "display": false,
+      "name": "predicted_label"
+     }
+    },
+    "output_type": "display_data"
+   },
+   {
+    "data": {
+     "application/scrapbook.scrap.json+json": {
+      "data": 0.6800199747085571,
+      "encoder": "json",
+      "name": "predicted_confidence",
+      "version": 1
+     }
+    },
+    "metadata": {
+     "scrapbook": {
+      "data": true,
+      "display": false,
+      "name": "predicted_confidence"
+     }
+    },
+    "output_type": "display_data"
+   }
+  ],
+  "source": [
+   "# Preserve some of the notebook outputs\n",
+   "sb.glue(\"predicted_label\", predicted_label)\n",
+   "sb.glue(\"predicted_confidence\", float(predicted_confidence))"
+  ]
+ }
 ],
 "metadata": {
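The added cell stores the prediction with scrapbook so that an executed copy of the notebook can be inspected programmatically. A minimal sketch of the glue/read round trip used by the tests in this PR (the notebook file name here is illustrative):

import papermill as pm
import scrapbook as sb

# Inside the notebook, values are persisted under stable names:
#   sb.glue("predicted_label", predicted_label)
#   sb.glue("predicted_confidence", float(predicted_confidence))

# In the test, papermill executes the notebook and scrapbook reads the glued values back:
pm.execute_notebook("00_webcam.ipynb", "output.ipynb", kernel_name="cv")
nb_output = sb.read_notebook("output.ipynb")
assert nb_output.scraps["predicted_label"].data == "coffee_mug"
assert nb_output.scraps["predicted_confidence"].data > 0.5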
Three file diffs are not shown because one or more lines are too long.
@@ -45,8 +45,10 @@
   "source": [
    "import os\n",
    "import sys\n",
+   "sys.path.append(\"../../\")\n",
    "\n",
+   "import scrapbook as sb\n",
+   "\n",
-   "sys.path.append(\"../../\")\n",
    "from utils_cv.classification.widget import AnnotationWidget\n",
    "from utils_cv.classification.data import Urls\n",
    "from utils_cv.common.data import unzip_url"
@@ -101,7 +103,7 @@
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
-      "model_id": "591a04ffcf97454b9bb060ddb9274441",
+      "model_id": "67d4f7c8d01d4eacbd437b0c1637bd6e",
       "version_major": 2,
       "version_minor": 0
      },
@@ -142,7 +144,7 @@
     "name": "stdout",
     "output_type": "stream",
     "text": [
-     "Writing example_annotation.csv\n"
+     "Overwriting example_annotation.csv\n"
     ]
    }
   ],
@@ -254,7 +256,7 @@
     "x: ImageList\n",
     "Image (3, 665, 499),Image (3, 665, 499),Image (3, 665, 499)\n",
     "y: MultiCategoryList\n",
-    "can,can;carton,can\n",
+    "can,can,can;carton\n",
     "Path: /data/home/pabuehle/Desktop/ComputerVision/data/fridgeObjectsTiny/can;\n",
     "\n",
     "Test: None\n"
@@ -286,6 +288,36 @@
    ")\n",
    "print(data)"
   ]
  },
+ {
+  "cell_type": "code",
+  "execution_count": 7,
+  "metadata": {},
+  "outputs": [
+   {
+    "data": {
+     "application/scrapbook.scrap.json+json": {
+      "data": 6,
+      "encoder": "json",
+      "name": "num_images",
+      "version": 1
+     }
+    },
+    "metadata": {
+     "scrapbook": {
+      "data": true,
+      "display": false,
+      "name": "num_images"
+     }
+    },
+    "output_type": "display_data"
+   }
+  ],
+  "source": [
+   "# Preserve some of the notebook outputs\n",
+   "num_images = len(data.valid) + len(data.train)\n",
+   "sb.glue(\"num_images\", num_images)"
+  ]
+ }
 ],
 "metadata": {
File diff not shown because one or more lines are too long.
@@ -79,22 +79,21 @@
    "import os\n",
    "from pathlib import Path\n",
    "import sys\n",
+   "sys.path.append(\"../../\")\n",
    "import shutil\n",
    "from tempfile import TemporaryDirectory\n",
    "\n",
    "import matplotlib.pyplot as plt\n",
    "import numpy as np\n",
-   "# fastai\n",
+   "import scrapbook as sb\n",
+   "\n",
    "import fastai\n",
    "from fastai.metrics import accuracy\n",
    "from fastai.vision import (\n",
    "    # data-modules\n",
    "    CategoryList, DatasetType, get_image_files, ImageList, imagenet_stats,\n",
    "    # model-modules\n",
    "    cnn_learner, models, ClassificationInterpretation, \n",
    ")\n",
    "\n",
-   "sys.path.append(\"../../\")\n",
    "from utils_cv.classification.model import (\n",
    "    IMAGENET_IM_SIZE as IMAGE_SIZE,\n",
    "    TrainMetricsRecorder,\n",
@@ -869,6 +868,18 @@
    "# Finally, show the number of repetitions you went through the negative mining\n",
    "print(f\"Ran {len(interpretations)} time(s)\")"
   ]
  },
+ {
+  "cell_type": "code",
+  "execution_count": null,
+  "metadata": {},
+  "outputs": [],
+  "source": [
+   "# Preserve some of the notebook outputs\n",
+   "sb.glue(\"train_acc\", train_acc)\n",
+   "sb.glue(\"valid_acc\", valid_acc)\n",
+   "sb.glue(\"negative_sample_ids\", negative_sample_ids)"
+  ]
+ }
 ],
 "metadata": {
File diff not shown because one or more lines are too long.
@@ -0,0 +1,110 @@
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License.
+
+import os
+import glob
+import papermill as pm
+import pytest
+import scrapbook as sb
+import shutil
+
+from utils_cv.common.gpu import linux_with_gpu
+
+# Parameters
+KERNEL_NAME = "cv"
+OUTPUT_NOTEBOOK = "output.ipynb"
+
+
+@pytest.mark.notebooks
+def test_01_notebook_run(classification_notebooks):
+    if linux_with_gpu():
+        notebook_path = classification_notebooks["01_training_introduction"]
+        pm.execute_notebook(
+            notebook_path,
+            OUTPUT_NOTEBOOK,
+            parameters=dict(PM_VERSION=pm.__version__),
+            kernel_name=KERNEL_NAME,
+        )
+
+        nb_output = sb.read_notebook(OUTPUT_NOTEBOOK)
+        assert len(nb_output.scraps["training_accuracies"].data) == 10
+        assert nb_output.scraps["training_accuracies"].data[-1] > 0.70
+        assert nb_output.scraps["validation_accuracy"].data > 0.70
+
+
+@pytest.mark.notebooks
+def test_02_notebook_run(classification_notebooks):
+    if linux_with_gpu():
+        notebook_path = classification_notebooks["02_multilabel_classification"]
+        pm.execute_notebook(
+            notebook_path,
+            OUTPUT_NOTEBOOK,
+            parameters=dict(PM_VERSION=pm.__version__),
+            kernel_name=KERNEL_NAME,
+        )
+
+        nb_output = sb.read_notebook(OUTPUT_NOTEBOOK)
+        assert len(nb_output.scraps["training_accuracies"].data) == 10
+        assert nb_output.scraps["training_accuracies"].data[-1] > 0.80
+        assert nb_output.scraps["acc_hl"].data > 0.80
+        assert nb_output.scraps["acc_zol"].data > 0.4
+
+
+@pytest.mark.notebooks
+def test_03_notebook_run(classification_notebooks):
+    if linux_with_gpu():
+        notebook_path = classification_notebooks["03_training_accuracy_vs_speed"]
+        pm.execute_notebook(
+            notebook_path,
+            OUTPUT_NOTEBOOK,
+            parameters=dict(PM_VERSION=pm.__version__),
+            kernel_name=KERNEL_NAME,
+        )
+
+        nb_output = sb.read_notebook(OUTPUT_NOTEBOOK)
+        assert len(nb_output.scraps["training_accuracies"].data) == 12
+        assert nb_output.scraps["training_accuracies"].data[-1] > 0.80
+        assert nb_output.scraps["validation_accuracy"].data > 0.80
+
+
+@pytest.mark.notebooks
+def test_11_notebook_run(classification_notebooks, tiny_ic_data_path):
+    if linux_with_gpu():
+        notebook_path = classification_notebooks["11_exploring_hyperparameters"]
+        pm.execute_notebook(
+            notebook_path,
+            OUTPUT_NOTEBOOK,
+            parameters=dict(
+                PM_VERSION=pm.__version__,
+
+                # Speed up testing since otherwise would take ~12 minutes on V100
+                DATA=[tiny_ic_data_path],
+                REPS=1,
+                IM_SIZES=[60,100],
+            ),
+            kernel_name=KERNEL_NAME,
+        )
+
+        nb_output = sb.read_notebook(OUTPUT_NOTEBOOK)
+        assert nb_output.scraps["nr_elements"].data == 6
+        assert nb_output.scraps["max_accuray"].data > 0.5
+        assert nb_output.scraps["min_accuray"].data < 0.5
+        assert nb_output.scraps["max_duration"].data > 1.2 * nb_output.scraps["min_duration"].data
+
+
+@pytest.mark.notebooks
+def test_12_notebook_run(classification_notebooks):
+    if linux_with_gpu():
+        notebook_path = classification_notebooks["12_hard_negative_sampling"]
+        pm.execute_notebook(
+            notebook_path,
+            OUTPUT_NOTEBOOK,
+            parameters=dict(PM_VERSION=pm.__version__),
+            kernel_name=KERNEL_NAME,
+        )
+
+        nb_output = sb.read_notebook(OUTPUT_NOTEBOOK)
+        assert len(nb_output.scraps["train_acc"].data) == 12
+        assert nb_output.scraps["train_acc"].data[-1] > 0.80
+        assert nb_output.scraps["valid_acc"].data[-1] > 0.80
+        assert len(nb_output.scraps["negative_sample_ids"].data) > 0
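These tests rely on pytest fixtures (classification_notebooks, tiny_ic_data_path, and later similarity_notebooks) defined in the repository's conftest.py, which is not part of this diff. As an illustration only, such a fixture might map notebook names to paths roughly like this (the paths and scope are assumptions, not the actual conftest):

import os
import pytest


@pytest.fixture(scope="module")
def classification_notebooks():
    # Hypothetical layout: keys match the lookups used in the tests above.
    folder = os.path.join("classification", "notebooks")
    return {
        "01_training_introduction": os.path.join(folder, "01_training_introduction.ipynb"),
        "02_multilabel_classification": os.path.join(folder, "02_multilabel_classification.ipynb"),
        "03_training_accuracy_vs_speed": os.path.join(folder, "03_training_accuracy_vs_speed.ipynb"),
        "11_exploring_hyperparameters": os.path.join(folder, "11_exploring_hyperparameters.ipynb"),
        "12_hard_negative_sampling": os.path.join(folder, "12_hard_negative_sampling.ipynb"),
    }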
@@ -0,0 +1,28 @@
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License.
+
+import papermill as pm
+import pytest
+import scrapbook as sb
+from torch.cuda import is_available
+
+from utils_cv.common.gpu import linux_with_gpu
+
+# Parameters
+KERNEL_NAME = "cv"
+OUTPUT_NOTEBOOK = "output.ipynb"
+
+
+@pytest.mark.notebooks
+def test_01_notebook_run(similarity_notebooks):
+    if linux_with_gpu():
+        notebook_path = similarity_notebooks["01"]
+        pm.execute_notebook(
+            notebook_path,
+            OUTPUT_NOTEBOOK,
+            parameters=dict(PM_VERSION=pm.__version__),
+            kernel_name=KERNEL_NAME,
+        )
+
+        nb_output = sb.read_notebook(OUTPUT_NOTEBOOK)
+        assert nb_output.scraps["median_rank"].data <= 10
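The median_rank scrap this test reads has to be glued inside the similarity notebook itself, whose diff is one of the suppressed files above. As a hedged sketch of what such a cell could look like (the values and variable names are illustrative, not taken from the notebook):

import numpy as np
import scrapbook as sb

# ranks[i] = position of the true match for query image i in its
# similarity-sorted result list (1 = best); values below are made up.
ranks = np.array([1, 1, 2, 1, 4])
median_rank = float(np.median(ranks))
sb.glue("median_rank", median_rank)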
@@ -8,6 +8,7 @@ import os
 import glob
 import papermill as pm
 import pytest
+import scrapbook as sb
 import shutil

 # Unless manually modified, python3 should be
@@ -27,6 +28,10 @@ def test_00_notebook_run(classification_notebooks):
         kernel_name=KERNEL_NAME,
     )

+    nb_output = sb.read_notebook(OUTPUT_NOTEBOOK)
+    assert nb_output.scraps["predicted_label"].data == "coffee_mug"
+    assert nb_output.scraps["predicted_confidence"].data > 0.5
+

 @pytest.mark.notebooks
 def test_01_notebook_run(classification_notebooks, tiny_ic_data_path):
@@ -43,6 +48,9 @@ def test_01_notebook_run(classification_notebooks, tiny_ic_data_path):
         kernel_name=KERNEL_NAME,
     )

+    nb_output = sb.read_notebook(OUTPUT_NOTEBOOK)
+    assert len(nb_output.scraps["training_accuracies"].data) == 1
+

 @pytest.mark.notebooks
 def test_02_notebook_run(classification_notebooks, multilabel_ic_data_path):
@@ -59,6 +67,9 @@ def test_02_notebook_run(classification_notebooks, multilabel_ic_data_path):
         kernel_name=KERNEL_NAME,
     )

+    nb_output = sb.read_notebook(OUTPUT_NOTEBOOK)
+    assert len(nb_output.scraps["training_accuracies"].data) == 1
+

 @pytest.mark.notebooks
 def test_03_notebook_run(classification_notebooks, tiny_ic_data_path):
@@ -70,7 +81,7 @@ def test_03_notebook_run(classification_notebooks, tiny_ic_data_path):
             PM_VERSION=pm.__version__,
             DATA_PATH=tiny_ic_data_path,
             MULTILABEL=False,
-            MODEL_TYPE="fast_inference",  # options: ['fast_inference', 'high_performance', 'small_size']
+            MODEL_TYPE="fast_inference",
             EPOCHS_HEAD=1,
             EPOCHS_BODY=1,
             IM_SIZE=50,
@@ -78,6 +89,9 @@ def test_03_notebook_run(classification_notebooks, tiny_ic_data_path):
         kernel_name=KERNEL_NAME,
     )

+    nb_output = sb.read_notebook(OUTPUT_NOTEBOOK)
+    assert len(nb_output.scraps["training_accuracies"].data) == 1
+

 @pytest.mark.notebooks
 def test_10_notebook_run(classification_notebooks, tiny_ic_data_path):
@@ -92,6 +106,9 @@ def test_10_notebook_run(classification_notebooks, tiny_ic_data_path):
         kernel_name=KERNEL_NAME,
     )

+    nb_output = sb.read_notebook(OUTPUT_NOTEBOOK)
+    assert nb_output.scraps["num_images"].data == 6
+

 @pytest.mark.notebooks
 def test_11_notebook_run(classification_notebooks, tiny_ic_data_path):
@@ -110,6 +127,9 @@ def test_11_notebook_run(classification_notebooks, tiny_ic_data_path):
         kernel_name=KERNEL_NAME,
     )

+    nb_output = sb.read_notebook(OUTPUT_NOTEBOOK)
+    assert nb_output.scraps["nr_elements"].data == 1
+

 @pytest.mark.notebooks
 def test_12_notebook_run(classification_notebooks, tiny_ic_data_path):
@@ -127,6 +147,9 @@ def test_12_notebook_run(classification_notebooks, tiny_ic_data_path):
         kernel_name=KERNEL_NAME,
     )

+    nb_output = sb.read_notebook(OUTPUT_NOTEBOOK)
+    assert len(nb_output.scraps["train_acc"].data) == 1
+

 @pytest.mark.notebooks
 def skip_test_21_notebook_run(classification_notebooks, tiny_ic_data_path):
@@ -1,9 +1,14 @@
 # Copyright (c) Microsoft Corporation. All rights reserved.
 # Licensed under the MIT License.

-from utils_cv.common.gpu import which_processor
+from utils_cv.common.gpu import linux_with_gpu, which_processor


 def test_which_processor():
     # Naive test: Just run the function to see whether it works or does not work
     which_processor()
+
+
+def test_linux_with_gpu():
+    # Naive test: Just run the function to see whether it works or does not work
+    linux_with_gpu()
@@ -25,6 +25,11 @@ def test_00_notebook_run(similarity_notebooks):
         kernel_name=KERNEL_NAME,
     )

+    nb_output = sb.read_notebook(OUTPUT_NOTEBOOK)
+    assert len(nb_output.scraps["query_feature"].data) == 512
+    assert min(nb_output.scraps["query_feature"].data) >= 0
+    assert min([dist for (path,dist) in nb_output.scraps["distances"].data]) < 1e-3
+

 @pytest.mark.notebooks
 def test_01_notebook_run(similarity_notebooks, tiny_ic_data_path):
@@ -42,7 +47,3 @@ def test_01_notebook_run(similarity_notebooks, tiny_ic_data_path):
         kernel_name=KERNEL_NAME,
     )
-    nb_output = sb.read_notebook(OUTPUT_NOTEBOOK)

-    # Conservative assert: check if rank is smaller than or equal 5
-    # (Typically mediam_rank should be 1, and random rank is 50)
-    #assert nb_output.scraps['median_rank'].data <= 5
@@ -1,5 +1,6 @@
 # Copyright (c) Microsoft Corporation. All rights reserved.
 # Licensed under the MIT License.
+import platform

 from torch.cuda import current_device, get_device_name, is_available

@@ -11,3 +12,10 @@ def which_processor():
         print(f"Fast.ai (Torch) is using GPU: {get_device_name(device_nr)}")
     else:
         print("Cuda is not available. Fast.ai/Torch is using CPU")
+
+
+def linux_with_gpu():
+    """Returns if machine is running an Linux OS and has a GPU"""
+    is_linux = platform.system().lower() == "linux"
+    has_gpu = is_available()
+    return is_linux and has_gpu
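With linux_with_gpu() available, the integration tests guard their bodies with a plain if statement, which makes a skipped run show up as a pass. An equivalent alternative (not what this PR does) would be to let pytest report such tests as skipped instead; a minimal sketch assuming the same helper:

import pytest

from utils_cv.common.gpu import linux_with_gpu


# Evaluated once at collection time; the test is reported as "skipped"
# on machines that are not Linux or have no GPU.
@pytest.mark.skipif(not linux_with_gpu(), reason="requires Linux and a GPU")
@pytest.mark.notebooks
def test_some_gpu_only_behavior():  # illustrative test, not from this PR
    assert linux_with_gpu()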