Merge branch 'main' into dev/setup-refactor
Commit 8c4a3d1192
@@ -1,3 +1,6 @@
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.

name: Integrated Test

on: [push]

@@ -16,36 +19,33 @@ jobs:
with:
python-version: 3.6.10

# # to cache and restore the download data and python packages.
# # still have bug (Cache not found for input keys)
# # leave the issue to another PR
# - uses: actions/cache@v2
# id: cache
# with:
# path: |
# /home/runner/work/nn-Meter/data/testmodels
# /home/runner/.nn_meter/data
# # /opt/hostedtoolcache/Python/3.6.10/x64/lib/python3.6
# key: ${{hashFiles('setup.py')}}-${{hashFiles('tests/requirements.txt') }}
# # location remider:
# # pwd: /home/runner/work/nn-Meter/nn-Meter
# # python /opt/hostedtoolcache/Python/3.6.10/x64/lib/python3.6/site-packages
# # pythonLocation: /opt/hostedtoolcache/Python/3.6.10/x64
# # LD_LIBRARY_PATH: /opt/hostedtoolcache/Python/3.6.10/x64/lib
# # package path: /opt/hostedtoolcache/Python/3.6.10/x64/lib/python3.6/site-packages
- name: Cache
uses: actions/cache@v2
id: cache
env:
cache-name: download-cache
with:
path: |
~/.nn_meter
/home/runner/work/nn-Meter/data/testmodels
/opt/hostedtoolcache/Python/3.6.10/x64/lib/python3.6/site-packages
key: Dependencies-${{hashFiles('setup.py')}}-Data-${{hashFiles('nn_meter/configs/predictors.yaml')}}

- name: Install dependencies
# if: steps.cache.outputs.cache-hit != 'true'
if: steps.cache.outputs.cache-hit != 'true'
run: |
pip install tensorflow==1.15.0
pip install onnx==1.9.0
pip install torch==1.9.0
pip install torch==1.7.1
pip install torchvision==0.8.2
pip install onnx-simplifier

- name: Install nn-Meter
run: pip install .
run: pip install -U .

- name: Integration test
run: python tests/integration_test.py

- name: Diff result with reference
run: diff tests/test_result.txt tests/reference_result.txt

@@ -1,3 +1,6 @@
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.

name: Integrated Test for Torch Model

on: [push]

@@ -16,36 +19,33 @@ jobs:
with:
python-version: 3.6.10

# # to cache and restore the download data and python packages.
# # still have bug (Cache not found for input keys)
# # leave the issue to another PR
# - uses: actions/cache@v2
# id: cache
# with:
# path: |
# /home/runner/work/nn-Meter/data/testmodels
# /home/runner/.nn_meter/data
# # /opt/hostedtoolcache/Python/3.6.10/x64/lib/python3.6
# key: ${{hashFiles('setup.py')}}-${{hashFiles('tests/requirements.txt') }}
# # location remider:
# # pwd: /home/runner/work/nn-Meter/nn-Meter
# # python /opt/hostedtoolcache/Python/3.6.10/x64/lib/python3.6/site-packages
# # pythonLocation: /opt/hostedtoolcache/Python/3.6.10/x64
# # LD_LIBRARY_PATH: /opt/hostedtoolcache/Python/3.6.10/x64/lib
# # package path: /opt/hostedtoolcache/Python/3.6.10/x64/lib/python3.6/site-packages
- name: Cache
uses: actions/cache@v2
id: cache
env:
cache-name: download-cache
with:
path: |
~/.nn_meter
/home/runner/work/nn-Meter/data/testmodels
/opt/hostedtoolcache/Python/3.6.10/x64/lib/python3.6/site-packages
key: Dependencies-${{hashFiles('setup.py')}}-Data-${{hashFiles('nn_meter/configs/predictors.yaml')}}

- name: Install dependencies
# if: steps.cache.outputs.cache-hit != 'true'
if: steps.cache.outputs.cache-hit != 'true'
run: |
pip install tensorflow==1.15.0
pip install onnx==1.9.0
pip install torch==1.9.0
pip install torchvision==0.10.0
pip install torch==1.7.1
pip install torchvision==0.8.2
pip install onnx-simplifier

- name: Install nn-Meter
run: pip install .
run: pip install -U .

- name: Integration test
run: python tests/integration_test_torch.py

- name: Diff result with reference
run: diff tests/test_result_torch.txt tests/reference_result_torch.txt

@@ -35,12 +35,12 @@ Then simply run the following pip install in an environment that has `python >=
pip install .
```

nn-Meter is a latency predictor for models in tensorflow, pytorch, onnx, nn-meter IR graph and [NNI IR graph](https://github.com/microsoft/nni) formats. To use nn-Meter for a specific model type, you also need to install the corresponding packages. The well tested versions are listed below:
nn-Meter is a latency predictor for models in tensorflow, pytorch, onnx, nn-meter IR graph and [NNI IR graph](https://github.com/microsoft/nni) formats. To use nn-Meter for a specific model type, you also need to install the corresponding required packages. The well tested versions are listed below:

| Testing Model Type | Requirements |
| :-------------------: | :------------------------------------------------: |
| Tensorflow | `tensorflow==1.15.0` |
| Torch | `onnx==1.9.0`, `torch==1.9.0`, `torchvision==0.10.0` |
| Torch | `torch==1.7.1`, `torchvision==0.8.2`, `onnx==1.9.0`, `onnx-simplifier==0.3.6` |
| Onnx | `onnx==1.9.0` |
| nn-Meter IR graph | --- |
| NNI IR graph | `nni==2.4` |

@@ -63,7 +63,7 @@ Here is a summary of supported inputs of the two methods.
| :---------------: | :---------------------------------------------------------------------------------: | :-----------------------------------------------------------------------------------------------------------------: |
| Tensorflow | Checkpoint file dumped by `tf.saved_model()` and ending with `.pb` | Checkpoint file dumped by `tf.saved_model` and ending with `.pb` |
| Torch | Models in `torchvision.models` | Object of `torch.nn.Module` |
| Onnx | Checkpoint file dumped by `onnx.save()` and ending with `.onnx` | Checkpoint file dumped by `onnx.save()` or model loaded by `onnx.load()` |
| Onnx | Checkpoint file dumped by `torch.onnx.export()` or `onnx.save()` and ending with `.onnx` | Checkpoint file dumped by `onnx.save()` or model loaded by `onnx.load()` |
| nn-Meter IR graph | Json file in the format of [nn-Meter IR Graph](./docs/input_models.md#nnmeter-ir-graph) | `dict` object following the format of [nn-Meter IR Graph](./docs/input_models.md#nnmeter-ir-graph) |
| NNI IR graph | - | NNI IR graph object |

demo.py

@@ -69,10 +69,10 @@ def test_pytorch_models(args, predictor):
alexnet = models.alexnet()
vgg16 = models.vgg16()
squeezenet = models.squeezenet1_0()
densenet = models.densenet161()
inception = models.inception_v3()
densenet161 = models.densenet161()
inception_v3 = models.inception_v3()
googlenet = models.googlenet()
shufflenet = models.shufflenet_v2_x1_0()
shufflenet_v2 = models.shufflenet_v2_x1_0()
mobilenet_v2 = models.mobilenet_v2() # noqa: F841
resnext50_32x4d = models.resnext50_32x4d()
wide_resnet50_2 = models.wide_resnet50_2()

@@ -82,10 +82,10 @@ def test_pytorch_models(args, predictor):
models.append(resnet18)
models.append(vgg16)
models.append(squeezenet)
models.append(densenet)
models.append(inception)
models.append(densenet161)
models.append(inception_v3)
models.append(googlenet)
models.append(shufflenet)
models.append(shufflenet_v2)
models.append(resnext50_32x4d)
models.append(wide_resnet50_2)
models.append(mnasnet)

@@ -195,4 +195,4 @@ if __name__ == "__main__":
if args.getir:
get_nnmeter_ir(args)

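The renamed variables in demo.py simply line up with the torchvision constructor names, which also match the model names the integration test now passes to `nn-meter --torchvision`. A minimal sketch of the constructors behind the renamed entries (torchvision only, nothing beyond what the diff already uses):

```python
# The renamed demo.py entries still map to the same torchvision constructors;
# only the variable names now match the CLI model names (densenet161,
# inception_v3, shufflenet_v2).
import torchvision.models as models

densenet161 = models.densenet161()
inception_v3 = models.inception_v3()
shufflenet_v2 = models.shufflenet_v2_x1_0()
```
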
@@ -14,12 +14,12 @@ Then simply run the following pip install in an environment that has `python >=
pip install .
```

nn-Meter is a latency predictor for models in tensorflow, pytorch, onnx, nn-meter IR graph and [NNI IR graph](https://github.com/microsoft/nni) formats. To use nn-Meter for a specific model type, you also need to install the corresponding packages. The well tested versions are listed below:
nn-Meter is a latency predictor for models in tensorflow, pytorch, onnx, nn-meter IR graph and [NNI IR graph](https://github.com/microsoft/nni) formats. To use nn-Meter for a specific model type, you also need to install the corresponding required packages. The well tested versions are listed below:

| Testing Model Type | Requirements |
| :-------------------: | :------------------------------------------------: |
| Tensorflow | `tensorflow==1.15.0` |
| Torch | `onnx==1.9.0`, `torch==1.9.0`, `torchvision==0.10.0` |
| Torch | `torch==1.7.1`, `torchvision==0.8.2`, `onnx==1.9.0`, `onnx-simplifier==0.3.6` |
| Onnx | `onnx==1.9.0` |
| nn-Meter IR graph | --- |
| NNI IR graph | `nni==2.4` |

@@ -32,7 +32,7 @@ The stable version of the wheel binary package will be released soon.
## "Hello World" example on torch model
nn-Meter is an accurate inference latency predictor for DNN models on diverse edge devices. nn-Meter supports tensorflow pb-file, onnx file, torch model and nni IR model for latency prediction.

Here is an example script to predict latency for Resnet18 in torch. To run the example, the packages `torch`, `torchvision` and `onnx` are required. The well tested versions are `torch==1.9.0`, `torchvision==0.10.0` and `onnx==1.9.0`.
Here is an example script to predict latency for Resnet18 in torch. To run the example, the packages `torch`, `torchvision` and `onnx` are required. The well tested versions are `torch==1.7.1`, `torchvision==0.8.2`, `onnx==1.9.0` and `onnx-simplifier==0.3.6`.

```python
from nn_meter import load_latency_predictor

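The diff hunk above cuts the README example off right after the import. For orientation, here is a minimal sketch of how such a prediction script could look; `load_latency_predictor` and the hardware name come from this commit, while the exact `predict` call signature is an assumption rather than something shown here.

```python
# Minimal sketch of the "Hello World" prediction flow the README introduces.
# The predictor name matches the hardware names in tests/reference_result_torch.txt;
# the predictor.predict(...) signature is an assumption, not taken from this commit.
import torchvision.models as models
from nn_meter import load_latency_predictor

predictor = load_latency_predictor("cortexA76cpu_tflite21")  # hardware predictor
resnet18 = models.resnet18()
latency = predictor.predict(resnet18, model_type="torch")  # assumed API, latency in ms
print(f"Predicted latency: {latency} ms")
```
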
@@ -1 +1,3 @@
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from .converter import TorchConverter

@@ -1,5 +1,8 @@
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from nn_meter.utils.utils import try_import_onnx, try_import_torch
import tempfile
from onnxsim import simplify
from nn_meter.ir_converters.onnx_converter import OnnxConverter

@@ -114,7 +117,11 @@ class OnnxBasedTorchConverter(OnnxConverter):
fp.seek(0)
model = onnx.load(fp, load_external_data=False)

super().__init__(model)
# convert model
model_simp, check = simplify(model)

assert check, "Simplified ONNX model could not be validated"
super().__init__(model_simp)

TorchConverter = OnnxBasedTorchConverter

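The change above makes `OnnxBasedTorchConverter` run the exported ONNX graph through onnx-simplifier before handing it to `OnnxConverter`. A standalone sketch of that same pipeline, with an illustrative model and input shape (both assumptions, not taken from the diff):

```python
# Standalone sketch of the conversion path the new converter code follows:
# export the torch model to ONNX in a temporary file, load it back, simplify it
# with onnx-simplifier, and only then hand the simplified graph on.
import tempfile

import onnx
import torch
import torchvision.models as models
from onnxsim import simplify

model = models.resnet18()                     # illustrative model
dummy_input = torch.randn(1, 3, 224, 224)     # illustrative input shape

with tempfile.TemporaryFile() as fp:
    torch.onnx.export(model, dummy_input, fp)
    fp.seek(0)
    onnx_model = onnx.load(fp, load_external_data=False)

model_simp, check = simplify(onnx_model)
assert check, "Simplified ONNX model could not be validated"
# model_simp is what gets passed to OnnxConverter in the diff above
```
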
@@ -1,3 +1,5 @@
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
nni_type_map = {
"aten::mul": "mul",
"aten::floordiv": "div",

@@ -50,10 +50,10 @@ def model_file_to_graph(filename: str, model_type: str, input_shape=(1, 3, 224,
'alexnet': 'models.alexnet()',
'vgg16': 'models.vgg16()',
'squeezenet': 'models.squeezenet1_0()',
'densenet': 'models.densenet161()',
'inception': 'models.inception_v3()',
'densenet161': 'models.densenet161()',
'inception_v3': 'models.inception_v3()',
'googlenet': 'models.googlenet()',
'shufflenet': 'models.shufflenet_v2_x1_0()',
'shufflenet_v2': 'models.shufflenet_v2_x1_0()',
'mobilenet_v2': 'models.mobilenet_v2()', # noqa: F841
'resnext50_32x4d': 'models.resnext50_32x4d()',
'wide_resnet50_2': 'models.wide_resnet50_2()',

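The values in this mapping are constructor expressions stored as strings, so the caller presumably evaluates them against the imported `torchvision.models` module. The `eval`-based resolution and the dict name below are assumptions used only for illustration:

```python
# Illustrative sketch of how the string-valued zoo entries above might be
# resolved; the dict name and the use of eval() are assumptions, not shown
# in this hunk.
import torchvision.models as models

torchvision_zoo_dict = {
    'densenet161': 'models.densenet161()',
    'inception_v3': 'models.inception_v3()',
    'shufflenet_v2': 'models.shufflenet_v2_x1_0()',
}

name = 'densenet161'
model = eval(torchvision_zoo_dict[name])  # evaluates "models.densenet161()"
```
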
@@ -198,7 +198,7 @@ class nnMeter:
self.kd.load_graph(graph)

py = nn_predict(self.kernel_predictors, self.kd.kernels) # in unit of ms
logging.info(f"Predict latency: {py}(ms)")
logging.info(f"Predict latency: {py} ms")
return py

@@ -300,4 +300,4 @@ def nn_meter_cli():
# Usage 3
if args.getir:
get_nnmeter_ir(args)

@@ -1,3 +1,3 @@
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from .predictors.utils import latency_metrics

@@ -1,3 +1,5 @@
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import pickle
import os
from glob import glob

@@ -0,0 +1,2 @@
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.

@@ -48,7 +48,7 @@ def try_import_onnx(require_version = "1.9.0"):
exit()


def try_import_torch(require_version = "1.9.0"):
def try_import_torch(require_version = "1.7.1"):
try:
import torch
if version.parse(torch.__version__) != version.parse(require_version):

@@ -77,4 +77,13 @@ def try_import_torchvision_models():
except ImportError:
logging.error(f'You have not install the torchvision package, please install torchvision and try again.')
exit()


def try_import_onnxsim():
try:
from onnxsim import simplify
return simplify
except ImportError:
logging.error(f'You have not install the onnx-simplifier package, please install onnx-simplifier and try again.')
exit()

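The new `try_import_onnxsim` helper returns `onnxsim.simplify` when the package is importable and exits with an error message otherwise, so call sites never import onnxsim directly. A small sketch of how a caller would use it, assuming the helper lives in `nn_meter.utils.utils` alongside the other try_import helpers:

```python
# Sketch of a call site for the new helper; the import path is assumed to match
# the other try_import_* helpers used elsewhere in this commit.
from nn_meter.utils.utils import try_import_onnxsim


def simplify_onnx_model(onnx_model):
    """Simplify an onnx.ModelProto, relying on the helper added in this commit."""
    simplify = try_import_onnxsim()
    model_simp, check = simplify(onnx_model)
    assert check, "Simplified ONNX model could not be validated"
    return model_simp
```
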
setup.py

@@ -1,3 +1,5 @@
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from setuptools import setup, find_packages

@@ -1,3 +1,5 @@
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import re
import os
import time

@@ -13,6 +15,7 @@ __model_suffix__ = {
"onnx": ".onnx"
}


# check package status
def check_package_status():
try:

@@ -20,6 +23,7 @@ def check_package_status():
except NotImplementedError:
logging.error("Meets ERROR when checking 'nn-meter -h'")


# check predictors list
def get_predictors():
try:

@@ -47,6 +51,7 @@ def parse_latency_info(info):
latency_list = list(map(lambda x: re.sub('\s*', '', x).split(':'), latency_info))
return latency_list


# integration test to predict model latency
def integration_test(model_type, url, ppath, output_name = "tests/test_result.txt"):
"""

@@ -95,7 +100,8 @@ def check_getir_module(model_type, ppath):
try:
_ = subprocess.check_output(['nn-meter', 'getir', f'--{model_type}', model])
_ = subprocess.check_output(['nn-meter', 'getir', f'--{model_type}', model, '--output', f'temp.json'])
os.remove('temp.json')
if os.path.exists('temp.json'):
os.remove('temp.json')
break # test just one file to avoid time cosuming
except NotImplementedError:
logging.error("Meets ERROR when checking getir --{model_type} {ppath}'")

@@ -108,25 +114,25 @@ if __name__ == "__main__":
# check tensorflow model
integration_test(
model_type='tensorflow',
url = "https://github.com/microsoft/nn-Meter/releases/download/v1.0-data/pb_models.zip",
ppath = "../data/testmodels/pb",
output_name = output_name
url="https://github.com/microsoft/nn-Meter/releases/download/v1.0-data/pb_models.zip",
ppath="../data/testmodels/pb",
output_name=output_name
)

# check onnx model
integration_test(
model_type='onnx',
url = "https://github.com/microsoft/nn-Meter/releases/download/v1.0-data/onnx_models.zip",
ppath = "../data/testmodels/onnx",
output_name = output_name
url="https://github.com/microsoft/nn-Meter/releases/download/v1.0-data/onnx_models.zip",
ppath="../data/testmodels/onnx",
output_name=output_name
)

# check nnmeter-ir graph model
integration_test(
model_type='nn-meter-ir',
url = "https://github.com/microsoft/nn-Meter/releases/download/v1.0-data/ir_graphs.zip",
ppath = "../data/testmodels/ir",
output_name = output_name
url="https://github.com/microsoft/nn-Meter/releases/download/v1.0-data/ir_graphs.zip",
ppath="../data/testmodels/ir",
output_name=output_name
)

# check getir

@@ -6,37 +6,9 @@ from tqdm import tqdm
import logging
import subprocess
from nn_meter import download_from_url
from integration_test import *


# check package status
def check_package_status():
try:
output1 = subprocess.check_output(['nn-meter', '-h'])
except NotImplementedError:
logging.error("Meets ERROR when checking 'nn-meter -h'")

# check predictors list
def get_predictors():
try:
predictors_list = subprocess.check_output(['nn-meter', '--list-predictors'])
except NotImplementedError:
logging.error("Meets ERROR when checking 'nn-meter --list-predictors'")

predictors_list = predictors_list.decode('utf-8')
pattern = re.compile(r'(?<=\[Predictor\] ).+(?=\n)')
predictors_info = pattern.findall(predictors_list)
predictors = list(map(lambda x: re.sub('\s*', '', x).split(':version='), predictors_info))
return predictors


def parse_latency_info(info):
# (nn-Meter) [RESULT] predict latency for shufflenetv2_0.onnx: 5.423898780782251 ms
pattern = re.compile(r'(?<=\[RESULT\] predict latency for ).*(?= ms\n)')
latency_info = pattern.findall(info)
latency_list = list(map(lambda x: re.sub('\s*', '', x).split(':'), latency_info))
return latency_list


# integration test to predict model latency
def integration_test_torch(model_type, model_list, output_name = "tests/test_result_torch.txt"):
"""

@@ -56,7 +28,7 @@ def integration_test_torch(model_type, model_list, output_name = "tests/test_res
for pred_name, pred_version in get_predictors():
try:
since = time.time()
# print(f'nn-meter --{model_type} {model} --predictor {pred_name} --predictor-version {pred_version}')
# print(f'nn-meter --torchvision ' + " ".join(model_list) + f' --predictor {pred_name} --predictor-version {pred_version}')
result = subprocess.check_output(['nn-meter', f'--torchvision'] + model_list + ['--predictor', f'{pred_name}', '--predictor-version', f'{pred_version}'])
runtime = time.time() - since
except NotImplementedError:

@@ -76,8 +48,8 @@ if __name__ == "__main__":
integration_test_torch(
model_type='torch',
model_list=[
'resnet18', 'alexnet', 'vgg16', 'squeezenet', 'densenet', 'inception', 'googlenet',
'shufflenet', 'mobilenet_v2', 'resnext50_32x4d', 'wide_resnet50_2', 'mnasnet']
'resnet18', 'alexnet', 'vgg16', 'squeezenet', 'densenet161', 'inception_v3', 'googlenet',
'shufflenet_v2', 'mobilenet_v2', 'resnext50_32x4d', 'wide_resnet50_2', 'mnasnet']
)

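The `parse_latency_info` logic shown in the removed block (and reused via `from integration_test import *`) pulls `name: latency` pairs out of the CLI output with a look-behind/look-ahead regex. A self-contained check of that parsing on the sample log line quoted in its comment:

```python
# Self-contained check of the parse_latency_info parsing logic, run on the
# sample log line quoted in its comment.
import re

info = "(nn-Meter) [RESULT] predict latency for shufflenetv2_0.onnx: 5.423898780782251 ms\n"
pattern = re.compile(r'(?<=\[RESULT\] predict latency for ).*(?= ms\n)')
latency_info = pattern.findall(info)
latency_list = list(map(lambda x: re.sub('\s*', '', x).split(':'), latency_info))
print(latency_list)  # [['shufflenetv2_0.onnx', '5.423898780782251']]
```
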
@@ -3,10 +3,10 @@ resnet18, torch, cortexA76cpu_tflite21, 1.0, 216.1971
alexnet, torch, cortexA76cpu_tflite21, 1.0, 96.5713
vgg16, torch, cortexA76cpu_tflite21, 1.0, 1668.5085
squeezenet, torch, cortexA76cpu_tflite21, 1.0, 98.3766
densenet, torch, cortexA76cpu_tflite21, 1.0, 917.2059
inception, torch, cortexA76cpu_tflite21, 1.0, 586.301
densenet161, torch, cortexA76cpu_tflite21, 1.0, 917.2059
inception_v3, torch, cortexA76cpu_tflite21, 1.0, 586.2565
googlenet, torch, cortexA76cpu_tflite21, 1.0, 167.5816
shufflenet, torch, cortexA76cpu_tflite21, 1.0, 3.8637
shufflenet_v2, torch, cortexA76cpu_tflite21, 1.0, 21.3181
mobilenet_v2, torch, cortexA76cpu_tflite21, 1.0, 43.9635
resnext50_32x4d, torch, cortexA76cpu_tflite21, 1.0, 1218.8905
wide_resnet50_2, torch, cortexA76cpu_tflite21, 1.0, 1218.8905

@@ -15,10 +15,10 @@ resnet18, torch, adreno640gpu_tflite21, 1.0, 39.3235
alexnet, torch, adreno640gpu_tflite21, 1.0, 13.1267
vgg16, torch, adreno640gpu_tflite21, 1.0, 219.2648
squeezenet, torch, adreno640gpu_tflite21, 1.0, 18.6742
densenet, torch, adreno640gpu_tflite21, 1.0, 186.5604
inception, torch, adreno640gpu_tflite21, 1.0, 129.6636
densenet161, torch, adreno640gpu_tflite21, 1.0, 186.5604
inception_v3, torch, adreno640gpu_tflite21, 1.0, 127.9842
googlenet, torch, adreno640gpu_tflite21, 1.0, 32.7581
shufflenet, torch, adreno640gpu_tflite21, 1.0, 0.5928
shufflenet_v2, torch, adreno640gpu_tflite21, 1.0, 5.4239
mobilenet_v2, torch, adreno640gpu_tflite21, 1.0, 9.9207
resnext50_32x4d, torch, adreno640gpu_tflite21, 1.0, 230.961
wide_resnet50_2, torch, adreno640gpu_tflite21, 1.0, 230.961

@@ -27,10 +27,10 @@ resnet18, torch, adreno630gpu_tflite21, 1.0, 49.4287
alexnet, torch, adreno630gpu_tflite21, 1.0, 16.8673
vgg16, torch, adreno630gpu_tflite21, 1.0, 286.5998
squeezenet, torch, adreno630gpu_tflite21, 1.0, 21.095
densenet, torch, adreno630gpu_tflite21, 1.0, 193.0796
inception, torch, adreno630gpu_tflite21, 1.0, 162.5346
densenet161, torch, adreno630gpu_tflite21, 1.0, 193.0796
inception_v3, torch, adreno630gpu_tflite21, 1.0, 161.3187
googlenet, torch, adreno630gpu_tflite21, 1.0, 37.9612
shufflenet, torch, adreno630gpu_tflite21, 1.0, 0.6881
shufflenet_v2, torch, adreno630gpu_tflite21, 1.0, 5.5792
mobilenet_v2, torch, adreno630gpu_tflite21, 1.0, 9.9861
resnext50_32x4d, torch, adreno630gpu_tflite21, 1.0, 277.2564
wide_resnet50_2, torch, adreno630gpu_tflite21, 1.0, 277.2564

@@ -39,10 +39,10 @@ resnet18, torch, myriadvpu_openvino2019r2, 1.0, 21.8861
alexnet, torch, myriadvpu_openvino2019r2, 1.0, 20.8138
vgg16, torch, myriadvpu_openvino2019r2, 1.0, 211.3735
squeezenet, torch, myriadvpu_openvino2019r2, 1.0, 11.0523
densenet, torch, myriadvpu_openvino2019r2, 1.0, 144.1079
inception, torch, myriadvpu_openvino2019r2, 1.0, 59.3455
densenet161, torch, myriadvpu_openvino2019r2, 1.0, 144.1079
inception_v3, torch, myriadvpu_openvino2019r2, 1.0, 59.3455
googlenet, torch, myriadvpu_openvino2019r2, 1.0, 17.0897
shufflenet, torch, myriadvpu_openvino2019r2, 1.0, 1.9972
shufflenet_v2, torch, myriadvpu_openvino2019r2, 1.0, 20.8282
mobilenet_v2, torch, myriadvpu_openvino2019r2, 1.0, 22.0187
resnext50_32x4d, torch, myriadvpu_openvino2019r2, 1.0, 130.4383
wide_resnet50_2, torch, myriadvpu_openvino2019r2, 1.0, 130.4383