Feature: support nn-Meter builder (#43)
This commit is contained in:
Parent
dd74162fb3
Commit
76d7f881e1
|
@ -1,2 +1,3 @@
|
|||
data/** filter=lfs diff=lfs merge=lfs -text
|
||||
data filter=lfs diff=lfs merge=lfs -text
|
||||
version https://git-lfs.github.com/spec/v1
|
||||
oid sha256:88f226ba3dc223b79b95a1791174669d2b1e6b42564c8997b3e5bfd141ac717a
|
||||
size 123
|
||||
|
|
|
@ -28,7 +28,7 @@ jobs:
|
|||
path: |
|
||||
~/.nn_meter
|
||||
/home/runner/work/nn-Meter/data/testmodels
|
||||
key: ${{hashFiles('nn_meter/configs/predictors.yaml')}}-${{hashFiles('tests/integration_test.py')}}
|
||||
key: ${{hashFiles('nn_meter/configs/predictors.yaml')}}-${{hashFiles('tests/integration_test/test_latency_predictor.py')}}
|
||||
|
||||
- name: Install dependencies
|
||||
run: |
|
||||
|
@ -42,11 +42,11 @@ jobs:
|
|||
run: pip install -U .
|
||||
|
||||
- name: Integration test
|
||||
run: python tests/integration_test.py
|
||||
run: python tests/integration_test/test_latency_predictor.py
|
||||
|
||||
- name: Diff result with reference
|
||||
run: diff tests/data/reference_result.txt tests/test_result.txt
|
||||
run: diff tests/integration_test/data/reference_result.txt tests/integration_test/test_result.txt
|
||||
|
||||
- name: clean env
|
||||
run: rm tests/test_result.txt
|
||||
run: rm tests/integration_test/test_result.txt
|
||||
|
||||
|
|
|
@ -28,7 +28,7 @@ jobs:
|
|||
path: |
|
||||
~/.nn_meter
|
||||
/home/runner/work/nn-Meter/data/testmodels
|
||||
key: ${{hashFiles('nn_meter/configs/predictors.yaml')}}-${{hashFiles('tests/integration_test.py')}}
|
||||
key: ${{hashFiles('nn_meter/configs/predictors.yaml')}}-${{hashFiles('tests/integration_test/test_latency_predictor.py')}}
|
||||
|
||||
- name: Install dependencies
|
||||
run: |
|
||||
|
@ -40,10 +40,10 @@ jobs:
|
|||
run: pip install -U .
|
||||
|
||||
- name: Integration test
|
||||
run: python tests/integration_test_torch.py --apply-nni
|
||||
run: python tests/integration_test/test_latency_predictor_torch.py --apply-nni
|
||||
|
||||
- name: Diff result with reference
|
||||
run: diff tests/data/reference_result_nni_based_torch.txt tests/test_result_nni_based_torch.txt
|
||||
run: diff tests/integration_test/data/reference_result_nni_based_torch.txt tests/integration_test/test_result_nni_based_torch.txt
|
||||
|
||||
- name: clean env
|
||||
run: rm tests/test_result_nni_based_torch.txt
|
||||
run: rm tests/integration_test/test_result_nni_based_torch.txt
|
||||
|
|
|
@ -28,7 +28,7 @@ jobs:
|
|||
path: |
|
||||
~/.nn_meter
|
||||
/home/runner/work/nn-Meter/data/testmodels
|
||||
key: ${{hashFiles('nn_meter/configs/predictors.yaml')}}-${{hashFiles('tests/integration_test.py')}}
|
||||
key: ${{hashFiles('nn_meter/configs/predictors.yaml')}}-${{hashFiles('tests/integration_test/test_latency_predictor.py')}}
|
||||
|
||||
- name: Install dependencies
|
||||
run: |
|
||||
|
@ -41,10 +41,10 @@ jobs:
|
|||
run: pip install -U .
|
||||
|
||||
- name: Integration test
|
||||
run: python tests/integration_test_torch.py --apply-onnx
|
||||
run: python tests/integration_test/test_latency_predictor_torch.py --apply-onnx
|
||||
|
||||
- name: Diff result with reference
|
||||
run: diff tests/data/reference_result_onnx_based_torch.txt tests/test_result_onnx_based_torch.txt
|
||||
run: diff tests/integration_test/data/reference_result_onnx_based_torch.txt tests/integration_test/test_result_onnx_based_torch.txt
|
||||
|
||||
- name: clean env
|
||||
run: rm tests/test_result_onnx_based_torch.txt
|
||||
run: rm tests/integration_test/test_result_onnx_based_torch.txt
|
||||
|
|
|
@ -1,5 +1,3 @@
|
|||
Note: This is an beta (preview) version which is still under refining.
|
||||
|
||||
**nn-Meter** is a novel and efficient system to accurately predict the inference latency of DNN models on diverse edge devices. The key idea is dividing a whole model inference into kernels, i.e., the execution units of fused operators on a device, and conduct kernel-level prediction. We currently evaluate four popular platforms on a large dataset of 26k models. It achieves 99.0% (mobile CPU), 99.1% (mobile Adreno 640 GPU), 99.0% (mobile Adreno 630 GPU), and 83.4% (Intel VPU) prediction accuracy.
|
||||
|
||||
The current supported hardware and inference frameworks:
|
||||
|
@ -73,7 +71,7 @@ Here is a summary of supported inputs of the two methods.
|
|||
| Tensorflow | Checkpoint file dumped by `tf.saved_model()` and end with `.pb` | Checkpoint file dumped by `tf.saved_model` and end with `.pb` |
|
||||
| Torch | Models in `torchvision.models` | Object of `torch.nn.Module` |
|
||||
| Onnx | Checkpoint file dumped by `torch.onnx.export()` or `onnx.save()` and end with `.onnx` | Checkpoint file dumped by `onnx.save()` or model loaded by `onnx.load()` |
|
||||
| nn-Meter IR graph | Json file in the format of [nn-Meter IR Graph](./docs/input_models.md#nnmeter-ir-graph) | `dict` object following the format of [nn-Meter IR Graph](./docs/input_models.md#nnmeter-ir-graph) |
|
||||
| nn-Meter IR graph | Json file in the format of [nn-Meter IR Graph](./docs/predictor/input_models.md#nnmeter-ir-graph) | `dict` object following the format of [nn-Meter IR Graph](./docs/predictor/input_models.md#nnmeter-ir-graph) |
|
||||
| NNI IR graph | - | NNI IR graph object |
|
||||
|
||||
In both methods, users could appoint predictor name and version to target a specific hardware platform (device). Currently, nn-Meter supports prediction on the following four configs:
|
||||
|
|
|
@ -0,0 +1,469 @@
|
|||
# Build Kernel Latency Predictor
|
||||
|
||||
## Step 1: Config Sampling From Prior Distribution
|
||||
|
||||
To learn the relationship between configurations and latency, we need to generate a training set (i.e., variously configured kernels and their latencies) for regression. Since it is infeasible to sample and measure all configurations for all kernels, a direct method is random sampling.
|
||||
|
||||
The first step is sampling configuration values from the prior distribution, which is inferred from existing models. Based on our kernel model, there are generally 6 configuration values, including height and width (`"HW"`), input channel (`"CIN"`), output channel (`"COUT"`), kernel size (`"KERNEL_SIZE"`), strides (`"STRIDES"`), and kernel size for the pooling layer (`"POOL_STRIDES"`). We sample the configurations based on the prior distribution and adapt each value to a common valid value. That is, height and width are restricted to values from `[1, 3, 7, 14, 28, 56, 112, 224]`, kernel size to `[1, 3, 5, 7]`, strides to `[1, 2, 4]`, and kernel size for the pooling layer to `[2, 3]`. We store the prior knowledge of existing models as csv files in `nn_meter/builder/kernel_predictor_builder/data_sampler/prior_config_lib/`.
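For illustration, adapting a raw sampled value to the nearest common valid value could look like this sketch (the helper name is hypothetical):

```python
# valid values that raw sampled configs are adapted to (from the description above)
VALID_HW = [1, 3, 7, 14, 28, 56, 112, 224]
VALID_KERNEL_SIZE = [1, 3, 5, 7]
VALID_STRIDES = [1, 2, 4]

def snap_to_valid(value, valid_values):
    """Map a raw sampled value to the closest common valid value (hypothetical helper)."""
    return min(valid_values, key=lambda v: abs(v - value))

print(snap_to_valid(60, VALID_HW))           # -> 56
print(snap_to_valid(4, VALID_KERNEL_SIZE))   # -> 3
```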
|
||||
|
||||
## Step 2: Generate and Profile Kernel Model by Configs
|
||||
|
||||
The second step is generating and profiling kernel models according to the sampled configurations. Currently, the kernel blocks and corresponding configurations supported by nn-Meter include:
|
||||
|
||||
(conv related kernels)
|
||||
|
||||
- `"conv_bn_relu"`: `HW`, `CIN`, `COUT`, `KERNEL_SIZE`, `STRIDES`
|
||||
- `"conv_bn_relu6"`: `HW`, `CIN`, `COUT`, `KERNEL_SIZE`, `STRIDES`
|
||||
- `"conv_bn"`: `HW`, `CIN`, `COUT`, `KERNEL_SIZE`, `STRIDES`
|
||||
- `"conv_relu"`: `HW`, `CIN`, `COUT`, `KERNEL_SIZE`, `STRIDES`
|
||||
- `"conv_relu6"`: `HW`, `CIN`, `COUT`, `KERNEL_SIZE`, `STRIDES`
|
||||
- `"conv_hswish"`: `HW`, `CIN`, `COUT`, `KERNEL_SIZE`, `STRIDES`
|
||||
- `"conv_block"`: `HW`, `CIN`, `COUT`, `KERNEL_SIZE`, `STRIDES`
|
||||
- `"conv_bn_hswish"`: `HW`, `CIN`, `COUT`, `KERNEL_SIZE`, `STRIDES`
|
||||
|
||||
(dwconv related kernels)
|
||||
- `"dwconv_bn"`: `HW`, `CIN`, `KERNEL_SIZE`, `STRIDES`
|
||||
- `"dwconv_relu"`: `HW`, `CIN`, `KERNEL_SIZE`, `STRIDES`
|
||||
- `"dwconv_relu6"`: `HW`, `CIN`, `KERNEL_SIZE`, `STRIDES`
|
||||
- `"dwconv_bn_relu"`: `HW`, `CIN`, `KERNEL_SIZE`, `STRIDES`
|
||||
- `"dwconv_bn_relu6"`: `HW`, `CIN`, `KERNEL_SIZE`, `STRIDES`
|
||||
- `"dwconv_block"`: `HW`, `CIN`, `KERNEL_SIZE`, `STRIDES`
|
||||
- `"dwconv_bn_hswish"`: `HW`, `CIN`, `KERNEL_SIZE`, `STRIDES`
|
||||
|
||||
(other kernels)
|
||||
|
||||
- `"maxpool_block"`: `HW`, `CIN`, `KERNEL_SIZE`, `POOL_STRIDES`
|
||||
- `"avgpool_block"`: `HW`, `CIN`, `KERNEL_SIZE`, `POOL_STRIDES`
|
||||
- `"fc_block"`: `CIN`, `COUT`
|
||||
- `"concat_block"`: `HW`, `CIN1`, `CIN2`, `CIN3`, `CIN4`
|
||||
- `"split_block"`: `HW`, `CIN`
|
||||
- `"channel_shuffle"`: `HW`, `CIN`
|
||||
- `"se_block"`: `HW`, `CIN`
|
||||
- `"globalavgpool_block"`: `HW`, `CIN`
|
||||
- `"bn_relu"`: `HW`, `CIN`
|
||||
- `"bn_block"`: `HW`, `CIN`
|
||||
- `"hswish_block"`: `HW`, `CIN`
|
||||
- `"relu_block"`: `HW`, `CIN`
|
||||
- `"add_relu"`: `HW`, `CIN`
|
||||
- `"add_block"`: `HW`, `CIN`
|
||||
|
||||
|
||||
The first and second steps are implemented by `nn_meter.builder.nn_meter_builder.sample_and_profile_kernel_data`. Here is an example:
|
||||
|
||||
``` python
|
||||
from nn_meter.builder.nn_meter_builder import sample_and_profile_kernel_data
|
||||
kernel_type = "conv_bn_relu"
|
||||
backend = "tflite_cpu"
|
||||
|
||||
# init predictor builder with prior data sampler
|
||||
kernel_data = sample_and_profile_kernel_data(kernel_type, init_sample_num=1000, backend=backend, sampling_mode='prior', mark='prior')
|
||||
```
|
||||
|
||||
The generated models are saved in `<workspace-path>/predictor_build/models`, and the configuration information and profiled results are dumped as json files to `<workspace-path>/predictor_build/results/<kernel_type>.json` and `<workspace-path>/predictor_build/results/profiled_<kernel_type>.json`, respectively.
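For example, the dumped files can be loaded for inspection (a sketch; replace `<workspace-path>` with the actual workspace path):

```python
import json
import os

# load the dumped config information and profiled latencies for the conv_bn_relu kernel
results_dir = os.path.join("<workspace-path>", "predictor_build", "results")
with open(os.path.join(results_dir, "conv_bn_relu.json")) as f:
    kernel_configs = json.load(f)
with open(os.path.join(results_dir, "profiled_conv_bn_relu.json")) as f:
    profiled_latencies = json.load(f)
```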
|
||||
|
||||
The method `sample_and_profile_kernel_data` is composed of three main steps: `generate_config_sample`, `convert_models`, and `profile_models`. Here is an example that decomposes `sample_and_profile_kernel_data` into these steps. Users could use the decomposed interfaces if needed.
|
||||
``` python
from nn_meter.builder.kernel_predictor_builder import generate_config_sample
from nn_meter.builder import convert_models, profile_models
from nn_meter.builder.backends import connect_backend

# sample configs for the kernel and generate models
models = generate_config_sample(kernel_type, sample_num, mark=mark,
                                sampling_mode=sampling_mode, configs=configs)

# connect to the backend used to run the models and get latency
backend = connect_backend(backend_name="tflite_cpu")

# convert the models to the format needed by the backend, in order to increase efficiency when profiling on device
models = convert_models(backend, saved_name, broken_point_mode=True)

# run the models on the given backend and return the latency of the test-case models
profiled_results = profile_models(backend, models, mode='predbuild', save_name="xxx.json", have_converted=True)
```
|
||||
|
||||
Note: for kernels related to conv and dwconv, our experiment results have shown that all kernels containing one conv layer (or one dwconv layer) have almost the same latency. Thus, in nn-Meter, all kernels containing one conv or dwconv layer share the same kernel predictor.
|
||||
|
||||
## Step 3: Initialize Kernel Latency Predictor
|
||||
|
||||
After preparing the training data, we construct a random forest regression model as the kernel latency predictor. Here is an example:
|
||||
|
||||
```python
|
||||
import os
import logging
from nn_meter.builder.kernel_predictor_builder import build_predictor_by_data
|
||||
|
||||
kernel_type = "conv_bn_relu"
|
||||
backend = "tflite_cpu"
|
||||
error_threshold = 0.1
|
||||
|
||||
# extract training feature and target from profiled results
|
||||
cfgs_path = os.path.join("<workspace-path>", "predictor_build", "results", "conv_bn_relu.json")
|
||||
lats_path = os.path.join("<workspace-path>", "predictor_build", "results", "profiled_conv_bn_relu.json")
|
||||
kernel_data = (cfgs_path, lats_path)
|
||||
|
||||
# build latency predictor
|
||||
predictor, acc10, error_configs = build_predictor_by_data(
|
||||
kernel_type, kernel_data, backend, error_threshold=error_threshold, mark="prior",
|
||||
save_path=os.path.join("<workspace-path>", "predictor_build", "results")
|
||||
)
|
||||
logging.info(f'Iteration 0: acc10 {acc10}, error_configs number: {len(error_configs)}')
|
||||
```
|
||||
|
||||
In the implementation, the tuple `kernel_data` consists of `cfgs_path` and `lats_path`, indicating the config information and the latency information, respectively. Both `cfgs_path` and `lats_path` accept either a json file path string or a dictionary of models. In addition, if the config information and latency information are in the same data holder, users could directly specify `kernel_data = cfgs_path`.
|
||||
|
||||
In the feature extraction part, all configs are extracted as features. Besides, for kernels containing a `conv`, `dwconv` or `fc` layer, the number of FLOPs and the number of parameters are also extracted as features for latency prediction.
|
||||
|
||||
After feature extraction, nn-Meter builds the predictor with the method `build_predictor_by_data`. The regression predictor is implemented by `sklearn.ensemble.RandomForestRegressor`. All data are split into training and validation sets at a ratio of 8:2. We evaluate the prediction performance by the Root Mean Square Error (RMSE) and the relative Root Mean Square Percentage Error (RMSPE), which are the standard metrics in regression. Besides, we report the $\pm 5\%$ and $\pm 10\%$ accuracy, which are the percentages of models whose predicted latency is within the corresponding error bound relative to the measured latency. Smaller RMSE/RMSPE and larger $\pm 5\%$ and $\pm 10\%$ accuracy suggest better performance.
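For reference, these metrics can be computed as in the following sketch (not the exact nn-Meter implementation):

```python
import numpy as np

def latency_metrics(y_pred, y_true):
    """RMSE, RMSPE (%), and +/-5% / +/-10% accuracy as described above (a sketch)."""
    y_pred, y_true = np.asarray(y_pred, dtype=float), np.asarray(y_true, dtype=float)
    rmse = np.sqrt(np.mean((y_pred - y_true) ** 2))
    rmspe = np.sqrt(np.mean(((y_pred - y_true) / y_true) ** 2)) * 100
    rel_error = np.abs(y_pred - y_true) / y_true
    acc5 = np.mean(rel_error <= 0.05)
    acc10 = np.mean(rel_error <= 0.10)
    return rmse, rmspe, acc5, acc10
```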
|
||||
|
||||
The output of `build_predictor_by_data` includes the predictor class, the $\pm 10\%$ accuracy, and the training data items whose error is larger than `error_threshold`. The large-error data are used in the next step.
|
||||
|
||||
## Step 4: Adaptive Data Sampling
|
||||
|
||||
In the nn-Meter paper, we observed that the configurations of kernel size (KERNEL_SIZE), height and width (HW), input channel (CIN), and output channel (COUT) show non-linear patterns on our measured devices. In particular, HW and COUT exhibit a staircase pattern, in which Conv with two different HW/COUT values may have the same latency. These non-linearities reflect the complexities in hardware optimizations.
|
||||
|
||||
Therefore, the main idea to improve the predictor performance is to sample the most beneficial data from the kernel configuration space. The sampling covers 1) the configuration range in CNN design, and 2) hardware-crucial configurations that reflect the hardware optimizations and can significantly impact the prediction accuracy.
|
||||
|
||||
We propose adaptive data sampling to generate fine-grained channel number sampling for data with large prediction errors. For each such data point, we fix all the other dimensions except the channel number $C_0$, and randomly sample $M$ values from $[0.4 \times C_0, 1.2 \times C_0]$. For example, for Conv with (HW=56, KERNEL_SIZE=3, STRIDES=1, CIN=24, COUT=64), we fix the HW, KERNEL_SIZE, and STRIDES dimensions, and sample $M$ new CIN and COUT values from $[9, 28]$ and $[25, 76]$, respectively. The fine-grained sampling number is represented by the parameter `finegrained_sample_num`.
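A minimal sketch of the fine-grained channel sampling described above (the helper name is illustrative):

```python
import random

def finegrained_channel_sampling(c0, sample_num):
    """Sample `sample_num` channel numbers from [0.4 * C0, 1.2 * C0] (a sketch)."""
    return [random.randint(int(0.4 * c0), int(1.2 * c0)) for _ in range(sample_num)]

# e.g., for the large-error Conv config above with CIN=24 and COUT=64
new_cins = finegrained_channel_sampling(24, sample_num=10)   # values within [9, 28]
new_couts = finegrained_channel_sampling(64, sample_num=10)  # values within [25, 76]
```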
|
||||
|
||||
The iterative process continues until the predictor accuracy meets user's requirements. In this part, we conduct the following steps:
|
||||
|
||||
* Build a regression model by current sampled data;
|
||||
* Locate data points in testset with large prediction error (prediction error >`large_error_threshold`, default=0.1);
|
||||
* For each data point, we perform fine-grained data sampling to generate random data around the large error data;
|
||||
* Collect fine-grained sampled data with previous data to build new predictor;
|
||||
* Conduct next iteration.
|
||||
|
||||
Here is an example for adaptive data sampling:
|
||||
```python
|
||||
for i in range(1, iteration):
|
||||
# finegrained sampling and profiling for large error data
|
||||
new_kernel_data = sample_and_profile_kernel_data(
|
||||
kernel_type, finegrained_sample_num, backend,
|
||||
sampling_mode = 'finegrained', configs=error_configs, mark=f'finegrained{i}'
|
||||
)
|
||||
|
||||
# merge finegrained data with previous data and build new regression model
|
||||
kernel_data = merge_prev_info(new_info=new_kernel_data, prev_info=kernel_data)
|
||||
predictor, acc10, error_configs = build_predictor_by_data(
|
||||
kernel_type, kernel_data, backend, error_threshold=error_threshold, mark="prior",
|
||||
save_path=os.path.join("<workspace-path>", "predictor_build", "results")
|
||||
)
|
||||
logging.keyinfo(f'Iteration {i}: acc10 {acc10}, error_configs number: {len(error_configs)}')
|
||||
```
|
||||
|
||||
## End-to-end Demo
|
||||
|
||||
nn-Meter has wrapped the four main steps into one method named `nn_meter.builder.build_predictor_for_kernel`. Here is an example of building a latency predictor for the `"conv_bn_relu"` kernel:
|
||||
|
||||
```python
|
||||
# initialize builder config with workspace
|
||||
from nn_meter.builder import builder_config
|
||||
builder_config.init("path/to/workspace/folder")
|
||||
|
||||
# build latency predictor for kernel
|
||||
from nn_meter.builder import build_predictor_for_kernel
|
||||
kernel_type = "conv_bn_relu"
|
||||
backend = "tflite_cpu"
|
||||
|
||||
predictor, data = build_predictor_for_kernel(
|
||||
kernel_type, backend, init_sample_num = 1000, finegrained_sample_num = 10, iteration = 5, error_threshold = 0.1
|
||||
)
|
||||
```
|
||||
|
||||
In the experiment of nn-Meter, we set `init_sample_num` as 1000, `finegrained_sample_num` as 10, `iteration` as 5, and `error_threshold` as 0.1.
|
||||
|
||||
nn-Meter also provides an end-to-end method, named `nn_meter.builder.build_latency_predictor()`, for users to build a series of general latency predictors. This method builds predictors for all kernels in `<workspace-path>/configs/predictorbuild_config.yaml` according to their corresponding parameters. The parameters include `INIT_SAMPLE_NUM`, `FINEGRAINED_SAMPLE_NUM`, `ITERATION`, and `ERROR_THRESHOLD`. Here is an example:
|
||||
|
||||
``` python
|
||||
# initialize builder config with workspace
|
||||
from nn_meter.builder import builder_config
|
||||
builder_config.init("path/to/workspace/folder") # initialize builder config with workspace
|
||||
|
||||
# build latency predictor for kernel
|
||||
from nn_meter.builder import build_latency_predictor
|
||||
build_latency_predictor(backend="tflite_cpu")
|
||||
```
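For illustration, a per-kernel entry in `<workspace-path>/configs/predictorbuild_config.yaml` carrying these parameters might look like the following hypothetical fragment (the actual file layout may differ):

```yaml
# hypothetical fragment; the values follow the experiment settings described above
conv_bn_relu:
  INIT_SAMPLE_NUM: 1000
  FINEGRAINED_SAMPLE_NUM: 10
  ITERATION: 5
  ERROR_THRESHOLD: 0.1
```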
|
||||
|
||||
# Build predictor for customized kernel
|
||||
|
||||
If users want to add new kernels for latency profiling and predictor building, here are the steps to prepare and register new kernels.
|
||||
|
||||
## Prepare Customized Kernels
|
||||
|
||||
### Step 1: Prepare the Customized Kernel Class
|
||||
|
||||
nn-Meter provides an API for users to customize their own kernel block. In nn-Meter, each kernel is implemented by inheriting a base class named `nn_meter.builder.nn_generator.BaseBlock`. The kernel block has an input parameter `config` to feed configuration params into the kernel. There are two attributes that should be declared, `input_shape` and `input_tensor_shape`, as well as one method named `get_model()`. nn-Meter supports both tensorflow and torch implementations of the kernel model. Users could switch the kernel implementation between tensorflow and torch by editing the configuration `IMPLEMENT` in `<workspace-path>/configs/predictorbuild_config.yaml`. Here we use the tensorflow implementation as an example.
|
||||
|
||||
- `input_shape` defines the dimension of one model input shape without batch size. Generally, when the input shape is 3D, `input_shape` should be `[config["HW"], config["HW"], config["CIN"]]`, and when the input shape is 1D, `input_shape` should be `[config["CIN"]]`.
|
||||
|
||||
- `input_tensor_shape` is a list defining all model inputs. In basic situation, `input_tensor_shape` should be `[input_shape]` if the kernel only has one input. If the kernel has more than one input, such as `add_relu` kernel, `input_tensor_shape` is `[input_shape, input_shape]`.
|
||||
|
||||
- `get_model` is the implementation of the kernel model and returns an instance of `keras.Model` for the kernel.
|
||||
|
||||
Users could refer to the following example to learn how to write a kernel class.
|
||||
|
||||
``` python
|
||||
import tensorflow.keras as keras
|
||||
from nn_meter.builder.nn_generator import BaseBlock
|
||||
|
||||
class MyKernel(BaseBlock):
|
||||
''' This kernel is built by Conv, BN, and Relu layer, which is the same as the builtin `conv_bn_relu` block.
|
||||
'''
|
||||
def __init__(self, config):
|
||||
self.config = config
|
||||
self.input_shape = [config["HW"], config["HW"], config["CIN"]]
|
||||
self.input_tensor_shape = [self.input_shape]
|
||||
|
||||
def get_model(self):
|
||||
class Model(keras.Model):
|
||||
def __init__(self, cout, kernel_size, strides):
|
||||
super().__init__()
|
||||
self.conv = keras.layers.Conv2D(
|
||||
cout,
|
||||
kernel_size=kernel_size,
|
||||
strides=strides,
|
||||
padding="same"
|
||||
)
|
||||
self.bn = keras.layers.BatchNormalization()
|
||||
self.relu = keras.layers.ReLU()
|
||||
|
||||
def call(self, inputs):
|
||||
x = self.conv(inputs)
|
||||
x = self.bn(x)
|
||||
x = self.relu(x)
|
||||
return x
|
||||
|
||||
return Model(self.config["COUT"], self.config["KERNEL_SIZE"], self.config["STRIDES"])
|
||||
```
|
||||
|
||||
### Step 2: Collect the Prior Data and Implement Sampling Code
|
||||
|
||||
Next, users should collect the prior data and implement the config sampler for the customized kernel. In nn-Meter, the config sampler of each kernel is implemented by inheriting a base class named `nn_meter.builder.kernel_predictor_builder.BaseConfigSampler`. The config sampler has two methods, `prior_config_sampling` and `finegrained_config_sampling`. The output of both methods is a list of dicts, with each dict indicating a group of configurations.
|
||||
|
||||
- `prior_config_sampling(self, sample_num)`: utilize the prior data to define the configuration sampling from the prior distribution.
|
||||
|
||||
- `finegrained_config_sampling(self, sample_num, configs)`: for data in `configs`, perform fine-grained data sampling to generate random data around the large error data.
|
||||
|
||||
Here is an example:
|
||||
|
||||
``` python
|
||||
import random
|
||||
from nn_meter.builder.kernel_predictor_builder import BaseConfigSampler
|
||||
|
||||
class MySampler(BaseConfigSampler):
    ''' This sampler is for conv-related kernels. In the `prior_config_sampling` method, all configs are sampled based on existing conv models. In the
    `finegrained_config_sampling` method, only cin and cout are sampled around the configs given in the parameter `configs`.
    '''

    def prior_config_sampling(self, sample_num):
        ncfgs = []  # collect the sampled config dicts
        new_hws = ...
        new_cins = ...
        new_couts = ...
        new_kernel_sizes = ...
        new_strides = ...
        for hw, cin, cout, kernel_size, stride in zip(new_hws, new_cins, new_couts, new_kernel_sizes, new_strides):
            c = {
                'HW': hw,
                'CIN': cin,
                'COUT': cout,
                'KERNEL_SIZE': kernel_size,
                'STRIDES': stride,
            }
            ncfgs.append(c)
        return ncfgs

    def finegrained_config_sampling(self, sample_num, configs):
        ncfgs = []
        for cfg in configs:
            cins = ...
            couts = ...
            for cin, cout in zip(cins, couts):
                c = {
                    'HW': cfg['HW'],
                    'CIN': cin,
                    'COUT': cout,
                    'KERNEL_SIZE': cfg['KERNEL_SIZE'],
                    'STRIDES': cfg['STRIDES'],
                }
                ncfgs.append(c)
        return ncfgs
|
||||
```
|
||||
|
||||
Note: all sampled configuration values will be fed into the kernel via the input `config`. Users should follow the same notation in the sampler and the kernel class to transfer parameters.
|
||||
|
||||
### Step 3: Specify Kernel Feature for Training Predictor
|
||||
|
||||
Finally, users should specify the features of the kernel for training the kernel latency predictor. nn-Meter provides a base class named `nn_meter.builder.kernel_predictor_builder.BaseFeatureParser`. The feature parser has two required attributes named `kernel_type` and `needed_config`, as well as two methods, `get_feature_by_config(self, config_dict)` and `get_config_by_feature(self, feature)`.
|
||||
|
||||
- `kernel_type`: the builtin kernel type of the parser.
|
||||
|
||||
- `needed_config`: the list of all config variables. such as `["HW", "CIN", "KERNEL_SIZE", "STRIDES"]`.
|
||||
|
||||
- `get_feature_by_config(self, config_dict)`: convert the config dict into a feature list; adding new features derived from the configs is acceptable.
|
||||
|
||||
- `get_config_by_feature(self, feature)`: convert the feature back to a config dict. The newly added features should be removed.
|
||||
|
||||
Here is an example:
|
||||
|
||||
``` python
|
||||
from nn_meter.builder.kernel_predictor_builder import BaseFeatureParser
|
||||
|
||||
class MyParser(BaseFeatureParser):
|
||||
''' This parser utilized config "HW", "CIN", "COUT", "KERNEL_SIZE", "STRIDES", as well as the flops and parameter number as feature,
|
||||
which is the same parser for Conv, Dwconv and FC related kernel.
|
||||
'''
|
||||
def __init__(self, kernel_type):
|
||||
self.kernel_type = kernel_type
|
||||
self.needed_config = ["HW", "CIN", "COUT", "KERNEL_SIZE", "STRIDES"]
|
||||
|
||||
def get_feature_by_config(self, config_dict):
|
||||
feature = [config_dict[data] for data in self.needed_config]
|
||||
hw, cin, cout, kernel_size, stride = config_dict["HW"], config_dict["CIN"], config_dict["COUT"], \
|
||||
config_dict["KERNEL_SIZE"], config_dict["STRIDES"]
|
||||
param = cout * (kernel_size * kernel_size + 1)
|
||||
flop = 2 * hw / stride * hw / stride * param
|
||||
|
||||
flop /= 2e6
|
||||
param /= 1e6
|
||||
feature.extend([flop, param])
|
||||
return feature
|
||||
|
||||
def get_config_by_feature(self, feature):
|
||||
# remove flops and params num feature from feature vector
|
||||
feature = feature[:-2]
|
||||
assert len(self.needed_config) == len(feature)
|
||||
config = {k: v for k, v in zip(self.needed_config, feature)}
|
||||
return config
|
||||
```
|
||||
|
||||
## Register kernel to nn-Meter
|
||||
|
||||
### Step 1: Create a Package for the Customized Kernel
|
||||
|
||||
nn-Meter requires users to gather all code of the kernel into a package at a fixed location. A folder will be treated as a package once an `__init__.py` file is added. Here is a demo of the folder structure:
|
||||
|
||||
``` text
|
||||
./customized_kernel/
|
||||
├── __init__.py
|
||||
├── kernel_script.py
|
||||
├── config_sampler.py
|
||||
└── feature_parser.py
|
||||
```
|
||||
|
||||
The customized kernel class `MyKernel` is stored in `./customized_kernel/kernel_script.py`, the customized sampler `MySampler` in `./customized_kernel/config_sampler.py`, and the feature parser `MyParser` in `./customized_kernel/feature_parser.py`.
|
||||
|
||||
### Step 2: Prepare Meta File
|
||||
|
||||
Create a yaml file with following keys as meta file:
|
||||
|
||||
- `builtin_name`: builtin name used in nn-Meter configuration file to call the customized kernel, such as `"mykernel"`.
|
||||
|
||||
- `package_location`: the absolute path of the package folder.
|
||||
|
||||
- `class_module`: the module of the kernel class, in this example is `kernel_script`, representing `kernel_script.py`.
|
||||
|
||||
- `class_name`: the kernel class name, in this example is `MyKernel`.
|
||||
|
||||
- `sampler_module`: the module of the kernel sampler, in this example is `config_sampler`, representing `config_sampler.py`.
|
||||
|
||||
- `sampler_name`: the kernel sampler name, in this example is `MySampler`.
|
||||
|
||||
- `parser_module`: the module of the kernel feature parser, in this example is `feature_parser`, representing `feature_parser.py`.
|
||||
|
||||
- `parser_name`: the kernel parser name, in this example is `MyParser`.
|
||||
|
||||
Following is an example of the yaml file:
|
||||
|
||||
```yaml
|
||||
builtin_name: mykernel
|
||||
package_location: /home/USERNAME/working/tftest/kernel_package
|
||||
class_module: kernel_script
|
||||
class_name: MyKernel
|
||||
sampler_module: config_sampler
|
||||
sampler_name: MySampler
|
||||
parser_module: feature_parser
|
||||
parser_name: MyParser
|
||||
```
|
||||
|
||||
### Step 3: Register Customized Kernel into nn-Meter
|
||||
|
||||
Run the following command to register customized kernel into nn-Meter:
|
||||
|
||||
``` bash
|
||||
nn-meter register --kernel path/to/meta/file
|
||||
```
|
||||
If the registration succeeds, nn-Meter will show:
|
||||
``` text
|
||||
(nn-Meter) Successfully register kernel: mykernel
|
||||
```
|
||||
|
||||
During registration, nn-Meter will first test whether the module can be imported. If the registration is not successful, please check the package according to the error information.
|
||||
|
||||
After kernel registration, users can view all kernels by running:
|
||||
``` bash
|
||||
nn-meter --list-kernels
|
||||
```
|
||||
```text
|
||||
(nn-Meter) Supported kernels: ('*' indicates customized kernels)
|
||||
(nn-Meter) [Kernel] conv_bn_relu
|
||||
(nn-Meter) [Kernel] conv_bn_relu6
|
||||
(nn-Meter) [Kernel] conv_bn
|
||||
(nn-Meter) [Kernel] conv_relu
|
||||
(nn-Meter) [Kernel] conv_relu6
|
||||
(nn-Meter) [Kernel] conv_hswish
|
||||
(nn-Meter) [Kernel] conv_block
|
||||
(nn-Meter) [Kernel] conv_bn_hswish
|
||||
(nn-Meter) [Kernel] dwconv_bn
|
||||
(nn-Meter) [Kernel] dwconv_relu
|
||||
(nn-Meter) [Kernel] dwconv_relu6
|
||||
(nn-Meter) [Kernel] dwconv_bn_relu
|
||||
(nn-Meter) [Kernel] dwconv_bn_relu6
|
||||
(nn-Meter) [Kernel] dwconv_block
|
||||
(nn-Meter) [Kernel] dwconv_bn_hswish
|
||||
(nn-Meter) [Kernel] maxpool_block
|
||||
(nn-Meter) [Kernel] avgpool_block
|
||||
(nn-Meter) [Kernel] fc_block
|
||||
(nn-Meter) [Kernel] concat_block
|
||||
(nn-Meter) [Kernel] split_block
|
||||
(nn-Meter) [Kernel] channel_shuffle
|
||||
(nn-Meter) [Kernel] se_block
|
||||
(nn-Meter) [Kernel] globalavgpool_block
|
||||
(nn-Meter) [Kernel] bn_relu
|
||||
(nn-Meter) [Kernel] bn_block
|
||||
(nn-Meter) [Kernel] hswish_block
|
||||
(nn-Meter) [Kernel] relu_block
|
||||
(nn-Meter) [Kernel] add_relu
|
||||
(nn-Meter) [Kernel] add_block
|
||||
(nn-Meter) [Kernel] * mykernel
|
||||
```
|
||||
|
||||
Note: the package of the customized kernel must remain at the path it was registered with. Otherwise, errors may occur when calling the registered module.
|
||||
|
||||
## Use the Customized Kernel in Experiment
|
||||
|
||||
After registration, users could build latency predictor for the customized kernel:
|
||||
|
||||
```python
|
||||
# initialize builder config with workspace
|
||||
from nn_meter.builder import builder_config
|
||||
builder_config.init("path/to/workspace/folder")
|
||||
|
||||
# build latency predictor for customized kernel
|
||||
from nn_meter.builder import build_predictor_for_kernel
|
||||
kernel_type = "mykernel"
|
||||
backend = "tflite_cpu"
|
||||
|
||||
predictor, data = build_predictor_for_kernel(
|
||||
kernel_type, backend, init_sample_num = 1000, finegrained_sample_num = 10, iteration = 5, error_threshold = 0.1
|
||||
)
|
||||
```
|
||||
|
||||
## Manage the Registered Kernel
|
||||
|
||||
Users could unregister the kernel by calling its name in command:
|
||||
|
||||
``` bash
|
||||
nn-meter unregister --kernel mykernel
|
||||
```
|
||||
``` text
|
||||
(nn-Meter) Successfully unregister mykernel.
|
||||
```
|
||||
|
||||
After unregistering the kernel, "mykernel" will be removed from the kernel list.
|
||||
|
|
@ -0,0 +1,114 @@
|
|||
# Use Customized Predictor for Latency Prediction
|
||||
|
||||
nn-Meter supports customized latency predictors, which can be built on users' devices. To utilize customized predictor in nn-Meter, users should provide all the necessary kernel latency predictors and a fusion rule json file. Users could use [nn-Meter builder](../builder/overview.md) to build their own latency predictors.
|
||||
|
||||
After preparing the kernel latency predictors and the fusion rule following the guidance [here](../builder/overview.md), users should register the predictor to nn-Meter for reuse. First of all, put all kernel latency predictors and the fusion rule json file into one folder at a predefined location. Each kernel latency predictor should be named by its kernel name, such as `"conv-bn-relu.pkl"`. The fusion rule json file should be named `"fusion_rules.json"`.
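For instance, the folder could be laid out as follows (a hypothetical layout; the `.pkl` file names must match the kernel names listed in the meta file):

``` text
./customized_predictor/
├── fusion_rules.json
├── conv-bn-relu.pkl
├── dwconv-bn-relu.pkl
├── fc.pkl
└── ...
```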
|
||||
|
||||
### Step 1: Prepare Meta File
|
||||
|
||||
Create a yaml file with following keys as meta file:
|
||||
|
||||
- `name`: builtin name used in nn-Meter configuration file to call the customized predictor, such as `"my_predictor"`.
|
||||
|
||||
- `version`: the version of the customized predictor.
|
||||
|
||||
- `category`: the category of the backend platform of the predictor, such as `"cpu"` or `"gpu"`.
|
||||
|
||||
- `package_location`: the absolute path of the folder containing all predictors and fusion rule file.
|
||||
|
||||
- `kernel_predictors`: list of all kernel latency predictors. Note that the name of each predictor's `.pkl` file should be the same as its listed name.
|
||||
|
||||
Following is an example of the yaml file:
|
||||
|
||||
```yaml
|
||||
name: my_predictor
|
||||
version: 1.0
|
||||
category: cpu
|
||||
package_location: /home/USERNAME/working/customized_predictor
|
||||
kernel_predictors:
|
||||
- conv-bn-relu
|
||||
- dwconv-bn-relu
|
||||
- fc
|
||||
- global-avgpool
|
||||
- hswish
|
||||
- relu
|
||||
- se
|
||||
- split
|
||||
- add
|
||||
- addrelu
|
||||
- maxpool
|
||||
- avgpool
|
||||
- bn
|
||||
- bnrelu
|
||||
- channelshuffle
|
||||
- concat
|
||||
```
|
||||
|
||||
### Step 2: Register Customized Predictor into nn-Meter
|
||||
|
||||
Run the following command to register customized predictor into nn-Meter:
|
||||
|
||||
``` bash
|
||||
nn-meter register --predictor path/to/meta/file
|
||||
```
|
||||
If the registration succeeds, nn-Meter will show:
|
||||
``` text
|
||||
(nn-Meter) Successfully register predictor my_predictor
|
||||
```
|
||||
|
||||
After predictor registration, users can view all predictors by running:
|
||||
``` bash
|
||||
nn-meter --list-predictors
|
||||
```
|
||||
```text
|
||||
(nn-Meter) Supported latency predictors:
|
||||
(nn-Meter) [Predictor] cortexA76cpu_tflite21: version=1.0
|
||||
(nn-Meter) [Predictor] adreno640gpu_tflite21: version=1.0
|
||||
(nn-Meter) [Predictor] adreno630gpu_tflite21: version=1.0
|
||||
(nn-Meter) [Predictor] myriadvpu_openvino2019r2: version=1.0
|
||||
(nn-Meter) [Predictor] my_predictor: version=1.0
|
||||
```
|
||||
|
||||
Note: the folder of the customized predictor must remain at the path it was registered with. Otherwise, errors may occur when calling the registered module.
|
||||
|
||||
## Use the Customized Latency Predictor
|
||||
|
||||
After registration, users can access the customized predictor in the same way as the builtin predictors. Follow [the usage guidance](usage.md) for all usages.
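For example, following the standard predictor usage, the registered predictor can be loaded by its builtin name and version (a sketch; the model path and type are placeholders):

```python
import nn_meter

# load the registered predictor by its builtin name and version
predictor = nn_meter.load_latency_predictor("my_predictor", 1.0)

# predict latency for a model; the path and model type here are placeholders
latency = predictor.predict("path/to/model.onnx", model_type="onnx")
```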
|
||||
|
||||
|
||||
## Manage the Registered Predictor
|
||||
|
||||
Users can view all builtin and registered predictors by running:
|
||||
``` bash
|
||||
nn-meter --list-predictors
|
||||
```
|
||||
```text
|
||||
(nn-Meter) Supported latency predictors:
|
||||
(nn-Meter) [Predictor] cortexA76cpu_tflite21: version=1.0
|
||||
(nn-Meter) [Predictor] adreno640gpu_tflite21: version=1.0
|
||||
(nn-Meter) [Predictor] adreno630gpu_tflite21: version=1.0
|
||||
(nn-Meter) [Predictor] myriadvpu_openvino2019r2: version=1.0
|
||||
(nn-Meter) [Predictor] my_predictor: version=1.0
|
||||
```
|
||||
|
||||
Besides, users could unregister the predictor by calling its name in command:
|
||||
|
||||
``` bash
|
||||
nn-meter unregister --predictor my_predictor
|
||||
```
|
||||
``` text
|
||||
(nn-Meter) Successfully unregister my_predictor.
|
||||
```
|
||||
|
||||
After unregistering the predictor, "my_predictor" will be removed from the predictor list:
|
||||
|
||||
``` bash
|
||||
nn-meter --list-predictors
|
||||
```
|
||||
``` text
|
||||
(nn-Meter) Supported latency predictors:
|
||||
(nn-Meter) [Predictor] cortexA76cpu_tflite21: version=1.0
|
||||
(nn-Meter) [Predictor] adreno640gpu_tflite21: version=1.0
|
||||
(nn-Meter) [Predictor] adreno630gpu_tflite21: version=1.0
|
||||
(nn-Meter) [Predictor] myriadvpu_openvino2019r2: version=1.0
|
||||
```
|
|
@ -0,0 +1,54 @@
|
|||
# nn-Meter Builder
|
||||
|
||||
nn-Meter builder is an open source tool for users to build latency predictors on their own devices. There are three main parts in nn-Meter builder:
|
||||
|
||||
- **backend**: the module of connecting backends;
|
||||
|
||||
- **backend_meta**: the meta tools related to backend, such as fusion rule tester to detect fusion rules for users' backend;
|
||||
|
||||
- **kernel_predictor_builder**: the tool to build different kernel latency predictors.
|
||||
|
||||
## <span id="create-workspace"> Create Workspace </span>
|
||||
|
||||
Before using nn-Meter builder, a workspace folder should be created. In nn-Meter builder, a workspace refers to a directory that saves experiment configs, test case models for detecting fusion rules, kernel models for building the latency predictor, and result files for a group of experiments. Users can create a workspace folder by running the following command:
|
||||
|
||||
``` Bash
|
||||
# for TFLite platform
|
||||
nn-meter create --tflite-workspace <path/to/place/workspace/>
|
||||
|
||||
# for OpenVINO platform
|
||||
nn-meter create --openvino-workspace <path/to/place/workspace/>
|
||||
|
||||
# for customized platform
|
||||
nn-meter create --customized-workspace <backend-name> <path/to/place/workspace/>
|
||||
```
|
||||
|
||||
After running the command, a workspace folder will be created, and a series of configuration files will be placed in `<workspace-path>/configs/`. Users could open `<workspace-path>/configs/*.yaml` and edit the content to change the configuration.
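For reference, the resulting workspace layout is roughly as follows (the exact set of config files depends on the chosen platform):

``` text
<workspace-path>/
└── configs/
    ├── backend_config.yaml
    ├── ruletest_config.yaml
    └── predictorbuild_config.yaml
```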
|
||||
|
||||
Then, users are required to initialize workspace in `builder_config` module via python binding:
|
||||
|
||||
```python
|
||||
from nn_meter.builder import builder_config
|
||||
|
||||
# initialize builder config with workspace
|
||||
builder_config.init(
|
||||
workspace_path="path/to/workspace/folder"
|
||||
) # change the text to required platform type and workspace path
|
||||
```
|
||||
|
||||
Note: after executing ``builder_config.init``, the configs are loaded permanently. If users want to update a config, it is required to repeat this initialization process.
|
||||
|
||||
## Connect Backend
|
||||
|
||||
Please refer to [prepare_backend.md](prepare_backend.md) to prepare your own backend.
|
||||
|
||||
## Detect Fusion Rule
|
||||
|
||||
Please refer to [test_fusion_rules.md](test_fusion_rules.md) to detect fusion rule.
|
||||
|
||||
## Build Kernel Latency Predictor
|
||||
|
||||
Please refer to [build_kernel_latency_predictor.md](build_kernel_latency_predictor.md) to build kernel latency predictor.
|
||||
|
||||
## Use Customized Predictor for Latency Prediction
|
||||
Please refer to [customize_predictor.md](customize_predictor.md) to utilize customized kernel latency predictors for model latency prediction.
|
|
@ -0,0 +1,334 @@
|
|||
# Setup Device and Backend
|
||||
|
||||
To profile the inference latency of a model on mobile devices, we implement several backends, where each backend consists of a software-stack inference framework and a hardware. These backends will run the model and parse the command line outputs to get latency results. To hide the measurement complexities caused by various inference frameworks and hardware devices, we provide a consistent API for backends. Currently we provide three instances on two inference frameworks, i.e., CPU backend (named `"tflite_cpu"`), GPU backend (named `"tflite_gpu"`) with TFLite platform, and VPU backend (named `"openvino_vpu"`) with OpenVINO platform.
|
||||
|
||||
Users could list all the supported backends by running
|
||||
```
|
||||
nn-meter --list-backends
|
||||
```
|
||||
|
||||
Besides the current backends, users can implement a customized backend via nn-Meter to build latency predictors for their own devices. We allow users to install the customized backend as a builtin backend, so that it can be used in the same way as nn-Meter builtin backends. To use the customized backend, users can follow the [customize backend guidance](./build_customized_backend.md).
|
||||
|
||||
Next, we will introduce how to build the customized backend. The process is to setup the device and get connection to the backend.
|
||||
|
||||
|
||||
## Setup Device
|
||||
|
||||
### Prepare Your Environment
|
||||
When you build a new backend, we recommend to use [virtualenv](https://virtualenv.pypa.io/en/latest/). We use python3.6 as our test environment.
|
||||
|
||||
``` Bash
|
||||
virtualenv openvino_env
|
||||
source openvino_env/bin/activate
|
||||
pip install -r docs/requirements/openvino_requirements.txt
|
||||
deactivate
|
||||
```
|
||||
|
||||
### TFLite Android Guide
|
||||
|
||||
TFLite is a widely-used and efficient inference framework for Android devices. To set up the Android device and TFLite framework, we list the major steps as follows.
|
||||
|
||||
#### 1. Install ADB and Android SDK
|
||||
Follow [Android Guide](https://developer.android.com/studio) to install adb on your host device.
|
||||
|
||||
The easiest way is to directly download Android Studio from [this page](https://developer.android.com/studio). After installing it, you will find adb at path `$HOME/Android/Sdk/platform-tools/`.
|
||||
|
||||
|
||||
#### 2. Get TFLite Benchmark Model
|
||||
The `benchmark_model` is a tool provided by [TensorFlow Lite](https://www.tensorflow.org/lite/), which can run a model and output its latency. Because nn-Meter needs to parse the text output of `benchmark_model`, a fixed version is required. For the convenience of users, we have released two modified version of `benchmark_model` based on `tensorflow==2.1` and `tensorflow==2.7`, respectively. Users could download our modified version of `benchmark_model` from [here](https://github.com/microsoft/nn-Meter/releases/tag/v2.0-data).
|
||||
|
||||
NOTE: On the same hardware, different versions of `benchmark_model` can result in different inference latencies for the same model. We recommend users compile and build `benchmark_model` against the latest version. Users could follow the [Official Guidance](https://www.tensorflow.org/lite/performance/measurement) to build the benchmark tool with a newer version of `TensorFlow Lite`. Meanwhile, the `LatencyParser` class may need to be refined. We are working to release the source code of this modified version.
|
||||
|
||||
|
||||
#### 3. Setup Benchmark Tool on Device
|
||||
|
||||
Push the `benchmark_model` to edge device by specifying its serial (if any).
|
||||
|
||||
``` Bash
|
||||
adb [-s <device-serial>] push bazel-bin/tensorflow/lite/tools/benchmark/benchmark_model /data/local/tmp
|
||||
|
||||
# add executable permission to benchmark_model
|
||||
adb shell chmod +x /data/local/tmp/benchmark_model
|
||||
```
|
||||
|
||||
### OpenVINO VPU Guide
|
||||
|
||||
Follow [OpenVINO Installation Guide](https://docs.openvinotoolkit.org/latest/installation_guides.html) to install openvino on your host.
|
||||
|
||||
|
||||
## <span id="prepare-configuration-file"> Prepare Configuration File </span>
|
||||
|
||||
When connecting to a backend, a series of configs should be manually defined by users. Firstly, users are required to create a workspace folder ([Workspace Guidance](overview.md#create-workspace)). Then, a yaml file named `backend_config.yaml` will be created in `<workspace-path>/configs/`. Users can open `<workspace-path>/configs/backend_config.yaml` and edit the content to change the configuration.
|
||||
|
||||
Specifically, for Android CPU or GPU backends, the required parameters include the following (an example config snippet follows the list):
|
||||
|
||||
- `REMOTE_MODEL_DIR`: path to the folder (on mobile device) where temporary models will be copied to.
|
||||
- `KERNEL_PATH`: path (on mobile device) where the kernel implementations will be dumped.
|
||||
- `BENCHMARK_MODEL_PATH`: path (on android device) where the binary file `benchmark_model` is deployed.
|
||||
- `DEVICE_SERIAL`: if there are multiple adb devices connected to your host, you need to provide the corresponding serial id. Set to `''` if there is only one device connected to your host.
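For example, the corresponding section of `<workspace-path>/configs/backend_config.yaml` might look like the following (all values are placeholders to adjust for your setup):

```yaml
# placeholder values; adjust to your device and host setup
REMOTE_MODEL_DIR: /data/local/tmp/models
KERNEL_PATH: /data/local/tmp/kernels
BENCHMARK_MODEL_PATH: /data/local/tmp/benchmark_model
DEVICE_SERIAL: ''
```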
|
||||
|
||||
For VPU backends with OpenVINO, the required parameters include:
|
||||
|
||||
- `OPENVINO_ENV`: path to openvino virtual environment (./docs/requirements/openvino_requirements.txt is provided)
|
||||
- `OPTIMIZER_PATH`: path to openvino optimizer
|
||||
- `OPENVINO_RUNTIME_DIR`: directory to openvino runtime
|
||||
- `DEVICE_SERIAL`: serial id of the device
|
||||
- `DATA_TYPE`: data type of the model (e.g., fp16, fp32)
|
||||
|
||||
When the configuration edits are done, users should initialize the workspace in `builder_config` module before connecting the backend:
|
||||
|
||||
```python
|
||||
from nn_meter.builder import builder_config
|
||||
|
||||
# initialize builder config with workspace
|
||||
builder_config.init(
|
||||
workspace_path="path/to/workspace/folder"
|
||||
) # change the text to required platform type and workspace path
|
||||
```
|
||||
|
||||
Note: after executing ``builder_config.init``, the configs are loaded permanently. If users want to update a config, it is required to repeat this initialization process.
|
||||
|
||||
|
||||
## Connect to Backend
|
||||
We recommend users run the following command to test the connection with your backend:
|
||||
|
||||
``` Bash
|
||||
nn-meter connect --backend <backend-name> --workspace <path/to/workspace>
|
||||
```
|
||||
|
||||
If the connection is successfully built, a message will be shown as:
|
||||
|
||||
``` text
|
||||
(nn-Meter) hello backend !
|
||||
```
|
||||
|
||||
To apply the backend for model inference and profiling, nn-Meter provides an interface `connect_backend` to initialize the backend connection. When using `connect_backend`, you need to specify the name of the backend.
|
||||
|
||||
```python
|
||||
# initialize workspace in code
|
||||
workspace_path = "/path/to/workspace/"
|
||||
from nn_meter.builder import builder_config
|
||||
builder_config.init(workspace_path)
|
||||
|
||||
# connect to backend
|
||||
from nn_meter.builder.backends import connect_backend
|
||||
backend = connect_backend(backend_name='tflite_cpu')
|
||||
...
|
||||
```
|
||||
Users can follow [this example](../../examples/nn-meter_builder_with_tflite.ipynb) to get more details about our API.
|
||||
|
||||
|
||||
# <span id="build-customized-backend"> Build Customized Backend </span>
|
||||
|
||||
## Prepare Customized Backend Class
|
||||
|
||||
nn-Meter provides an API for users to customize their own backend. Here we first describe the implementation of `BaseBackend`. We define the base class of all backends in `nn_meter.builder.backend.BaseBackend` as follows:
|
||||
|
||||
- `profiler_class`: a subclass inherited from `nn_meter.builder.backend.BaseProfiler` to specify the running command of the backend. A profiler contains commands to push the model to the mobile device, run the model on the mobile device, get stdout from the mobile device, and related operations. In the implementation of a profiler, the interface ``Profiler.profile()`` is required. Users need to modify this **most of the time**.
|
||||
|
||||
- `profile`: the main steps of ``Profiler.profile()`` include 1) pushing the model file to the edge device, and 2) profiling the model the required number of times and fetching the running results. It returns the running results from the edge device.
|
||||
|
||||
- `parser_class`: a subclass inherited from `nn_meter.builder.backend.BaseParser` to parse the profiled results. A parser parses the stdout from the device runner and gets the required metrics. In the implementation of a parser, the interface `Parser.parse()` and the property `Parser.results()` are required. Users are required to modify this **all the time**.
|
||||
|
||||
- `parse`: a string parser to parse the profiled result values from the standard output of the device runner. This method should return the class instance itself.
|
||||
|
||||
- `results`: wrap the parsed results with the ``ProfiledResults`` class from ``nn_meter.builder.backend_meta.utils`` and return the parsed result values.
|
||||
|
||||
- `update_configs`: update the config parameters for the backend. Users need to modify this **all the time**.
|
||||
|
||||
- `convert_model`: convert the Keras model instance to the type required by the backend for inference. **Sometimes** you will need to modify this.
|
||||
|
||||
- `profile`: load the model by its file path and run ``self.profile()``. nn-Meter only supports latency as the metric for now. Users may provide other metrics in their customized backend. **Most of the time you won't** need to modify this.
|
||||
|
||||
- `profile_model_file`: load the model by its file path and run ``self.profile()``. **Most of the time you won't** need to modify this.
|
||||
|
||||
- `test_connection`: check the status of the backend interface connection. **Sometimes you won't** need to implement this as it is for testing only.
|
||||
|
||||
Here is an example of how to create a new backend class:
|
||||
|
||||
```python
|
||||
from nn_meter.builder.backend import BaseBackend, BaseParser, BaseProfiler
|
||||
|
||||
class MyParser(BaseParser): ...
|
||||
class MyProfiler(BaseProfiler): ...
|
||||
|
||||
class MyBackend(BaseBackend):
|
||||
parser_class = MyParser
|
||||
profiler_class = MyProfiler
|
||||
```
|
||||
|
||||
Besides these customized backends, nn-Meter also provides a TFLite backend (`nn_meter.builder.backend.TFLiteBackend`) and an OpenVINO backend (`nn_meter.builder.backend.OpenVINOBackend`). If users want to create a new device instance based on TFLite or OpenVINO, they can first inherit these two classes. Some methods such as `convert_model`, `profile`, and `test_connection` can then be reused.
|
||||
|
||||
Here is an example that firstly inherits `TFLiteBackend` and then creates a backend named `my_tflite`:
|
||||
|
||||
```python
|
||||
from nn_meter.builder.backend import TFLiteBackend, TFLiteProfiler, BaseParser
|
||||
|
||||
class MyParser(BaseParser): ...
|
||||
class MyProfiler(TFLiteProfiler): ...
|
||||
|
||||
class MyTFLiteBackend(TFLiteBackend):
|
||||
parser_class = MyParser
|
||||
profiler_class = MyProfiler
|
||||
```
|
||||
|
||||
## Register Backend to nn-Meter
|
||||
|
||||
### Step 1: Create a Package for the Customized Backend
|
||||
|
||||
After preparing the backend class, users should also prepare a default config file in yaml format if there are any modifiable configs. This config file will be copied to the workspace when running `nn-meter create --customized-workspace`. Users can refer to [the Configuration of TFLite and OpenVINO](#prepare-configuration-file) as a reference. nn-Meter suggests users gather all backend code and the default config file in a package at a predefined location. The folder should contain all relevant classes, such as `Parser` and `Profiler`. A folder will be treated as a package once an `__init__.py` file is added. Here is a demo of the folder structure:
|
||||
|
||||
``` text
|
||||
./customized_backend/
|
||||
├── __init__.py
|
||||
├── backend.py
|
||||
├── utils.py
|
||||
└── default_config.yaml
|
||||
```
|
||||
|
||||
The interface of customized backend class is stored in `./customized_backend/backend.py`. In this demo, `backend.py` includes:
|
||||
|
||||
``` python
|
||||
import logging
|
||||
from nn_meter.builder.backends import BaseBackend, BaseParser, BaseProfiler
|
||||
|
||||
class MyParser(BaseParser): ...
|
||||
|
||||
class MyProfiler(BaseProfiler): ...
|
||||
|
||||
class MyBackend(BaseBackend):
|
||||
parser_class = MyParser
|
||||
profiler_class = MyProfiler
|
||||
|
||||
def __init__(self, config):
|
||||
pass
|
||||
|
||||
def test_connection(self):
|
||||
"""check the status of backend interface connection
|
||||
"""
|
||||
...
|
||||
logging.keyinfo("hello backend !")
|
||||
```
|
||||
|
||||
### Step 2: Prepare Meta File
|
||||
|
||||
Create a yaml file with following keys as meta file:
|
||||
|
||||
- `builtin_name`: builtin name used in nn-Meter configuration file to call the customized backend, such as `"my_backend"`.
|
||||
|
||||
- `package_location`: the absolute path of the package.
|
||||
|
||||
- `class_module`: the module of the backend class, in this example is `backend`, representing `backend.py`.
|
||||
|
||||
- `class_name`: the backend class name, in this example is `MyBackend`.
|
||||
|
||||
- `defaultConfigFile`: the absolute path of the default configuration file.
|
||||
|
||||
Following is an example of the yaml file:
|
||||
|
||||
```yaml
|
||||
builtin_name: my_backend
|
||||
package_location: /home/USERNAME/working/customized_backend
|
||||
class_module: backend
|
||||
class_name: MyBackend
|
||||
defaultConfigFile: /home/USERNAME/working/customized_backend/default_config.yaml
|
||||
```
|
||||
|
||||
### Step 3: Register Customized Backend into nn-Meter
|
||||
|
||||
Run the following command to register customized backend into nn-Meter:
|
||||
|
||||
``` bash
|
||||
nn-meter register --backend path/to/meta/file
|
||||
```
|
||||
If the registration succeeds, nn-Meter will show:
|
||||
``` text
|
||||
(nn-Meter) Successfully register backend my_backend.
|
||||
```
|
||||
|
||||
nn-Meter will test whether the module can be imported during the registration process. If the registration process is not successful, please check the package according to the error information.
|
||||
|
||||
After the backend registration, users can check all backends by running:
|
||||
``` bash
|
||||
nn-meter --list-backends
|
||||
```
|
||||
```text
|
||||
(nn-Meter) Supported backends: ('*' indicates customized backends)
|
||||
(nn-Meter) [Backend] tflite_cpu
|
||||
(nn-Meter) [Backend] tflite_gpu
|
||||
(nn-Meter) [Backend] openvino_vpu
|
||||
(nn-Meter) [Backend] * my_backend
|
||||
```
|
||||
|
||||
Note: the package of the customized backend must remain at the path it was registered with. Otherwise, errors may occur when calling the registered module.
|
||||
|
||||
### Step 4: Test the Registered Backend
|
||||
|
||||
After the registration, users can create customized workspace according to the customized backend:
|
||||
|
||||
``` bash
|
||||
nn-meter create --customized-workspace <workspace-path> --backend my_backend
|
||||
```
|
||||
``` text
|
||||
(nn-Meter) Workspace <workspace-path> for customized platform has been created. Users can edit experiment config in <workspace-path>/configs/.
|
||||
```
|
||||
|
||||
Users could edit experiment configuration file in `<workspace-path>/configs/backend_config.yaml`, and test the connection to the registered backend by running:
|
||||
|
||||
``` bash
|
||||
nn-meter connect --backend my_backend --workspace <workspace-path>
|
||||
```
|
||||
```
|
||||
(nn-Meter) hello backend !
|
||||
```
|
||||
|
||||
## Use the Customized Backend in Experiment
|
||||
|
||||
If the backend is successfully registered, users can access the customized backend by calling its builtin name:
|
||||
|
||||
``` python
|
||||
# initialize builder config with workspace
|
||||
from nn_meter.builder import builder_config
|
||||
builder_config.init(workspace_path="...") # the path of workspace
|
||||
|
||||
# connect to backend
|
||||
from nn_meter.builder.backends import connect_backend
|
||||
backend = connect_backend(backend_name='my_backend')
|
||||
```
|
||||
|
||||
## Manage the Registered Backend
|
||||
|
||||
Users can view all builtin and registered backends by running:
|
||||
|
||||
``` bash
|
||||
nn-meter --list-backends
|
||||
```
|
||||
```text
|
||||
(nn-Meter) Supported backends: ('*' indicates customized backends)
|
||||
(nn-Meter) [Backend] tflite_cpu
|
||||
(nn-Meter) [Backend] tflite_gpu
|
||||
(nn-Meter) [Backend] openvino_vpu
|
||||
(nn-Meter) [Backend] * my_backend
|
||||
```
|
||||
|
||||
Besides, users can unregister the backend by calling its name in command:
|
||||
|
||||
``` bash
|
||||
nn-meter unregister --backend my_backend
|
||||
```
|
||||
``` text
|
||||
(nn-Meter) Successfully unregister my_backend.
|
||||
```
|
||||
|
||||
After unregistering the backend, "my_backend" will be removed from the backend list:
|
||||
|
||||
``` bash
|
||||
nn-meter --list-backends
|
||||
```
|
||||
``` text
|
||||
(nn-Meter) Supported backends: ('*' indicates customized backends)
|
||||
(nn-Meter) [Backend] tflite_cpu
|
||||
(nn-Meter) [Backend] tflite_gpu
|
||||
(nn-Meter) [Backend] openvino_vpu
|
||||
```
|
|
@ -0,0 +1,655 @@
|
|||
# Build Fusion Rule Tester
|
||||
|
||||
A fusion rule tester creates a series of models (what we call "test case" in nn-Meter). It generates test case models of pairs of operators, profiles the models' latency, and finally, detects the fusion rules for every pair of operators. To build a fusion rule tester, there are four steps to implement the rule detection.
|
||||
|
||||
## Step 1. Prepare Backends and Create Workspace
|
||||
|
||||
The first step to run the fusion rule tester is to prepare backends and create a workspace. Users could follow the guidance in [Prepare Backends](./prepare_backend.md) and [Create Workspace](./overview.md#create-workspace) for this step.
|
||||
|
||||
After creating the workspace, a yaml file named `ruletest_config.yaml` will be placed in `<workspace-path>/configs/`. The fusion rule test configs include:
|
||||
|
||||
- `HW`: Default input shape of all test cases except those requiring 1d tensor input. Default value is `28`.
|
||||
- `CIN`: Default input channel of all test cases. Default value is `16`.
|
||||
- `SHAPE_1D`: Default input shape of all testcases that need 1d tensor input. E.g., fully connected layer. Default value is `428`.
|
||||
- `COUT`: Default output channel (filter size). Default value is `256`.
|
||||
- `KERNEL_SIZE`: Default kernel size. Default value is `3`.
|
||||
- `PADDING`: Default padding type. Default value is `"same"`.
|
||||
- `STRIDES`: Default strides size. Default value is `1`.
|
||||
- `POOL_STRIDES`: Default strides size for pooling operator. Default value is `2`.
|
||||
- `EPS_ALPHA`: The empirical coefficient used as a threshold in the formula of [step 4](#step-4-detect-fusion-rule) to decide whether two ops can be fused, for test cases of BasicFusion. Default value is `0.5`.
|
||||
- `DETAIL`: Whether to attach detailed information to the json output, such as the shape information in the profiled results and the latency results of each test case in the detected fusion rules. Default value is `FALSE`.
|
||||
- `BASIC_TESTCASES`: the list of test cases to run. Generally, there are three types of operator connections considered (see [Test Cases](#test-cases)). Basic test cases detect the fusion rule of operator pairs with a single inbound and outbound connection.
|
||||
- `OTHER_TESTCASES`: in this list, `'MON'` detects the fusion rules for the multiple-outbound connection. Besides, users can add the names of customized test cases after test case registration. For more details, refer to the introduction of [Test Cases](#test-cases) and [Build Customized Test Cases](#build-customized-test-cases).
|
||||
- `LAYERS_1D`: the list of layer names that take 1d tensor input. If the input to a layer must be a 1-dimensional tensor, users need to add the layer name here.
|
||||
|
||||
Note: nn-Meter doesn't support different `"COUT"` parameters for conv layers. If there are two successive convolutional layers in a test case, the output channels of both layers will equal the `"COUT"` parameter.
|
||||
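For reference, a minimal `ruletest_config.yaml` assembled from the default values above might look like the following sketch (the generated file may contain additional fields or a different ordering):

```yaml
HW: 28
CIN: 16
SHAPE_1D: 428
COUT: 256
KERNEL_SIZE: 3
PADDING: "same"
STRIDES: 1
POOL_STRIDES: 2
EPS_ALPHA: 0.5
DETAIL: FALSE
BASIC_TESTCASES:
  - conv_relu
OTHER_TESTCASES:
  - MON
LAYERS_1D:
  - fc
```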
|
||||
Users could open `<workspace-path>/configs/ruletest_config.yaml` and edit the content. After completing the configuration, users could initialize the workspace in the `builder_config` module before building the fusion rule tester:
|
||||
|
||||
```python
|
||||
from nn_meter.builder import builder_config
|
||||
|
||||
# initialize builder config with workspace
|
||||
builder_config.init(
|
||||
workspace_path="path/to/workspace/folder"
|
||||
) # change the text to the required workspace path
|
||||
```
|
||||
|
||||
Note: after running `builder_config.init`, the config is already loaded. If users want to update the config, they should save and close the updated config file, then run `builder_config.init` again; the changes take effect once the config space is reloaded.
|
||||
|
||||
## Step 2. Create Test Cases
|
||||
|
||||
Following the configuration in `<workspace-path>/configs/ruletest_config.yaml`, the test cases can be created by running:
|
||||
|
||||
```python
|
||||
from nn_meter.builder.backend_meta.fusion_rule_tester import generate_testcases
|
||||
|
||||
# generate testcases
|
||||
origin_testcases = generate_testcases()
|
||||
```
|
||||
|
||||
The test case models will be saved in `<workspace-path>/fusion_rule_test/models/`, and the information of test cases will be saved in `<workspace-path>/fusion_rule_test/results/origin_testcases.json`.
|
||||
|
||||
## Step 3. Run Test Cases on Given Backend
|
||||
|
||||
Given the required backend, users could run the test case models and get the profiled latency values by running:
|
||||
|
||||
```python
|
||||
# connect to backend
|
||||
from nn_meter.builder.backends import connect_backend
|
||||
backend = connect_backend(backend_name='tflite_cpu')
|
||||
|
||||
# run testcases and collect profiling results
|
||||
from nn_meter.builder import profile_models
|
||||
profiled_results = profile_models(backend, origin_testcases, mode='ruletest')
|
||||
```
|
||||
`backend` refers to the name of the concrete device on which to execute the model. Currently we provide three device instances, i.e., a CPU backend and a GPU backend on the TFLite platform, and a VPU backend on the OpenVINO platform. Refer to the [backend guidance](./prepare_backend.md) for how to set up the device and connect to the backend. To use a customized backend, users can follow the [customize backend guidance](./prepare_backend.md#build_customized_backend).
|
||||
|
||||
The profiled test cases dictionary will be saved in `<workspace-path>/fusion_rule_test/results/profiled_results.json`.
|
||||
|
||||
## <span id="step-4-detect-fusion-rule"> Step 4. Detect Fusion Rule </span>
|
||||
|
||||
Finally, users could detect the fusion rule according to the profiled test cases by running:
|
||||
|
||||
```python
|
||||
from nn_meter.builder.backend_meta.fusion_rule_tester import detect_fusion_rule
|
||||
|
||||
# determine fusion rules from profiling results
|
||||
detected_results = detect_fusion_rule(profiled_results)
|
||||
```
|
||||
|
||||
Two operators $Op1$ and $Op2$ are regarded as being fused as $Op1+Op2$ if the operator latencies satisfy:
|
||||
$$
|
||||
T_{Op1} + T_{Op2} - T_{Op1,Op2} > \alpha * min(T_{Op1}, T_{Op2})
|
||||
$$
|
||||
|
||||
After running `detect_fusion_rule`, a json file named `<workspace-path>/fusion_rule_test/results/detected_results.json` will be created to save the detection results. The results show whether each test case obeys the fusion rule. An instance from the detection results is shown below:
|
||||
|
||||
```json
|
||||
"BF_se_relu": {
|
||||
"latency": {
|
||||
"block": "20.3537 +- 1.0",
|
||||
"se": "20.521 +- 1.0",
|
||||
"relu": "2.6194 +- 2.0",
|
||||
"ops": "23.1404 +- 2.23606797749979"
|
||||
},
|
||||
"obey": true
|
||||
},
|
||||
...
|
||||
```
|
||||
In the results, the four `"latency"` values represent the running time of the two-op block `"block"` ($T_{Op1,Op2}$), the two single ops `"se"` ($T_{Op1}$) and `"relu"` ($T_{Op2}$), and the sum of the two ops `"ops"` ($T_{Op1} + T_{Op2}$), respectively. `"obey"` shows whether the test case obeys the fusion rule: `true` indicates the two tested ops are fused on the backend, while `false` indicates they are not.
|
||||
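As a quick sanity check, the fusion criterion from [step 4](#step-4-detect-fusion-rule) can be evaluated by hand on the numbers above, using the default `EPS_ALPHA` of 0.5. This is only an illustration of the arithmetic, not part of the nn-Meter API:

```python
# mean latencies (ms) taken from the "BF_se_relu" example above
t_se, t_relu, t_block = 20.521, 2.6194, 20.3537
alpha = 0.5  # EPS_ALPHA from ruletest_config.yaml

# T_Op1 + T_Op2 - T_Op1,Op2 > alpha * min(T_Op1, T_Op2)
lhs = t_se + t_relu - t_block    # ~2.79
rhs = alpha * min(t_se, t_relu)  # ~1.31
print(lhs > rhs)                 # True, which matches "obey": true
```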
|
||||
Note: the latency values will be recorded only when `'DETAIL'` is set to `True` in `<workspace-path>/configs/ruletest_config.yaml`.
|
||||
|
||||
## End-to-end Demo
|
||||
|
||||
Here is an end-to-end demo of the fusion rule testing process:
|
||||
|
||||
```python
|
||||
from nn_meter.builder import profile_models, builder_config
|
||||
builder_config.init("path/to/workspace/folder") # initialize builder config with workspace
|
||||
from nn_meter.builder.backends import connect_backend
|
||||
from nn_meter.builder.backend_meta.fusion_rule_tester import generate_testcases, detect_fusion_rule
|
||||
|
||||
# generate testcases
|
||||
origin_testcases = generate_testcases()
|
||||
|
||||
# connect to backend
|
||||
backend = connect_backend(backend_name='tflite_cpu')
|
||||
|
||||
# run testcases and collect profiling results
|
||||
profiled_results = profile_models(backend, origin_testcases, mode='ruletest')
|
||||
|
||||
# determine fusion rules from profiling results
|
||||
detected_results = detect_fusion_rule(profiled_results)
|
||||
```
|
||||
|
||||
There are three main steps: 1) generate test cases, 2) profile models, and 3) detect fusion rules. For each step, the output will be dumped to `<workspace-path>/fusion_rule_test/results/`. Both the test cases instance and the path string to the dumped test cases file are acceptable inputs for the next step.
|
||||
|
||||
Note: using a backend is optional. All `profile_models` does is collect latency results for each test case, so you can use your own tools to measure the latency. Refer to the implementation of `profile_models` for how to fill the latency back in, and see the sketch below for one possible workflow.
|
||||
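For example, here is a rough sketch of skipping the backend entirely: load the dumped test case file, fill the `"latency"` field of every model with measurements from your own profiler, and pass the resulting file to the next step. The `measure_with_my_tool` helper is hypothetical and stands in for whatever measurement tool you use; the `"latency"` string format follows the profiled results shown later in this document:

```python
import json

workspace = "path/to/workspace/folder"
with open(f"{workspace}/fusion_rule_test/results/origin_testcases.json") as f:
    testcases = json.load(f)

for case in testcases.values():
    for model_info in case.values():
        avg_ms, std_ms = measure_with_my_tool(model_info["model"])  # hypothetical helper
        model_info["latency"] = f"{avg_ms} +- {std_ms}"

result_file = f"{workspace}/fusion_rule_test/results/profiled_results.json"
with open(result_file, "w") as f:
    json.dump(testcases, f, indent=4)

# a path string to the dumped file is also accepted by the next step
from nn_meter.builder.backend_meta.fusion_rule_tester import detect_fusion_rule
detected_results = detect_fusion_rule(result_file)
```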
|
||||
# <span id="test-cases"> Test Cases </span>
|
||||
|
||||
Test cases are a series of models created by nn-Meter. These models will be profiled to obtain their latency. By analyzing the latency results, we are able to detect the fusion rules on the device. Finally, the detected fusion rules will be used to direct the process of kernel detection.
|
||||
|
||||
In this section, we will explain how our test case classes are implemented and how to customize your own test cases.
|
||||
|
||||
## Test Cases Design
|
||||
|
||||
Our test case design is driven by two features of a CNN model which impact the fusion rules, i.e., operator type and operator connection.
|
||||
|
||||
### <span id="basic-test-cases"> Basic Test Cases </span>
|
||||
|
||||
Currently, we provide the following operators with their corresponding names:
|
||||
|
||||
- `conv`: conv2d layer implemented by `tf.keras.layers.Conv2D`. Input tensor: 3d; Output tensor: 3d.
|
||||
- `dwconv`: dwconv2d layer implemented by `tf.keras.layers.DepthwiseConv2D`. Input tensor: 3d; Output tensor: 3d.
|
||||
- `convtrans`: conv2d transpose layer implemented by `tf.nn.conv2d_transpose`. Input tensor: 3d; Output tensor: 3d.
|
||||
- `bn`: batch normalization layer implemented by `tf.keras.layers.BatchNormalization`. Input tensor: 3d; Output tensor: 3d.
|
||||
- `maxpool`: max pooling layer implemented by `tf.keras.layers.MaxPool2D`. Input tensor: 3d; Output tensor: 3d.
|
||||
- `avgpool`: average pooling layer implemented by `tf.keras.layers.AveragePooling2D`. Input tensor: 3d; Output tensor: 3d.
|
||||
- `globalavgpool`: global average pooling layer implemented by `tf.keras.layers.GlobalAveragePooling2D`. Input tensor: 3d; Output tensor: 1d.
|
||||
- `se`: squeeze-and-excite block implemented referring to the [official version](https://github.com/tensorflow/models/blob/89dd9a4e2548e8a5214bd4e564428d01c206a7db/research/slim/nets/mobilenet/conv_blocks.py#L408). Input tensor: 3d; Output tensor: 3d.
|
||||
- `fc`: fully connected layer implemented by `tf.keras.layers.Dense`. Input tensor: 1d; Output tensor: 1d.
|
||||
- `relu`: relu activation layer implemented by `tf.keras.layers.ReLU`. Input tensor: 3d or 1d; Output tensor: 3d or 1d.
|
||||
- `relu6`: relu6 activation layer implemented by `tf.nn.relu6`. Input tensor: 3d or 1d; Output tensor: 3d or 1d.
|
||||
- `sigmoid`: sigmoid activation layer implemented by `tf.nn.sigmoid`. Input tensor: 3d or 1d; Output tensor: 3d or 1d.
|
||||
- `hswish`: hswish activation layer implemented using `tf.nn.relu6`. Input tensor: 3d or 1d; Output tensor: 3d or 1d.
|
||||
- `reshape`: reshape layer implemented by `tf.reshape`. Input tensor: 3d tensor with shape [H, W, C], or 1d tensor with shape [CIN]; Output tensor: 3d tensor with shape [C, H, W], or 3d tensor with shape [1, 2, CIN / 2]. `CIN` is required to be even.
|
||||
- `add`: add layer implemented by `tf.keras.layers.Add`. Input tensor: list of two 3d tensors with shape [[H, W, C], [H, W, C]], or a 1d tensor with shape [CIN]; Output tensor: one 3d tensor with shape [H, W, C], or one 1d tensor with shape [CIN]. The input tensor will be duplicated to form the input tensor list.
|
||||
- `concat`: concatenation layer implemented by `tf.keras.layers.Concatenate`. Input tensor: list of two 3d tensors with shape [[H, W, C], [H, W, C]], or a 1d tensor with shape [CIN]; Output tensor: one 3d tensor with shape [H, W, 2 * C], or a 1d tensor with shape [CIN * 2]. The input tensor will be duplicated to form the input tensor list.
|
||||
- `flatten`: flatten layer implemented by `tf.keras.layers.Flatten`. Input tensor: 3d; Output tensor: 1d.
|
||||
- `split`: split layer implemented by `tf.split`. Input tensor: 3d; Output tensor: list of two 3d tensors with shape [[H, W, C / 2], [H, W, C / 2]]. `CIN` is required to be even.
|
||||
|
||||
The above ops can be used for fusion rule testing of single-inbound and single-outbound operator connections, which we also call "basic test cases". In each basic test case, three models are generated: two models each containing a single op, and one model containing the block of the two ops. The test case tests whether the two ops are fused into one block during inference. Users could edit `'BASIC_TESTCASES'` in `<workspace-path>/configs/ruletest_config.yaml` to specify the combinations of interest. A demo of `'BASIC_TESTCASES'` is:
|
||||
|
||||
```yaml
|
||||
BASIC_TESTCASES:
|
||||
- conv_avgpool
|
||||
- conv_relu
|
||||
```
|
||||
|
||||
which indicates that in the process of fusion rule detection, two test cases will be generated: the test case of the `conv` op and the `avgpool` op, as well as the test case of the `conv` op and the `relu` op. To add a new test case, users could use the layer names, connect the op names with `"_"`, and add the string to `'BASIC_TESTCASES'`, as illustrated below.
|
||||
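For instance, to additionally test whether a `dwconv` op followed by a `relu` op gets fused, the list could be extended like this (a hypothetical edit; any pair of supported op names connected by `"_"` works the same way):

```yaml
BASIC_TESTCASES:
  - conv_avgpool
  - conv_relu
  - dwconv_relu
```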
|
||||
Note: if the input to a layer is a 1-dimensional tensor, users should add the layer into `'LAYERS_1D'` in `<workspace-path>/configs/ruletest_config.yaml`.
|
||||
|
||||
### Other Test Cases
|
||||
|
||||
Besides operator type, operator connection also impacts fusion rules. nn-Meter considers three basic connection types: 1) single inbound and outbound, 2) multiple outbounds, and 3) multiple inbounds. Our study has shown that there is no fusion for the multiple-inbound type, so we don't provide a test case for it.
|
||||
|
||||
To test multiple outbounds, nn-Meter forms a test case with two branches, named `'MON'` (multiple out nodes). The implementation of the test case block is shown below:
|
||||
|
||||
```python
|
||||
# body of the 'MON' test case's block; input_shape, kernel_size and padding come from the workspace config
input_layer = keras.Input(shape=input_shape)
|
||||
x = keras.layers.DepthwiseConv2D(kernel_size, padding=padding)(input_layer)
|
||||
branch_1 = keras.layers.ReLU(negative_slope=0)(x)
|
||||
branch_1 = keras.layers.ReLU(negative_slope=0)(branch_1)
|
||||
branch_2 = keras.layers.ReLU(negative_slope=2)(x)
|
||||
branch_2 = keras.layers.DepthwiseConv2D(kernel_size, padding=padding)(branch_2)
|
||||
return keras.models.Model(input_layer, [branch_1, branch_2])
|
||||
```
|
||||
|
||||
If a rule exists stating that `"dwconv_relu"` will be fused as a kernel, there are three possible cases for multiple-outbound kernel fusion, that is:
|
||||
```python
|
||||
cases = {
|
||||
'case1': ['relu_relu', 'relu_dwconv', 'dwconv'],
|
||||
'case2': ['dwconv_relu_relu', 'relu_dwconv'],
|
||||
'case3': ['dwconv_relu', 'dwconv', 'relu_relu']
|
||||
}
|
||||
```
|
||||
We need to test which fusion rule the test case obeys. The detection result of a multiple-outbound test case will be a string from `['case1', 'case2', 'case3']`.
|
||||
|
||||
## Data Structure of Test Cases
|
||||
|
||||
Each test case consists of several test models to profile. Generally, for basic test cases, the test models are the two single ops and a block combining the two ops. For each test model, `"model"` points to the path of the saved `Keras` model, `"shapes"` indicates the input tensor shapes to test, and `"latency"` reports the profiled results after running `profile_models`. Below is a json dump of generated test cases. Note that the `"latency"` attribute appears only after the test cases are run and profiled.
|
||||
|
||||
```json
|
||||
{
|
||||
"dwconv_relu": {
|
||||
"dwconv": {
|
||||
"model": "./fusion_rule_test/models/BF_dwconv_relu_dwconv",
|
||||
"shapes": [
|
||||
[
|
||||
28,
|
||||
28,
|
||||
16
|
||||
]
|
||||
],
|
||||
"latency": "41.781 +- 1.0"
|
||||
},
|
||||
"relu": {
|
||||
"model": "./fusion_rule_test/models/BF_dwconv_relu_relu",
|
||||
"shapes": [
|
||||
[
|
||||
28,
|
||||
28,
|
||||
16
|
||||
]
|
||||
],
|
||||
"latency": "2.36618 +- 0.0"
|
||||
},
|
||||
"block": {
|
||||
"model": "./fusion_rule_test/models/BF_dwconv_relu_block",
|
||||
"shapes": [
|
||||
[
|
||||
28,
|
||||
28,
|
||||
16
|
||||
]
|
||||
],
|
||||
"latency": "41.4198 +- 1.0"
|
||||
}
|
||||
},
|
||||
...
|
||||
}
|
||||
```
|
||||
|
||||
In this instance, `dwconv_relu` is the name of a test case. There are three models, called `dwconv`, `relu` and `block`. For each model, `"model"` indicates the path where the model is saved. In the model name, `"BF"` indicates the test case belongs to the basic fusion test cases, `"dwconv_relu"` indicates the name of the test case, and the last clause (`"dwconv"`, `"relu"`, or `"block"`) indicates the model's name within that test case. `"shapes"` indicates the list of its input tensor shapes (`[H, W, C]`). For example, here `[[28, 28, 16]]` means this model has only one input, and its shape is `(28, 28, 16)`.
|
||||
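A short sketch of reading the dumped file back and pulling out the profiled latencies (adjust the path to your own workspace; the file names follow the dump locations described above):

```python
import json

# profiled_results.json is written after profiling; origin_testcases.json has the same
# structure, just without the "latency" fields
with open("path/to/workspace/fusion_rule_test/results/profiled_results.json") as f:
    testcases = json.load(f)

block = testcases["dwconv_relu"]["block"]
print(block["model"])    # ./fusion_rule_test/models/BF_dwconv_relu_block
print(block["shapes"])   # [[28, 28, 16]]
print(block["latency"])  # 41.4198 +- 1.0
```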
|
||||
# Apply Fusion Rules for Kernel Detection
|
||||
|
||||
The output json file `<workspace-path>/fusion_rule_test/results/detected_fusion_rule.json` shows all fusion rules detected from the test cases. Users could directly apply the json file for kernel detection.
|
||||
|
||||
The fusion rules json file will be a part of the customized predictor. Users could refer to [Customize Predictor](../predictor/customize_predictor.md) to prepare the other parts of the predictor and register it.
|
||||
|
||||
# <span id="build-customized-test-cases"> Build Customized Test Cases </span>
|
||||
|
||||
## Build Basic Test Cases
|
||||
|
||||
Currently, nn-Meter supports the following ops:
|
||||
|
||||
- `'conv'`
|
||||
|
||||
- `'dwconv'`
|
||||
|
||||
- `'convtrans'`
|
||||
|
||||
- `'bn'`
|
||||
|
||||
- `'maxpool'`
|
||||
|
||||
- `'avgpool'`
|
||||
|
||||
- `'globalavgpool'`
|
||||
|
||||
- `'se'`
|
||||
|
||||
- `'fc'`
|
||||
|
||||
- `'relu'`
|
||||
|
||||
- `'relu6'`
|
||||
|
||||
- `'sigmoid'`
|
||||
|
||||
- `'hswish'`
|
||||
|
||||
- `'reshape'`
|
||||
|
||||
- `'add'`
|
||||
|
||||
- `'concat'`
|
||||
|
||||
- `'flatten'`
|
||||
|
||||
- `'split'`
|
||||
|
||||
Refer to [basic test cases](#basic-test-cases) for more details of the supported ops. To apply existing ops, users could directly declare the op names and op connections in `'BASIC_TESTCASES'` in `<workspace-path>/configs/ruletest_config.yaml` to generate their own test cases.
|
||||
|
||||
If users want to add new operators to the basic test cases, here are the steps to prepare and register new operators:
|
||||
|
||||
### Step 1: Prepare the Customized Operator Class
|
||||
|
||||
nn-Meter provides an API for users to customize their own operators. In nn-Meter, each operator is implemented by inheriting a base class named `nn_meter.builder.nn_generator.BaseOperator`. The class has two input parameters, `input_shape` and `config`. `input_shape` is a list showing the dimensions of the input tensor (the batch dimension should not be included), and `config` can be used to feed configuration params to the operator. The base class has the following methods:
|
||||
|
||||
- `get_model`: Return the model function of the operator. Users need to modify this **all the time**.
|
||||
|
||||
- `get_output_shape`: Return a list representing the output shape of the operator. Users need to modify this **most of the time**. If the output has the same shape as the input, users don't need to override the method.
|
||||
|
||||
- `get_is_two_inputs`: Whether the operator has two input tensors. If the operator has only one input tensor, the returned value should be `False`. **Sometimes you will** need to modify this. If the value is `False`, users don't need to override the method.
|
||||
|
||||
- `test_operator`: A test script to verify the operator. **Most of the time you won't** need to modify this.
|
||||
|
||||
We provide the implementations of three builtin operators as examples:
|
||||
|
||||
The first example is the Conv2d operator. The operator simply applies APIs from `tensorflow.keras.layers` to build the model function. Users could also build a class inheriting `tensorflow.keras.layers.Layer` for customized usage:
|
||||
|
||||
``` python
|
||||
class Conv(BaseOperator):
|
||||
def get_model(self):
|
||||
cout = self.input_shape[2] if "COUT" not in self.config else self.config["COUT"]
|
||||
return keras.layers.Conv2D(
|
||||
cout,
|
||||
kernel_size=self.config["KERNEL_SIZE"],
|
||||
strides=self.config["STRIDES"],
|
||||
padding="same"
|
||||
)
|
||||
|
||||
def get_output_shape(self):
|
||||
cout = self.input_shape[2] if "COUT" not in self.config else self.config["COUT"]
|
||||
output_h = (self.input_shape[0] - 1) // self.config["STRIDES"] + 1
|
||||
output_w = (self.input_shape[1] - 1) // self.config["STRIDES"] + 1
|
||||
return [output_h, output_w, cout]
|
||||
```
|
||||
|
||||
The second example is the Sigmoid operator. The operator builds the model function using `tensorflow.nn`:
|
||||
|
||||
``` python
|
||||
class Sigmoid(BaseOperator):
|
||||
def get_model(self):
|
||||
def func(inputs):
|
||||
return tf.nn.sigmoid(inputs)
|
||||
return func
|
||||
```
|
||||
|
||||
The third example is an operator class with two input tensors. In this case, users should note that `get_output_shape` must cover all possible cases of `input_shape`:
|
||||
|
||||
``` python
|
||||
class Add(BaseOperator):
|
||||
def get_model(self):
|
||||
return keras.layers.Add()
|
||||
|
||||
def get_output_shape(self):
|
||||
if len(self.input_shape) == 2 and type(self.input_shape[0]) == list:
|
||||
output_shape = self.input_shape[0]
|
||||
else:
|
||||
output_shape = self.input_shape
|
||||
return output_shape
|
||||
|
||||
def get_is_two_inputs(self):
|
||||
return True
|
||||
```
|
||||
|
||||
Note: all configuration values are fed into the operators through the `config` param, which gets its data from `<workspace-path>/configs/ruletest_config.yaml`. Users should follow the same config notation to pass parameters. If any new parameter is needed in `config`, users should also add the parameter and set its value in `<workspace-path>/configs/ruletest_config.yaml`.
|
||||
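As a sketch of how a new parameter flows from the config file into an operator, suppose a customized operator needs a dilation rate. The `MY_DILATION` key and the `DilatedConv` class below are purely hypothetical; the key would also have to be added to `<workspace-path>/configs/ruletest_config.yaml` (e.g. `MY_DILATION: 2`):

```python
from tensorflow import keras
from nn_meter.builder.nn_generator import BaseOperator

class DilatedConv(BaseOperator):  # hypothetical operator
    def get_model(self):
        # MY_DILATION is a hypothetical config key added to ruletest_config.yaml
        return keras.layers.Conv2D(
            self.config["COUT"],
            kernel_size=self.config["KERNEL_SIZE"],
            dilation_rate=self.config["MY_DILATION"],
            padding="same"
        )

    def get_output_shape(self):
        # "same" padding with the default stride keeps H and W; channels become COUT
        return [self.input_shape[0], self.input_shape[1], self.config["COUT"]]
```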
|
||||
### Step 2: Create a Package for the Customized Operator
|
||||
|
||||
nn-Meter requires users to gather all operator code in a package at a fixed location. A folder is treated as a package once an `__init__.py` file is added. Here is a demo of the folder structure:
|
||||
|
||||
``` text
|
||||
./customized_operator/
|
||||
├── __init__.py
|
||||
└── operator_script.py
|
||||
```
|
||||
|
||||
The interface of the customized operator class is stored in `./customized_operator/operator_script.py`. In this demo, the content of `operator_script.py` includes:
|
||||
|
||||
``` python
|
||||
from nn_meter.builder.nn_generator import BaseOperator
|
||||
from tensorflow import keras
|
||||
|
||||
class Op1(BaseOperator):
|
||||
def get_model(self):
|
||||
return ...
|
||||
```
|
||||
|
||||
Note: The folder could contain more than one operator, but the registration should be done one by one.
|
||||
|
||||
### Step 3: Prepare Meta File
|
||||
|
||||
Create a yaml file with the following keys as the meta file:
|
||||
|
||||
- `builtin_name`: the builtin name used in the nn-Meter configuration file to call the customized operator, such as `"op1"`. Note that there should not be any "\_" in the `builtin_name`, as any "\_" will be regarded as a connection between different operators during test case generation.
|
||||
|
||||
- `package_location`: the absolute path of the package folder.
|
||||
|
||||
- `class_module`: the module of the operator class, in this example is `operator_script`, representing `operator_script.py`.
|
||||
|
||||
- `class_name`: the operator class name, in this example is `Op1`.
|
||||
|
||||
Following is an example of the yaml file:
|
||||
|
||||
```yaml
|
||||
builtin_name: op1
|
||||
package_location: /home/USERNAME/working/customized_operator
|
||||
class_module: operator_script
|
||||
class_name: Op1
|
||||
```
|
||||
|
||||
### Step 4: Register Customized Operator into nn-Meter
|
||||
|
||||
Run the following command to register customized operator into nn-Meter:
|
||||
|
||||
``` bash
|
||||
nn-meter register --operator path/to/meta/file
|
||||
```
|
||||
If the registration succeeds, nn-Meter will show:
|
||||
``` text
|
||||
(nn-Meter) Successfully register operator: op1
|
||||
```
|
||||
|
||||
When registering, nn-Meter will first test whether the module can be imported. If the registration is not successful, please check the package according to the error message.
|
||||
|
||||
After the operator registration, users can view all operators by running:
|
||||
``` bash
|
||||
nn-meter --list-operators
|
||||
```
|
||||
```text
|
||||
(nn-Meter) Supported operators: ('*' indicates customized operators)
|
||||
(nn-Meter) [Operator] conv
|
||||
(nn-Meter) [Operator] dwconv
|
||||
(nn-Meter) [Operator] convtrans
|
||||
(nn-Meter) [Operator] bn
|
||||
(nn-Meter) [Operator] globalavgpool
|
||||
(nn-Meter) [Operator] maxpool
|
||||
(nn-Meter) [Operator] avgpool
|
||||
(nn-Meter) [Operator] se
|
||||
(nn-Meter) [Operator] fc
|
||||
(nn-Meter) [Operator] relu
|
||||
(nn-Meter) [Operator] relu6
|
||||
(nn-Meter) [Operator] sigmoid
|
||||
(nn-Meter) [Operator] hswish
|
||||
(nn-Meter) [Operator] reshape
|
||||
(nn-Meter) [Operator] add
|
||||
(nn-Meter) [Operator] concat
|
||||
(nn-Meter) [Operator] flatten
|
||||
(nn-Meter) [Operator] split
|
||||
(nn-Meter) [Operator] * op1
|
||||
```
|
||||
|
||||
Note: the package of the customized operator must remain at the same path as when it was registered. Otherwise, errors may occur when the registered module is called.
|
||||
|
||||
### Use the Customized Operator in Experiment
|
||||
|
||||
After registration, users could apply the customized operator to generate test cases:
|
||||
|
||||
``` yaml
|
||||
# .yaml file in `<workspace-path>/configs/ruletest_config.yaml`
|
||||
...
|
||||
BASIC_TESTCASES:
|
||||
- op1_relu
|
||||
LAYERS_1D:
|
||||
- fc
|
||||
- op1
|
||||
```
|
||||
|
||||
Note: if the input to the customized operator should be a 1-dimensional tensor, users need to add its builtin name to `LAYERS_1D`.
|
||||
|
||||
### Manage the Registered Operator
|
||||
|
||||
Users could unregister the operator by calling its name in command:
|
||||
|
||||
``` bash
|
||||
nn-meter unregister --operator op1
|
||||
```
|
||||
``` text
|
||||
(nn-Meter) Successfully unregister op1.
|
||||
```
|
||||
|
||||
After unregistering the operator, "op1" will be removed from the operator list.
|
||||
|
||||
## Build Other Test Case
|
||||
|
||||
### Step 1: Prepare the Customized Test Case Class
|
||||
Customizing other test cases is more complicated. Here we first describe the implementation of `BaseTestCase`. We define the base of all test case generators in `nn_meter.builder.backend_meta.fusion_rule_tester.BaseTestCase`. The base class has the following methods:
|
||||
|
||||
- `generate_testcase`: Generate all test models for this test case. **Most of the time you won't** need to modify this.
|
||||
|
||||
- `save_testcase`: Save the test case models and return the test case information. The `_model_block` of rule `Rule1` will be saved under the name `Rule1_block`. **Most of the time you won't** need to modify this.
|
||||
|
||||
- `load_latency`: Load the latency from the test case information (usually a dictionary in json format). **Most of the time you won't** need to modify this.
|
||||
|
||||
- `test`: Decide the truth or the case of this rule by analyzing the latency results. **Sometimes you will** need to modify this.
|
||||
|
||||
- `load_config`: Load the configuration that will be used in the class. **Most of the time you won't** need to modify this.
|
||||
|
||||
- Methods starting with `_model_`: These define the structures of the models used in the test case. For example, if you define `_model_conv` in the class, then you can use `conv` in the field `cases`. This means `conv` will be generated as a model, profiled, and used for latency analysis as a component of the case in which it appears. For example,
|
||||
|
||||
```python
|
||||
cases = {
|
||||
'case1': ['dwconv_add', 'dwconv', 'dwconv', 'add', 'relu'],
|
||||
'case2': ['dwconv_add_add', 'dwconv', 'dwconv', 'relu'],
|
||||
}
|
||||
```
|
||||
|
||||
Here the latency of `case1` is the sum of the latencies of `_model_dwconv_add`, `_model_dwconv` (counted twice), `_model_add`, and `_model_relu`.
|
||||
|
||||
**You will always** need to implement `_model_block`.
|
||||
|
||||
Besides, there are also several class attributes:
|
||||
|
||||
- `name`: the name of the fusion rule
|
||||
|
||||
- `cases`: The potential splitting possibilities of `_model_block`.
|
||||
|
||||
- `deps`: The truth of this rule depends on the truth of other rules.
|
||||
|
||||
- `_model_block`: The structure of the tested block.
|
||||
|
||||
Here is an example of a customized test case:
|
||||
|
||||
```python
|
||||
from tensorflow import keras
|
||||
from nn_meter.builder.backend_meta.fusion_rule_tester import BaseTestCase
|
||||
|
||||
class MyTestCase(BaseTestCase):
|
||||
name = 'MyTestCase'
|
||||
cases = {
|
||||
'case1': ['dwconv_add', 'dwconv', 'dwconv', 'add', 'relu'],
|
||||
'case2': ['dwconv_add_add', 'dwconv', 'dwconv', 'relu'],
|
||||
}
|
||||
true_case = 'case1'
|
||||
deps = {
|
||||
'MON': True,
|
||||
'BF_dwconv_relu': True,
|
||||
}
|
||||
|
||||
def _model_block(self):
|
||||
input_layer = keras.Input(shape=self.input_shape)
|
||||
|
||||
branch_1 = keras.layers.DepthwiseConv2D(self.kernel_size, padding=self.padding)(input_layer)
|
||||
branch_2 = keras.layers.DepthwiseConv2D(self.kernel_size, padding=self.padding)(input_layer)
|
||||
output_1 = keras.layers.Add()([branch_1, branch_2])
|
||||
branch_3 = keras.layers.DepthwiseConv2D(self.kernel_size, padding=self.padding)(input_layer)
|
||||
output_1 = keras.layers.Add()([branch_3, output_1])
|
||||
|
||||
output_2 = keras.layers.ReLU()(branch_3)
|
||||
|
||||
return keras.Model(input_layer, [output_1, output_2]), [self.input_shape]
|
||||
|
||||
def _model_dwconv_add(self):
|
||||
input_layer = keras.Input(shape=self.input_shape)
|
||||
|
||||
x = keras.layers.DepthwiseConv2D(self.kernel_size, padding=self.padding)(input_layer)
|
||||
x = keras.layers.Add()([x, input_layer])
|
||||
|
||||
return keras.models.Model(input_layer, x), [self.input_shape]
|
||||
|
||||
def _model_dwconv_add_add(self):
|
||||
input_1 = keras.Input(shape=self.input_shape)
|
||||
input_2 = keras.Input(shape=self.input_shape)
|
||||
|
||||
x = keras.layers.DepthwiseConv2D(self.kernel_size, padding=self.padding)(input_1)
|
||||
x = keras.layers.Add()([x, input_1])
|
||||
x = keras.layers.Add()([x, input_2])
|
||||
|
||||
return keras.models.Model([input_1, input_2], x), [self.input_shape, self.input_shape]
|
||||
|
||||
def _model_dwconv_relu(self):
|
||||
input_layer = keras.Input(shape=self.input_shape)
|
||||
|
||||
x = keras.layers.DepthwiseConv2D(self.kernel_size, padding=self.padding)(input_layer)
|
||||
x = keras.layers.ReLU()(x)
|
||||
|
||||
return keras.models.Model(input_layer, x), [self.input_shape]
|
||||
|
||||
```
|
||||
|
||||
### Step 2: Create a Package for the Customized Test Case
|
||||
|
||||
nn-Meter requires users to gather all test case code in a package at a fixed location. A folder is treated as a package once an `__init__.py` file is added. Here is a demo of the folder structure:
|
||||
|
||||
``` text
|
||||
./customized_testcase/
|
||||
├── __init__.py
|
||||
└── testcase_script.py
|
||||
```
|
||||
|
||||
The interface of the customized test case class is stored in `./customized_testcase/testcase_script.py`.
|
||||
|
||||
### Step 3: Prepare Meta File
|
||||
|
||||
Create a yaml file with the following keys as the meta file:
|
||||
|
||||
- `builtin_name`: builtin name used in nn-Meter configuration file to call the customized test case, such as `"MyTC"`.
|
||||
|
||||
- `package_location`: the absolute path of the package folder.
|
||||
|
||||
- `class_module`: the module of the test case class, in this example is `testcase_script`, representing `testcase_script.py`.
|
||||
|
||||
- `class_name`: the test case class name, in this example is `MyTestCase`.
|
||||
|
||||
Following is an example of the yaml file:
|
||||
|
||||
```yaml
|
||||
builtin_name: MyTC
|
||||
package_location: /home/USERNAME/working/customized_testcase
|
||||
class_module: testcase_script
|
||||
class_name: MyTestCase
|
||||
```
|
||||
|
||||
### Step 4: Register Customized Test Case into nn-Meter
|
||||
|
||||
Run the following command to register customized test case into nn-Meter:
|
||||
|
||||
``` bash
|
||||
nn-meter register --testcase path/to/meta/file
|
||||
```
|
||||
If the registration succeeds, nn-Meter will show:
|
||||
``` text
|
||||
(nn-Meter) Successfully register testcase: MyTC
|
||||
```
|
||||
|
||||
When registering, nn-Meter will first test whether the module can be imported. If the registration is not successful, please check the package according to the error message.
|
||||
|
||||
Note: the package of the customized test case must remain at the same path as when it was registered. Otherwise, errors may occur when the registered module is called.
|
||||
|
||||
### Use the Customized Test Case in Experiment
|
||||
|
||||
After registration, users could apply the customized test case in the fusion rule detection experiment:
|
||||
|
||||
``` yaml
|
||||
# .yaml file in `<workspace-path>/configs/ruletest_config.yaml`
|
||||
...
|
||||
OTHER_TESTCASES:
|
||||
- MyTC
|
||||
```
|
||||
|
||||
Note: if the truth of this fusion rule depends on the truth of other rules, the precondition tests must be done first. That is, the fusion rules in the attribute `deps` must be tested before or together with the customized test case.
|
||||
|
||||
### Manage the Registered Test Case
|
||||
|
||||
Users could unregister the test case by calling its name in command:
|
||||
|
||||
``` bash
|
||||
nn-meter unregister --testcase MyTC
|
||||
```
|
||||
``` text
|
||||
(nn-Meter) Successfully unregister MyTC.
|
||||
```
|
||||
|
||||
## Use Customized Rules when Splitting
|
||||
|
||||
Currently we haven't provided an API to split models using customized rules. We leave that to future work.
|
||||
|
||||
It is not recommended, but you can implement that by directly modifying the code at `nn_meter.kernel_detector.rule_splitter`.
|
|
@ -1,12 +1,10 @@
|
|||
# Overview
|
||||
Note: This is a beta (preview) version which is still being refined.
|
||||
|
||||
nn-Meter is a novel and efficient system to accurately predict the inference latency of DNN models on diverse edge devices.
|
||||
|
||||
## Key Techniques
|
||||
nn-Meter contains two key techniques: (i) kernel detection to automatically detect the execution unit of model inference via a set of well-designed test cases; (ii) adaptive sampling to efficiently sample the most beneficial configurations from a large space to build accurate kernel-level latency predictors.
|
||||
|
||||
nn-Meter currently supports multiple input model formats, please refer [input_models](input_models.md) for more details.
|
||||
nn-Meter currently supports multiple input model formats, please refer [input_models](predictor/input_models.md) for more details.
|
||||
|
||||
As discussed in the nn-Meter paper, the approach is general to any DNN model on diverse edge devices. However, the current implementation considers the major CNN architectures on four types of hardware platforms. The following table shows the prediction performance of the tested CNN model families on mobile CPU (i.e., *cortexA76cpu_tflite21*), mobile GPU 640 (i.e., *adreno640gpu_tflite21*), mobile GPU 630 (i.e., *adreno630gpu_tflite21*) and Intel VPU (i.e., *myriadvpu_openvino2019r2*).
|
||||
|
||||
|
@ -19,8 +17,26 @@ If you have a new hardware to predict DNN latency, a re-run of nn-Meter is requ
|
|||
## Learn More
|
||||
- [Get started](quick_start.md)
|
||||
|
||||
- [How to use nn-Meter Predictor](predictor/usage.md)
|
||||
- nn-Meter Key Techniques
|
||||
|
||||
- [nn-Meter in hardware-aware NAS](predictor/hardware-aware-model-design.md)
|
||||
- [Operator](ops.md)
|
||||
|
||||
- [nn-Meter bench dataset](dataset.md)
|
||||
- [Kernel](kernel.md)
|
||||
|
||||
- nn-Meter Predictor
|
||||
|
||||
- [How to use nn-Meter Predictor](predictor/usage.md)
|
||||
|
||||
- [nn-Meter in hardware-aware NAS](predictor/hardware-aware-model-design.md)
|
||||
|
||||
- [nn-Meter Bench Dataset](dataset.md)
|
||||
|
||||
- [nn-Meter Builder](builder/overview.md)
|
||||
|
||||
- [Connect Backend](builder/prepare_backend.md)
|
||||
|
||||
- [Detect Fusion Rules on Backend](builder/test_fusion_rules.md)
|
||||
|
||||
- [Build Kernel Latency Predictor](builder/build_kernel_latency_predictor.md)
|
||||
|
||||
- [Use Customized Predictor for Prediction](builder/customize_predictor.md)
|
|
@ -12,7 +12,7 @@ Here is a summary of supported inputs of the two methods.
|
|||
| Tensorflow | Checkpoint file dumped by `tf.saved_model()` and end with `.pb` | Checkpoint file dumped by `tf.saved_model` and end with `.pb` |
|
||||
| Torch | Models in `torchvision.models` | Object of `torch.nn.Module` |
|
||||
| Onnx | Checkpoint file dumped by `onnx.save()` and end with `.onnx` | Checkpoint file dumped by `onnx.save()` or model loaded by `onnx.load()` |
|
||||
| nn-Meter IR graph | Json file in the format of [nn-Meter IR Graph](../input_models.md#nnmeter-ir-graph) | `dict` object following the format of [nn-Meter IR Graph](../input_models.md#nnmeter-ir-graph) |
|
||||
| nn-Meter IR graph | Json file in the format of [nn-Meter IR Graph](./input_models.md#nnmeter-ir-graph) | `dict` object following the format of [nn-Meter IR Graph](./input_models.md#nnmeter-ir-graph) |
|
||||
| NNI IR graph | - | NNI IR graph object |
|
||||
|
||||
In both methods, users could appoint predictor name and version to target a specific hardware platform (device). Currently, nn-Meter supports prediction on the following four configs:
|
||||
|
@ -27,7 +27,7 @@ Users can get all predefined predictors and versions by running
|
|||
|
||||
```bash
|
||||
# to list all predefined predictors
|
||||
nn-meter --list-predictors
|
||||
nn-meter --list-predictors
|
||||
```
|
||||
|
||||
## Predict latency of saved CNN model
|
||||
|
|
|
@ -0,0 +1,6 @@
|
|||
tensorflow>=1.2.0,<2.0.0
|
||||
networkx==2.3.0
|
||||
numpy>=1.12.0
|
||||
test-generator==0.1.1
|
||||
defusedxml>=0.5.0
|
||||
serial
|
|
@ -1,4 +1,5 @@
|
|||
numpy==1.18.5
|
||||
pandas
|
||||
tqdm
|
||||
matplotlib
|
||||
graphviz
|
||||
|
|
|
@ -0,0 +1,5 @@
|
|||
tensorflow
|
||||
serial
|
||||
pure-python-adb
|
||||
typing
|
||||
networkx
|
|
@ -0,0 +1,382 @@
|
|||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Build Kernel Latency Predictor\n",
|
||||
"Users could refer to the [Guidance](../../docs/builder/build_kernel_latency_predictor.md) for details information."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import warnings\n",
|
||||
"from silence_tensorflow import silence_tensorflow\n",
|
||||
"warnings.filterwarnings('ignore')\n",
|
||||
"silence_tensorflow()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# initialize builder config with workspace\n",
|
||||
"from nn_meter.builder import builder_config\n",
|
||||
"builder_config.init(\"/data/jiahang/working/tftest\") "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"WARNING:absl:Found untraced functions such as conv2d_layer_call_fn, conv2d_layer_call_and_return_conditional_losses, re_lu_layer_call_fn, re_lu_layer_call_and_return_conditional_losses, conv2d_layer_call_fn while saving (showing 5 of 10). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as conv2d_1_layer_call_fn, conv2d_1_layer_call_and_return_conditional_losses, re_lu_1_layer_call_fn, re_lu_1_layer_call_and_return_conditional_losses, conv2d_1_layer_call_fn while saving (showing 5 of 10). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as conv2d_2_layer_call_fn, conv2d_2_layer_call_and_return_conditional_losses, re_lu_2_layer_call_fn, re_lu_2_layer_call_and_return_conditional_losses, conv2d_2_layer_call_fn while saving (showing 5 of 10). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as conv2d_3_layer_call_fn, conv2d_3_layer_call_and_return_conditional_losses, re_lu_3_layer_call_fn, re_lu_3_layer_call_and_return_conditional_losses, conv2d_3_layer_call_fn while saving (showing 5 of 10). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as conv2d_4_layer_call_fn, conv2d_4_layer_call_and_return_conditional_losses, re_lu_4_layer_call_fn, re_lu_4_layer_call_and_return_conditional_losses, conv2d_4_layer_call_fn while saving (showing 5 of 10). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as conv2d_5_layer_call_fn, conv2d_5_layer_call_and_return_conditional_losses, re_lu_5_layer_call_fn, re_lu_5_layer_call_and_return_conditional_losses, conv2d_5_layer_call_fn while saving (showing 5 of 10). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as conv2d_6_layer_call_fn, conv2d_6_layer_call_and_return_conditional_losses, re_lu_6_layer_call_fn, re_lu_6_layer_call_and_return_conditional_losses, conv2d_6_layer_call_fn while saving (showing 5 of 10). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as conv2d_7_layer_call_fn, conv2d_7_layer_call_and_return_conditional_losses, re_lu_7_layer_call_fn, re_lu_7_layer_call_and_return_conditional_losses, conv2d_7_layer_call_fn while saving (showing 5 of 10). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as conv2d_8_layer_call_fn, conv2d_8_layer_call_and_return_conditional_losses, re_lu_8_layer_call_fn, re_lu_8_layer_call_and_return_conditional_losses, conv2d_8_layer_call_fn while saving (showing 5 of 10). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as conv2d_9_layer_call_fn, conv2d_9_layer_call_and_return_conditional_losses, re_lu_9_layer_call_fn, re_lu_9_layer_call_and_return_conditional_losses, conv2d_9_layer_call_fn while saving (showing 5 of 10). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as conv2d_10_layer_call_fn, conv2d_10_layer_call_and_return_conditional_losses, re_lu_10_layer_call_fn, re_lu_10_layer_call_and_return_conditional_losses, conv2d_10_layer_call_fn while saving (showing 5 of 10). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as conv2d_11_layer_call_fn, conv2d_11_layer_call_and_return_conditional_losses, re_lu_11_layer_call_fn, re_lu_11_layer_call_and_return_conditional_losses, conv2d_11_layer_call_fn while saving (showing 5 of 10). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as conv2d_12_layer_call_fn, conv2d_12_layer_call_and_return_conditional_losses, re_lu_12_layer_call_fn, re_lu_12_layer_call_and_return_conditional_losses, conv2d_12_layer_call_fn while saving (showing 5 of 10). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as conv2d_13_layer_call_fn, conv2d_13_layer_call_and_return_conditional_losses, re_lu_13_layer_call_fn, re_lu_13_layer_call_and_return_conditional_losses, conv2d_13_layer_call_fn while saving (showing 5 of 10). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as conv2d_14_layer_call_fn, conv2d_14_layer_call_and_return_conditional_losses, re_lu_14_layer_call_fn, re_lu_14_layer_call_and_return_conditional_losses, conv2d_14_layer_call_fn while saving (showing 5 of 10). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as conv2d_15_layer_call_fn, conv2d_15_layer_call_and_return_conditional_losses, re_lu_15_layer_call_fn, re_lu_15_layer_call_and_return_conditional_losses, conv2d_15_layer_call_fn while saving (showing 5 of 10). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as conv2d_16_layer_call_fn, conv2d_16_layer_call_and_return_conditional_losses, re_lu_16_layer_call_fn, re_lu_16_layer_call_and_return_conditional_losses, conv2d_16_layer_call_fn while saving (showing 5 of 10). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as conv2d_17_layer_call_fn, conv2d_17_layer_call_and_return_conditional_losses, re_lu_17_layer_call_fn, re_lu_17_layer_call_and_return_conditional_losses, conv2d_17_layer_call_fn while saving (showing 5 of 10). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as conv2d_18_layer_call_fn, conv2d_18_layer_call_and_return_conditional_losses, re_lu_18_layer_call_fn, re_lu_18_layer_call_and_return_conditional_losses, conv2d_18_layer_call_fn while saving (showing 5 of 10). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as conv2d_19_layer_call_fn, conv2d_19_layer_call_and_return_conditional_losses, re_lu_19_layer_call_fn, re_lu_19_layer_call_and_return_conditional_losses, conv2d_19_layer_call_fn while saving (showing 5 of 10). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as conv2d_20_layer_call_fn, conv2d_20_layer_call_and_return_conditional_losses, re_lu_20_layer_call_fn, re_lu_20_layer_call_and_return_conditional_losses, conv2d_20_layer_call_fn while saving (showing 5 of 10). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as conv2d_21_layer_call_fn, conv2d_21_layer_call_and_return_conditional_losses, re_lu_21_layer_call_fn, re_lu_21_layer_call_and_return_conditional_losses, conv2d_21_layer_call_fn while saving (showing 5 of 10). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as conv2d_22_layer_call_fn, conv2d_22_layer_call_and_return_conditional_losses, re_lu_22_layer_call_fn, re_lu_22_layer_call_and_return_conditional_losses, conv2d_22_layer_call_fn while saving (showing 5 of 10). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as conv2d_23_layer_call_fn, conv2d_23_layer_call_and_return_conditional_losses, re_lu_23_layer_call_fn, re_lu_23_layer_call_and_return_conditional_losses, conv2d_23_layer_call_fn while saving (showing 5 of 10). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as conv2d_24_layer_call_fn, conv2d_24_layer_call_and_return_conditional_losses, re_lu_24_layer_call_fn, re_lu_24_layer_call_and_return_conditional_losses, conv2d_24_layer_call_fn while saving (showing 5 of 10). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as conv2d_25_layer_call_fn, conv2d_25_layer_call_and_return_conditional_losses, re_lu_25_layer_call_fn, re_lu_25_layer_call_and_return_conditional_losses, conv2d_25_layer_call_fn while saving (showing 5 of 10). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as conv2d_26_layer_call_fn, conv2d_26_layer_call_and_return_conditional_losses, re_lu_26_layer_call_fn, re_lu_26_layer_call_and_return_conditional_losses, conv2d_26_layer_call_fn while saving (showing 5 of 10). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as conv2d_27_layer_call_fn, conv2d_27_layer_call_and_return_conditional_losses, re_lu_27_layer_call_fn, re_lu_27_layer_call_and_return_conditional_losses, conv2d_27_layer_call_fn while saving (showing 5 of 10). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as conv2d_28_layer_call_fn, conv2d_28_layer_call_and_return_conditional_losses, re_lu_28_layer_call_fn, re_lu_28_layer_call_and_return_conditional_losses, conv2d_28_layer_call_fn while saving (showing 5 of 10). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as conv2d_29_layer_call_fn, conv2d_29_layer_call_and_return_conditional_losses, re_lu_29_layer_call_fn, re_lu_29_layer_call_and_return_conditional_losses, conv2d_29_layer_call_fn while saving (showing 5 of 10). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as conv2d_30_layer_call_fn, conv2d_30_layer_call_and_return_conditional_losses, re_lu_30_layer_call_fn, re_lu_30_layer_call_and_return_conditional_losses, conv2d_30_layer_call_fn while saving (showing 5 of 10). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as conv2d_31_layer_call_fn, conv2d_31_layer_call_and_return_conditional_losses, re_lu_31_layer_call_fn, re_lu_31_layer_call_and_return_conditional_losses, conv2d_31_layer_call_fn while saving (showing 5 of 10). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as conv2d_32_layer_call_fn, conv2d_32_layer_call_and_return_conditional_losses, re_lu_32_layer_call_fn, re_lu_32_layer_call_and_return_conditional_losses, conv2d_32_layer_call_fn while saving (showing 5 of 10). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as conv2d_33_layer_call_fn, conv2d_33_layer_call_and_return_conditional_losses, re_lu_33_layer_call_fn, re_lu_33_layer_call_and_return_conditional_losses, conv2d_33_layer_call_fn while saving (showing 5 of 10). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as conv2d_34_layer_call_fn, conv2d_34_layer_call_and_return_conditional_losses, re_lu_34_layer_call_fn, re_lu_34_layer_call_and_return_conditional_losses, conv2d_34_layer_call_fn while saving (showing 5 of 10). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as conv2d_35_layer_call_fn, conv2d_35_layer_call_and_return_conditional_losses, re_lu_35_layer_call_fn, re_lu_35_layer_call_and_return_conditional_losses, conv2d_35_layer_call_fn while saving (showing 5 of 10). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as conv2d_36_layer_call_fn, conv2d_36_layer_call_and_return_conditional_losses, re_lu_36_layer_call_fn, re_lu_36_layer_call_and_return_conditional_losses, conv2d_36_layer_call_fn while saving (showing 5 of 10). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as conv2d_37_layer_call_fn, conv2d_37_layer_call_and_return_conditional_losses, re_lu_37_layer_call_fn, re_lu_37_layer_call_and_return_conditional_losses, conv2d_37_layer_call_fn while saving (showing 5 of 10). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as conv2d_38_layer_call_fn, conv2d_38_layer_call_and_return_conditional_losses, re_lu_38_layer_call_fn, re_lu_38_layer_call_and_return_conditional_losses, conv2d_38_layer_call_fn while saving (showing 5 of 10). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as conv2d_39_layer_call_fn, conv2d_39_layer_call_and_return_conditional_losses, re_lu_39_layer_call_fn, re_lu_39_layer_call_and_return_conditional_losses, conv2d_39_layer_call_fn while saving (showing 5 of 10). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as conv2d_40_layer_call_fn, conv2d_40_layer_call_and_return_conditional_losses, re_lu_40_layer_call_fn, re_lu_40_layer_call_and_return_conditional_losses, conv2d_40_layer_call_fn while saving (showing 5 of 10). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as conv2d_41_layer_call_fn, conv2d_41_layer_call_and_return_conditional_losses, re_lu_41_layer_call_fn, re_lu_41_layer_call_and_return_conditional_losses, conv2d_41_layer_call_fn while saving (showing 5 of 10). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as conv2d_42_layer_call_fn, conv2d_42_layer_call_and_return_conditional_losses, re_lu_42_layer_call_fn, re_lu_42_layer_call_and_return_conditional_losses, conv2d_42_layer_call_fn while saving (showing 5 of 10). These functions will not be directly callable after loading.\n",
[... the same "WARNING:absl:Found untraced functions ... These functions will not be directly callable after loading." message repeats once per block for conv2d_43/re_lu_43 through conv2d_235/re_lu_235 ...]
"WARNING:absl:Found untraced functions such as conv2d_236_layer_call_fn, conv2d_236_layer_call_and_return_conditional_losses, re_lu_236_layer_call_fn, re_lu_236_layer_call_and_return_conditional_losses, conv2d_236_layer_call_fn while saving (showing 5 of 10). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as conv2d_237_layer_call_fn, conv2d_237_layer_call_and_return_conditional_losses, re_lu_237_layer_call_fn, re_lu_237_layer_call_and_return_conditional_losses, conv2d_237_layer_call_fn while saving (showing 5 of 10). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as conv2d_238_layer_call_fn, conv2d_238_layer_call_and_return_conditional_losses, re_lu_238_layer_call_fn, re_lu_238_layer_call_and_return_conditional_losses, conv2d_238_layer_call_fn while saving (showing 5 of 10). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as conv2d_239_layer_call_fn, conv2d_239_layer_call_and_return_conditional_losses, re_lu_239_layer_call_fn, re_lu_239_layer_call_and_return_conditional_losses, conv2d_239_layer_call_fn while saving (showing 5 of 10). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as conv2d_240_layer_call_fn, conv2d_240_layer_call_and_return_conditional_losses, re_lu_240_layer_call_fn, re_lu_240_layer_call_and_return_conditional_losses, conv2d_240_layer_call_fn while saving (showing 5 of 10). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as conv2d_241_layer_call_fn, conv2d_241_layer_call_and_return_conditional_losses, re_lu_241_layer_call_fn, re_lu_241_layer_call_and_return_conditional_losses, conv2d_241_layer_call_fn while saving (showing 5 of 10). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as conv2d_242_layer_call_fn, conv2d_242_layer_call_and_return_conditional_losses, re_lu_242_layer_call_fn, re_lu_242_layer_call_and_return_conditional_losses, conv2d_242_layer_call_fn while saving (showing 5 of 10). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as conv2d_243_layer_call_fn, conv2d_243_layer_call_and_return_conditional_losses, re_lu_243_layer_call_fn, re_lu_243_layer_call_and_return_conditional_losses, conv2d_243_layer_call_fn while saving (showing 5 of 10). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as conv2d_244_layer_call_fn, conv2d_244_layer_call_and_return_conditional_losses, re_lu_244_layer_call_fn, re_lu_244_layer_call_and_return_conditional_losses, conv2d_244_layer_call_fn while saving (showing 5 of 10). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as conv2d_245_layer_call_fn, conv2d_245_layer_call_and_return_conditional_losses, re_lu_245_layer_call_fn, re_lu_245_layer_call_and_return_conditional_losses, conv2d_245_layer_call_fn while saving (showing 5 of 10). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as conv2d_246_layer_call_fn, conv2d_246_layer_call_and_return_conditional_losses, re_lu_246_layer_call_fn, re_lu_246_layer_call_and_return_conditional_losses, conv2d_246_layer_call_fn while saving (showing 5 of 10). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as conv2d_247_layer_call_fn, conv2d_247_layer_call_and_return_conditional_losses, re_lu_247_layer_call_fn, re_lu_247_layer_call_and_return_conditional_losses, conv2d_247_layer_call_fn while saving (showing 5 of 10). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as conv2d_248_layer_call_fn, conv2d_248_layer_call_and_return_conditional_losses, re_lu_248_layer_call_fn, re_lu_248_layer_call_and_return_conditional_losses, conv2d_248_layer_call_fn while saving (showing 5 of 10). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as conv2d_249_layer_call_fn, conv2d_249_layer_call_and_return_conditional_losses, re_lu_249_layer_call_fn, re_lu_249_layer_call_and_return_conditional_losses, conv2d_249_layer_call_fn while saving (showing 5 of 10). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as conv2d_250_layer_call_fn, conv2d_250_layer_call_and_return_conditional_losses, re_lu_250_layer_call_fn, re_lu_250_layer_call_and_return_conditional_losses, conv2d_250_layer_call_fn while saving (showing 5 of 10). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as conv2d_251_layer_call_fn, conv2d_251_layer_call_and_return_conditional_losses, re_lu_251_layer_call_fn, re_lu_251_layer_call_and_return_conditional_losses, conv2d_251_layer_call_fn while saving (showing 5 of 10). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as conv2d_252_layer_call_fn, conv2d_252_layer_call_and_return_conditional_losses, re_lu_252_layer_call_fn, re_lu_252_layer_call_and_return_conditional_losses, conv2d_252_layer_call_fn while saving (showing 5 of 10). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as conv2d_253_layer_call_fn, conv2d_253_layer_call_and_return_conditional_losses, re_lu_253_layer_call_fn, re_lu_253_layer_call_and_return_conditional_losses, conv2d_253_layer_call_fn while saving (showing 5 of 10). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as conv2d_254_layer_call_fn, conv2d_254_layer_call_and_return_conditional_losses, re_lu_254_layer_call_fn, re_lu_254_layer_call_and_return_conditional_losses, conv2d_254_layer_call_fn while saving (showing 5 of 10). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as conv2d_255_layer_call_fn, conv2d_255_layer_call_and_return_conditional_losses, re_lu_255_layer_call_fn, re_lu_255_layer_call_and_return_conditional_losses, conv2d_255_layer_call_fn while saving (showing 5 of 10). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as conv2d_256_layer_call_fn, conv2d_256_layer_call_and_return_conditional_losses, re_lu_256_layer_call_fn, re_lu_256_layer_call_and_return_conditional_losses, conv2d_256_layer_call_fn while saving (showing 5 of 10). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as conv2d_257_layer_call_fn, conv2d_257_layer_call_and_return_conditional_losses, re_lu_257_layer_call_fn, re_lu_257_layer_call_and_return_conditional_losses, conv2d_257_layer_call_fn while saving (showing 5 of 10). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as conv2d_258_layer_call_fn, conv2d_258_layer_call_and_return_conditional_losses, re_lu_258_layer_call_fn, re_lu_258_layer_call_and_return_conditional_losses, conv2d_258_layer_call_fn while saving (showing 5 of 10). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as conv2d_259_layer_call_fn, conv2d_259_layer_call_and_return_conditional_losses, re_lu_259_layer_call_fn, re_lu_259_layer_call_and_return_conditional_losses, conv2d_259_layer_call_fn while saving (showing 5 of 10). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as conv2d_260_layer_call_fn, conv2d_260_layer_call_and_return_conditional_losses, re_lu_260_layer_call_fn, re_lu_260_layer_call_and_return_conditional_losses, conv2d_260_layer_call_fn while saving (showing 5 of 10). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as conv2d_261_layer_call_fn, conv2d_261_layer_call_and_return_conditional_losses, re_lu_261_layer_call_fn, re_lu_261_layer_call_and_return_conditional_losses, conv2d_261_layer_call_fn while saving (showing 5 of 10). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as conv2d_262_layer_call_fn, conv2d_262_layer_call_and_return_conditional_losses, re_lu_262_layer_call_fn, re_lu_262_layer_call_and_return_conditional_losses, conv2d_262_layer_call_fn while saving (showing 5 of 10). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as conv2d_263_layer_call_fn, conv2d_263_layer_call_and_return_conditional_losses, re_lu_263_layer_call_fn, re_lu_263_layer_call_and_return_conditional_losses, conv2d_263_layer_call_fn while saving (showing 5 of 10). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as conv2d_264_layer_call_fn, conv2d_264_layer_call_and_return_conditional_losses, re_lu_264_layer_call_fn, re_lu_264_layer_call_and_return_conditional_losses, conv2d_264_layer_call_fn while saving (showing 5 of 10). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as conv2d_265_layer_call_fn, conv2d_265_layer_call_and_return_conditional_losses, re_lu_265_layer_call_fn, re_lu_265_layer_call_and_return_conditional_losses, conv2d_265_layer_call_fn while saving (showing 5 of 10). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as conv2d_266_layer_call_fn, conv2d_266_layer_call_and_return_conditional_losses, re_lu_266_layer_call_fn, re_lu_266_layer_call_and_return_conditional_losses, conv2d_266_layer_call_fn while saving (showing 5 of 10). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as conv2d_267_layer_call_fn, conv2d_267_layer_call_and_return_conditional_losses, re_lu_267_layer_call_fn, re_lu_267_layer_call_and_return_conditional_losses, conv2d_267_layer_call_fn while saving (showing 5 of 10). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as conv2d_268_layer_call_fn, conv2d_268_layer_call_and_return_conditional_losses, re_lu_268_layer_call_fn, re_lu_268_layer_call_and_return_conditional_losses, conv2d_268_layer_call_fn while saving (showing 5 of 10). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as conv2d_269_layer_call_fn, conv2d_269_layer_call_and_return_conditional_losses, re_lu_269_layer_call_fn, re_lu_269_layer_call_and_return_conditional_losses, conv2d_269_layer_call_fn while saving (showing 5 of 10). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as conv2d_270_layer_call_fn, conv2d_270_layer_call_and_return_conditional_losses, re_lu_270_layer_call_fn, re_lu_270_layer_call_and_return_conditional_losses, conv2d_270_layer_call_fn while saving (showing 5 of 10). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as conv2d_271_layer_call_fn, conv2d_271_layer_call_and_return_conditional_losses, re_lu_271_layer_call_fn, re_lu_271_layer_call_and_return_conditional_losses, conv2d_271_layer_call_fn while saving (showing 5 of 10). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as conv2d_272_layer_call_fn, conv2d_272_layer_call_and_return_conditional_losses, re_lu_272_layer_call_fn, re_lu_272_layer_call_and_return_conditional_losses, conv2d_272_layer_call_fn while saving (showing 5 of 10). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as conv2d_273_layer_call_fn, conv2d_273_layer_call_and_return_conditional_losses, re_lu_273_layer_call_fn, re_lu_273_layer_call_and_return_conditional_losses, conv2d_273_layer_call_fn while saving (showing 5 of 10). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as conv2d_274_layer_call_fn, conv2d_274_layer_call_and_return_conditional_losses, re_lu_274_layer_call_fn, re_lu_274_layer_call_and_return_conditional_losses, conv2d_274_layer_call_fn while saving (showing 5 of 10). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as conv2d_275_layer_call_fn, conv2d_275_layer_call_and_return_conditional_losses, re_lu_275_layer_call_fn, re_lu_275_layer_call_and_return_conditional_losses, conv2d_275_layer_call_fn while saving (showing 5 of 10). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as conv2d_276_layer_call_fn, conv2d_276_layer_call_and_return_conditional_losses, re_lu_276_layer_call_fn, re_lu_276_layer_call_and_return_conditional_losses, conv2d_276_layer_call_fn while saving (showing 5 of 10). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as conv2d_277_layer_call_fn, conv2d_277_layer_call_and_return_conditional_losses, re_lu_277_layer_call_fn, re_lu_277_layer_call_and_return_conditional_losses, conv2d_277_layer_call_fn while saving (showing 5 of 10). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as conv2d_278_layer_call_fn, conv2d_278_layer_call_and_return_conditional_losses, re_lu_278_layer_call_fn, re_lu_278_layer_call_and_return_conditional_losses, conv2d_278_layer_call_fn while saving (showing 5 of 10). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as conv2d_279_layer_call_fn, conv2d_279_layer_call_and_return_conditional_losses, re_lu_279_layer_call_fn, re_lu_279_layer_call_and_return_conditional_losses, conv2d_279_layer_call_fn while saving (showing 5 of 10). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as conv2d_280_layer_call_fn, conv2d_280_layer_call_and_return_conditional_losses, re_lu_280_layer_call_fn, re_lu_280_layer_call_and_return_conditional_losses, conv2d_280_layer_call_fn while saving (showing 5 of 10). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as conv2d_281_layer_call_fn, conv2d_281_layer_call_and_return_conditional_losses, re_lu_281_layer_call_fn, re_lu_281_layer_call_and_return_conditional_losses, conv2d_281_layer_call_fn while saving (showing 5 of 10). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as conv2d_282_layer_call_fn, conv2d_282_layer_call_and_return_conditional_losses, re_lu_282_layer_call_fn, re_lu_282_layer_call_and_return_conditional_losses, conv2d_282_layer_call_fn while saving (showing 5 of 10). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as conv2d_283_layer_call_fn, conv2d_283_layer_call_and_return_conditional_losses, re_lu_283_layer_call_fn, re_lu_283_layer_call_and_return_conditional_losses, conv2d_283_layer_call_fn while saving (showing 5 of 10). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as conv2d_284_layer_call_fn, conv2d_284_layer_call_and_return_conditional_losses, re_lu_284_layer_call_fn, re_lu_284_layer_call_and_return_conditional_losses, conv2d_284_layer_call_fn while saving (showing 5 of 10). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as conv2d_285_layer_call_fn, conv2d_285_layer_call_and_return_conditional_losses, re_lu_285_layer_call_fn, re_lu_285_layer_call_and_return_conditional_losses, conv2d_285_layer_call_fn while saving (showing 5 of 10). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as conv2d_286_layer_call_fn, conv2d_286_layer_call_and_return_conditional_losses, re_lu_286_layer_call_fn, re_lu_286_layer_call_and_return_conditional_losses, conv2d_286_layer_call_fn while saving (showing 5 of 10). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as conv2d_287_layer_call_fn, conv2d_287_layer_call_and_return_conditional_losses, re_lu_287_layer_call_fn, re_lu_287_layer_call_and_return_conditional_losses, conv2d_287_layer_call_fn while saving (showing 5 of 10). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as conv2d_288_layer_call_fn, conv2d_288_layer_call_and_return_conditional_losses, re_lu_288_layer_call_fn, re_lu_288_layer_call_and_return_conditional_losses, conv2d_288_layer_call_fn while saving (showing 5 of 10). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as conv2d_289_layer_call_fn, conv2d_289_layer_call_and_return_conditional_losses, re_lu_289_layer_call_fn, re_lu_289_layer_call_and_return_conditional_losses, conv2d_289_layer_call_fn while saving (showing 5 of 10). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as conv2d_290_layer_call_fn, conv2d_290_layer_call_and_return_conditional_losses, re_lu_290_layer_call_fn, re_lu_290_layer_call_and_return_conditional_losses, conv2d_290_layer_call_fn while saving (showing 5 of 10). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as conv2d_291_layer_call_fn, conv2d_291_layer_call_and_return_conditional_losses, re_lu_291_layer_call_fn, re_lu_291_layer_call_and_return_conditional_losses, conv2d_291_layer_call_fn while saving (showing 5 of 10). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as conv2d_292_layer_call_fn, conv2d_292_layer_call_and_return_conditional_losses, re_lu_292_layer_call_fn, re_lu_292_layer_call_and_return_conditional_losses, conv2d_292_layer_call_fn while saving (showing 5 of 10). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as conv2d_293_layer_call_fn, conv2d_293_layer_call_and_return_conditional_losses, re_lu_293_layer_call_fn, re_lu_293_layer_call_and_return_conditional_losses, conv2d_293_layer_call_fn while saving (showing 5 of 10). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as conv2d_294_layer_call_fn, conv2d_294_layer_call_and_return_conditional_losses, re_lu_294_layer_call_fn, re_lu_294_layer_call_and_return_conditional_losses, conv2d_294_layer_call_fn while saving (showing 5 of 10). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as conv2d_295_layer_call_fn, conv2d_295_layer_call_and_return_conditional_losses, re_lu_295_layer_call_fn, re_lu_295_layer_call_and_return_conditional_losses, conv2d_295_layer_call_fn while saving (showing 5 of 10). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as conv2d_296_layer_call_fn, conv2d_296_layer_call_and_return_conditional_losses, re_lu_296_layer_call_fn, re_lu_296_layer_call_and_return_conditional_losses, conv2d_296_layer_call_fn while saving (showing 5 of 10). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as conv2d_297_layer_call_fn, conv2d_297_layer_call_and_return_conditional_losses, re_lu_297_layer_call_fn, re_lu_297_layer_call_and_return_conditional_losses, conv2d_297_layer_call_fn while saving (showing 5 of 10). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as conv2d_298_layer_call_fn, conv2d_298_layer_call_and_return_conditional_losses, re_lu_298_layer_call_fn, re_lu_298_layer_call_and_return_conditional_losses, conv2d_298_layer_call_fn while saving (showing 5 of 10). These functions will not be directly callable after loading.\n"
|
||||
]
}
],
"source": [
"# build latency predictor for kernel\n",
"from nn_meter.builder import build_predictor_for_kernel\n",
"kernel_type = \"conv_bn_relu\"\n",
"backend = \"tflite_cpu\"\n",
"\n",
"predictor, data = build_predictor_for_kernel(\n",
"    kernel_type, backend, init_sample_num = 10, finegrained_sample_num = 10, iteration = 5, error_threshold = 0.1\n",
")"
]
}
],
"metadata": {
"interpreter": {
"hash": "b1e6394117d1f9946b933e0217b031866a6d7d9d3d678e5971126c2dcc137dfa"
},
"kernelspec": {
"display_name": "Python 3.6.10 64-bit ('py36-Jiahang': conda)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.10"
},
"orig_nbformat": 4
},
"nbformat": 4,
"nbformat_minor": 2
}

@@ -0,0 +1,489 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
"import warnings\n",
"from silence_tensorflow import silence_tensorflow\n",
"warnings.filterwarnings('ignore')\n",
"silence_tensorflow()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"The first step to run the fusion rule tester is to prepare a backend and create a workspace for the experiment. Users could create a workspace folder by running the following command:\n",
"\n",
"``` Bash\n",
"# for TFLite platform\n",
"nn-meter create --tflite-workspace <path/to/place/workspace/>\n",
"\n",
"# for OpenVINO platform\n",
"nn-meter create --openvino-workspace <path/to/place/workspace/>\n",
"\n",
"# for customized platform\n",
"nn-meter create --customized-workspace <backend-name> <path/to/place/workspace/>\n",
"```\n",
"\n",
"After running the command, a workspace folder will be created, and two yaml files named `backend_config.yaml` and `ruletest_config.yaml`, i.e., the configuration files for the backend and the fusion rule tester respectively, will be placed in `<workspace-path>/configs/`. Users could edit the content to change the configuration. The config will take effect after the config file is saved and closed.\n",
"\n",
"After creating the workspace and completing the configuration, users could initialize the workspace via the `builder_config` module for experiments:"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"# initialize workspace in code\n",
"workspace_path = \"~/working/tftest/\" # set the path to the workspace folder; refer to ./backend.md for further information.\n",
"from nn_meter.builder import builder_config\n",
"builder_config.init(workspace_path)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"After creating the workspace, a yaml file named `ruletest_config.yaml` will be placed in `<workspace-path>/configs/`. Users could open `<workspace-path>/configs/ruletest_config.yaml` and edit the content. The config will take effect after the config file is saved and closed.\n",
"\n",
"Following the configuration in `<workspace-path>/configs/ruletest_config.yaml`, the test cases can be created by running:"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"WARNING:absl:Found untraced functions such as conv2d_14_layer_call_fn, conv2d_14_layer_call_and_return_conditional_losses, conv2d_15_layer_call_fn, conv2d_15_layer_call_and_return_conditional_losses, conv2d_14_layer_call_fn while saving (showing 5 of 10). These functions will not be directly callable after loading.\n",
"WARNING:absl:Found untraced functions such as conv2d_14_layer_call_fn, conv2d_14_layer_call_and_return_conditional_losses, conv2d_15_layer_call_fn, conv2d_15_layer_call_and_return_conditional_losses, conv2d_14_layer_call_fn while saving (showing 5 of 10). These functions will not be directly callable after loading.\n",
"WARNING:absl:Found untraced functions such as conv2d_20_layer_call_fn, conv2d_20_layer_call_and_return_conditional_losses, conv2d_21_layer_call_fn, conv2d_21_layer_call_and_return_conditional_losses, conv2d_20_layer_call_fn while saving (showing 5 of 10). These functions will not be directly callable after loading.\n",
"WARNING:absl:Found untraced functions such as conv2d_20_layer_call_fn, conv2d_20_layer_call_and_return_conditional_losses, conv2d_21_layer_call_fn, conv2d_21_layer_call_and_return_conditional_losses, conv2d_20_layer_call_fn while saving (showing 5 of 10). These functions will not be directly callable after loading.\n"
]
}
],
"source": [
"# generate testcases\n",
"from nn_meter.builder.backend_meta.fusion_rule_tester import generate_testcases\n",
"origin_testcases = generate_testcases()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"The test case models will be saved in `<workspace-path>/ruletest_testcases/`, and the test case dictionary will be saved in `<workspace-path>/results/origin_testcases.json`.\n",
"\n",
"If the test cases have been created before, users could directly use the generated json file of test cases to avoid creating them again:"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"origin_testcases = os.path.join(workspace_path, \"fusion_rule_test\", \"results\", \"origin_testcases.json\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"The next step is to run and profile the test cases on the backend. Given the required backend, users could run the test case models and get the profiled latency values by running:\n",
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"WARNING:absl:Function `_wrapped_model` contains input name(s) inputs/0, inputs/1 with unsupported characters which will be renamed to inputs_0, inputs_1 in the SavedModel.\n",
"WARNING:absl:Found untraced functions such as add_layer_call_fn, add_layer_call_and_return_conditional_losses, add_layer_call_fn, add_layer_call_and_return_conditional_losses, add_layer_call_and_return_conditional_losses while saving (showing 5 of 5). These functions will not be directly callable after loading.\n",
... (similar input-renaming and untraced-function warnings are repeated while saving the remaining add, concatenate, conv2d, dense, and re_lu test case models) ...
|
||||
"WARNING:absl:Found untraced functions such as concatenate_12_layer_call_fn, concatenate_12_layer_call_and_return_conditional_losses, concatenate_12_layer_call_fn, concatenate_12_layer_call_and_return_conditional_losses, concatenate_12_layer_call_and_return_conditional_losses while saving (showing 5 of 5). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Function `_wrapped_model` contains input name(s) inputs/0, inputs/1 with unsupported characters which will be renamed to inputs_0, inputs_1 in the SavedModel.\n",
|
||||
"WARNING:absl:Found untraced functions such as conv2d_5_layer_call_fn, conv2d_5_layer_call_and_return_conditional_losses, concatenate_12_layer_call_fn, concatenate_12_layer_call_and_return_conditional_losses, conv2d_5_layer_call_fn while saving (showing 5 of 10). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as conv2d_6_layer_call_fn, conv2d_6_layer_call_and_return_conditional_losses, conv2d_6_layer_call_fn, conv2d_6_layer_call_and_return_conditional_losses, conv2d_6_layer_call_and_return_conditional_losses while saving (showing 5 of 5). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as conv2d_7_layer_call_fn, conv2d_7_layer_call_and_return_conditional_losses, conv2d_7_layer_call_fn, conv2d_7_layer_call_and_return_conditional_losses, conv2d_7_layer_call_and_return_conditional_losses while saving (showing 5 of 5). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as conv2d_6_layer_call_fn, conv2d_6_layer_call_and_return_conditional_losses, conv2d_7_layer_call_fn, conv2d_7_layer_call_and_return_conditional_losses, conv2d_6_layer_call_fn while saving (showing 5 of 10). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as conv2d_8_layer_call_fn, conv2d_8_layer_call_and_return_conditional_losses, conv2d_8_layer_call_fn, conv2d_8_layer_call_and_return_conditional_losses, conv2d_8_layer_call_and_return_conditional_losses while saving (showing 5 of 5). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as conv2d_8_layer_call_fn, conv2d_8_layer_call_and_return_conditional_losses, conv2d_8_layer_call_fn, conv2d_8_layer_call_and_return_conditional_losses, conv2d_8_layer_call_and_return_conditional_losses while saving (showing 5 of 5). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as conv2d_9_layer_call_fn, conv2d_9_layer_call_and_return_conditional_losses, conv2d_9_layer_call_fn, conv2d_9_layer_call_and_return_conditional_losses, conv2d_9_layer_call_and_return_conditional_losses while saving (showing 5 of 5). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as conv2d_9_layer_call_fn, conv2d_9_layer_call_and_return_conditional_losses, conv2d_9_layer_call_fn, conv2d_9_layer_call_and_return_conditional_losses, conv2d_9_layer_call_and_return_conditional_losses while saving (showing 5 of 5). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as conv2d_10_layer_call_fn, conv2d_10_layer_call_and_return_conditional_losses, conv2d_10_layer_call_fn, conv2d_10_layer_call_and_return_conditional_losses, conv2d_10_layer_call_and_return_conditional_losses while saving (showing 5 of 5). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as conv2d_10_layer_call_fn, conv2d_10_layer_call_and_return_conditional_losses, conv2d_10_layer_call_fn, conv2d_10_layer_call_and_return_conditional_losses, conv2d_10_layer_call_and_return_conditional_losses while saving (showing 5 of 5). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as conv2d_11_layer_call_fn, conv2d_11_layer_call_and_return_conditional_losses, conv2d_11_layer_call_fn, conv2d_11_layer_call_and_return_conditional_losses, conv2d_11_layer_call_and_return_conditional_losses while saving (showing 5 of 5). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as re_lu_3_layer_call_fn, re_lu_3_layer_call_and_return_conditional_losses, re_lu_3_layer_call_fn, re_lu_3_layer_call_and_return_conditional_losses, re_lu_3_layer_call_and_return_conditional_losses while saving (showing 5 of 5). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as conv2d_11_layer_call_fn, conv2d_11_layer_call_and_return_conditional_losses, re_lu_3_layer_call_fn, re_lu_3_layer_call_and_return_conditional_losses, conv2d_11_layer_call_fn while saving (showing 5 of 10). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as conv2d_12_layer_call_fn, conv2d_12_layer_call_and_return_conditional_losses, conv2d_12_layer_call_fn, conv2d_12_layer_call_and_return_conditional_losses, conv2d_12_layer_call_and_return_conditional_losses while saving (showing 5 of 5). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as conv2d_12_layer_call_fn, conv2d_12_layer_call_and_return_conditional_losses, conv2d_12_layer_call_fn, conv2d_12_layer_call_and_return_conditional_losses, conv2d_12_layer_call_and_return_conditional_losses while saving (showing 5 of 5). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as conv2d_13_layer_call_fn, conv2d_13_layer_call_and_return_conditional_losses, conv2d_13_layer_call_fn, conv2d_13_layer_call_and_return_conditional_losses, conv2d_13_layer_call_and_return_conditional_losses while saving (showing 5 of 5). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as conv2d_14_layer_call_fn, conv2d_14_layer_call_and_return_conditional_losses, conv2d_15_layer_call_fn, conv2d_15_layer_call_and_return_conditional_losses, conv2d_14_layer_call_fn while saving (showing 5 of 10). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as conv2d_13_layer_call_fn, conv2d_13_layer_call_and_return_conditional_losses, conv2d_13_layer_call_fn, conv2d_13_layer_call_and_return_conditional_losses, conv2d_13_layer_call_and_return_conditional_losses while saving (showing 5 of 15). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Function `_wrapped_model` contains input name(s) inputs/0, inputs/1 with unsupported characters which will be renamed to inputs_0, inputs_1 in the SavedModel.\n",
|
||||
"WARNING:absl:Found untraced functions such as add_13_layer_call_fn, add_13_layer_call_and_return_conditional_losses, add_13_layer_call_fn, add_13_layer_call_and_return_conditional_losses, add_13_layer_call_and_return_conditional_losses while saving (showing 5 of 5). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Function `_wrapped_model` contains input name(s) inputs/0, inputs/1 with unsupported characters which will be renamed to inputs_0, inputs_1 in the SavedModel.\n",
|
||||
"WARNING:absl:Found untraced functions such as add_13_layer_call_fn, add_13_layer_call_and_return_conditional_losses, add_13_layer_call_fn, add_13_layer_call_and_return_conditional_losses, add_13_layer_call_and_return_conditional_losses while saving (showing 5 of 5). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Function `_wrapped_model` contains input name(s) inputs/0, inputs/1 with unsupported characters which will be renamed to inputs_0, inputs_1 in the SavedModel.\n",
|
||||
"WARNING:absl:Found untraced functions such as concatenate_13_layer_call_fn, concatenate_13_layer_call_and_return_conditional_losses, concatenate_13_layer_call_fn, concatenate_13_layer_call_and_return_conditional_losses, concatenate_13_layer_call_and_return_conditional_losses while saving (showing 5 of 5). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Function `_wrapped_model` contains input name(s) inputs/0, inputs/1 with unsupported characters which will be renamed to inputs_0, inputs_1 in the SavedModel.\n",
|
||||
"WARNING:absl:Found untraced functions such as concatenate_13_layer_call_fn, concatenate_13_layer_call_and_return_conditional_losses, concatenate_13_layer_call_fn, concatenate_13_layer_call_and_return_conditional_losses, concatenate_13_layer_call_and_return_conditional_losses while saving (showing 5 of 5). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as conv2d_16_layer_call_fn, conv2d_16_layer_call_and_return_conditional_losses, conv2d_16_layer_call_fn, conv2d_16_layer_call_and_return_conditional_losses, conv2d_16_layer_call_and_return_conditional_losses while saving (showing 5 of 5). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as conv2d_16_layer_call_fn, conv2d_16_layer_call_and_return_conditional_losses, conv2d_16_layer_call_fn, conv2d_16_layer_call_and_return_conditional_losses, conv2d_16_layer_call_and_return_conditional_losses while saving (showing 5 of 5). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as re_lu_4_layer_call_fn, re_lu_4_layer_call_and_return_conditional_losses, re_lu_4_layer_call_fn, re_lu_4_layer_call_and_return_conditional_losses, re_lu_4_layer_call_and_return_conditional_losses while saving (showing 5 of 5). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as re_lu_4_layer_call_fn, re_lu_4_layer_call_and_return_conditional_losses, re_lu_4_layer_call_fn, re_lu_4_layer_call_and_return_conditional_losses, re_lu_4_layer_call_and_return_conditional_losses while saving (showing 5 of 5). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as dense_2_layer_call_fn, dense_2_layer_call_and_return_conditional_losses, dense_2_layer_call_fn, dense_2_layer_call_and_return_conditional_losses, dense_2_layer_call_and_return_conditional_losses while saving (showing 5 of 5). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Function `_wrapped_model` contains input name(s) inputs/0, inputs/1 with unsupported characters which will be renamed to inputs_0, inputs_1 in the SavedModel.\n",
|
||||
"WARNING:absl:Found untraced functions such as add_14_layer_call_fn, add_14_layer_call_and_return_conditional_losses, add_14_layer_call_fn, add_14_layer_call_and_return_conditional_losses, add_14_layer_call_and_return_conditional_losses while saving (showing 5 of 5). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Function `_wrapped_model` contains input name(s) inputs/0, inputs/1 with unsupported characters which will be renamed to inputs_0, inputs_1 in the SavedModel.\n",
|
||||
"WARNING:absl:Found untraced functions such as dense_2_layer_call_fn, dense_2_layer_call_and_return_conditional_losses, add_14_layer_call_fn, add_14_layer_call_and_return_conditional_losses, dense_2_layer_call_fn while saving (showing 5 of 10). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as dense_3_layer_call_fn, dense_3_layer_call_and_return_conditional_losses, dense_3_layer_call_fn, dense_3_layer_call_and_return_conditional_losses, dense_3_layer_call_and_return_conditional_losses while saving (showing 5 of 5). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Function `_wrapped_model` contains input name(s) inputs/0, inputs/1 with unsupported characters which will be renamed to inputs_0, inputs_1 in the SavedModel.\n",
|
||||
"WARNING:absl:Found untraced functions such as concatenate_14_layer_call_fn, concatenate_14_layer_call_and_return_conditional_losses, concatenate_14_layer_call_fn, concatenate_14_layer_call_and_return_conditional_losses, concatenate_14_layer_call_and_return_conditional_losses while saving (showing 5 of 5). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Function `_wrapped_model` contains input name(s) inputs/0, inputs/1 with unsupported characters which will be renamed to inputs_0, inputs_1 in the SavedModel.\n",
|
||||
"WARNING:absl:Found untraced functions such as dense_3_layer_call_fn, dense_3_layer_call_and_return_conditional_losses, concatenate_14_layer_call_fn, concatenate_14_layer_call_and_return_conditional_losses, dense_3_layer_call_fn while saving (showing 5 of 10). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as dense_4_layer_call_fn, dense_4_layer_call_and_return_conditional_losses, dense_4_layer_call_fn, dense_4_layer_call_and_return_conditional_losses, dense_4_layer_call_and_return_conditional_losses while saving (showing 5 of 5). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as dense_5_layer_call_fn, dense_5_layer_call_and_return_conditional_losses, dense_5_layer_call_fn, dense_5_layer_call_and_return_conditional_losses, dense_5_layer_call_and_return_conditional_losses while saving (showing 5 of 5). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as dense_4_layer_call_fn, dense_4_layer_call_and_return_conditional_losses, dense_5_layer_call_fn, dense_5_layer_call_and_return_conditional_losses, dense_4_layer_call_fn while saving (showing 5 of 10). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as dense_6_layer_call_fn, dense_6_layer_call_and_return_conditional_losses, dense_6_layer_call_fn, dense_6_layer_call_and_return_conditional_losses, dense_6_layer_call_and_return_conditional_losses while saving (showing 5 of 5). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as re_lu_5_layer_call_fn, re_lu_5_layer_call_and_return_conditional_losses, re_lu_5_layer_call_fn, re_lu_5_layer_call_and_return_conditional_losses, re_lu_5_layer_call_and_return_conditional_losses while saving (showing 5 of 5). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as dense_6_layer_call_fn, dense_6_layer_call_and_return_conditional_losses, re_lu_5_layer_call_fn, re_lu_5_layer_call_and_return_conditional_losses, dense_6_layer_call_fn while saving (showing 5 of 10). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Function `_wrapped_model` contains input name(s) inputs/0, inputs/1 with unsupported characters which will be renamed to inputs_0, inputs_1 in the SavedModel.\n",
|
||||
"WARNING:absl:Found untraced functions such as add_15_layer_call_fn, add_15_layer_call_and_return_conditional_losses, add_15_layer_call_fn, add_15_layer_call_and_return_conditional_losses, add_15_layer_call_and_return_conditional_losses while saving (showing 5 of 5). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Function `_wrapped_model` contains input name(s) inputs/0, inputs/1 with unsupported characters which will be renamed to inputs_0, inputs_1 in the SavedModel.\n",
|
||||
"WARNING:absl:Found untraced functions such as add_15_layer_call_fn, add_15_layer_call_and_return_conditional_losses, add_15_layer_call_fn, add_15_layer_call_and_return_conditional_losses, add_15_layer_call_and_return_conditional_losses while saving (showing 5 of 5). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Function `_wrapped_model` contains input name(s) inputs/0, inputs/1 with unsupported characters which will be renamed to inputs_0, inputs_1 in the SavedModel.\n",
|
||||
"WARNING:absl:Found untraced functions such as concatenate_15_layer_call_fn, concatenate_15_layer_call_and_return_conditional_losses, concatenate_15_layer_call_fn, concatenate_15_layer_call_and_return_conditional_losses, concatenate_15_layer_call_and_return_conditional_losses while saving (showing 5 of 5). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Function `_wrapped_model` contains input name(s) inputs/0, inputs/1 with unsupported characters which will be renamed to inputs_0, inputs_1 in the SavedModel.\n",
|
||||
"WARNING:absl:Found untraced functions such as concatenate_15_layer_call_fn, concatenate_15_layer_call_and_return_conditional_losses, concatenate_15_layer_call_fn, concatenate_15_layer_call_and_return_conditional_losses, concatenate_15_layer_call_and_return_conditional_losses while saving (showing 5 of 5). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as conv2d_17_layer_call_fn, conv2d_17_layer_call_and_return_conditional_losses, conv2d_17_layer_call_fn, conv2d_17_layer_call_and_return_conditional_losses, conv2d_17_layer_call_and_return_conditional_losses while saving (showing 5 of 5). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as conv2d_17_layer_call_fn, conv2d_17_layer_call_and_return_conditional_losses, conv2d_17_layer_call_fn, conv2d_17_layer_call_and_return_conditional_losses, conv2d_17_layer_call_and_return_conditional_losses while saving (showing 5 of 5). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as re_lu_6_layer_call_fn, re_lu_6_layer_call_and_return_conditional_losses, re_lu_6_layer_call_fn, re_lu_6_layer_call_and_return_conditional_losses, re_lu_6_layer_call_and_return_conditional_losses while saving (showing 5 of 5). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as re_lu_6_layer_call_fn, re_lu_6_layer_call_and_return_conditional_losses, re_lu_6_layer_call_fn, re_lu_6_layer_call_and_return_conditional_losses, re_lu_6_layer_call_and_return_conditional_losses while saving (showing 5 of 5). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as re_lu_7_layer_call_fn, re_lu_7_layer_call_and_return_conditional_losses, re_lu_7_layer_call_fn, re_lu_7_layer_call_and_return_conditional_losses, re_lu_7_layer_call_and_return_conditional_losses while saving (showing 5 of 5). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Function `_wrapped_model` contains input name(s) inputs/0, inputs/1 with unsupported characters which will be renamed to inputs_0, inputs_1 in the SavedModel.\n",
|
||||
"WARNING:absl:Found untraced functions such as add_16_layer_call_fn, add_16_layer_call_and_return_conditional_losses, add_16_layer_call_fn, add_16_layer_call_and_return_conditional_losses, add_16_layer_call_and_return_conditional_losses while saving (showing 5 of 5). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Function `_wrapped_model` contains input name(s) inputs/0, inputs/1 with unsupported characters which will be renamed to inputs_0, inputs_1 in the SavedModel.\n",
|
||||
"WARNING:absl:Found untraced functions such as re_lu_7_layer_call_fn, re_lu_7_layer_call_and_return_conditional_losses, add_16_layer_call_fn, add_16_layer_call_and_return_conditional_losses, re_lu_7_layer_call_fn while saving (showing 5 of 10). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as re_lu_8_layer_call_fn, re_lu_8_layer_call_and_return_conditional_losses, re_lu_8_layer_call_fn, re_lu_8_layer_call_and_return_conditional_losses, re_lu_8_layer_call_and_return_conditional_losses while saving (showing 5 of 5). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as re_lu_8_layer_call_fn, re_lu_8_layer_call_and_return_conditional_losses, re_lu_8_layer_call_fn, re_lu_8_layer_call_and_return_conditional_losses, re_lu_8_layer_call_and_return_conditional_losses while saving (showing 5 of 5). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as re_lu_9_layer_call_fn, re_lu_9_layer_call_and_return_conditional_losses, re_lu_9_layer_call_fn, re_lu_9_layer_call_and_return_conditional_losses, re_lu_9_layer_call_and_return_conditional_losses while saving (showing 5 of 5). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Function `_wrapped_model` contains input name(s) inputs/0, inputs/1 with unsupported characters which will be renamed to inputs_0, inputs_1 in the SavedModel.\n",
|
||||
"WARNING:absl:Found untraced functions such as concatenate_16_layer_call_fn, concatenate_16_layer_call_and_return_conditional_losses, concatenate_16_layer_call_fn, concatenate_16_layer_call_and_return_conditional_losses, concatenate_16_layer_call_and_return_conditional_losses while saving (showing 5 of 5). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Function `_wrapped_model` contains input name(s) inputs/0, inputs/1 with unsupported characters which will be renamed to inputs_0, inputs_1 in the SavedModel.\n",
|
||||
"WARNING:absl:Found untraced functions such as re_lu_9_layer_call_fn, re_lu_9_layer_call_and_return_conditional_losses, concatenate_16_layer_call_fn, concatenate_16_layer_call_and_return_conditional_losses, re_lu_9_layer_call_fn while saving (showing 5 of 10). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as re_lu_10_layer_call_fn, re_lu_10_layer_call_and_return_conditional_losses, re_lu_10_layer_call_fn, re_lu_10_layer_call_and_return_conditional_losses, re_lu_10_layer_call_and_return_conditional_losses while saving (showing 5 of 5). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as conv2d_18_layer_call_fn, conv2d_18_layer_call_and_return_conditional_losses, conv2d_18_layer_call_fn, conv2d_18_layer_call_and_return_conditional_losses, conv2d_18_layer_call_and_return_conditional_losses while saving (showing 5 of 5). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as re_lu_10_layer_call_fn, re_lu_10_layer_call_and_return_conditional_losses, conv2d_18_layer_call_fn, conv2d_18_layer_call_and_return_conditional_losses, re_lu_10_layer_call_fn while saving (showing 5 of 10). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as re_lu_11_layer_call_fn, re_lu_11_layer_call_and_return_conditional_losses, re_lu_11_layer_call_fn, re_lu_11_layer_call_and_return_conditional_losses, re_lu_11_layer_call_and_return_conditional_losses while saving (showing 5 of 5). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as re_lu_11_layer_call_fn, re_lu_11_layer_call_and_return_conditional_losses, re_lu_11_layer_call_fn, re_lu_11_layer_call_and_return_conditional_losses, re_lu_11_layer_call_and_return_conditional_losses while saving (showing 5 of 5). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as re_lu_12_layer_call_fn, re_lu_12_layer_call_and_return_conditional_losses, re_lu_12_layer_call_fn, re_lu_12_layer_call_and_return_conditional_losses, re_lu_12_layer_call_and_return_conditional_losses while saving (showing 5 of 5). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as dense_7_layer_call_fn, dense_7_layer_call_and_return_conditional_losses, dense_7_layer_call_fn, dense_7_layer_call_and_return_conditional_losses, dense_7_layer_call_and_return_conditional_losses while saving (showing 5 of 5). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as re_lu_12_layer_call_fn, re_lu_12_layer_call_and_return_conditional_losses, dense_7_layer_call_fn, dense_7_layer_call_and_return_conditional_losses, re_lu_12_layer_call_fn while saving (showing 5 of 10). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as re_lu_13_layer_call_fn, re_lu_13_layer_call_and_return_conditional_losses, re_lu_13_layer_call_fn, re_lu_13_layer_call_and_return_conditional_losses, re_lu_13_layer_call_and_return_conditional_losses while saving (showing 5 of 5). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as re_lu_13_layer_call_fn, re_lu_13_layer_call_and_return_conditional_losses, re_lu_13_layer_call_fn, re_lu_13_layer_call_and_return_conditional_losses, re_lu_13_layer_call_and_return_conditional_losses while saving (showing 5 of 5). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as re_lu_14_layer_call_fn, re_lu_14_layer_call_and_return_conditional_losses, re_lu_14_layer_call_fn, re_lu_14_layer_call_and_return_conditional_losses, re_lu_14_layer_call_and_return_conditional_losses while saving (showing 5 of 5). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as re_lu_15_layer_call_fn, re_lu_15_layer_call_and_return_conditional_losses, re_lu_15_layer_call_fn, re_lu_15_layer_call_and_return_conditional_losses, re_lu_15_layer_call_and_return_conditional_losses while saving (showing 5 of 5). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as re_lu_14_layer_call_fn, re_lu_14_layer_call_and_return_conditional_losses, re_lu_15_layer_call_fn, re_lu_15_layer_call_and_return_conditional_losses, re_lu_14_layer_call_fn while saving (showing 5 of 10). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as re_lu_16_layer_call_fn, re_lu_16_layer_call_and_return_conditional_losses, re_lu_16_layer_call_fn, re_lu_16_layer_call_and_return_conditional_losses, re_lu_16_layer_call_and_return_conditional_losses while saving (showing 5 of 5). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as re_lu_16_layer_call_fn, re_lu_16_layer_call_and_return_conditional_losses, re_lu_16_layer_call_fn, re_lu_16_layer_call_and_return_conditional_losses, re_lu_16_layer_call_and_return_conditional_losses while saving (showing 5 of 5). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Function `_wrapped_model` contains input name(s) inputs/0, inputs/1 with unsupported characters which will be renamed to inputs_0, inputs_1 in the SavedModel.\n",
|
||||
"WARNING:absl:Found untraced functions such as add_17_layer_call_fn, add_17_layer_call_and_return_conditional_losses, add_17_layer_call_fn, add_17_layer_call_and_return_conditional_losses, add_17_layer_call_and_return_conditional_losses while saving (showing 5 of 5). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Function `_wrapped_model` contains input name(s) inputs/0, inputs/1 with unsupported characters which will be renamed to inputs_0, inputs_1 in the SavedModel.\n",
|
||||
"WARNING:absl:Found untraced functions such as add_17_layer_call_fn, add_17_layer_call_and_return_conditional_losses, add_17_layer_call_fn, add_17_layer_call_and_return_conditional_losses, add_17_layer_call_and_return_conditional_losses while saving (showing 5 of 5). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Function `_wrapped_model` contains input name(s) inputs/0, inputs/1 with unsupported characters which will be renamed to inputs_0, inputs_1 in the SavedModel.\n",
|
||||
"WARNING:absl:Found untraced functions such as concatenate_17_layer_call_fn, concatenate_17_layer_call_and_return_conditional_losses, concatenate_17_layer_call_fn, concatenate_17_layer_call_and_return_conditional_losses, concatenate_17_layer_call_and_return_conditional_losses while saving (showing 5 of 5). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Function `_wrapped_model` contains input name(s) inputs/0, inputs/1 with unsupported characters which will be renamed to inputs_0, inputs_1 in the SavedModel.\n",
|
||||
"WARNING:absl:Found untraced functions such as concatenate_17_layer_call_fn, concatenate_17_layer_call_and_return_conditional_losses, concatenate_17_layer_call_fn, concatenate_17_layer_call_and_return_conditional_losses, concatenate_17_layer_call_and_return_conditional_losses while saving (showing 5 of 5). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as conv2d_19_layer_call_fn, conv2d_19_layer_call_and_return_conditional_losses, conv2d_19_layer_call_fn, conv2d_19_layer_call_and_return_conditional_losses, conv2d_19_layer_call_and_return_conditional_losses while saving (showing 5 of 5). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as conv2d_19_layer_call_fn, conv2d_19_layer_call_and_return_conditional_losses, conv2d_19_layer_call_fn, conv2d_19_layer_call_and_return_conditional_losses, conv2d_19_layer_call_and_return_conditional_losses while saving (showing 5 of 5). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as re_lu_17_layer_call_fn, re_lu_17_layer_call_and_return_conditional_losses, re_lu_17_layer_call_fn, re_lu_17_layer_call_and_return_conditional_losses, re_lu_17_layer_call_and_return_conditional_losses while saving (showing 5 of 5). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as re_lu_17_layer_call_fn, re_lu_17_layer_call_and_return_conditional_losses, re_lu_17_layer_call_fn, re_lu_17_layer_call_and_return_conditional_losses, re_lu_17_layer_call_and_return_conditional_losses while saving (showing 5 of 5). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as conv2d_20_layer_call_fn, conv2d_20_layer_call_and_return_conditional_losses, conv2d_21_layer_call_fn, conv2d_21_layer_call_and_return_conditional_losses, conv2d_20_layer_call_fn while saving (showing 5 of 10). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as re_lu_18_layer_call_fn, re_lu_18_layer_call_and_return_conditional_losses, re_lu_18_layer_call_fn, re_lu_18_layer_call_and_return_conditional_losses, re_lu_18_layer_call_and_return_conditional_losses while saving (showing 5 of 5). These functions will not be directly callable after loading.\n",
|
||||
"WARNING:absl:Found untraced functions such as re_lu_18_layer_call_fn, re_lu_18_layer_call_and_return_conditional_losses, conv2d_20_layer_call_fn, conv2d_20_layer_call_and_return_conditional_losses, conv2d_21_layer_call_fn while saving (showing 5 of 15). These functions will not be directly callable after loading.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# connect to backend\n",
|
||||
"from nn_meter.builder.backends import connect_backend\n",
|
||||
"backend = connect_backend(backend='tflite_cpu')\n",
|
||||
"\n",
|
||||
"# run testcases and collect profiling results\n",
|
||||
"from nn_meter.builder import profile_models\n",
|
||||
"profiled_results = profile_models(backend, origin_testcases, mode='ruletest')\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"`backend` refers to the framework of the platform and device to execute the model. Currently we provide three instance on two platforms, i.e., CPU backend, GPU backend with TFLite platform, and VPU backend with OpenVINO platform. Refer to [backend guidance](../docs/builder/backend.md) for how to setup the device and get connection to the backend. In case having run test cases before, user could also use test case json file to avoid running test cases again:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"profiled_results = os.path.join(workspace_path, \"fusion_rule_test\", \"results\", \"profiled_results.json\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Finally, users could detect the fusion rule according to profiled test cases by running the following code. After running `detect_fusion_rule`, a json file named `<workspace-path>/results/detected_testcases.json` will be created as the detection result. The result shows each test case obeys the fusion rule or not."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# determine fusion rules from profiling results\n",
|
||||
"from nn_meter.builder.backend_meta.fusion_rule_tester import detect_fusion_rule\n",
|
||||
"detected_results = detect_fusion_rule(profiled_results)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"For block [BF_se_relu], the kernel fusion is detected as [True]\n",
|
||||
"For block [BF_reshape_reshape], the kernel fusion is detected as [True]\n",
|
||||
"For block [BF_reshape_relu], the kernel fusion is detected as [False]\n",
|
||||
"For block [BF_reshape_dwconv], the kernel fusion is detected as [False]\n",
|
||||
"For block [BF_reshape_convtrans], the kernel fusion is detected as [False]\n",
|
||||
"For block [BF_reshape_conv], the kernel fusion is detected as [False]\n",
|
||||
"For block [BF_reshape_concat], the kernel fusion is detected as [False]\n",
|
||||
"For block [BF_reshape_avgpool], the kernel fusion is detected as [False]\n",
|
||||
"For block [BF_reshape_add], the kernel fusion is detected as [False]\n",
|
||||
"For block [BF_relu_reshape], the kernel fusion is detected as [False]\n",
|
||||
"For block [BF_relu_relu], the kernel fusion is detected as [True]\n",
|
||||
"For block [BF_relu_dwconv], the kernel fusion is detected as [False]\n",
|
||||
"For block [BF_relu_dense], the kernel fusion is detected as [True]\n",
|
||||
"For block [BF_relu_convtrans], the kernel fusion is detected as [False]\n",
|
||||
"For block [BF_relu_conv], the kernel fusion is detected as [True]\n",
|
||||
"For block [BF_relu_concat], the kernel fusion is detected as [False]\n",
|
||||
"For block [BF_relu_avgpool], the kernel fusion is detected as [False]\n",
|
||||
"For block [BF_relu_add], the kernel fusion is detected as [False]\n",
|
||||
"For block [BF_dwconv_reshape], the kernel fusion is detected as [False]\n",
|
||||
"For block [BF_dwconv_relu], the kernel fusion is detected as [True]\n",
|
||||
"For block [MON], the kernel fusion is detected as [case1]\n",
|
||||
"For block [RT], the kernel fusion is detected as [False]\n",
|
||||
"For block [BF_dwconv_dwconv], the kernel fusion is detected as [False]\n",
|
||||
"For block [BF_dwconv_convtrans], the kernel fusion is detected as [False]\n",
|
||||
"For block [BF_dwconv_conv], the kernel fusion is detected as [False]\n",
|
||||
"For block [BF_dwconv_concat], the kernel fusion is detected as [False]\n",
|
||||
"For block [BF_dwconv_avgpool], the kernel fusion is detected as [False]\n",
|
||||
"For block [BF_dwconv_add], the kernel fusion is detected as [False]\n",
|
||||
"For block [BF_dense_relu], the kernel fusion is detected as [True]\n",
|
||||
"For block [BF_dense_dense], the kernel fusion is detected as [False]\n",
|
||||
"For block [BF_dense_concat], the kernel fusion is detected as [False]\n",
|
||||
"For block [BF_dense_add], the kernel fusion is detected as [False]\n",
|
||||
"For block [BF_convtrans_reshape], the kernel fusion is detected as [False]\n",
|
||||
"For block [BF_convtrans_relu], the kernel fusion is detected as [False]\n",
|
||||
"For block [BF_convtrans_dwconv], the kernel fusion is detected as [False]\n",
|
||||
"For block [BF_convtrans_convtrans], the kernel fusion is detected as [False]\n",
|
||||
"For block [BF_convtrans_conv], the kernel fusion is detected as [False]\n",
|
||||
"For block [BF_convtrans_concat], the kernel fusion is detected as [False]\n",
|
||||
"For block [BF_convtrans_avgpool], the kernel fusion is detected as [False]\n",
|
||||
"For block [BF_convtrans_add], the kernel fusion is detected as [False]\n",
|
||||
"For block [BF_conv_se], the kernel fusion is detected as [True]\n",
|
||||
"For block [BF_conv_reshape], the kernel fusion is detected as [True]\n",
|
||||
"For block [BF_conv_relu], the kernel fusion is detected as [False]\n",
|
||||
"For block [BF_conv_hswish], the kernel fusion is detected as [False]\n",
|
||||
"For block [BF_conv_dwconv], the kernel fusion is detected as [True]\n",
|
||||
"For block [BF_conv_convtrans], the kernel fusion is detected as [False]\n",
|
||||
"For block [BF_conv_conv], the kernel fusion is detected as [False]\n",
|
||||
"For block [BF_conv_concat], the kernel fusion is detected as [False]\n",
|
||||
"For block [BF_conv_avgpool], the kernel fusion is detected as [True]\n",
|
||||
"For block [BF_conv_add], the kernel fusion is detected as [True]\n",
|
||||
"For block [BF_concat_reshape], the kernel fusion is detected as [False]\n",
|
||||
"For block [BF_concat_relu], the kernel fusion is detected as [False]\n",
|
||||
"For block [BF_concat_dwconv], the kernel fusion is detected as [False]\n",
|
||||
"For block [BF_concat_dense], the kernel fusion is detected as [True]\n",
|
||||
"For block [BF_concat_convtrans], the kernel fusion is detected as [False]\n",
|
||||
"For block [BF_concat_conv], the kernel fusion is detected as [False]\n",
|
||||
"For block [BF_concat_concat], the kernel fusion is detected as [False]\n",
|
||||
"For block [BF_concat_avgpool], the kernel fusion is detected as [False]\n",
|
||||
"For block [BF_concat_add], the kernel fusion is detected as [False]\n",
|
||||
"For block [BF_avgpool_reshape], the kernel fusion is detected as [False]\n",
|
||||
"For block [BF_avgpool_relu], the kernel fusion is detected as [True]\n",
|
||||
"For block [BF_avgpool_dwconv], the kernel fusion is detected as [False]\n",
|
||||
"For block [BF_avgpool_convtrans], the kernel fusion is detected as [False]\n",
|
||||
"For block [BF_avgpool_conv], the kernel fusion is detected as [False]\n",
|
||||
"For block [BF_avgpool_concat], the kernel fusion is detected as [False]\n",
|
||||
"For block [BF_avgpool_avgpool], the kernel fusion is detected as [False]\n",
|
||||
"For block [BF_avgpool_add], the kernel fusion is detected as [False]\n",
|
||||
"For block [BF_add_reshape], the kernel fusion is detected as [False]\n",
|
||||
"For block [BF_add_relu], the kernel fusion is detected as [True]\n",
|
||||
"For block [BF_add_dwconv], the kernel fusion is detected as [False]\n",
|
||||
"For block [BF_add_dense], the kernel fusion is detected as [False]\n",
|
||||
"For block [BF_add_convtrans], the kernel fusion is detected as [False]\n",
|
||||
"For block [BF_add_conv], the kernel fusion is detected as [True]\n",
|
||||
"For block [BF_add_concat], the kernel fusion is detected as [False]\n",
|
||||
"For block [BF_add_avgpool], the kernel fusion is detected as [False]\n",
|
||||
"For block [BF_add_add], the kernel fusion is detected as [False]\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"for k, v in detected_results.items():\n",
|
||||
" print(f\"For block [{k}], the kernel fusion is detected as [{v['obey']}]\")"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"interpreter": {
|
||||
"hash": "e8b26d4e88bfd67f05264bddcd3043513b547d3c68498482ebbb5473451a395f"
|
||||
},
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3.6.10 64-bit ('py36-Jiahang': conda)",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.6.10"
|
||||
},
|
||||
"orig_nbformat": 4
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
|
@ -7,7 +7,6 @@ try:
|
|||
except ModuleNotFoundError:
|
||||
__version__ = 'UNKNOWN'
|
||||
|
||||
import logging
|
||||
from functools import partial, partialmethod
|
||||
|
||||
from .predictor import (
|
||||
|
@ -22,12 +21,14 @@ from .ir_converter import (
|
|||
)
|
||||
from .utils import (
|
||||
create_user_configs,
|
||||
change_user_data_folder
|
||||
change_user_data_folder,
|
||||
download_from_url
|
||||
)
|
||||
from .dataset import bench_dataset
|
||||
from .utils import download_from_url
|
||||
|
||||
|
||||
# setup logging
|
||||
import sys
|
||||
import logging
|
||||
logging.KEYINFO = 22
|
||||
logging.addLevelName(logging.KEYINFO, 'KEYINFO')
|
||||
logging.Logger.keyinfo = partialmethod(logging.Logger.log, logging.KEYINFO)
|
||||
|
@ -37,3 +38,11 @@ logging.RESULT = 25
|
|||
logging.addLevelName(logging.RESULT, 'RESULT')
|
||||
logging.Logger.result = partialmethod(logging.Logger.log, logging.RESULT)
|
||||
logging.result = partial(logging.log, logging.RESULT)
|
||||
|
||||
from logging import Formatter, StreamHandler
|
||||
handler = StreamHandler(sys.stdout)
|
||||
handler.setFormatter(Formatter("(nn-Meter) %(message)s"))
|
||||
logger = logging.getLogger("nn-Meter")
|
||||
logger.addHandler(handler)
|
||||
logger.setLevel(logging.INFO)
|
||||
logger.propagate = False
|
|
@ -0,0 +1,9 @@
|
|||
# Copyright (c) Microsoft Corporation.
|
||||
# Licensed under the MIT license.
|
||||
from .config_manager import builder_config
|
||||
from .nn_meter_builder import (
|
||||
convert_models,
|
||||
profile_models,
|
||||
build_predictor_for_kernel,
|
||||
build_latency_predictor
|
||||
)
|
|
@ -0,0 +1,4 @@
|
|||
# Copyright (c) Microsoft Corporation.
|
||||
# Licensed under the MIT license.
|
||||
from .interface import generate_testcases, detect_fusion_rule
|
||||
from .generate_testcase import BaseTestCase
|
|
@ -0,0 +1,35 @@
|
|||
# Copyright (c) Microsoft Corporation.
|
||||
# Licensed under the MIT license.
|
||||
import tensorflow as tf
|
||||
|
||||
|
||||
class SingleOpModel(tf.keras.Model):
|
||||
def __init__(self, op):
|
||||
super().__init__()
|
||||
self.op = op
|
||||
|
||||
def call(self, inputs):
|
||||
return self.op(inputs)
|
||||
|
||||
|
||||
class TwoOpModel(tf.keras.Model):
|
||||
def __init__(self, op1, op2, op1_is_two_inputs, op2_is_two_inputs):
|
||||
super().__init__()
|
||||
self.op1 = op1
|
||||
self.op2 = op2
|
||||
self.op1_is_two_inputs = op1_is_two_inputs
|
||||
self.op2_is_two_inputs = op2_is_two_inputs
|
||||
|
||||
def call(self, inputs):
|
||||
if self.op1_is_two_inputs:
|
||||
x = self.op1([inputs[0], inputs[1]])
|
||||
else:
|
||||
if self.op2_is_two_inputs:
|
||||
x = self.op1(inputs[0])
|
||||
else:
|
||||
x = self.op1(inputs)
|
||||
if self.op2_is_two_inputs:
|
||||
x = self.op2([x, inputs[-1]])
|
||||
else:
|
||||
x = self.op2(x)
|
||||
return x
|
|
@ -0,0 +1,272 @@
|
|||
# Copyright (c) Microsoft Corporation.
|
||||
# Licensed under the MIT license.
|
||||
import os
|
||||
import sys
|
||||
import yaml
|
||||
import importlib
|
||||
from tensorflow import keras
|
||||
from .utils import get_operator_by_name, generate_model_for_testcase
|
||||
from .build_models import SingleOpModel
|
||||
from nn_meter.builder.backend_meta.utils import Latency
|
||||
|
||||
__BUILTIN_TESTCASES__ = {'MON'}
|
||||
|
||||
__user_config_folder__ = os.path.expanduser('~/.nn_meter/config')
|
||||
__registry_cfg_filename__ = 'registry.yaml'
|
||||
__REG_TESTCASES__ = {}
|
||||
if os.path.isfile(os.path.join(__user_config_folder__, __registry_cfg_filename__)):
|
||||
with open(os.path.join(__user_config_folder__, __registry_cfg_filename__), 'r') as fp:
|
||||
registry_modules = yaml.load(fp, yaml.FullLoader)
|
||||
if "testcases" in registry_modules:
|
||||
__REG_TESTCASES__ = registry_modules["testcases"]
|
||||
|
||||
|
||||
class BaseTestCase:
|
||||
name = ''
|
||||
cases = None
|
||||
true_case = ''
|
||||
deps = {}
|
||||
input_shape = None
|
||||
|
||||
def __init__(self, config, **kwargs):
|
||||
self._kwargs = kwargs
|
||||
self.latency = {}
|
||||
self.config = config
|
||||
self.load_config()
|
||||
|
||||
def generate_testcase(self):
|
||||
testcase = {}
|
||||
model, shapes = self._model_block()
|
||||
testcase['block'] = {
|
||||
'model': model,
|
||||
'shapes': shapes,
|
||||
}
|
||||
|
||||
for _, ops in self.cases.items():
|
||||
for op in ops:
|
||||
try:
|
||||
model, shapes = getattr(self, '_model_' + op)()
|
||||
testcase[op] = {
|
||||
'model': model,
|
||||
'shapes': shapes
|
||||
}
|
||||
except:
|
||||
layer, _, op1_is_two_inputs = get_operator_by_name(op, self.input_shape, self.config)
|
||||
model = SingleOpModel(layer)
|
||||
shapes = [self.input_shape] * (1 + op1_is_two_inputs)
|
||||
testcase[op] = {
|
||||
'model': model,
|
||||
'shapes': shapes
|
||||
}
|
||||
return testcase
|
||||
|
||||
def save_testcase(self):
|
||||
from nn_meter.builder.nn_generator.tf_networks.utils import get_tensor_by_shapes
|
||||
testcase = self.generate_testcase()
|
||||
|
||||
for op, model in testcase.items():
|
||||
model_path = os.path.join(self.model_dir, self.name + '_' + op)
|
||||
model['model'](get_tensor_by_shapes(model['shapes']))
|
||||
keras.models.save_model(model['model'], model_path)
|
||||
testcase[op]['model'] = model_path
|
||||
|
||||
return testcase
|
||||
|
||||
def load_latency(self, testcase):
|
||||
self.latency['block'] = Latency(testcase['block']['latency'])
|
||||
|
||||
for case, ops in self.cases.items():
|
||||
latency_sum = 0
|
||||
for op in ops:
|
||||
if op not in self.latency:
|
||||
self.latency[op] = Latency(testcase[op]['latency'])
|
||||
latency_sum += self.latency[op]
|
||||
self.latency[case] = latency_sum
|
||||
|
||||
def test(self):
|
||||
true_case_lat_diff = abs(self.latency[self.true_case].avg - self.latency['block'].avg)
|
||||
|
||||
for case, _ in self.cases.items():
|
||||
if case != self.true_case and true_case_lat_diff > abs(self.latency[case].avg - self.latency['block'].avg):
|
||||
return case
|
||||
|
||||
return self.true_case
|
||||
|
||||
def load_config(self):
|
||||
config = self.config
|
||||
if not self.input_shape:
|
||||
self.input_shape = [config['HW'], config['HW'], config['CIN']]
|
||||
self.kernel_size = config['KERNEL_SIZE']
|
||||
self.cout = config['COUT']
|
||||
self.padding = config['PADDING']
|
||||
self.model_dir = os.path.join(config['MODEL_DIR'], 'models')
|
||||
os.makedirs(self.model_dir, exist_ok=True)
|
||||
|
||||
def _model_block(self):
|
||||
pass
|
||||
|
||||
|
||||
class BasicFusion(BaseTestCase):
|
||||
name = ''
|
||||
cases = {
|
||||
'ops': ['', ''],
|
||||
}
|
||||
false_case = 'ops'
|
||||
|
||||
def load_config(self):
|
||||
super().load_config()
|
||||
self.eps = self.config['EMP_ALPHA']
|
||||
|
||||
def test(self):
|
||||
# the pair counts as fused if running the two ops separately is slower than the fused block
# by more than eps times the smaller of the two single-op latencies
secondary_op_lat = min(lat for op, lat in self.latency.items() if op != 'block' and op != self.false_case)
|
||||
return self.latency[self.false_case].avg - self.latency['block'].avg > self.eps * secondary_op_lat.avg
|
||||
|
||||
def load_latency(self, testcase):
|
||||
self.latency['block'] = Latency(testcase['block']['latency'])
|
||||
|
||||
op1, op2 = self.cases['ops']
|
||||
op1_alias, op2_alias = op1, op2
|
||||
|
||||
if op1_alias == op2_alias:
|
||||
op1_alias += '_1'
|
||||
op2_alias += '_2'
|
||||
|
||||
self.latency[op1_alias] = Latency(testcase[op1_alias]['latency'])
|
||||
self.latency[op2_alias] = Latency(testcase[op2_alias]['latency'])
|
||||
self.latency['ops'] = self.latency[op1_alias] + self.latency[op2_alias]
|
||||
|
||||
def generate_testcase(self):
|
||||
testcase = {}
|
||||
|
||||
op1, op2 = self.cases['ops']
|
||||
op1_alias, op2_alias = op1, op2
|
||||
|
||||
if op1_alias == op2_alias:
|
||||
op1_alias += '_1'
|
||||
op2_alias += '_2'
|
||||
|
||||
op1_model, op2_model, block_model, op1_shapes, op2_shapes, block_shapes = \
|
||||
generate_model_for_testcase(op1, op2, self.input_shape, self.config)
|
||||
testcase[op1_alias] = {
|
||||
'model': op1_model,
|
||||
'shapes': op1_shapes,
|
||||
}
|
||||
testcase[op2_alias] = {
|
||||
'model': op2_model,
|
||||
'shapes': op2_shapes,
|
||||
}
|
||||
testcase['block'] = {
|
||||
'model': block_model,
|
||||
'shapes': block_shapes,
|
||||
}
|
||||
return testcase
|
||||
|
||||
|
||||
class MultipleOutNodes(BaseTestCase):
|
||||
name = 'MON'
|
||||
cases = {
|
||||
'case1': ['relu_relu', 'relu_dwconv', 'dwconv'],
|
||||
'case2': ['dwconv_relu_relu', 'relu_dwconv'],
|
||||
'case3': ['dwconv_relu', 'dwconv', 'relu_relu']
|
||||
}
|
||||
true_case = 'case1'
|
||||
deps = {
|
||||
'BF_dwconv_relu': True,
|
||||
}
|
||||
|
||||
def _model_block(self):
|
||||
input_layer = keras.Input(shape=self.input_shape)
|
||||
|
||||
x = keras.layers.DepthwiseConv2D(self.kernel_size, padding=self.padding)(input_layer)
|
||||
branch_1 = keras.layers.ReLU(negative_slope=0)(x)
|
||||
branch_1 = keras.layers.ReLU(negative_slope=0)(branch_1)
|
||||
branch_2 = keras.layers.ReLU(negative_slope=2)(x)
|
||||
branch_2 = keras.layers.DepthwiseConv2D(self.kernel_size, padding=self.padding)(branch_2)
|
||||
|
||||
return keras.models.Model(input_layer, [branch_1, branch_2]), [self.input_shape]
|
||||
|
||||
def _model_relu_relu(self):
|
||||
input_layer = keras.Input(shape=self.input_shape)
|
||||
|
||||
x = keras.layers.ReLU()(input_layer)
|
||||
x = keras.layers.ReLU()(x)
|
||||
|
||||
return keras.models.Model(input_layer, x), [self.input_shape]
|
||||
|
||||
def _model_dwconv_relu_relu(self):
|
||||
input_layer = keras.Input(shape=self.input_shape)
|
||||
|
||||
x = keras.layers.DepthwiseConv2D(self.kernel_size, padding=self.padding)(input_layer)
|
||||
x = keras.layers.ReLU()(x)
|
||||
x = keras.layers.ReLU()(x)
|
||||
|
||||
return keras.models.Model(input_layer, x), [self.input_shape]
|
||||
|
||||
def _model_relu_dwconv(self):
|
||||
input_layer = keras.Input(shape=self.input_shape)
|
||||
|
||||
x = keras.layers.ReLU()(input_layer)
|
||||
x = keras.layers.DepthwiseConv2D(self.kernel_size, padding=self.padding)(x)
|
||||
|
||||
return keras.models.Model(input_layer, x), [self.input_shape]
|
||||
|
||||
def _model_dwconv_relu(self):
|
||||
input_layer = keras.Input(shape=self.input_shape)
|
||||
|
||||
x = keras.layers.DepthwiseConv2D(self.kernel_size, padding=self.padding)(input_layer)
|
||||
x = keras.layers.ReLU()(x)
|
||||
|
||||
return keras.models.Model(input_layer, x), [self.input_shape]
|
||||
|
||||
def _model_dwconv(self):
|
||||
input_layer = keras.Input(shape=self.input_shape)
|
||||
|
||||
x = keras.layers.DepthwiseConv2D(self.kernel_size, padding=self.padding)(input_layer)
|
||||
|
||||
return keras.models.Model(input_layer, x), [self.input_shape]
|
||||
|
||||
|
||||
def generate_testcases():
|
||||
testcases_list = {}
|
||||
from nn_meter.builder import builder_config
|
||||
config = builder_config.get_module('ruletest')
|
||||
|
||||
if config['BASIC_TESTCASES'] is not None:
|
||||
testcases = [case.split('_') for case in config['BASIC_TESTCASES']]
|
||||
d1_required_layers = config['LAYERS_1D']
|
||||
for op1, op2 in testcases:
|
||||
class_name = f'BasicFusion_{op1}_{op2}'
|
||||
name = f'BF_{op1}_{op2}'
|
||||
cases = {
|
||||
'ops': [op1, op2],
|
||||
}
|
||||
if op1 in d1_required_layers or op2 in d1_required_layers:
|
||||
input_shape = [config['SHAPE_1D']]
|
||||
else:
|
||||
input_shape = [config['HW'], config['HW'], config['CIN']]
|
||||
bf_cls = type(class_name, (BasicFusion,), {
|
||||
'name': name,
|
||||
'cases': cases,
|
||||
'input_shape': input_shape,
|
||||
})
|
||||
testcases_list[bf_cls.name] = bf_cls
|
||||
|
||||
if config['OTHER_TESTCASES'] is not None:
|
||||
for testcase in config['OTHER_TESTCASES']:
|
||||
if testcase in __BUILTIN_TESTCASES__:
|
||||
testcases_list[testcase] = MultipleOutNodes
|
||||
else:
|
||||
try:
|
||||
testcase_info = __REG_TESTCASES__[testcase]
|
||||
sys.path.append(testcase_info["package_location"])
|
||||
testcase_module_name = testcase_info["class_name"]
|
||||
testcase_module = importlib.import_module(testcase_info["class_module"])
|
||||
testcase_cls = getattr(testcase_module, testcase_module_name)
|
||||
testcases_list[testcase] = testcase_cls
|
||||
except:
|
||||
raise KeyError(f'Unsupported test case: {testcase}.')
|
||||
|
||||
return testcases_list
|
||||
|
||||
def list_testcases():
|
||||
return __BUILTIN_TESTCASES__ + ["* " + item for item in list(__REG_TESTCASES__.keys())]
|
|
@ -0,0 +1,55 @@
|
|||
# Copyright (c) Microsoft Corporation.
|
||||
# Licensed under the MIT license.
|
||||
import os
|
||||
import json
|
||||
import logging
|
||||
from ..utils import read_profiled_results
|
||||
from nn_meter.builder.utils import merge_prev_info
|
||||
|
||||
|
||||
def generate_testcases():
|
||||
"""generate testcases and save the testcase models and testcase json file in the workspace
|
||||
"""
|
||||
from nn_meter.builder import builder_config
|
||||
config = builder_config.get_module('ruletest')
|
||||
|
||||
from .test_fusion_rule import FusionRuleTester
|
||||
tester = FusionRuleTester()
|
||||
testcases = tester.generate()
|
||||
|
||||
# save information to json file
|
||||
ws_path = config['MODEL_DIR']
|
||||
info_save_path = os.path.join(ws_path, "results", "origin_testcases.json")
|
||||
new_testcases = merge_prev_info(new_info=testcases, info_save_path=info_save_path)
|
||||
os.makedirs(os.path.dirname(info_save_path), exist_ok=True)
|
||||
with open(info_save_path, 'w') as fp:
|
||||
json.dump(new_testcases, fp, indent=4)
|
||||
logging.keyinfo(f"Save the original testcases information to {info_save_path}")
|
||||
return testcases
|
||||
|
||||
|
||||
def detect_fusion_rule(profiled_testcases):
|
||||
""" detect fusion rule by testcases latency value
|
||||
@params:
|
||||
|
||||
profiled_testcases: the Dict of profiled testcases or the path of the profiled testcase json file
|
||||
"""
|
||||
if isinstance(profiled_testcases, str):
|
||||
with open(profiled_testcases, 'r') as fp:
|
||||
profiled_testcases = read_profiled_results(json.load(fp))
|
||||
|
||||
from .test_fusion_rule import FusionRuleTester
|
||||
tester = FusionRuleTester()
|
||||
result = tester.analyze(profiled_testcases)
|
||||
|
||||
# save information to json file
|
||||
from nn_meter.builder import builder_config
|
||||
config = builder_config.get_module('ruletest')
|
||||
ws_path = config['MODEL_DIR']
|
||||
info_save_path = os.path.join(ws_path, "results", "detected_fusion_rule.json")
|
||||
new_result = merge_prev_info(new_info=result, info_save_path=info_save_path)
|
||||
os.makedirs(os.path.dirname(info_save_path), exist_ok=True)
|
||||
with open(info_save_path, 'w') as fp:
|
||||
json.dump(new_result, fp, indent=4)
|
||||
logging.keyinfo(f"Save the detected fusion rule information to {info_save_path}")
|
||||
return result
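# Illustrative usage sketch (added for documentation; the workspace and json paths
# are hypothetical). It assumes the workspace has been initialized with
# `builder_config.init(...)` and that the generated testcase models have already
# been profiled on a backend, producing the json file passed to `detect_fusion_rule`.
if __name__ == "__main__":
    from nn_meter.builder import builder_config
    builder_config.init("/path/to/workspace")   # hypothetical workspace path
    origin_testcases = generate_testcases()     # saves results/origin_testcases.json
    # ... profile the saved testcase models on a connected backend ...
    fusion_rules = detect_fusion_rule("/path/to/workspace/fusion_rule_test/results/profiled_results.json")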
|
|
@ -0,0 +1,58 @@
|
|||
# Copyright (c) Microsoft Corporation.
|
||||
# Licensed under the MIT license.
|
||||
import networkx as nx
|
||||
from .generate_testcase import generate_testcases
|
||||
from nn_meter.builder import builder_config
|
||||
|
||||
config = builder_config.get_module('ruletest')
|
||||
|
||||
|
||||
class FusionRuleTester:
|
||||
def __init__(self):
|
||||
self._testcases = generate_testcases()
|
||||
|
||||
def _build_dep_dag(self):
|
||||
dag = nx.DiGraph()
|
||||
|
||||
for name, cls in self._testcases.items():
|
||||
dag.add_node(name)
|
||||
for dep in cls.deps:
|
||||
dag.add_edge(dep, name)
|
||||
|
||||
self._dag = list(nx.topological_sort(dag))
|
||||
|
||||
def generate(self):
|
||||
testcases = {}
|
||||
|
||||
for name, cls in self._testcases.items():
|
||||
testcases[name] = cls(config).save_testcase()
|
||||
|
||||
return testcases
|
||||
|
||||
def analyze(self, profile_results):
|
||||
self._build_dep_dag()
|
||||
result = {}
|
||||
|
||||
for name in self._dag:
|
||||
if name not in profile_results:
|
||||
continue
|
||||
|
||||
result[name] = {}
|
||||
rule_cls = self._testcases[name]
|
||||
|
||||
obey = True
|
||||
for dep, expect in rule_cls.deps.items():
|
||||
if result[dep]['obey'] != expect:
|
||||
obey = False
|
||||
|
||||
if obey:
|
||||
rule = rule_cls(config)
|
||||
rule.load_latency(profile_results[name])
|
||||
obey = rule.test()
|
||||
if config['DETAIL']:
|
||||
latency = {key: str(value) for key, value in rule.latency.items()}
|
||||
result[name]['latency'] = latency
|
||||
|
||||
result[name]['obey'] = obey
|
||||
|
||||
return result
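# Illustrative sketch of the analyze() output (added for documentation; the rule
# names follow the BF_{op1}_{op2} pattern used above and the values are made up).
# With the DETAIL config enabled, each entry also carries the profiled latency strings:
#
#     {
#         "BF_dwconv_relu": {"latency": {"dwconv": "3.1 +- 0.1", ...}, "obey": true},
#         "BF_relu_relu":   {"obey": false}
#     }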
|
|
@ -0,0 +1,87 @@
|
|||
# Copyright (c) Microsoft Corporation.
|
||||
# Licensed under the MIT license.
|
||||
import os
|
||||
import sys
|
||||
import yaml
|
||||
import importlib
|
||||
|
||||
__BUILTIN_OPERATORS__ = {
|
||||
# builtin_name: module_name
|
||||
"conv": "Conv",
|
||||
"dwconv": "DwConv",
|
||||
"convtrans": "ConvTrans",
|
||||
"bn": "BN",
|
||||
"globalavgpool": "GlobalAvgpool",
|
||||
"maxpool": "MaxPool",
|
||||
"avgpool": "AvgPool",
|
||||
"se": "SE",
|
||||
"fc": "FC",
|
||||
"relu": "Relu",
|
||||
"relu6": "Relu6",
|
||||
"sigmoid": "Sigmoid",
|
||||
"hswish": "Hswish",
|
||||
"reshape": "Reshape",
|
||||
"add": "Add",
|
||||
"concat": "Concat",
|
||||
"flatten": "Flatten",
|
||||
"split": "Split"
|
||||
}
|
||||
|
||||
__user_config_folder__ = os.path.expanduser('~/.nn_meter/config')
|
||||
__registry_cfg_filename__ = 'registry.yaml'
|
||||
__REG_OPERATORS__ = {}
|
||||
if os.path.isfile(os.path.join(__user_config_folder__, __registry_cfg_filename__)):
|
||||
with open(os.path.join(__user_config_folder__, __registry_cfg_filename__), 'r') as fp:
|
||||
registry_modules = yaml.load(fp, yaml.FullLoader)
|
||||
if "operators" in registry_modules:
|
||||
__REG_OPERATORS__ = registry_modules["operators"]
|
||||
|
||||
|
||||
def get_operator_by_name(operator_name, input_shape, config = None):
|
||||
""" get operator information by builtin name
|
||||
"""
|
||||
if operator_name in __REG_OPERATORS__:
|
||||
operator_info = __REG_OPERATORS__[operator_name]
|
||||
sys.path.append(operator_info["package_location"])
|
||||
operator_module_name = operator_info["class_name"]
|
||||
operator_module = importlib.import_module(operator_info["class_module"])
|
||||
|
||||
elif operator_name in __BUILTIN_OPERATORS__:
|
||||
operator_module_name = __BUILTIN_OPERATORS__[operator_name]
|
||||
from nn_meter.builder.nn_generator.tf_networks import operators
|
||||
operator_module = operators
|
||||
|
||||
else:
|
||||
raise ValueError(f"Unsupported operator name: {operator_name}. Please register the operator first.")
|
||||
|
||||
operator_cls = getattr(operator_module, operator_module_name)(input_shape, config)
|
||||
operator = operator_cls.get_model()
|
||||
output_shape = operator_cls.get_output_shape()
|
||||
op_is_two_inputs = operator_cls.get_is_two_inputs()
|
||||
|
||||
return operator, output_shape, op_is_two_inputs
|
||||
|
||||
|
||||
def generate_model_for_testcase(op1, op2, input_shape, config):
|
||||
from .build_models import SingleOpModel, TwoOpModel
|
||||
from nn_meter.builder.nn_generator.tf_networks.utils import get_inputs_by_shapes
|
||||
layer1, op1_output_shape, op1_is_two_inputs = get_operator_by_name(op1, input_shape, config)
|
||||
layer2, _, op2_is_two_inputs = get_operator_by_name(op2, op1_output_shape, config)
|
||||
|
||||
op1_model = SingleOpModel(layer1)
|
||||
op1_shapes = [input_shape] * (1 + op1_is_two_inputs)
|
||||
op1_model(get_inputs_by_shapes(op1_shapes))
|
||||
|
||||
op2_model = SingleOpModel(layer2)
|
||||
op2_shapes = [op1_output_shape] * (1 + op2_is_two_inputs)
|
||||
op2_model(get_inputs_by_shapes(op2_shapes))
|
||||
|
||||
block_model = TwoOpModel(layer1, layer2, op1_is_two_inputs, op2_is_two_inputs)
|
||||
block_shapes = [input_shape] * (1 + op1_is_two_inputs) + [op1_output_shape] * op2_is_two_inputs
|
||||
block_model(get_inputs_by_shapes(block_shapes))
|
||||
|
||||
return op1_model, op2_model, block_model, op1_shapes, op2_shapes, block_shapes
|
||||
|
||||
|
||||
def list_operators():
|
||||
return list(__BUILTIN_OPERATORS__.keys()) + ["* " + item for item in list(__REG_OPERATORS__.keys())]
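# Illustrative usage sketch (added for documentation; the workspace path is
# hypothetical). The operator config is taken from the 'ruletest' module, which is
# what the fusion rule tester passes to these helpers.
if __name__ == "__main__":
    from nn_meter.builder import builder_config
    builder_config.init("/path/to/workspace")
    config = builder_config.get_module('ruletest')
    op, out_shape, is_two_inputs = get_operator_by_name("conv", [config['HW'], config['HW'], config['CIN']], config)
    print(out_shape, is_two_inputs)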
|
|
@ -0,0 +1,119 @@
|
|||
# Copyright (c) Microsoft Corporation.
|
||||
# Licensed under the MIT license.
|
||||
import math
|
||||
import copy
|
||||
from typing import List
|
||||
|
||||
|
||||
class ProfiledResults:
|
||||
def __init__(self, results=None):
|
||||
"""
|
||||
Initialize the profiled results of test models running on backends.
|
||||
|
||||
@params:
|
||||
|
||||
results: Dict
|
||||
The profiled results, with the Dict keys being the metric names. Metrics
|
||||
include: latency, peak/average power, energy, memory, etc.
|
||||
"""
|
||||
self.data = {}
|
||||
for metric, value in (results or {}).items():  # tolerate the default results=None
|
||||
self.data[metric] = value
|
||||
|
||||
def set(self, metric, value):
|
||||
""" Set metric value by its name
|
||||
"""
|
||||
self.data[metric] = value
|
||||
|
||||
def get(self, metrics):
|
||||
''' Get metric value by calling the name of the metric.
|
||||
'''
|
||||
if not isinstance(metrics, List):
|
||||
metrics = [metrics]
|
||||
result = {}
|
||||
for metric in metrics:
|
||||
if metric in self.data:
|
||||
result[metric] = self.data[metric]
|
||||
else:
|
||||
raise AttributeError(f"Unsupported metric {metric}.")
|
||||
return result
|
||||
|
||||
def _dump(self):
|
||||
return {metric: str(value) for metric, value in self.data.items()}
|
||||
|
||||
|
||||
class Latency:
|
||||
def __init__(self, avg=0, std=0):
|
||||
if isinstance(avg, str):
|
||||
avg, std = avg.split('+-')
|
||||
self.avg = float(avg)
|
||||
self.std = float(std)
|
||||
elif isinstance(avg, Latency):
|
||||
self.avg, self.std = avg.avg, avg.std
|
||||
else:
|
||||
self.avg = avg
|
||||
self.std = std
|
||||
|
||||
def __str__(self):
|
||||
return f'{self.avg} +- {self.std}'
|
||||
|
||||
def __add__(self, rhs):
|
||||
if isinstance(rhs, Latency):
|
||||
return Latency(self.avg + rhs.avg, math.sqrt(self.std ** 2 + rhs.std ** 2))
|
||||
else:
|
||||
return Latency(self.avg + rhs, self.std)
|
||||
|
||||
def __radd__(self, lhs):
|
||||
return self.__add__(lhs)
|
||||
|
||||
def __mul__(self, rhs):
|
||||
return Latency(self.avg * rhs, self.std * rhs)
|
||||
|
||||
def __rmul__(self, lhs):
|
||||
return self.__mul__(lhs)
|
||||
|
||||
def __le__(self, rhs):
|
||||
return self.avg <= rhs.avg
|
||||
|
||||
def __gt__(self, rhs):
|
||||
return self.avg > rhs.avg
|
||||
|
||||
def __neg__(self):
|
||||
return Latency(-self.avg, -self.std)
|
||||
|
||||
def __sub__(self, rhs):
|
||||
return self + rhs.__neg__()
|
||||
|
||||
|
||||
def dump_profiled_results(results, detail = False):
|
||||
''' convert Latency instance to string and return profiled results
|
||||
|
||||
@params
|
||||
|
||||
detail: if False, only the metrics results will be dumped to the profiled results. Otherwise, model information
|
||||
will be dumped as well.
|
||||
'''
|
||||
dumped_results = {}
|
||||
for module_key, module in results.items():
|
||||
dumped_results[module_key] = {}
|
||||
for model_key, model in module.items():
|
||||
dumped_results[module_key][model_key] = {}
|
||||
if detail:
|
||||
for info_key, info in model.items():
|
||||
if info_key == 'latency':
|
||||
dumped_results[module_key][model_key]['latency'] = str(model['latency'])
|
||||
else:
|
||||
dumped_results[module_key][model_key][info_key] = info
|
||||
else:
|
||||
if 'latency' in model:
|
||||
dumped_results[module_key][model_key]['latency'] = str(model['latency'])
|
||||
return dumped_results
|
||||
|
||||
|
||||
def read_profiled_results(results):
|
||||
results_copy = copy.deepcopy(results)
|
||||
for item in results_copy.values():
|
||||
for model in item.values():
|
||||
if 'latency' in model:
|
||||
model['latency'] = Latency(model['latency'])
|
||||
return results_copy
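# Illustrative sketch of the Latency arithmetic and the dump/read round trip defined
# above (added for documentation; the values are made up).
if __name__ == "__main__":
    a = Latency("3.20+-0.10")     # parsed from an "avg+-std" string
    b = Latency(1.0, 0.05)
    total = a + b                 # averages add, stds combine in quadrature
    print(total)                  # approximately "4.2 +- 0.1118"

    results = {"BF_conv_relu": {"block": {"latency": total}}}
    dumped = dump_profiled_results(results)    # Latency -> "avg +- std" string
    restored = read_profiled_results(dumped)   # string -> Latency
    print(restored["BF_conv_relu"]["block"]["latency"].avg)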
|
|
@ -0,0 +1,9 @@
|
|||
# Copyright (c) Microsoft Corporation.
|
||||
# Licensed under the MIT license.
|
||||
from .interface import (
|
||||
BaseBackend,
|
||||
BaseProfiler,
|
||||
BaseParser,
|
||||
connect_backend,
|
||||
list_backends
|
||||
)
|
|
@ -0,0 +1,192 @@
|
|||
# Copyright (c) Microsoft Corporation.
|
||||
# Licensed under the MIT license.
|
||||
import os
|
||||
import sys
|
||||
import yaml
|
||||
import importlib
|
||||
|
||||
|
||||
__BUILTIN_BACKENDS__ = {
|
||||
"tflite_cpu": {
|
||||
"class_module": "nn_meter.builder.backends.tflite",
|
||||
"class_name": "TFLiteCPUBackend"
|
||||
},
|
||||
"tflite_gpu": {
|
||||
"class_module": "nn_meter.builder.backends.tflite",
|
||||
"class_name": "TFLiteGPUBackend"
|
||||
},
|
||||
"openvino_vpu": {
|
||||
"class_module": "nn_meter.builder.backends.openvino",
|
||||
"class_name": "OpenVINOVPUBackend"
|
||||
}
|
||||
}
|
||||
|
||||
__user_config_folder__ = os.path.expanduser('~/.nn_meter/config')
|
||||
__registry_cfg_filename__ = 'registry.yaml'
|
||||
__REG_BACKENDS__ = {}
|
||||
if os.path.isfile(os.path.join(__user_config_folder__, __registry_cfg_filename__)):
|
||||
with open(os.path.join(__user_config_folder__, __registry_cfg_filename__), 'r') as fp:
|
||||
registry_modules = yaml.load(fp, yaml.FullLoader)
|
||||
if "backends" in registry_modules:
|
||||
__REG_BACKENDS__ = registry_modules["backends"]
|
||||
|
||||
|
||||
class BaseBackend:
|
||||
"""
|
||||
The base backend class used to instantiate a backend instance. If users want to implement their own backend,
|
||||
the customized backend class should inherit from this class.
|
||||
|
||||
@params:
|
||||
|
||||
profiler_class: a subclass inherited from `nn_meter.builder.backend.BaseProfiler` to specify the profiling command of
|
||||
the backend. A profiler contains commands to push the model to the mobile device, run the model on the mobile device,
|
||||
get stdout from the mobile device, and related operations. In the implementation of a profiler, an interface of
|
||||
``Profiler.profile()`` is required.
|
||||
|
||||
parser_class: a subclass inherited from `nn_meter.builder.backend.BaseParser` to parse the profiled results.
|
||||
A parser parses the stdout from the device profiler and gets the required metrics. In the implementation of a parser, the interface
|
||||
of `Parser.parse()` and the property `Parser.results()` are required.
|
||||
"""
|
||||
profiler_class = None
|
||||
parser_class = None
|
||||
|
||||
def __init__(self, configs):
|
||||
""" class initialization with required configs
|
||||
"""
|
||||
self.configs = configs
|
||||
self.update_configs()
|
||||
if self.parser_class:
|
||||
self.parser = self.parser_class(**self.parser_kwargs)
|
||||
if self.profiler_class:
|
||||
self.profiler = self.profiler_class(**self.profiler_kwargs)
|
||||
|
||||
def update_configs(self):
|
||||
""" update the config parameters for the backend
|
||||
"""
|
||||
self.parser_kwargs = {}
|
||||
self.profiler_kwargs = {}
|
||||
|
||||
def convert_model(self, model_path, save_path, input_shape=None):
|
||||
""" convert the Keras model instance to the type required by the backend inference.
|
||||
|
||||
@params:
|
||||
|
||||
model_path: the path of the Keras model to profile
|
||||
|
||||
|
||||
|
||||
save_path: path to save the converted model
|
||||
|
||||
input_shape: the shape of the input tensor for inference; a random tensor with this shape will be
|
||||
generated and used
|
||||
"""
|
||||
# convert model and save the converted model to path `converted_model`
|
||||
converted_model = ...
|
||||
return converted_model
|
||||
|
||||
def profile(self, converted_model, metrics = ['latency'], input_shape = None):
|
||||
"""
|
||||
run the model on the backend and return the required metrics of the running results. nn-Meter only supports latency
|
||||
as a metric for now. Users may provide other metrics in their customized backends.
|
||||
|
||||
converted_model: the model path, in the format required by the backend
|
||||
|
||||
metrics: a list of required metrics name. Defaults to ['latency']
|
||||
|
||||
"""
|
||||
return self.parser.parse(self.profiler.profile(converted_model)).results.get(metrics)
|
||||
|
||||
def profile_model_file(self, model_path, save_path, input_shape = None, metrics = ['latency']):
|
||||
""" load model by model file path, convert model file, and run ``self.profile()``
|
||||
"""
|
||||
converted_model = self.convert_model(model_path, save_path, input_shape)
|
||||
res = self.profile(converted_model, metrics, input_shape)
|
||||
return res
|
||||
|
||||
def test_connection(self):
|
||||
""" check the status of backend interface connection.
|
||||
"""
|
||||
pass
|
||||
|
||||
|
||||
class BaseProfiler:
|
||||
"""
|
||||
Specify the profiling command of the backend. A profiler contains commands to push the model to the mobile device, run the model
|
||||
on the mobile device, get stdout from the mobile device, and related operations.
|
||||
"""
|
||||
def profile(self):
|
||||
""" Main steps of ``Profiler.profile()`` includes 1) push the model file to edge devices, 2) run models in required times
|
||||
and getting back the running results. Return the running results from the edge device.
|
||||
"""
|
||||
output = ''
|
||||
return output
|
||||
|
||||
|
||||
class BaseParser:
|
||||
"""
|
||||
Parse the profiled results. A parser parses the stdout from the device runner and gets the required metrics.
|
||||
"""
|
||||
def parse(self, content):
|
||||
""" A string parser to parse profiled results value from the standard output of devices runner. This method should return the instance
|
||||
itself.
|
||||
|
||||
@params
|
||||
|
||||
content: the standard output from device
|
||||
"""
|
||||
return self
|
||||
|
||||
@property
|
||||
def results(self):
|
||||
""" warp the parsed results by ``ProfiledResults`` class from ``nn_meter.builder.backend_meta.utils`` and return the parsed results value.
|
||||
"""
|
||||
pass
|
||||
|
||||
|
||||
def connect_backend(backend_name):
|
||||
"""
|
||||
Return the required backend class and feed params to the backend. Supported backends: tflite_cpu, tflite_gpu, openvino_vpu.
|
||||
|
||||
Available backend and corresponding configs:
|
||||
- For backend based on TFLite platform: {
|
||||
'MODEL_DIR': path to the folder (on host device) where temporary models will be generated.
|
||||
'REMOTE_MODEL_DIR': path to the folder (on mobile device) where temporary models will be copied to.
|
||||
'KERNEL_PATH': path (on mobile device) where the kernel implementations will be dumped.
|
||||
'BENCHMARK_MODEL_PATH': path (on android device) where the binary file `benchmark_model` is deployed.
|
||||
'DEVICE_SERIAL': if there are multiple adb devices connected to your host, you need to provide the \\
|
||||
corresponding serial id. Set to '' if there is only one device connected to your host.
|
||||
}
|
||||
- For backend based on OpenVINO platform: {
|
||||
'OPENVINO_ENV': path to openvino virtualenv (./docs/requirements/openvino_requirements.txt is provided)
|
||||
'OPTIMIZER_PATH': path to openvino optimizer
|
||||
'OPENVINO_RUNTIME_DIR': directory to openvino runtime
|
||||
'DEVICE_SERIAL': serial id of the device
|
||||
'DATA_TYPE': data type of the model (e.g., fp16, fp32)
|
||||
}
|
||||
|
||||
The config can be declared and modified after creating a workspace. Users can follow the guidance in ./docs/builder/backend.md
|
||||
|
||||
@params:
|
||||
backend_name: the name of the backend, or a backend class (a subclass of `BaseBackend`).
|
||||
"""
|
||||
if backend_name in __REG_BACKENDS__:
|
||||
backend_info = __REG_BACKENDS__[backend_name]
|
||||
sys.path.append(backend_info["package_location"])
|
||||
elif backend_name in __BUILTIN_BACKENDS__:
|
||||
backend_info = __BUILTIN_BACKENDS__[backend_name]
|
||||
else:
|
||||
raise ValueError(f"Unsupported backend name: {backend_name}. Please register the backend first.")
|
||||
|
||||
module = backend_info["class_module"]
|
||||
name = backend_info["class_name"]
|
||||
backend_module = importlib.import_module(module)
|
||||
backend_cls = getattr(backend_module, name)
|
||||
|
||||
# load configs from workspace
|
||||
from nn_meter.builder import builder_config
|
||||
configs = builder_config.get_module('backend')
|
||||
return backend_cls(configs)
|
||||
|
||||
|
||||
def list_backends():
|
||||
return list(__BUILTIN_BACKENDS__.keys()) + ["* " + item for item in list(__REG_BACKENDS__.keys())]
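# Illustrative usage sketch (added for documentation; the paths are hypothetical).
# It assumes a workspace has been initialized with builder_config.init(...) so the
# backend configs can be loaded, and that the corresponding device (e.g. an
# adb-connected phone for tflite_cpu) is reachable.
if __name__ == "__main__":
    from nn_meter.builder import builder_config
    builder_config.init("/path/to/workspace")        # hypothetical workspace
    backend = connect_backend(backend_name="tflite_cpu")
    backend.test_connection()
    res = backend.profile_model_file("/path/to/model.h5", "/tmp", metrics=["latency"])
    print(res)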
|
|
@ -0,0 +1,5 @@
|
|||
# Copyright (c) Microsoft Corporation.
|
||||
# Licensed under the MIT license.
|
||||
from .openvino_backend import OpenVINOBackend
|
||||
from .openvino_profiler import OpenVINOProfiler
|
||||
from .vpu import OpenVINOVPUBackend
|
|
@ -0,0 +1,3 @@
|
|||
# Copyright (c) Microsoft Corporation.
|
||||
# Licensed under the MIT license.
|
||||
from .patcher_wrapper import patch_frozenpb
|
|
@ -0,0 +1,369 @@
|
|||
# Copyright (c) Microsoft Corporation.
|
||||
# Licensed under the MIT license.
|
||||
import os
|
||||
import re
|
||||
import sys
|
||||
|
||||
import tensorflow as tf
|
||||
from google.protobuf import text_format
|
||||
from tensorflow import io
|
||||
|
||||
|
||||
def dec2octpkg4(input):
|
||||
octPkgStr = ''
|
||||
for i in range(4):
|
||||
octPkgStr = octPkgStr + oct((input >> (i * 8)) %
|
||||
256).replace('0o', '\\')
|
||||
return octPkgStr
|
||||
|
||||
|
||||
KEEP_DIM_PATCH =\
|
||||
'''
|
||||
attr {
|
||||
key: "keep_dims"
|
||||
value {
|
||||
b: {KEEP_DIM}
|
||||
}
|
||||
}
|
||||
'''
|
||||
|
||||
REDUCE_DIM_PATCH =\
|
||||
'''
|
||||
node {
|
||||
name: "reshape/Reshape/shape"
|
||||
op: "Const"
|
||||
attr {
|
||||
key: "dtype"
|
||||
value {
|
||||
type: DT_INT32
|
||||
}
|
||||
}
|
||||
attr {
|
||||
key: "value"
|
||||
value {
|
||||
tensor {
|
||||
dtype: DT_INT32
|
||||
tensor_shape {
|
||||
dim {
|
||||
size: 2
|
||||
}
|
||||
}
|
||||
tensor_content: "{SHAPE}"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
node {
|
||||
name: "reshape/Reshape"
|
||||
op: "Reshape"
|
||||
input: "{INPUT_TENSOR_NAME}"
|
||||
input: "reshape/Reshape/shape"
|
||||
attr {
|
||||
key: "T"
|
||||
value {
|
||||
type: DT_FLOAT
|
||||
}
|
||||
}
|
||||
attr {
|
||||
key: "Tshape"
|
||||
value {
|
||||
type: DT_INT32
|
||||
}
|
||||
}
|
||||
}
|
||||
'''
|
||||
|
||||
SWISH_PATCH =\
|
||||
'''
|
||||
node {
|
||||
name: "{NAME}/Sigmoid"
|
||||
op: "Sigmoid"
|
||||
input: "{INPUT}"
|
||||
attr {
|
||||
key: "T"
|
||||
value {
|
||||
type: DT_FLOAT
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
node {
|
||||
name: "{NAME}/mul"
|
||||
op: "Mul"
|
||||
input: "{INPUT}"
|
||||
input: "{NAME}/Sigmoid"
|
||||
attr {
|
||||
key: "T"
|
||||
value {
|
||||
type: DT_FLOAT
|
||||
}
|
||||
}
|
||||
}
|
||||
'''
|
||||
|
||||
MEAN_PATCH =\
|
||||
'''
|
||||
node {
|
||||
name: "{NAME}"
|
||||
op: "AvgPool"
|
||||
input: "{INPUT}"
|
||||
attr {
|
||||
key: "T"
|
||||
value {
|
||||
type: DT_FLOAT
|
||||
}
|
||||
}
|
||||
attr {
|
||||
key: "data_format"
|
||||
value {
|
||||
s: "NHWC"
|
||||
}
|
||||
}
|
||||
attr {
|
||||
key: "ksize"
|
||||
value {
|
||||
list {
|
||||
i: 1
|
||||
i: {KERNEL_SIZE}
|
||||
i: {KERNEL_SIZE}
|
||||
i: 1
|
||||
}
|
||||
}
|
||||
}
|
||||
attr {
|
||||
key: "padding"
|
||||
value {
|
||||
s: "VALID"
|
||||
}
|
||||
}
|
||||
attr {
|
||||
key: "strides"
|
||||
value {
|
||||
list {
|
||||
i: 1
|
||||
i: 1
|
||||
i: 1
|
||||
i: 1
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
'''
|
||||
|
||||
EXPLICT_PAD_ATTR_REGEX = r'attr {\n[\s]+key: "explicit_paddings"\n[\s]'\
|
||||
r'+value {\n[\s]+list {\n[\s]+}\n[\s]+}\n[\s]+}'
|
||||
|
||||
U_KEY_ATTR_REGEX = r'attr {\n[\s]+key: "U"\n[\s]+value {\n[\s]+type: DT_FLOAT\n[\s]+}\n[\s]+}'
|
||||
|
||||
OUTPUT_SHAPE_REGEX_3 = r'([\s]+attr {\n[\s]+key: "_output_shapes"\n[\s]+value {\n[\s]+list {\n['\
|
||||
r'\s]+)(shape[\s]+{[\s]+([\s]+(dim[\s]+{\s+size:[\s]+[0-9]+[\s]+})|([\s]+'\
|
||||
r'unknown_rank: \w+([\s]+})+))+([\s]+}[\s]+)+)+([\s]})+'
|
||||
|
||||
OUTPUT_SHAPE_REGEX_1 = r'attr {\n[\s]+key: "_output_shapes"\n[\s]+value {\n[\s]+list {\n[\s]+shape '\
|
||||
r'{\n[\s]+dim {\n[\s]+size: [0-9]+\n[\s]+}\n[\s]+dim {\n[\s]+size: [0-9]+\n[\s]+}'\
|
||||
r'\n[\s]+dim {\n[\s]+size: [0-9]+\n[\s]+}\n[\s]+dim {\n[\s]+size: [0-9]+\n[\s]+}\n'\
|
||||
r'[\s]+}\n[\s]+shape {\n[\s]+dim {\n[\s]+size: [0-9]+\n[\s]+}\n[\s]+}\n[\s]+shape'\
|
||||
r' {\n[\s]+dim {\n[\s]+size: [0-9]+\n[\s]+}\n[\s]+}\n[\s]+shape {\n[\s]+dim {\n[\s'\
|
||||
r']+size: [0-9]+\n[\s]+}\n[\s]+}\n[\s]+shape {\n[\s]+dim {\n[\s]+size: [0-9]+\n[\s]'\
|
||||
r'+}\n[\s]+}\n[\s]+shape {\n[\s]+unknown_rank: true\n[\s]+}\n[\s]+}\n[\s]+}\n[\s]+}'
|
||||
|
||||
OUTPUT_SHAPE_REGEX_2 = r'[\s]+attr {\n[\s]+key: "_output_shapes"\n[\s]+value {\n[\s]+list {\n[\s]+shape '\
|
||||
r'{([\s]+dim[\s]+{\n[\s]+size:[\s]+[0-9]+\n[\s]+}\n)+([\s]+}\n)+'
|
||||
|
||||
REDUCTION_IDENCE_REGEX = r'node[\s]+{\n[\s]+name:[\s]+\"[^"]+reduction_indices'\
|
||||
r'\"\n[\s]+op:[\s]+\"Const\"[\s]+[\s]+attr[\s]+{\n[\s]+k'\
|
||||
r'ey:[\s]+\"dtype\"\n[\s]+value[\s]+{\n[\s]+type:[\s]+DT'\
|
||||
r'_INT32\n[\s]+}\n[\s]+}\n[\s]+attr[\s]+{\n[\s]+key:[\s]'\
|
||||
r'+\"value\"\n[\s]+value[\s]+{\n[\s]+tensor[\s]+{\n[\s]+'\
|
||||
r'dtype:[\s]+DT_INT32\n[\s]+tensor_shape[\s]+{\n[\s]+dim'\
|
||||
r'[\s]+{\n[\s]+size:[\s]+2\n[\s]+}\n[\s]+}\n[\s]+tensor_'\
|
||||
r'content:[\s]+\"\\001\\000\\000\\000\\002\\000\\000\\00'\
|
||||
r'0\"\n+([\s]+}\n[\s]+)(}\n[\s]+}\n})'
|
||||
|
||||
|
||||
def pbtxt_processing(content):
|
||||
if content.find('explicit_paddings') != -1:
|
||||
print('Find unsupported attr: explicit_paddings, removing...\n')
|
||||
content = re.sub(EXPLICT_PAD_ATTR_REGEX, '', content)
|
||||
|
||||
meanCounter = content.count('op: "Mean"')
|
||||
if meanCounter > 1: # May not work
|
||||
endMeanNodeName = ""
|
||||
print(f'Find semi-supported op: Mean presented {meanCounter} times in pb, patching...')
|
||||
while True:
|
||||
meanOpLoc = content.find('op: "Mean"')
|
||||
if meanOpLoc == -1:
|
||||
break
|
||||
nodeNameLoc = content.rfind('name', 0, meanOpLoc)
|
||||
nodeNameDLoc = content.find('"', nodeNameLoc)
|
||||
nodeName = content[nodeNameDLoc +
|
||||
1:content.find('"', nodeNameDLoc + 1)]
|
||||
|
||||
nodeInputLoc = content.find('input', meanOpLoc)
|
||||
nodeInputDLoc = content.find('"', nodeInputLoc)
|
||||
nodeInputName = content[nodeInputDLoc +
|
||||
1:content.find('"', nodeInputDLoc + 1)]
|
||||
|
||||
inputNodeNameLoc = content.find(f'name: "{nodeInputName}"')
|
||||
inputNodeEnd = content.find('node', inputNodeNameLoc)
|
||||
inputNodeShape = re.findall(
|
||||
r'[\d]+\n', content[inputNodeNameLoc:inputNodeEnd])
|
||||
|
||||
if len(inputNodeShape) != 4:
|
||||
print(
|
||||
f'Unexpected happened in shape inference, infered shape: {inputNodeShape} in node [{nodeName}]')
|
||||
sys.exit(-1)
|
||||
|
||||
for i in range(len(inputNodeShape)):
|
||||
inputNodeShape[i] = int(inputNodeShape[i].replace('\n', ''))
|
||||
print(
|
||||
f'Found Node name: {nodeName}, Input Shape: {inputNodeShape}\nPatching the Mean operator...')
|
||||
|
||||
nodeStart = content.rfind('{', 0, nodeNameLoc)
|
||||
nodeEnd = content.find('node', nodeNameLoc)
|
||||
|
||||
if content[nodeStart:nodeEnd].find(
|
||||
KEEP_DIM_PATCH.replace('{KEEP_DIM}', 'false')) != -1:
|
||||
print('Find reduce mean at top, ignore and break.')
|
||||
endMeanNodeName = nodeName
|
||||
break
|
||||
|
||||
print(f'Generating the patcher, node input: {nodeInputName}')
|
||||
patcher = MEAN_PATCH.replace('{NAME}', nodeName)
|
||||
patcher = patcher.replace('{INPUT}', nodeInputName)
|
||||
patcher = patcher.replace('{KERNEL_SIZE}', str(inputNodeShape[1]))
|
||||
|
||||
print('Inserting patch and removing the Mean node...\n')
|
||||
content = content[:content.rfind(
|
||||
'node', 0, nodeStart)] + patcher + content[nodeEnd:]
|
||||
|
||||
print('Removing unused const.\n')
|
||||
content = re.sub(REDUCTION_IDENCE_REGEX, '', content)
|
||||
|
||||
while True:
|
||||
indecOpLoc = content.find('reduction_indices')
|
||||
if indecOpLoc == -1:
|
||||
break
|
||||
indecNameLoc = content.rfind('name', 0, indecOpLoc)
|
||||
indecStart = content.rfind('{', 0, indecNameLoc)
|
||||
indecEnd = content.find('node', indecNameLoc)
|
||||
if content[indecStart:indecEnd].find(endMeanNodeName) != -1:
|
||||
break
|
||||
content = content[:content.rfind(
|
||||
'node', 0, indecStart)] + content[indecEnd:]
|
||||
|
||||
if content.find('AddV2') != -1:
|
||||
print('Find unsupported op: AddV2, patching...\n')
|
||||
content = content.replace('AddV2', 'Add')
|
||||
|
||||
if content.find(KEEP_DIM_PATCH.replace('{KEEP_DIM}', 'false')) != -1:
|
||||
print('Find unsupported op: reduce_dim=false, patching...')
|
||||
|
||||
while True:
|
||||
keepDimLoc = content.find(
|
||||
KEEP_DIM_PATCH.replace(
|
||||
'{KEEP_DIM}', 'false'))
|
||||
if keepDimLoc == -1:
|
||||
break
|
||||
|
||||
nodeNameLoc = content.rfind('name', 0, keepDimLoc)
|
||||
nodeNameDLoc = content.find('"', nodeNameLoc)
|
||||
nodeName = content[nodeNameDLoc +
|
||||
1:content.find('"', nodeNameDLoc + 1)]
|
||||
print(
|
||||
f'Found Node name: {nodeName}, Output Shape: {OUTPUT_FILTER}, Oct: {dec2octpkg4(OUTPUT_FILTER)}')
|
||||
print('Patching the Mean operator...')
|
||||
|
||||
nodeEnd = content.find('node', nodeNameLoc)
|
||||
content = content.replace(
|
||||
f'input: "{nodeName}"',
|
||||
'input: "reshape/Reshape"')
|
||||
|
||||
patcher = REDUCE_DIM_PATCH.replace('{INPUT_TENSOR_NAME}', nodeName)
|
||||
patcher = patcher.replace(
|
||||
'{SHAPE}', f'\\377\\377\\377\\377{dec2octpkg4(OUTPUT_FILTER)}')
|
||||
|
||||
content = content[:nodeEnd] + patcher + content[nodeEnd:]
|
||||
|
||||
content = content.replace(
|
||||
KEEP_DIM_PATCH.replace(
|
||||
'{KEEP_DIM}', 'false'), KEEP_DIM_PATCH.replace(
|
||||
'{KEEP_DIM}', 'true'))
|
||||
print('Modified reduce_dim=true...\n')
|
||||
|
||||
if content.find('FusedBatchNormV3') != -1:
|
||||
print('Find unsupported op: FusedBatchNormV3, patching...\n')
|
||||
content = content.replace('FusedBatchNormV3', 'FusedBatchNorm')
|
||||
content = re.sub(U_KEY_ATTR_REGEX, '', content)
|
||||
content = re.sub(OUTPUT_SHAPE_REGEX_1, '', content)
|
||||
content = re.sub(OUTPUT_SHAPE_REGEX_2, '', content)
|
||||
|
||||
if content.find('op: "swish_f32"') != -1:
|
||||
print('Find unsupported op: swish_f32, patching...')
|
||||
while True:
|
||||
swishOpLoc = content.find('op: "swish_f32"')
|
||||
if swishOpLoc == -1:
|
||||
break
|
||||
nodeNameLoc = content.rfind('name', 0, swishOpLoc)
|
||||
nodeNameDLoc = content.find('"', nodeNameLoc)
|
||||
nodeName = content[nodeNameDLoc +
|
||||
1:content.find('"', nodeNameDLoc + 1)]
|
||||
|
||||
nodeInputLoc = content.find('input', swishOpLoc)
|
||||
nodeInputDLoc = content.find('"', nodeInputLoc)
|
||||
nodeInputName = content[nodeInputDLoc +
|
||||
1:content.find('"', nodeInputDLoc + 1)]
|
||||
|
||||
print(
|
||||
f'Found Node name: {nodeName}\nPatching the swish_f32 operator...')
|
||||
|
||||
nodeStart = content.rfind('{', 0, nodeNameLoc)
|
||||
nodeEnd = content.find('node', nodeNameLoc)
|
||||
|
||||
print(f'Generating the patcher, node input: {nodeInputName}')
|
||||
patcher = SWISH_PATCH.replace('{NAME}', nodeName)
|
||||
patcher = patcher.replace('{INPUT}', nodeInputName)
|
||||
|
||||
print('Inserting patch and removing the swish_f32 node...')
|
||||
content = content[:content.rfind(
|
||||
'node', 0, nodeStart)] + patcher + content[nodeEnd:]
|
||||
|
||||
print('Reconnecting the graph...\n')
|
||||
content = content.replace(
|
||||
f'input: "{nodeName}"',
|
||||
f'input: "{nodeName}/mul"')
|
||||
|
||||
return content
|
||||
|
||||
|
||||
FILE_NAME = sys.argv[1]
|
||||
PBTXT_FILE_NAME = FILE_NAME.replace('.pb', '.pbtxt')
|
||||
|
||||
OUTPUT_FILTER = 1280
|
||||
if len(sys.argv) > 2:
|
||||
OUTPUT_FILTER = int(sys.argv[2])
|
||||
|
||||
if not os.path.isfile(PBTXT_FILE_NAME):
|
||||
f = open(FILE_NAME, 'rb')
|
||||
GRAPH_DEF = tf.compat.v1.get_default_graph().as_graph_def(add_shapes=True)
|
||||
GRAPH_DEF.ParseFromString(f.read())
|
||||
f.close()
|
||||
|
||||
tf.import_graph_def(GRAPH_DEF, name='')
|
||||
io.write_graph(GRAPH_DEF, '', PBTXT_FILE_NAME, as_text=True)
|
||||
else:
|
||||
|
||||
GRAPH_DEF = tf.compat.v1.get_default_graph().as_graph_def(add_shapes=True)
|
||||
|
||||
FILE_CONTENT = pbtxt_processing(open(PBTXT_FILE_NAME, 'r').read())
|
||||
|
||||
print('Content check OK, start merging...')
|
||||
|
||||
text_format.Merge(FILE_CONTENT, GRAPH_DEF)
|
||||
io.write_graph(GRAPH_DEF,
|
||||
os.path.dirname(FILE_NAME),
|
||||
os.path.basename(FILE_NAME).split('.')[0] + '_patched.pb',
|
||||
as_text=False)
|
||||
|
||||
os.remove(PBTXT_FILE_NAME)
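# Example invocation of this patcher script (added for documentation; the file name
# and output filter value are hypothetical):
#
#     python frozenpb_patcher.py /path/to/model.pb 1280
#
# The script converts model.pb to model.pbtxt if the text graph is not already
# present, rewrites the unsupported nodes, writes model_patched.pb next to the
# input file, and removes the intermediate .pbtxt.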
|
|
@ -0,0 +1,21 @@
|
|||
# Copyright (c) Microsoft Corporation.
|
||||
# Licensed under the MIT license.
|
||||
import os
|
||||
import subprocess
|
||||
|
||||
|
||||
def patch_frozenpb(graph_path, interpreter_path):
|
||||
"""
|
||||
Patch a frozen pb file to make it compatible with Movidius VPU and then return the path to the patched pb file.
|
||||
@params:
|
||||
|
||||
graph_path: Path to the frozen pb file.
|
||||
|
||||
interpreter_path: the path of the Python interpreter
|
||||
"""
|
||||
scripts_dir = os.path.abspath(os.path.dirname(__file__))
|
||||
subprocess.run(
|
||||
f'{interpreter_path} {os.path.join(scripts_dir, "frozenpb_patcher.py")} {graph_path}',
|
||||
shell=True
|
||||
)
|
||||
return os.path.splitext(graph_path)[0] + '_patched.pb'
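# Illustrative usage sketch (added for documentation; both paths are hypothetical).
# The interpreter is expected to live inside the OpenVINO virtualenv configured for
# the backend.
if __name__ == "__main__":
    patched_pb = patch_frozenpb(
        graph_path="/path/to/frozen_model.pb",
        interpreter_path="/path/to/openvino_env/bin/python",
    )
    print(patched_pb)  # /path/to/frozen_model_patched.pb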
|
|
@ -0,0 +1,41 @@
|
|||
# Copyright (c) Microsoft Corporation.
|
||||
# Licensed under the MIT license.
|
||||
import os
|
||||
|
||||
from ..interface import BaseBackend
|
||||
from nn_meter.utils.path import get_filename_without_ext
|
||||
|
||||
|
||||
class OpenVINOBackend(BaseBackend):
|
||||
parser_class = None
|
||||
profiler_class = None
|
||||
|
||||
def update_configs(self):
|
||||
"""update the config parameters for OpenVINO platform
|
||||
"""
|
||||
super().update_configs()
|
||||
self.profiler_kwargs.update({
|
||||
'venv': self.configs['OPENVINO_ENV'],
|
||||
'optimizer': self.configs['OPTIMIZER_PATH'],
|
||||
'runtime_dir': self.configs['OPENVINO_RUNTIME_DIR'],
|
||||
'serial': self.configs['DEVICE_SERIAL'],
|
||||
'data_type': self.configs['DATA_TYPE'],
|
||||
})
|
||||
self.venv = self.configs['OPENVINO_ENV']
|
||||
|
||||
def convert_model(self, model, model_name, savedpath, input_shape=None):
|
||||
"""convert the Keras model instance to frozen pb file
|
||||
"""
|
||||
from .utils.converters import keras_model_to_frozenpb
|
||||
from .frozenpb_patcher import patch_frozenpb
|
||||
model_tmp_dir = os.path.join(savedpath, model_name)
|
||||
pb_path, _ = keras_model_to_frozenpb(model, model_tmp_dir, model_name, input_shape)
|
||||
patched_pb_path = patch_frozenpb(pb_path, os.path.join(self.venv, 'bin/python'))
|
||||
return patched_pb_path
|
||||
|
||||
def profile(self, converted_model, metrics = ['latency'], input_shape = None):
|
||||
"""convert the model to the backend platform and run the model on the backend, return required metrics
|
||||
of the running results. Only latency is supported as a metric for now.
|
||||
"""
|
||||
self.profiler.load_graph(converted_model, self.tmp_dir)
|
||||
return self.parser.parse(self.profiler.profile(input_shape)).results.get(metrics)
|
|
@ -0,0 +1,92 @@
|
|||
# Copyright (c) Microsoft Corporation.
|
||||
# Licensed under the MIT license.
|
||||
import os
|
||||
import subprocess
|
||||
import numpy as np
|
||||
import shutil
|
||||
import serial
|
||||
|
||||
from .utils import restart
|
||||
from ..interface import BaseProfiler
|
||||
from nn_meter.utils.pyutils import get_pyver
|
||||
|
||||
|
||||
class OpenVINOProfiler(BaseProfiler):
|
||||
|
||||
device = None
|
||||
|
||||
def __init__(self, venv, optimizer, runtime_dir, serial, graph_path='', _dst_graph_path='', data_type='FP16'):
|
||||
self._graph_path = graph_path
|
||||
self._venv = venv
|
||||
self._optimizer = optimizer
|
||||
self._dst_graph_path = _dst_graph_path
|
||||
self._runtime_dir = runtime_dir
|
||||
self._serial = serial
|
||||
self._data_type = data_type
|
||||
|
||||
def load_graph(self, graph_path, dst_graph_path):
|
||||
self._graph_path = graph_path
|
||||
self._dst_graph_path = dst_graph_path
|
||||
|
||||
def profile(self, shapes, retry=2):
|
||||
interpreter_path = os.path.join(self._venv, 'bin/python')
|
||||
pyver = get_pyver(interpreter_path)
|
||||
|
||||
subprocess.run(
|
||||
f'{interpreter_path} {self._optimizer} '
|
||||
f'--input_model {self._graph_path} '
|
||||
f'--output_dir {self._dst_graph_path} '
|
||||
f'--data_type {self._data_type}',
|
||||
shell=True
|
||||
)
|
||||
|
||||
filename = os.path.splitext(os.path.basename(self._graph_path))[0]
|
||||
|
||||
input_path = os.path.join(self._dst_graph_path, 'inputs')
|
||||
if os.path.exists(input_path):
|
||||
shutil.rmtree(input_path)
|
||||
|
||||
os.mkdir(input_path)
|
||||
for index, shape in enumerate(shapes):
|
||||
np.random.rand(*shape).astype('float32').tofile(os.path.join(input_path, f'input_{index}.bin'))
|
||||
|
||||
output = ''
|
||||
|
||||
with serial.Serial(self._serial, 115200, timeout=1) as ser:
|
||||
restart(ser)
|
||||
|
||||
command = (
|
||||
f'. {os.path.join(self._venv, "bin/activate")}; '
|
||||
f'cd {self._runtime_dir}; '
|
||||
f'. {os.path.join(self._runtime_dir, "setupvars.sh")} -pyver {pyver}; '
|
||||
f'{os.path.join(self._runtime_dir, "benchmark_app")} '
|
||||
f'-i {input_path} '
|
||||
f'-m {os.path.join(self._dst_graph_path, filename + ".xml")} '
|
||||
f'-d {self.device} '
|
||||
f'-report_type detailed_counters '
|
||||
f'-report_folder {self._dst_graph_path} '
|
||||
f'-niter 50 '
|
||||
f'-nireq 1 '
|
||||
f'-api sync'
|
||||
)
|
||||
|
||||
while True:
|
||||
try:
|
||||
subprocess.run(
|
||||
f'bash -c "{command}"',
|
||||
shell=True,
|
||||
timeout=30,
|
||||
)
|
||||
output = open(os.path.join(self._dst_graph_path, 'benchmark_detailed_counters_report.csv'), 'r').read()
|
||||
break
|
||||
except subprocess.TimeoutExpired as e:
|
||||
print(e)
|
||||
|
||||
if retry == 0:
|
||||
raise e
|
||||
print('Retrying...')
|
||||
restart(ser)
|
||||
|
||||
retry -= 1
|
||||
|
||||
return output
|
|
@ -0,0 +1,3 @@
|
|||
# Copyright (c) Microsoft Corporation.
|
||||
# Licensed under the MIT license.
|
||||
from .restart import restart
|
|
@ -0,0 +1,38 @@
|
|||
# Copyright (c) Microsoft Corporation.
|
||||
# Licensed under the MIT license.
|
||||
import tensorflow as tf
|
||||
from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2
|
||||
import os
|
||||
|
||||
|
||||
def keras_model_to_frozenpb(model, frozen_out_path, frozen_graph_filename, shapes, dtype=tf.dtypes.float32):
|
||||
full_model = tf.function(lambda x: model(x))
|
||||
if len(shapes) == 1:
|
||||
tensor_specs = tf.TensorSpec([1] + shapes[0], dtype)
|
||||
else:
|
||||
tensor_specs = [tf.TensorSpec([1] + shape, dtype) for shape in shapes]
|
||||
full_model = full_model.get_concrete_function(tensor_specs)
|
||||
|
||||
frozen_func = convert_variables_to_constants_v2(full_model)
|
||||
frozen_func.graph.as_graph_def()
|
||||
|
||||
frozen_graph_filename_bin = frozen_graph_filename + '.pb'
|
||||
frozen_graph_filename_txt = frozen_graph_filename + '.pbtxt'
|
||||
|
||||
tf.io.write_graph(
|
||||
graph_or_graph_def=frozen_func.graph,
|
||||
logdir=frozen_out_path,
|
||||
name=frozen_graph_filename_bin,
|
||||
as_text=False
|
||||
)
|
||||
tf.io.write_graph(
|
||||
graph_or_graph_def=frozen_func.graph,
|
||||
logdir=frozen_out_path,
|
||||
name=frozen_graph_filename_txt,
|
||||
as_text=True
|
||||
)
|
||||
|
||||
return (
|
||||
os.path.join(frozen_out_path, frozen_graph_filename_bin),
|
||||
os.path.join(frozen_out_path, frozen_graph_filename_txt),
|
||||
)
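# Illustrative usage sketch (added for documentation). The model below is a minimal
# stand-in and the output directory is hypothetical.
if __name__ == "__main__":
    model = tf.keras.Sequential([
        tf.keras.layers.Conv2D(16, 3, padding="same", input_shape=(28, 28, 3)),
        tf.keras.layers.ReLU(),
    ])
    pb_path, pbtxt_path = keras_model_to_frozenpb(
        model, "/tmp/frozen_out", "conv_relu", shapes=[[28, 28, 3]]
    )
    print(pb_path, pbtxt_path)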
|
|
@ -0,0 +1,8 @@
|
|||
# Copyright (c) Microsoft Corporation.
|
||||
# Licensed under the MIT license.
|
||||
import time
|
||||
|
||||
|
||||
def restart(ser):
|
||||
ser.write(b'all_toggle\n')
|
||||
time.sleep(0.5)
|
|
@ -0,0 +1,54 @@
|
|||
# Copyright (c) Microsoft Corporation.
|
||||
# Licensed under the MIT license.
|
||||
import re
|
||||
from .openvino_profiler import OpenVINOProfiler
|
||||
from .openvino_backend import OpenVINOBackend
|
||||
from ..interface import BaseParser
|
||||
from nn_meter.builder.backend_meta.utils import Latency, ProfiledResults
|
||||
|
||||
|
||||
class OpenVINOVPULatencyParser(BaseParser):
|
||||
|
||||
def parse(self, content):
|
||||
self.layers = self._parse_layers(content)
|
||||
self.comp_layer_latency = sum(
|
||||
Latency(layer['realtime'])
|
||||
for layer in self.layers
|
||||
if layer['layer_name'] != '<Extra>'
|
||||
)
|
||||
return self
|
||||
|
||||
def _parse_layers(self, content):
|
||||
layer_regex = r'^([^;]+);([^;]+);([^;]+);([^;]+);([^;]+);([^;]+);$'
|
||||
layers = []
|
||||
for match in re.findall(layer_regex, content, re.MULTILINE):
|
||||
try:
|
||||
layers.append({
|
||||
'layer_name': match[0],
|
||||
'exec_status': match[1],
|
||||
'layer_type': match[2],
|
||||
'exec_type': match[3],
|
||||
'realtime': float(match[4]),
|
||||
'cputime': float(match[5]),
|
||||
})
|
||||
except:
|
||||
pass
|
||||
return layers
|
||||
|
||||
@property
|
||||
def latency(self):
|
||||
return self.comp_layer_latency
|
||||
|
||||
@property
|
||||
def results(self):
|
||||
results = ProfiledResults({'latency': self.latency})
|
||||
return results
|
||||
|
||||
|
||||
class OpenVINOVPUProfiler(OpenVINOProfiler):
|
||||
device = "MYRIAD"
|
||||
|
||||
|
||||
class OpenVINOVPUBackend(OpenVINOBackend):
|
||||
parser_class = OpenVINOVPULatencyParser
|
||||
profiler_class = OpenVINOVPUProfiler
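# Illustrative sketch of the VPU latency parser above (added for documentation; the
# CSV lines mimic benchmark_app's detailed_counters report and the numbers are made up).
if __name__ == "__main__":
    sample_report = (
        "conv2d_1;EXECUTED;Convolution;MyriadXHwOp;1.25;0.00;\n"
        "relu_1;EXECUTED;ReLU;MyriadXSwOp;0.10;0.00;\n"
        "<Extra>;EXECUTED;Total;Total;0.30;0.00;\n"
    )
    parser = OpenVINOVPULatencyParser().parse(sample_report)
    print(parser.latency)  # 1.35 +- 0.0, the <Extra> row is excluded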
|
|
@ -0,0 +1,6 @@
|
|||
# Copyright (c) Microsoft Corporation.
|
||||
# Licensed under the MIT license.
|
||||
from .tflite_backend import TFLiteBackend
|
||||
from .tflite_profiler import TFLiteProfiler
|
||||
from .cpu import TFLiteCPUBackend
|
||||
from .gpu import TFLiteGPUBackend
|
|
@ -0,0 +1,70 @@
|
|||
# Copyright (c) Microsoft Corporation.
|
||||
# Licensed under the MIT license.
|
||||
import re
|
||||
from .tflite_profiler import TFLiteProfiler
|
||||
from .tflite_backend import TFLiteBackend
|
||||
from ..interface import BaseParser
|
||||
from nn_meter.builder.backend_meta.utils import Latency, ProfiledResults
|
||||
|
||||
|
||||
class TFLiteCPULatencyParser(BaseParser):
|
||||
def __init__(self):
|
||||
self.nodes = []
|
||||
self.total_latency = Latency()
|
||||
|
||||
def parse(self, content):
|
||||
self.nodes = self._parse_nodes(content)
|
||||
self.total_latency = self._parse_total_latency(content)
|
||||
return self
|
||||
|
||||
def _parse_nodes(self, content):
|
||||
start_regex = r'[= ]*Run Order[= ]*'
|
||||
end_regex = r'[= ]*Top by Computation Time[= ]*'
|
||||
node_regex = r'\s*(\w+)\s*[\d.e-]+\s*[\d.e-]+\s*([\d.e-]+)\s*[\d.e-]+%\s*[\d.e-]+%\s*[\d.e-]+\s*1\s*(\S*)'
|
||||
flag = False
|
||||
|
||||
nodes = []
|
||||
for line in content.splitlines():
|
||||
if flag:
|
||||
match = re.search(node_regex, line)
|
||||
if match:
|
||||
nodes.append({
|
||||
'node_type': match[1],
|
||||
'avg': float(match[2]),
|
||||
'name': match[3],
|
||||
})
|
||||
if re.search(start_regex, line):
|
||||
flag = True
|
||||
if re.search(end_regex, line):
|
||||
flag = False
|
||||
|
||||
return nodes
|
||||
|
||||
def _parse_total_latency(self, content):
|
||||
total_latency_regex = r'Timings \(microseconds\): count=[\d.e-]+ first=[\d.e-]+ curr=[\d.e-]+ min=[\d.e-]+ max=[\d.e-]+ avg=([\d.e-]+) std=([\d.e-]+)'
|
||||
|
||||
total_latency = Latency()
|
||||
match = re.search(total_latency_regex, content, re.MULTILINE)
|
||||
if match:
|
||||
# convert microseconds to milliseconds
|
||||
total_latency = Latency(float(match[1]) / 1000, float(match[2]) / 1000)
|
||||
|
||||
return total_latency
|
||||
|
||||
@property
|
||||
def latency(self):
|
||||
return self.total_latency
|
||||
|
||||
@property
|
||||
def results(self):
|
||||
results = ProfiledResults({'latency': self.latency})
|
||||
return results
|
||||
|
||||
|
||||
class TFLiteCPUProfiler(TFLiteProfiler):
|
||||
use_gpu = False
|
||||
|
||||
|
||||
class TFLiteCPUBackend(TFLiteBackend):
|
||||
parser_class = TFLiteCPULatencyParser
|
||||
profiler_class = TFLiteCPUProfiler
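# Illustrative sketch of the CPU latency parser above (added for documentation). The
# sample line mimics the summary printed by TFLite's benchmark_model tool; the
# numbers are made up.
if __name__ == "__main__":
    sample_output = (
        "Timings (microseconds): count=50 first=12000 curr=11500 "
        "min=11000 max=13000 avg=11800.5 std=120"
    )
    parser = TFLiteCPULatencyParser().parse(sample_output)
    print(parser.latency)  # 11.8005 +- 0.12, converted to milliseconds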
|
|
@ -0,0 +1,182 @@
|
|||
# Copyright (c) Microsoft Corporation.
|
||||
# Licensed under the MIT license.
|
||||
import re
|
||||
from .tflite_profiler import TFLiteProfiler
|
||||
from .tflite_backend import TFLiteBackend
|
||||
from ..interface import BaseParser
|
||||
from nn_meter.builder.backend_meta.utils import Latency, ProfiledResults
|
||||
|
||||
|
||||
class TFLiteGPULatencyParser(BaseParser):
|
||||
def __init__(self):
|
||||
self.kernels = []
|
||||
self.realtime = 0
|
||||
self.kernel_sum = 0
|
||||
self.block_name = ''
|
||||
self.raw_content = ''
|
||||
self.before_fused_graph = ''
|
||||
self.after_fused_graph = ''
|
||||
|
||||
def parse(self, content):
|
||||
result = self._parse_time(content)
|
||||
kernel_operation_map = self._parse_kernel_name(content)
|
||||
work_size = self._parse_work_size(content)
|
||||
self.realtime, self.block_name = self._parse_block(content)
|
||||
self.kernel_sum = sum(value[0] for key, value in result.items())
|
||||
self.kernels = [{}] * len(result)
|
||||
self.before_fused_graph, self.after_fused_graph = self._parse_graph(content)
|
||||
self.comp_avg, self.comp_std = self._parse_comp_time(content)
|
||||
self.nodes = self._parse_node_cpu_time(content)
|
||||
self.errors = self._parse_error(content)
|
||||
for key, value in result.items():
|
||||
self.kernels[key] = {
|
||||
'avg': value[0],
|
||||
'std': value[1],
|
||||
'work_size': work_size[key],
|
||||
'name': kernel_operation_map[key],
|
||||
}
|
||||
|
||||
self.comp_kernel_latency = sum((Latency(kernel['avg'], kernel['std']) for kernel in self.kernels if kernel['name'] != 'to/from tensor'), Latency())
|
||||
|
||||
self.raw_content = content
|
||||
|
||||
return self
|
||||
|
||||
@staticmethod
|
||||
def resolve_name(name):
|
||||
name = name.split(' ')
|
||||
if 'linked' in name:
|
||||
ops = []
|
||||
name = [x for x in name if x != ':' and x != 'linked']
|
||||
for i in range(0, len(name), 2):
|
||||
ops.append(name[i])
|
||||
return ops
|
||||
else:
|
||||
return [name[0]]
|
||||
|
||||
@property
|
||||
def latency(self):
|
||||
"""
|
||||
On GPU, we currently use the kernel sum instead of the realtime (block) value as the latency
|
||||
"""
|
||||
return self.comp_kernel_latency
|
||||
|
||||
def _parse_kernel_name(self, content):
|
||||
kernel_name_regex = r'kernel_name\[(\d+)\]=(.*)'
|
||||
kernel_operation_map = {}
|
||||
|
||||
for line in content.splitlines():
|
||||
match = re.search(kernel_name_regex, line)
|
||||
if match:
|
||||
index = int(match[1])
|
||||
kernel_operation_map[index] = match[2]
|
||||
|
||||
return kernel_operation_map
|
||||
|
||||
def _parse_block(self, content):
|
||||
node_regex = r'\s+\w+\s+[\d.e-]+\s+[\d.e-]+\s+([\d.e-]+)[\s\d.%]+(\S+)'
|
||||
|
||||
realtime = 0
|
||||
block_name = ''
|
||||
for line in content.splitlines():
|
||||
match = re.search(node_regex, line)
|
||||
if match:
|
||||
realtime = float(match[1])
|
||||
block_name = match[2]
|
||||
break
|
||||
|
||||
return realtime, block_name
|
||||
|
||||
def _parse_time(self, content):
|
||||
kernel_regex = r'\w+\[(\d+)\]\w+=([\d.e-]+) \w+\[(\d+)\]\w+=([\d.e-]+) ' \
|
||||
r'\w+\[(\d+)\]\w+=([\d.e-]+) \w+\[(\d+)\]\w+=([\d.e-]+)'
|
||||
result = {}
|
||||
|
||||
for line in content.splitlines():
|
||||
match = re.search(kernel_regex, line)
|
||||
if match:
|
||||
index = int(match[1])
|
||||
avg_ms = float(match[2])
|
||||
std_ms = float(match[4])
|
||||
result[index] = (avg_ms, std_ms)
|
||||
|
||||
return result
|
||||
|
||||
def _parse_work_size(self, content):
|
||||
work_size_regex = r'local_work_size\[(\d+)\]=([\d,]+)'
|
||||
work_size = {}
|
||||
|
||||
for line in content.splitlines():
|
||||
match = re.search(work_size_regex, line)
|
||||
if match:
|
||||
index = int(match[1])
|
||||
work_size[index] = match[2]
|
||||
|
||||
return work_size
|
||||
|
||||
def _parse_graph(self, content):
|
||||
before_fused_regex = r'\[Before Fused\](.*)\[end\]'
|
||||
before_fused_pattern = re.compile(before_fused_regex, re.DOTALL)
|
||||
|
||||
before_fused_graph = ''
|
||||
match = before_fused_pattern.search(content)
|
||||
if match:
|
||||
before_fused_graph = match[1]
|
||||
|
||||
after_fused_regex = r'\[After Fused\](.*)\[end\]'
|
||||
after_fused_pattern = re.compile(after_fused_regex, re.DOTALL)
|
||||
|
||||
after_fused_graph = ''
|
||||
match = after_fused_pattern.search(content)
|
||||
if match:
|
||||
after_fused_graph = match[1]
|
||||
|
||||
return before_fused_graph, after_fused_graph
|
||||
|
||||
def _parse_comp_time(self, content):
|
||||
comp_time_regex = r'comp_avg_ms=([\d.e-]+) comp_std_ms=([\d.e-]+)'
|
||||
comp_avg, comp_std = 0, 0
|
||||
|
||||
for line in content.splitlines():
|
||||
match = re.search(comp_time_regex, line)
|
||||
if match:
|
||||
comp_avg = float(match[1])
|
||||
comp_std = float(match[2])
|
||||
|
||||
return comp_avg, comp_std
|
||||
|
||||
def _parse_node_cpu_time(self, content):
|
||||
node_regex = r'(\w+)\s+[\d]+\s+([\d.e-]+)\s+[\d.%]+\s+[\d.%]+\s+[\d.e-]+\s+\d+'
|
||||
nodes = {}
|
||||
|
||||
for line in content.splitlines():
|
||||
match = re.search(node_regex, line)
|
||||
if match:
|
||||
nodes[match[1]] = float(match[2])
|
||||
|
||||
return nodes
|
||||
|
||||
def _parse_error(self, content):
|
||||
error_regex = r'ERROR: (.*)'
|
||||
errors = []
|
||||
|
||||
for line in content.splitlines():
|
||||
match = re.search(error_regex, line)
|
||||
if match:
|
||||
errors.append(match[1])
|
||||
|
||||
return errors
|
||||
|
||||
@property
|
||||
def results(self):
|
||||
results = ProfiledResults({'latency': self.latency})
|
||||
return results
|
||||
|
||||
|
||||
class TFLiteGPUProfiler(TFLiteProfiler):
|
||||
use_gpu = True
|
||||
|
||||
|
||||
class TFLiteGPUBackend(TFLiteBackend):
|
||||
parser_class = TFLiteGPULatencyParser
|
||||
profiler_class = TFLiteGPUProfiler
|
|
@ -0,0 +1,46 @@
|
|||
# Copyright (c) Microsoft Corporation.
|
||||
# Licensed under the MIT license.
|
||||
import os
|
||||
import logging
|
||||
from ..interface import BaseBackend
|
||||
from nn_meter.utils.path import get_filename_without_ext
|
||||
logging = logging.getLogger("nn-Meter")
|
||||
|
||||
|
||||
class TFLiteBackend(BaseBackend):
|
||||
parser_class = None
|
||||
profiler_class = None
|
||||
|
||||
def update_configs(self):
|
||||
"""update the config parameters for TFLite platform
|
||||
"""
|
||||
super().update_configs()
|
||||
self.profiler_kwargs.update({
|
||||
'dst_kernel_path': self.configs['KERNEL_PATH'],
|
||||
'serial': self.configs['DEVICE_SERIAL'],
|
||||
'benchmark_model_path': self.configs['BENCHMARK_MODEL_PATH'],
|
||||
'dst_graph_path': self.configs['REMOTE_MODEL_DIR']
|
||||
})
|
||||
|
||||
def convert_model(self, model_path, save_path, input_shape=None):
|
||||
"""convert the Keras model instance to ``.tflite`` and return model path
|
||||
"""
|
||||
import tensorflow as tf
|
||||
model_name = get_filename_without_ext(model_path)
|
||||
model = tf.keras.models.load_model(model_path)
|
||||
converter = tf.lite.TFLiteConverter.from_keras_model(model)
|
||||
tflite_model = converter.convert()
|
||||
converted_model = os.path.join(save_path, model_name + '.tflite')
|
||||
open(converted_model, 'wb').write(tflite_model)
|
||||
return converted_model
|
||||
|
||||
def test_connection(self):
|
||||
"""check the status of backend interface connection, ideally including open/close/check_healthy...
|
||||
"""
|
||||
from ppadb.client import Client as AdbClient
|
||||
client = AdbClient(host="127.0.0.1", port=5037)
|
||||
if self.configs['DEVICE_SERIAL']:
|
||||
device = client.device(self.configs['DEVICE_SERIAL'])
|
||||
else:
|
||||
device = client.devices()[0]
|
||||
logging.keyinfo(device.shell("echo hello backend !"))
|
|
@ -0,0 +1,65 @@
|
|||
# Copyright (c) Microsoft Corporation.
|
||||
# Licensed under the MIT license.
|
||||
import os
|
||||
from ..interface import BaseProfiler
|
||||
|
||||
|
||||
class TFLiteProfiler(BaseProfiler):
|
||||
use_gpu = None
|
||||
|
||||
def __init__(self, dst_kernel_path, benchmark_model_path, graph_path='', dst_graph_path='', serial='', num_threads=1, num_runs=50, warm_ups=10):
|
||||
"""
|
||||
@params:
|
||||
graph_path: graph file. path on host server
|
||||
dst_graph_path: graph file. path on android device
|
||||
kernel_path: dest kernel output file. path on android device
|
||||
benchmark_model_path: path to benchmark_model on android device
|
||||
"""
|
||||
self._serial = serial
|
||||
self._graph_path = graph_path
|
||||
self._dst_graph_path = dst_graph_path
|
||||
self._dst_kernel_path = dst_kernel_path
|
||||
self._benchmark_model_path = benchmark_model_path
|
||||
self._num_threads = num_threads
|
||||
self._num_runs = num_runs
|
||||
self._warm_ups = warm_ups
|
||||
|
||||
def profile(self, graph_path, preserve=False, clean=True, taskset='70'):
|
||||
"""
|
||||
@params:
|
||||
preserve: tflite file exists in remote dir. No need to push it again.
|
||||
clean: remove tflite file after running.
|
||||
"""
|
||||
model_name = os.path.basename(graph_path)
|
||||
remote_graph_path = os.path.join(self._dst_graph_path, model_name)
|
||||
|
||||
from ppadb.client import Client as AdbClient
|
||||
client = AdbClient(host="127.0.0.1", port=5037)
|
||||
if self._serial:
|
||||
device = client.device(self._serial)
|
||||
else:
|
||||
device = client.devices()[0]
|
||||
|
||||
taskset_cmd = f'taskset {taskset}' if taskset else ''
|
||||
|
||||
if not preserve:
|
||||
device.push(graph_path, remote_graph_path)
|
||||
try:
|
||||
kernel_cmd = f'--kernel_path={self._dst_kernel_path}' if self._dst_kernel_path else ''
|
||||
res = device.shell(f' {taskset_cmd} {self._benchmark_model_path} {kernel_cmd}' \
|
||||
f' --num_threads={self._num_threads}' \
|
||||
f' --num_runs={self._num_runs}' \
|
||||
f' --warmup_runs={self._warm_ups}' \
|
||||
f' --graph={remote_graph_path}' \
|
||||
f' --enable_op_profiling=true' \
|
||||
f' --use_gpu={"true" if self.use_gpu else "false"}')
|
||||
except:
|
||||
raise
|
||||
finally:
|
||||
if clean:
|
||||
if self._serial:
|
||||
os.system(f"adb -s 98281FFAZ009SV shell rm {remote_graph_path}")
|
||||
else:
|
||||
os.system(f"adb shell rm {remote_graph_path}")
|
||||
|
||||
return res
|
|
@ -0,0 +1,101 @@
|
|||
# Copyright (c) Microsoft Corporation.
|
||||
# Licensed under the MIT license.
|
||||
import os
|
||||
import yaml
|
||||
import pkg_resources
|
||||
from shutil import copyfile
|
||||
|
||||
__backend_tflite_cfg_filename__ = 'backend_tflite_config.yaml'
|
||||
__backend_openvino_cfg_filename__ = 'backend_openvino_config.yaml'
|
||||
__ruletest_cfg_filename__ = 'ruletest_config.yaml'
|
||||
__predbuild_cfg_filename__ = 'predictorbuild_config.yaml'
|
||||
|
||||
|
||||
def copy_to_workspace(backend_type, workspace_path, backendConfigFile = None):
|
||||
"""copy the default config file to user's workspace
|
||||
"""
|
||||
os.makedirs(os.path.join(workspace_path, 'configs'), exist_ok=True)
|
||||
|
||||
# backend config
|
||||
if backend_type == 'customized':
|
||||
copyfile(backendConfigFile, os.path.join(workspace_path, 'configs', 'backend_config.yaml'))
|
||||
else:
|
||||
if backend_type == 'tflite':
|
||||
config_name = __backend_tflite_cfg_filename__
|
||||
elif backend_type == 'openvino':
|
||||
config_name = __backend_openvino_cfg_filename__
|
||||
copyfile(
|
||||
pkg_resources.resource_filename(".".join(__name__.split('.')[:-2]), 'configs/builder/backends/' + config_name),
|
||||
os.path.join(workspace_path, 'configs', 'backend_config.yaml'))
|
||||
# rule test config
|
||||
copyfile(
|
||||
pkg_resources.resource_filename(".".join(__name__.split('.')[:-2]), f'configs/builder/fusion_rule_tester/' + __ruletest_cfg_filename__),
|
||||
os.path.join(os.path.join(workspace_path, 'configs'), 'ruletest_config.yaml'))
|
||||
# predictor builder config
|
||||
copyfile(
|
||||
pkg_resources.resource_filename(".".join(__name__.split('.')[:-2]), f'configs/builder/predictor_builder/' + __predbuild_cfg_filename__),
|
||||
os.path.join(os.path.join(workspace_path, 'configs'), 'predictorbuild_config.yaml'))
|
||||
|
||||
|
||||
def load_config_file(workspace_path):
|
||||
"""load config file from workspace_path;
|
||||
raise an error if the config files are not found in workspace_path
|
||||
"""
|
||||
backend_filepath = os.path.join(workspace_path, "configs", 'backend_config.yaml')
|
||||
ruletest_filepath = os.path.join(workspace_path, "configs", 'ruletest_config.yaml')
|
||||
predictorbuild_filepath = os.path.join(workspace_path, "configs", 'predictorbuild_config.yaml')
|
||||
try:
|
||||
with open(backend_filepath) as fp:
|
||||
backend = yaml.load(fp, yaml.FullLoader)
|
||||
with open(ruletest_filepath) as fp:
|
||||
ruletest = yaml.load(fp, yaml.FullLoader)
|
||||
with open(predictorbuild_filepath) as fp:
|
||||
predictorbuild = yaml.load(fp, yaml.FullLoader)
|
||||
return backend, ruletest, predictorbuild
|
||||
except:
|
||||
raise FileNotFoundError(f"config file in {workspace_path} not found, created")
|
||||
|
||||
|
||||
class ConfigData:
|
||||
def __init__(self):
|
||||
self.workspace_path = ''
|
||||
self._global_settings = {}
|
||||
|
||||
def set(self, name, value, module=''):
|
||||
self._global_settings[module][name] = value
|
||||
|
||||
def set_module(self, value, module=''):
|
||||
self._global_settings[module] = value
|
||||
|
||||
def get(self, name, module=''):
|
||||
try:
|
||||
return self._global_settings[module].get(name)
|
||||
except:
|
||||
raise ValueError(f"Could not find {module} or {module}.{name} in builder config. \
|
||||
Please run `builder_config.init('path/to/workspace')` first.")
|
||||
|
||||
def get_module(self, module=''):
|
||||
try:
|
||||
return self._global_settings[module]
|
||||
except:
|
||||
raise ValueError(f"Could not find {module} in builder config. \
|
||||
Please run `builder_config.init('path/to/workspace')` first.")
|
||||
|
||||
def get_settings(self):
|
||||
return self._global_settings
|
||||
|
||||
|
||||
class ConfigManager(ConfigData):
|
||||
def init(self, workspace_path):
|
||||
self.workspace_path = workspace_path
|
||||
self._load_from_config_file(workspace_path)
|
||||
|
||||
def _load_from_config_file(self, workspace_path):
|
||||
backend, ruletest, predbuild = load_config_file(workspace_path)
|
||||
self.set_module(backend, 'backend')
|
||||
self.set_module(ruletest, 'ruletest')
|
||||
self.set_module(predbuild, 'predbuild')
|
||||
self.set('MODEL_DIR', os.path.join(self.workspace_path, "fusion_rule_test"), 'ruletest')
|
||||
self.set('MODEL_DIR', os.path.join(self.workspace_path, "predictor_build"), 'predbuild')
|
||||
|
||||
builder_config = ConfigManager()
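For orientation, a minimal usage sketch of the config manager defined above (the workspace path is a placeholder; `copy_to_workspace` must have populated `<workspace>/configs` beforehand):

from nn_meter.builder import builder_config

builder_config.init('/path/to/workspace')                  # loads backend / ruletest / predbuild configs
model_dir = builder_config.get('MODEL_DIR', 'predbuild')   # -> <workspace>/predictor_build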
|
|
@@ -0,0 +1,4 @@
|
|||
# Copyright (c) Microsoft Corporation.
|
||||
# Licensed under the MIT license.
|
||||
from .data_sampler import generate_config_sample, BaseConfigSampler
|
||||
from .predictor_builder import build_predictor_by_data, BaseFeatureParser
|
|
@@ -0,0 +1,4 @@
|
|||
# Copyright (c) Microsoft Corporation.
|
||||
# Licensed under the MIT license.
|
||||
from .generator import generate_config_sample
|
||||
from .config_sampler import BaseConfigSampler
|
|
@@ -0,0 +1,95 @@
|
|||
# Copyright (c) Microsoft Corporation.
|
||||
# Licensed under the MIT license.
|
||||
from .prior_distribution_sampler import *
|
||||
from .finegrained_sampler import *
|
||||
|
||||
|
||||
class BaseConfigSampler:
|
||||
|
||||
def prior_config_sampling(self, sample_num):
|
||||
''' utilize the prior data to define the configuration sampling from the prior distribution.
|
||||
'''
|
||||
pass
|
||||
|
||||
def finegrained_config_sampling(self, configs, sample_num):
|
||||
''' for data in `configs`, perform fine-grained data sampling to generate random data around the large error data.
|
||||
'''
|
||||
pass
|
||||
|
||||
|
||||
class ConvSampler(BaseConfigSampler):
|
||||
|
||||
def prior_config_sampling(self, sample_num):
|
||||
return sampling_conv(sample_num)
|
||||
|
||||
def finegrained_config_sampling(self, configs, sample_num):
|
||||
return finegrained_sampling_conv(configs, sample_num)
|
||||
|
||||
|
||||
class DwConvSampler(BaseConfigSampler):
|
||||
|
||||
def prior_config_sampling(self, sample_num):
|
||||
return sampling_dwconv(sample_num)
|
||||
|
||||
def finegrained_config_sampling(self, configs, sample_num):
|
||||
return finegrained_sampling_dwconv(configs, sample_num)
|
||||
|
||||
|
||||
class PoolingSampler(BaseConfigSampler):
|
||||
|
||||
def prior_config_sampling(self, sample_num):
|
||||
return sampling_pooling(sample_num)
|
||||
|
||||
def finegrained_config_sampling(self, configs, sample_num):
|
||||
return finegrained_sampling_pooling(configs, sample_num)
|
||||
|
||||
|
||||
class FCSampler(BaseConfigSampler):
|
||||
|
||||
def prior_config_sampling(self, sample_num):
|
||||
# half samples have fixed cout as 1000, other samples have random cout
|
||||
return sampling_fc(int(sample_num * 0.5), fix_cout = 1000) + sampling_fc(int(sample_num * 0.5), fix_cout = False)
|
||||
|
||||
def finegrained_config_sampling(self, configs, sample_num):
|
||||
return finegrained_sampling_fc(configs, sample_num)
|
||||
|
||||
|
||||
class ConcatSampler(BaseConfigSampler):
|
||||
|
||||
def prior_config_sampling(self, sample_num):
|
||||
return sampling_concats(sample_num)
|
||||
|
||||
def finegrained_config_sampling(self, configs, sample_num):
|
||||
return finegrained_sampling_concats(configs, sample_num)
|
||||
|
||||
|
||||
class CinOddSampler(BaseConfigSampler):
|
||||
|
||||
def prior_config_sampling(self, sample_num):
|
||||
return sampling_hw_cin_odd(sample_num)
|
||||
|
||||
def finegrained_config_sampling(self, configs, sample_num):
|
||||
return finegrained_sampling_hw_cin_odd(configs, sample_num)
|
||||
|
||||
|
||||
class GlobalAvgPoolSampler(BaseConfigSampler):
|
||||
|
||||
def prior_config_sampling(self, sample_num):
|
||||
cfgs = sampling_hw_cin(sample_num)
|
||||
new_hws = [3] * (sample_num // 2 + 1) + [7] * (sample_num // 2 + 1)
|
||||
new_hws = new_hws[:len(cfgs)]
|
||||
import random; random.shuffle(new_hws)
|
||||
for cfg, hw in zip(cfgs, new_hws): cfg["HW"] = hw
|
||||
return cfgs
|
||||
|
||||
def finegrained_config_sampling(self, configs, sample_num):
|
||||
return finegrained_sampling_hw_cin(configs, sample_num)
|
||||
|
||||
|
||||
class HwCinSampler(BaseConfigSampler):
|
||||
|
||||
def prior_config_sampling(self, sample_num):
|
||||
return sampling_hw_cin(sample_num)
|
||||
|
||||
def finegrained_config_sampling(self, configs, sample_num):
|
||||
return finegrained_sampling_hw_cin(configs, sample_num)
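A sketch of how this sampler interface is meant to be extended for a custom kernel (the class below and its value ranges are illustrative only, not part of this commit; registration of custom kernels is handled elsewhere):

import random

class MyKernelSampler(BaseConfigSampler):
    def prior_config_sampling(self, sample_num):
        # prior stage: draw configs from whatever prior is available for the kernel
        return [{'HW': random.choice([7, 14, 28, 56]), 'CIN': random.randint(16, 512)}
                for _ in range(sample_num)]

    def finegrained_config_sampling(self, configs, sample_num):
        # fine-grained stage: resample around configs with large prediction error
        return finegrained_sampling_hw_cin(configs, sample_num)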
|
|
@@ -0,0 +1,160 @@
|
|||
# Copyright (c) Microsoft Corporation.
|
||||
# Licensed under the MIT license.
|
||||
import random
|
||||
|
||||
|
||||
def sample_in_range(mind, maxd, sample_num):
|
||||
'''sample #sample_num data from a range [mind, maxd)
|
||||
'''
|
||||
# if sample_num is larger than the sample population, keep only the whole population to avoid repetition
|
||||
if maxd - mind <= sample_num:
|
||||
data = list(range(mind, maxd))
|
||||
random.shuffle(data)
|
||||
return data
|
||||
else:
|
||||
return random.sample(range(mind, maxd), sample_num)
|
||||
|
||||
|
||||
def sample_cin_cout(cin, cout, sample_num):
|
||||
'''fine-grained sample #sample_num data in the cin and cout dimensions, respectively
|
||||
'''
|
||||
cins = sample_in_range(int(cin * 0.5), int(cin * 1.2), sample_num)
|
||||
couts = sample_in_range(int(cout * 0.5), int(cout * 1.2), sample_num)
|
||||
l = min(len(cins), len(couts)) # align the length of cins and couts
|
||||
cins, couts = cins[:l], couts[:l]
|
||||
return cins, couts
|
||||
|
||||
|
||||
def finegrained_sampling_conv(cfgs, count):
|
||||
'''
|
||||
Sampling configs for conv kernels
|
||||
Returned params include: (hw, cin, cout, kernel_size, strides)
|
||||
'''
|
||||
ncfgs = []
|
||||
for cfg in cfgs:
|
||||
cins, couts = sample_cin_cout(cfg['CIN'], cfg['COUT'], count)
|
||||
for cin, cout in zip(cins, couts):
|
||||
c = {
|
||||
'HW': cfg['HW'],
|
||||
'CIN': cin,
|
||||
'COUT': cout,
|
||||
'KERNEL_SIZE': cfg['KERNEL_SIZE'],
|
||||
'STRIDES': cfg['STRIDES'],
|
||||
}
|
||||
ncfgs.append(c)
|
||||
return ncfgs
|
||||
|
||||
|
||||
def finegrained_sampling_dwconv(cfgs, count):
|
||||
'''
|
||||
Sampling configs for dwconv kernels
|
||||
Returned params include: (hw, cin, kernel_size, strides)
|
||||
'''
|
||||
ncfgs = []
|
||||
for cfg in cfgs:
|
||||
cins = sample_in_range(int(cfg['CIN'] * 0.5), int(cfg['CIN'] * 1.2), count)
|
||||
for cin in cins:
|
||||
c = {
|
||||
'HW': cfg['HW'],
|
||||
'CIN': cin,
|
||||
'KERNEL_SIZE': cfg['KERNEL_SIZE'],
|
||||
'STRIDES': cfg['STRIDES'],
|
||||
}
|
||||
ncfgs.append(c)
|
||||
return ncfgs
|
||||
|
||||
|
||||
def finegrained_sampling_fc(cfgs, count):
|
||||
'''
|
||||
Sampling configs for fc kernels
|
||||
Returned params include: (cin, cout)
|
||||
'''
|
||||
ncfgs = []
|
||||
for cfg in cfgs:
|
||||
cins, couts = sample_cin_cout(cfg['CIN'], cfg['COUT'], count)
|
||||
for cin, cout in zip(cins, couts):
|
||||
c = {
|
||||
'CIN': cin,
|
||||
'COUT': cout
|
||||
}
|
||||
ncfgs.append(c)
|
||||
return ncfgs
|
||||
|
||||
|
||||
def finegrained_sampling_pooling(cfgs, count):
|
||||
'''
|
||||
Sampling configs for pooling kernels
|
||||
Returned params include: (hw, cin, kernel_size, pool_strides)
|
||||
'''
|
||||
ncfgs = []
|
||||
for cfg in cfgs:
|
||||
cins = sample_in_range(int(cfg['CIN'] * 0.5), int(cfg['CIN'] * 1.2), count)
|
||||
for cin in cins:
|
||||
c = {
|
||||
'HW': cfg['HW'],
|
||||
'CIN': cin,
|
||||
'KERNEL_SIZE': cfg['KERNEL_SIZE'],
|
||||
'STRIDES': cfg['STRIDES'],
|
||||
}
|
||||
ncfgs.append(c)
|
||||
return ncfgs
|
||||
|
||||
|
||||
def finegrained_sampling_hw_cin(cfgs, count):
|
||||
''' sampling configs for kernels with hw and cin parameter
|
||||
Returned params include: (hw, cin)
|
||||
'''
|
||||
ncfgs = []
|
||||
for cfg in cfgs:
|
||||
cins = sample_in_range(int(cfg['CIN'] * 0.5), int(cfg['CIN'] * 1.2), count)
|
||||
for cin in cins:
|
||||
c = {
|
||||
'CIN': cin,
|
||||
'HW': cfg['HW']
|
||||
}
|
||||
ncfgs.append(c)
|
||||
return ncfgs
|
||||
|
||||
|
||||
def finegrained_sampling_hw_cin_odd(cfgs, count):
|
||||
''' sampling configs for kernels with hw and cin (only odd values) parameters, used for split / se / channel shuffle kernels
|
||||
Returned params include: (hw, cin)
|
||||
'''
|
||||
ncfgs = []
|
||||
for cfg in cfgs:
|
||||
cins = sample_in_range(int(cfg['CIN'] * 0.5), int(cfg['CIN'] * 1.2), count)
|
||||
for cin in cins:
|
||||
c = {
|
||||
'CIN': cin + 1 if cin % 2 else cin,
|
||||
'HW': cfg['HW']
|
||||
}
|
||||
ncfgs.append(c)
|
||||
return ncfgs
|
||||
|
||||
|
||||
def finegrained_sampling_concats(cfgs, count):
|
||||
''' sampling functions for concat kernel
|
||||
Returned params include: (hw, cin1, cin2, cin3, cin4). Note that we only sample (num of cin) = 2, 3, 4,
|
||||
(cin1, cin2, cin3, cin4) is one-hot vector with unused input channel set as 0.
|
||||
'''
|
||||
ncfgs = []
|
||||
for cfg in cfgs:
|
||||
ncins, total_cins = [], []
|
||||
for cin in [cfg['CIN1'], cfg['CIN2'], cfg['CIN3'], cfg['CIN4']]:
|
||||
if cin == 0:
|
||||
total_cins.append([0] * count)
|
||||
continue
|
||||
cins = sample_in_range(int(cin * 0.5), int(cin * 1.2), count)
|
||||
ncins.append(len(cins))
|
||||
total_cins.append(cins)
|
||||
for j in range(min(ncins)):
|
||||
cins = [total_cins[i][j] for i in range(4)]
|
||||
c = {
|
||||
'HW': cfg['HW'],
|
||||
'CIN1': cins[0],
|
||||
'CIN2': cins[1],
|
||||
'CIN3': cins[2],
|
||||
'CIN4': cins[3]
|
||||
}
|
||||
ncfgs.append(c)
|
||||
return ncfgs
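For reference, a small sketch of what these helpers return (the config values are illustrative):

error_cfg = {'HW': 28, 'CIN': 96, 'COUT': 192, 'KERNEL_SIZE': 3, 'STRIDES': 1}
new_cfgs = finegrained_sampling_conv([error_cfg], count=10)
# -> 10 configs that keep HW / KERNEL_SIZE / STRIDES and draw CIN from [48, 115)
#    and COUT from [96, 230), i.e. random points around the large-error config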
|
|
@@ -0,0 +1,92 @@
|
|||
# Copyright (c) Microsoft Corporation.
|
||||
# Licensed under the MIT license.
|
||||
import os
|
||||
import json
|
||||
import random
|
||||
import string
|
||||
import logging
|
||||
from nn_meter.builder import builder_config
|
||||
from nn_meter.builder.utils import merge_prev_info
|
||||
from .utils import get_sampler_for_kernel, generate_model_for_kernel
|
||||
logging = logging.getLogger("nn-Meter")
|
||||
|
||||
|
||||
class KernelGenerator:
|
||||
def __init__(self, kernel_type, sample_num, mark = ""):
|
||||
self.kernel_type = kernel_type
|
||||
self.sample_num = sample_num
|
||||
self.ws_path = builder_config.get('MODEL_DIR', 'predbuild')
|
||||
self.case_save_path = os.path.join(self.ws_path, 'models')
|
||||
self.kernel_info = {kernel_type: {}}
|
||||
self.kernels = self.kernel_info[self.kernel_type]
|
||||
self.implement = builder_config.get('IMPLEMENT', 'predbuild')
|
||||
self.mark = mark
|
||||
|
||||
def generate_config(self, sampling_mode = 'prior', configs = None):
|
||||
sampled_cfgs = get_sampler_for_kernel(self.kernel_type, self.sample_num, sampling_mode, configs)
|
||||
for i in range(len(sampled_cfgs)):
|
||||
random_id = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(6))
|
||||
self.kernels[random_id] = {}
|
||||
self.kernels[random_id]['config'] = sampled_cfgs[i]
|
||||
|
||||
def generate_kernel_by_cfg(self):
|
||||
""" generate tensorflow models for sampled data
|
||||
"""
|
||||
kernel_type = self.kernel_type
|
||||
logging.info(f"building kernel for {kernel_type}...")
|
||||
for id, value in self.kernels.items():
|
||||
model_path = os.path.join(self.case_save_path, "_".join([kernel_type, self.mark, id]))
|
||||
kernel_cfg = value['config']
|
||||
_, input_tensor_shape, config = generate_model_for_kernel(kernel_type, kernel_cfg, save_path=model_path, implement=self.implement)
|
||||
self.kernels[id] = {
|
||||
'model': model_path,
|
||||
'shapes': input_tensor_shape,
|
||||
'config': config
|
||||
}
|
||||
|
||||
def run(self, sampling_mode = 'prior', configs = None):
|
||||
""" sample N configurations for target kernel, generate tensorflow keras model files.
|
||||
|
||||
@params
|
||||
sampling_mode: the sampling mode for config generation, chosen from ['prior', 'finegrained']
|
||||
configs: init configs for finegrained sampling
|
||||
"""
|
||||
# sample configs
|
||||
self.generate_config(sampling_mode, configs)
|
||||
|
||||
# for all sampled configurations, save kernels info and generate tensorflow model files
|
||||
self.generate_kernel_by_cfg()
|
||||
logging.info(f'Generated {len(self.kernels)} kernels; model files are saved in {self.case_save_path}.')
|
||||
return self.kernel_info
|
||||
|
||||
|
||||
def generate_config_sample(kernel_type, sample_num, mark = '', sampling_mode = 'prior', configs = None):
|
||||
""" Generate config sample and return sampled configs.
|
||||
|
||||
@params
|
||||
kernel_type (str): type of kernel
|
||||
|
||||
sample_num (int): the sampling number of configs
|
||||
|
||||
mark (str, optional): the mark for the running results. Defaults to ''.
|
||||
|
||||
sampling_mode (str, optional): the sampling mode for config generation, supporting mode includes 'prior' and 'finegrained'.
|
||||
Defaults to be 'prior'.
|
||||
|
||||
configs (list, optional): required when sampling_mode == 'finegrained'. The fine-grained samples will be based on the configs
|
||||
in `configs`. Defaults to None.
|
||||
|
||||
"""
|
||||
generator = KernelGenerator(kernel_type, sample_num, mark=mark)
|
||||
kernels_info = generator.run(sampling_mode=sampling_mode, configs=configs)
|
||||
|
||||
# save information to json file in incremental mode
|
||||
ws_mode_path = builder_config.get('MODEL_DIR', "predbuild")
|
||||
info_save_path = os.path.join(ws_mode_path, "results", f"{kernel_type}_{mark}.json")
|
||||
new_kernels_info = merge_prev_info(new_info=kernels_info, info_save_path=info_save_path)
|
||||
os.makedirs(os.path.dirname(info_save_path), exist_ok=True)
|
||||
with open(info_save_path, 'w') as fp:
|
||||
json.dump(new_kernels_info, fp, indent=4)
|
||||
logging.keyinfo(f"Save the kernel model information to {info_save_path}")
|
||||
|
||||
return kernels_info
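A short usage sketch of the generator above (the import path of `generate_config_sample` is assumed from the package layout in this commit; the workspace must already be initialized):

from nn_meter.builder import builder_config
from nn_meter.builder.kernel_predictor_builder import generate_config_sample  # assumed import path

builder_config.init('/path/to/workspace')
# stage 1: sample from the prior distribution
kernels_info = generate_config_sample('conv_bn_relu', sample_num=1000, mark='prior')
# stage 2: fine-grained sampling around large-error configs found after profiling;
# error_configs comes from build_predictor_by_data (see the predictor builder below)
kernels_info = generate_config_sample('conv_bn_relu', sample_num=10, mark='finegrained1',
                                      sampling_mode='finegrained', configs=error_configs)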
|
The diff for this file is not shown because of its large size.
@@ -0,0 +1,277 @@
|
|||
model,input_h,input_w,cin,cout,ks,stride,groups
|
||||
shufflenetv2x1-0,56,56,24,24,3,2,24
|
||||
shufflenetv2x1-0,56,56,58,58,3,2,58
|
||||
shufflenetv2x1-0,28,28,58,58,3,1,58
|
||||
shufflenetv2x1-0,28,28,58,58,3,1,58
|
||||
shufflenetv2x1-0,28,28,58,58,3,1,58
|
||||
shufflenetv2x1-0,28,28,116,116,3,2,116
|
||||
shufflenetv2x1-0,28,28,116,116,3,2,116
|
||||
shufflenetv2x1-0,14,14,116,116,3,1,116
|
||||
shufflenetv2x1-0,14,14,116,116,3,1,116
|
||||
shufflenetv2x1-0,14,14,116,116,3,1,116
|
||||
shufflenetv2x1-0,14,14,116,116,3,1,116
|
||||
shufflenetv2x1-0,14,14,116,116,3,1,116
|
||||
shufflenetv2x1-0,14,14,116,116,3,1,116
|
||||
shufflenetv2x1-0,14,14,116,116,3,1,116
|
||||
shufflenetv2x1-0,14,14,232,232,3,2,232
|
||||
shufflenetv2x1-0,14,14,232,232,3,2,232
|
||||
shufflenetv2x1-0,7,7,232,232,3,1,232
|
||||
shufflenetv2x1-0,7,7,232,232,3,1,232
|
||||
shufflenetv2x1-0,7,7,232,232,3,1,232
|
||||
shufflenetv2x2,56,56,24,24,3,2,24
|
||||
shufflenetv2x2,56,56,122,122,3,2,122
|
||||
shufflenetv2x2,28,28,122,122,3,1,122
|
||||
shufflenetv2x2,28,28,122,122,3,1,122
|
||||
shufflenetv2x2,28,28,122,122,3,1,122
|
||||
shufflenetv2x2,28,28,244,244,3,2,244
|
||||
shufflenetv2x2,28,28,244,244,3,2,244
|
||||
shufflenetv2x2,14,14,244,244,3,1,244
|
||||
shufflenetv2x2,14,14,244,244,3,1,244
|
||||
shufflenetv2x2,14,14,244,244,3,1,244
|
||||
shufflenetv2x2,14,14,244,244,3,1,244
|
||||
shufflenetv2x2,14,14,244,244,3,1,244
|
||||
shufflenetv2x2,14,14,244,244,3,1,244
|
||||
shufflenetv2x2,14,14,244,244,3,1,244
|
||||
shufflenetv2x2,14,14,488,488,3,2,488
|
||||
shufflenetv2x2,14,14,488,488,3,2,488
|
||||
shufflenetv2x2,7,7,488,488,3,1,488
|
||||
shufflenetv2x2,7,7,488,488,3,1,488
|
||||
shufflenetv2x2,7,7,488,488,3,1,488
|
||||
mnasnet,112,112,32,32,3,1,32
|
||||
mnasnet,112,112,48,48,3,2,48
|
||||
mnasnet,56,56,72,72,3,1,72
|
||||
mnasnet,56,56,72,72,3,1,72
|
||||
mnasnet,56,56,72,72,5,2,72
|
||||
mnasnet,28,28,120,120,5,1,120
|
||||
mnasnet,28,28,120,120,5,1,120
|
||||
mnasnet,28,28,240,240,5,2,240
|
||||
mnasnet,14,14,480,480,5,1,480
|
||||
mnasnet,14,14,480,480,5,1,480
|
||||
mnasnet,14,14,480,480,3,1,480
|
||||
mnasnet,14,14,576,576,3,1,576
|
||||
mnasnet,14,14,576,576,5,2,576
|
||||
mnasnet,7,7,1152,1152,5,1,1152
|
||||
mnasnet,7,7,1152,1152,5,1,1152
|
||||
mnasnet,7,7,1152,1152,5,1,1152
|
||||
mnasnet,7,7,1152,1152,3,1,1152
|
||||
mobilenetv2,112,112,32,32,3,1,32
|
||||
mobilenetv2,112,112,96,96,3,2,96
|
||||
mobilenetv2,56,56,144,144,3,1,144
|
||||
mobilenetv2,56,56,144,144,3,2,144
|
||||
mobilenetv2,28,28,192,192,3,1,192
|
||||
mobilenetv2,28,28,192,192,3,1,192
|
||||
mobilenetv2,28,28,192,192,3,2,192
|
||||
mobilenetv2,14,14,384,384,3,1,384
|
||||
mobilenetv2,14,14,384,384,3,1,384
|
||||
mobilenetv2,14,14,384,384,3,1,384
|
||||
mobilenetv2,14,14,384,384,3,1,384
|
||||
mobilenetv2,14,14,576,576,3,1,576
|
||||
mobilenetv2,14,14,576,576,3,1,576
|
||||
mobilenetv2,14,14,576,576,3,2,576
|
||||
mobilenetv2,7,7,960,960,3,1,960
|
||||
mobilenetv2,7,7,960,960,3,1,960
|
||||
mobilenetv2,7,7,960,960,3,1,960
|
||||
mobilenetv1,112,112,32,32,3,1,32
|
||||
mobilenetv1,112,112,64,64,3,2,64
|
||||
mobilenetv1,56,56,128,128,3,1,128
|
||||
mobilenetv1,56,56,128,128,3,2,128
|
||||
mobilenetv1,28,28,256,256,3,1,256
|
||||
mobilenetv1,28,28,256,256,3,2,256
|
||||
mobilenetv1,14,14,512,512,3,1,512
|
||||
mobilenetv1,14,14,512,512,3,1,512
|
||||
mobilenetv1,14,14,512,512,3,1,512
|
||||
mobilenetv1,14,14,512,512,3,1,512
|
||||
mobilenetv1,14,14,512,512,3,1,512
|
||||
mobilenetv1,14,14,512,512,3,2,512
|
||||
mobilenetv1,7,7,1024,1024,3,1,1024
|
||||
nasneta,111,111,11,11,5,2,11
|
||||
nasneta,56,56,11,11,5,1,11
|
||||
nasneta,111,111,32,32,7,2,32
|
||||
nasneta,56,56,11,11,7,1,11
|
||||
nasneta,111,111,32,32,7,2,32
|
||||
nasneta,56,56,11,11,7,1,11
|
||||
nasneta,111,111,32,32,5,2,32
|
||||
nasneta,56,56,11,11,5,1,11
|
||||
nasneta,56,56,11,11,3,1,11
|
||||
nasneta,56,56,11,11,3,1,11
|
||||
nasneta,56,56,22,22,5,2,22
|
||||
nasneta,28,28,22,22,5,1,22
|
||||
nasneta,56,56,22,22,7,2,22
|
||||
nasneta,28,28,22,22,7,1,22
|
||||
nasneta,56,56,22,22,7,2,22
|
||||
nasneta,28,28,22,22,7,1,22
|
||||
nasneta,56,56,22,22,5,2,22
|
||||
nasneta,28,28,22,22,5,1,22
|
||||
nasneta,28,28,22,22,3,1,22
|
||||
nasneta,28,28,22,22,3,1,22
|
||||
nasneta,28,28,44,44,5,1,44
|
||||
nasneta,28,28,44,44,5,1,44
|
||||
nasneta,28,28,44,44,3,1,44
|
||||
nasneta,28,28,44,44,3,1,44
|
||||
nasneta,28,28,44,44,5,1,44
|
||||
nasneta,28,28,44,44,5,1,44
|
||||
nasneta,28,28,44,44,3,1,44
|
||||
nasneta,28,28,44,44,3,1,44
|
||||
nasneta,28,28,44,44,3,1,44
|
||||
nasneta,28,28,44,44,3,1,44
|
||||
nasneta,28,28,44,44,5,1,44
|
||||
nasneta,28,28,44,44,5,1,44
|
||||
nasneta,28,28,44,44,3,1,44
|
||||
nasneta,28,28,44,44,3,1,44
|
||||
nasneta,28,28,44,44,5,1,44
|
||||
nasneta,28,28,44,44,5,1,44
|
||||
nasneta,28,28,44,44,3,1,44
|
||||
nasneta,28,28,44,44,3,1,44
|
||||
nasneta,28,28,44,44,3,1,44
|
||||
nasneta,28,28,44,44,3,1,44
|
||||
nasneta,28,28,44,44,5,1,44
|
||||
nasneta,28,28,44,44,5,1,44
|
||||
nasneta,28,28,44,44,3,1,44
|
||||
nasneta,28,28,44,44,3,1,44
|
||||
nasneta,28,28,44,44,5,1,44
|
||||
nasneta,28,28,44,44,5,1,44
|
||||
nasneta,28,28,44,44,3,1,44
|
||||
nasneta,28,28,44,44,3,1,44
|
||||
nasneta,28,28,44,44,3,1,44
|
||||
nasneta,28,28,44,44,3,1,44
|
||||
nasneta,28,28,44,44,5,1,44
|
||||
nasneta,28,28,44,44,5,1,44
|
||||
nasneta,28,28,44,44,3,1,44
|
||||
nasneta,28,28,44,44,3,1,44
|
||||
nasneta,28,28,44,44,5,1,44
|
||||
nasneta,28,28,44,44,5,1,44
|
||||
nasneta,28,28,44,44,3,1,44
|
||||
nasneta,28,28,44,44,3,1,44
|
||||
nasneta,28,28,44,44,3,1,44
|
||||
nasneta,28,28,44,44,3,1,44
|
||||
nasneta,28,28,88,88,5,2,88
|
||||
nasneta,14,14,88,88,5,1,88
|
||||
nasneta,28,28,88,88,7,2,88
|
||||
nasneta,14,14,88,88,7,1,88
|
||||
nasneta,28,28,88,88,7,2,88
|
||||
nasneta,14,14,88,88,7,1,88
|
||||
nasneta,28,28,88,88,5,2,88
|
||||
nasneta,14,14,88,88,5,1,88
|
||||
nasneta,14,14,88,88,3,1,88
|
||||
nasneta,14,14,88,88,3,1,88
|
||||
nasneta,14,14,88,88,5,1,88
|
||||
nasneta,14,14,88,88,5,1,88
|
||||
nasneta,14,14,88,88,3,1,88
|
||||
nasneta,14,14,88,88,3,1,88
|
||||
nasneta,14,14,88,88,5,1,88
|
||||
nasneta,14,14,88,88,5,1,88
|
||||
nasneta,14,14,88,88,3,1,88
|
||||
nasneta,14,14,88,88,3,1,88
|
||||
nasneta,14,14,88,88,3,1,88
|
||||
nasneta,14,14,88,88,3,1,88
|
||||
nasneta,14,14,88,88,5,1,88
|
||||
nasneta,14,14,88,88,5,1,88
|
||||
nasneta,14,14,88,88,3,1,88
|
||||
nasneta,14,14,88,88,3,1,88
|
||||
nasneta,14,14,88,88,5,1,88
|
||||
nasneta,14,14,88,88,5,1,88
|
||||
nasneta,14,14,88,88,3,1,88
|
||||
nasneta,14,14,88,88,3,1,88
|
||||
nasneta,14,14,88,88,3,1,88
|
||||
nasneta,14,14,88,88,3,1,88
|
||||
nasneta,14,14,88,88,5,1,88
|
||||
nasneta,14,14,88,88,5,1,88
|
||||
nasneta,14,14,88,88,3,1,88
|
||||
nasneta,14,14,88,88,3,1,88
|
||||
nasneta,14,14,88,88,5,1,88
|
||||
nasneta,14,14,88,88,5,1,88
|
||||
nasneta,14,14,88,88,3,1,88
|
||||
nasneta,14,14,88,88,3,1,88
|
||||
nasneta,14,14,88,88,3,1,88
|
||||
nasneta,14,14,88,88,3,1,88
|
||||
nasneta,14,14,88,88,5,1,88
|
||||
nasneta,14,14,88,88,5,1,88
|
||||
nasneta,14,14,88,88,3,1,88
|
||||
nasneta,14,14,88,88,3,1,88
|
||||
nasneta,14,14,88,88,5,1,88
|
||||
nasneta,14,14,88,88,5,1,88
|
||||
nasneta,14,14,88,88,3,1,88
|
||||
nasneta,14,14,88,88,3,1,88
|
||||
nasneta,14,14,88,88,3,1,88
|
||||
nasneta,14,14,88,88,3,1,88
|
||||
nasneta,14,14,176,176,5,2,176
|
||||
nasneta,7,7,176,176,5,1,176
|
||||
nasneta,14,14,176,176,7,2,176
|
||||
nasneta,7,7,176,176,7,1,176
|
||||
nasneta,14,14,176,176,7,2,176
|
||||
nasneta,7,7,176,176,7,1,176
|
||||
nasneta,14,14,176,176,5,2,176
|
||||
nasneta,7,7,176,176,5,1,176
|
||||
nasneta,7,7,176,176,3,1,176
|
||||
nasneta,7,7,176,176,3,1,176
|
||||
nasneta,7,7,176,176,5,1,176
|
||||
nasneta,7,7,176,176,5,1,176
|
||||
nasneta,7,7,176,176,3,1,176
|
||||
nasneta,7,7,176,176,3,1,176
|
||||
nasneta,7,7,176,176,5,1,176
|
||||
nasneta,7,7,176,176,5,1,176
|
||||
nasneta,7,7,176,176,3,1,176
|
||||
nasneta,7,7,176,176,3,1,176
|
||||
nasneta,7,7,176,176,3,1,176
|
||||
nasneta,7,7,176,176,3,1,176
|
||||
nasneta,7,7,176,176,5,1,176
|
||||
nasneta,7,7,176,176,5,1,176
|
||||
nasneta,7,7,176,176,3,1,176
|
||||
nasneta,7,7,176,176,3,1,176
|
||||
nasneta,7,7,176,176,5,1,176
|
||||
nasneta,7,7,176,176,5,1,176
|
||||
nasneta,7,7,176,176,3,1,176
|
||||
nasneta,7,7,176,176,3,1,176
|
||||
nasneta,7,7,176,176,3,1,176
|
||||
nasneta,7,7,176,176,3,1,176
|
||||
nasneta,7,7,176,176,5,1,176
|
||||
nasneta,7,7,176,176,5,1,176
|
||||
nasneta,7,7,176,176,3,1,176
|
||||
nasneta,7,7,176,176,3,1,176
|
||||
nasneta,7,7,176,176,5,1,176
|
||||
nasneta,7,7,176,176,5,1,176
|
||||
nasneta,7,7,176,176,3,1,176
|
||||
nasneta,7,7,176,176,3,1,176
|
||||
nasneta,7,7,176,176,3,1,176
|
||||
nasneta,7,7,176,176,3,1,176
|
||||
nasneta,7,7,176,176,5,1,176
|
||||
nasneta,7,7,176,176,5,1,176
|
||||
nasneta,7,7,176,176,3,1,176
|
||||
nasneta,7,7,176,176,3,1,176
|
||||
nasneta,7,7,176,176,5,1,176
|
||||
nasneta,7,7,176,176,5,1,176
|
||||
nasneta,7,7,176,176,3,1,176
|
||||
nasneta,7,7,176,176,3,1,176
|
||||
nasneta,7,7,176,176,3,1,176
|
||||
nasneta,7,7,176,176,3,1,176
|
||||
efficientnetb0,112,112,32,32,3,1,32
|
||||
efficientnetb0,112,112,96,96,3,2,96
|
||||
efficientnetb0,56,56,144,144,3,1,144
|
||||
efficientnetb0,56,56,144,144,5,2,144
|
||||
efficientnetb0,28,28,240,240,5,1,240
|
||||
efficientnetb0,28,28,240,240,3,2,240
|
||||
efficientnetb0,14,14,480,480,3,1,480
|
||||
efficientnetb0,14,14,480,480,3,1,480
|
||||
efficientnetb0,14,14,480,480,5,1,480
|
||||
efficientnetb0,14,14,672,672,5,1,672
|
||||
efficientnetb0,14,14,672,672,5,1,672
|
||||
efficientnetb0,14,14,672,672,5,2,672
|
||||
efficientnetb0,7,7,1152,1152,5,1,1152
|
||||
efficientnetb0,7,7,1152,1152,5,1,1152
|
||||
efficientnetb0,7,7,1152,1152,5,1,1152
|
||||
efficientnetb0,7,7,1152,1152,3,1,1152
|
||||
mobilenetv3,112,112,16,16,3,1,16
|
||||
mobilenetv3,112,112,64,64,3,2,64
|
||||
mobilenetv3,56,56,72,72,3,1,72
|
||||
mobilenetv3,56,56,72,72,5,2,72
|
||||
mobilenetv3,28,28,120,120,5,1,120
|
||||
mobilenetv3,28,28,120,120,5,1,120
|
||||
mobilenetv3,28,28,240,240,3,2,240
|
||||
mobilenetv3,14,14,200,200,3,1,200
|
||||
mobilenetv3,14,14,184,184,3,1,184
|
||||
mobilenetv3,14,14,184,184,3,1,184
|
||||
mobilenetv3,14,14,480,480,3,1,480
|
||||
mobilenetv3,14,14,672,672,3,1,672
|
||||
mobilenetv3,14,14,672,672,5,2,672
|
||||
mobilenetv3,7,7,960,960,5,1,960
|
||||
mobilenetv3,7,7,960,960,5,1,960
|
|
|
@@ -0,0 +1,29 @@
|
|||
model,cin,cout
|
||||
alexnet,9216,4096
|
||||
alexnet,4096,4096
|
||||
alexnet,4096,1000
|
||||
vgg11,25088,4096
|
||||
vgg11,4096,4096
|
||||
vgg11,4096,1000
|
||||
vgg13,25088,4096
|
||||
vgg13,4096,4096
|
||||
vgg13,4096,1000
|
||||
vgg16,25088,4096
|
||||
vgg16,4096,4096
|
||||
vgg16,4096,1000
|
||||
vgg19,25088,4096
|
||||
vgg19,4096,4096
|
||||
vgg19,4096,1000
|
||||
densenet161,2208,1000
|
||||
densenet121,1024,1000
|
||||
inceptionv3,2048,1000
|
||||
googlenet,1024,1000
|
||||
shufflenetv2x1-0,1024,1000
|
||||
shufflenetv2x2,2048,1000
|
||||
resnext50-32x4d,2048,1000
|
||||
resnet18,512,1000
|
||||
resnet34,512,1000
|
||||
resnet50,2048,1000
|
||||
wide_resnet50_2,2048,1000
|
||||
mnasnet,1280,1000
|
||||
mobilenetv2,1280,1000
|
|
|
@@ -0,0 +1,55 @@
|
|||
model,input_h,input_w,cin,cout,ks,stride
|
||||
alexnet,55,55,64,64,3,2
|
||||
alexnet,27,27,192,192,3,2
|
||||
alexnet,13,13,256,256,3,2
|
||||
squeezenet,109,109,96,96,3,2
|
||||
squeezenet,54,54,256,256,3,2
|
||||
squeezenet,27,27,512,512,3,2
|
||||
vgg11,224,224,64,64,2,2
|
||||
vgg11,112,112,128,128,2,2
|
||||
vgg11,56,56,256,256,2,2
|
||||
vgg11,28,28,512,512,2,2
|
||||
vgg11,14,14,512,512,2,2
|
||||
vgg13,224,224,64,64,2,2
|
||||
vgg13,112,112,128,128,2,2
|
||||
vgg13,56,56,256,256,2,2
|
||||
vgg13,28,28,512,512,2,2
|
||||
vgg13,14,14,512,512,2,2
|
||||
vgg16,224,224,64,64,2,2
|
||||
vgg16,112,112,128,128,2,2
|
||||
vgg16,56,56,256,256,2,2
|
||||
vgg16,28,28,512,512,2,2
|
||||
vgg16,14,14,512,512,2,2
|
||||
vgg19,224,224,64,64,2,2
|
||||
vgg19,112,112,128,128,2,2
|
||||
vgg19,56,56,256,256,2,2
|
||||
vgg19,28,28,512,512,2,2
|
||||
vgg19,14,14,512,512,2,2
|
||||
densenet161,112,112,96,96,3,2
|
||||
densenet161,56,56,192,192,2,2
|
||||
densenet161,28,28,384,384,2,2
|
||||
densenet161,14,14,1056,1056,2,2
|
||||
densenet121,112,112,64,64,3,2
|
||||
densenet121,56,56,128,128,2,2
|
||||
densenet121,28,28,256,256,2,2
|
||||
densenet121,14,14,512,512,2,2
|
||||
googlenet,112,112,64,64,3,2
|
||||
googlenet,56,56,192,192,3,2
|
||||
googlenet,28,28,192,192,3,1
|
||||
googlenet,28,28,256,256,3,1
|
||||
googlenet,28,28,480,480,3,2
|
||||
googlenet,14,14,480,480,3,1
|
||||
googlenet,14,14,512,512,3,1
|
||||
googlenet,14,14,512,512,3,1
|
||||
googlenet,14,14,512,512,3,1
|
||||
googlenet,14,14,528,528,3,1
|
||||
googlenet,14,14,832,832,2,2
|
||||
googlenet,7,7,832,832,3,1
|
||||
googlenet,7,7,832,832,3,1
|
||||
shufflenetv2x1-0,112,112,24,24,3,2
|
||||
shufflenetv2x2,112,112,24,24,3,2
|
||||
resnext50-32x4d,112,112,64,64,3,2
|
||||
resnet18,112,112,64,64,3,2
|
||||
resnet34,112,112,64,64,3,2
|
||||
resnet50,112,112,64,64,3,2
|
||||
wide_resnet50_2,112,112,64,64,3,2
|
|
|
@@ -0,0 +1,43 @@
|
|||
import os
|
||||
import pandas as pd
|
||||
|
||||
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
|
||||
|
||||
|
||||
def read_conv_zoo(filename = "conv.csv"):
|
||||
filename = os.path.join(BASE_DIR, filename)
|
||||
conv_df = pd.read_csv(filename)
|
||||
hws = conv_df["input_h"]
|
||||
cins = conv_df["cin"]
|
||||
couts = conv_df["cout"]
|
||||
ks = conv_df["ks"]
|
||||
strides = conv_df["stride"]
|
||||
return hws, cins, couts, ks, strides
|
||||
|
||||
|
||||
def read_dwconv_zoo(filename = "dwconv.csv"):
|
||||
filename = os.path.join(BASE_DIR, filename)
|
||||
dwconv_df = pd.read_csv(filename)
|
||||
hws = dwconv_df["input_h"]
|
||||
cins = dwconv_df["cin"]
|
||||
ks = dwconv_df["ks"]
|
||||
strides = dwconv_df["stride"]
|
||||
return hws, cins, ks, strides
|
||||
|
||||
|
||||
def read_fc_zoo(filename = "fc.csv"):
|
||||
filename = os.path.join(BASE_DIR, filename)
|
||||
fc_df = pd.read_csv(filename)
|
||||
cins = fc_df["cin"]
|
||||
couts = fc_df["cout"]
|
||||
return cins, couts
|
||||
|
||||
|
||||
def read_pool_zoo(filename = "pooling.csv"):
|
||||
filename = os.path.join(BASE_DIR, filename)
|
||||
pool_df = pd.read_csv(filename)
|
||||
hws = pool_df["input_h"]
|
||||
cins = pool_df["cin"]
|
||||
ks = pool_df["ks"]
|
||||
strides = pool_df["stride"]
|
||||
return hws, cins, ks, strides
|
|
@@ -0,0 +1,276 @@
|
|||
# Copyright (c) Microsoft Corporation.
|
||||
# Licensed under the MIT license.
|
||||
import random
|
||||
import numpy as np
|
||||
from .prior_config_lib.utils import *
|
||||
|
||||
|
||||
def inverse_transform_sampling(data, n_bins = 40, n_samples = 1000):
|
||||
''' calculate the inverse CDF, used for sampling by probability
|
||||
'''
|
||||
import scipy.interpolate as interpolate
|
||||
hist, bin_edges = np.histogram(data, bins=n_bins, density=True)
|
||||
cum_values = np.zeros(bin_edges.shape)
|
||||
cum_values[1:] = np.cumsum(hist*np.diff(bin_edges))
|
||||
inv_cdf = interpolate.interp1d(cum_values, bin_edges)
|
||||
r = np.random.rand(n_samples)
|
||||
data = inv_cdf(r)
|
||||
ndata = [int(x) for x in data]
|
||||
return ndata
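# Note (illustration, not part of the original file): np.histogram turns `data` into an
# empirical density, np.cumsum builds its CDF, interp1d inverts it, and uniform samples
# r ~ U(0, 1) mapped through the inverse CDF therefore follow the prior distribution of `data`.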
|
||||
|
||||
|
||||
def sample_based_on_distribution(data, count):
|
||||
''' use data to calculate an inverse CDF, and sample `count` data points from that distribution
|
||||
'''
|
||||
return inverse_transform_sampling(data, n_samples=count)
|
||||
|
||||
|
||||
def data_validation(data, cdata):
|
||||
''' convert sampled data to valid configuration values, e.g., kernel size in [1, 3, 5, 7]
|
||||
|
||||
@params:
|
||||
data: the origin data value.
|
||||
cdata: valid configuration value.
|
||||
'''
|
||||
newlist = []
|
||||
for da in cdata:
|
||||
value = [abs(da - x) for x in data]
|
||||
newlist.append(value)
|
||||
|
||||
newlist = list(np.asarray(newlist).T)
|
||||
cda = [list(d).index(min(d)) for d in newlist]
|
||||
redata = [cdata[x] for x in cda]
|
||||
return redata
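# Worked example (illustrative, not in the original file):
#   data_validation([2.2, 6.4, 30], cdata=[1, 3, 5, 7]) -> [3, 7, 7]
# each sampled value is snapped to the closest allowed configuration value.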
|
||||
|
||||
|
||||
def sampling_conv(count):
|
||||
'''
|
||||
Sampling configs for conv kernels based on conv_zoo, which contains configuration values from existing model zoo for conv kernel.
|
||||
The values are stored in prior_config_lib/conv.csv.
|
||||
Returned params include: (hw, cin, cout, kernel_size, strides)
|
||||
'''
|
||||
hws, cins, couts, kernel_sizes, strides = read_conv_zoo()
|
||||
new_cins = sample_based_on_distribution(cins, count)
|
||||
new_couts = sample_based_on_distribution(couts, count)
|
||||
|
||||
# 70% of sampled data are from prior distribution
|
||||
count1 = int(count * 0.7)
|
||||
new_hws = sample_based_on_distribution(hws, count1)
|
||||
new_kernel_sizes = sample_based_on_distribution(kernel_sizes, count1)
|
||||
new_strides = sample_based_on_distribution(strides, count1)
|
||||
|
||||
new_kernel_sizes = data_validation(new_kernel_sizes, [1, 3, 5, 7])
|
||||
new_strides = data_validation(new_strides, [1, 2, 4])
|
||||
new_hws = data_validation(new_hws, [1, 3, 7, 8, 13, 14, 27, 28, 32, 56, 112, 224])
|
||||
|
||||
# since conv is the largest and most-challenging kernel, we add some frequently used configuration values
|
||||
new_hws.extend([112] * int((count - count1) * 0.2) + [56] * int((count - count1) * 0.4) + [28] * int((count - count1) * 0.4)) # frequent settings
|
||||
new_kernel_sizes.extend([5] * int((count - count1) * 0.4) + [7] * int((count - count1) * 0.6)) # frequent settings
|
||||
new_strides.extend([2] * int((count - count1) * 0.4) + [1] * int((count - count1) * 0.6)) # frequent settings
|
||||
random.shuffle(new_hws)
|
||||
random.shuffle(new_strides)
|
||||
random.shuffle(new_kernel_sizes)
|
||||
|
||||
ncfgs = []
|
||||
for hw, cin, cout, kernel_size, stride in zip(new_hws, new_cins, new_couts, new_kernel_sizes, new_strides):
|
||||
c = {
|
||||
'HW': hw,
|
||||
'CIN': cin,
|
||||
'COUT': cout,
|
||||
'KERNEL_SIZE': kernel_size,
|
||||
'STRIDES': stride,
|
||||
}
|
||||
ncfgs.append(c)
|
||||
return ncfgs
|
||||
|
||||
|
||||
def sampling_conv_random(count):
|
||||
''' sampling configs for conv kernels based on random
|
||||
Returned params include: (hw, cin, cout, kernel_size, strides)
|
||||
'''
|
||||
hws = [1, 7, 8, 13, 14, 27, 28, 32, 56, 112, 224]
|
||||
kernel_sizes = [1, 3, 5, 7]
|
||||
strides = [1, 2, 4]
|
||||
|
||||
cins = list(range(3, 2160))
|
||||
couts = list(range(16, 2048))
|
||||
new_hws = random.sample(hws * int(count / len(hws)) * 10, count)
|
||||
new_kernel_sizes = random.sample(kernel_sizes * int(count / len(kernel_sizes) * 10), count)
|
||||
new_strides = random.sample(strides * int(count / len(strides) * 10), count)
|
||||
new_cins = random.sample(cins * 10, count)
|
||||
new_couts = random.sample(couts * 18, count)
|
||||
random.shuffle(new_cins)
|
||||
random.shuffle(new_couts)
|
||||
|
||||
ncfgs = []
|
||||
for hw, cin, cout, kernel_size, stride in zip(new_hws, new_cins, new_couts, new_kernel_sizes, new_strides):
|
||||
c = {
|
||||
'HW': hw,
|
||||
'CIN': cin,
|
||||
'COUT': cout,
|
||||
'KERNEL_SIZE': kernel_size,
|
||||
'STRIDES': stride,
|
||||
}
|
||||
ncfgs.append(c)
|
||||
return ncfgs
|
||||
|
||||
|
||||
def sampling_dwconv(count):
|
||||
'''
|
||||
Sampling configs for dwconv kernels based on dwconv zoo, which contains configuration values from existing model zoo for dwconv kernel.
|
||||
The values are stored in prior_config_lib/dwconv.csv.
|
||||
Returned params include: (hw, cin, kernel_size, strides)
|
||||
'''
|
||||
hws, cins, ks, strides = read_dwconv_zoo()
|
||||
new_cins = sample_based_on_distribution(cins, count)
|
||||
|
||||
count1 = int(count * 0.8)
|
||||
new_hws = sample_based_on_distribution(hws,count1)
|
||||
new_kernel_sizes = sample_based_on_distribution(ks, count1)
|
||||
new_strides = sample_based_on_distribution(strides, count1)
|
||||
|
||||
new_hws = data_validation(new_hws, [1, 3, 7, 14, 28, 56, 112, 224])
|
||||
new_kernel_sizes = data_validation(new_kernel_sizes, [1, 3, 5, 7])
|
||||
new_strides = data_validation(new_strides, [1, 2])
|
||||
|
||||
new_hws.extend([112] * int((count - count1) * 0.4) + [56] * int((count - count1) * 0.4) + [28] * int((count - count1) * 0.2))
|
||||
new_kernel_sizes.extend([5] * int((count - count1) * 0.4) + [7] * int((count - count1) * 0.6))
|
||||
new_strides.extend([2] * int((count - count1) * 0.5) + [1] * int((count - count1) * 0.5))
|
||||
random.shuffle(new_hws)
|
||||
random.shuffle(new_kernel_sizes)
|
||||
random.shuffle(new_strides)
|
||||
|
||||
ncfgs = []
|
||||
for hw, cin, kernel_size, stride in zip(new_hws, new_cins, new_kernel_sizes, new_strides):
|
||||
c = {
|
||||
'HW': hw,
|
||||
'CIN': cin,
|
||||
'KERNEL_SIZE': kernel_size,
|
||||
'STRIDES': stride,
|
||||
}
|
||||
ncfgs.append(c)
|
||||
return ncfgs
|
||||
|
||||
|
||||
def sampling_fc(count, fix_cout = 1000):
|
||||
'''
|
||||
Sampling configs for fc kernels based on fc zoo, which contains configuration values from existing model zoo for fc kernel.
|
||||
The values are stored in prior_config_lib/fcs.csv.
|
||||
Returned params include: (cin, cout)
|
||||
'''
|
||||
cins, couts = read_fc_zoo()
|
||||
new_cins = sample_based_on_distribution(cins, count)
|
||||
if not fix_cout:
|
||||
new_couts = sample_based_on_distribution(couts, count)
|
||||
else:
|
||||
new_couts = [fix_cout] * count
|
||||
ncfgs = []
|
||||
for cin, cout in zip(new_cins, new_couts):
|
||||
c = {
|
||||
'CIN': cin,
|
||||
'COUT': cout,
|
||||
}
|
||||
ncfgs.append(c)
|
||||
return ncfgs
|
||||
|
||||
|
||||
def sampling_pooling(count):
|
||||
'''
|
||||
Sampling configs for pooling kernels based on pooling zoo, which contains configuration values from existing model zoo for pooling kernel.
|
||||
The values are stored in prior_config_lib/pooling.csv.
|
||||
Returned params include: (hw, cin, kernel_size, pool_strides)
|
||||
'''
|
||||
hws, cins, kernel_size, strides = read_pool_zoo()
|
||||
new_cins = sample_based_on_distribution(cins, count)
|
||||
new_hws = sample_based_on_distribution(hws, count)
|
||||
new_hws = data_validation(new_hws, [14, 28, 56, 112, 224])
|
||||
new_kernel_sizes = data_validation(kernel_size, [2, 3])
|
||||
new_strides = data_validation(strides, [1, 2])
|
||||
|
||||
ncfgs = []
|
||||
for hw, cin, kernel_size, stride in zip(new_hws, new_cins, new_kernel_sizes, new_strides):
|
||||
c = {
|
||||
'HW': hw,
|
||||
'CIN': cin,
|
||||
'KERNEL_SIZE': kernel_size,
|
||||
'STRIDES': stride,
|
||||
}
|
||||
ncfgs.append(c)
|
||||
return ncfgs
|
||||
|
||||
|
||||
def sampling_hw_cin(count):
|
||||
''' sampling configs for kernels with hw and cin parameter
|
||||
Returned params include: (hw, cin)
|
||||
'''
|
||||
hws, cins, _, _, _ = read_conv_zoo()
|
||||
new_cins = sample_based_on_distribution(cins, count)
|
||||
|
||||
count1 = int(count * 0.8)
|
||||
new_hws = sample_based_on_distribution(hws,count1)
|
||||
new_hws = data_validation(new_hws, [1, 3, 7, 14, 28, 56, 112, 224])
|
||||
new_hws.extend([112] * int((count - count1) * 0.4) + [56] * int((count - count1) * 0.4) + [28] * int((count - count1) * 0.2))
|
||||
random.shuffle(new_hws)
|
||||
|
||||
ncfgs = []
|
||||
for hw, cin in zip(new_hws, new_cins):
|
||||
c = {
|
||||
'HW': hw,
|
||||
'CIN': cin,
|
||||
}
|
||||
ncfgs.append(c)
|
||||
return ncfgs
|
||||
|
||||
|
||||
def sampling_hw_cin_odd(count):
|
||||
''' sampling configs for kernels with hw and cin (only odd values) parameters, used for split / se / channel shuffle kernels
|
||||
Returned params include: (hw, cin)
|
||||
'''
|
||||
hws, cins, _, _, _ = read_conv_zoo()
|
||||
new_cins = sample_based_on_distribution(cins, count)
|
||||
|
||||
count1 = int(count * 0.8)
|
||||
new_hws = sample_based_on_distribution(hws,count1)
|
||||
new_hws = data_validation(new_hws, [1, 3, 7, 14, 28, 56, 112, 224])
|
||||
new_hws.extend([112] * int((count - count1) * 0.4) + [56] * int((count - count1) * 0.4) + [28] * int((count - count1) * 0.2))
|
||||
random.shuffle(new_hws)
|
||||
|
||||
ncfgs = []
|
||||
for hw, cin in zip(new_hws, new_cins):
|
||||
c = {
|
||||
'HW': hw,
|
||||
'CIN': cin + 1 if cin % 2 else cin,
|
||||
}
|
||||
ncfgs.append(c)
|
||||
return ncfgs
|
||||
|
||||
|
||||
def sampling_concats(count):
|
||||
''' sampling functions for concat kernel
|
||||
Returned params include: (hw, ns, cin1, cin2, cin3, cin4), ns are in [2, 4]
|
||||
'''
|
||||
hws, cins, _, _, _ = read_conv_zoo()
|
||||
new_hws = sample_based_on_distribution(hws, count)
|
||||
new_cins1 = sample_based_on_distribution(cins, count)
|
||||
new_cins2 = sample_based_on_distribution(cins, count)
|
||||
new_cins3 = sample_based_on_distribution(cins, count)
|
||||
new_cins4 = sample_based_on_distribution(cins, count)
|
||||
|
||||
new_hws = data_validation(new_hws, [7, 14, 28, 56]) # current normals
|
||||
new_ns = [2] * (count - int(count * 0.4) - int(count * 0.2)) + [3] * int(count * 0.2) + [4] * int(count * 0.4)
|
||||
random.shuffle(new_ns)
|
||||
|
||||
ncfgs = []
|
||||
for hw, n, cin1, cin2, cin3, cin4 in zip(new_hws, new_ns, new_cins1, new_cins2, new_cins3, new_cins4):
|
||||
cins = [cin1, cin2, cin3, cin4]
|
||||
onehot = [1] * n + [0] * (4 - n)
|
||||
onehot_cins = [x * y for x, y in zip(onehot, cins)]
|
||||
c = {
|
||||
'HW': hw,
|
||||
'CIN1': onehot_cins[0],
|
||||
'CIN2': onehot_cins[1],
|
||||
'CIN3': onehot_cins[2],
|
||||
'CIN4': onehot_cins[3]
|
||||
}
|
||||
ncfgs.append(c)
|
||||
return ncfgs
|
|
@@ -0,0 +1,116 @@
|
|||
# Copyright (c) Microsoft Corporation.
|
||||
# Licensed under the MIT license.
|
||||
import os
|
||||
import sys
|
||||
import yaml
|
||||
import logging
|
||||
import importlib
|
||||
from . import config_sampler
|
||||
logging = logging.getLogger("nn-Meter")
|
||||
|
||||
|
||||
__BUILTIN_KERNELS__ = {
|
||||
# builtin name: [kernel class name, kernel sampler class name]
|
||||
"conv_bn_relu": ["ConvBnRelu", "ConvSampler"],
|
||||
"conv_bn_relu6": ["ConvBnRelu6", "ConvSampler"],
|
||||
"conv_bn": ["ConvBn", "ConvSampler"],
|
||||
"conv_relu": ["ConvRelu", "ConvSampler"],
|
||||
"conv_relu6": ["ConvRelu6", "ConvSampler"],
|
||||
"conv_hswish": ["ConvHswish", "ConvSampler"],
|
||||
"conv_block": ["ConvBlock", "ConvSampler"],
|
||||
"conv_bn_hswish": ["ConvBnHswish", "ConvSampler"],
|
||||
# dwconv
|
||||
"dwconv_bn": ["DwConvBn", "DwConvSampler"],
|
||||
"dwconv_relu": ["DwConvRelu", "DwConvSampler"],
|
||||
"dwconv_relu6": ["DwConvRelu6", "DwConvSampler"],
|
||||
"dwconv_bn_relu": ["DwConvBnRelu", "DwConvSampler"],
|
||||
"dwconv_bn_relu6": ["DwConvBnRelu6", "DwConvSampler"],
|
||||
"dwconv_block": ["DwConvBlock", "DwConvSampler"],
|
||||
"dwconv_bn_hswish": ["ConvBnHswish", "DwConvSampler"],
|
||||
# others
|
||||
"maxpool_block": ["MaxPoolBlock", "PoolingSampler"],
|
||||
"avgpool_block": ["AvgPoolBlock", "PoolingSampler"],
|
||||
"fc_block": ["FCBlock", "FCSampler"],
|
||||
"concat_block": ["ConcatBlock", "ConcatSampler"],
|
||||
"split_block": ["SplitBlock", "CinOddSampler"],
|
||||
"channel_shuffle": ["ChannelShuffle", "CinOddSampler"],
|
||||
"se_block": ["SEBlock", "CinOddSampler"],
|
||||
"globalavgpool_block": ["GlobalAvgPoolBlock", "GlobalAvgPoolSampler"],
|
||||
"bn_relu": ["BnRelu", "HwCinSampler"],
|
||||
"bn_block": ["BnBlock", "HwCinSampler"],
|
||||
"hswish_block": ["HswishBlock", "HwCinSampler"],
|
||||
"relu_block": ["ReluBlock", "HwCinSampler"],
|
||||
"add_relu": ["AddRelu", "HwCinSampler"],
|
||||
"add_block": ["AddBlock", "HwCinSampler"],
|
||||
}
|
||||
|
||||
|
||||
__user_config_folder__ = os.path.expanduser('~/.nn_meter/config')
|
||||
__registry_cfg_filename__ = 'registry.yaml'
|
||||
__REG_KERNELS__ = {}
|
||||
if os.path.isfile(os.path.join(__user_config_folder__, __registry_cfg_filename__)):
|
||||
with open(os.path.join(__user_config_folder__, __registry_cfg_filename__), 'r') as fp:
|
||||
registry_modules = yaml.load(fp, yaml.FullLoader)
|
||||
if "kernels" in registry_modules:
|
||||
__REG_KERNELS__ = registry_modules["kernels"]
|
||||
|
||||
|
||||
def generate_model_for_kernel(kernel_type, config, save_path, implement='tensorflow'):
|
||||
""" get the nn model for predictor build. returns: input_tensors, output_tensors, configuration_key, and graphname, they are for saving tensorflow v1.x models
|
||||
"""
|
||||
if implement == 'tensorflow':
|
||||
from nn_meter.builder.nn_generator.tf_networks import blocks
|
||||
elif implement == 'torch':
|
||||
from nn_meter.builder.nn_generator.torch_networks import blocks
|
||||
else:
|
||||
raise NotImplementedError('You must choose one implementation of kernel from "tensorflow" or "torch"')
|
||||
|
||||
# get kernel class information
|
||||
if kernel_type in __REG_KERNELS__:
|
||||
kernel_info = __REG_KERNELS__[kernel_type]
|
||||
sys.path.append(kernel_info["package_location"])
|
||||
kernel_name = kernel_info["class_name"]
|
||||
kernel_module = importlib.import_module(kernel_info["class_module"])
|
||||
elif kernel_type in __BUILTIN_KERNELS__:
|
||||
kernel_name = __BUILTIN_KERNELS__[kernel_type][0]
|
||||
kernel_module = blocks
|
||||
|
||||
# get kernel class and create kernel instance by needed_config
|
||||
kernel_class = getattr(kernel_module, kernel_name)(config)
|
||||
input_tensor_shape = kernel_class.input_tensor_shape
|
||||
model = kernel_class.get_model()
|
||||
|
||||
# save model file to savepath
|
||||
kernel_class.save_model(save_path)
|
||||
logging.info(f"{kernel_type} model is generated and saved to {save_path}.")
|
||||
|
||||
return model, input_tensor_shape, config
|
||||
|
||||
|
||||
def get_sampler_for_kernel(kernel_type, sample_num, sampling_mode, configs = None):
|
||||
""" return the list of sampled data configurations in prior and finegrained sampling mode
|
||||
"""
|
||||
# get kernel sampler class information
|
||||
if kernel_type in __REG_KERNELS__:
|
||||
kernel_info = __REG_KERNELS__[kernel_type]
|
||||
sys.path.append(kernel_info["package_location"])
|
||||
sampler_name = kernel_info["sampler_name"]
|
||||
sampler_module = importlib.import_module(kernel_info["sampler_module"])
|
||||
elif kernel_type in __BUILTIN_KERNELS__:
|
||||
sampler_name = __BUILTIN_KERNELS__[kernel_type][1]
|
||||
sampler_module = config_sampler
|
||||
|
||||
# get kernel class and create kernel instance by needed_config
|
||||
sampler_class = getattr(sampler_module, sampler_name)()
|
||||
|
||||
# initialize sampling, based on prior distribution
|
||||
if sampling_mode == 'prior':
|
||||
sampled_cfgs = sampler_class.prior_config_sampling(sample_num)
|
||||
# fine-grained sampling for data with large error points
|
||||
elif sampling_mode == 'finegrained':
|
||||
sampled_cfgs = sampler_class.finegrained_config_sampling(configs, sample_num)
|
||||
return sampled_cfgs
|
||||
|
||||
|
||||
def list_kernels():
|
||||
return list(__BUILTIN_KERNELS__.keys()) + ["* " + item for item in list(__REG_KERNELS__.keys())]
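A small sketch of how these helpers fit together (the kernel name comes from the builtin table above; the save path is a placeholder):

cfgs = get_sampler_for_kernel('conv_bn_relu', sample_num=5, sampling_mode='prior')
model, shapes, cfg = generate_model_for_kernel('conv_bn_relu', cfgs[0],
                                               save_path='/tmp/conv_bn_relu_demo',
                                               implement='tensorflow')
print(list_kernels())  # builtin kernels plus registered ones, the latter marked with '*'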
|
|
@@ -0,0 +1,4 @@
|
|||
# Copyright (c) Microsoft Corporation.
|
||||
# Licensed under the MIT license.
|
||||
from .build_predictor import build_predictor_by_data
|
||||
from .extract_features import get_data_by_profiled_results, BaseFeatureParser
|
|
@@ -0,0 +1,72 @@
|
|||
# Copyright (c) Microsoft Corporation.
|
||||
# Licensed under the MIT license.
|
||||
import os
|
||||
import pandas as pd
|
||||
import logging
|
||||
from sklearn.model_selection import train_test_split
|
||||
from .utils import latency_metrics
|
||||
from .predictor_lib import init_predictor
|
||||
from .extract_features import get_feature_parser, get_data_by_profiled_results
|
||||
logging = logging.getLogger("nn-Meter")
|
||||
|
||||
|
||||
def build_predictor_by_data(kernel_type, kernel_data, backend = None, error_threshold = 0.1, mark = '', save_path = None):
|
||||
"""
|
||||
build a regression model from sampled configs and profiled latencies, and locate data with large prediction errors. Returns (predictor, 10% accuracy, error_cfgs),
where error_cfgs is a list of configurations, one for each large-error data point.
|
||||
|
||||
@params
|
||||
kernel_type (str): type of target kernel
|
||||
|
||||
kernel_data (tuple): feature (configs) and target (latencies) data
|
||||
|
||||
backend (str): target device, relative to predictor initialization
|
||||
|
||||
error_threshold (float): default = 0.1, should be no less than 0.1. if prediction error (`abs(pred - true) / true`) > error_threshold,
|
||||
we treat this data as a large-error-data.
|
||||
|
||||
mark (str): the mark for the running results. Defaults to ''.
|
||||
|
||||
save_path (str): the folder to save results file such as feature table and predictor pkl file
|
||||
"""
|
||||
feature_parser = get_feature_parser(kernel_type)
|
||||
data = get_data_by_profiled_results(kernel_type, feature_parser, kernel_data, save_path=os.path.join(save_path, f'Data_{kernel_type}_{mark}.csv'))
|
||||
|
||||
# get data for regression
|
||||
X, Y = data
|
||||
trainx, testx, trainy, testy = train_test_split(X, Y, test_size = 0.2, random_state = 10)
|
||||
logging.info(f"training data size: {len(trainx)}, test data size: {len(testx)}")
|
||||
|
||||
# initialize the regression model based on `RandomForestRegressor`
|
||||
predictor = init_predictor(kernel_type, backend)
|
||||
|
||||
# start training
|
||||
predictor.fit(trainx, trainy)
|
||||
predicts = predictor.predict(testx)
|
||||
pred_error_list = [abs(y1 - y2) / y1 for y1, y2 in zip(testy, predicts)]
|
||||
rmse, rmspe, error, acc5, acc10, acc15 = latency_metrics(predicts, testy)
|
||||
logging.info(f"rmse: {rmse:.4f}; rmspe: {rmspe:.4f}; error: {error:.4f}; 5% accuracy: {acc5:.4f}; 10% accuracy: {acc10:.4f}; 15% accuracy: {acc15:.4f}.")
|
||||
|
||||
# dump the test set with predicts to csv file
|
||||
test_res = pd.DataFrame(testx, columns=[f'feature{i}' for i in range(len(testx[0]))])
|
||||
test_res["True"] = testy
|
||||
test_res["Pred"] = predicts
|
||||
test_res["Error"] = pred_error_list
|
||||
test_res.to_csv(os.path.join(save_path, f"TestResult_{kernel_type}_{mark}.csv"), index=False)
|
||||
logging.info(f"All test data and predicted results are stored in path {os.path.join(save_path, f'TestResult_{kernel_type}_{mark}.csv')}")
|
||||
|
||||
# dump the predictor model
|
||||
import pickle
|
||||
save_path = os.path.join(save_path, f"Predictor_{kernel_type}_{mark}.pkl")
|
||||
with open(save_path, 'wb') as fp:
|
||||
pickle.dump(predictor, fp)
|
||||
logging.keyinfo(f"Saved the predictor for {kernel_type} in path {save_path}.")
|
||||
|
||||
# locate large error data
|
||||
error_configs = []
|
||||
for i in range(len(testx)):
|
||||
if pred_error_list[i] > error_threshold:
|
||||
error_config = feature_parser.get_config_by_feature(testx[i])
|
||||
error_configs.append(error_config)
|
||||
|
||||
return predictor, acc10, error_configs
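Sketch of one adaptive build round using this function (the variables `kernel_data` and `results_dir` are placeholders; `generate_config_sample` comes from the generator module above, and the profiling step itself is not part of this module):

predictor, acc10, error_cfgs = build_predictor_by_data(
    'conv_bn_relu', kernel_data,             # kernel_data = (sampled configs, profiled latencies)
    backend='tflite_cpu', error_threshold=0.1, mark='prior', save_path=results_dir)

# if the 10% accuracy is not good enough, resample around the badly predicted configs
if acc10 < 0.99 and error_cfgs:
    new_kernels = generate_config_sample('conv_bn_relu', 10, mark='finegrained1',
                                         sampling_mode='finegrained', configs=error_cfgs)
    # profile `new_kernels` on the target backend (not shown), merge with the previous
    # data, and call build_predictor_by_data again until the accuracy target is met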
|
|
@@ -0,0 +1,182 @@
|
|||
# Copyright (c) Microsoft Corporation.
|
||||
# Licensed under the MIT license.
|
||||
import os
|
||||
import sys
|
||||
import yaml
|
||||
import json
|
||||
import logging
|
||||
import importlib
|
||||
from nn_meter.builder.backend_meta.utils import read_profiled_results
|
||||
logging = logging.getLogger("nn-Meter")
|
||||
|
||||
|
||||
feature_for_kernel = {
|
||||
# conv
|
||||
"conv_bn_relu": ["HW", "CIN", "COUT", "KERNEL_SIZE", "STRIDES"],
|
||||
"conv_bn_relu6": ["HW", "CIN", "COUT", "KERNEL_SIZE", "STRIDES"],
|
||||
"conv_bn": ["HW", "CIN", "COUT", "KERNEL_SIZE", "STRIDES"],
|
||||
"conv_relu": ["HW", "CIN", "COUT", "KERNEL_SIZE", "STRIDES"],
|
||||
"conv_relu6": ["HW", "CIN", "COUT", "KERNEL_SIZE", "STRIDES"],
|
||||
"conv_hswish": ["HW", "CIN", "COUT", "KERNEL_SIZE", "STRIDES"],
|
||||
"conv_block": ["HW", "CIN", "COUT", "KERNEL_SIZE", "STRIDES"],
|
||||
"conv_bn_hswish": ["HW", "CIN", "COUT", "KERNEL_SIZE", "STRIDES"],
|
||||
# dwconv
|
||||
"dwconv_bn": ["HW", "CIN", "KERNEL_SIZE", "STRIDES"],
|
||||
"dwconv_relu": ["HW", "CIN", "KERNEL_SIZE", "STRIDES"],
|
||||
"dwconv_relu6": ["HW", "CIN", "KERNEL_SIZE", "STRIDES"],
|
||||
"dwconv_bn_relu": ["HW", "CIN", "KERNEL_SIZE", "STRIDES"],
|
||||
"dwconv_bn_relu6": ["HW", "CIN", "KERNEL_SIZE", "STRIDES"],
|
||||
"dwconv_block": ["HW", "CIN", "KERNEL_SIZE", "STRIDES"],
|
||||
"dwconv_bn_hswish": ["HW", "CIN", "KERNEL_SIZE", "STRIDES"],
|
||||
# others
|
||||
"maxpool_block": ["HW", "CIN", "KERNEL_SIZE", "POOL_STRIDES"],
|
||||
"avgpool_block": ["HW", "CIN", "KERNEL_SIZE", "POOL_STRIDES"],
|
||||
"fc_block": ["CIN", "COUT"],
|
||||
"concat_block": ["HW", "CIN1", "CIN2", "CIN3", "CIN4"],
|
||||
"split_block": ["HW", "CIN"],
|
||||
"channel_shuffle": ["HW", "CIN"],
|
||||
"se_block": ["HW", "CIN"],
|
||||
"globalavgpool_block": ["HW", "CIN"],
|
||||
"bn_relu": ["HW", "CIN"],
|
||||
"bn_block": ["HW", "CIN"],
|
||||
"hswish_block": ["HW", "CIN"],
|
||||
"relu_block": ["HW", "CIN"],
|
||||
"add_relu": ["HW", "CIN"],
|
||||
"add_block": ["HW", "CIN"],
|
||||
}
|
||||
|
||||
__user_config_folder__ = os.path.expanduser('~/.nn_meter/config')
|
||||
__registry_cfg_filename__ = 'registry.yaml'
|
||||
__REG_KERNELS__ = {}
|
||||
if os.path.isfile(os.path.join(__user_config_folder__, __registry_cfg_filename__)):
|
||||
with open(os.path.join(__user_config_folder__, __registry_cfg_filename__), 'r') as fp:
|
||||
registry_modules = yaml.load(fp, yaml.FullLoader)
|
||||
if "kernels" in registry_modules:
|
||||
__REG_KERNELS__ = registry_modules["kernels"]
|
||||
|
||||
|
||||
class BaseFeatureParser:
|
||||
def __init__(self, kernel_type):
|
||||
self.kernel_type = kernel_type
|
||||
self.needed_config = feature_for_kernel[kernel_type]
|
||||
|
||||
def get_feature_by_config(self, config_dict):
|
||||
feature = [config_dict[data] for data in self.needed_config]
|
||||
return feature
|
||||
|
||||
def get_config_by_feature(self, feature):
|
||||
assert len(self.needed_config) == len(feature)
|
||||
config = {k: v for k, v in zip(self.needed_config, feature)}
|
||||
return config
|
||||
|
||||
|
||||
class FlopsParamParser(BaseFeatureParser):
|
||||
def get_feature_by_config(self, config_dict):
|
||||
feature = [config_dict[data] for data in self.needed_config]
|
||||
from .utils import get_flops_params
|
||||
flop, param = get_flops_params(self.kernel_type, config_dict)
|
||||
flop /= 2e6
|
||||
param /= 1e6
|
||||
feature.extend([flop, param])
|
||||
return feature
|
||||
|
||||
def get_config_by_feature(self, feature):
|
||||
# remove flops and params num feature from feature vector
|
||||
feature = feature[:-2]
|
||||
assert len(self.needed_config) == len(feature)
|
||||
config = {k: v for k, v in zip(self.needed_config, feature)}
|
||||
return config
|
||||
|
||||
|
||||
def get_feature_parser(kernel_type):
|
||||
if kernel_type in __REG_KERNELS__:
|
||||
kernel_info = __REG_KERNELS__[kernel_type]
|
||||
sys.path.append(kernel_info["package_location"])
|
||||
parser_name = kernel_info["parser_name"]
|
||||
parser_module = importlib.import_module(kernel_info["parser_module"])
|
||||
return getattr(parser_module, parser_name)(kernel_type)
|
||||
elif kernel_type in feature_for_kernel:
|
||||
if "conv" in kernel_type or "dwconv" in kernel_type or "fc" in kernel_type:
|
||||
return FlopsParamParser(kernel_type)
|
||||
else:
|
||||
return BaseFeatureParser(kernel_type)
|
||||
|
||||
|
||||
def get_data_by_profiled_results(kernel_type, feature_parser, cfgs_path, lats_path = None, save_path = None):
|
||||
''' return (features, latency)
|
||||
kernel_type (str): type of kernel
|
||||
|
||||
feature_parser (subclass instance of BaseFeatureParser) the parser containing the feature parsing script
|
||||
|
||||
cfgs_path: path of config information dict, or dict of "origin_kernels.json", such as
|
||||
{
|
||||
"conv_bn_relu": {
|
||||
"id_0": {
|
||||
"model": "...",
|
||||
"shapes": [[14, 14, 98]],
|
||||
"config": {
|
||||
"HW": 14,
|
||||
"CIN": 98,
|
||||
"COUT": 120,
|
||||
"KERNEL_SIZE": 3,
|
||||
"STRIDES": 1
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
lats_path: path of profiled latency information dict, or dict of "profiled_results", such as
|
||||
{
|
||||
"conv_bn_relu": {
|
||||
"id_0": {
|
||||
"latency": "42.001 +- 1.0"
|
||||
}
|
||||
}
|
||||
}
|
||||
if lats_path is None, it means the latency information is also included in cfgs_path.
|
||||
|
||||
save_path: the path to save the feature and latency information
|
||||
'''
|
||||
if lats_path == None:
|
||||
if type(cfgs_path) == tuple:
|
||||
cfgs_path, lats_path = cfgs_path
|
||||
else:
|
||||
lats_path = cfgs_path
|
||||
if isinstance(cfgs_path, str):
|
||||
with open(cfgs_path, 'r') as fp:
|
||||
cfgs_dict = json.load(fp)[kernel_type]
|
||||
else:
|
||||
cfgs_dict = cfgs_path[kernel_type] if kernel_type in cfgs_path else cfgs_path
|
||||
if isinstance(lats_path, str):
|
||||
with open(lats_path, 'r') as fp:
|
||||
lats_dict = read_profiled_results(json.load(fp))[kernel_type]
|
||||
else:
|
||||
lats_dict = lats_path[kernel_type] if kernel_type in lats_path else lats_path
|
||||
|
||||
paths, features, lats = [], [], []
|
||||
for id in lats_dict.keys():
|
||||
try:
|
||||
path = cfgs_dict[id]["model"]
|
||||
configs = cfgs_dict[id]["config"]
|
||||
feature = feature_parser.get_feature_by_config(configs)
|
||||
latency = lats_dict[id]["latency"].avg
|
||||
if latency != 0.0:
|
||||
paths.append(os.path.basename(path))
|
||||
features.append(feature)
|
||||
lats.append(latency)
|
||||
except:
|
||||
pass
|
||||
|
||||
# save features and latency information to `save_path`
|
||||
if save_path:
|
||||
import pandas as pd
|
||||
cols = feature_parser.needed_config[:]
|
||||
if len(features[0]) - len(feature_parser.needed_config) > 0:
|
||||
cols += [f'feature_{i}' for i in range(len(features[0]) - len(feature_parser.needed_config))]
|
||||
data_df = pd.DataFrame(features, columns=cols)
|
||||
data_df = pd.concat([pd.DataFrame(paths, columns=["model_path"]), data_df], axis=1)
|
||||
data_df["latency_ms"] = lats
|
||||
data_df.to_csv(save_path, index=False)
|
||||
logging.info(f'Saved the feature table of all data for {kernel_type} in path {save_path}.')
|
||||
|
||||
return (features, lats)
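A brief sketch of feeding profiled results through this parser (the config file name follows the `results/<kernel>_<mark>.json` convention used by the generator; the profiled-results file name is an assumption):

parser = get_feature_parser('conv_bn_relu')
features, lats = get_data_by_profiled_results(
    'conv_bn_relu', parser,
    cfgs_path='results/conv_bn_relu_prior.json',        # sampled configs and model paths
    lats_path='results/profiled_conv_bn_relu.json',     # assumed name of the profiled results file
    save_path='results/Data_conv_bn_relu_prior.csv')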
|
|
@@ -0,0 +1,409 @@
|
|||
# Copyright (c) Microsoft Corporation.
|
||||
# Licensed under the MIT license.
|
||||
from sklearn.ensemble import RandomForestRegressor
|
||||
|
||||
|
||||
__PREDICTOR_ZOO__ = {
|
||||
"conv_bn_relu": {
|
||||
"tflite_cpu": {
|
||||
"max_depth": 70,
|
||||
"n_estimators": 320,
|
||||
"min_samples_leaf": 1,
|
||||
"min_samples_split": 2,
|
||||
"max_features": 6,
|
||||
"oob_score": True,
|
||||
"random_state": 10,
|
||||
},
|
||||
"tflite_gpu": {
|
||||
"max_depth": 80,
|
||||
"n_estimators": 550,
|
||||
"min_samples_leaf": 1,
|
||||
"min_samples_split": 2,
|
||||
"max_features": 5,
|
||||
"oob_score": True,
|
||||
"n_jobs": 32,
|
||||
"random_state": 10,
|
||||
},
|
||||
"openvino_vpu": {
|
||||
"max_depth": 100,
|
||||
"n_estimators": 500,
|
||||
"min_samples_leaf": 1,
|
||||
"min_samples_split": 2,
|
||||
"max_features": 5,
|
||||
"oob_score": True,
|
||||
"n_jobs": 32,
|
||||
"random_state": 10,
|
||||
}
|
||||
},
|
||||
"dwconv_bn_relu": {
|
||||
"tflite_cpu": {
|
||||
"max_depth": 50,
|
||||
"n_estimators": 240,
|
||||
"min_samples_leaf": 1,
|
||||
"min_samples_split": 2,
|
||||
"max_features": 6,
|
||||
"oob_score": True,
|
||||
"random_state": 10,
|
||||
},
|
||||
"tflite_gpu": {
|
||||
"max_depth": 40,
|
||||
"n_estimators": 240,
|
||||
"min_samples_leaf": 1,
|
||||
"min_samples_split": 2,
|
||||
"max_features": 7,
|
||||
"oob_score": True,
|
||||
"random_state": 10,
|
||||
},
|
||||
"openvino_vpu": {
|
||||
"max_depth": 100,
|
||||
"n_estimators": 650,
|
||||
"min_samples_leaf": 1,
|
||||
"min_samples_split": 2,
|
||||
"max_features": 5,
|
||||
"oob_score": True,
|
||||
"n_jobs": 32,
|
||||
"random_state": 10,
|
||||
}
|
||||
},
|
||||
"fc_block": {
|
||||
"tflite_cpu": {
|
||||
"max_depth": 50,
|
||||
"n_estimators": 370,
|
||||
"min_samples_leaf": 1,
|
||||
"min_samples_split": 2,
|
||||
"max_features": 2,
|
||||
"oob_score": True,
|
||||
"random_state": 10,
|
||||
},
|
||||
"tflite_gpu": {
|
||||
"max_depth": 70,
|
||||
"n_estimators": 330,
|
||||
"min_samples_leaf": 1,
|
||||
"min_samples_split": 2,
|
||||
"max_features": 4,
|
||||
"oob_score": True,
|
||||
"random_state": 10,
|
||||
},
|
||||
"openvino_vpu": {
|
||||
"max_depth": 70,
|
||||
"n_estimators": 330,
|
||||
"min_samples_leaf": 1,
|
||||
"min_samples_split": 2,
|
||||
"max_features": 4,
|
||||
"oob_score": True,
|
||||
"n_jobs": 32,
|
||||
"random_state": 10,
|
||||
}
|
||||
},
|
||||
"channel_shuffle": {
|
||||
"tflite_cpu": {
|
||||
"max_depth": 50,
|
||||
"n_estimators": 370,
|
||||
"min_samples_leaf": 1,
|
||||
"min_samples_split": 2,
|
||||
"max_features": 2,
|
||||
"oob_score": True,
|
||||
"random_state": 10,
|
||||
},
|
||||
"openvino_vpu": {
|
||||
"max_depth": 50,
|
||||
"n_estimators": 370,
|
||||
"min_samples_leaf": 1,
|
||||
"min_samples_split": 2,
|
||||
"max_features": 2,
|
||||
"oob_score": True,
|
||||
"random_state": 10,
|
||||
}
|
||||
},
|
||||
"se_block": {
|
||||
"tflite_cpu": {
|
||||
"max_depth": 20,
|
||||
"n_estimators": 290,
|
||||
"min_samples_leaf": 1,
|
||||
"min_samples_split": 2,
|
||||
"max_features": 2,
|
||||
"oob_score": True,
|
||||
"random_state": 10,
|
||||
},
|
||||
"tflite_gpu": {
|
||||
"max_depth": 50,
|
||||
"n_estimators": 190,
|
||||
"min_samples_leaf": 1,
|
||||
"min_samples_split": 2,
|
||||
"max_features": 2,
|
||||
"oob_score": True,
|
||||
"random_state": 10,
|
||||
},
|
||||
"openvino_vpu": {
|
||||
"max_depth": 50,
|
||||
"n_estimators": 110,
|
||||
"min_samples_leaf": 1,
|
||||
"min_samples_split": 2,
|
||||
"max_features": 2,
|
||||
"oob_score": True,
|
||||
"random_state": 10,
|
||||
}
|
||||
},
|
||||
"maxpool_block": {
|
||||
"tflite_cpu": {
|
||||
"max_depth": 50,
|
||||
"n_estimators": 210,
|
||||
"min_samples_leaf": 1,
|
||||
"min_samples_split": 2,
|
||||
"max_features": 5,
|
||||
"oob_score": True,
|
||||
"random_state": 10,
|
||||
},
|
||||
"tflite_gpu": {
|
||||
"max_depth": 50,
|
||||
"n_estimators": 370,
|
||||
"min_samples_leaf": 1,
|
||||
"min_samples_split": 2,
|
||||
"max_features": 5,
|
||||
"oob_score": True,
|
||||
"random_state": 10,
|
||||
},
|
||||
"openvino_vpu": {
|
||||
"max_depth": 50,
|
||||
"n_estimators": 370,
|
||||
"min_samples_leaf": 1,
|
||||
"min_samples_split": 2,
|
||||
"max_features": 5,
|
||||
"oob_score": True,
|
||||
"random_state": 10,
|
||||
}
|
||||
},
|
||||
"globalavgpool_block": {
|
||||
"tflite_cpu": {
|
||||
"max_depth": 70,
|
||||
"n_estimators": 370,
|
||||
"min_samples_leaf": 1,
|
||||
"min_samples_split": 2,
|
||||
"max_features": 2,
|
||||
"oob_score": True,
|
||||
"random_state": 10,
|
||||
}
|
||||
},
|
||||
"hswish_block": {
|
||||
"tflite_cpu": {
|
||||
"max_depth": 50,
|
||||
"n_estimators": 190,
|
||||
"min_samples_leaf": 1,
|
||||
"min_samples_split": 2,
|
||||
"max_features": 2,
|
||||
"oob_score": True,
|
||||
"random_state": 10,
|
||||
},
|
||||
"tflite_gpu": {
|
||||
"max_depth": 50,
|
||||
"n_estimators": 190,
|
||||
"min_samples_leaf": 1,
|
||||
"min_samples_split": 2,
|
||||
"max_features": 2,
|
||||
"oob_score": True,
|
||||
"random_state": 10,
|
||||
},
|
||||
"openvino_vpu": {
|
||||
"max_depth": 50,
|
||||
"n_estimators": 110,
|
||||
"min_samples_leaf": 1,
|
||||
"min_samples_split": 2,
|
||||
"max_features": 2,
|
||||
"oob_score": True,
|
||||
"random_state": 10,
|
||||
}
|
||||
},
|
||||
"avgpool_block": {
|
||||
"tflite_cpu": {
|
||||
"max_depth": 50,
|
||||
"n_estimators": 370,
|
||||
"min_samples_leaf": 1,
|
||||
"min_samples_split": 2,
|
||||
"max_features": 5,
|
||||
"oob_score": True,
|
||||
"random_state": 10,
|
||||
},
|
||||
"tflite_gpu": {
|
||||
"max_depth": 50,
|
||||
"n_estimators": 370,
|
||||
"min_samples_leaf": 1,
|
||||
"min_samples_split": 2,
|
||||
"max_features": 5,
|
||||
"oob_score": True,
|
||||
"random_state": 10,
|
||||
},
|
||||
"openvino_vpu": {
|
||||
"max_depth": 50,
|
||||
"n_estimators": 390,
|
||||
"min_samples_leaf": 1,
|
||||
"min_samples_split": 2,
|
||||
"max_features": 5,
|
||||
"oob_score": True,
|
||||
"random_state": 10,
|
||||
}
|
||||
},
|
||||
"bn_relu": {
|
||||
"tflite_cpu": {
|
||||
"max_depth": 50,
|
||||
"n_estimators": 370,
|
||||
"min_samples_leaf": 1,
|
||||
"min_samples_split": 2,
|
||||
"max_features": 2,
|
||||
"oob_score": True,
|
||||
"random_state": 10,
|
||||
},
|
||||
"tflite_gpu": {
|
||||
"max_depth": 50,
|
||||
"n_estimators": 190,
|
||||
"min_samples_leaf": 1,
|
||||
"min_samples_split": 2,
|
||||
"max_features": 2,
|
||||
"oob_score": True,
|
||||
"random_state": 10,
|
||||
},
|
||||
"openvino_vpu": {
|
||||
"max_depth": 50,
|
||||
"n_estimators": 570,
|
||||
"min_samples_leaf": 1,
|
||||
"min_samples_split": 2,
|
||||
"max_features": 2,
|
||||
"oob_score": True,
|
||||
"random_state": 10,
|
||||
}
|
||||
},
|
||||
"relu_block": {
|
||||
"tflite_cpu": {
|
||||
"max_depth": 50,
|
||||
"n_estimators": 370,
|
||||
"min_samples_leaf": 1,
|
||||
"min_samples_split": 2,
|
||||
"max_features": 2,
|
||||
"oob_score": True,
|
||||
"random_state": 10,
|
||||
},
|
||||
"tflite_gpu": {
|
||||
"max_depth": 50,
|
||||
"n_estimators": 190,
|
||||
"min_samples_leaf": 1,
|
||||
"min_samples_split": 2,
|
||||
"max_features": 2,
|
||||
"oob_score": True,
|
||||
"random_state": 10,
|
||||
},
|
||||
"openvino_vpu": {
|
||||
"max_depth": 50,
|
||||
"n_estimators": 190,
|
||||
"min_samples_leaf": 1,
|
||||
"min_samples_split": 2,
|
||||
"max_features": 2,
|
||||
"oob_score": True,
|
||||
"random_state": 10,
|
||||
}
|
||||
},
|
||||
"bn_block": {
|
||||
"tflite_cpu": {
|
||||
"max_depth": 50,
|
||||
"n_estimators": 370,
|
||||
"min_samples_leaf": 1,
|
||||
"min_samples_split": 2,
|
||||
"max_features": 2,
|
||||
"oob_score": True,
|
||||
"random_state": 10,
|
||||
},
|
||||
"tflite_gpu": {
|
||||
"max_depth": 50,
|
||||
"n_estimators": 190,
|
||||
"min_samples_leaf": 1,
|
||||
"min_samples_split": 2,
|
||||
"max_features": 2,
|
||||
"oob_score": True,
|
||||
"random_state": 10,
|
||||
},
|
||||
"openvino_vpu": {
|
||||
"max_depth": 50,
|
||||
"n_estimators": 390,
|
||||
"min_samples_leaf": 1,
|
||||
"min_samples_split": 2,
|
||||
"max_features": 2,
|
||||
"oob_score": True,
|
||||
"random_state": 10,
|
||||
}
|
||||
},
|
||||
"concat_block": {
|
||||
"tflite_cpu": {
|
||||
"max_depth": 100,
|
||||
"n_estimators": 690,
|
||||
"min_samples_leaf": 1,
|
||||
"min_samples_split": 2,
|
||||
"max_features": 5,
|
||||
"oob_score": True,
|
||||
"random_state": 10,
|
||||
},
|
||||
"tflite_gpu": {
|
||||
"max_depth": 100,
|
||||
"n_estimators": 690,
|
||||
"min_samples_leaf": 1,
|
||||
"min_samples_split": 2,
|
||||
"max_features": 5,
|
||||
"oob_score": True,
|
||||
"random_state": 10,
|
||||
}
|
||||
},
|
||||
"add_relu": {
|
||||
"tflite_cpu": {
|
||||
"max_depth": 50,
|
||||
"n_estimators": 570,
|
||||
"min_samples_leaf": 1,
|
||||
"min_samples_split": 2,
|
||||
"max_features": 3,
|
||||
"oob_score": True,
|
||||
"random_state": 10,
|
||||
},
|
||||
"tflite_gpu": {
|
||||
"max_depth": 50,
|
||||
"n_estimators": 570,
|
||||
"min_samples_leaf": 1,
|
||||
"min_samples_split": 2,
|
||||
"max_features": 3,
|
||||
"oob_score": True,
|
||||
"random_state": 10,
|
||||
},
|
||||
"openvino_vpu": {
|
||||
"max_depth": 50,
|
||||
"n_estimators": 570,
|
||||
"min_samples_leaf": 1,
|
||||
"min_samples_split": 2,
|
||||
"max_features": 3,
|
||||
"oob_score": True,
|
||||
"random_state": 10,
|
||||
}
|
||||
},
|
||||
"split_block": {
|
||||
"tflite_cpu": {
|
||||
"max_depth": 50,
|
||||
"n_estimators": 190,
|
||||
"min_samples_leaf": 1,
|
||||
"min_samples_split": 2,
|
||||
"max_features": 2,
|
||||
"oob_score": True,
|
||||
"random_state": 10,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
def init_predictor(kernel_type, backend):
|
||||
try:
|
||||
model_param = __PREDICTOR_ZOO__[kernel_type][backend]
|
||||
model = RandomForestRegressor(**model_param)
|
||||
except:    # fall back to the default hyper-parameters when (kernel_type, backend) is not in the zoo
|
||||
model = RandomForestRegressor(
|
||||
max_depth = 50,
|
||||
n_estimators = 370,
|
||||
min_samples_leaf = 1,
|
||||
min_samples_split = 2,
|
||||
max_features = "auto",
|
||||
oob_score = True,
|
||||
random_state = 10,
|
||||
)
|
||||
return model
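# A minimal usage sketch (not part of the original file): the feature matrix and latency
# vector below are random placeholders standing in for the output of
# get_data_by_profiled_results; any (kernel_type, backend) pair missing from
# __PREDICTOR_ZOO__ silently falls back to the default hyper-parameters above.
if __name__ == "__main__":
    import numpy as np
    features = np.random.rand(200, 7)   # placeholder kernel features
    latency = np.random.rand(200)       # placeholder latency in ms
    predictor = init_predictor("conv_bn_relu", "tflite_cpu")
    predictor.fit(features, latency)
    print(predictor.predict(features[:5]))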
|
|
@ -0,0 +1,49 @@
|
|||
import numpy as np
|
||||
from sklearn.metrics import mean_squared_error
|
||||
|
||||
|
||||
def get_accuracy(y_pred, y_true, threshold = 0.01):
|
||||
a = (y_true - y_pred) / y_true
|
||||
b = np.where(abs(a) <= threshold)
|
||||
return len(b[0]) / len(y_true)
|
||||
|
||||
|
||||
def latency_metrics(y_pred, y_true):
|
||||
rmspe = (np.sqrt(np.mean(np.square((y_true - y_pred) / y_true)))) * 100
|
||||
rmse = np.sqrt(mean_squared_error(y_pred, y_true))
|
||||
acc5 = get_accuracy(y_pred, y_true, threshold=0.05)
|
||||
acc10 = get_accuracy(y_pred, y_true, threshold=0.10)
|
||||
acc15 = get_accuracy(y_pred, y_true, threshold=0.15)
|
||||
return rmse, rmspe, rmse / np.mean(y_true), acc5, acc10, acc15
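# A small worked example (a hedged sketch with made-up numbers): relative errors of
# 3%, 6% and 16% give acc5 = 1/3, acc10 = 2/3 and acc15 = 2/3.
if __name__ == "__main__":
    y_true = np.array([10.0, 20.0, 30.0])
    y_pred = np.array([10.3, 21.2, 34.8])
    rmse, rmspe, error, acc5, acc10, acc15 = latency_metrics(y_pred, y_true)
    print(rmse, rmspe, error, acc5, acc10, acc15)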
|
||||
|
||||
|
||||
def get_conv_flop_params(hw, cin, cout, kernel_size, stride):
|
||||
params = cout * (kernel_size * kernel_size * cin + 1)
|
||||
flops = 2 * hw / stride * hw / stride * params
|
||||
return flops, params
|
||||
|
||||
|
||||
def get_dwconv_flop_params(hw, cout, kernel_size, stride):
|
||||
params = cout * (kernel_size * kernel_size + 1)
|
||||
flops = 2 * hw / stride * hw / stride * params
|
||||
return flops, params
|
||||
|
||||
|
||||
def get_fc_flop_params(cin, cout):
|
||||
params = (2 * cin + 1) * cout
|
||||
flops = params
|
||||
return flops, params
|
||||
|
||||
|
||||
def get_flops_params(kernel_type, config):
|
||||
if "dwconv" in kernel_type:
|
||||
hw, cin, kernel_size, stride = config["HW"], config["CIN"], \
|
||||
config["KERNEL_SIZE"], config["STRIDES"]
|
||||
return get_dwconv_flop_params(hw, cin, kernel_size, stride)
|
||||
elif "conv" in kernel_type:
|
||||
hw, cin, cout, kernel_size, stride = config["HW"], config["CIN"], \
|
||||
config["COUT"], config["KERNEL_SIZE"], config["STRIDES"]
|
||||
return get_conv_flop_params(hw, cin, cout, kernel_size, stride)
|
||||
elif "fc" in kernel_type:
|
||||
cin, cout = config["CIN"], config["COUT"]
|
||||
return get_fc_flop_params(cin, cout)
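# A worked example (a hedged sketch) using the sample conv_bn_relu config shown in this PR:
# params = 120 * (3 * 3 * 98 + 1) = 105,960 and flops = 2 * 14 * 14 * 105,960 = 41,536,320.
if __name__ == "__main__":
    flops, params = get_flops_params(
        "conv_bn_relu",
        {"HW": 14, "CIN": 98, "COUT": 120, "KERNEL_SIZE": 3, "STRIDES": 1})
    print(flops, params)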
|
|
@ -0,0 +1,3 @@
|
|||
# Copyright (c) Microsoft Corporation.
|
||||
# Licensed under the MIT license.
|
||||
from .interface import BaseOperator, BaseBlock
|
|
@ -0,0 +1,57 @@
|
|||
# Copyright (c) Microsoft Corporation.
|
||||
# Licensed under the MIT license.
|
||||
|
||||
class BaseOperator:
|
||||
def __init__(self, input_shape, config=None):
|
||||
''' base class for operator.
|
||||
|
||||
@params
|
||||
|
||||
- `input_shape`: defines the dimension of one model input shape without batch size. Generally, when the input shape is 3D, `input_shape`
|
||||
should be `[config["HW"], config["HW"], config["CIN"]]` (for a tensorflow model) or `[config["CIN"], config["HW"], config["HW"]]` (for a
|
||||
torch model), and when the input shape is 1D, `input_shape` should be `[config["CIN"]]`.
|
||||
|
||||
- `config`: a dict containing all configurations.
|
||||
'''
|
||||
self.input_shape = input_shape
|
||||
self.config = config
|
||||
|
||||
def get_model(self):
|
||||
pass
|
||||
|
||||
def get_output_shape(self):
|
||||
return self.input_shape
|
||||
|
||||
def get_is_two_inputs(self):
|
||||
return False
|
||||
|
||||
def test_operator():
|
||||
''' for users to test the model during registration. Users do not need to override this method.
|
||||
'''
|
||||
pass
|
||||
|
||||
|
||||
class BaseBlock:
|
||||
def __init__(self, config):
|
||||
''' base class for kernel block.
|
||||
|
||||
@params
|
||||
|
||||
- `input_shape`: defines the dimension of one model input shape without batch size. Generally, when the input shape is 3D, `input_shape`
|
||||
should be `[config["HW"], config["HW"], config["CIN"]]` (for a tensorflow model) or `[config["CIN"], config["HW"], config["HW"]]` (for a
|
||||
torch model), and when the input shape is 1D, `input_shape` should be `[config["CIN"]]`.
|
||||
|
||||
- `input_tensor_shape`: a list defining all model inputs. In the basic case, `input_tensor_shape` should be `[input_shape]` if the kernel
|
||||
only has one input. If the kernel has more than one input, such as `add_relu` kernel, `input_tensor_shape` is `[input_shape, input_shape]`.
|
||||
'''
|
||||
self.config = config
|
||||
self.input_shape = None
|
||||
self.input_tensor_shape = None
|
||||
|
||||
def get_model(self):
|
||||
''' build the kernel model and return an instance of `tensorflow.keras.Model` or `torch.nn.Module` for the kernel.
|
||||
'''
|
||||
pass
|
||||
|
||||
def test_block(self):
|
||||
pass
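# A minimal sketch (not part of the original file) of how a user-defined operator could
# subclass BaseOperator; keras.layers.Activation("sigmoid") is only an illustrative op,
# and the tensorflow import is kept local so the interface stays framework-agnostic.
class ExampleSigmoidOperator(BaseOperator):
    def get_model(self):
        from tensorflow import keras
        return keras.layers.Activation("sigmoid")
    # the output shape equals the input shape, so the inherited get_output_shape() suffices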
|
|
@ -0,0 +1,2 @@
|
|||
# Copyright (c) Microsoft Corporation.
|
||||
# Licensed under the MIT license.
|
|
@ -0,0 +1,931 @@
|
|||
# Copyright (c) Microsoft Corporation.
|
||||
# Licensed under the MIT license.
|
||||
import logging
|
||||
import tensorflow as tf
|
||||
import tensorflow.keras as keras
|
||||
from .operators import *
|
||||
from ..interface import BaseBlock
|
||||
from .utils import get_inputs_by_shapes
|
||||
logging = logging.getLogger("nn-Meter")
|
||||
|
||||
|
||||
class TFBlock(BaseBlock):
|
||||
def test_block(self):
|
||||
import os, shutil
|
||||
from typing import List
|
||||
model_path = "./temp_model"
|
||||
model = self.get_model()
|
||||
model_output = model(get_inputs_by_shapes(self.input_tensor_shape))
|
||||
|
||||
# check model save and reload
|
||||
keras.models.save_model(model, model_path)
|
||||
restore_model = keras.models.load_model(model_path)
|
||||
if isinstance(model_output, List):
|
||||
output_shape = [mod.shape for mod in model_output]
|
||||
restore_output_shape = [mod.shape for mod in restore_model(get_inputs_by_shapes(self.input_tensor_shape))]
|
||||
else:
|
||||
output_shape = model_output.shape
|
||||
restore_output_shape = restore_model(get_inputs_by_shapes(self.input_tensor_shape)).shape
|
||||
assert output_shape == restore_output_shape
|
||||
shutil.rmtree(model_path)
|
||||
|
||||
# check model convert to tflite
|
||||
converter = tf.lite.TFLiteConverter.from_keras_model(restore_model)
|
||||
tflite_model = converter.convert()
|
||||
open(model_path + '.tflite', 'wb').write(tflite_model)
|
||||
os.remove(model_path + '.tflite')
|
||||
logging.keyinfo("Testing block is success!")
|
||||
|
||||
def save_model(self, save_path):
|
||||
model = self.get_model()
|
||||
keras.models.save_model(model, save_path)
|
||||
|
||||
|
||||
class ConvBnRelu(TFBlock):
|
||||
def __init__(self, config):
|
||||
self.config = config
|
||||
self.input_shape = [config["HW"], config["HW"], config["CIN"]]
|
||||
self.input_tensor_shape = [self.input_shape]
|
||||
|
||||
conv_op = Conv(self.input_shape, config)
|
||||
self.conv_op, out_shape = conv_op.get_model(), conv_op.get_output_shape()
|
||||
|
||||
bn_op = BN(out_shape, config)
|
||||
self.bn_op, out_shape = bn_op.get_model(), bn_op.get_output_shape()
|
||||
|
||||
relu_op = Relu(out_shape, config)
|
||||
self.relu_op = relu_op.get_model()
|
||||
|
||||
def get_model(self):
|
||||
class Model(keras.Model):
|
||||
def __init__(self, conv_op, bn_op, relu_op):
|
||||
super().__init__()
|
||||
self.conv = conv_op
|
||||
self.bn = bn_op
|
||||
self.relu = relu_op
|
||||
|
||||
def call(self, inputs):
|
||||
x = self.conv(inputs)
|
||||
x = self.bn(x)
|
||||
x = self.relu(x)
|
||||
return x
|
||||
model = Model(self.conv_op, self.bn_op, self.relu_op)
|
||||
model(get_inputs_by_shapes(self.input_tensor_shape))
|
||||
return model
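# A minimal usage sketch (hedged; the config values are placeholders): build the block,
# run the built-in save/reload/tflite-conversion check, then export it as a keras model.
if __name__ == "__main__":
    config = {"HW": 28, "CIN": 16, "COUT": 32, "KERNEL_SIZE": 3, "STRIDES": 1}
    block = ConvBnRelu(config)
    block.test_block()
    block.save_model("./conv_bn_relu_hw28_cin16")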
|
||||
|
||||
|
||||
class ConvBnRelu6(TFBlock):
|
||||
def __init__(self, config):
|
||||
self.config = config
|
||||
self.input_shape = [config["HW"], config["HW"], config["CIN"]]
|
||||
self.input_tensor_shape = [self.input_shape]
|
||||
|
||||
conv_op = Conv(self.input_shape, config)
|
||||
self.conv_op, out_shape = conv_op.get_model(), conv_op.get_output_shape()
|
||||
|
||||
bn_op = BN(out_shape, config)
|
||||
self.bn_op, out_shape = bn_op.get_model(), bn_op.get_output_shape()
|
||||
|
||||
relu6_op = Relu6(out_shape, config)
|
||||
self.relu6_op = relu6_op.get_model()
|
||||
|
||||
def get_model(self):
|
||||
class Model(keras.Model):
|
||||
def __init__(self, conv_op, bn_op, relu6_op):
|
||||
super().__init__()
|
||||
self.conv = conv_op
|
||||
self.bn = bn_op
|
||||
self.relu6 = relu6_op
|
||||
|
||||
def call(self, inputs):
|
||||
x = self.conv(inputs)
|
||||
x = self.bn(x)
|
||||
x = self.relu6(x)
|
||||
return x
|
||||
|
||||
model = Model(self.conv_op, self.bn_op, self.relu6_op)
|
||||
model(get_inputs_by_shapes(self.input_tensor_shape))
|
||||
return model
|
||||
|
||||
|
||||
class ConvBn(TFBlock):
|
||||
def __init__(self, config):
|
||||
self.config = config
|
||||
self.input_shape = [config["HW"], config["HW"], config["CIN"]]
|
||||
self.input_tensor_shape = [self.input_shape]
|
||||
|
||||
conv_op = Conv(self.input_shape, config)
|
||||
self.conv_op, out_shape = conv_op.get_model(), conv_op.get_output_shape()
|
||||
|
||||
bn_op = BN(out_shape, config)
|
||||
self.bn_op = bn_op.get_model()
|
||||
|
||||
def get_model(self):
|
||||
class Model(keras.Model):
|
||||
def __init__(self, conv_op, bn_op):
|
||||
super().__init__()
|
||||
self.conv = conv_op
|
||||
self.bn = bn_op
|
||||
|
||||
def call(self, inputs):
|
||||
x = self.conv(inputs)
|
||||
x = self.bn(x)
|
||||
return x
|
||||
|
||||
model = Model(self.conv_op, self.bn_op)
|
||||
model(get_inputs_by_shapes(self.input_tensor_shape))
|
||||
return model
|
||||
|
||||
|
||||
class ConvRelu(TFBlock):
|
||||
def __init__(self, config):
|
||||
self.config = config
|
||||
self.input_shape = [config["HW"], config["HW"], config["CIN"]]
|
||||
self.input_tensor_shape = [self.input_shape]
|
||||
|
||||
conv_op = Conv(self.input_shape, config)
|
||||
self.conv_op, out_shape = conv_op.get_model(), conv_op.get_output_shape()
|
||||
|
||||
relu_op = Relu(out_shape, config)
|
||||
self.relu_op = relu_op.get_model()
|
||||
|
||||
def get_model(self):
|
||||
class Model(keras.Model):
|
||||
def __init__(self, conv_op, relu_op):
|
||||
super().__init__()
|
||||
self.conv = conv_op
|
||||
self.relu = relu_op
|
||||
|
||||
def call(self, inputs):
|
||||
x = self.conv(inputs)
|
||||
x = self.relu(x)
|
||||
return x
|
||||
|
||||
model = Model(self.conv_op, self.relu_op)
|
||||
model(get_inputs_by_shapes(self.input_tensor_shape))
|
||||
return model
|
||||
|
||||
|
||||
class ConvRelu6(TFBlock):
|
||||
def __init__(self, config):
|
||||
self.config = config
|
||||
self.input_shape = [config["HW"], config["HW"], config["CIN"]]
|
||||
self.input_tensor_shape = [self.input_shape]
|
||||
|
||||
conv_op = Conv(self.input_shape, config)
|
||||
self.conv_op, out_shape = conv_op.get_model(), conv_op.get_output_shape()
|
||||
|
||||
relu6_op = Relu6(out_shape, config)
|
||||
self.relu6_op = relu6_op.get_model()
|
||||
|
||||
def get_model(self):
|
||||
class Model(keras.Model):
|
||||
def __init__(self, conv_op, relu6_op):
|
||||
super().__init__()
|
||||
self.conv = conv_op
|
||||
self.relu6 = relu6_op
|
||||
|
||||
def call(self, inputs):
|
||||
x = self.conv(inputs)
|
||||
x = self.relu6(x)
|
||||
return x
|
||||
|
||||
model = Model(self.conv_op, self.relu6_op)
|
||||
model(get_inputs_by_shapes(self.input_tensor_shape))
|
||||
return model
|
||||
|
||||
|
||||
class ConvHswish(TFBlock):
|
||||
def __init__(self, config):
|
||||
self.config = config
|
||||
self.input_shape = [config["HW"], config["HW"], config["CIN"]]
|
||||
self.input_tensor_shape = [self.input_shape]
|
||||
|
||||
conv_op = Conv(self.input_shape, config)
|
||||
self.conv_op, out_shape = conv_op.get_model(), conv_op.get_output_shape()
|
||||
|
||||
hswish_op = Hswish(out_shape, config)
|
||||
self.hswish_op = hswish_op.get_model()
|
||||
|
||||
def get_model(self):
|
||||
class Model(keras.Model):
|
||||
def __init__(self, conv_op, hswish_op):
|
||||
super().__init__()
|
||||
self.conv = conv_op
|
||||
self.hswish = hswish_op
|
||||
|
||||
def call(self, inputs):
|
||||
x = self.conv(inputs)
|
||||
x = self.hswish(x)
|
||||
return x
|
||||
|
||||
model = Model(self.conv_op, self.hswish_op)
|
||||
model(get_inputs_by_shapes(self.input_tensor_shape))
|
||||
return model
|
||||
|
||||
|
||||
class ConvBlock(TFBlock):
|
||||
def __init__(self, config):
|
||||
self.config = config
|
||||
self.input_shape = [config["HW"], config["HW"], config["CIN"]]
|
||||
self.input_tensor_shape = [self.input_shape]
|
||||
|
||||
conv_op = Conv(self.input_shape, config)
|
||||
self.conv_op = conv_op.get_model()
|
||||
|
||||
def get_model(self):
|
||||
class Model(keras.Model):
|
||||
def __init__(self, conv_op):
|
||||
super().__init__()
|
||||
self.conv = conv_op
|
||||
|
||||
def call(self, inputs):
|
||||
return self.conv(inputs)
|
||||
|
||||
model = Model(self.conv_op)
|
||||
model(get_inputs_by_shapes(self.input_tensor_shape))
|
||||
return model
|
||||
|
||||
|
||||
class ConvBnHswish(TFBlock):
|
||||
def __init__(self, config):
|
||||
self.config = config
|
||||
self.input_shape = [config["HW"], config["HW"], config["CIN"]]
|
||||
self.input_tensor_shape = [self.input_shape]
|
||||
|
||||
conv_op = Conv(self.input_shape, config)
|
||||
self.conv_op, out_shape = conv_op.get_model(), conv_op.get_output_shape()
|
||||
|
||||
bn_op = BN(out_shape, config)
|
||||
self.bn_op, out_shape = bn_op.get_model(), bn_op.get_output_shape()
|
||||
|
||||
hswish_op = Hswish(out_shape, config)
|
||||
self.hswish_op = hswish_op.get_model()
|
||||
|
||||
def get_model(self):
|
||||
class Model(keras.Model):
|
||||
def __init__(self, conv_op, bn_op, hswish_op):
|
||||
super().__init__()
|
||||
self.conv = conv_op
|
||||
self.bn = bn_op
|
||||
self.hswish = hswish_op
|
||||
|
||||
def call(self, inputs):
|
||||
x = self.conv(inputs)
|
||||
x = self.bn(x)
|
||||
x = self.hswish(x)
|
||||
return x
|
||||
|
||||
model = Model(self.conv_op, self.bn_op, self.hswish_op)
|
||||
model(get_inputs_by_shapes(self.input_tensor_shape))
|
||||
return model
|
||||
|
||||
|
||||
class ConvBnReluMaxPool(TFBlock):
|
||||
def __init__(self, config):
|
||||
self.config = config
|
||||
self.input_shape = [config["HW"], config["HW"], config["CIN"]]
|
||||
self.input_tensor_shape = [self.input_shape]
|
||||
|
||||
conv_op = Conv(self.input_shape, config)
|
||||
self.conv_op, out_shape = conv_op.get_model(), conv_op.get_output_shape()
|
||||
|
||||
bn_op = BN(out_shape, config)
|
||||
self.bn_op, out_shape = bn_op.get_model(), bn_op.get_output_shape()
|
||||
|
||||
relu_op = Relu(out_shape, config)
|
||||
self.relu_op, out_shape = relu_op.get_model(), relu_op.get_output_shape()
|
||||
|
||||
maxpool_op = MaxPool(out_shape, config)
|
||||
self.maxpool_op = maxpool_op.get_model()
|
||||
|
||||
def get_model(self):
|
||||
class Model(keras.Model):
|
||||
def __init__(self, conv_op, bn_op, relu_op, maxpool_op):
|
||||
super().__init__()
|
||||
self.conv = conv_op
|
||||
self.bn = bn_op
|
||||
self.relu = relu_op
|
||||
self.maxpool = maxpool_op
|
||||
|
||||
def call(self, inputs):
|
||||
x = self.conv(inputs)
|
||||
x = self.bn(x)
|
||||
x = self.relu(x)
|
||||
x = self.maxpool(x)
|
||||
return x
|
||||
|
||||
model = Model(self.conv_op, self.bn_op, self.relu_op, self.maxpool_op)
|
||||
model(get_inputs_by_shapes(self.input_tensor_shape))
|
||||
return model
|
||||
|
||||
|
||||
class DwConvBn(TFBlock):
|
||||
def __init__(self, config):
|
||||
self.config = config
|
||||
self.input_shape = [config["HW"], config["HW"], config["CIN"]]
|
||||
self.input_tensor_shape = [self.input_shape]
|
||||
|
||||
dwconv_op = DwConv(self.input_shape, config)
|
||||
self.dwconv_op, out_shape = dwconv_op.get_model(), dwconv_op.get_output_shape()
|
||||
|
||||
bn_op = BN(out_shape, config)
|
||||
self.bn_op = bn_op.get_model()
|
||||
|
||||
def get_model(self):
|
||||
class Model(keras.Model):
|
||||
def __init__(self, dwconv_op, bn_op):
|
||||
super().__init__()
|
||||
self.dwconv = dwconv_op
|
||||
self.bn = bn_op
|
||||
|
||||
def call(self, inputs):
|
||||
x = self.dwconv(inputs)
|
||||
x = self.bn(x)
|
||||
return x
|
||||
|
||||
model = Model(self.dwconv_op, self.bn_op)
|
||||
model(get_inputs_by_shapes(self.input_tensor_shape))
|
||||
return model
|
||||
|
||||
|
||||
class DwConvRelu(TFBlock):
|
||||
def __init__(self, config):
|
||||
self.config = config
|
||||
self.input_shape = [config["HW"], config["HW"], config["CIN"]]
|
||||
self.input_tensor_shape = [self.input_shape]
|
||||
|
||||
dwconv_op = DwConv(self.input_shape, config)
|
||||
self.dwconv_op, out_shape = dwconv_op.get_model(), dwconv_op.get_output_shape()
|
||||
|
||||
relu_op = Relu(out_shape, config)
|
||||
self.relu_op = relu_op.get_model()
|
||||
|
||||
def get_model(self):
|
||||
class Model(keras.Model):
|
||||
def __init__(self, dwconv_op, relu_op):
|
||||
super().__init__()
|
||||
self.dwconv = dwconv_op
|
||||
self.relu = relu_op
|
||||
|
||||
def call(self, inputs):
|
||||
x = self.dwconv(inputs)
|
||||
x = self.relu(x)
|
||||
return x
|
||||
|
||||
model = Model(self.dwconv_op, self.relu_op)
|
||||
model(get_inputs_by_shapes(self.input_tensor_shape))
|
||||
return model
|
||||
|
||||
|
||||
class DwConvRelu6(TFBlock):
|
||||
def __init__(self, config):
|
||||
self.config = config
|
||||
self.input_shape = [config["HW"], config["HW"], config["CIN"]]
|
||||
self.input_tensor_shape = [self.input_shape]
|
||||
|
||||
dwconv_op = DwConv(self.input_shape, config)
|
||||
self.dwconv_op, out_shape = dwconv_op.get_model(), dwconv_op.get_output_shape()
|
||||
|
||||
relu6_op = Relu6(out_shape, config)
|
||||
self.relu6_op = relu6_op.get_model()
|
||||
|
||||
def get_model(self):
|
||||
class Model(keras.Model):
|
||||
def __init__(self, dwconv_op, relu6_op):
|
||||
super().__init__()
|
||||
self.dwconv = dwconv_op
|
||||
self.relu6 = relu6_op
|
||||
|
||||
def call(self, inputs):
|
||||
x = self.dwconv(inputs)
|
||||
x = self.relu6(x)
|
||||
return x
|
||||
|
||||
model = Model(self.dwconv_op, self.relu6_op)
|
||||
model(get_inputs_by_shapes(self.input_tensor_shape))
|
||||
return model
|
||||
|
||||
|
||||
class DwConvBnRelu(TFBlock):
|
||||
def __init__(self, config):
|
||||
self.config = config
|
||||
self.input_shape = [config["HW"], config["HW"], config["CIN"]]
|
||||
self.input_tensor_shape = [self.input_shape]
|
||||
|
||||
dwconv_op = DwConv(self.input_shape, config)
|
||||
self.dwconv_op, out_shape = dwconv_op.get_model(), dwconv_op.get_output_shape()
|
||||
|
||||
bn_op = BN(out_shape, config)
|
||||
self.bn_op, out_shape = bn_op.get_model(), bn_op.get_output_shape()
|
||||
|
||||
relu_op = Relu(out_shape, config)
|
||||
self.relu_op = relu_op.get_model()
|
||||
|
||||
def get_model(self):
|
||||
class Model(keras.Model):
|
||||
def __init__(self, dwconv_op, bn_op, relu_op):
|
||||
super().__init__()
|
||||
self.dwconv = dwconv_op
|
||||
self.bn = bn_op
|
||||
self.relu = relu_op
|
||||
|
||||
def call(self, inputs):
|
||||
x = self.dwconv(inputs)
|
||||
x = self.bn(x)
|
||||
x = self.relu(x)
|
||||
return x
|
||||
|
||||
model = Model(self.dwconv_op, self.bn_op, self.relu_op)
|
||||
model(get_inputs_by_shapes(self.input_tensor_shape))
|
||||
return model
|
||||
|
||||
|
||||
class DwConvBnRelu6(TFBlock):
|
||||
def __init__(self, config):
|
||||
self.config = config
|
||||
self.input_shape = [config["HW"], config["HW"], config["CIN"]]
|
||||
self.input_tensor_shape = [self.input_shape]
|
||||
|
||||
dwconv_op = DwConv(self.input_shape, config)
|
||||
self.dwconv_op, out_shape = dwconv_op.get_model(), dwconv_op.get_output_shape()
|
||||
|
||||
bn_op = BN(out_shape, config)
|
||||
self.bn_op, out_shape = bn_op.get_model(), bn_op.get_output_shape()
|
||||
|
||||
relu6_op = Relu6(out_shape, config)
|
||||
self.relu6_op = relu6_op.get_model()
|
||||
|
||||
def get_model(self):
|
||||
class Model(keras.Model):
|
||||
def __init__(self, dwconv_op, bn_op, relu6_op):
|
||||
super().__init__()
|
||||
self.dwconv = dwconv_op
|
||||
self.bn = bn_op
|
||||
self.relu6 = relu6_op
|
||||
|
||||
def call(self, inputs):
|
||||
x = self.dwconv(inputs)
|
||||
x = self.bn(x)
|
||||
x = self.relu6(x)
|
||||
return x
|
||||
|
||||
model = Model(self.dwconv_op, self.bn_op, self.relu6_op)
|
||||
model(get_inputs_by_shapes(self.input_tensor_shape))
|
||||
return model
|
||||
|
||||
|
||||
class DwConvBlock(TFBlock):
|
||||
def __init__(self, config):
|
||||
self.config = config
|
||||
self.input_shape = [config["HW"], config["HW"], config["CIN"]]
|
||||
self.input_tensor_shape = [self.input_shape]
|
||||
|
||||
dwconv_op = DwConv(self.input_shape, config)
|
||||
self.dwconv_op = dwconv_op.get_model()
|
||||
|
||||
def get_model(self):
|
||||
class Model(keras.Model):
|
||||
def __init__(self, dwconv_op):
|
||||
super().__init__()
|
||||
self.dwconv = dwconv_op
|
||||
|
||||
def call(self, inputs):
|
||||
return self.dwconv(inputs)
|
||||
|
||||
model = Model(self.dwconv_op)
|
||||
model(get_inputs_by_shapes(self.input_tensor_shape))
|
||||
return model
|
||||
|
||||
|
||||
class DwConvBnHswish(TFBlock):
|
||||
def __init__(self, config):
|
||||
self.config = config
|
||||
self.input_shape = [config["HW"], config["HW"], config["CIN"]]
|
||||
self.input_tensor_shape = [self.input_shape]
|
||||
|
||||
dwconv_op = DwConv(self.input_shape, config)
|
||||
self.dwconv_op, out_shape = dwconv_op.get_model(), dwconv_op.get_output_shape()
|
||||
|
||||
bn_op = BN(out_shape, config)
|
||||
self.bn_op, out_shape = bn_op.get_model(), bn_op.get_output_shape()
|
||||
|
||||
hswish_op = Hswish(out_shape, config)
|
||||
self.hswish_op = hswish_op.get_model()
|
||||
|
||||
def get_model(self):
|
||||
class Model(keras.Model):
|
||||
def __init__(self, dwconv_op, bn_op, hswish_op):
|
||||
super().__init__()
|
||||
self.dwconv = dwconv_op
|
||||
self.bn = bn_op
|
||||
self.hswish = hswish_op
|
||||
|
||||
def call(self, inputs):
|
||||
x = self.dwconv(inputs)
|
||||
x = self.bn(x)
|
||||
x = self.hswish(x)
|
||||
return x
|
||||
|
||||
model = Model(self.dwconv_op, self.bn_op, self.hswish_op)
|
||||
model(get_inputs_by_shapes(self.input_tensor_shape))
|
||||
return model
|
||||
|
||||
|
||||
class MaxPoolBlock(TFBlock):
|
||||
def __init__(self, config):
|
||||
self.config = config
|
||||
self.input_shape = [config["HW"], config["HW"], config["CIN"]]
|
||||
self.input_tensor_shape = [self.input_shape]
|
||||
|
||||
maxpool_op = MaxPool(self.input_shape, config)
|
||||
self.maxpool_op = maxpool_op.get_model()
|
||||
|
||||
def get_model(self):
|
||||
class Model(keras.Model):
|
||||
def __init__(self, maxpool_op):
|
||||
super().__init__()
|
||||
self.maxpool = maxpool_op
|
||||
|
||||
def call(self, inputs):
|
||||
return self.maxpool(inputs)
|
||||
|
||||
model = Model(self.maxpool_op)
|
||||
model(get_inputs_by_shapes(self.input_tensor_shape))
|
||||
return model
|
||||
|
||||
|
||||
class AvgPoolBlock(TFBlock):
|
||||
def __init__(self, config):
|
||||
self.config = config
|
||||
self.input_shape = [config["HW"], config["HW"], config["CIN"]]
|
||||
self.input_tensor_shape = [self.input_shape]
|
||||
|
||||
avgpool_op = AvgPool(self.input_shape, config)
|
||||
self.avgpool_op = avgpool_op.get_model()
|
||||
|
||||
def get_model(self):
|
||||
class Model(keras.Model):
|
||||
def __init__(self, avgpool_op):
|
||||
super().__init__()
|
||||
self.avgpool = avgpool_op
|
||||
|
||||
def call(self, inputs):
|
||||
return self.avgpool(inputs)
|
||||
|
||||
model = Model(self.avgpool_op)
|
||||
model(get_inputs_by_shapes(self.input_tensor_shape))
|
||||
return model
|
||||
|
||||
|
||||
class FCBlock(TFBlock):
|
||||
def __init__(self, config):
|
||||
self.config = config
|
||||
self.input_shape = [config["CIN"]]
|
||||
self.input_tensor_shape = [self.input_shape]
|
||||
|
||||
fc_op = FC(self.input_shape, config)
|
||||
self.fc_op = fc_op.get_model()
|
||||
|
||||
def get_model(self):
|
||||
class Model(keras.Model):
|
||||
def __init__(self, fc_op):
|
||||
super().__init__()
|
||||
self.fc = fc_op
|
||||
|
||||
def call(self, inputs):
|
||||
return self.fc(inputs)
|
||||
|
||||
model = Model(self.fc_op)
|
||||
model(get_inputs_by_shapes(self.input_tensor_shape))
|
||||
return model
|
||||
|
||||
|
||||
class ConcatBlock(TFBlock):
|
||||
def __init__(self, config):
|
||||
self.config = config
|
||||
self.input_shape = [[config["HW"], config["HW"], cin]
|
||||
for cin in [config['CIN1'], config['CIN2'], config['CIN3'], config['CIN4']]
|
||||
if cin != 0]
|
||||
self.input_tensor_shape = self.input_shape
|
||||
|
||||
concat_op = Concat(self.input_shape, config)
|
||||
self.concat_op = concat_op.get_model()
|
||||
|
||||
def get_model(self):
|
||||
class Model(keras.Model):
|
||||
def __init__(self, concat_op):
|
||||
super().__init__()
|
||||
self.concat = concat_op
|
||||
|
||||
def call(self, inputs):
|
||||
return self.concat(inputs)
|
||||
|
||||
model = Model(self.concat_op)
|
||||
model(get_inputs_by_shapes(self.input_tensor_shape))
|
||||
return model
|
||||
|
||||
|
||||
class SplitBlock(TFBlock):
|
||||
def __init__(self, config):
|
||||
self.config = config
|
||||
self.input_shape = [config["HW"], config["HW"], config["CIN"]]
|
||||
self.input_tensor_shape = [self.input_shape]
|
||||
|
||||
split_op = Split(self.input_shape, config)
|
||||
self.split_op = split_op.get_model()
|
||||
|
||||
def get_model(self):
|
||||
class Model(keras.Model):
|
||||
def __init__(self, split_op):
|
||||
super().__init__()
|
||||
self.split = split_op
|
||||
|
||||
def call(self, inputs):
|
||||
return self.split(inputs)
|
||||
|
||||
model = Model(self.split_op)
|
||||
model(get_inputs_by_shapes(self.input_tensor_shape))
|
||||
return model
|
||||
|
||||
|
||||
class ChannelShuffle(TFBlock):
|
||||
def __init__(self, config):
|
||||
self.config = config
|
||||
self.input_shape = [config["HW"], config["HW"], config["CIN"]]
|
||||
self.input_tensor_shape = [self.input_shape]
|
||||
|
||||
def get_model(self):
|
||||
class Model(keras.Model):
|
||||
def __init__(self):
|
||||
super().__init__()
|
||||
|
||||
def call(self, inputs):
|
||||
_, h, w, c = inputs.get_shape().as_list()
|
||||
x = tf.reshape(inputs, [-1, h, w, 2, c // 2])
|
||||
x = tf.transpose(x, (0, 1, 2, 4, 3))
|
||||
x = tf.reshape(x, [-1, h, w, c])
|
||||
return x
|
||||
|
||||
model = Model()
|
||||
model(get_inputs_by_shapes(self.input_tensor_shape))
|
||||
return model
|
||||
|
||||
|
||||
class SEBlock(TFBlock):
|
||||
def __init__(self, config):
|
||||
self.config = config
|
||||
self.input_shape = [config["HW"], config["HW"], config["CIN"]]
|
||||
self.input_tensor_shape = [self.input_shape]
|
||||
|
||||
se_op = SE(self.input_shape, config)
|
||||
self.se_op = se_op.get_model()
|
||||
|
||||
def get_model(self):
|
||||
class Model(tf.keras.Model):
|
||||
def __init__(self, se_op):
|
||||
super().__init__()
|
||||
self.se = se_op
|
||||
|
||||
def call(self, inputs):
|
||||
return self.se(inputs)
|
||||
|
||||
model = Model(self.se_op)
|
||||
model(get_inputs_by_shapes(self.input_tensor_shape))
|
||||
return model
|
||||
|
||||
|
||||
class GlobalAvgPoolBlock(TFBlock):
|
||||
def __init__(self, config):
|
||||
self.config = config
|
||||
self.input_shape = [config["HW"], config["HW"], config["CIN"]]
|
||||
self.input_tensor_shape = [self.input_shape]
|
||||
|
||||
globalavgpool_op = GlobalAvgpool(self.input_shape, config)
|
||||
self.globalavgpool_op = globalavgpool_op.get_model()
|
||||
|
||||
def get_model(self):
|
||||
class Model(keras.Model):
|
||||
def __init__(self, globalavgpool_op):
|
||||
super().__init__()
|
||||
self.globalavgpool = globalavgpool_op
|
||||
|
||||
def call(self, inputs):
|
||||
return self.globalavgpool(inputs)
|
||||
|
||||
model = Model(self.globalavgpool_op)
|
||||
model(get_inputs_by_shapes(self.input_tensor_shape))
|
||||
return model
|
||||
|
||||
|
||||
class BnRelu(TFBlock):
|
||||
def __init__(self, config):
|
||||
self.config = config
|
||||
self.input_shape = [config["HW"], config["HW"], config["CIN"]]
|
||||
self.input_tensor_shape = [self.input_shape]
|
||||
|
||||
bn_op = BN(self.input_shape, config)
|
||||
self.bn_op, out_shape = bn_op.get_model(), bn_op.get_output_shape()
|
||||
|
||||
relu_op = Relu(out_shape, config)
|
||||
self.relu_op = relu_op.get_model()
|
||||
|
||||
def get_model(self):
|
||||
class Model(keras.Model):
|
||||
def __init__(self, bn_op, relu_op):
|
||||
super().__init__()
|
||||
self.bn = bn_op
|
||||
self.relu = relu_op
|
||||
|
||||
def call(self, inputs):
|
||||
x = self.bn(inputs)
|
||||
x = self.relu(x)
|
||||
return x
|
||||
|
||||
model = Model(self.bn_op, self.relu_op)
|
||||
model(get_inputs_by_shapes(self.input_tensor_shape))
|
||||
return model
|
||||
|
||||
|
||||
class BnBlock(TFBlock):
|
||||
def __init__(self, config):
|
||||
self.config = config
|
||||
self.input_shape = [config["HW"], config["HW"], config["CIN"]]
|
||||
self.input_tensor_shape = [self.input_shape]
|
||||
|
||||
bn_op = BN(self.input_shape, config)
|
||||
self.bn_op = bn_op.get_model()
|
||||
|
||||
def get_model(self):
|
||||
class Model(keras.Model):
|
||||
def __init__(self, bn_op):
|
||||
super().__init__()
|
||||
self.bn = bn_op
|
||||
|
||||
def call(self, inputs):
|
||||
return self.bn(inputs)
|
||||
|
||||
model = Model(self.bn_op)
|
||||
model(get_inputs_by_shapes(self.input_tensor_shape))
|
||||
return model
|
||||
|
||||
|
||||
class HswishBlock(TFBlock):
|
||||
def __init__(self, config):
|
||||
self.config = config
|
||||
self.input_shape = [config["HW"], config["HW"], config["CIN"]]
|
||||
self.input_tensor_shape = [self.input_shape]
|
||||
|
||||
hswish_op = Hswish(self.input_shape, config)
|
||||
self.hswish_op = hswish_op.get_model()
|
||||
|
||||
def get_model(self):
|
||||
class Model(keras.Model):
|
||||
def __init__(self, hswish_op):
|
||||
super().__init__()
|
||||
self.hswish = hswish_op
|
||||
|
||||
def call(self, inputs):
|
||||
return self.hswish(inputs)
|
||||
|
||||
model = Model(self.hswish_op)
|
||||
model(get_inputs_by_shapes(self.input_tensor_shape))
|
||||
return model
|
||||
|
||||
|
||||
class ReluBlock(TFBlock):
|
||||
def __init__(self, config):
|
||||
self.config = config
|
||||
self.input_shape = [config["HW"], config["HW"], config["CIN"]]
|
||||
self.input_tensor_shape = [self.input_shape]
|
||||
|
||||
relu_op = Relu(self.input_shape, config)
|
||||
self.relu_op = relu_op.get_model()
|
||||
|
||||
def get_model(self):
|
||||
class Model(keras.Model):
|
||||
def __init__(self, relu_op):
|
||||
super().__init__()
|
||||
self.relu = relu_op
|
||||
|
||||
def call(self, inputs):
|
||||
return self.relu(inputs)
|
||||
|
||||
model = Model(self.relu_op)
|
||||
model(get_inputs_by_shapes(self.input_tensor_shape))
|
||||
return model
|
||||
|
||||
|
||||
class AddRelu(TFBlock):
|
||||
def __init__(self, config):
|
||||
self.config = config
|
||||
self.input_shape = [config["HW"], config["HW"], config["CIN"]]
|
||||
self.input_tensor_shape = [self.input_shape]
|
||||
|
||||
add_op = Add(self.input_shape, config)
|
||||
self.add_op, out_shape = add_op.get_model(), add_op.get_output_shape()
|
||||
|
||||
relu_op = Relu(out_shape, config)
|
||||
self.relu_op = relu_op.get_model()
|
||||
|
||||
def get_model(self):
|
||||
class Model(keras.Model):
|
||||
def __init__(self, add_op, relu_op):
|
||||
super().__init__()
|
||||
self.add = add_op
|
||||
self.relu = relu_op
|
||||
|
||||
def call(self, inputs):
|
||||
x = self.add([inputs, inputs])
|
||||
x = self.relu(x)
|
||||
return x
|
||||
|
||||
model = Model(self.add_op, self.relu_op)
|
||||
model(get_inputs_by_shapes(self.input_tensor_shape))
|
||||
return model
|
||||
|
||||
|
||||
class AddBlock(TFBlock):
|
||||
def __init__(self, config):
|
||||
self.config = config
|
||||
self.input_shape = [config["HW"], config["HW"], config["CIN"]]
|
||||
self.input_tensor_shape = [self.input_shape]
|
||||
|
||||
add_op = Add(self.input_shape, config)
|
||||
self.add_op = add_op.get_model()
|
||||
|
||||
def get_model(self):
|
||||
class Model(keras.Model):
|
||||
def __init__(self, add_op):
|
||||
super().__init__()
|
||||
self.add = add_op
|
||||
|
||||
def call(self, inputs):
|
||||
return self.add([inputs, inputs])
|
||||
|
||||
model = Model(self.add_op)
|
||||
model(get_inputs_by_shapes(self.input_tensor_shape))
|
||||
return model
|
||||
|
||||
|
||||
class GroupedConvBlock(TFBlock):
|
||||
def __init__(self, config):
|
||||
self.config = config
|
||||
self.input_shape = [config["HW"], config["HW"], config["CIN"]]
|
||||
self.input_tensor_shape = [self.input_shape]
|
||||
self.cout = self.input_shape[2] if "COUT" not in config else config["COUT"]
|
||||
self.num_groups = config['NUM_GROUPS']
|
||||
|
||||
def get_model(self):
|
||||
class Model(keras.Model):
|
||||
def __init__(self, cout, num_groups, kernel_size, strides):
|
||||
super().__init__()
|
||||
self.cout = cout
|
||||
self.num_groups = num_groups
|
||||
self.kernel_size = kernel_size
|
||||
self.strides = strides
|
||||
|
||||
def call(self, inputs):
|
||||
x = [keras.layers.Conv2D(
|
||||
filters=self.cout // self.num_groups,
|
||||
kernel_size=self.kernel_size,
|
||||
strides=self.strides,
|
||||
padding="same",
|
||||
)(x) for x in tf.split(inputs, self.num_groups, axis=3)
|
||||
]
|
||||
return tf.concat(x, axis=3)
|
||||
|
||||
model = Model(self.cout, self.num_groups, self.config['KERNEL_SIZE'], self.config['STRIDES'])
|
||||
model(get_inputs_by_shapes(self.input_tensor_shape))
|
||||
return model
|
||||
|
||||
|
||||
class MixedConvBlock(TFBlock):
|
||||
def __init__(self, config):
|
||||
self.config = config
|
||||
self.input_shape = [config["HW"], config["HW"], config["CIN"]]
|
||||
self.input_tensor_shape = [self.input_shape]
|
||||
self.cout = self.input_shape[2] if "COUT" not in config else config["COUT"]
|
||||
self.num_groups = config['NUM_GROUPS']
|
||||
|
||||
def get_model(self):
|
||||
class Model(keras.Model):
|
||||
def __init__(self, cout, num_groups, strides):
|
||||
super().__init__()
|
||||
self.cout = cout
|
||||
self.num_groups = num_groups
|
||||
self.strides = strides
|
||||
|
||||
def call(self, inputs):
|
||||
x = [keras.layers.Conv2D(
|
||||
filters=self.cout // self.num_groups,
|
||||
kernel_size=i * 2 + 3,
|
||||
strides=self.strides,
|
||||
padding="same",
|
||||
)(x) for i, x in zip(range(self.num_groups), tf.split(inputs, self.num_groups, axis=3))
|
||||
]
|
||||
return tf.concat(x, axis=3)
|
||||
|
||||
model = Model(self.cout, self.num_groups, self.config['STRIDES'])
|
||||
model(get_inputs_by_shapes(self.input_tensor_shape))
|
||||
return model
|
|
@ -0,0 +1,246 @@
|
|||
# Copyright (c) Microsoft Corporation.
|
||||
# Licensed under the MIT license.
|
||||
import numpy as np
|
||||
import tensorflow as tf
|
||||
from tensorflow import keras
|
||||
from ..interface import BaseOperator
|
||||
|
||||
'''
|
||||
This file contains the keras implementation of operators
|
||||
'''
|
||||
|
||||
#---------------------- convolution layer ----------------------#
|
||||
|
||||
class Conv(BaseOperator):
|
||||
def get_model(self):
|
||||
cout = self.input_shape[2] if "COUT" not in self.config else self.config["COUT"]
|
||||
return keras.layers.Conv2D(
|
||||
cout,
|
||||
kernel_size=self.config["KERNEL_SIZE"],
|
||||
strides=self.config["STRIDES"],
|
||||
padding="same"
|
||||
)
|
||||
|
||||
def get_output_shape(self):
|
||||
cout = self.input_shape[2] if "COUT" not in self.config else self.config["COUT"]
|
||||
output_h = (self.input_shape[0] - 1) // self.config["STRIDES"] + 1
|
||||
output_w = (self.input_shape[1] - 1) // self.config["STRIDES"] + 1
|
||||
return [output_h, output_w, cout]
|
||||
|
||||
|
||||
class DwConv(BaseOperator):
|
||||
def get_model(self):
|
||||
return keras.layers.DepthwiseConv2D(
|
||||
kernel_size=self.config["KERNEL_SIZE"],
|
||||
strides=self.config["STRIDES"],
|
||||
padding="same"
|
||||
)
|
||||
|
||||
def get_output_shape(self):
|
||||
output_h = (self.input_shape[0] - 1) // self.config["STRIDES"] + 1
|
||||
output_w = (self.input_shape[1] - 1) // self.config["STRIDES"] + 1
|
||||
return [output_h, output_w, self.input_shape[2]]
|
||||
|
||||
|
||||
class ConvTrans(BaseOperator):
|
||||
def get_model(self):
|
||||
cout = self.input_shape[2] if "COUT" not in self.config else self.config["COUT"]
|
||||
return keras.layers.Conv2DTranspose(
|
||||
cout,
|
||||
kernel_size=self.config["KERNEL_SIZE"],
|
||||
strides=self.config["STRIDES"],
|
||||
padding="same"
|
||||
)
|
||||
|
||||
def get_output_shape(self):
|
||||
cout = self.input_shape[2] if "COUT" not in self.config else self.config["COUT"]
|
||||
return [self.input_shape[0] * self.config["STRIDES"], self.input_shape[1] * self.config["STRIDES"], cout]
|
||||
|
||||
#------------------ normalization and pooling ------------------#
|
||||
|
||||
class BN(BaseOperator):
|
||||
def get_model(self):
|
||||
return keras.layers.BatchNormalization()
|
||||
|
||||
|
||||
class GlobalAvgpool(BaseOperator):
|
||||
def get_model(self):
|
||||
return keras.layers.GlobalAveragePooling2D()
|
||||
|
||||
def get_output_shape(self):
|
||||
return [self.input_shape[2]]
|
||||
|
||||
|
||||
class MaxPool(BaseOperator):
|
||||
def get_model(self):
|
||||
if "POOL_STRIDES" not in self.config:
|
||||
self.config["POOL_STRIDES"] = self.config["STRIDES"]
|
||||
return keras.layers.MaxPool2D(
|
||||
pool_size=self.config["KERNEL_SIZE"],
|
||||
strides=self.config["POOL_STRIDES"],
|
||||
padding="same"
|
||||
)
|
||||
|
||||
def get_output_shape(self):
|
||||
if "POOL_STRIDES" not in self.config:
|
||||
self.config["POOL_STRIDES"] = self.config["STRIDES"]
|
||||
output_h = (self.input_shape[0] - 1) // self.config["POOL_STRIDES"] + 1
|
||||
output_w = (self.input_shape[1] - 1) // self.config["POOL_STRIDES"] + 1
|
||||
return [output_h, output_w, self.input_shape[2]]
|
||||
|
||||
|
||||
class AvgPool(BaseOperator):
|
||||
def get_model(self):
|
||||
if "POOL_STRIDES" not in self.config:
|
||||
self.config["POOL_STRIDES"] = self.config["STRIDES"]
|
||||
return keras.layers.AveragePooling2D(
|
||||
pool_size=self.config["KERNEL_SIZE"],
|
||||
strides=self.config["POOL_STRIDES"],
|
||||
padding="same"
|
||||
)
|
||||
|
||||
def get_output_shape(self):
|
||||
if "POOL_STRIDES" not in self.config:
|
||||
self.config["POOL_STRIDES"] = self.config["STRIDES"]
|
||||
output_h = (self.input_shape[0] - 1) // self.config["POOL_STRIDES"] + 1
|
||||
output_w = (self.input_shape[1] - 1) // self.config["POOL_STRIDES"] + 1
|
||||
return [output_h, output_w, self.input_shape[2]]
|
||||
|
||||
#------------------------ other modules ------------------------#
|
||||
|
||||
class SE(BaseOperator):
|
||||
def get_model(self):
|
||||
class SE(keras.layers.Layer):
|
||||
def __init__(self, input_shape):
|
||||
super().__init__()
|
||||
self.in_shape = input_shape
|
||||
self.conv1 = keras.layers.Conv2D(
|
||||
filters=self.in_shape[-1] // 4,
|
||||
kernel_size=[1, 1],
|
||||
strides=[1, 1],
|
||||
padding="same",
|
||||
)
|
||||
self.conv2 = keras.layers.Conv2D(
|
||||
filters=self.in_shape[-1],
|
||||
kernel_size=[1, 1],
|
||||
strides=[1, 1],
|
||||
padding="same",
|
||||
)
|
||||
|
||||
def call(self, inputs):
|
||||
x = tf.nn.avg_pool(
|
||||
inputs,
|
||||
ksize=[1] + self.in_shape[0:2] + [1],
|
||||
strides=[1, 1, 1, 1],
|
||||
padding="VALID",
|
||||
)
|
||||
x = self.conv1(x)
|
||||
x = tf.nn.relu(x)
|
||||
x = self.conv2(x)
|
||||
x = tf.nn.relu6(tf.math.add(x, 3)) * 0.16667
|
||||
return x * inputs
|
||||
return SE(self.input_shape)
|
||||
|
||||
|
||||
class FC(BaseOperator):
|
||||
def get_model(self):
|
||||
cout = self.input_shape[-1] if "COUT" not in self.config else self.config["COUT"]
|
||||
return keras.layers.Dense(cout)
|
||||
|
||||
def get_output_shape(self):
|
||||
cout = self.input_shape[-1] if "COUT" not in self.config else self.config["COUT"]
|
||||
return self.input_shape[:-1] + [cout]
|
||||
|
||||
#-------------------- activation function --------------------#
|
||||
|
||||
class Relu(BaseOperator):
|
||||
def get_model(self):
|
||||
return keras.layers.ReLU()
|
||||
|
||||
|
||||
class Relu6(BaseOperator):
|
||||
def get_model(self):
|
||||
def func(inputs):
|
||||
return tf.nn.relu6(inputs)
|
||||
return func
|
||||
|
||||
|
||||
class Sigmoid(BaseOperator):
|
||||
def get_model(self):
|
||||
def func(inputs):
|
||||
return tf.nn.sigmoid(inputs)
|
||||
return func
|
||||
|
||||
|
||||
class Hswish(BaseOperator):
|
||||
def get_model(self):
|
||||
def func(inputs):
|
||||
return inputs * tf.nn.relu6(tf.math.add(inputs, 3)) * 0.16667    # h-swish: x * relu6(x + 3) / 6
|
||||
return func
|
||||
|
||||
#---------------------- basic operation ----------------------#
|
||||
|
||||
class Reshape(BaseOperator):
|
||||
def get_model(self):
|
||||
if len(self.input_shape) == 3:
|
||||
self.output_shape = [self.input_shape[2], self.input_shape[0], self.input_shape[1]]
|
||||
def func(inputs):
|
||||
return tf.reshape(inputs, [1] + self.output_shape)
|
||||
else:
|
||||
self.output_shape = [1, 2, int(self.input_shape[0] / 2)]
|
||||
def func(inputs):
|
||||
return tf.reshape(inputs, [1] + self.output_shape)
|
||||
return func
|
||||
|
||||
def get_output_shape(self):
|
||||
return self.output_shape
|
||||
|
||||
|
||||
class Add(BaseOperator):
|
||||
def get_model(self):
|
||||
return keras.layers.Add()
|
||||
|
||||
def get_output_shape(self):
|
||||
if len(self.input_shape) == 2 and type(self.input_shape[0]) == list:
|
||||
output_shape = self.input_shape[0]
|
||||
else:
|
||||
output_shape = self.input_shape
|
||||
return output_shape
|
||||
|
||||
def get_is_two_inputs(self):
|
||||
return True
|
||||
|
||||
|
||||
class Concat(BaseOperator):
|
||||
def get_model(self):
|
||||
return keras.layers.Concatenate()
|
||||
|
||||
def get_output_shape(self):
|
||||
if len(self.input_shape) > 1 and type(self.input_shape[0]) == list: # e.g. [[28, 28, 3], [28, 28, 5]] -> [28, 28, 8]
|
||||
output_shape = self.input_shape[0][:-1] + [sum([i[-1] for i in self.input_shape])]
|
||||
elif len(self.input_shape) == 3: # e.g. [28, 28, 4] -> [28, 28, 8]
|
||||
output_shape = self.input_shape[0:-1] + [self.input_shape[-1] * 2]
|
||||
else: # e.g. [1024] -> [2048]
|
||||
output_shape = [self.input_shape[0] * 2]
|
||||
return output_shape
|
||||
|
||||
def get_is_two_inputs(self):
|
||||
return True
|
||||
|
||||
|
||||
class Flatten(BaseOperator):
|
||||
def get_model(self):
|
||||
return keras.layers.Flatten()
|
||||
|
||||
def get_output_shape(self):
|
||||
return [int(np.prod(self.input_shape))]
|
||||
|
||||
|
||||
class Split(BaseOperator):
|
||||
def get_model(self):
|
||||
def func(inputs):
|
||||
return tf.split(inputs, num_or_size_splits=2, axis=3)
|
||||
return func
|
||||
|
||||
def get_output_shape(self):
|
||||
return [self.input_shape[0], self.input_shape[1], self.input_shape[2] // 2]
|
|
@ -0,0 +1,17 @@
|
|||
# Copyright (c) Microsoft Corporation.
|
||||
# Licensed under the MIT license.
|
||||
import tensorflow as tf
|
||||
from tensorflow import keras
|
||||
|
||||
def get_tensor_by_shapes(shapes):
|
||||
if len(shapes) == 1:
|
||||
return tf.random.normal(shape = [1] + shapes[0])
|
||||
else:
|
||||
return [tf.random.normal(shape = [1] + shape) for shape in shapes]
|
||||
|
||||
|
||||
def get_inputs_by_shapes(shapes):
|
||||
if len(shapes) == 1:
|
||||
return keras.Input(shape=shapes[0], batch_size=1)
|
||||
else:
|
||||
return [keras.Input(shape=shape, batch_size=1) for shape in shapes]
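# A minimal usage sketch (not part of the original file): one shape yields a single
# symbolic keras.Input, while several shapes yield a list, e.g. for add/concat kernels.
if __name__ == "__main__":
    single = get_inputs_by_shapes([[28, 28, 16]])
    pair = get_inputs_by_shapes([[28, 28, 16], [28, 28, 16]])
    print(single.shape, [t.shape for t in pair])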
|
|
@ -0,0 +1,2 @@
|
|||
# Copyright (c) Microsoft Corporation.
|
||||
# Licensed under the MIT license.
|
|
@ -0,0 +1,92 @@
|
|||
# Copyright (c) Microsoft Corporation.
|
||||
# Licensed under the MIT license.
|
||||
import logging
|
||||
import torch
|
||||
import torch.nn as nn
|
||||
from .operators import *
|
||||
from .utils import get_inputs_by_shapes
|
||||
from ..interface import BaseBlock
|
||||
logging = logging.getLogger("nn-Meter")
|
||||
|
||||
|
||||
class TorchBlock(BaseBlock):
|
||||
def test_block(self):
|
||||
pass
|
||||
|
||||
def save_model(self, save_path):
|
||||
model = self.get_model()
|
||||
torch.onnx.export(
|
||||
model,
|
||||
get_inputs_by_shapes(self.input_tensor_shape),
|
||||
save_path + ".onnx",
|
||||
input_names=['input'],
|
||||
output_names=['output'],
|
||||
verbose=False,
|
||||
export_params=True,
|
||||
opset_version=12,
|
||||
do_constant_folding=True,
|
||||
)
|
||||
|
||||
|
||||
class ConvBnRelu(TorchBlock):
|
||||
def __init__(self, config):
|
||||
self.config = config
|
||||
self.input_shape = [config["CIN"], config["HW"], config["HW"]]
|
||||
self.input_tensor_shape = [self.input_shape]
|
||||
|
||||
conv_op = Conv(self.input_shape, config)
|
||||
self.conv_op, out_shape = conv_op.get_model(), conv_op.get_output_shape()
|
||||
|
||||
bn_op = BN(out_shape, config)
|
||||
self.bn_op, out_shape = bn_op.get_model(), bn_op.get_output_shape()
|
||||
|
||||
relu_op = Relu(out_shape, config)
|
||||
self.relu_op = relu_op.get_model()
|
||||
|
||||
def get_model(self):
|
||||
class Model(nn.Module):
|
||||
def __init__(self, conv_op, bn_op, relu_op):
|
||||
super().__init__()
|
||||
self.conv = conv_op
|
||||
self.bn = bn_op
|
||||
self.relu = relu_op
|
||||
|
||||
def forward(self, inputs):
|
||||
x = self.conv(inputs)
|
||||
x = self.bn(x)
|
||||
x = self.relu(x)
|
||||
return x
|
||||
|
||||
return Model(self.conv_op, self.bn_op, self.relu_op)
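# A minimal usage sketch (hedged; the config and save path are placeholders): the torch
# block is exported through TorchBlock.save_model, which writes "<save_path>.onnx".
if __name__ == "__main__":
    config = {"HW": 28, "CIN": 16, "COUT": 32, "KERNEL_SIZE": 3, "STRIDES": 1}
    block = ConvBnRelu(config)
    block.save_model("./conv_bn_relu_hw28_cin16")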
|
||||
|
||||
|
||||
class DwConvBnRelu(TorchBlock):
|
||||
def __init__(self, config):
|
||||
self.config = config
|
||||
self.input_shape = [config["CIN"], config["HW"], config["HW"]]
|
||||
self.input_tensor_shape = [self.input_shape]
|
||||
|
||||
dwconv_op = DwConv(self.input_shape, config)
|
||||
self.dwconv_op, out_shape = dwconv_op.get_model(), dwconv_op.get_output_shape()
|
||||
|
||||
bn_op = BN(out_shape, config)
|
||||
self.bn_op, out_shape = bn_op.get_model(), bn_op.get_output_shape()
|
||||
|
||||
relu_op = Relu(out_shape, config)
|
||||
self.relu_op = relu_op.get_model()
|
||||
|
||||
def get_model(self):
|
||||
class Model(nn.Module):
|
||||
def __init__(self, dwconv_op, bn_op, relu_op):
|
||||
super().__init__()
|
||||
self.dwconv = dwconv_op
|
||||
self.bn = bn_op
|
||||
self.relu = relu_op
|
||||
|
||||
def forward(self, inputs):
|
||||
x = self.dwconv(inputs)
|
||||
x = self.bn(x)
|
||||
x = self.relu(x)
|
||||
return x
|
||||
|
||||
return Model(self.dwconv_op, self.bn_op, self.relu_op)
|
|
@@ -0,0 +1,48 @@
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import logging
import torch
import torch.nn as nn
from ..interface import BaseOperator

'''
This file contains the torch implementation of operators
'''

#---------------------- convolution layer ----------------------#

class Conv(BaseOperator):
    def get_model(self):
        cin = self.input_shape[0]
        cout = cin if "COUT" not in self.config else self.config["COUT"]
        return nn.Conv2d(cin, cout, kernel_size=self.config["KERNEL_SIZE"], stride=self.config["STRIDES"], padding=1)

    def get_output_shape(self):
        cout = self.input_shape[0] if "COUT" not in self.config else self.config["COUT"]
        output_h = (self.input_shape[1] - 1) // self.config["STRIDES"] + 1
        output_w = (self.input_shape[2] - 1) // self.config["STRIDES"] + 1
        return [cout, output_h, output_w]


class DwConv(BaseOperator):
    def get_model(self):
        cin = self.input_shape[0]
        return nn.Conv2d(cin, cin, kernel_size=self.config["KERNEL_SIZE"], stride=self.config["STRIDES"], padding=1, groups=cin)

    def get_output_shape(self):
        cin = self.input_shape[0]
        output_h = (self.input_shape[1] - 1) // self.config["STRIDES"] + 1
        output_w = (self.input_shape[2] - 1) // self.config["STRIDES"] + 1
        return [cin, output_h, output_w]


class BN(BaseOperator):
    def get_model(self):
        cin = self.input_shape[0]
        return nn.BatchNorm2d(cin)


class Relu(BaseOperator):
    def get_model(self):
        return nn.ReLU()

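A small sanity-check sketch of the output-shape arithmetic above, assuming `BaseOperator.__init__` stores `input_shape` and `config` the way the blocks file passes them; the concrete numbers are made up:

```python
# (28 - 1) // 2 + 1 = 14, so a stride-2 conv on a 28x28 input gives a 14x14 output
conv = Conv([16, 28, 28], {"COUT": 32, "KERNEL_SIZE": 3, "STRIDES": 2})
print(conv.get_output_shape())    # [32, 14, 14]
```
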
@@ -0,0 +1,14 @@
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import torch
import torch.nn as nn

def get_tensor_by_shapes(shapes):
    if len(shapes) == 1:
        return torch.randn(size=[1] + shapes[0])
    else:
        return [torch.randn(size=[1] + shape) for shape in shapes]


def get_inputs_by_shapes(shapes):
    return get_tensor_by_shapes(shapes)

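A minimal usage sketch for these helpers (the shapes are hypothetical): a batch dimension of 1 is prepended to every shape, and a list of tensors is returned when more than one shape is given.

```python
x = get_inputs_by_shapes([[3, 224, 224]])
print(x.shape)                   # torch.Size([1, 3, 224, 224])

xs = get_inputs_by_shapes([[3, 224, 224], [128]])
print([t.shape for t in xs])     # [torch.Size([1, 3, 224, 224]), torch.Size([1, 128])]
```
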
@@ -0,0 +1,223 @@
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import os
import json
import time
import logging
from . import builder_config
from .utils import save_profiled_results
from nn_meter.builder.utils import merge_prev_info
from nn_meter.builder.backends import connect_backend
logging = logging.getLogger("nn-Meter")


def convert_models(backend, models, mode = 'predbuild', broken_point_mode = False):
    """ convert models to the format required by the backend, in order to increase efficiency when profiling on device.

    @params:

    backend (subclass instance of BaseBackend): applied backend instance

    models (str or dict): the dict of models, or the path of the json file that stores the models information

    mode (str): the mode for running models, including ['ruletest', 'predbuild']

    broken_point_mode (boolean): if True, skip all models that already have the attribute "converted_model"

    """
    if isinstance(models, str):
        save_name = os.path.basename(models)
        with open(models, 'r') as fp:
            models = json.load(fp)
    else:
        save_name = "converted_results.json"

    ws_mode_path = builder_config.get('MODEL_DIR', mode)
    model_save_path = os.path.join(ws_mode_path, 'models')
    os.makedirs(model_save_path, exist_ok=True)
    info_save_path = os.path.join(ws_mode_path, "results")
    os.makedirs(info_save_path, exist_ok=True)

    # convert models
    count = 0
    for _, modules in models.items():
        for id, model in modules.items():
            if broken_point_mode and 'converted_model' in model:
                continue
            try:
                model_path = model['model']
                converted_model = backend.convert_model(model_path, model_save_path, model['shapes'])
                model['converted_model'] = converted_model
            except:
                open(os.path.join(info_save_path, "convert_error.log"), 'a').write(id + "\n")

            # save information to json file every 50 models
            count += 1
            if count % 50 == 0:
                with open(os.path.join(info_save_path, save_name), 'w') as fp:
                    json.dump(models, fp, indent=4)

    # save information to json file
    with open(os.path.join(info_save_path, save_name), 'w') as fp:
        json.dump(models, fp, indent=4)
    logging.keyinfo(f"Save the converted models information to {os.path.join(info_save_path, save_name)}")

    return models


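A hedged usage sketch of `convert_models` (the backend name and the json path are assumptions, not values from the commit):

```python
from nn_meter.builder.backends import connect_backend

backend = connect_backend(backend_name="tflite_cpu")   # hypothetical backend name
models = convert_models(
    backend,
    "<workspace>/predbuild/results/origin_kernels.json",  # hypothetical path
    mode="predbuild",
    broken_point_mode=True,   # skip models that already carry 'converted_model'
)
```
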
def profile_models(backend, models, mode = 'ruletest', metrics = ["latency"], save_name = None,
                   have_converted = False):
    """ run models with the given backend and return the latency of the testcase models

    @params:

    backend (subclass instance of BaseBackend): applied backend instance

    models (str or dict): the dict of models, or the path of the json file that stores the models information

    mode (str): the mode for running models, including ['ruletest', 'predbuild']

    metrics (list): required metrics to report. Only latency is supported for now.

    save_name (str): the file name used to store the profiled results. The whole path is "<workspace>/<mode-folder>/results/<save-name>"

    have_converted (boolean): if the models have already been converted to the format required by the backend, they will not be
        converted again before profiling. The model path in `model['converted_model']` will be profiled on device directly. The
        conversion can be done by applying `nn_meter.builder.convert_models`
    """
    if isinstance(models, str):
        with open(models, 'r') as fp:
            models = json.load(fp)

    ws_mode_path = builder_config.get('MODEL_DIR', mode)
    model_save_path = os.path.join(ws_mode_path, 'models')
    os.makedirs(model_save_path, exist_ok=True)
    info_save_path = os.path.join(ws_mode_path, "results")
    os.makedirs(info_save_path, exist_ok=True)

    # profile models and get metric results
    count = 0
    detail = builder_config.get('DETAIL', mode)
    save_name = save_name or "profiled_results.json"
    logging.info("Profiling ...")
    for _, modules in models.items():
        for id, model in modules.items():
            if have_converted: # the models have been converted for the backend
                try:
                    model_path = model['converted_model']
                    profiled_res = backend.profile(model_path, metrics, model['shapes'])
                    for metric in metrics:
                        model[metric] = profiled_res[metric]
                    time.sleep(2)
                    count += 1
                except:
                    open(os.path.join(info_save_path, "profile_error.log"), 'a').write(id + "\n")
            else: # the models have not been converted
                try:
                    model_path = model['model']
                    profiled_res = backend.profile_model_file(model_path, model_save_path, model['shapes'], metrics)
                    for metric in metrics:
                        model[metric] = profiled_res[metric]
                    time.sleep(2)
                    count += 1
                except:
                    open(os.path.join(info_save_path, "profile_error.log"), 'a').write(id + "\n")

            # save information to json file every 50 models
            if count > 0 and count % 50 == 0:
                save_profiled_results(models, os.path.join(info_save_path, save_name), detail)
                logging.keyinfo(f"{count} models complete. Still profiling... Save the intermediate results to {os.path.join(info_save_path, save_name)}.")

    # save information to json file
    save_profiled_results(models, os.path.join(info_save_path, save_name), detail)
    logging.keyinfo(f"All {count} models complete. Save all successfully profiled results to {os.path.join(info_save_path, save_name)}.")

    return models


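For example (paths and backend are again hypothetical), profiling models that were converted in a previous step:

```python
models = profile_models(
    backend,
    "<workspace>/predbuild/results/converted_results.json",  # hypothetical path
    mode="predbuild",
    metrics=["latency"],
    save_name="profiled_results.json",
    have_converted=True,   # profile model['converted_model'] directly
)
```
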
def sample_and_profile_kernel_data(kernel_type, sample_num, backend, sampling_mode = 'prior', configs = None, mark = '', detail = True):
    ''' sample kernel configs and profile kernel model based on configs
    '''
    from nn_meter.builder.kernel_predictor_builder import generate_config_sample

    # sample configs for kernel and generate models
    models = generate_config_sample(kernel_type, sample_num, mark=mark,
                                    sampling_mode=sampling_mode, configs=configs)

    # connect to backend, run models and get latency
    backend = connect_backend(backend_name=backend)
    profiled_results = profile_models(backend, models, mode='predbuild', save_name=f"profiled_{kernel_type}.json")
    return profiled_results


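A hypothetical direct call (kernel type and backend name are assumptions), sampling a handful of prior-distribution configs for one kernel:

```python
results = sample_and_profile_kernel_data(
    "conv_bn_relu", sample_num=10, backend="tflite_cpu",
    sampling_mode="prior", mark="prior",
)
```
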
def build_predictor_for_kernel(kernel_type, backend, init_sample_num = 1000, finegrained_sample_num = 10, iteration = 5, error_threshold = 0.1):
    """
    Build latency predictor for given kernel. This method contains three main steps:
    1. sample kernel configs and profile kernel model based on configs;
    2. initialize latency predictor of kernel based on the profiled data;
    3. adopt an adaptive sampler, iteratively repeating step 1 with fine-grained sampling, to improve predictor performance

    @params

    kernel_type (str): the type of kernel

    backend (str): the name of backend instance to profile models

    init_sample_num (int, optional): the data size for predictor initialization. Defaults to 1000.

    finegrained_sample_num (int, optional): the data size for adaptive sampling. For each data point with error higher than
        error_threshold, `finegrained_sample_num` new samples will be generated based on the large-error data. Defaults to 10.

    iteration (int, optional): the number of iterations for the adaptive sampler. Defaults to 5.

    error_threshold (float, optional): the threshold of large error. Defaults to 0.1.
    """
    from nn_meter.builder.kernel_predictor_builder import build_predictor_by_data
    ws_mode_path = builder_config.get('MODEL_DIR', 'predbuild')

    # init predictor builder with prior data sampler
    kernel_data = sample_and_profile_kernel_data(kernel_type, init_sample_num, backend, sampling_mode='prior', mark='prior')

    # use current sampled data to build regression model, and locate data with large errors in testset
    predictor, acc10, error_configs = build_predictor_by_data(kernel_type, kernel_data, backend, error_threshold=error_threshold, mark='prior',
                                                              save_path=os.path.join(ws_mode_path, "results"))
    logging.keyinfo(f'Iteration 0: acc10 {acc10}, error_configs number: {len(error_configs)}')

    for i in range(1, iteration):
        # finegrained sampling and profiling for large error data
        new_kernel_data = sample_and_profile_kernel_data(kernel_type, finegrained_sample_num, backend,
                                                         sampling_mode='finegrained', configs=error_configs, mark=f'finegrained{i}')

        # merge finegrained data with previous data and build new regression model
        kernel_data = merge_prev_info(new_info=new_kernel_data, prev_info=kernel_data)
        predictor, acc10, error_configs = build_predictor_by_data(kernel_type, kernel_data, backend, error_threshold=error_threshold, mark=f'finegrained{i}',
                                                                  save_path=os.path.join(ws_mode_path, "results"))
        logging.keyinfo(f'Iteration {i}: acc10 {acc10}, error_configs number: {len(error_configs)}')

    return predictor, kernel_data


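A sketch of a direct call with a smaller sample budget than the defaults (the kernel type and backend name are assumptions):

```python
predictor, kernel_data = build_predictor_for_kernel(
    "conv_bn_relu", "tflite_cpu",
    init_sample_num=100,
    finegrained_sample_num=10,
    iteration=3,
    error_threshold=0.1,
)
```
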
def build_latency_predictor(backend):
    """
    Build latency predictors for all kernels listed in `<workspace-path>/configs/predictorbuild_config.yaml`

    @params

    backend (str): the name of backend instance to profile models

    """
    kernels = builder_config.get("KERNELS", 'predbuild')

    for kernel_type in kernels:
        init_sample_num = kernels[kernel_type]["INIT_SAMPLE_NUM"]
        finegrained_sample_num = kernels[kernel_type]["FINEGRAINED_SAMPLE_NUM"]
        iteration = kernels[kernel_type]["ITERATION"]
        error_threshold = kernels[kernel_type]["ERROR_THRESHOLD"]
        build_predictor_for_kernel(
            kernel_type, backend,
            init_sample_num = init_sample_num,
            finegrained_sample_num = finegrained_sample_num,
            iteration = iteration,
            error_threshold = error_threshold
        )

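End to end, the intended call might look like the following, assuming the workspace configs (including `predictorbuild_config.yaml`) have already been set up so that `builder_config.get(...)` can resolve `KERNELS` and `MODEL_DIR`; the backend name is an assumption:

```python
build_latency_predictor(backend="tflite_cpu")   # hypothetical backend name
```
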
@@ -0,0 +1,54 @@
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import os
import json


def merge_prev_info(new_info, info_save_path = None, prev_info = None):
    ''' merge new_info with previous info and return the updated info. This method is used in two cases:

    1. before saving `new_info` to `info_save_path`, we need to check whether `info_save_path` is an existing file. If `info_save_path`
        exists, this method merges in the previous info saved in `info_save_path` for incremental storage and to avoid information
        loss. In this case, the params `new_info` and `info_save_path` are needed.

    2. extend the dictionary of `prev_info` with `new_info`. In this case, the params `new_info` and `prev_info` are needed.

    @params

    new_info (dict): new information

    info_save_path (str): the path to save the new info. We need to check whether the path already holds data and maintain the
        previous info in `info_save_path`.

    prev_info (dict): the previous information
    '''
    if (info_save_path == None and prev_info == None) or (info_save_path != None and prev_info != None):
        raise ValueError("One and only one of the params `info_save_path` and `prev_info` is needed.")

    if info_save_path != None and os.path.isfile(info_save_path):
        with open(info_save_path, 'r') as fp:
            prev_info = json.load(fp)

    if prev_info == None:
        return new_info

    if isinstance(prev_info, str):
        with open(prev_info, 'r') as fp:
            prev_info = json.load(fp)
    if isinstance(new_info, str):
        with open(new_info, 'r') as fp:
            new_info = json.load(fp)

    for module_key in new_info.keys():
        if module_key in prev_info:
            prev_info[module_key].update(new_info[module_key])
        else:
            prev_info[module_key] = new_info[module_key]
    return prev_info


def save_profiled_results(models, save_path, detail):
    new_models = merge_prev_info(new_info=models, info_save_path=save_path)
    from .backend_meta.utils import dump_profiled_results
    with open(save_path, 'w') as fp:
        json.dump(dump_profiled_results(new_models, detail=detail), fp, indent=4)

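A small sketch of the pure-dict case (case 2 in the docstring), with made-up module keys and values:

```python
prev = {"conv_bn_relu": {"id_0": {"latency": "10.2 +- 0.5"}}}
new = {
    "conv_bn_relu": {"id_1": {"latency": "12.1 +- 0.4"}},
    "dwconv_bn_relu": {"id_0": {"latency": "5.3 +- 0.2"}},
}

merged = merge_prev_info(new_info=new, prev_info=prev)
# merged["conv_bn_relu"] now holds both id_0 and id_1;
# "dwconv_bn_relu" is added as a new module key
```
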
@@ -0,0 +1,5 @@
OPENVINO_ENV:
OPTIMIZER_PATH: /data/openvino_2019.2.242/deployment_tools/model_optimizer/mo_tf.py
OPENVINO_RUNTIME_DIR: /data/openvino_2019.2.242/bin
DEVICE_SERIAL: /dev/ttyUSB4
DATA_TYPE: FP16

@@ -0,0 +1,5 @@
REMOTE_MODEL_DIR: /mnt/sdcard/tflite_bench
KERNEL_PATH: /mnt/sdcard/tflite_bench/kernel.cl
BENCHMARK_MODEL_PATH: /data/local/tmp/benchmark_model
BENCHMARK_MODEL_VERSION: 2.1
DEVICE_SERIAL:

@@ -0,0 +1,89 @@
HW: 28
CIN: 16
SHAPE_1D: 428
COUT: 256
KERNEL_SIZE: 3
PADDING: 'same'
STRIDES: 1
POOL_STRIDES: 2
EMP_ALPHA: 0.5
DETAIL: FALSE
BASIC_TESTCASES:
  - add_add
  - add_avgpool
  - add_concat
  - add_conv
  - add_convtrans
  - add_fc
  - add_dwconv
  - add_relu
  - add_reshape
  - avgpool_add
  - avgpool_avgpool
  - avgpool_concat
  - avgpool_conv
  - avgpool_convtrans
  - avgpool_dwconv
  - avgpool_relu
  - avgpool_reshape
  - concat_add
  - concat_avgpool
  - concat_concat
  - concat_conv
  - concat_convtrans
  - concat_fc
  - concat_dwconv
  - concat_relu
  - concat_reshape
  - conv_add
  - conv_avgpool
  - conv_concat
  - conv_conv
  - conv_convtrans
  - conv_dwconv
  - conv_hswish
  - conv_relu
  - conv_reshape
  - conv_se
  - convtrans_add
  - convtrans_avgpool
  - convtrans_concat
  - convtrans_conv
  - convtrans_convtrans
  - convtrans_dwconv
  - convtrans_relu
  - convtrans_reshape
  - fc_add
  - fc_concat
  - fc_fc
  - fc_relu
  - dwconv_add
  - dwconv_avgpool
  - dwconv_concat
  - dwconv_conv
  - dwconv_convtrans
  - dwconv_dwconv
  - dwconv_relu
  - dwconv_reshape
  - relu_add
  - relu_avgpool
  - relu_concat
  - relu_conv
  - relu_convtrans
  - relu_fc
  - relu_dwconv
  - relu_relu
  - relu_reshape
  - reshape_add
  - reshape_avgpool
  - reshape_concat
  - reshape_conv
  - reshape_convtrans
  - reshape_dwconv
  - reshape_relu
  - reshape_reshape
  - se_relu
OTHER_TESTCASES:
  - MON
LAYERS_1D:
  - fc

@@ -0,0 +1,84 @@
KERNEL: None
DETAIL: FALSE
IMPLEMENT: tensorflow
KERNELS:
  conv_bn_relu:
    INIT_SAMPLE_NUM: 1000
    FINEGRAINED_SAMPLE_NUM: 50
    ITERATION: 5
    ERROR_THRESHOLD: 0.1
  dwconv_bn_relu:
    INIT_SAMPLE_NUM: 1000
    FINEGRAINED_SAMPLE_NUM: 50
    ITERATION: 5
    ERROR_THRESHOLD: 0.1
  maxpool_block:
    INIT_SAMPLE_NUM: 1000
    FINEGRAINED_SAMPLE_NUM: 50
    ITERATION: 5
    ERROR_THRESHOLD: 0.1
  avgpool_block:
    INIT_SAMPLE_NUM: 1000
    FINEGRAINED_SAMPLE_NUM: 50
    ITERATION: 5
    ERROR_THRESHOLD: 0.1
  fc_block:
    INIT_SAMPLE_NUM: 1000
    FINEGRAINED_SAMPLE_NUM: 50
    ITERATION: 5
    ERROR_THRESHOLD: 0.1
  concat_block:
    INIT_SAMPLE_NUM: 1000
    FINEGRAINED_SAMPLE_NUM: 50
    ITERATION: 5
    ERROR_THRESHOLD: 0.1
  split_block:
    INIT_SAMPLE_NUM: 1000
    FINEGRAINED_SAMPLE_NUM: 50
    ITERATION: 5
    ERROR_THRESHOLD: 0.1
  channel_shuffle:
    INIT_SAMPLE_NUM: 1000
    FINEGRAINED_SAMPLE_NUM: 50
    ITERATION: 5
    ERROR_THRESHOLD: 0.1
  se_block:
    INIT_SAMPLE_NUM: 1000
    FINEGRAINED_SAMPLE_NUM: 50
    ITERATION: 5
    ERROR_THRESHOLD: 0.1
  globalavgpool_block:
    INIT_SAMPLE_NUM: 1000
    FINEGRAINED_SAMPLE_NUM: 50
    ITERATION: 5
    ERROR_THRESHOLD: 0.1
  bn_relu:
    INIT_SAMPLE_NUM: 1000
    FINEGRAINED_SAMPLE_NUM: 50
    ITERATION: 5
    ERROR_THRESHOLD: 0.1
  bn_block:
    INIT_SAMPLE_NUM: 1000
    FINEGRAINED_SAMPLE_NUM: 50
    ITERATION: 5
    ERROR_THRESHOLD: 0.1
  hswish_block:
    INIT_SAMPLE_NUM: 1000
    FINEGRAINED_SAMPLE_NUM: 50
    ITERATION: 5
    ERROR_THRESHOLD: 0.1
  relu_block:
    INIT_SAMPLE_NUM: 1000
    FINEGRAINED_SAMPLE_NUM: 50
    ITERATION: 5
    ERROR_THRESHOLD: 0.1
  add_relu:
    INIT_SAMPLE_NUM: 1000
    FINEGRAINED_SAMPLE_NUM: 50
    ITERATION: 5
    ERROR_THRESHOLD: 0.1
  add_block:
    INIT_SAMPLE_NUM: 1000
    FINEGRAINED_SAMPLE_NUM: 50
    ITERATION: 5
    ERROR_THRESHOLD: 0.1

@@ -1,12 +1,12 @@
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import os, sys
import os
import logging
import jsonlines
from glob import glob

from nn_meter.predictor import latency_metrics, list_latency_predictors, load_latency_predictor
from nn_meter.utils import download_from_url, get_user_data_folder
logging = logging.getLogger("nn-Meter")


__user_dataset_folder__ = os.path.join(get_user_data_folder(), 'dataset')

@@ -22,7 +22,6 @@ def bench_dataset(url="https://github.com/microsoft/nn-Meter/releases/download/v
    return datasets

if __name__ == '__main__':
    logging.basicConfig(stream=sys.stdout, format="(nn-Meter) %(message)s", level=logging.KEYINFO)

    datasets = bench_dataset()
    hws = list_latency_predictors()

@@ -3,12 +3,9 @@
import re
import copy
import logging

from .protobuf_helper import ProtobufHelper
from nn_meter.utils.import_package import try_import_tensorflow


logging = logging.getLogger(__name__)
logging = logging.getLogger("nn-Meter")


class FrozenPbParser:

@@ -1,8 +1,7 @@
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import logging

logging = logging.getLogger(__name__)
logging = logging.getLogger("nn-Meter")


class ProtobufHelper:

@@ -5,9 +5,7 @@ import copy
import math
import logging
from .protobuf_helper import ProtobufHelper as ph


logging = logging.getLogger(__name__)
logging = logging.getLogger("nn-Meter")


class ShapeInference:

@@ -5,6 +5,7 @@ from itertools import chain
from .utils import get_tensor_shape
from .constants import SLICE_TYPE
from nn_meter.utils.import_package import try_import_onnx
logging = logging.getLogger("nn-Meter")


class OnnxConverter:

@@ -2,10 +2,11 @@
# Licensed under the MIT license.
import json
import logging
from nn_meter.utils.import_package import try_import_onnx, try_import_torch, try_import_torchvision_models
from .onnx_converter import OnnxConverter
from .frozenpb_converter import FrozenPbConverter
from .torch_converter import NNIBasedTorchConverter, OnnxBasedTorchConverter, NNIIRConverter
from nn_meter.utils.import_package import try_import_onnx, try_import_torch, try_import_torchvision_models
logging = logging.getLogger("nn-Meter")


def model_file_to_graph(filename: str, model_type: str, input_shape=(1, 3, 224, 224), apply_nni=False):

@@ -132,6 +133,3 @@ def torch_model_to_graph(model, input_shape=(1, 3, 224, 224), apply_nni=False):
        logging.info("Onnx-based Torch Converter is applied for model conversion")
        converter = OnnxBasedTorchConverter(model, args)
    return converter.convert()



@@ -3,8 +3,8 @@
from nn_meter.utils.graph_tool import ModelGraph
from .utils.constants import DUMMY_TYPES
from .utils.ir_tools import convert_nodes
from .rulelib.rule_reader import RuleReader
from .rulelib.rule_splitter import RuleSplitter
from .rule_reader import RuleReader
from .rule_splitter import RuleSplitter


class KernelDetector:

@@ -21,11 +21,7 @@ class KernelDetector:
        self.model_graph.refresh()
        self.bbs = self.splitter.split(self.model_graph)

    @property
    def kernels(self):
        """
        TODO: Should be a method and renamed to get_kernels()
        """
    def get_kernels(self):
        kernels = []
        self._global_index = 0
        self._layer_kernel_dict = {}

Some files were not shown because too many files changed in this diff.