2019-04-08 07:14:02 +03:00
|
|
|
# Licensed to the Apache Software Foundation (ASF) under one
|
|
|
|
# or more contributor license agreements. See the NOTICE file
|
|
|
|
# distributed with this work for additional information
|
|
|
|
# regarding copyright ownership. The ASF licenses this file
|
|
|
|
# to you under the Apache License, Version 2.0 (the
|
|
|
|
# "License"); you may not use this file except in compliance
|
|
|
|
# with the License. You may obtain a copy of the License at
|
|
|
|
#
|
|
|
|
# http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
#
|
|
|
|
# Unless required by applicable law or agreed to in writing,
|
|
|
|
# software distributed under the License is distributed on an
|
|
|
|
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
|
|
|
# KIND, either express or implied. See the License for the
|
|
|
|
# specific language governing permissions and limitations
|
|
|
|
# under the License.
|
2018-11-01 05:41:47 +03:00
|
|
|
"""
|
|
|
|
Construct the necessary state for the TVM graph runtime
|
|
|
|
from a Relay expression.
|
|
|
|
"""
|
2019-06-27 19:37:51 +03:00
|
|
|
import warnings
|
2019-05-16 03:28:18 +03:00
|
|
|
import numpy as np
|
2019-01-12 02:31:14 +03:00
|
|
|
|
2019-05-16 03:28:18 +03:00
|
|
|
from tvm import expr as tvm_expr
|
2018-11-05 09:02:27 +03:00
|
|
|
from .. import nd as _nd, target as _target, autotvm
|
|
|
|
from ..contrib import graph_runtime as _graph_rt
|
2019-05-16 03:28:18 +03:00
|
|
|
from . import _build_module
|
2019-05-02 18:59:22 +03:00
|
|
|
from . import ty as _ty
|
2019-05-16 03:28:18 +03:00
|
|
|
from . import expr as _expr
|
2019-06-27 19:37:51 +03:00
|
|
|
from .module import Module as _Module
|
2018-11-05 09:02:27 +03:00
|
|
|
from .backend import interpreter as _interpreter
|
2019-05-09 09:09:15 +03:00
|
|
|
from .backend.vm import VMExecutor
|
2018-11-01 05:41:47 +03:00
|
|
|
|
2019-05-16 03:28:18 +03:00
|
|
|
def _update_target(target):
    """Normalize a build target into a device-type -> Target dictionary.

    Parameters
    ----------
    target : str, tvm.target.Target, or dict of str to str/tvm.target.Target
        A single homogeneous target, or a device-name -> target mapping
        for heterogeneous compilation. Falls back to the target currently
        set in the environment when falsy.

    Returns
    -------
    tgts : dict of tvm_expr.IntImm to tvm.target.Target
        Mapping from device-type (as an int32 IntImm) to the created Target.

    Raises
    ------
    ValueError
        If no target is given and none is set in the environment.
    TypeError
        If *target* is neither str, Target, nor dict.
    """
    if not target:
        target = _target.current_target()
    if target is None:
        raise ValueError("Target is not set in env or passed as argument.")

    # Fold the homogeneous case into the dict form so one loop handles both.
    if isinstance(target, (str, _target.Target)):
        dev_to_tgt = {str(target): target}
    elif isinstance(target, dict):
        dev_to_tgt = target
    else:
        raise TypeError("target is expected to be str or "
                        "tvm.target.Target, but received "
                        "{}".format(type(target)))

    tgts = {}
    for dev, tgt in dev_to_tgt.items():
        dev_type = tvm_expr.IntImm("int32", _nd.context(dev).device_type)
        tgts[dev_type] = _target.create(tgt)
    return tgts
|
2019-01-07 19:46:34 +03:00
|
|
|
|
2018-11-14 19:56:40 +03:00
|
|
|
|
2019-05-16 03:28:18 +03:00
|
|
|
class BuildModule(object):
    """Build a Relay function to run on TVM graph runtime. This class is used
    to expose the `RelayBuildModule` APIs implemented in C++.
    """
    def __init__(self):
        # Instantiate the C++ build module and bind its exported PackedFuncs.
        self.mod = _build_module._BuildModule()
        self._build = self.mod["build"]
        self._get_graph_json = self.mod["get_graph_json"]
        self._get_module = self.mod["get_module"]
        self._set_params_func = self.mod["set_params"]
        self._get_params_func = self.mod["get_params"]

    def build(self, func, target=None, target_host=None, params=None):
        """Compile a Relay function into graph-runtime artifacts.

        Parameters
        ----------
        func: relay.Function
            The function to build.

        target : str, :any:`tvm.target.Target`, or dict of str(i.e.
        device/context name) to str/tvm.target.Target, optional
            For heterogeneous compilation, it is a dictionary indicating context
            to target mapping. For homogeneous compilation, it is a build target.

        target_host : str or :any:`tvm.target.Target`, optional
            Host compilation target, if target is device.
            When TVM compiles device specific program such as CUDA,
            we also need host(CPU) side code to interact with the driver
            to setup the dimensions and parameters correctly.
            target_host is used to specify the host side codegen target.
            By default, llvm is used if it is enabled,
            otherwise a stackvm intepreter is used.

        params : dict of str to NDArray
            Input parameters to the graph that do not change
            during inference time. Used for constant folding.

        Returns
        -------
        graph_json : str
            The json string that can be accepted by graph runtime.

        mod : tvm.Module
            The module containing necessary libraries.

        params : dict
            The parameters of the final graph.
        """
        target = _update_target(target)

        # Constant-fold the weights into the graph before building.
        if params:
            self._set_params(params)
        self._build(func, target, target_host)

        # Fetch the three build artifacts from the C++ side.
        return self.get_json(), self.get_module(), self.get_params()

    def _set_params(self, params):
        # Wrap each weight as a Relay constant; plain numpy arrays are
        # first converted to tvm.nd.NDArray.
        const_params = {
            name: _expr.const(_nd.array(value) if isinstance(value, np.ndarray)
                              else value)
            for name, value in params.items()
        }
        self._set_params_func(const_params)

    def get_json(self):
        """Return the json file of the built program."""
        return self._get_graph_json()

    def get_module(self):
        """Return the built module."""
        return self._get_module()

    def get_params(self):
        """Return the updated weights."""
        raw_params = self._get_params_func()
        # Unwrap the Constant nodes down to their backing NDArray data.
        return {key: value.data for key, value in raw_params.items()}
|
|
|
|
|
2018-11-05 09:02:27 +03:00
|
|
|
|
2019-06-27 19:37:51 +03:00
|
|
|
def build(mod, target=None, target_host=None, params=None):
    """Helper function that builds a Relay function to run on TVM graph
    runtime.

    Parameters
    ----------
    mod : relay.Module
        The module to build. Using relay.Function is deprecated.

    target : str, :any:`tvm.target.Target`, or dict of str(i.e. device/context
    name) to str/tvm.target.Target, optional
        For heterogeneous compilation, it is a dictionary indicating context to
        target mapping. For homogeneous compilation, it is a build target.

    target_host : str or :any:`tvm.target.Target`, optional
        Host compilation target, if target is device.
        When TVM compiles device specific program such as CUDA,
        we also need host(CPU) side code to interact with the driver
        setup the dimensions and parameters correctly.
        target_host is used to specify the host side codegen target.
        By default, llvm is used if it is enabled,
        otherwise a stackvm intepreter is used.

    params : dict of str to NDArray
        Input parameters to the graph that do not change
        during inference time. Used for constant folding.

    Returns
    -------
    graph_json : str
        The json string that can be accepted by graph runtime.

    mod : tvm.Module
        The module containing necessary libraries.

    params : dict
        The parameters of the final graph.
    """
    # Accept a Module (preferred) or a bare Function (deprecated).
    if isinstance(mod, _Module):
        func = mod["main"]
    elif isinstance(mod, _expr.Function):
        warnings.warn(
            "Please use input parameter mod (tvm.relay.module.Module) "
            "instead of deprecated parameter func (tvm.relay.expr.Function)",
            DeprecationWarning)
        func = mod
    else:
        raise ValueError("Type of input parameter mod must be tvm.relay.module.Module")

    target = _update_target(target)

    if isinstance(target_host, (str, _target.Target)):
        target_host = _target.create(target_host)
    elif target_host:
        raise ValueError("target host must be the type of str, "
                         "tvm.target.Target, or None")

    # If current dispatch context is fallback context (the default root context),
    # then load pre-tuned parameters from TopHub
    in_fallback = isinstance(autotvm.DispatchContext.current, autotvm.FallbackContext)
    tophub_context = (autotvm.tophub.context(list(target.values()))
                      if in_fallback else autotvm.util.EmptyContext())

    with tophub_context:
        builder = BuildModule()
        graph_json, runtime_mod, graph_params = builder.build(
            func, target, target_host, params)
    return graph_json, runtime_mod, graph_params
|
2018-11-05 09:02:27 +03:00
|
|
|
|
|
|
|
|
|
|
|
class GraphExecutor(_interpreter.Executor):
    """Wrapper around Executor interface.

    This executor is used for debug and testing purposes.

    Parameters
    ----------
    mod : :py:class:`~tvm.relay.module.Module`
        The module to support the execution.

    ctx : :py:class:`TVMContext`
        The runtime context to run the code on.

    target : :py:class:`Target`
        The target option to build the function.
    """

    def __init__(self, mod, ctx, target):
        assert mod is not None
        self.mod = mod
        self.ctx = ctx
        self.target = target

    def _make_executor(self, expr=None):
        # Optionally install *expr* as the module entry point before building.
        if expr:
            self.mod["main"] = expr
        main_ret_type = self.mod["main"].checked_type.ret_type
        if isinstance(main_ret_type, _ty.TupleType):
            num_outputs = len(main_ret_type.fields)
        else:
            num_outputs = 1
        graph_json, lib, graph_params = build(self.mod, target=self.target)
        runtime = _graph_rt.create(graph_json, lib, self.ctx)
        if graph_params:
            runtime.set_input(**graph_params)

        def _graph_wrapper(*args, **kwargs):
            args = self._convert_args(self.mod["main"], args, kwargs)
            # Bind each positional argument as a graph input.
            for idx, arg in enumerate(args):
                runtime.set_input(idx, arg)
            runtime.run()
            # Copy results off-device so repeated invocations don't alias
            # the runtime's output buffers.
            if num_outputs == 1:
                return runtime.get_output(0).copyto(_nd.cpu(0))
            return [runtime.get_output(idx).copyto(_nd.cpu(0))
                    for idx in range(num_outputs)]

        return _graph_wrapper
|
|
|
|
|
|
|
|
|
|
|
|
def create_executor(kind="debug",
                    mod=None,
                    ctx=None,
                    target="llvm"):
    """Factory function to create an executor.

    Parameters
    ----------
    kind : str
        The type of executor: "debug" (interpreter), "graph"
        (graph runtime), or "vm" (virtual machine).

    mod : :py:class:`~tvm.relay.module.Module`
        The Relay module containing collection of functions.
        A fresh empty module is created when omitted.

    ctx : :py:class:`tvm.TVMContext`
        The context to execute the code. Must match the device type
        implied by *target*; derived from *target* when omitted.

    target : :py:class:`tvm.Target`
        The target the code should be built for.
    """
    if mod is None:
        mod = _Module()
    # Derive the context from the target when not given; otherwise make
    # sure the caller's context is on the same device type.
    if ctx is None:
        ctx = _nd.context(str(target), 0)
    else:
        assert ctx.device_type == _nd.context(str(target), 0).device_type
    if isinstance(target, str):
        target = _target.create(target)

    if kind == "debug":
        return _interpreter.Interpreter(mod, ctx, target)
    if kind == "graph":
        return GraphExecutor(mod, ctx, target)
    if kind == "vm":
        return VMExecutor(mod, ctx, target)
    raise RuntimeError("unknown execution strategy: {0}".format(kind))
|