[AUTOTVM] Misc fix to document and style (#2035)

Lianmin Zheng 2018-10-30 19:20:53 -07:00 committed by Tianqi Chen
Parent 8ad3bd5680
Commit f2d7787e57
7 changed files with 36 additions and 39 deletions

View file

@@ -34,7 +34,7 @@ def get_network(name, batch_size, dtype='float32'):
     elif name == 'mobilenet_v2':
         net, params = nnvm.testing.mobilenet_v2.get_workload(batch_size=batch_size, dtype=dtype)
     elif name == 'inception_v3':
-        input_shape = (1, 3, 299, 299)
+        input_shape = (batch_size, 3, 299, 299)
         net, params = nnvm.testing.inception_v3.get_workload(batch_size=batch_size, dtype=dtype)
     elif "resnet" in name:
         n_layer = int(name.split('-')[1])

View file

@@ -124,13 +124,15 @@ TVM package
 ~~~~~~~~~~~
 The python package is located at `tvm/python`
-There are several ways to install the package:
+There are two ways to install the package:
-1. Set the environment variable `PYTHONPATH` to tell python where to find
+Method 1
+This method is **recommended for developers** who may change the codes.
+Set the environment variable `PYTHONPATH` to tell python where to find
 the library. For example, assume we cloned `tvm` on the home directory
 `~`. then we can added the following line in `~/.bashrc`.
-It is **recommended for developers** who may change the codes.
-The changes will be immediately reflected once you pulled the code and rebuild the project (no need to call ``setup`` again)
+The changes will be immediately reflected once you pull the code and rebuild the project (no need to call ``setup`` again)
 .. code:: bash
@@ -138,7 +140,8 @@ There are several ways to install the package:
 export PYTHONPATH=$TVM_HOME/python:$TVM_HOME/topi/python:$TVM_HOME/nnvm/python:${PYTHONPATH}
-2. Install tvm python bindings by `setup.py`:
+Method 2
+Install tvm python bindings by `setup.py`:
 .. code:: bash
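For the Method 1 (PYTHONPATH) route above, a quick way to confirm the setup is to check that the packages resolve from the source tree rather than from site-packages. A minimal sketch, assuming `TVM_HOME` points at the clone and `PYTHONPATH` is exported as in the hunk above:

    import os

    import tvm
    import topi
    import nnvm

    # Each package should be loaded from a directory under $TVM_HOME.
    for mod in (tvm, topi, nnvm):
        print(mod.__name__, "->", os.path.dirname(mod.__file__))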

View file

@@ -551,7 +551,9 @@ def check_remote(target, device_key, host=None, port=None, priority=100, timeout
     """
     def _check():
         remote = request_remote(device_key, host, port, priority)
-        remote.context(str(target))
+        ctx = remote.context(str(target))
+        while not ctx.exist:  # wait until we get an available device
+            pass
     t = threading.Thread(target=_check,)
     t.start()
     t.join(timeout)
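The change makes the probe wait until the requested device context actually becomes available, instead of returning after the first RPC handshake. A usage sketch, assuming check_remote is importable from tvm.autotvm.measure.measure_methods, an RPC tracker runs on 0.0.0.0:9190, and a board is registered under the hypothetical key 'rk3399':

    from tvm.autotvm.measure.measure_methods import check_remote

    # True if a usable device for this key shows up within the timeout.
    available = check_remote('opencl', 'rk3399', host='0.0.0.0', port=9190, timeout=10)
    print("device available:", available)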

View file

@@ -252,13 +252,13 @@ Usage:
 This record executable module has three modes.
 
 * Print log file in readable format
-e.g. python -m autotvm.record --mode read --i collect_conv.log --begin 0 --end 5 --ir --code
+e.g. python -m tvm.autotvm.record --mode read --i collect_conv.log --begin 0 --end 5 --ir --code
 
 * Extract history best from a large log file
-e.g. python -m autotvm.record --mode pick --i collect.log
+e.g. python -m tvm.autotvm.record --mode pick --i collect.log
 
 * Split a log file into separate files, each of which contains only a single wkl
-e.g. python -m autotvm.record --mode split --i collect.log
+e.g. python -m tvm.autotvm.record --mode split --i collect.log
 """
 if __name__ == '__main__':
     parser = argparse.ArgumentParser()
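The same operations are also available programmatically; a rough sketch using the record helpers (log file names are hypothetical):

    from tvm.autotvm import record

    # "pick" mode: keep only the best configuration per workload
    record.pick_best("collect.log", "collect_best.log")

    # "read" mode: iterate over the stored (input, result) pairs
    for inp, res in record.load_from_file("collect_best.log"):
        print(inp.task, res.costs)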

View file

@@ -292,17 +292,13 @@ class ApplyHistoryBest(DispatchContext):
                        best_by_targetkey[key] = (inp, res)
 
            # use model as key to build best map
-            for opt in inp.target.options:
-                if opt.startswith("-model"):
-                    model = opt[7:]
-                    key = (model, inp.task.workload)
-                    if key not in best_by_model:
-                        best_by_model[key] = (inp, res)
-                    else:
-                        _, other_res = best_by_model[key]
-                        if np.mean(other_res.costs) > np.mean(res.costs):
-                            best_by_model[key] = (inp, res)
-                    break
+            key = (inp.target.model, inp.task.workload)
+            if key not in best_by_model:
+                best_by_model[key] = (inp, res)
+            else:
+                _, other_res = best_by_model[key]
+                if np.mean(other_res.costs) > np.mean(res.costs):
+                    best_by_model[key] = (inp, res)
 
        logger.debug("Finish loading %d records", counter)
@@ -313,14 +309,11 @@ class ApplyHistoryBest(DispatchContext):
                " above the dispatcher call. So does other target. ")
 
        # first try matching by model
-        for opt in target.options:
-            if opt.startswith("-model"):
-                model = opt[7:]
-                key = (model, workload)
-                if key in self._best_user_defined:
-                    return self._best_user_defined[key]
-                if key in self.best_by_model:
-                    return self.best_by_model[key][0].config
+        key = (target.model, workload)
+        if key in self._best_user_defined:
+            return self._best_user_defined[key]
+        if key in self.best_by_model:
+            return self.best_by_model[key][0].config
 
        # then try matching by target key
        for k in target.keys:
@@ -333,11 +326,9 @@ class ApplyHistoryBest(DispatchContext):
        return None
 
    def update(self, target, workload, cfg):
-        for opt in target.options:
-            if opt.startswith("-model"):
-                model = opt[7:]
-                key = (model, workload)
-                self._best_user_defined[key] = cfg
+        model = target.model
+        key = (model, workload)
+        self._best_user_defined[key] = cfg
 
        for k in target.keys:
            key = (k, workload)
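The refactor relies on the Target object exposing a model attribute, so ApplyHistoryBest keys records by (model, workload) instead of re-parsing the raw "-model=..." option string. A small sketch with hypothetical values:

    import tvm

    tgt = tvm.target.create("cuda -model=titanx")
    print(tgt.model)       # "titanx", taken from the -model option

    fallback = tvm.target.create("cuda")
    print(fallback.model)  # "unknown" when no -model option is given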

View file

@@ -1,7 +1,8 @@
 """Initializer of parameters."""
-import numpy as np
 import tvm
+from tvm import relay
+import numpy as np
 
 
 class Initializer(object):
     """The base class of an initializer."""
@@ -103,7 +104,7 @@ class Xavier(Initializer):
             raise ValueError("Incorrect factor type")
         # Hack for mobilenet, because there is less connectivity
         if "depthwise" in name:
-            factor = 3 * 3
+            factor = hw_scale
         scale = np.sqrt(self.magnitude / factor)
         if self.rnd_type == "uniform":
             arr[:] = np.random.uniform(-scale, scale, size=arr.shape)
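The fix replaces the hard-coded 3 * 3 with hw_scale (the kernel's spatial size), which only matters for depthwise kernels that are not 3x3. A small worked example with illustrative values:

    import numpy as np

    magnitude = 3.0        # Xavier magnitude (assumed default of 3)
    hw_scale = 5 * 5       # spatial size of a hypothetical 5x5 depthwise kernel
    factor = hw_scale      # was 3 * 3 before the fix, regardless of kernel size
    scale = np.sqrt(magnitude / factor)
    print(scale)           # ~0.346, vs ~0.577 with the old hard-coded factor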

View file

@@ -419,7 +419,7 @@ def intel_graphics(model='unknown', options=None):
     return _api_internal._TargetCreate("opencl", *opts)
 
 
-def opengl(options=None):
+def opengl(model='unknown', options=None):
     """Returns a OpenGL target.
 
     Parameters
@@ -427,8 +427,8 @@ def opengl(options=None):
     options : str or list of str
         Additional options
     """
-    options = _merge_opts([], options)
-    return _api_internal._TargetCreate("opengl", *options)
+    opts = _merge_opts(["-model=%s" % model], options)
+    return _api_internal._TargetCreate("opengl", *opts)
 
 
 def arm_cpu(model='unknown', options=None):
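With this change the OpenGL target constructor records a device model like the other target constructors, so tuning records can be matched by model. A usage sketch with a hypothetical model string:

    import tvm

    tgt = tvm.target.opengl(model='intel_hd530')
    print(tgt)        # the target string now carries "-model=intel_hd530"
    print(tgt.model)  # "intel_hd530"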