Addressing CR comments
Parent: adcf0e92b3
Commit: a3fe2f3d47

@@ -3,6 +3,6 @@ __version__ = '1.5'
 from .context import *
 from .graph import *
 from .objectives import *
-from .cntk1_ops import *
+from .ops.cntk2 import *
 from .optimizer import *
 from .reader import *

@@ -7,7 +7,7 @@ import numpy as np
 import shutil as sh

 from cntk.graph import ComputationNode
-from cntk.cntk1_ops import NewReshape
+from cntk.ops.cntk1 import NewReshape
 from cntk.utils import CNTK_EXECUTABLE_PATH

@@ -197,10 +197,10 @@ class AbstractContext(object, metaclass=ABCMeta):
         fn = os.path.join(self.directory, 'dummy_input.txt')
         from .reader import NumPyReader
         reader = NumPyReader(data, fn)
-        from .cntk1_ops import Input
+        from .ops.cntk1 import Input
         dummy_input_node = Input(2, var_name='dummy_node')
         reader.add_input(dummy_input_node, 0, 2)
-        model_description += "\ndummy_node=Input(2, tag='output')"
+        model_description += "dummy_node = Input(2, tag='output')"
         readers.append(reader)

         tmpl = open(CNTK_EVAL_TEMPLATE_PATH, "r").read()

@@ -260,7 +260,7 @@ class AbstractContext(object, metaclass=ABCMeta):
 class Context(AbstractContext):

     '''
-    This is a sub-class of AbstractContext, use it to run CNTK locally.
+    This is a sub-class of AbstractContext, use it to run CNTK locally.
     '''

     def _call_cntk(self, config_file_name, config_content):

@@ -400,6 +400,6 @@ class Context(AbstractContext):

 class ClusterContext(AbstractContext):
     '''
-    This is a sub-class of AbstractContext, use it to submit your workloads to the cluster.
+    This is a sub-class of AbstractContext, use it to submit your workloads to the cluster.
     '''
     pass

@@ -5,6 +5,9 @@ import scipy.sparse as sparse
 def _tuple_to_cntk_shape(shape):
     return ':'.join(str(v) for v in shape)

+# Number of spaces by which the generated model description is indented
+MODEL_INDENTATION = 8
+
 class ComputationNode(object):
     '''
     Base class for all nodes and operators. Provides a NumPy-like interface

@@ -177,7 +180,7 @@ class ComputationNode(object):

         params = self._get_cntk_param_string(param_variable_names)

-        line = "%s = %s(%s)" % (self.var_name, self.name, params)
+        line = ' '*MODEL_INDENTATION + "%s = %s(%s)" % (self.var_name, self.name, params)
         desc.append(line)

         return self.var_name, node_counter, desc

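A note on the two hunks above: every line emitted into the model description now carries its own eight-space indent, so the BrainScript templates no longer have to indent the substituted block themselves. A minimal sketch of the new behavior (the `render_line` helper is illustrative, not part of the CNTK API):

MODEL_INDENTATION = 8

def render_line(var_name, op_name, params):
    # Pre-indent each generated BrainScript line so it sits correctly
    # inside the BrainScriptNetworkBuilder=[ ... ] block of the templates.
    return ' ' * MODEL_INDENTATION + "%s = %s(%s)" % (var_name, op_name, params)

print(render_line('v0', 'Constant', '0, rows=1, cols=1'))
# output:         v0 = Constant(0, rows=1, cols=1)
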
@@ -262,8 +265,8 @@ class ImageInputComputationNodeBase(ComputationNode, metaclass=ABCMeta):
         raise NotImplementedError

 # importing after defining ComputationNode to work around circular imports
-from cntk.cntk1_ops import *
-from cntk import cntk1_ops # to have a separate namespace when we want to override below
+from cntk.ops.cntk1 import *
+from cntk.ops import cntk1 as cntk1_ops # to have a separate namespace when we want to override below
 from .reader import UCIFastReader, CNTKTextFormatReader

 # redefine some operators to work with NumPy and sequences as input

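The deferred import above is the standard workaround for an import cycle: the operator modules under `cntk.ops` subclass `ComputationNode`, so they can only be imported once this module has finished defining it. A two-module sketch of the pattern (file and class names here are illustrative, not the actual CNTK modules):

# graph_sketch.py
class ComputationNodeSketch(object):
    pass

# Deliberately placed at the bottom: ops_sketch subclasses
# ComputationNodeSketch, so importing it at the top would touch
# a half-initialized module.
from ops_sketch import *

# ops_sketch.py
from graph_sketch import ComputationNodeSketch

class PlusSketch(ComputationNodeSketch):
    pass
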
@@ -54,16 +54,17 @@ class UCIFastReader(AbstractReader):
     def generate_config(self):
         """Generate the reader configuration block
         """
-        template = ''' reader = [
-        readerType = "%(ReaderType)s"
-        file = "%(FileName)s"
-        randomize = "none"
-        verbosity = 1
-        '''
+        template = '''\
+    reader = [
+        readerType = "%(ReaderType)s"
+        file = "%(FileName)s"
+        randomize = "none"
+        verbosity = 1
+'''

         if self['CustomDelimiter'] is not None:
-            template += '''
-            customDelimiter=%(CustomDelimiter)s
+            template += '''\
+        customDelimiter = %(CustomDelimiter)s
 '''

         if self.inputs_def is not None:

@@ -73,28 +74,28 @@ class UCIFastReader(AbstractReader):
             else:
                 name = name_or_node

-            template += '''
-            {0}=[
-                start = {1}
-                dim = {2}
+            template += '''\
+        {0} = [
+            start = {1}
+            dim = {2}
 '''.format(name, start, dim)

             if num_of_classes:
-                template += '''
-                labelDim= {0}
+                template += '''\
+            labelDim= {0}
 '''.format(num_of_classes)
             if map_file:
-                template += '''
-                labelMappingFile= "{0}"
+                template += '''\
+            labelMappingFile= "{0}"
 '''.format(map_file)

-            template += '''
-            ]
-'''
+            template += '''\
+        ]
+'''

-        template += '''
-        ]
-'''
+        template += '''\
+    ]
+'''

         return template % self

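The recurring change in this file replaces `template += '''` with `template += '''\`. The backslash directly after the opening triple quote is an ordinary line continuation, so the string no longer starts with a newline and the template controls its leading whitespace precisely. A self-contained demonstration:

with_newline = '''
reader = [
'''
without_newline = '''\
reader = [
'''
assert with_newline.startswith('\n')         # stray blank line in the config
assert without_newline.startswith('reader')  # starts at the text itself
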
@@ -9,10 +9,10 @@ Eval=[
     run=BrainScriptNetworkBuilder

     BrainScriptNetworkBuilder=[
-        %(ModelDescription)s
+%(ModelDescription)s
     ]

-    %(Reader)s
+%(Reader)s

     outputPath = "%(OutputFile)s"
 ]

@@ -9,7 +9,7 @@ deviceId=%(DevideId)s
 Predict=[
     action="write"

-    %(Reader)s
+%(Reader)s

     outputPath = "%(PredictOutputFile)s" # dump the output as text
 ]

@@ -9,7 +9,7 @@ deviceId=%(DevideId)s
 Test=[
     action="test"

-    %(Reader)s
+%(Reader)s
 ]

@@ -11,13 +11,13 @@ Train=[
     run=BrainScriptNetworkBuilder

     BrainScriptNetworkBuilder=[
-        %(ModelDescription)s
+%(ModelDescription)s
     ]

     SGD = [
-        %(SGD)s
+%(SGD)s
     ]

-    %(Reader)s
+%(Reader)s
 ]

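A plausible reading of these template edits, given the MODEL_INDENTATION change above: `%`-substitution pastes a multi-line value verbatim, so only its first line would pick up the template's leading spaces. Moving the placeholders to column zero and letting each generated line carry its own indent keeps the rendered config uniform:

desc = '        v0 = Constant(0)\n        v1 = Plus(v0, v0)'  # lines carry their own indent
template = '''\
BrainScriptNetworkBuilder=[
%(ModelDescription)s
]
'''
print(template % {'ModelDescription': desc})
# BrainScriptNetworkBuilder=[
#         v0 = Constant(0)
#         v1 = Plus(v0, v0)
# ]
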
@@ -51,71 +51,73 @@ def test_overload_exception():
     with pytest.raises(ValueError):
         C(range(0, 10))[0:3:2]

+def _to_list(desc):
+    return [line.strip() for line in desc.split('\n')]
+
 @pytest.mark.parametrize("root_node, expected", [
-    (C(2, var_name='c0'), "c0 = Constant(2, rows=1, cols=1)"),
-    # Input should behave as Constant in case of scalars
-    (I([1,2], var_name='i1'), "i1 = Input(2:1, tag='feature')"),
+    (C(2, var_name='c0'), ["c0 = Constant(2, rows=1, cols=1)"]),
+    # Input should behave as Constant in case of scalars
+    (I([1,2], var_name='i1'), ["i1 = Input(2:1, tag='feature')"]),
     (Plus(C(0), C(1)),
-        "v0 = Constant(0, rows=1, cols=1)\nv1 = Constant(1, rows=1, cols=1)\nv2 = Plus(v0, v1)"),
+        ["v0 = Constant(0, rows=1, cols=1)", "v1 = Constant(1, rows=1, cols=1)", "v2 = Plus(v0, v1)"]),
 ])
 def test_description(root_node, expected):
     description, has_inputs, readers = root_node.to_config()
-    assert description == expected
+    assert _to_list(description) == expected

 def test_graph_with_same_node_twice():
     v0 = C(1)
     root_node = Plus(v0, v0)
-    expected = 'v0 = Constant(1, rows=1, cols=1)\nv1 = Plus(v0, v0)'
+    expected = ['v0 = Constant(1, rows=1, cols=1)', 'v1 = Plus(v0, v0)']
     description, has_inputs, readers = root_node.to_config()
-    assert description == expected
+    assert _to_list(description) == expected
     assert readers == []

 @pytest.mark.parametrize("alias, data, expected", [
-    ('', [A([1,0]), A([0,0,1,0])], ValueError), # no alias given
-    ('A', [object()], ValueError),
-])
+    ('', [A([1,0]), A([0,0,1,0])], ValueError), # no alias given
+    ('A', [object()], ValueError),
+])
 def test_sequence_conversion_exceptions(alias, data, expected):
-    with pytest.raises(expected):
-        _seq_to_text_format(data, alias=alias)
+    with pytest.raises(expected):
+        _seq_to_text_format(data, alias=alias)

 def test_constant_var_name():
-    var_name = 'NODE'
-    node = C([A([])], var_name=var_name)
-    assert node.var_name == var_name
+    var_name = 'NODE'
+    node = C([A([])], var_name=var_name)
+    assert node.var_name == var_name

 @pytest.mark.parametrize("alias, data, expected", [
-    ('W', [A([])], """\
+    ('W', [A([])], """\
 0|W \
 """),
-    ('W', [A([1,0]), A([0,0,1,0])], """\
+    ('W', [A([1,0]), A([0,0,1,0])], """\
 0|W 1 0
 1|W 0 0 1 0\
 """),
-])
+])
 def test_sequence_conversion_dense(alias, data, expected):
-    assert _seq_to_text_format(data, alias=alias) == expected
+    assert _seq_to_text_format(data, alias=alias) == expected

 if False:
-    @pytest.mark.parametrize("alias, data, expected", [
-    ('W', [A({})], """\
-0|W \
-"""),
-    ('W', [{3:1, 50:1, 2:0}, {1:-5}], """\
-0|W 2:0 3:1 50:1
-1|W 1:-5\
-"""),
-    ])
-    def test_sequence_conversion_sparse(alias, data, expected):
-        # We use the dictionary in data to create a SciPy sparse dictionary of
-        # keys, which we then feed to the converter.
-        dok_data = []
-        for data_elem in data:
-            d = scipy.sparse.dok_matrix((100,1))
-            for k,v in data_elem.items():
-                d[k] = v
-            dok_data.append(d)
-        assert _seq_to_text_format(dok_data, alias=alias) == expected
+    @pytest.mark.parametrize("alias, data, expected", [
+        ('W', [A({})], """\
+0|W \
+"""),
+        ('W', [{3:1, 50:1, 2:0}, {1:-5}], """\
+0|W 2:0 3:1 50:1
+1|W 1:-5\
+"""),
+    ])
+    def test_sequence_conversion_sparse(alias, data, expected):
+        # We use the dictionary in data to create a SciPy sparse dictionary of
+        # keys, which we then feed to the converter.
+        dok_data = []
+        for data_elem in data:
+            d = scipy.sparse.dok_matrix((100,1))
+            for k,v in data_elem.items():
+                d[k] = v
+            dok_data.append(d)
+        assert _seq_to_text_format(dok_data, alias=alias) == expected

 @pytest.mark.parametrize("data, expected", [
     ([], True),

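The new `_to_list` helper keeps the description tests independent of the indentation that `to_config()` now adds: each line is stripped before comparison, so the expected values stay readable as flat lists. For example:

def _to_list(desc):
    return [line.strip() for line in desc.split('\n')]

desc = '        v0 = Constant(1, rows=1, cols=1)\n        v1 = Plus(v0, v0)'
assert _to_list(desc) == ['v0 = Constant(1, rows=1, cols=1)',
                          'v1 = Plus(v0, v0)']
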
@@ -125,10 +127,9 @@ if False:
     ([[A([1,2])]], False),
     ([A([1,2])], False),
     ([A([1,2]), A([])], False),
-])
+])
 def test_is_tensor(data, expected):
-    #import ipdb;ipdb.set_trace()
-    assert is_tensor(data) == expected
+    assert is_tensor(data) == expected

 @pytest.mark.parametrize("data, expected", [
     ([], False),

@@ -138,7 +139,7 @@ def test_is_tensor(data, expected):
     ([[A([1,2])]], False),
     ([A([1,2])], True),
     ([A([1,2]), A([])], True),
-])
+])
 def test_is_sequence(data, expected):
-    assert is_sequence(data) == expected
+    assert is_sequence(data) == expected

@@ -80,5 +80,5 @@ def test_overload_eval(root_node, expected):
     (C(np.asarray([[1,2],[3,4]]))*C(np.asarray([[1,2],[3,4]])), [[1,4],[9,16]]),
 ])
 def test_ops_on_numpy(root_node, expected, tmpdir):
-    _test(root_node, expected, clean_up=True)
+    _test(root_node, expected, clean_up=False)