folder renaming
This commit is contained in:
Parent: ee3b2f9baf
Commit: 89565ca89d
@@ -0,0 +1,81 @@
import numpy as np

from ..context import *


def test_parse_shapes_1():
    output = '''\
FormNestedNetwork: WARNING: Was called twice for v3 Plus operation

Validating network. 5 nodes to process in pass 1.

Validating --> dummy_node = InputValue() : -> [2 {1} x *]
Validating --> v0 = LearnableParameter() : -> [4 x 1 {1,4}]
Validating --> v1 = Reshape (v0) : [4 x 1 {1,4}] -> [2 x 2 {1,2}]
Validating --> v2 = LearnableParameter() : -> [1 x 1 {1,1}]
Validating --> v3 = Plus (v1, v2) : [2 x 2 {1,2}], [1 x 1 {1,1}] -> [2 x 2 {1,2}]

Validating network. 2 nodes to process in pass 2.

Validating network, final pass.

5 out of 5 nodes do not share the minibatch layout with the input data.

Post-processing network complete.
'''

    expected = {
        'dummy_node': (2, np.nan),
        'v0': (4, 1),
        'v1': (2, 2),
        'v2': (1, 1),
        'v3': (2, 2)
    }

    assert Context._parse_shapes_from_output(output) == expected


def test_parse_shapes_2():
    output = '''\
Validating --> v1 = LearnableParameter() : -> [3 x 2 {1,3}]
Validating --> v2 = InputValue() : -> [2 {1} x *]
Validating --> v3 = Times (v1, v2) : [3 x 2 {1,3}], [2 {1} x *] -> [3 {1} x *]
Validating --> v4 = LearnableParameter() : -> [3 x 1 {1,3}]
Validating --> v5 = Plus (v3, v4) : [3 {1} x *], [3 x 1 {1,3}] -> [3 x 1 {1,3} x *]
'''

    expected = {
        'v1': (3, 2),
        'v2': (2, np.nan),
        'v3': (3, np.nan),
        'v4': (3, 1),
        'v5': (3, 1, np.nan),
    }

    assert Context._parse_shapes_from_output(output) == expected


def test_parse_eval_result_output_1():
    output = '''\
0 |w.shape 1 1
0 |w 60.000000
1 |w.shape 1 2
1 |w 22.000000
1 |w 24.000000'''
    list_of_tensors = Context._parse_result_output(output)
    expected = [[[60]], [[22], [24]]]
    assert len(list_of_tensors) == len(expected)
    for res, exp in zip(list_of_tensors, expected):
        assert np.allclose(res, np.asarray(exp))


def test_parse_test_result_output():
    output = '''\
Final Results: Minibatch[1-1]: SamplesSeen = 500 v8: SquareError/Sample = 13.779223 v7: CrossEntropyWithSoftmax/Sample = 0.20016696 Perplexity = 1.2216067 '''
    result = Context._parse_test_result(output)

    assert result['SamplesSeen'] == 500
    assert result['Perplexity'] == 1.2216067
    assert result['v8'] == 13.779223
    assert result['v7'] == 0.20016696
    assert len(result) == 4
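The tests above fully pin down the two parsing contracts: shapes are read off the `Validating -->` lines (a `*` free axis maps to `np.nan`, `{...}` stride annotations are dropped), and the final-results line is scraped for `SamplesSeen`, per-node `/Sample` metrics, and `Perplexity`. As a reading aid, here is a minimal reconstruction of both parsers from nothing but the asserted behavior; the regexes and names are assumptions, not the actual code in `cntk/context.py`:

```python
import re

import numpy as np

# Hypothetical reconstruction of the behavior asserted above.
_VALIDATE_LINE = re.compile(
    r"^Validating --> (?P<name>\S+) = .* -> \[(?P<shape>[^\]]+)\]\s*$")

def parse_shapes_from_output(output):
    # Map node name -> tuple of output dims; '*' (free axis) becomes
    # np.nan, and '{...}' stride annotations are ignored.
    shapes = {}
    for line in output.splitlines():
        match = _VALIDATE_LINE.match(line)
        if not match:
            continue
        dims = []
        for dim in match.group('shape').split('x'):
            dim = re.sub(r"\{[^}]+\}", "", dim).strip()
            dims.append(np.nan if dim == '*' else int(dim))
        shapes[match.group('name')] = tuple(dims)
    return shapes

def parse_test_result(output):
    # 'SamplesSeen' and 'Perplexity' are read verbatim; entries such as
    # 'v8: SquareError/Sample = 13.779223' are keyed by the node name.
    result = {}
    result['SamplesSeen'] = int(re.search(r"SamplesSeen = (\d+)", output).group(1))
    result['Perplexity'] = float(re.search(r"Perplexity = ([\d.]+)", output).group(1))
    for node, value in re.findall(r"(\w+): \w+/Sample = ([\d.]+)", output):
        result[node] = float(value)
    return result
```

One subtlety: the expected dicts compare equal despite containing `np.nan` only because tuple comparison short-circuits on identity (`np.nan is np.nan`); `np.nan == np.nan` is `False`.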
@@ -0,0 +1,69 @@
from ..utils._fetch_ops import *

import pytest


@pytest.mark.parametrize("input_line, expected", [
    # Format of expected: [OperatorName, [(OperandName_1, OperandInitValue_1),
    #                                     ...]]
    (r"Times(A, B, outputRank=1, tag='') = new ComputationNode [ operation = 'Times' ; inputs = ( A : B ) /*plus the function args*/ ]",
     ['Times', [('A', None), ('B', None), ('outputRank', 1)]]),

    (r"Convolution(weightNode, inputValueNode, kernelWidth, kernelHeight, outputChannels, horizontalSubsample, verticalSubsample, zeroPadding = false, maxTempMemSizeInSamples = 0, imageLayout='CHW', tag='') = new ComputationNode [ operation = 'Convolution' ; inputs = (weightNode : inputValueNode) /*plus the function args*/ ]",
     ['Convolution', [('weightNode', None), ('inputValueNode', None), ('kernelWidth', None), ('kernelHeight', None), ('outputChannels', None), ('horizontalSubsample', None), ('verticalSubsample', None), ('zeroPadding', False), ('maxTempMemSizeInSamples', 0), ('imageLayout', 'CHW')]]),

    (r"LearnableParameter(rows, cols, learningRateMultiplier = 1.0, init = 'uniform'/*|fixedValue|gaussian|fromFile*/, initValueScale = 1, value = 0, initFromFilePath = '', initOnCPUOnly=true, randomSeed=-1, tag='') = new ComputationNode [ operation = 'LearnableParameter' ; shape = new TensorShape [ dims = (rows : cols) ] /*plus the function args*/ ]",
     ['LearnableParameter', [('rows', None), ('cols', None), ('learningRateMultiplier', 1.0), ('init', 'uniform'), ('initValueScale', 1), ('value', 0), ('initFromFilePath', ''), ('initOnCPUOnly', True), ('randomSeed', -1)]]),
])
def test_parsing_comp_node(input_line, expected):
    match = REGEX_COMPNODE.match(input_line)
    po = CompNodeOperator(match)

    assert po.name == expected[0]
    assert len(po.operands) == len(expected[1])

    for po_op, (exp_op, exp_init) in zip(po.operands, expected[1]):
        assert po_op.name == exp_op
        assert po_op.init_value == exp_init


@pytest.mark.parametrize("input_line, expected", [
    (r"Constant(val, rows = 1, cols = 1, tag='') = Parameter(rows, cols, learningRateMultiplier = 0, init = 'fixedValue', value = val)",
     ['Constant', [('value', None), ('rows', 1), ('cols', 1)]]),  # note that we changed 'val' to 'value'
])
def test_parsing_inst_node(input_line, expected):
    match = REGEX_INSTANTIATION.match(input_line)
    po = InstantiationOperator(match)

    assert po.name == expected[0]
    assert len(po.operands) == len(expected[1])

    for po_op, (exp_op, exp_init) in zip(po.operands, expected[1]):
        assert po_op.name == exp_op
        assert po_op.init_value == exp_init


@pytest.mark.parametrize("input_line, expected", [
    (r"Length(x) = new NumericFunction [ what = 'Length' ; arg = x ]",
     ['Length', [('x', None)]]),

    (r"Ceil(x) = -Floor(-x)",
     ['Ceil', [('x', None)]]),

    (r"Round(x) = Floor(x+0.5)",
     ['Round', [('x', None)]]),

    (r"Abs(x) = if x >= 0 then x else -x",
     ['Abs', [('x', None)]]),
])
def test_parsing_standard_node(input_line, expected):
    match = REGEX_STANDARD.match(input_line)
    po = CompNodeOperator(match)

    assert po.name == expected[0]
    assert len(po.operands) == len(expected[1])

    for po_op, (exp_op, exp_init) in zip(po.operands, expected[1]):
        assert po_op.name == exp_op
        assert po_op.init_value == exp_init
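These parametrized cases double as documentation of the BrainScript declaration lines that `_fetch_ops` scrapes. For orientation, here is a regex in the spirit of `REGEX_COMPNODE`, reconstructed from the inputs above; the pattern and the operand handling are assumptions, and the shipped code may differ (for one, the real `CompNodeOperator` also converts defaults like `'1'` to `1`):

```python
import re

# Hypothetical stand-in for REGEX_COMPNODE: operator name, raw operand
# list, and the quoted operation name on the right-hand side.
COMPNODE = re.compile(
    r"(?P<operator>\w+)\((?P<operands>[^)]*)\)\s*=\s*new ComputationNode\s*"
    r"\[\s*operation\s*=\s*'(?P<operation>\w+)'")

line = (r"Times(A, B, outputRank=1, tag='') = new ComputationNode "
        r"[ operation = 'Times' ; inputs = ( A : B ) /*plus the function args*/ ]")
m = COMPNODE.match(line)
assert m.group('operator') == 'Times'
assert m.group('operands') == "A, B, outputRank=1, tag=''"

# Splitting the operand list and dropping 'tag' yields the (name, default)
# pairs the tests expect, with defaults still unconverted strings here.
parts = [op.split('=') for op in m.group('operands').split(',')]
pairs = [(p[0].strip(), p[1].strip() if len(p) > 1 else None) for p in parts]
assert [p for p in pairs if p[0] != 'tag'] == \
    [('A', None), ('B', None), ('outputRank', '1')]
```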
@@ -0,0 +1,142 @@
import numpy as np

from ..context import get_new_context, _CONTEXT
from ..graph import *
from ..graph import _tensor_to_text_format

import pytest

import scipy.sparse

# keeping things short
A = np.asarray
C = constant
I = input


# testing whether operator overloads result in the proper type
@pytest.mark.parametrize('root_node, expected', [
    # __add__ / __radd__
    (C(0) + C(1), Plus),
    (C(0) + 1, Plus),
    (0 + C(1), Plus),
    (0 + 1, int),

    # __sub__ / __rsub__
    (C(0) - C(1), Minus),
    (C(0) - 1, Minus),
    (0 - C(1), Minus),
    (0 - 1, int),

    # __mul__ / __rmul__ --> element-wise (!) multiplication
    (C(0) * C(1), ElementTimes),
    (C(0) * 1, ElementTimes),
    (0 * C(1), ElementTimes),
    (0 * 1, int),

    # __abs__
    (abs(C(0)), Abs),

    # __getitem__
    (C(np.arange(0, 10))[2:5], RowSlice),
    (C(np.arange(0, 10))[:5], RowSlice),
])
def test_overload_types(root_node, expected):
    assert isinstance(root_node, expected)


def test_overload_exception():
    with pytest.raises(ValueError):
        C(range(0, 10))[:]

    with pytest.raises(ValueError):
        C(range(0, 10))[0:3:2]


def _to_list(desc):
    return [line.strip() for line in desc.split('\n')]


def test_graph_with_same_node_twice():
    v0 = Constant(1)
    root_node = Plus(v0, v0)
    description, has_inputs, readers = root_node.to_config()
    assert len(_to_list(description)) == 2


@pytest.mark.parametrize("alias, idx, data, expected", [
    ('', 0, [A([1, 0]), A([0, 0, 1, 0])], ValueError),  # no alias given
    ('A', 0, [object()], ValueError),
])
def test_tensor_conversion_exceptions(alias, idx, data, expected):
    with pytest.raises(expected):
        _tensor_to_text_format(idx, alias, data)


@pytest.mark.parametrize("alias, idx, data, expected", [
    ('W', 0, A([]), "0 |W "),
    ('W', 0, A([[1, 0, 0, 0], [1, 0, 0, 0]]), """\
0 |W 1 1 0 0 0 0 0 0\
"""),
])
def test_tensor_conversion_dense(alias, idx, data, expected):
    assert _tensor_to_text_format(idx, alias, data,
                                  has_sequence_dimension=False) == expected


if False:
    @pytest.mark.parametrize("alias, data, expected", [
        ('W', [A({})], ""),
        ('W', [{3: 1, 50: 1, 2: 0}, {1: -5}], """\
0 |W 2:0 3:1 50:1
1 |W 1:-5\
"""),
    ])
    def test_tensor_conversion_sparse(alias, data, expected):
        # We use the dictionaries in data to create SciPy sparse
        # dictionary-of-keys matrices, which we then feed to the converter.
        dok_data = []
        for idx, data_elem in enumerate(data):
            d = scipy.sparse.dok_matrix((100, 1))
            for k, v in data_elem.items():
                d[k] = v
            dok_data.append(d)
        assert _tensor_to_text_format(idx, alias, dok_data) == expected


@pytest.mark.parametrize("data, expected", [
    ([], True),
    ([1], True),
    ([[1, 2]], True),
    ([[]], True),
    ([[A([1, 2])]], False),
    ([A([1, 2])], False),
    ([A([1, 2]), A([])], False),
])
def test_is_tensor(data, expected):
    assert is_tensor(data) == expected


@pytest.mark.parametrize("data, expected", [
    ([], False),
    ([1], False),
    ([[1, 2]], False),
    ([[]], False),
    ([[A([1, 2])]], False),
    ([A([1, 2])], True),
    ([A([1, 2]), A([])], True),
])
def test_is_tensor_list(data, expected):
    assert is_tensor_list(data) == expected


def test_loose_coupling():
    from cntk.ops.cntk1 import PastValue
    dh = PastValue(1, 'outnode')
    out = Times(dh, Constant(2), var_name='outnode')

    expected = ['v0 = PastValue(1, outnode, timeStep=1, defaultHiddenActivation=0.1)',
                'v1 = Constant(2, rows=1, cols=1)',
                'outnode = Times(v0, v1, outputRank=1)']

    description, has_inputs, readers = out.to_config()
    assert _to_list(description) == expected
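The dense-conversion cases fix the sample format exactly: `<sequence idx> |<alias> <values>`, with the tensor flattened column-major (note that `[[1, 0, 0, 0], [1, 0, 0, 0]]` serializes as `1 1 0 0 0 0 0 0`, matching CNTK's column-major tensor layout). Below is a dense-only sketch that satisfies both cases; it is hypothetical, and the real `_tensor_to_text_format` additionally validates its input and handles sparse matrices and the sequence dimension:

```python
import numpy as np

def dense_tensor_to_text_format(idx, alias, tensor):
    # Sketch of the dense behavior asserted above; not the actual
    # implementation in cntk/graph.py.
    if not alias:
        raise ValueError('alias is missing')
    # Column-major (Fortran-order) flattening reproduces the expected
    # '1 1 0 0 0 0 0 0' ordering.
    values = ' '.join(str(v) for v in np.asarray(tensor).ravel(order='F'))
    return '%i |%s %s' % (idx, alias, values)

assert dense_tensor_to_text_format(0, 'W', np.array([])) == '0 |W '
assert (dense_tensor_to_text_format(0, 'W', np.array([[1, 0, 0, 0], [1, 0, 0, 0]]))
        == '0 |W 1 1 0 0 0 0 0 0')
```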
@@ -0,0 +1,54 @@
# All the functional operator tests should go here.

import numpy as np
import pytest
from ..context import get_new_context
from ..graph import *
from ..reader import *

# keeping things short
C = constant
I = input
AA = np.asarray


def _test(root_node, expected, clean_up=True, backward_pass=False, input_node=None):
    with get_new_context() as ctx:
        ctx.clean_up = clean_up
        assert not ctx.input_nodes
        result = ctx.eval(root_node, None, backward_pass, input_node)

        assert len(result) == len(expected)
        for res, exp in zip(result, expected):
            assert np.allclose(res, exp)
            assert res.shape == AA(exp).shape


C_VALUES = [0, [[1, 2], [3, 4]]]


@pytest.fixture(scope="module", params=C_VALUES)
def c_arg(request):
    return request.param

c_left_arg = c_arg
c_right_arg = c_arg

# TODO: broken due to a problem in CNTK. Once fixed, merge them with the
# tests in linear_test.py.
if False:
    def test_op_add_constant(c_left_arg, c_right_arg):
        expected = [AA(c_left_arg) + AA(c_right_arg)]
        _test(C(c_left_arg) + c_right_arg, expected)
        _test(C(c_left_arg) + C(c_right_arg), expected)
        _test(c_left_arg + C(c_right_arg), expected)
        _test(c_left_arg + C(c_left_arg) + c_right_arg, c_left_arg + expected)

    def test_op_minus_constant(c_left_arg, c_right_arg):
        expected = [AA(c_left_arg) - AA(c_right_arg)]
        _test(C(c_left_arg) - c_right_arg, expected)
        _test(C(c_left_arg) - C(c_right_arg), expected)
        _test(c_left_arg - C(c_right_arg), expected)
        _test(c_left_arg - C(c_left_arg) + c_right_arg, c_left_arg - expected)

    def test_op_times_constant(c_left_arg, c_right_arg):
        expected = [AA(c_left_arg) * AA(c_right_arg)]
        _test(C(c_left_arg) * c_right_arg, expected)
        _test(C(c_left_arg) * C(c_right_arg), expected)
        _test(c_left_arg * C(c_right_arg), expected)
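One non-obvious trick above: assigning the parametrized fixture to two names (`c_left_arg = c_arg`, `c_right_arg = c_arg`) registers it twice, so a test requesting both names runs over the full cross product of `C_VALUES`. A self-contained illustration of the idiom:

```python
import pytest

@pytest.fixture(params=[0, 1])
def value(request):
    return request.param

# Two aliases of one parametrized fixture parametrize independently, so
# this test runs four times: (0, 0), (0, 1), (1, 0), (1, 1).
left = value
right = value

def test_cross_product(left, right):
    assert (left, right) in [(0, 0), (0, 1), (1, 0), (1, 1)]
```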
@@ -0,0 +1,26 @@
import os

import numpy as np
import pytest

from ..reader import *
from ..graph import *
from ..context import *
from ..ops import cntk1 as cntk1_ops

allclose = np.testing.assert_allclose


def test_NumPyReader(tmpdir):
    data = [[1, 2], [3, 4]]
    fn = str(tmpdir / 'test.txt')
    reader = NumPyReader(data, fn)

    input_node = cntk1_ops.Input(2, var_name='testInput')
    reader.add_input(input_node, 0, 2)
    out = input_node + 2

    with get_new_context() as ctx:
        result = ctx.eval(out, reader)
        for r, d in zip(result, data):
            assert np.all(r == np.asarray(d) + 2)

# TODO: test other readers
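For orientation, `NumPyReader` plausibly serializes `data` to `test.txt` in the same `idx |alias values` text format that the conversion tests above pin down; the sketch below is an assumption about that on-disk format, not verified reader output:

```python
import numpy as np

def write_numpy_as_text(data, filename, alias='testInput'):
    # Hypothetical sketch: one 'idx |alias values' line per sample.
    with open(filename, 'w') as f:
        for idx, sample in enumerate(data):
            values = ' '.join(str(v) for v in np.asarray(sample).ravel())
            f.write('%i |%s %s\n' % (idx, alias, values))

# write_numpy_as_text([[1, 2], [3, 4]], 'test.txt') would write:
#   0 |testInput 1 2
#   1 |testInput 3 4
```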