Fixing the LRN op to match the ONNX 1.2 spec. The 'size' attribute has the semantics of a diameter, not of a radius as it did before.

Spandan Tiwari 2018-07-02 15:19:14 -07:00
Parent: 63603e0f70
Commit: dd9705bfec
3 changed files with 8 additions and 10 deletions
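For reference, the relationship between the two conventions is straightforward: an ONNX window of size n extends (n - 1)/2 channels to each side of the current channel. A minimal sketch of the two mappings in plain Python (both function names are illustrative, not part of either API):

def radius_to_onnx_size(depth_radius):
    # CNTK stores a radius; ONNX 1.2 expects the full window width.
    return 2 * depth_radius + 1

def onnx_size_to_radius(size):
    # Inverse mapping used on import; assumes an odd ONNX 'size'.
    return (size - 1) // 2

assert radius_to_onnx_size(2) == 5
assert onnx_size_to_radius(5) == 2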


@@ -2891,7 +2891,7 @@ void CNTKToONNXHelper::CopyAttributes(const FunctionPtr& src, LotusIR::Node* nod
 auto alpha = (float)src->Attributes()[L"alpha"].Value<double>();
 auto beta = (float)src->Attributes()[L"beta"].Value<double>();
-node->AddAttribute(attributesMap[L"size"], depthRadius);
+node->AddAttribute(attributesMap[L"size"], 2*depthRadius + 1);
 node->AddAttribute(attributesMap[L"bias"], bias);
 node->AddAttribute(attributesMap[L"alpha"], alpha);
 node->AddAttribute(attributesMap[L"beta"], beta);
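To make the diameter semantics concrete, here is a small NumPy reference of the ONNX LRN formula, y = x / (bias + alpha/size * sum over the window of x^2)^beta; a sketch assuming NCHW input and an odd 'size', not the converter's code:

import numpy as np

def lrn_reference(x, size, alpha=1e-4, beta=0.75, bias=1.0):
    # x has shape (N, C, H, W); 'size' is the ONNX cross-channel diameter.
    radius = (size - 1) // 2
    channels = x.shape[1]
    sq = np.square(x)
    out = np.empty_like(x)
    for c in range(channels):
        # Window of 'size' channels centered on c, clipped at the edges.
        lo, hi = max(0, c - radius), min(channels, c + radius + 1)
        denom = (bias + alpha / size * sq[:, lo:hi].sum(axis=1)) ** beta
        out[:, c] = x[:, c] / denom
    return out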


@@ -2019,14 +2019,12 @@ FunctionPtr ONNXToCNTKHelper::CreateFunction(const Node *node, const std::vector
 }
 else if (onnxOpName == "LRN")
 {
-    // TODO: this is experimental code to load Facebook Caffe models.
-    // Operators are added so hopefully there is not further work needed.
-    size_t depthRadius = GetNamedAttributeAsInt64(node, "size");
-    double bias = GetNamedAttributeAsFloat(node, "bias");
-    double alpha = GetNamedAttributeAsFloat(node, "alpha");
-    double beta = GetNamedAttributeAsFloat(node, "beta");
-    FunctionPtr cntkFunction = LocalResponseNormalization(inputs[0],
-        depthRadius, bias, alpha, beta, ToWString(node->Name()));
+    size_t depthRadius = (GetNamedAttributeAsInt64(node, "size") - 1)/2;
+    double bias = static_cast<double>(GetNamedAttributeAsFloat(node, "bias", 1.0f));
+    double alpha = static_cast<double>(GetNamedAttributeAsFloat(node, "alpha", 1e-4f));
+    double beta = static_cast<double>(GetNamedAttributeAsFloat(node, "beta", 0.75f));
+    FunctionPtr cntkFunction = LocalResponseNormalization(inputs[0],
+        depthRadius, bias, alpha, beta, ToWString(node->Name()));
     return cntkFunction;
 }
 else if (onnxOpName == "AveragePool" || onnxOpName == "MaxPool")
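Besides the radius conversion, the import path now falls back to the ONNX 1.2 defaults (bias = 1.0, alpha = 1e-4, beta = 0.75) when an attribute is missing. The same logic as a Python sketch (the dict-based attribute access is assumed for illustration; the real code uses the C++ helpers above):

ONNX_LRN_DEFAULTS = {'bias': 1.0, 'alpha': 1e-4, 'beta': 0.75}

def lrn_attrs_to_cntk(attrs):
    # 'size' is required by the ONNX spec; the other attributes may be omitted.
    depth_radius = (int(attrs['size']) - 1) // 2
    bias = float(attrs.get('bias', ONNX_LRN_DEFAULTS['bias']))
    alpha = float(attrs.get('alpha', ONNX_LRN_DEFAULTS['alpha']))
    beta = float(attrs.get('beta', ONNX_LRN_DEFAULTS['beta']))
    return depth_radius, bias, alpha, beta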


@@ -625,7 +625,7 @@ def test_LogSoftmax(tmpdir, dtype):
 @pytest.mark.parametrize("dtype", DType_Config)
 def test_LRN(tmpdir, dtype, device_id):
     if device_id == -1 and dtype == np.float16:
-        pytest.skip('Test is skipped on CPU with float16 data')
+        pytest.skip('Test is skipped on CPU with float16 data, because it uses convolution.')
     device = cntk_device(device_id)
     with C.default_options(dtype=dtype):
         img_shape = (64, 32, 32)
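The body of test_LRN is truncated in this hunk; a round-trip check in the same spirit could look like the sketch below. Everything past the visible lines is an assumption (local_response_normalization and save/load with ModelFormat.ONNX are real CNTK APIs; the radius, shapes, and tolerance are illustrative):

import numpy as np
import cntk as C

def check_lrn_roundtrip(tmpdir):
    # Build a CNTK LRN with radius 2; after this fix the exported ONNX
    # node should carry size = 2*2 + 1 = 5 and reload to the same radius.
    x = C.input_variable((64, 32, 32), dtype=np.float32)
    model = C.local_response_normalization(x, depth_radius=2, bias=1.0,
                                           alpha=1e-4, beta=0.75)
    path = str(tmpdir) + '/lrn.onnx'
    model.save(path, format=C.ModelFormat.ONNX)
    reloaded = C.Function.load(path, format=C.ModelFormat.ONNX)
    data = np.random.rand(1, 64, 32, 32).astype(np.float32)
    assert np.allclose(model.eval(data), reloaded.eval(data), rtol=1e-3)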