Update pooling shape attribute before reaching pooling node.

Emad Barsoum 2016-11-08 17:31:56 -08:00
Parent 01929c4915
Commit 662bebe271
4 changed files with 21 additions and 18 deletions

View file

@@ -782,6 +782,8 @@ namespace CNTK
             NDShape outputMapCount = { 1 };
             std::vector<bool> sharing = { true };
             outputShape = ConvolutionOpOutputShape(op, inputs[0].Shape(), poolingWindowsShape, outputMapCount, strides, sharing, autoPadding, lowerPad, upperPad, false, inferDimensions);
+
+            functionConfig[PrimitiveFunction::AttributeNamePoolingWindowShape] = poolingWindowsShape;
             break;
         }
         case PrimitiveOpType::SumAll:
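
For context, a minimal Python sketch of what the added write-back accomplishes (names here are illustrative, not the actual C++ API): for global pooling the window shape starts out unknown, shape inference resolves it from the input, and the resolved value is stored back into the function's attributes so the downstream pooling node receives a concrete shape.

def resolve_pooling_window(input_shape, window_shape, attributes):
    # Hypothetical helper mirroring the C++ logic; None stands in for NDShape::Unknown.
    if window_shape is None:
        # Global pooling: cover every axis except the trailing channel axis
        # (CNTK's C++ NDShape stores the channel axis last).
        window_shape = input_shape[:-1]
    # Write the resolved shape back, as the added line above does for functionConfig.
    attributes['poolingWindowShape'] = window_shape
    return window_shape

# e.g. resolve_pooling_window((32, 32, 64), None, {}) returns (32, 32)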
@@ -820,9 +822,9 @@ namespace CNTK
         case PrimitiveOpType::Times:
         {
             assert(inputs.size() == 2);
-            auto outputRank = functionConfig[PrimitiveFunction::AttributeNameOutputRank].Value<size_t>();
-            auto inferInputRankToMap = functionConfig[PrimitiveFunction::AttributeNameInferInputRankToMap].Value<int>();
-            outputShape = TimesOpOutputShape(inputs[0], inputs[1], outputRank, inferInputRankToMap, inferDimensions);
+            auto outputRank = functionConfig[PrimitiveFunction::AttributeNameOutputRank].Value<size_t>();
+            auto inferInputRankToMap = functionConfig[PrimitiveFunction::AttributeNameInferInputRankToMap].Value<int>();
+            outputShape = TimesOpOutputShape(inputs[0], inputs[1], outputRank, inferInputRankToMap, inferDimensions);
             break;
         }
         case PrimitiveOpType::TransposeTimes:
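
As a rough illustration of the shape rule that TimesOpOutputShape implements (a hedged simplification that ignores inferInputRankToMap and inference of unknown dimensions): the left operand contributes its leading output_rank axes to the output, its remaining axes are contracted against the leading axes of the right operand, and the right operand's leftover axes carry through.

def times_output_shape(left_shape, right_shape, output_rank):
    # Hypothetical sketch, not the CNTK implementation.
    out_axes = left_shape[:output_rank]      # axes that survive into the output
    reduce_axes = left_shape[output_rank:]   # axes contracted away
    if reduce_axes != right_shape[:len(reduce_axes)]:
        raise ValueError('operand shapes are incompatible for Times')
    return out_axes + right_shape[len(reduce_axes):]

# e.g. times_output_shape((64, 784), (784,), output_rank=1) returns (64,)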
@@ -852,9 +854,9 @@ namespace CNTK
         case PrimitiveOpType::Convolution:
         {
             assert(inputs.size() == 2);
-            auto& strides = functionConfig[PrimitiveFunction::AttributeNameStrides].Value<NDShape>();
-            auto& lowerPad = functionConfig[PrimitiveFunction::AttributeNameLowerPad].Value<NDShape>();
-            auto& upperPad = functionConfig[PrimitiveFunction::AttributeNameUpperPad].Value<NDShape>();
+            auto& strides = functionConfig[PrimitiveFunction::AttributeNameStrides].Value<NDShape>();
+            auto& lowerPad = functionConfig[PrimitiveFunction::AttributeNameLowerPad].Value<NDShape>();
+            auto& upperPad = functionConfig[PrimitiveFunction::AttributeNameUpperPad].Value<NDShape>();
             auto sharing = AsVector<bool>(functionConfig[PrimitiveFunction::AttributeNameSharing].Value<std::vector<DictionaryValue>>());
             auto autoPadding = AsVector<bool>(functionConfig[PrimitiveFunction::AttributeNameAutoPadding].Value<std::vector<DictionaryValue>>());
             bool transpose = functionConfig[PrimitiveFunction::AttributeNameTranspose].Value<bool>();
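
For reference, the standard formula behind convolution shape inference, sketched for a single spatial axis (a simplification that ignores the output-map count, the sharing flags, and explicit lower/upper padding):

import math

def conv_output_dim(input_dim, kernel_dim, stride, auto_pad):
    # Hypothetical one-axis sketch of ConvolutionOpOutputShape's arithmetic.
    if auto_pad:
        # 'same'-style padding: one output position per stride step.
        return math.ceil(input_dim / stride)
    # No padding: the kernel must fit entirely inside the input.
    return (input_dim - kernel_dim) // stride + 1

# e.g. conv_output_dim(32, 3, 1, auto_pad=False) returns 30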

View file

@@ -783,10 +783,6 @@ public:
         const auto& inputShape = GetInputSampleLayout(0);

-        // In case of pooling if the kernel shape is unknown, then treat it as global pooling.
-        if (m_kernelShape == NDShape::Unknown)
-            m_kernelShape = inputShape.SubShape(0, inputShape.Rank()-1);
-
         // infer reduction dimensions if not given
         InferReductionDims(inputShape, TensorShape());
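
The lines removed here are the node-level fallback that the attribute write-back in the first hunk makes redundant: by the time the pooling node sees the kernel shape, it is already concrete. In Python terms, the dropped fallback amounted to roughly:

if kernel_shape is None:             # None standing in for NDShape::Unknown
    kernel_shape = input_shape[:-1]  # global pooling: all axes but the trailing channel axis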

View file

@@ -171,11 +171,12 @@ def Convolution(filter_shape, # e.g. (3,3)
     apply_x = apply_x >> activation
     return Block(apply_x, 'Convolution', Record(W=W, b=b))

-# MaxPooling, AveragePooling -- create a max- or average-pooling layer
-# TODO: do we need MaxPooling and AveragePooling?
-# TODO: This is not really a layer as it does not hold learnable parameters. So:
-#  - keep it in layer format, since users may think about it this way?
-#  - turn it into a function (lower-case)? Then how would it work inside Sequential() (we'd need partial application)?
+# Create a Pooling layer with one of the following types:
+#
+#   MaxPooling and GlobalMaxPooling
+#   AveragePooling and GlobalAveragePooling
+#
+# Setting filter_shape to None means global pooling.
 from cntk.cntk_py import PoolingType_Max, PoolingType_Average
 def Pooling(op,            # PoolingType_Max or _Average
             filter_shape,  # e.g. (3,3)
@@ -192,21 +193,25 @@ def Pooling(op, # PoolingType_Max or _Average
         raise ValueError('Pooling: op must be PoolingType_Max or PoolingType_Average')
     return Block(apply_x, op_name)

 # MaxPooling
 def MaxPooling(filter_shape,  # e.g. (3,3)
                strides=1,
                pad=False):
     return Pooling(PoolingType_Max, filter_shape, strides=strides, pad=pad)

 # AveragePooling
 def AveragePooling(filter_shape,  # e.g. (3,3)
                    strides=1,
                    pad=False):
     return Pooling(PoolingType_Average, filter_shape, strides=strides, pad=pad)

 # GlobalMaxPooling
 def GlobalMaxPooling():
-    return Pooling(PoolingType_Max, None)
+    return Pooling(PoolingType_Max, None, pad=False)

 # GlobalAveragePooling
 def GlobalAveragePooling():
-    return Pooling(PoolingType_Average, None)
+    return Pooling(PoolingType_Average, None, pad=False)

 # Recurrence() -- run a block recurrently over a time sequence
 def Recurrence(over, go_backwards=False, initial_state=initial_state_default_or_None):
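
A short usage sketch of the factories above (assuming the cntk.layers and cntk.models APIs as they appear in this commit):

from cntk.layers import MaxPooling, GlobalAveragePooling
from cntk.models import Sequential

model = Sequential([
    MaxPooling((3, 3), strides=2),  # fixed 3x3 window, stride 2, no padding
    GlobalAveragePooling()          # window inferred from the input; pad=False by design
])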

View file

@@ -9,7 +9,7 @@ import math
 import numpy as np
 from cntk.blocks import default_options
-from cntk.layers import Convolution, GlobalAveragePooling, Dropout, BatchNormalization, Dense
+from cntk.layers import Convolution, AveragePooling, GlobalAveragePooling, Dropout, BatchNormalization, Dense
 from cntk.models import Sequential, LayerStack
 from cntk.utils import *
 from cntk.io import MinibatchSource, ImageDeserializer, StreamDef, StreamDefs