Project Philly 2017-02-16 18:55:26 -08:00
Parents 7ea9a1c6e6 2eaef84d7b
Commit 721a6f71a2
14 changed files with 59 additions and 58 deletions

View file

@@ -32,7 +32,7 @@ TrainConvNet = {
     x2s = SplitDimension(x2, 3, 1)
     # 3D convolution with a filter that has a non 1-size only in the 3rd axis, and does not reduce since the reduction dimension is fake and 1
     W = ParameterTensor{(1:1:2*n+1:1), learningRateMultiplier = 0, initValue = alpha/(2*n+1)}
-    y = Convolution (W, x2s, (1:1:2*n+1), mapDims = 1, stride = 1, sharing = true, autoPadding = true, lowerPad = 0, upperPad = 0, transpose = false, maxTempMemSizeInSamples = 0)
+    y = Convolution (W, x2s, (1:1:2*n+1), mapDims = 1, stride = 1, sharing = true, autoPadding = true, lowerPad = 0, upperPad = 0, maxTempMemSizeInSamples = 0)
     # reshape back to remove the fake singleton reduction dimension
     b = FlattenDimensions(y, 3, 2)
     den = Exp (beta .* Log(k + b))
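Note on the block above: it emulates cross-channel LRN without a dedicated node by squaring the input, sliding a (2*n+1)-tap averaging filter over the channel axis as a 3D convolution with a fake singleton reduction dimension, then computing (k + average)^beta via Exp(beta .* Log(...)). A rough numpy rendering of the same denominator, with illustrative parameter defaults not taken from this file:

import numpy as np

def lrn_denominator(x, n=2, k=1.0, alpha=1e-4, beta=0.75):
    # x has shape (C, H, W); average x^2 over a (2*n+1)-wide,
    # zero-padded window along the channel axis, as the convolution
    # above does, then apply (k + .)^beta.
    C = x.shape[0]
    x2 = x * x
    scale = alpha / (2 * n + 1)
    b = np.empty_like(x2)
    for c in range(C):
        lo, hi = max(0, c - n), min(C, c + n + 1)
        b[c] = scale * x2[lo:hi].sum(axis=0)
    return np.power(k + b, beta)  # == Exp(beta .* Log(k + b))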

View file

@@ -117,7 +117,7 @@ def convnetlrn_cifar10_dataaug(reader_train, reader_test, epoch_size=50000, max_
     }
     cntk.utils.log_number_of_parameters(z) ; print()
-    progress_printer = cntk.utils.ProgressPrinter(tag='Training')
+    progress_printer = cntk.utils.ProgressPrinter(tag='Training', num_epochs=max_epochs)
     # perform model training
     for epoch in range(max_epochs): # loop over epochs
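This hunk is the first of several in this commit that thread the planned epoch count into ProgressPrinter. With num_epochs set, the printer can report each completed epoch against the total (epoch summaries along the lines of "Finished Epoch[1 of 80]") instead of an open-ended index. A minimal sketch of the construction, with an assumed max_epochs value:

import cntk

max_epochs = 80  # illustrative; each script passes its own max_epochs
progress_printer = cntk.utils.ProgressPrinter(tag='Training', num_epochs=max_epochs)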

View file

@@ -84,10 +84,10 @@ def convnet_cifar10(debug_output=False):
     }
     cntk.utils.log_number_of_parameters(z) ; print()
-    progress_printer = cntk.utils.ProgressPrinter(tag='Training')
+    max_epochs = 30
+    progress_printer = cntk.utils.ProgressPrinter(tag='Training', num_epochs=max_epochs)
     # Get minibatches of images to train with and perform model training
-    max_epochs = 30
     for epoch in range(max_epochs): # loop over epochs
         sample_count = 0
         while sample_count < epoch_size: # loop over minibatches in the epoch

View file

@@ -95,7 +95,7 @@ def convnet_cifar10_dataaug(reader_train, reader_test, epoch_size = 50000, max_e
     }
     cntk.utils.log_number_of_parameters(z) ; print()
-    progress_printer = cntk.utils.ProgressPrinter(tag='Training')
+    progress_printer = cntk.utils.ProgressPrinter(tag='Training', num_epochs=max_epochs)
     # perform model training
     for epoch in range(max_epochs): # loop over epochs

View file

@@ -74,10 +74,10 @@ def convnet_mnist(debug_output=False):
     }
     cntk.utils.log_number_of_parameters(z) ; print()
-    progress_printer = cntk.utils.ProgressPrinter(tag='Training')
+    max_epochs = 40
+    progress_printer = cntk.utils.ProgressPrinter(tag='Training', num_epochs=max_epochs)
     # Get minibatches of images to train with and perform model training
-    max_epochs = 40
     for epoch in range(max_epochs): # loop over epochs
         sample_count = 0
         while sample_count < epoch_size: # loop over minibatches in the epoch

View file

@@ -97,7 +97,7 @@ def train_and_evaluate(reader_train, reader_test, network_name, epoch_size, max_
     }
     log_number_of_parameters(z) ; print()
-    progress_printer = ProgressPrinter(tag='Training')
+    progress_printer = ProgressPrinter(tag='Training', num_epochs=max_epochs)
     # perform model training

View file

@@ -7,6 +7,9 @@
 from __future__ import print_function
 import zipfile
 import os
+from sys import platform
+import shutil
 try:
     from urllib.request import urlretrieve
 except ImportError:
@@ -26,6 +29,15 @@ def download_grocery_data():
         print('Extracting ' + filename + '...')
         with zipfile.ZipFile(filename) as myzip:
             myzip.extractall(dataset_folder)
+        if platform != "win32":
+            testfile = os.path.join(dataset_folder, "grocery", "test.txt")
+            unixfile = os.path.join(dataset_folder, "grocery", "test_unix.txt")
+            out = open(unixfile, 'w')
+            with open(testfile) as f:
+                for line in f:
+                    out.write(line.replace('\\', '/'))
+            out.close()
+            shutil.move(unixfile, testfile)
     finally:
         os.remove(filename)
     print('Done.')
@@ -34,4 +46,4 @@ def download_grocery_data():
 if __name__ == "__main__":
     download_grocery_data()
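The added block works around Windows-style backslashes shipped inside test.txt: on non-Windows platforms it rewrites them to forward slashes, staging the result in a temporary file. The same idea as a standalone helper (the function name and temp suffix are illustrative, not from the commit):

import shutil

def normalize_separators(path, tmp_suffix="_unix"):
    # Rewrite '\' to '/' in a text file in place, staging the
    # result in a sibling temp file exactly as the diff above does.
    tmp = path + tmp_suffix
    with open(path) as src, open(tmp, 'w') as dst:
        for line in src:
            dst.write(line.replace('\\', '/'))
    shutil.move(tmp, path)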

View file

@@ -13,8 +13,7 @@ import cntk
 # variables and stuff  #
 ########################
-cntk_dir = os.path.dirname(os.path.abspath(__file__)) + "/../../../.." # data resides in the CNTK folder
-data_dir = cntk_dir + "/Examples/LanguageUnderstanding/ATIS/Data" # under Examples/LanguageUnderstanding/ATIS
+data_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", "Data")
 vocab_size = 943 ; num_labels = 129 ; num_intents = 26 # number of words in vocab, slot labels, and intent labels
 model_dir = "./Models"
@@ -92,8 +91,8 @@ def train(reader, model, max_epochs):
     # process minibatches and perform model training
     cntk.utils.log_number_of_parameters(z) ; print()
-    progress_printer = cntk.ProgressPrinter(freq=100, first=10, tag='Training') # more detailed logging
-    #progress_printer = ProgressPrinter(tag='Training')
+    progress_printer = cntk.ProgressPrinter(freq=100, first=10, tag='Training', num_epochs=max_epochs) # more detailed logging
+    #progress_printer = ProgressPrinter(tag='Training', num_epochs=max_epochs)
     t = 0
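Besides gaining num_epochs, this printer keeps its more detailed cadence: as I read the CNTK 2.x ProgressPrinter arguments, freq=100 emits a log line every 100 minibatches and first=10 holds off until 10 minibatches have passed, so the noisy warm-up is skipped. A short sketch with an assumed epoch count:

import cntk

max_epochs = 8  # assumed; train() receives the real value from its caller
progress_printer = cntk.ProgressPrinter(freq=100, first=10, tag='Training',
                                        num_epochs=max_epochs)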

View file

@@ -139,7 +139,7 @@ def create_inputs(vocab_dim):
     return input_sequence, label_sequence
 # Creates and trains a character-level language model
-def train_lm(training_file, max_num_minibatches):
+def train_lm(training_file, epochs, max_num_minibatches):
     # load the data and vocab
     data, char_to_ix, ix_to_char, data_size, vocab_dim = load_data_and_vocab(training_file)
@@ -168,46 +168,34 @@ def train_lm(training_file, max_num_minibatches):
     trainer = Trainer(z, (ce, errs), learner)
     sample_freq = 1000
-    epochs = 50
-    minibatches_per_epoch = int((data_size / minibatch_size))
-    minibatches = min(epochs * minibatches_per_epoch, max_num_minibatches)
+    minibatches_per_epoch = min(data_size // minibatch_size, max_num_minibatches // epochs)
     # print out some useful training information
-    log_number_of_parameters(z) ; print()
-    progress_printer = ProgressPrinter(freq=100, tag='Training')
+    log_number_of_parameters(z)
+    print ("Running %d epochs with %d minibatches per epoch" % (epochs, minibatches_per_epoch))
+    print()
-    e = 0
-    p = 0
-    for i in range(0, minibatches):
-        if p + minibatch_size+1 >= data_size:
-            p = 0
-            e += 1
-            model_filename = "models/shakespeare_epoch%d.dnn" % e
-            z.save(model_filename)
-            print("Saved model to '%s'" % model_filename)
-        # get the data
-        features, labels = get_data(p, minibatch_size, data, char_to_ix, vocab_dim)
+    progress_printer = ProgressPrinter(freq=100, tag='Training')
+    for e in range(0, epochs):
         # Specify the mapping of input variables in the model to actual minibatch data to be trained with
         # If it's the start of the data, we specify that we are looking at a new sequence (True)
-        mask = [False]
-        if p == 0:
-            mask = [True]
-        arguments = ({input_sequence : features, label_sequence : labels}, mask)
-        trainer.train_minibatch(arguments)
+        mask = [True]
+        for b in range(0, minibatches_per_epoch):
+            # get the data
+            features, labels = get_data(b, minibatch_size, data, char_to_ix, vocab_dim)
+            arguments = ({input_sequence : features, label_sequence : labels}, mask)
+            mask = [False]
+            trainer.train_minibatch(arguments)
-        progress_printer.update_with_trainer(trainer, with_metric=True) # log progress
-        if i % sample_freq == 0:
-            print(sample(z, ix_to_char, vocab_dim, char_to_ix))
+            progress_printer.update_with_trainer(trainer, with_metric=True) # log progress
+            global_minibatch = e*minibatches_per_epoch + b
+            if global_minibatch % sample_freq == 0:
+                print(sample(z, ix_to_char, vocab_dim, char_to_ix))
-        p += minibatch_size
-    # Do a final save of the model
-    model_filename = "models/shakespeare_epoch%d.dnn" % e
-    z.save(model_filename)
-    print("Saved model to '%s'" % model_filename)
+        model_filename = "models/shakespeare_epoch%d.dnn" % (e+1)
+        z.save_model(model_filename)
+        print("Saved model to '%s'" % model_filename)
 def load_and_sample(model_filename, vocab_filename, prime_text='', use_hardmax=False, length=1000, temperature=1.0):
@@ -223,13 +211,13 @@ def load_and_sample(model_filename, vocab_filename, prime_text='', use_hardmax=F
     return sample(model, ix_to_char, len(chars), char_to_ix, prime_text=prime_text, use_hardmax=use_hardmax, length=length, temperature=temperature)
-def train_and_eval_char_rnn(max_num_minibatches=sys.maxsize):
-    # train the LM
-    train_lm("data/tinyshakespeare.txt", max_num_minibatches)
+def train_and_eval_char_rnn(epochs=50, max_num_minibatches=sys.maxsize):
+    # train the LM
+    train_lm("data/tinyshakespeare.txt", epochs, max_num_minibatches)
     # load and sample
     text = "T"
-    return load_and_sample("models/shakespeare_epoch0.dnn", "data/tinyshakespeare.txt.vocab", prime_text=text, use_hardmax=False, length=100, temperature=0.95)
+    return load_and_sample("models/shakespeare_epoch%d.dnn" % (epochs), "data/tinyshakespeare.txt.vocab", prime_text=text, use_hardmax=False, length=100, temperature=0.95)
 if __name__=='__main__':
     # Specify the target device to be used for computing, if you do not want to
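Two details of the restructured loop are worth spelling out. The mask passed alongside each minibatch marks sequence boundaries for the recurrence: [True] on the first minibatch of an epoch starts a fresh sequence, and [False] afterwards carries hidden state across minibatches. And the per-epoch minibatch count is now capped both by the data size and by an overall minibatch budget split evenly across epochs. A runnable sketch of that arithmetic with illustrative numbers (tinyshakespeare is roughly 1.1M characters):

data_size, minibatch_size = 1100000, 100      # approximate figures, for illustration
epochs, max_num_minibatches = 50, 10000
minibatches_per_epoch = min(data_size // minibatch_size,    # data-limited cap: 11000
                            max_num_minibatches // epochs)  # budget-limited cap: 200
print(minibatches_per_epoch)                  # -> 200: the budget, not the data, binds here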

View file

@@ -23,7 +23,7 @@ from _cntk_py import set_computation_network_trace_level
 # Paths relative to current python file.
 abs_path = os.path.dirname(os.path.abspath(__file__))
-data_path = os.path.join(abs_path, "..", "..", "Datasets", "UCF11")
+data_path = os.path.join(abs_path, "..", "..", "DataSets", "UCF11")
 model_path = os.path.join(abs_path, "Models")
 # Define the reader for both training and evaluation action.
@@ -194,14 +194,14 @@ def conv3d_ucf11(train_reader, test_reader, max_epochs=30):
     lr_per_sample = [0.01]*10+[0.001]*10+[0.0001]
     lr_schedule = learning_rate_schedule(lr_per_sample, epoch_size=epoch_size, unit=UnitType.sample)
     momentum_time_constant = 4096
-    mm_schedule = momentum_as_time_constant_schedule(momentum_time_constant, epoch_size=epoch_size)
+    mm_schedule = momentum_as_time_constant_schedule([momentum_time_constant], epoch_size=epoch_size)
     # Instantiate the trainer object to drive the model training
     learner = momentum_sgd(z.parameters, lr_schedule, mm_schedule, True)
     trainer = Trainer(z, (ce, pe), learner)
     log_number_of_parameters(z) ; print()
-    progress_printer = ProgressPrinter(tag='Training')
+    progress_printer = ProgressPrinter(tag='Training', num_epochs=max_epochs)
     # Get minibatches of images to train with and perform model training
     for epoch in range(max_epochs): # loop over epochs
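The bracket change on mm_schedule is subtle: passing a list instead of a bare scalar turns the momentum time constant into a schedule, where (as I understand the CNTK 2.x schedule API) each entry applies for epoch_size samples and the last entry repeats indefinitely. With a single element the behavior is unchanged, but the list form allows staging later, e.g. (sketch with an illustrative second value; import path assumed from the CNTK 2.0 betas):

from cntk.learner import momentum_as_time_constant_schedule

# 2048 for the first epoch_size samples, 4096 from then on (values illustrative)
mm_schedule = momentum_as_time_constant_schedule([2048, 4096], epoch_size=50000)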

View file

@@ -2374,7 +2374,9 @@ ElemType GPUMatrix<ElemType>::AbsoluteMax() const
         int resInd = 0;
         cublasIdamax(cuHandle, (CUDA_LONG)GetNumElements(), reinterpret_cast<double*>(Data()), 1, &resInd);
         resInd--;
+        CUDA_CALL(cudaMemcpy(reinterpret_cast<double*>(&res), Data() + resInd, sizeof(double), cudaMemcpyDeviceToHost));
         return res;
     }
 }
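The one-line fix matters because cublasIdamax only returns the 1-based index of the element with the largest magnitude; the value itself still lives in device memory, so without the added cudaMemcpy the function returned res without ever copying the located element back. The intended semantics in a small numpy analogue:

import numpy as np

data = np.array([0.5, -3.25, 2.0])
res_ind = int(np.argmax(np.abs(data)))   # cuBLAS is 1-based, hence the resInd-- above
res = data[res_ind]                      # the read the added cudaMemcpy performs
print(res_ind, res)                      # -> 1 -3.25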

View file

@@ -17,5 +17,5 @@ def test_char_rnn(device_id):
     set_default_device(cntk_device(device_id))
     # Just run and verify it does not crash
-    output = train_and_eval_char_rnn(200)
+    output = train_and_eval_char_rnn(1, 200)
     print(output)

View file

@@ -483,7 +483,7 @@
 "    }\n",
 "\n",
 "    log_number_of_parameters(z) ; print()\n",
-"    progress_printer = ProgressPrinter(tag='Training')\n",
+"    progress_printer = ProgressPrinter(tag='Training', num_epochs=max_epochs)\n",
 "\n",
 "    # perform model training\n",
 "    batch_index = 0\n",

View file

@@ -437,8 +437,8 @@
 "\n",
 "    # process minibatches and perform model training\n",
 "    log_number_of_parameters(model)\n",
-"    progress_printer = ProgressPrinter(tag='Training')\n",
-"    #progress_printer = ProgressPrinter(freq=100, first=10, tag='Training') # more detailed logging\n",
+"    progress_printer = ProgressPrinter(tag='Training', num_epochs=max_epochs)\n",
+"    #progress_printer = ProgressPrinter(freq=100, first=10, tag='Training', num_epochs=max_epochs) # more detailed logging\n",
 "\n",
 "    t = 0\n",
 "    for epoch in range(max_epochs): # loop over epochs\n",
@@ -560,7 +560,7 @@
 "    dummy_learner = adam_sgd(criterion.parameters, \n",
 "                             lr=lr_schedule, momentum=momentum_as_time_constant, low_memory=True)\n",
 "    evaluator = Trainer(model, criterion, dummy_learner)\n",
-"    progress_printer = ProgressPrinter(tag='Evaluation')\n",
+"    progress_printer = ProgressPrinter(tag='Evaluation', num_epochs=0)\n",
 "\n",
 "    while True:\n",
 "        minibatch_size = 500\n",