From acb97935e8cbd8acc392c413c4dc0c8009544a69 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Big=20Data=20Tech=2E=20Lab=20=EC=9D=B4=ED=83=9C=ED=9B=88?= Date: Thu, 29 Jun 2017 11:53:52 +0900 Subject: [PATCH] Fix typos --- Examples/Evaluation/CSEvalClient/Program.cs | 4 ++-- Examples/SequenceToSequence/PennTreebank/README.md | 2 +- .../How_to_train_using_declarative_and_imperative_API.ipynb | 2 +- Source/CNTK/BrainScript/BrainScriptEvaluator.cpp | 2 +- Source/CNTKv2LibraryDll/API/CNTKLibrary.h | 2 +- Source/CNTKv2LibraryDll/Common.cpp | 2 +- Source/Common/Include/RandomOrdering.h | 2 +- Source/ComputationNetworkLib/TrainingNodes.cpp | 4 ++-- Source/ComputationNetworkLib/TrainingNodes.h | 2 +- Source/SGDLib/SGD.cpp | 2 +- Tests/EndToEndTests/UnitTests/MathTests/baseline.txt | 2 +- Tests/UnitTests/MathTests/GPUMatrixTests.cpp | 2 +- bindings/python/cntk/debugging/debug.py | 2 +- bindings/python/cntk/ops/__init__.py | 2 +- 14 files changed, 16 insertions(+), 16 deletions(-) diff --git a/Examples/Evaluation/CSEvalClient/Program.cs b/Examples/Evaluation/CSEvalClient/Program.cs index ef52d9948..67c325b0b 100644 --- a/Examples/Evaluation/CSEvalClient/Program.cs +++ b/Examples/Evaluation/CSEvalClient/Program.cs @@ -333,10 +333,10 @@ namespace Microsoft.MSR.CNTK.Extensibility.Managed.CSEvalClient // Specifies the number of times to iterate through the test file (epochs) int numRounds = 1; - // Counts the number of evaluations accross all models + // Counts the number of evaluations across all models int count = 0; - // Counts the number of failed evaluations (output != expected) accross all models + // Counts the number of failed evaluations (output != expected) across all models int errorCount = 0; // The examples assume the executable is running from the data folder diff --git a/Examples/SequenceToSequence/PennTreebank/README.md b/Examples/SequenceToSequence/PennTreebank/README.md index ecc3f8ff4..864fccaac 100644 --- a/Examples/SequenceToSequence/PennTreebank/README.md +++ b/Examples/SequenceToSequence/PennTreebank/README.md @@ -10,7 +10,7 @@ See License.md in the root level folder of the CNTK repository for full license ## Overview -|Data |The Penn Treebank Project (https://www.cis.upenn.edu/~treebank/) annotates naturally-occuring text for linguistic structure . +|Data |The Penn Treebank Project (https://www.cis.upenn.edu/~treebank/) annotates naturally-occurring text for linguistic structure . |:---------|:---| |Purpose |Showcase how to train a recurrent network for text data. |Network |SimpleNetworkBuilder for recurrent network with two hidden layers. diff --git a/Manual/How_to_train_using_declarative_and_imperative_API.ipynb b/Manual/How_to_train_using_declarative_and_imperative_API.ipynb index 98f8cee3a..2af2a4efb 100644 --- a/Manual/How_to_train_using_declarative_and_imperative_API.ipynb +++ b/Manual/How_to_train_using_declarative_and_imperative_API.ipynb @@ -473,7 +473,7 @@ "In order to report progress, please provide an instance of the [ProgressWriter](https://www.cntk.ai/pythondocs/cntk.logging.progress_print.html#module-cntk.logging.progress_print). It has its own set of parameters to control how often to print the loss value. If you need to have a custom logic for retrieving current status, please consider implementing your own ProgressWriter.\n", "\n", "### Checkpointing\n", - "[Checkpoint configuration](https://www.cntk.ai/pythondocs/cntk.train.training_session.html#cntk.train.training_session.CheckpointConfig) specifies how often to save a checkpoint to the given file. 
The checkpointing frequency is specified in samples. When given, the method takes care of saving/restoring the state accross the trainer/learners/minibatch source and propagating this information among distributed workers. If you need to preserve all checkpoints that were taken during training, please set `preserveAll` to true. \n", + "[Checkpoint configuration](https://www.cntk.ai/pythondocs/cntk.train.training_session.html#cntk.train.training_session.CheckpointConfig) specifies how often to save a checkpoint to the given file. The checkpointing frequency is specified in samples. When given, the method takes care of saving/restoring the state across the trainer/learners/minibatch source and propagating this information among distributed workers. If you need to preserve all checkpoints that were taken during training, please set `preserveAll` to true. \n", "\n", "### Validation\n", "When [cross validation](https://www.cntk.ai/pythondocs/cntk.train.training_session.html#cntk.train.training_session.CrossValidationConfig) config is given, the training session runs the validation on the specified minibatch source with the specified frequency and reports average metric error. The user can also provide a cross validation callback, that will be called with the specified frequency. It is up to the user to perform validation in the callback and return back `True` if the training should be continued, or `False` otherwise. \n", diff --git a/Source/CNTK/BrainScript/BrainScriptEvaluator.cpp b/Source/CNTK/BrainScript/BrainScriptEvaluator.cpp index 795a749f7..da6c88440 100644 --- a/Source/CNTK/BrainScript/BrainScriptEvaluator.cpp +++ b/Source/CNTK/BrainScript/BrainScriptEvaluator.cpp @@ -920,7 +920,7 @@ static wstring FormatConfigValue(ConfigValuePtr arg, const wstring &how); // StringFunction implements // - Format // - Chr(c) -- gives a string of one character with Unicode value 'c' -// - Replace(s,what,withwhat) -- replace all occurences of 'what' with 'withwhat' +// - Replace(s,what,withwhat) -- replace all occurrences of 'what' with 'withwhat' // - Substr(s,begin,num) -- get a substring // TODO: RegexReplace() class StringFunction : public String diff --git a/Source/CNTKv2LibraryDll/API/CNTKLibrary.h b/Source/CNTKv2LibraryDll/API/CNTKLibrary.h index df2444d17..032ffff25 100644 --- a/Source/CNTKv2LibraryDll/API/CNTKLibrary.h +++ b/Source/CNTKv2LibraryDll/API/CNTKLibrary.h @@ -35,7 +35,7 @@ namespace CNTK { /// /// Checked mode enables additional runtime verification such as: - /// - Tracking NaN occurences in sequence gaps. + /// - Tracking NaN occurrences in sequence gaps. /// - Function graph verification after binding of free static axes to actual values at runtime /// /// Enabling checked mode incurs additional runtime costs and is meant to be used as a debugging aid. diff --git a/Source/CNTKv2LibraryDll/Common.cpp b/Source/CNTKv2LibraryDll/Common.cpp index 5f205bb67..e072d24ce 100644 --- a/Source/CNTKv2LibraryDll/Common.cpp +++ b/Source/CNTKv2LibraryDll/Common.cpp @@ -75,7 +75,7 @@ namespace CNTK // This is used to generate a default seed value for random parameter initializer and also // for stateful nodes (dropout, and both flavors of random sample). The 'perWorkerLocalValue' flag - // indicates if the generated value should be identical accross individual workers in distributed + // indicates if the generated value should be identical across individual workers in distributed // setting or if each worker should get a different seed value. 
size_t GenerateRandomSeed(bool perWorkerLocalValue /*= false*/) { diff --git a/Source/Common/Include/RandomOrdering.h b/Source/Common/Include/RandomOrdering.h index 701442c01..eaa6fac5c 100644 --- a/Source/Common/Include/RandomOrdering.h +++ b/Source/Common/Include/RandomOrdering.h @@ -27,7 +27,7 @@ static inline size_t rand(const size_t begin, const size_t end) // Rand based on Mersenne Twister. // We use our own distribution in order to match baselines between different operating systems, -// because uniform_distribution is not guranteed to provide the same numbers on different platforms. +// because uniform_distribution is not guaranteed to provide the same numbers on different platforms. // TODO: Switching to Boost would eliminate this problem. static inline size_t RandMT(const size_t begin, const size_t end, std::mt19937_64& rng) { diff --git a/Source/ComputationNetworkLib/TrainingNodes.cpp b/Source/ComputationNetworkLib/TrainingNodes.cpp index ea900c061..715ad6ee7 100644 --- a/Source/ComputationNetworkLib/TrainingNodes.cpp +++ b/Source/ComputationNetworkLib/TrainingNodes.cpp @@ -247,7 +247,7 @@ double RandomSampleInclusionFrequencyNode::EstimateNumberOfTries() return totalTries / (double)numExperiments; } -// Estimates the expected number of occurences of each class in the sampled set. +// Estimates the expected number of occurrences of each class in the sampled set. // For sampling without replacement we use estimate using average number of tries. (Inspired by TensorFlow) // BUGBUG: Consider to reimplement using a less biased estimate as proposed by Nikos. template @@ -340,4 +340,4 @@ template class DropoutNode; template class BatchNormalizationNode; template class BatchNormalizationNode; -}}} \ No newline at end of file +}}} diff --git a/Source/ComputationNetworkLib/TrainingNodes.h b/Source/ComputationNetworkLib/TrainingNodes.h index aa8a2deb1..795b2adef 100644 --- a/Source/ComputationNetworkLib/TrainingNodes.h +++ b/Source/ComputationNetworkLib/TrainingNodes.h @@ -1447,7 +1447,7 @@ public: virtual void /*ComputationNode::*/ ForwardPropNonLooping() override; virtual void /*ComputationNodeBase::*/ Validate(bool isFinalValidationPass) override; private: - // Approximates the expected number of occurences of a class in the sampled set. + // Approximates the expected number of occurrences of a class in the sampled set. // Assuming (falsely) that the number of tries to get a sampled set with the requested number of distinct values is always estimatedNumTries // the probability that a specific class in the sampled set is (1 - (1-p)^estimatedNumTries), where p is the probablity to pick the clas in one draw. // The estimate can be quite a bit off but should be better than nothing. Better alternatives? diff --git a/Source/SGDLib/SGD.cpp b/Source/SGDLib/SGD.cpp index ed64f59c5..cc02c606c 100644 --- a/Source/SGDLib/SGD.cpp +++ b/Source/SGDLib/SGD.cpp @@ -1283,7 +1283,7 @@ size_t SGD::TrainOneEpoch(ComputationNetworkPtr net, // independent of their actual content (which is considered outdated). // Sum of actualMBSize across all nodes when using parallel training - // 'aggregate' here means accross-worker aggregate for this one minibatch. + // 'aggregate' here means across-worker aggregate for this one minibatch. 
size_t aggregateNumSamples = actualMBSize; // (0 for empty MB) size_t aggregateNumSamplesWithLabel = CriterionAccumulator::GetNumSamples(criterionNodes[0], numSamplesWithLabelOfNetwork); // (0 for empty MB) diff --git a/Tests/EndToEndTests/UnitTests/MathTests/baseline.txt b/Tests/EndToEndTests/UnitTests/MathTests/baseline.txt index b1498b7a5..88c5fa658 100644 --- a/Tests/EndToEndTests/UnitTests/MathTests/baseline.txt +++ b/Tests/EndToEndTests/UnitTests/MathTests/baseline.txt @@ -412,7 +412,7 @@ Test module "MathTests" has passed with: Test case "GPUMatrixSuite/GPUBlasInnerProduct" has passed with: 2 assertions out of 2 passed - Test case "GPUMatrixSuite/MatrixCopyAssignAccrossDevices" has passed + Test case "GPUMatrixSuite/MatrixCopyAssignAcrossDevices" has passed Test case "GPUMatrixSuite/GPUMatrixConstructorNoFlag" has passed with: 6 assertions out of 6 passed diff --git a/Tests/UnitTests/MathTests/GPUMatrixTests.cpp b/Tests/UnitTests/MathTests/GPUMatrixTests.cpp index 059e5eb75..387e2ceb6 100755 --- a/Tests/UnitTests/MathTests/GPUMatrixTests.cpp +++ b/Tests/UnitTests/MathTests/GPUMatrixTests.cpp @@ -15,7 +15,7 @@ namespace Microsoft { namespace MSR { namespace CNTK { namespace Test { BOOST_AUTO_TEST_SUITE(GPUMatrixSuite) -BOOST_FIXTURE_TEST_CASE(MatrixCopyAssignAccrossDevices, RandomSeedFixture) +BOOST_FIXTURE_TEST_CASE(MatrixCopyAssignAcrossDevices, RandomSeedFixture) { bool hasTwoGpus = false; #ifndef CPUONLY diff --git a/bindings/python/cntk/debugging/debug.py b/bindings/python/cntk/debugging/debug.py index 74310fe8f..264406e32 100644 --- a/bindings/python/cntk/debugging/debug.py +++ b/bindings/python/cntk/debugging/debug.py @@ -107,7 +107,7 @@ class _DebugState(object): def set_checked_mode(enable): ''' Checked mode enables additional runtime verification such as: - - Tracking NaN occurences in sequence gaps. + - Tracking NaN occurrences in sequence gaps. - Function graph verification after binding of free static axes to actual values at runtime Enabling checked mode incurs additional runtime costs and is meant to be used as a debugging aid. diff --git a/bindings/python/cntk/ops/__init__.py b/bindings/python/cntk/ops/__init__.py index 9753e3acf..336e0b191 100755 --- a/bindings/python/cntk/ops/__init__.py +++ b/bindings/python/cntk/ops/__init__.py @@ -2467,7 +2467,7 @@ def random_sample_inclusion_frequency( name=''): ''' For weighted sampling with the specifed sample size (`num_samples`) - this operation computes the expected number of occurences of each class + this operation computes the expected number of occurrences of each class in the sampled set. In case of sampling without replacement the result is only an estimate which might be quite rough in the case of small sample sizes.
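
The set_checked_mode docstring corrected above describes checked mode as a debugging aid that adds extra runtime verification. For illustration, a minimal usage sketch (assuming the CNTK v2 Python bindings and the cntk.layers API, neither of which is touched by this patch; the one-layer network is a hypothetical stand-in, not code from the repository):

    # Minimal sketch: toggling CNTK's checked mode around an evaluation.
    # Assumes the CNTK v2 Python package is installed.
    import numpy as np
    import cntk as C
    from cntk.debugging import set_checked_mode

    # Enable the additional runtime verification described in the docstring
    # (NaN tracking in sequence gaps, graph verification after free static
    # axes are bound at runtime). This adds cost, so use it only while debugging.
    set_checked_mode(True)

    x = C.input_variable(2)
    z = C.layers.Dense(1)(x)
    print(z.eval({x: np.array([[1.0, 2.0]], dtype=np.float32)}))

    # Restore the default (unchecked) behaviour once debugging is done.
    set_checked_mode(False)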