diff --git a/args.go b/args.go index 2581685..f5030ff 100644 --- a/args.go +++ b/args.go @@ -299,7 +299,7 @@ func (p *paramParserValidator) pvgDupCheck() error { var err error p.params.dedupeLevel, err = transfer.ParseDupeCheckLevel(p.args.dedupeLevelOptStr) if err != nil { - fmt.Errorf("Duplicate detection level is invalid. Found '%s', must be one of %s. Error:%v", p.args.dedupeLevelOptStr, transfer.DupeCheckLevelStr, err) + return fmt.Errorf("Duplicate detection level is invalid. Found '%s', must be one of %s. Error:%v", p.args.dedupeLevelOptStr, transfer.DupeCheckLevelStr, err) } return nil @@ -403,7 +403,7 @@ func (p *paramParserValidator) pvPerfSourceIsReq() error { func (p *paramParserValidator) pvSetEmptyPrefixIfNone() error { if len(p.params.blobSource.prefixes) == 0 { - //if empty set an empty prefix so the entire container is downlaoded.. + //if empty set an empty prefix so the entire container is downloaded.. p.params.blobSource.prefixes = []string{""} } @@ -412,7 +412,7 @@ func (p *paramParserValidator) pvSetEmptyPrefixIfNone() error { //this rule checks if the transfer type is blob to file (download). Im which case blob authorization rule also aplies since //there are two combinations of param line options that can be provided. One, similar to upload, where the source is main -// storage account that is the target in all other cases. And the second with the URI provided, as when blob to blob transfers occurr. +// storage account that is the target in all other cases. And the second with the URI provided, as when blob to blob transfers occur. func (p *paramParserValidator) pvSourceInfoForBlobIsReq() error { //if the scenarios is download then check if download is via short-mode diff --git a/internal/handleman.go b/internal/handleman.go index 734f4bb..7bd7b01 100644 --- a/internal/handleman.go +++ b/internal/handleman.go @@ -8,11 +8,11 @@ import ( ) //There're two components here: poolHandle and the handle factory. 
-//A pool is an asynchronous request/respone worker that runs on a single go-routine and keeps file handles for each file. +//A pool is an asynchronous request/response worker that runs on a single go-routine and keeps file handles for each file. //The number of file handles is constraint by the max number of handlers in cache (maxFileHandlesInCache) and the max number of handles per file (numOfHandlesPerFile). //When the max number handles is reached file handles will be closed until space is available. The handle factory opens the file handles and initializes the //target file in case the folder structure and file need to be created. Since the factory tracks if a file has been initailized -//, i.e. created or truncated at the begining of the transfer, only one instance of the factory is created. +//, i.e. created or truncated at the beginning of the transfer, only one instance of the factory is created. const maxFileHandlesInCache int = 600 diff --git a/pipeline/pipeline.go b/pipeline/pipeline.go index 1fde0ec..6197945 100644 --- a/pipeline/pipeline.go +++ b/pipeline/pipeline.go @@ -183,7 +183,7 @@ func ConstructPartsPartition(numberOfPartitions int, size int64, blockSize int64 partition.Parts = parts partition.NumOfParts = numOfPartsInPartition Partitions[p] = partition - bytesLeft = bytesLeft - int64(partitionSize) + bytesLeft = bytesLeft - int64(partitionSize) } return Partitions diff --git a/sources/http.go b/sources/http.go index 7591840..8d745ad 100644 --- a/sources/http.go +++ b/sources/http.go @@ -194,7 +194,7 @@ func (f *HTTPSource) ExecuteReader(partitionsQ chan pipeline.PartsPartition, par req.Header.Set("User-Agent", userAgent) //set the close header only when the block is larger than the blob - //to minimize the number of open when transfering small files. + //to minimize the number of open when transferring small files. 
if p.BytesToRead < p.BlockSize { req.Close = true } diff --git a/targets/azureblock.go b/targets/azureblock.go index d46a83e..7c6f796 100644 --- a/targets/azureblock.go +++ b/targets/azureblock.go @@ -50,7 +50,7 @@ func (t *AzureBlockTarget) CommitList(listInfo *pipeline.TargetCommittedListInfo //Only commit if the number blocks is greater than one. if numberOfBlocks == 1 { - msg = fmt.Sprintf("\rFile:%v, The blob is already comitted.", + msg = fmt.Sprintf("\rFile:%v, The blob is already committed.", targetName) err = nil return diff --git a/targets/azurepage.go b/targets/azurepage.go index 6fd96bf..ab236cb 100644 --- a/targets/azurepage.go +++ b/targets/azurepage.go @@ -61,7 +61,7 @@ func (t *AzurePageTarget) PreProcessSourceInfo(source *pipeline.SourceInfo, bloc } if blockSize > maxPageSize || blockSize < PageSize { - return fmt.Errorf(" invalid block size for page blob: %v. The value must be greater than %v and less than %v", PageSize, maxPageSize) + return fmt.Errorf(" invalid block size for page blob: %v. The value must be greater than %v and less than %v", source.SourceName, PageSize, maxPageSize) } err = t.azUtil.CreatePageBlob(source.TargetAlias, size) diff --git a/transfer/transfer.go b/transfer/transfer.go index ebefde8..e21ee15 100644 --- a/transfer/transfer.go +++ b/transfer/transfer.go @@ -105,7 +105,7 @@ const ( NA = "na" ) -//ParseTransferSegment +//ParseTransferSegment TODO func ParseTransferSegment(def Definition) (TransferSegment, TransferSegment) { //defstr := string(def) diff --git a/util/azutil.go b/util/azutil.go index aec362b..358414c 100644 --- a/util/azutil.go +++ b/util/azutil.go @@ -34,8 +34,6 @@ func NewAzUtil(accountName string, accountKey string, container string, baseBlob RetryDelay: 200 * time.Millisecond, MaxRetryDelay: 5 * time.Minute}}) - - baseURL, err := parseBaseURL(accountName, baseBlobURL) if err != nil { return nil, err diff --git a/util/util.go b/util/util.go index c153084..8a917ce 100644 --- a/util/util.go +++ b/util/util.go @@ -245,6 +245,7 @@ func isValidContainerName(name string) bool { } var storageHTTPClient *http.Client + //HTTPClientTimeout HTTP timeout of the HTTP client used by the storage client. var HTTPClientTimeout = 60