Commit 0a0ca98710 by Jesus Aguilar, 2018-02-12 18:12:50 -05:00
Parent 016c9a1f49
9 changed files with 11 additions and 12 deletions

View file

@@ -299,7 +299,7 @@ func (p *paramParserValidator) pvgDupCheck() error {
 	var err error
 	p.params.dedupeLevel, err = transfer.ParseDupeCheckLevel(p.args.dedupeLevelOptStr)
 	if err != nil {
-		fmt.Errorf("Duplicate detection level is invalid. Found '%s', must be one of %s. Error:%v", p.args.dedupeLevelOptStr, transfer.DupeCheckLevelStr, err)
+		return fmt.Errorf("Duplicate detection level is invalid. Found '%s', must be one of %s. Error:%v", p.args.dedupeLevelOptStr, transfer.DupeCheckLevelStr, err)
 	}
 	return nil
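The fix in this hunk is the added return: fmt.Errorf only constructs an error value, so without the return the validator silently discarded it and an invalid dedupe level passed validation. A minimal sketch of the pattern, with placeholder names rather than blobporter's actual API:

	package main

	import "fmt"

	// validateLevel is a hypothetical stand-in for pvgDupCheck.
	func validateLevel(level string, allowed string) error {
		if level != allowed {
			// fmt.Errorf alone allocates an error and drops it; the return is what
			// actually propagates the failure to the caller.
			return fmt.Errorf("duplicate detection level is invalid. Found '%s', must be one of %s", level, allowed)
		}
		return nil
	}

	func main() {
		fmt.Println(validateLevel("bogus", "none")) // prints the error
		fmt.Println(validateLevel("none", "none"))  // prints <nil>
	}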
@@ -403,7 +403,7 @@ func (p *paramParserValidator) pvPerfSourceIsReq() error {
 func (p *paramParserValidator) pvSetEmptyPrefixIfNone() error {
 	if len(p.params.blobSource.prefixes) == 0 {
-		//if empty set an empty prefix so the entire container is downlaoded..
+		//if empty set an empty prefix so the entire container is downloaded..
 		p.params.blobSource.prefixes = []string{""}
 	}
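The rule this comment describes is a simple default: an empty prefix matches every blob name, so appending "" when the user provided no prefixes makes the download cover the whole container. A small sketch of that idea with hypothetical names and sample data, not blobporter's listing code:

	package main

	import (
		"fmt"
		"strings"
	)

	// Sample blob names a caller might enumerate from the container.
	var blobs = []string{"logs/a.txt", "logs/b.txt", "data/c.bin"}

	// selectBlobs is a hypothetical stand-in for the prefix filtering done during download.
	func selectBlobs(prefixes []string) []string {
		if len(prefixes) == 0 {
			// Same idea as pvSetEmptyPrefixIfNone: default to one empty prefix,
			// which matches every blob name.
			prefixes = []string{""}
		}
		var out []string
		for _, b := range blobs {
			for _, p := range prefixes {
				if strings.HasPrefix(b, p) {
					out = append(out, b)
					break
				}
			}
		}
		return out
	}

	func main() {
		fmt.Println(selectBlobs(nil))               // all three blobs
		fmt.Println(selectBlobs([]string{"logs/"})) // only the two under logs/
	}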
@@ -412,7 +412,7 @@ func (p *paramParserValidator) pvSetEmptyPrefixIfNone() error {
 //this rule checks if the transfer type is blob to file (download). Im which case blob authorization rule also aplies since
 //there are two combinations of param line options that can be provided. One, similar to upload, where the source is main
-// storage account that is the target in all other cases. And the second with the URI provided, as when blob to blob transfers occurr.
+// storage account that is the target in all other cases. And the second with the URI provided, as when blob to blob transfers occur.
 func (p *paramParserValidator) pvSourceInfoForBlobIsReq() error {
 	//if the scenarios is download then check if download is via short-mode

View file

@@ -8,11 +8,11 @@ import (
 )
 //There're two components here: poolHandle and the handle factory.
-//A pool is an asynchronous request/respone worker that runs on a single go-routine and keeps file handles for each file.
+//A pool is an asynchronous request/response worker that runs on a single go-routine and keeps file handles for each file.
 //The number of file handles is constraint by the max number of handlers in cache (maxFileHandlesInCache) and the max number of handles per file (numOfHandlesPerFile).
 //When the max number handles is reached file handles will be closed until space is available. The handle factory opens the file handles and initializes the
 //target file in case the folder structure and file need to be created. Since the factory tracks if a file has been initailized
-//, i.e. created or truncated at the begining of the transfer, only one instance of the factory is created.
+//, i.e. created or truncated at the beginning of the transfer, only one instance of the factory is created.
 const maxFileHandlesInCache int = 600
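The corrected comment block describes a bounded cache of open file handles: at most maxFileHandlesInCache handles overall (600 here) and numOfHandlesPerFile per file, with cached handles closed when the cap is reached. A minimal sketch of that bounding behaviour, not the actual poolHandle implementation, with a tiny cap so the eviction is visible:

	package main

	import (
		"fmt"
		"os"
	)

	const maxHandlesInCache = 4 // stand-in for maxFileHandlesInCache (600 in the source)

	// handleCache is a hypothetical, single-goroutine-owned cache of open files.
	type handleCache struct {
		handles map[string]*os.File
	}

	func (c *handleCache) get(path string) (*os.File, error) {
		if f, ok := c.handles[path]; ok {
			return f, nil
		}
		// When the cap is reached, close a cached handle to make room, mirroring
		// the "close until space is available" behaviour described above.
		if len(c.handles) >= maxHandlesInCache {
			for p, f := range c.handles {
				f.Close()
				delete(c.handles, p)
				break
			}
		}
		f, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY, 0644)
		if err != nil {
			return nil, err
		}
		c.handles[path] = f
		return f, nil
	}

	func main() {
		c := &handleCache{handles: map[string]*os.File{}}
		for i := 0; i < 6; i++ {
			if _, err := c.get(fmt.Sprintf("/tmp/part-%d.dat", i)); err != nil {
				fmt.Println("open failed:", err)
			}
		}
		fmt.Println("cached handles:", len(c.handles)) // never exceeds maxHandlesInCache
	}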

View file

@@ -183,7 +183,7 @@ func ConstructPartsPartition(numberOfPartitions int, size int64, blockSize int64
 		partition.Parts = parts
 		partition.NumOfParts = numOfPartsInPartition
 		Partitions[p] = partition
 		bytesLeft = bytesLeft - int64(partitionSize)
 	}
 	return Partitions
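For context, the loop above fills one partition per iteration and subtracts that partition's size from bytesLeft before returning the slice. A hedged sketch of the same bookkeeping, splitting a total size into whole-block partitions with the remainder going to the last one; names and arithmetic are illustrative, not blobporter's exact code:

	package main

	import "fmt"

	// partition is a hypothetical, trimmed-down version of pipeline.PartsPartition.
	type partition struct {
		Offset   int64
		Size     int64
		NumParts int
	}

	// constructPartitions splits size into numberOfPartitions ranges, rounding each
	// partition down to a whole number of blocks and giving the remainder to the last.
	func constructPartitions(numberOfPartitions int, size int64, blockSize int64) []partition {
		parts := make([]partition, numberOfPartitions)
		partitionSize := (size / int64(numberOfPartitions) / blockSize) * blockSize
		bytesLeft := size
		var offset int64
		for p := 0; p < numberOfPartitions; p++ {
			psize := partitionSize
			if p == numberOfPartitions-1 {
				psize = bytesLeft // last partition absorbs the remainder
			}
			numParts := int((psize + blockSize - 1) / blockSize)
			parts[p] = partition{Offset: offset, Size: psize, NumParts: numParts}
			offset += psize
			bytesLeft -= psize
		}
		return parts
	}

	func main() {
		for _, p := range constructPartitions(3, 1000, 64) {
			fmt.Printf("offset=%d size=%d parts=%d\n", p.Offset, p.Size, p.NumParts)
		}
	}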

View file

@@ -194,7 +194,7 @@ func (f *HTTPSource) ExecuteReader(partitionsQ chan pipeline.PartsPartition, par
 	req.Header.Set("User-Agent", userAgent)
 	//set the close header only when the block is larger than the blob
-	//to minimize the number of open when transfering small files.
+	//to minimize the number of open when transferring small files.
 	if p.BytesToRead < p.BlockSize {
 		req.Close = true
 	}
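The comment being fixed explains why req.Close is set: when the whole transfer fits in a single block there is only one request, so asking net/http not to keep the connection alive avoids accumulating idle keep-alive sockets for many small files. A minimal sketch of the same flag on a plain net/http request, with hypothetical names rather than blobporter's pipeline types:

	package main

	import (
		"fmt"
		"net/http"
	)

	func buildRangeRequest(url string, bytesToRead, blockSize int64) (*http.Request, error) {
		req, err := http.NewRequest(http.MethodGet, url, nil)
		if err != nil {
			return nil, err
		}
		req.Header.Set("Range", fmt.Sprintf("bytes=0-%d", bytesToRead-1))
		// A transfer smaller than one block needs only this single request,
		// so closing the connection avoids holding an idle keep-alive socket.
		if bytesToRead < blockSize {
			req.Close = true
		}
		return req, nil
	}

	func main() {
		req, _ := buildRangeRequest("http://example.com/blob", 1024, 4*1024*1024)
		fmt.Println("Connection close:", req.Close) // true: blob smaller than one block
	}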

View file

@@ -50,7 +50,7 @@ func (t *AzureBlockTarget) CommitList(listInfo *pipeline.TargetCommittedListInfo
 	//Only commit if the number blocks is greater than one.
 	if numberOfBlocks == 1 {
-		msg = fmt.Sprintf("\rFile:%v, The blob is already comitted.",
+		msg = fmt.Sprintf("\rFile:%v, The blob is already committed.",
 			targetName)
 		err = nil
 		return

View file

@@ -61,7 +61,7 @@ func (t *AzurePageTarget) PreProcessSourceInfo(source *pipeline.SourceInfo, bloc
 	}
 	if blockSize > maxPageSize || blockSize < PageSize {
-		return fmt.Errorf(" invalid block size for page blob: %v. The value must be greater than %v and less than %v", PageSize, maxPageSize)
+		return fmt.Errorf(" invalid block size for page blob: %v. The value must be greater than %v and less than %v", source.SourceName, PageSize, maxPageSize)
	}
 	err = t.azUtil.CreatePageBlob(source.TargetAlias, size)
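The change here adds source.SourceName as the missing first argument: the format string has three %v verbs, so the old call would have rendered its last verb as %!v(MISSING). A hedged sketch of the validation with the arguments matched up, assuming the usual Azure page blob constraints of 512-byte pages and a 4 MB maximum per page write; the constant values and function name are illustrative:

	package main

	import "fmt"

	// Assumed bounds mirroring Azure page blob constraints: 512-byte pages,
	// at most 4 MB written per Put Page call.
	const (
		pageSize    int64 = 512
		maxPageSize int64 = 4 * 1024 * 1024
	)

	// checkPageBlockSize is a hypothetical stand-in for the check in PreProcessSourceInfo.
	func checkPageBlockSize(sourceName string, blockSize int64) error {
		if blockSize > maxPageSize || blockSize < pageSize {
			// All three %v verbs now have a matching argument (the old call was one short).
			return fmt.Errorf("invalid block size for page blob: %v. The value must be greater than %v and less than %v",
				sourceName, pageSize, maxPageSize)
		}
		return nil
	}

	func main() {
		fmt.Println(checkPageBlockSize("disk.vhd", 256))     // error: block too small
		fmt.Println(checkPageBlockSize("disk.vhd", 1048576)) // <nil>
	}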

View file

@@ -105,7 +105,7 @@ const (
 	NA = "na"
 )
-//ParseTransferSegment
+//ParseTransferSegment TODO
 func ParseTransferSegment(def Definition) (TransferSegment, TransferSegment) {
 	//defstr := string(def)

View file

@@ -34,8 +34,6 @@ func NewAzUtil(accountName string, accountKey string, container string, baseBlob
 			RetryDelay:    200 * time.Millisecond,
 			MaxRetryDelay: 5 * time.Minute}})
 	baseURL, err := parseBaseURL(accountName, baseBlobURL)
 	if err != nil {
 		return nil, err
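The visible lines configure the storage pipeline's retry policy: a 200 ms base delay capped at 5 minutes. A hedged sketch of an exponential backoff with those bounds, written as plain Go rather than the SDK's retry options (the real policy also adds jitter and honors retry counts):

	package main

	import (
		"fmt"
		"time"
	)

	// Assumed bounds taken from the diff: 200 ms base retry delay, 5 minute cap.
	const (
		retryDelay    = 200 * time.Millisecond
		maxRetryDelay = 5 * time.Minute
	)

	// backoff returns the delay before retry attempt n (1-based) under a simple
	// doubling policy capped at maxRetryDelay.
	func backoff(n int) time.Duration {
		d := retryDelay * time.Duration(1<<uint(n-1))
		if d > maxRetryDelay || d <= 0 {
			return maxRetryDelay
		}
		return d
	}

	func main() {
		for attempt := 1; attempt <= 12; attempt++ {
			fmt.Printf("attempt %d: wait %v\n", attempt, backoff(attempt))
		}
	}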

View file

@@ -245,6 +245,7 @@ func isValidContainerName(name string) bool {
 }
 var storageHTTPClient *http.Client
 //HTTPClientTimeout HTTP timeout of the HTTP client used by the storage client.
 var HTTPClientTimeout = 60
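HTTPClientTimeout holds an integer that, judging by the name and default of 60, is a timeout in seconds applied to the package-level storageHTTPClient. A minimal sketch of how such a setting is typically wired into an http.Client; the constructor name is hypothetical and the seconds interpretation is an assumption:

	package main

	import (
		"fmt"
		"net/http"
		"time"
	)

	// HTTPClientTimeout mirrors the variable in the diff: assumed to be seconds.
	var HTTPClientTimeout = 60

	// newStorageHTTPClient is a hypothetical constructor showing how the integer
	// setting would usually become an http.Client timeout.
	func newStorageHTTPClient() *http.Client {
		return &http.Client{
			Timeout: time.Duration(HTTPClientTimeout) * time.Second,
		}
	}

	func main() {
		c := newStorageHTTPClient()
		fmt.Println("storage client timeout:", c.Timeout) // 1m0s
	}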