Mirror of https://github.com/Azure/blobporter.git

doc updates

Parent 016c9a1f49
Commit 0a0ca98710

Changed files:
 args.go | 6
@@ -299,7 +299,7 @@ func (p *paramParserValidator) pvgDupCheck() error {
 	var err error
 	p.params.dedupeLevel, err = transfer.ParseDupeCheckLevel(p.args.dedupeLevelOptStr)
 	if err != nil {
-		fmt.Errorf("Duplicate detection level is invalid. Found '%s', must be one of %s. Error:%v", p.args.dedupeLevelOptStr, transfer.DupeCheckLevelStr, err)
+		return fmt.Errorf("Duplicate detection level is invalid. Found '%s', must be one of %s. Error:%v", p.args.dedupeLevelOptStr, transfer.DupeCheckLevelStr, err)
 	}

 	return nil
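This hunk is more than a doc tweak: `fmt.Errorf` only builds an error value, so without the `return` the validator constructed the message, discarded it, and fell through to `return nil`. A self-contained sketch of the failure mode, where the `parse` helper is a stand-in for `transfer.ParseDupeCheckLevel`:

package main

import "fmt"

// parse stands in for transfer.ParseDupeCheckLevel: it fails on unknown input.
func parse(s string) (string, error) {
	if s != "None" && s != "ZeroOnly" && s != "Full" {
		return "", fmt.Errorf("unknown level %q", s)
	}
	return s, nil
}

func validate(s string) error {
	_, err := parse(s)
	if err != nil {
		// Bug shape: building the error without returning it is a no-op;
		// `go vet` flags the unused fmt.Errorf result.
		//   fmt.Errorf("invalid level '%s': %v", s, err)
		return fmt.Errorf("invalid level '%s': %v", s, err) // fixed: propagate it
	}
	return nil
}

func main() {
	fmt.Println(validate("bogus")) // invalid level 'bogus': unknown level "bogus"
}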
@@ -403,7 +403,7 @@ func (p *paramParserValidator) pvPerfSourceIsReq() error {
 func (p *paramParserValidator) pvSetEmptyPrefixIfNone() error {

 	if len(p.params.blobSource.prefixes) == 0 {
-		//if empty set an empty prefix so the entire container is downlaoded..
+		//if empty set an empty prefix so the entire container is downloaded..
 		p.params.blobSource.prefixes = []string{""}
 	}

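For context on why `[]string{""}` is the right fallback: an empty prefix matches every blob name, so the download loop then covers the whole container. A minimal sketch of that convention, with a hypothetical in-memory name list in place of the real container listing:

package main

import (
	"fmt"
	"strings"
)

func main() {
	blobs := []string{"logs/a.txt", "logs/b.txt", "data/c.bin"}

	// No prefixes supplied: fall back to the single empty prefix,
	// which strings.HasPrefix matches for every name.
	prefixes := []string{}
	if len(prefixes) == 0 {
		prefixes = []string{""}
	}

	for _, p := range prefixes {
		for _, b := range blobs {
			if strings.HasPrefix(b, p) {
				fmt.Println(b) // all three blobs are listed
			}
		}
	}
}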
@@ -412,7 +412,7 @@ func (p *paramParserValidator) pvSetEmptyPrefixIfNone() error {

 //this rule checks if the transfer type is blob to file (download). Im which case blob authorization rule also aplies since
 //there are two combinations of param line options that can be provided. One, similar to upload, where the source is main
-// storage account that is the target in all other cases. And the second with the URI provided, as when blob to blob transfers occurr.
+// storage account that is the target in all other cases. And the second with the URI provided, as when blob to blob transfers occur.
 func (p *paramParserValidator) pvSourceInfoForBlobIsReq() error {

 	//if the scenarios is download then check if download is via short-mode
@@ -8,11 +8,11 @@ import (
 )

 //There're two components here: poolHandle and the handle factory.
-//A pool is an asynchronous request/respone worker that runs on a single go-routine and keeps file handles for each file.
+//A pool is an asynchronous request/response worker that runs on a single go-routine and keeps file handles for each file.
 //The number of file handles is constraint by the max number of handlers in cache (maxFileHandlesInCache) and the max number of handles per file (numOfHandlesPerFile).
 //When the max number handles is reached file handles will be closed until space is available. The handle factory opens the file handles and initializes the
 //target file in case the folder structure and file need to be created. Since the factory tracks if a file has been initailized
-//, i.e. created or truncated at the begining of the transfer, only one instance of the factory is created.
+//, i.e. created or truncated at the beginning of the transfer, only one instance of the factory is created.

 const maxFileHandlesInCache int = 600

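The corrected comment documents a bounded handle cache: open handles up to a cap, then close one to make room. A toy sketch of that eviction idea; the names and the tiny cap are illustrative, not the real poolHandle implementation:

package main

import (
	"fmt"
	"os"
)

const maxHandlesInCache = 3 // illustrative; the real limit above is 600

// handleCache keeps open *os.File handles per file name and evicts
// (closes) an arbitrary entry when the cache is full.
type handleCache struct {
	handles map[string]*os.File
}

func (c *handleCache) get(name string) (*os.File, error) {
	if fh, ok := c.handles[name]; ok {
		return fh, nil
	}
	if len(c.handles) >= maxHandlesInCache {
		for k, fh := range c.handles { // evict one entry to make room
			fh.Close()
			delete(c.handles, k)
			break
		}
	}
	fh, err := os.OpenFile(name, os.O_CREATE|os.O_WRONLY, 0644)
	if err != nil {
		return nil, err
	}
	c.handles[name] = fh
	return fh, nil
}

func main() {
	c := &handleCache{handles: map[string]*os.File{}}
	for _, n := range []string{"a", "b", "c", "d"} {
		if _, err := c.get(n); err != nil {
			fmt.Println(err)
		}
	}
	fmt.Println("open handles:", len(c.handles)) // capped at 3
}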
@@ -183,7 +183,7 @@ func ConstructPartsPartition(numberOfPartitions int, size int64, blockSize int64
 		partition.Parts = parts
 		partition.NumOfParts = numOfPartsInPartition
 		Partitions[p] = partition
-		bytesLeft = bytesLeft - int64(partitionSize)
+		bytesLeft = bytesLeft - int64(partitionSize)
 	}

 	return Partitions
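For context on the `bytesLeft` bookkeeping: each partition takes an equal `size / numberOfPartitions` share, and whatever remains accumulates into the last one. A simplified sketch of that arithmetic; the real function further splits each partition into block-sized parts:

package main

import "fmt"

// partitionSizes splits size bytes across n partitions; the last
// partition absorbs whatever bytesLeft remains after equal shares.
func partitionSizes(n int, size int64) []int64 {
	sizes := make([]int64, n)
	partitionSize := size / int64(n)
	bytesLeft := size
	for p := 0; p < n; p++ {
		if p == n-1 {
			sizes[p] = bytesLeft // remainder goes to the last partition
		} else {
			sizes[p] = partitionSize
		}
		bytesLeft = bytesLeft - partitionSize
	}
	return sizes
}

func main() {
	fmt.Println(partitionSizes(4, 1000)) // [250 250 250 250]
	fmt.Println(partitionSizes(3, 1000)) // [333 333 334]
}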
@@ -194,7 +194,7 @@ func (f *HTTPSource) ExecuteReader(partitionsQ chan pipeline.PartsPartition, par
 	req.Header.Set("User-Agent", userAgent)

 	//set the close header only when the block is larger than the blob
-	//to minimize the number of open when transfering small files.
+	//to minimize the number of open when transferring small files.
 	if p.BytesToRead < p.BlockSize {
 		req.Close = true
 	}
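As background for the comment being fixed: `req.Close = true` disables keep-alive for that single request, so small transfers don't hold idle connections open. A self-contained sketch using a local test server; the threshold values are illustrative:

package main

import (
	"fmt"
	"io"
	"net/http"
	"net/http/httptest"
)

func main() {
	srv := httptest.NewServer(http.HandlerFunc(
		func(w http.ResponseWriter, r *http.Request) {
			io.WriteString(w, "hello")
		}))
	defer srv.Close()

	req, err := http.NewRequest("GET", srv.URL, nil)
	if err != nil {
		panic(err)
	}
	req.Header.Set("User-Agent", "blobporter-sketch")

	// Small payload: close the connection after the response instead
	// of returning it to the keep-alive pool.
	bytesToRead, blockSize := int64(5), int64(4*1024*1024)
	if bytesToRead < blockSize {
		req.Close = true
	}

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	body, _ := io.ReadAll(resp.Body)
	fmt.Println(string(body), resp.Close)
}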
@@ -50,7 +50,7 @@ func (t *AzureBlockTarget) CommitList(listInfo *pipeline.TargetCommittedListInfo

 	//Only commit if the number blocks is greater than one.
 	if numberOfBlocks == 1 {
-		msg = fmt.Sprintf("\rFile:%v, The blob is already comitted.",
+		msg = fmt.Sprintf("\rFile:%v, The blob is already committed.",
 			targetName)
 		err = nil
 		return
@@ -61,7 +61,7 @@ func (t *AzurePageTarget) PreProcessSourceInfo(source *pipeline.SourceInfo, bloc
 	}

 	if blockSize > maxPageSize || blockSize < PageSize {
-		return fmt.Errorf(" invalid block size for page blob: %v. The value must be greater than %v and less than %v", PageSize, maxPageSize)
+		return fmt.Errorf(" invalid block size for page blob: %v. The value must be greater than %v and less than %v", source.SourceName, PageSize, maxPageSize)
 	}

 	err = t.azUtil.CreatePageBlob(source.TargetAlias, size)
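Note the substantive part of this hunk: the old call had three `%v` verbs but only two arguments, so the rendered message would contain a `%!v(MISSING)` placeholder; the fix supplies `source.SourceName` as the first argument. A small demonstration of the arity mismatch, which `go vet` also reports (values are illustrative):

package main

import "fmt"

func main() {
	pageSize, maxPageSize := 512, 4*1024*1024

	// Three verbs, two arguments: fmt injects %!v(MISSING).
	bad := fmt.Errorf("invalid block size for page blob: %v. must be greater than %v and less than %v",
		pageSize, maxPageSize)
	fmt.Println(bad)

	// Matching arity, as in the corrected line.
	good := fmt.Errorf("invalid block size for page blob: %v. must be greater than %v and less than %v",
		"myblob.vhd", pageSize, maxPageSize)
	fmt.Println(good)
}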
@@ -105,7 +105,7 @@ const (
 	NA = "na"
 )

-//ParseTransferSegment
+//ParseTransferSegment TODO
 func ParseTransferSegment(def Definition) (TransferSegment, TransferSegment) {
 	//defstr := string(def)

@@ -34,8 +34,6 @@ func NewAzUtil(accountName string, accountKey string, container string, baseBlob
 			RetryDelay:    200 * time.Millisecond,
 			MaxRetryDelay: 5 * time.Minute}})

-
-
 	baseURL, err := parseBaseURL(accountName, baseBlobURL)
 	if err != nil {
 		return nil, err
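The `RetryDelay`/`MaxRetryDelay` values shown are part of the azblob pipeline options. A hedged sketch of how such a pipeline is typically assembled with github.com/Azure/azure-storage-blob-go/azblob; the account name and key are placeholders, and constructor signatures may differ in the azblob version this repo pins:

package main

import (
	"fmt"
	"time"

	"github.com/Azure/azure-storage-blob-go/azblob"
)

func main() {
	// Placeholder credentials; the key must be valid base64.
	credential, err := azblob.NewSharedKeyCredential("myaccount", "bXlrZXk=")
	if err != nil {
		panic(err)
	}

	// Exponential backoff starting at 200ms and capped at 5 minutes,
	// matching the options set in NewAzUtil above.
	p := azblob.NewPipeline(credential, azblob.PipelineOptions{
		Retry: azblob.RetryOptions{
			Policy:        azblob.RetryPolicyExponential,
			RetryDelay:    200 * time.Millisecond,
			MaxRetryDelay: 5 * time.Minute,
		},
	})
	fmt.Println(p != nil)
}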
@@ -245,6 +245,7 @@ func isValidContainerName(name string) bool {
 }

 var storageHTTPClient *http.Client

+//HTTPClientTimeout HTTP timeout of the HTTP client used by the storage client.
 var HTTPClientTimeout = 60
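For context, an exported integer timeout plus a package-level `*http.Client` like this is usually wired together as below. A minimal sketch assuming the client is built lazily elsewhere in the package; the accessor name is hypothetical:

package main

import (
	"fmt"
	"net/http"
	"time"
)

var storageHTTPClient *http.Client

// HTTPClientTimeout HTTP timeout of the HTTP client used by the storage client.
var HTTPClientTimeout = 60

// getStorageHTTPClient is a hypothetical accessor showing how the
// timeout value would be applied when the client is first needed.
func getStorageHTTPClient() *http.Client {
	if storageHTTPClient == nil {
		storageHTTPClient = &http.Client{
			Timeout: time.Duration(HTTPClientTimeout) * time.Second,
		}
	}
	return storageHTTPClient
}

func main() {
	fmt.Println(getStorageHTTPClient().Timeout) // 1m0s
}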