Merge remote-tracking branch 'origin/master' into dev

This commit is contained in:
Ze Qian Zhang 2022-04-26 00:15:48 -07:00
Parents fcec3a32c3 b32b5e3579
Commit a5410061cf
8 changed files with 12553 additions and 15 deletions

View file

@@ -1,5 +1,9 @@
 # Azure Storage Blob SDK for Go (PREVIEW)
-[![GoDoc Widget]][GoDoc] [![Build Status][Travis Widget]][Travis]
+[![GoDoc Widget]][GoDoc]
 
+## If you would like to access our latest Go SDK, please refer to the new preview azblob package [here](https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/storage/azblob#readme). If you would like more information on Azure's burgeoning effort to coordinate the development of the SDKs across services, of which this change is a part, please refer to [this article](https://azure.microsoft.com/en-us/blog/previewing-azure-sdks-following-new-azure-sdk-api-standards/).
+
+## We will continue to respond to issues here, but prefer that you post them on the [new repo](https://github.com/Azure/azure-sdk-for-go). Thank you for your patience. We look forward to continuing to work together with you.
+
 The Microsoft Azure Storage SDK for Go allows you to build applications that takes advantage of Azure's scalable cloud storage.

View file

@@ -166,12 +166,12 @@ func (c *copier) write(chunk copierChunk) {
 	if err := c.ctx.Err(); err != nil {
 		return
 	}
 	_, err := c.to.StageBlock(c.ctx, chunk.id, bytes.NewReader(chunk.buffer), c.o.AccessConditions.LeaseAccessConditions, nil, c.o.ClientProvidedKeyOptions)
 	if err != nil {
 		c.errCh <- fmt.Errorf("write error: %w", err)
 		return
 	}
-	return
 }
 
 // close commits our blocks to blob storage and closes our writer.
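The deleted `return` sat at the very end of the function body, so removing it is purely cosmetic. The surrounding pattern is worth noting, though: the copier checks its context, stages a block, and reports any failure over an error channel instead of returning it, so concurrent chunk writers can all fail fast. A minimal sketch of that pattern, with hypothetical names (`chunk`, `stage`, `errCh`) standing in for the SDK's real types:

package main

import (
	"context"
	"fmt"
)

type chunk struct {
	id     string
	buffer []byte
}

// write stages one chunk; failures go to errCh rather than a return value,
// so many writer goroutines can share one error path. Illustrative only.
func write(ctx context.Context, ch chunk, stage func(context.Context, chunk) error, errCh chan<- error) {
	if err := ctx.Err(); err != nil {
		return // context already canceled; nothing to report
	}
	if err := stage(ctx, ch); err != nil {
		errCh <- fmt.Errorf("write error: %w", err)
		return
	}
	// No trailing return needed: the function falls off the end here,
	// which is exactly what the hunk above removes.
}

func main() {
	errCh := make(chan error, 1)
	stage := func(context.Context, chunk) error { return nil } // pretend upload
	write(context.Background(), chunk{id: "block-0001", buffer: []byte("data")}, stage, errCh)
	close(errCh)
	for err := range errCh {
		fmt.Println(err)
	}
}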

View file

@@ -77,7 +77,7 @@ func (b BlobURL) ToPageBlobURL() PageBlobURL {
 }
 
 func SerializeBlobTagsHeader(blobTagsMap BlobTagsMap) *string {
-	if blobTagsMap == nil {
+	if len(blobTagsMap) == 0 {
 		return nil
 	}
 	tags := make([]string, 0)
@@ -90,7 +90,7 @@ func SerializeBlobTagsHeader(blobTagsMap BlobTagsMap) *string {
 }
 
 func SerializeBlobTags(blobTagsMap BlobTagsMap) BlobTags {
-	if blobTagsMap == nil {
+	if len(blobTagsMap) == 0 {
 		return BlobTags{}
 	}
 	blobTagSet := make([]BlobTag, 0, len(blobTagsMap))
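Both hunks above swap a `== nil` test for `len(...) == 0`. In Go, `len` of a nil map is zero, so the new guard also short-circuits for a non-nil but empty `BlobTagsMap`, which the old check let through. A standalone illustration of the distinction:

package main

import "fmt"

func main() {
	var nilMap map[string]string    // nil: no storage allocated
	emptyMap := map[string]string{} // non-nil, but holds no entries

	fmt.Println(nilMap == nil, len(nilMap) == 0)     // true true
	fmt.Println(emptyMap == nil, len(emptyMap) == 0) // false true
}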

View file

@@ -123,7 +123,10 @@ func (o *ListContainersSegmentOptions) pointers() (prefix *string, include []Lis
 	if o.MaxResults != 0 {
 		maxResults = &o.MaxResults
 	}
-	include = []ListContainersIncludeType{ListContainersIncludeType(o.Detail.string())}
+	details := o.Detail.string()
+	if len(details) > 0 {
+		include = []ListContainersIncludeType{ListContainersIncludeType(details)}
+	}
 	return
 }
 
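With the guard above, `include` stays nil when `o.Detail` serializes to an empty string, so no empty `ListContainersIncludeType("")` value is emitted in the request. A hedged sketch of the same skip-when-empty pattern; `includeParams` and `detailString` are illustrative, not the SDK's API:

package main

import "fmt"

// includeParams populates an include list only when the serialized detail
// string is non-empty, mirroring the guard in the hunk above.
func includeParams(detailString string) []string {
	var include []string
	if len(detailString) > 0 {
		include = []string{detailString}
	}
	return include // nil when there is nothing to ask the service for
}

func main() {
	fmt.Println(includeParams(""))         // [] (nil slice; no query value sent)
	fmt.Println(includeParams("metadata")) // [metadata]
}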

View file

@@ -161,7 +161,7 @@ func buildCanonicalizedHeader(headers http.Header) string {
 		ch.WriteRune(':')
 		ch.WriteString(strings.Join(cm[key], ","))
 	}
-	return string(ch.Bytes())
+	return ch.String()
 }
 
 func (f *SharedKeyCredential) buildCanonicalizedResource(u *url.URL) (string, error) {
@@ -201,5 +201,5 @@ func (f *SharedKeyCredential) buildCanonicalizedResource(u *url.URL) (string, er
 			cr.WriteString("\n" + paramName + ":" + strings.Join(paramValues, ","))
 		}
 	}
-	return string(cr.Bytes()), nil
+	return cr.String(), nil
 }
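Both returns above are built from a `bytes.Buffer` (`ch` and `cr`); `string(buf.Bytes())` and `buf.String()` yield identical strings, the latter being the idiomatic accessor. A quick demonstration:

package main

import (
	"bytes"
	"fmt"
)

func main() {
	var b bytes.Buffer
	b.WriteString("x-ms-meta:")
	b.WriteString("value")

	s1 := string(b.Bytes()) // old style: explicit conversion of the byte slice
	s2 := b.String()        // new style: same content, idiomatic accessor
	fmt.Println(s1 == s2)   // true
}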

View file

@@ -38,7 +38,7 @@ func tokenCredentialPointers(credential TokenCredential) *string {
 // TokenCredential's token value by calling SetToken. Your tokenRefresher function must return a time.Duration
 // indicating how long the TokenCredential object should wait before calling your tokenRefresher function again.
 // If your tokenRefresher callback fails to refresh the token, you can return a duration of 0 to stop your
-// TokenCredential object from ever invoking tokenRefresher again. Also, oen way to deal with failing to refresh a
+// TokenCredential object from ever invoking tokenRefresher again. Also, one way to deal with failing to refresh a
 // token is to cancel a context.Context object used by requests that have the TokenCredential object in their pipeline.
 func NewTokenCredential(initialToken string, tokenRefresher TokenRefresher) TokenCredential {
 	tc := &tokenCredential{}
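The doc comment above defines the refresher contract: return the wait before the next refresh, or 0 to give up. A hedged usage sketch against that contract; `fetchNewToken` is a hypothetical stand-in for a real OAuth call, while `NewTokenCredential` and `SetToken` are the names documented in this file:

package main

import (
	"time"

	"github.com/Azure/azure-storage-blob-go/azblob"
)

// fetchNewToken is hypothetical: substitute your actual token acquisition.
func fetchNewToken() (string, time.Time, error) {
	return "fresh-token", time.Now().Add(time.Hour), nil
}

func main() {
	refresher := func(credential azblob.TokenCredential) time.Duration {
		token, expiry, err := fetchNewToken()
		if err != nil {
			return 0 // per the contract above: 0 stops future refresh calls
		}
		credential.SetToken(token)
		return time.Until(expiry) - 2*time.Minute // renew slightly before expiry
	}
	_ = azblob.NewTokenCredential("initial-token", refresher)
}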

View file

@@ -181,25 +181,25 @@ func NewRetryPolicyFactory(o RetryOptions) pipeline.Factory {
 			}
 
 			// Set the server-side timeout query parameter "timeout=[seconds]"
-			timeout := int32(o.TryTimeout.Seconds()) // Max seconds per try
+			timeout := o.TryTimeout // Max time per try
 			if deadline, ok := ctx.Deadline(); ok { // If user's ctx has a deadline, make the timeout the smaller of the two
-				t := int32(deadline.Sub(time.Now()).Seconds()) // Duration from now until user's ctx reaches its deadline
-				logf("MaxTryTimeout=%d secs, TimeTilDeadline=%d sec\n", timeout, t)
+				t := deadline.Sub(time.Now()) // Duration from now until user's ctx reaches its deadline
+				logf("MaxTryTimeout=%d secs, TimeTilDeadline=%d sec\n", int32(timeout.Seconds()), int32(t.Seconds()))
 				if t < timeout {
 					timeout = t
 				}
 				if timeout < 0 {
 					timeout = 0 // If timeout ever goes negative, set it to zero; this happen while debugging
 				}
-				logf("TryTimeout adjusted to=%d sec\n", timeout)
+				logf("TryTimeout adjusted to=%d sec\n", int32(timeout.Seconds()))
 			}
 			q := requestCopy.Request.URL.Query()
-			q.Set("timeout", strconv.Itoa(int(timeout+1))) // Add 1 to "round up"
+			q.Set("timeout", strconv.Itoa(int(timeout.Seconds()+1))) // Add 1 to "round up"
 			requestCopy.Request.URL.RawQuery = q.Encode()
 			logf("Url=%s\n", requestCopy.Request.URL.String())
 
 			// Set the time for this particular retry operation and then Do the operation.
-			tryCtx, tryCancel := context.WithTimeout(ctx, time.Second*time.Duration(timeout))
+			tryCtx, tryCancel := context.WithTimeout(ctx, timeout)
 			//requestCopy.Body = &deadlineExceededReadCloser{r: requestCopy.Request.Body}
 			response, err = next.Do(tryCtx, requestCopy) // Make the request
 			/*err = improveDeadlineExceeded(err)
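The rewrite above carries the per-try timeout as a `time.Duration` all the way to `context.WithTimeout`, instead of truncating to whole `int32` seconds up front; only the server-side `timeout` query parameter still needs seconds. A condensed, standard-library-only sketch of that min-of-two-deadlines computation (names here are illustrative, not the SDK's):

package main

import (
	"context"
	"fmt"
	"time"
)

// perTryTimeout returns the smaller of tryTimeout and the time remaining
// until ctx's deadline, floored at zero, at full Duration precision.
func perTryTimeout(ctx context.Context, tryTimeout time.Duration) time.Duration {
	timeout := tryTimeout
	if deadline, ok := ctx.Deadline(); ok {
		if t := time.Until(deadline); t < timeout {
			timeout = t
		}
	}
	if timeout < 0 {
		timeout = 0
	}
	return timeout
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 90*time.Second)
	defer cancel()

	timeout := perTryTimeout(ctx, 60*time.Second)
	// The server-side parameter is still whole seconds, rounded up by one.
	fmt.Printf("timeout=%d\n", int(timeout.Seconds()+1))

	tryCtx, tryCancel := context.WithTimeout(ctx, timeout) // no seconds truncation
	defer tryCancel()
	_ = tryCtx
}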

swagger/blob.json (new file, 12531 additions)

Diff not shown because of its large size.