Merge pull request #220 from Azure/dev

Merge dev to master
Mohit Sharma 2020-10-28 17:36:46 +05:30 committed by GitHub
Parents 48358e1de5 0a7bd7e52f
Commit 456ab4777f
No key found matching this signature
GPG key ID: 4AEE18F83AFDEB23
46 changed files with 6118 additions and 1116 deletions

View file

@ -1,10 +0,0 @@
language: go
go:
- "1.13"
script:
- export GO111MODULE=on
- GOOS=linux go build ./azblob
- GOOS=darwin go build ./azblob
- GOOS=windows go build ./azblob
- GOOS=solaris go build ./azblob
- go test -race -short -cover -v ./azblob

View file

@ -2,6 +2,16 @@
> See [BreakingChanges](BreakingChanges.md) for a detailed list of API breaks.
## Version 0.11.0:
- Added support for the service version [`2019-12-12`](https://docs.microsoft.com/en-us/rest/api/storageservices/versioning-for-the-azure-storage-services).
- Added the [Get Blob Tags](https://docs.microsoft.com/en-us/rest/api/storageservices/get-blob-tags) and [Set Blob Tags](https://docs.microsoft.com/en-us/rest/api/storageservices/set-blob-tags) APIs, which allow user-defined tags to be added to a blob; the tags then act as a secondary index.
- Added the [Find Blobs by Tags](https://docs.microsoft.com/en-us/rest/api/storageservices/find-blobs-by-tags) API, which allows blobs to be retrieved based on their tags.
- The maximum size of a block uploaded via [Put Block](https://docs.microsoft.com/en-us/rest/api/storageservices/put-block#remarks) has been increased to 4000 MiB (approximately 4 GiB). This means that the maximum size of a block blob is now approximately 190.7 TiB (50,000 blocks of 4000 MiB each).
- The maximum size of a blob uploaded through [Put Blob](https://docs.microsoft.com/en-us/rest/api/storageservices/put-blob#remarks) has been increased to 5000 MiB (approximately 5 GiB).
- Added blob APIs to support the [Blob Versioning](https://docs.microsoft.com/en-us/azure/storage/blobs/versioning-overview) feature.
- Added support for setting the blob tier directly at blob creation time instead of requiring a separate [Set Blob Tier](https://docs.microsoft.com/en-us/rest/api/storageservices/set-blob-tier) API call.
- Added the [Get Page Range Diff](https://docs.microsoft.com/rest/api/storageservices/get-page-ranges) API to get the collection of page ranges that differ between a specified snapshot and a page blob representing a managed disk.
## Version 0.10.0:
- Added support for CopyBlobFromURL (sync) and upgraded the service version to 2019-02-02.
- Provided default values for UploadStreamToBlockBlobOptions and refactored UploadStreamToBlockBlob.
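
The 0.11.0 items above surface in the Go API changes shown in the rest of this diff: existing upload and copy methods gain access-tier and tag parameters, and new SetTags/GetTags methods are added. A minimal usage sketch, assuming an already-constructed BlockBlobURL; the tag keys/values and the Cool tier choice are illustrative only:

```go
package blobexample

import (
	"context"
	"strings"

	"github.com/Azure/azure-storage-blob-go/azblob"
)

// uploadWithTierAndTags sketches the 0.11.0 additions: the access tier and the
// user-defined tags are passed directly to Upload instead of requiring separate
// Set Blob Tier / Set Blob Tags calls afterwards. Tag values are placeholders.
func uploadWithTierAndTags(ctx context.Context, blob azblob.BlockBlobURL) (azblob.BlobTagsMap, error) {
	tags := azblob.BlobTagsMap{"project": "demo", "owner": "sdk"}
	_, err := blob.Upload(ctx, strings.NewReader("hello"),
		azblob.BlobHTTPHeaders{ContentType: "text/plain"},
		azblob.Metadata{}, azblob.BlobAccessConditions{},
		azblob.AccessTierCool, // tier set at creation time
		tags)                  // tags act as a secondary index
	if err != nil {
		return nil, err
	}

	// Read the tags back; the nil arguments accept the service defaults.
	got, err := blob.GetTags(ctx, nil, nil, nil, nil, nil)
	if err != nil {
		return nil, err
	}
	out := azblob.BlobTagsMap{}
	for _, t := range got.BlobTagSet {
		out[t.Key] = t.Value
	}
	return out, nil
}
```

The same parameter shapes appear in the test file added at the end of this diff, which exercises Upload, SetTags, and GetTags against specific blob versions.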

271
Gopkg.lock generated
View file

@ -1,271 +0,0 @@
# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
[[projects]]
digest = "1:6b1426cad7057b717351eacf5b6fe70f053f11aac1ce254bbf2fd72c031719eb"
name = "contrib.go.opencensus.io/exporter/ocagent"
packages = ["."]
pruneopts = "UT"
revision = "dcb33c7f3b7cfe67e8a2cea10207ede1b7c40764"
version = "v0.4.12"
[[projects]]
digest = "1:602649ff074ccee9273e1d3b25c4069f13a70fa0c232957c7d68a6f02fb7a9ea"
name = "github.com/Azure/azure-pipeline-go"
packages = ["pipeline"]
pruneopts = "UT"
revision = "105d6349faa1dec531c0b932b5863540c1f6aafb"
version = "v0.2.1"
[[projects]]
digest = "1:d5800d9f8f0d48f84a2a45adeca9eee0e129f7d80b5c3d9770e90a4e5162058b"
name = "github.com/Azure/go-autorest"
packages = [
"autorest/adal",
"autorest/date",
"tracing",
]
pruneopts = "UT"
revision = "09205e8f6711a776499a14cf8adc6bd380db5d81"
version = "v12.2.0"
[[projects]]
digest = "1:fdb4ed936abeecb46a8c27dcac83f75c05c87a46d9ec7711411eb785c213fa02"
name = "github.com/census-instrumentation/opencensus-proto"
packages = [
"gen-go/agent/common/v1",
"gen-go/agent/metrics/v1",
"gen-go/agent/trace/v1",
"gen-go/metrics/v1",
"gen-go/resource/v1",
"gen-go/trace/v1",
]
pruneopts = "UT"
revision = "a105b96453fe85139acc07b68de48f2cbdd71249"
version = "v0.2.0"
[[projects]]
digest = "1:76dc72490af7174349349838f2fe118996381b31ea83243812a97e5a0fd5ed55"
name = "github.com/dgrijalva/jwt-go"
packages = ["."]
pruneopts = "UT"
revision = "06ea1031745cb8b3dab3f6a236daf2b0aa468b7e"
version = "v3.2.0"
[[projects]]
digest = "1:489a99067cd08971bd9c1ee0055119ba8febc1429f9200ab0bec68d35e8c4833"
name = "github.com/golang/protobuf"
packages = [
"jsonpb",
"proto",
"protoc-gen-go/descriptor",
"protoc-gen-go/generator",
"protoc-gen-go/generator/internal/remap",
"protoc-gen-go/plugin",
"ptypes",
"ptypes/any",
"ptypes/duration",
"ptypes/struct",
"ptypes/timestamp",
"ptypes/wrappers",
]
pruneopts = "UT"
revision = "b5d812f8a3706043e23a9cd5babf2e5423744d30"
version = "v1.3.1"
[[projects]]
digest = "1:c20c9a82345346a19916a0086e61ea97425172036a32b8a8975490da6a129fda"
name = "github.com/grpc-ecosystem/grpc-gateway"
packages = [
"internal",
"runtime",
"utilities",
]
pruneopts = "UT"
revision = "cd0c8ef3533e9c04e6520cac37a81fe262fb0b34"
version = "v1.9.2"
[[projects]]
digest = "1:67474f760e9ac3799f740db2c489e6423a4cde45520673ec123ac831ad849cb8"
name = "github.com/hashicorp/golang-lru"
packages = ["simplelru"]
pruneopts = "UT"
revision = "7087cb70de9f7a8bc0a10c375cb0d2280a8edf9c"
version = "v0.5.1"
[[projects]]
branch = "master"
digest = "1:f1df16c368a97edecc18c8c061c278cb6a342450bb83d5da4738e5b330abd522"
name = "github.com/mattn/go-ieproxy"
packages = ["."]
pruneopts = "UT"
revision = "91bb50d981495aef1c208d31be3d77d904384f20"
[[projects]]
digest = "1:4c93890bbbb5016505e856cb06b5c5a2ff5b7217584d33f2a9071ebef4b5d473"
name = "go.opencensus.io"
packages = [
".",
"internal",
"internal/tagencoding",
"metric/metricdata",
"metric/metricproducer",
"plugin/ocgrpc",
"plugin/ochttp",
"plugin/ochttp/propagation/b3",
"plugin/ochttp/propagation/tracecontext",
"resource",
"stats",
"stats/internal",
"stats/view",
"tag",
"trace",
"trace/internal",
"trace/propagation",
"trace/tracestate",
]
pruneopts = "UT"
revision = "43463a80402d8447b7fce0d2c58edf1687ff0b58"
version = "v0.19.3"
[[projects]]
branch = "master"
digest = "1:8f690c88cafc94f162d91fb3eaa1d9826f24c2f86ee7ea46c16bc0a3d3846c19"
name = "golang.org/x/net"
packages = [
"context",
"http/httpguts",
"http/httpproxy",
"http2",
"http2/hpack",
"idna",
"internal/timeseries",
"trace",
]
pruneopts = "UT"
revision = "da137c7871d730100384dbcf36e6f8fa493aef5b"
[[projects]]
branch = "master"
digest = "1:382bb5a7fb4034db3b6a2d19e5a4a6bcf52f4750530603c01ca18a172fa3089b"
name = "golang.org/x/sync"
packages = ["semaphore"]
pruneopts = "UT"
revision = "112230192c580c3556b8cee6403af37a4fc5f28c"
[[projects]]
branch = "master"
digest = "1:2c770d8251a8a2127b648f57602d75c8e40457ba070b57b38176013472f31326"
name = "golang.org/x/sys"
packages = [
"unix",
"windows",
"windows/registry",
]
pruneopts = "UT"
revision = "04f50cda93cbb67f2afa353c52f342100e80e625"
[[projects]]
digest = "1:8d8faad6b12a3a4c819a3f9618cb6ee1fa1cfc33253abeeea8b55336721e3405"
name = "golang.org/x/text"
packages = [
"collate",
"collate/build",
"internal/colltab",
"internal/gen",
"internal/language",
"internal/language/compact",
"internal/tag",
"internal/triegen",
"internal/ucd",
"language",
"secure/bidirule",
"transform",
"unicode/bidi",
"unicode/cldr",
"unicode/norm",
"unicode/rangetable",
]
pruneopts = "UT"
revision = "342b2e1fbaa52c93f31447ad2c6abc048c63e475"
version = "v0.3.2"
[[projects]]
digest = "1:5f003878aabe31d7f6b842d4de32b41c46c214bb629bb485387dbcce1edf5643"
name = "google.golang.org/api"
packages = ["support/bundler"]
pruneopts = "UT"
revision = "02490b97dff7cfde1995bd77de808fd27053bc87"
version = "v0.7.0"
[[projects]]
branch = "master"
digest = "1:3565a93b7692277a5dea355bc47bd6315754f3246ed07a224be6aec28972a805"
name = "google.golang.org/genproto"
packages = [
"googleapis/api/httpbody",
"googleapis/rpc/status",
"protobuf/field_mask",
]
pruneopts = "UT"
revision = "eb59cef1c072c61ea4f7623910448d5e9c6a4455"
[[projects]]
digest = "1:e8800ddadd6bce3bc0c5ffd7bc55dbdddc6e750956c10cc10271cade542fccbe"
name = "google.golang.org/grpc"
packages = [
".",
"balancer",
"balancer/base",
"balancer/roundrobin",
"binarylog/grpc_binarylog_v1",
"codes",
"connectivity",
"credentials",
"credentials/internal",
"encoding",
"encoding/proto",
"grpclog",
"internal",
"internal/backoff",
"internal/balancerload",
"internal/binarylog",
"internal/channelz",
"internal/envconfig",
"internal/grpcrand",
"internal/grpcsync",
"internal/syscall",
"internal/transport",
"keepalive",
"metadata",
"naming",
"peer",
"resolver",
"resolver/dns",
"resolver/passthrough",
"stats",
"status",
"tap",
]
pruneopts = "UT"
revision = "501c41df7f472c740d0674ff27122f3f48c80ce7"
version = "v1.21.1"
[[projects]]
branch = "v1"
digest = "1:dcb51660fc1fd7bfa3f45305db912fa587c12c17658fd66b3ab55339b59ffbe6"
name = "gopkg.in/check.v1"
packages = ["."]
pruneopts = "UT"
revision = "20d25e2804050c1cd24a7eea1e7a6447dd0e74ec"
[solve-meta]
analyzer-name = "dep"
analyzer-version = 1
input-imports = [
"github.com/Azure/azure-pipeline-go/pipeline",
"github.com/Azure/go-autorest/autorest/adal",
"gopkg.in/check.v1",
]
solver-name = "gps-cdcl"
solver-version = 1

View file

@ -1,38 +0,0 @@
# Gopkg.toml example
#
# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md
# for detailed Gopkg.toml documentation.
#
# required = ["github.com/user/thing/cmd/thing"]
# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"]
#
# [[constraint]]
# name = "github.com/user/project"
# version = "1.0.0"
#
# [[constraint]]
# name = "github.com/user/project2"
# branch = "dev"
# source = "github.com/myfork/project2"
#
# [[override]]
# name = "github.com/x/y"
# version = "2.4.0"
#
# [prune]
# non-go = false
# go-tests = true
# unused-packages = true
[[constraint]]
name = "github.com/Azure/azure-pipeline-go"
version = "0.2.1"
[[constraint]]
branch = "v1"
name = "gopkg.in/check.v1"
[prune]
go-tests = true
unused-packages = true

View file

@ -17,7 +17,7 @@ import (
// This allows us to provide a local implementation that fakes the server for hermetic testing.
type blockWriter interface {
StageBlock(context.Context, string, io.ReadSeeker, LeaseAccessConditions, []byte) (*BlockBlobStageBlockResponse, error)
CommitBlockList(context.Context, []string, BlobHTTPHeaders, Metadata, BlobAccessConditions) (*BlockBlobCommitBlockListResponse, error)
CommitBlockList(context.Context, []string, BlobHTTPHeaders, Metadata, BlobAccessConditions, AccessTierType, BlobTagsMap) (*BlockBlobCommitBlockListResponse, error)
}
// copyFromReader copies a source io.Reader to blob storage using concurrent uploads.
@ -201,7 +201,7 @@ func (c *copier) close() error {
}
var err error
c.result, err = c.to.CommitBlockList(c.ctx, c.id.issued(), c.o.BlobHTTPHeaders, c.o.Metadata, c.o.AccessConditions)
c.result, err = c.to.CommitBlockList(c.ctx, c.id.issued(), c.o.BlobHTTPHeaders, c.o.Metadata, c.o.AccessConditions, c.o.BlobAccessTier, c.o.BlobTagsMap)
return err
}

View file

@ -58,7 +58,7 @@ func (f *fakeBlockWriter) StageBlock(ctx context.Context, blockID string, r io.R
return &BlockBlobStageBlockResponse{}, nil
}
func (f *fakeBlockWriter) CommitBlockList(ctx context.Context, blockIDs []string, headers BlobHTTPHeaders, meta Metadata, access BlobAccessConditions) (*BlockBlobCommitBlockListResponse, error) {
func (f *fakeBlockWriter) CommitBlockList(ctx context.Context, blockIDs []string, headers BlobHTTPHeaders, meta Metadata, access BlobAccessConditions, tier AccessTierType, blobTagsMap BlobTagsMap) (*BlockBlobCommitBlockListResponse, error) {
dst, err := os.OpenFile(filepath.Join(f.path, finalFileName), os.O_CREATE+os.O_WRONLY, 0600)
if err != nil {
return nil, err

1
azblob/common_utils.go Normal file
View file

@ -0,0 +1 @@
package azblob

View file

@ -55,6 +55,12 @@ type UploadToBlockBlobOptions struct {
// AccessConditions indicates the access conditions for the block blob.
AccessConditions BlobAccessConditions
// BlobAccessTier indicates the tier of blob
BlobAccessTier AccessTierType
// BlobTagsMap contains the user-defined tags to associate with the blob
BlobTagsMap BlobTagsMap
// Parallelism indicates the maximum number of blocks to upload in parallel (0=default)
Parallelism uint16
}
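
The two new option fields are forwarded to Upload and CommitBlockList in the hunks below. A hedged sketch of setting them on a buffer upload; the block size, parallelism, tier, and tag values are arbitrary examples, not recommendations:

```go
package blobexample

import (
	"context"

	"github.com/Azure/azure-storage-blob-go/azblob"
)

// uploadBufferWithOptions shows the two new UploadToBlockBlobOptions fields.
// The remaining fields behave as before.
func uploadBufferWithOptions(ctx context.Context, data []byte, blob azblob.BlockBlobURL) error {
	_, err := azblob.UploadBufferToBlockBlob(ctx, data, blob, azblob.UploadToBlockBlobOptions{
		BlockSize:      4 * 1024 * 1024,
		Parallelism:    4,
		BlobAccessTier: azblob.AccessTierHot,                          // new in this change
		BlobTagsMap:    azblob.BlobTagsMap{"source": "buffer-upload"}, // new in this change
	})
	return err
}
```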
@ -86,7 +92,7 @@ func UploadBufferToBlockBlob(ctx context.Context, b []byte,
if o.Progress != nil {
body = pipeline.NewRequestBodyProgress(body, o.Progress)
}
return blockBlobURL.Upload(ctx, body, o.BlobHTTPHeaders, o.Metadata, o.AccessConditions)
return blockBlobURL.Upload(ctx, body, o.BlobHTTPHeaders, o.Metadata, o.AccessConditions, o.BlobAccessTier, o.BlobTagsMap)
}
var numBlocks = uint16(((bufferSize - 1) / o.BlockSize) + 1)
@ -130,7 +136,7 @@ func UploadBufferToBlockBlob(ctx context.Context, b []byte,
return nil, err
}
// All put blocks were successful, call Put Block List to finalize the blob
return blockBlobURL.CommitBlockList(ctx, blockIDList, o.BlobHTTPHeaders, o.Metadata, o.AccessConditions)
return blockBlobURL.CommitBlockList(ctx, blockIDList, o.BlobHTTPHeaders, o.Metadata, o.AccessConditions, o.BlobAccessTier, o.BlobTagsMap)
}
// UploadFileToBlockBlob uploads a file in blocks to a block blob.
@ -363,6 +369,8 @@ type UploadStreamToBlockBlobOptions struct {
BlobHTTPHeaders BlobHTTPHeaders
Metadata Metadata
AccessConditions BlobAccessConditions
BlobAccessTier AccessTierType
BlobTagsMap BlobTagsMap
}
func (u *UploadStreamToBlockBlobOptions) defaults() {

View file

@ -1,6 +1,7 @@
package azblob
import (
"errors"
"net"
"net/url"
"strings"
@ -8,6 +9,7 @@ import (
const (
snapshot = "snapshot"
versionId = "versionid"
SnapshotTimeFormat = "2006-01-02T15:04:05.0000000Z07:00"
)
@ -23,6 +25,7 @@ type BlobURLParts struct {
Snapshot string // "" if not a snapshot
SAS SASQueryParameters
UnparsedParams string
VersionID string // "" if not versioning enabled
}
// IPEndpointStyleInfo is used for IP endpoint style URL when working with Azure storage emulator.
@ -85,12 +88,20 @@ func NewBlobURLParts(u url.URL) BlobURLParts {
// Convert the query parameters to a case-sensitive map & trim whitespace
paramsMap := u.Query()
up.Snapshot = "" // Assume no snapshot
up.Snapshot = "" // Assume no snapshot
up.VersionID = "" // Assume no versionID
if snapshotStr, ok := caseInsensitiveValues(paramsMap).Get(snapshot); ok {
up.Snapshot = snapshotStr[0]
// If we recognized the query parameter, remove it from the map
delete(paramsMap, snapshot)
}
if versionIDs, ok := caseInsensitiveValues(paramsMap).Get(versionId); ok {
up.VersionID = versionIDs[0]
// If we recognized the query parameter, remove it from the map
delete(paramsMap, versionId) // delete "versionid" from paramsMap
delete(paramsMap, "versionId") // delete "versionId" from paramsMap
}
up.SAS = newSASQueryParameters(paramsMap, true)
up.UnparsedParams = paramsMap.Encode()
return up
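
A small sketch of how the new VersionID field behaves when parsing and rebuilding a blob URL; the account, container, blob name, and version timestamp are placeholders:

```go
package blobexample

import (
	"fmt"
	"net/url"

	"github.com/Azure/azure-storage-blob-go/azblob"
)

// splitVersionedURL parses a URL carrying a versionid query parameter and then
// rebuilds the base-blob URL by clearing VersionID.
func splitVersionedURL() {
	raw, _ := url.Parse("https://myaccount.blob.core.windows.net/mycontainer/myblob?versionid=2020-10-28T07:00:00.0000000Z")
	parts := azblob.NewBlobURLParts(*raw)
	fmt.Println(parts.BlobName, parts.VersionID) // blob name and its version id

	parts.VersionID = "" // drop the version to address the base blob again
	base := parts.URL()
	fmt.Println(base.String())
}
```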
@ -124,6 +135,11 @@ func (up BlobURLParts) URL() url.URL {
rawQuery := up.UnparsedParams
// Check: Both snapshot and version id cannot be present in the request URL.
if up.Snapshot != "" && up.VersionID != "" {
errors.New("Snapshot and versioning cannot be enabled simultaneously")
}
//If no snapshot is initially provided, fill it in from the SAS query properties to help the user
if up.Snapshot == "" && !up.SAS.snapshotTime.IsZero() {
up.Snapshot = up.SAS.snapshotTime.Format(SnapshotTimeFormat)
@ -136,6 +152,15 @@ func (up BlobURLParts) URL() url.URL {
}
rawQuery += snapshot + "=" + up.Snapshot
}
// Concatenate blob version id query parameter (if it exists)
if up.VersionID != "" {
if len(rawQuery) > 0 {
rawQuery += "&"
}
rawQuery += versionId + "=" + up.VersionID
}
sas := up.SAS.Encode()
if sas != "" {
if len(rawQuery) > 0 {

View file

@ -44,6 +44,14 @@ func (v BlobSASSignatureValues) NewSASQueryParameters(credential StorageAccountC
return SASQueryParameters{}, err
}
v.Permissions = perms.String()
} else if v.Version != "" {
resource = "bv"
//Make sure the permission characters are in the correct order
perms := &BlobSASPermissions{}
if err := perms.Parse(v.Permissions); err != nil {
return SASQueryParameters{}, err
}
v.Permissions = perms.String()
} else if v.BlobName == "" {
// Make sure the permission characters are in the correct order
perms := &ContainerSASPermissions{}
@ -155,7 +163,7 @@ func getCanonicalName(account string, containerName string, blobName string) str
// The ContainerSASPermissions type simplifies creating the permissions string for an Azure Storage container SAS.
// Initialize an instance of this type and then call its String method to set BlobSASSignatureValues's Permissions field.
type ContainerSASPermissions struct {
Read, Add, Create, Write, Delete, List bool
Read, Add, Create, Write, Delete, DeletePreviousVersion, List, Tag bool
}
// String produces the SAS permissions string for an Azure Storage container.
@ -177,9 +185,15 @@ func (p ContainerSASPermissions) String() string {
if p.Delete {
b.WriteRune('d')
}
if p.DeletePreviousVersion {
b.WriteRune('x')
}
if p.List {
b.WriteRune('l')
}
if p.Tag {
b.WriteRune('t')
}
return b.String()
}
@ -198,10 +212,14 @@ func (p *ContainerSASPermissions) Parse(s string) error {
p.Write = true
case 'd':
p.Delete = true
case 'x':
p.DeletePreviousVersion = true
case 'l':
p.List = true
case 't':
p.Tag = true
default:
return fmt.Errorf("Invalid permission: '%v'", r)
return fmt.Errorf("invalid permission: '%v'", r)
}
}
return nil
@ -209,7 +227,7 @@ func (p *ContainerSASPermissions) Parse(s string) error {
// The BlobSASPermissions type simplifies creating the permissions string for an Azure Storage blob SAS.
// Initialize an instance of this type and then call its String method to set BlobSASSignatureValues's Permissions field.
type BlobSASPermissions struct{ Read, Add, Create, Write, Delete bool }
type BlobSASPermissions struct{ Read, Add, Create, Write, Delete, DeletePreviousVersion, Tag bool }
// String produces the SAS permissions string for an Azure Storage blob.
// Call this method to set BlobSASSignatureValues's Permissions field.
@ -230,6 +248,12 @@ func (p BlobSASPermissions) String() string {
if p.Delete {
b.WriteRune('d')
}
if p.DeletePreviousVersion {
b.WriteRune('x')
}
if p.Tag {
b.WriteRune('t')
}
return b.String()
}
@ -248,8 +272,12 @@ func (p *BlobSASPermissions) Parse(s string) error {
p.Write = true
case 'd':
p.Delete = true
case 'x':
p.DeletePreviousVersion = true
case 't':
p.Tag = true
default:
return fmt.Errorf("Invalid permission: '%v'", r)
return fmt.Errorf("invalid permission: '%v'", r)
}
}
return nil
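
A short sketch of the new permission letters in use ('x' for DeletePreviousVersion, 't' for Tag); the exact permission combination is arbitrary:

```go
package blobexample

import (
	"fmt"

	"github.com/Azure/azure-storage-blob-go/azblob"
)

// sasPermissionsExample builds a blob-level permission string containing the
// new letters and parses a container-level string back into struct fields.
func sasPermissionsExample() {
	perms := azblob.BlobSASPermissions{
		Read:                  true,
		DeletePreviousVersion: true,
		Tag:                   true,
	}
	fmt.Println(perms.String()) // "rxt" given the ordering enforced by String()

	var parsed azblob.ContainerSASPermissions
	if err := parsed.Parse("racwdxlt"); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(parsed.DeletePreviousVersion, parsed.Tag) // true true
}
```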

View file

@ -42,19 +42,33 @@ func (ab AppendBlobURL) WithSnapshot(snapshot string) AppendBlobURL {
return NewAppendBlobURL(p.URL(), ab.blobClient.Pipeline())
}
// WithVersionID creates a new AppendBlobURL object identical to the source but with the specified version id.
// Pass "" to remove the snapshot returning a URL to the base blob.
func (ab AppendBlobURL) WithVersionID(versionId string) AppendBlobURL {
p := NewBlobURLParts(ab.URL())
p.VersionID = versionId
return NewAppendBlobURL(p.URL(), ab.blobClient.Pipeline())
}
func (ab AppendBlobURL) GetAccountInfo(ctx context.Context) (*BlobGetAccountInfoResponse, error) {
return ab.blobClient.GetAccountInfo(ctx)
}
// Create creates a 0-length append blob. Call AppendBlock to append data to an append blob.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-blob.
func (ab AppendBlobURL) Create(ctx context.Context, h BlobHTTPHeaders, metadata Metadata, ac BlobAccessConditions) (*AppendBlobCreateResponse, error) {
func (ab AppendBlobURL) Create(ctx context.Context, h BlobHTTPHeaders, metadata Metadata, ac BlobAccessConditions, blobTagsMap BlobTagsMap) (*AppendBlobCreateResponse, error) {
ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch := ac.ModifiedAccessConditions.pointers()
blobTagsString := SerializeBlobTagsHeader(blobTagsMap)
return ab.abClient.Create(ctx, 0, nil,
&h.ContentType, &h.ContentEncoding, &h.ContentLanguage, h.ContentMD5,
&h.CacheControl, metadata, ac.LeaseAccessConditions.pointers(), &h.ContentDisposition,
nil, nil, EncryptionAlgorithmNone, // CPK
ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, nil)
nil, nil, EncryptionAlgorithmNone, // CPK-V
nil, // CPK-N
ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch,
nil, // Blob ifTags
nil,
blobTagsString, // Blob tags
)
}
// AppendBlock writes a stream as a new block of data at the end of the existing append blob.
@ -74,7 +88,10 @@ func (ab AppendBlobURL) AppendBlock(ctx context.Context, body io.ReadSeeker, ac
ac.LeaseAccessConditions.pointers(),
ifMaxSizeLessThanOrEqual, ifAppendPositionEqual,
nil, nil, EncryptionAlgorithmNone, // CPK
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
nil, // CPK-N
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag,
nil, // Blob ifTags
nil)
}
// AppendBlockFromURL copies a new block of data from source URL to the end of the existing append blob.
@ -86,9 +103,12 @@ func (ab AppendBlobURL) AppendBlockFromURL(ctx context.Context, sourceURL url.UR
return ab.abClient.AppendBlockFromURL(ctx, sourceURL.String(), 0, httpRange{offset: offset, count: count}.pointers(),
transactionalMD5, nil, nil, nil,
nil, nil, EncryptionAlgorithmNone, // CPK
nil, // CPK-N
destinationAccessConditions.LeaseAccessConditions.pointers(),
ifMaxSizeLessThanOrEqual, ifAppendPositionEqual,
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatchETag, sourceIfNoneMatchETag, nil)
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag,
nil, // Blob ifTags
sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatchETag, sourceIfNoneMatchETag, nil)
}
type AppendBlobAccessConditions struct {

View file

@ -2,9 +2,9 @@ package azblob
import (
"context"
"net/url"
"github.com/Azure/azure-pipeline-go/pipeline"
"net/url"
"strings"
)
// A BlobURL represents a URL to an Azure Storage blob; the blob may be a block blob, append blob, or page blob.
@ -12,6 +12,11 @@ type BlobURL struct {
blobClient blobClient
}
type BlobTagsMap map[string]string
var DefaultAccessTier AccessTierType = AccessTierNone
var DefaultPremiumBlobAccessTier PremiumPageBlobAccessTierType = PremiumPageBlobAccessTierNone
// NewBlobURL creates a BlobURL object using the specified URL and request policy pipeline.
func NewBlobURL(url url.URL, p pipeline.Pipeline) BlobURL {
blobClient := newBlobClient(url, p)
@ -46,6 +51,14 @@ func (b BlobURL) WithSnapshot(snapshot string) BlobURL {
return NewBlobURL(p.URL(), b.blobClient.Pipeline())
}
// WithVersionID creates a new BlobURL object identical to the source but with the specified version id.
// Pass "" to remove the snapshot returning a URL to the base blob.
func (b BlobURL) WithVersionID(versionID string) BlobURL {
p := NewBlobURLParts(b.URL())
p.VersionID = versionID
return NewBlobURL(p.URL(), b.blobClient.Pipeline())
}
// ToAppendBlobURL creates an AppendBlobURL using the source's URL and pipeline.
func (b BlobURL) ToAppendBlobURL() AppendBlobURL {
return NewAppendBlobURL(b.URL(), b.blobClient.Pipeline())
@ -61,8 +74,35 @@ func (b BlobURL) ToPageBlobURL() PageBlobURL {
return NewPageBlobURL(b.URL(), b.blobClient.Pipeline())
}
func SerializeBlobTagsHeader(blobTagsMap BlobTagsMap) *string {
if blobTagsMap == nil {
return nil
}
tags := make([]string, 0)
for key, val := range blobTagsMap {
tags = append(tags, url.QueryEscape(key)+"="+url.QueryEscape(val))
}
//tags = tags[:len(tags)-1]
blobTagsString := strings.Join(tags, "&")
return &blobTagsString
}
func SerializeBlobTags(blobTagsMap BlobTagsMap) BlobTags {
if blobTagsMap == nil {
return BlobTags{}
}
blobTagSet := make([]BlobTag, 0, len(blobTagsMap))
for key, val := range blobTagsMap {
blobTagSet = append(blobTagSet, BlobTag{Key: key, Value: val})
}
return BlobTags{BlobTagSet: blobTagSet}
}
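
For illustration, the two helpers above produce the header string and the XML body shape sketched below; the tag values are made up, and the pair order in the header string depends on map iteration order:

```go
package blobexample

import (
	"fmt"

	"github.com/Azure/azure-storage-blob-go/azblob"
)

// tagSerializationExample shows the two serialization shapes used internally:
// a query-escaped "key=value&key=value" header string for create/upload/copy
// calls, and the BlobTags structure sent as the body of Set Blob Tags.
func tagSerializationExample() {
	tags := azblob.BlobTagsMap{"env": "dev", "team": "storage"}

	if hdr := azblob.SerializeBlobTagsHeader(tags); hdr != nil {
		fmt.Println(*hdr) // e.g. "env=dev&team=storage"
	}

	body := azblob.SerializeBlobTags(tags)
	fmt.Println(len(body.BlobTagSet)) // 2
}
```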
// DownloadBlob reads a range of bytes from a blob. The response also includes the blob's properties and metadata.
// Passing azblob.CountToEnd (0) for count will download the blob from the offset to the end.
// Note: Snapshot/VersionId are optional parameters which are part of request URL query params.
// These parameters can be explicitly set by calling WithSnapshot(snapshot string)/WithVersionID(versionID string)
// Therefore it is not required to pass these here.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-blob.
func (b BlobURL) Download(ctx context.Context, offset int64, count int64, ac BlobAccessConditions, rangeGetContentMD5 bool) (*DownloadResponse, error) {
var xRangeGetContentMD5 *bool
@ -70,11 +110,13 @@ func (b BlobURL) Download(ctx context.Context, offset int64, count int64, ac Blo
xRangeGetContentMD5 = &rangeGetContentMD5
}
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
dr, err := b.blobClient.Download(ctx, nil, nil,
dr, err := b.blobClient.Download(ctx, nil, nil, nil,
httpRange{offset: offset, count: count}.pointers(),
ac.LeaseAccessConditions.pointers(), xRangeGetContentMD5, nil,
nil, nil, EncryptionAlgorithmNone, // CPK
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag,
nil, // Blob ifTags
nil)
if err != nil {
return nil, err
}
@ -87,12 +129,32 @@ func (b BlobURL) Download(ctx context.Context, offset int64, count int64, ac Blo
}
// DeleteBlob marks the specified blob or snapshot for deletion. The blob is later deleted during garbage collection.
// Note that deleting a blob also deletes all its snapshots.
// Note 1: Deleting a blob also deletes all its snapshots.
// Note 2: Snapshot/VersionId are optional parameters which are part of request URL query params.
// These parameters can be explicitly set by calling WithSnapshot(snapshot string)/WithVersionID(versionID string)
// Therefore it is not required to pass these here.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/delete-blob.
func (b BlobURL) Delete(ctx context.Context, deleteOptions DeleteSnapshotsOptionType, ac BlobAccessConditions) (*BlobDeleteResponse, error) {
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
return b.blobClient.Delete(ctx, nil, nil, ac.LeaseAccessConditions.pointers(), deleteOptions,
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
return b.blobClient.Delete(ctx, nil, nil, nil, ac.LeaseAccessConditions.pointers(), deleteOptions,
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag,
nil, // Blob ifTags
nil)
}
// The Set Tags operation enables users to set tags on a blob or specific blob version, but not snapshot.
// Each call to this operation replaces all existing tags attached to the blob.
// To remove all tags from the blob, call this operation with no tags set.
// https://docs.microsoft.com/en-us/rest/api/storageservices/set-blob-tags
func (b BlobURL) SetTags(ctx context.Context, timeout *int32, versionID *string, transactionalContentMD5 []byte, transactionalContentCrc64 []byte, requestID *string, ifTags *string, blobTagsMap BlobTagsMap) (*BlobSetTagsResponse, error) {
tags := SerializeBlobTags(blobTagsMap)
return b.blobClient.SetTags(ctx, timeout, versionID, transactionalContentMD5, transactionalContentCrc64, requestID, ifTags, &tags)
}
// The Get Tags operation enables users to get tags on a blob or specific blob version, or snapshot.
// https://docs.microsoft.com/en-us/rest/api/storageservices/get-blob-tags
func (b BlobURL) GetTags(ctx context.Context, timeout *int32, requestID *string, snapshot *string, versionID *string, ifTags *string) (*BlobTags, error) {
return b.blobClient.GetTags(ctx, timeout, requestID, snapshot, versionID, ifTags)
}
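
A hedged sketch of tagging a specific version with these two methods; the version ID is assumed to come from an earlier upload or listing response, and the tag value is illustrative:

```go
package blobexample

import (
	"context"

	"github.com/Azure/azure-storage-blob-go/azblob"
)

// retagPreviousVersion replaces all tags on one blob version and reads them
// back. Passing nil for the optional parameters accepts the service defaults.
func retagPreviousVersion(ctx context.Context, blob azblob.BlobURL, versionID string) (*azblob.BlobTags, error) {
	tags := azblob.BlobTagsMap{"state": "archived"} // replaces any existing tags on that version
	if _, err := blob.SetTags(ctx, nil, &versionID, nil, nil, nil, nil, tags); err != nil {
		return nil, err
	}
	return blob.GetTags(ctx, nil, nil, nil, &versionID, nil)
}
```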
// Undelete restores the contents and metadata of a soft-deleted blob and any associated soft-deleted snapshots.
@ -101,23 +163,33 @@ func (b BlobURL) Undelete(ctx context.Context) (*BlobUndeleteResponse, error) {
return b.blobClient.Undelete(ctx, nil, nil)
}
// SetTier operation sets the tier on a blob. The operation is allowed on a page
// blob in a premium storage account and on a block blob in a blob storage account (locally
// redundant storage only). A premium page blob's tier determines the allowed size, IOPS, and
// bandwidth of the blob. A block blob's tier determines Hot/Cool/Archive storage type. This operation
// does not update the blob's ETag.
// SetTier operation sets the tier on a blob. The operation is allowed on a page blob in a premium storage account
// and on a block blob in a blob storage account (locally redundant storage only).
// A premium page blob's tier determines the allowed size, IOPS, and bandwidth of the blob.
// A block blob's tier determines Hot/Cool/Archive storage type. This operation does not update the blob's ETag.
// Note: VersionId is an optional parameter which is part of request URL query params.
// It can be explicitly set by calling the WithVersionID(versionID string) function, hence it is not required to pass it here.
// For detailed information about block blob level tiering see https://docs.microsoft.com/en-us/azure/storage/blobs/storage-blob-storage-tiers.
func (b BlobURL) SetTier(ctx context.Context, tier AccessTierType, lac LeaseAccessConditions) (*BlobSetTierResponse, error) {
return b.blobClient.SetTier(ctx, tier, nil, RehydratePriorityNone, nil, lac.pointers())
return b.blobClient.SetTier(ctx, tier, nil,
nil, // Blob versioning
nil, RehydratePriorityNone, nil, lac.pointers())
}
// GetBlobProperties returns the blob's properties.
// Note: Snapshot/VersionId are optional parameters which are part of request URL query params.
// These parameters can be explicitly set by calling WithSnapshot(snapshot string)/WithVersionID(versionID string)
// Therefore it is not required to pass these here.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-blob-properties.
func (b BlobURL) GetProperties(ctx context.Context, ac BlobAccessConditions) (*BlobGetPropertiesResponse, error) {
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
return b.blobClient.GetProperties(ctx, nil, nil, ac.LeaseAccessConditions.pointers(),
return b.blobClient.GetProperties(ctx, nil,
nil, // Blob versioning
nil, ac.LeaseAccessConditions.pointers(),
nil, nil, EncryptionAlgorithmNone, // CPK
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag,
nil, // Blob ifTags
nil)
}
// SetBlobHTTPHeaders changes a blob's HTTP headers.
@ -127,6 +199,7 @@ func (b BlobURL) SetHTTPHeaders(ctx context.Context, h BlobHTTPHeaders, ac BlobA
return b.blobClient.SetHTTPHeaders(ctx, nil,
&h.CacheControl, &h.ContentType, h.ContentMD5, &h.ContentEncoding, &h.ContentLanguage,
ac.LeaseAccessConditions.pointers(), ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag,
nil, // Blob ifTags
&h.ContentDisposition, nil)
}
@ -135,8 +208,11 @@ func (b BlobURL) SetHTTPHeaders(ctx context.Context, h BlobHTTPHeaders, ac BlobA
func (b BlobURL) SetMetadata(ctx context.Context, metadata Metadata, ac BlobAccessConditions) (*BlobSetMetadataResponse, error) {
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
return b.blobClient.SetMetadata(ctx, nil, metadata, ac.LeaseAccessConditions.pointers(),
nil, nil, EncryptionAlgorithmNone, // CPK
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
nil, nil, EncryptionAlgorithmNone, // CPK-V
nil, // CPK-N
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag,
nil, // Blob ifTags
nil)
}
// CreateSnapshot creates a read-only snapshot of a blob.
@ -147,8 +223,11 @@ func (b BlobURL) CreateSnapshot(ctx context.Context, metadata Metadata, ac BlobA
// performance hit.
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
return b.blobClient.CreateSnapshot(ctx, nil, metadata,
nil, nil, EncryptionAlgorithmNone, // CPK
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, ac.LeaseAccessConditions.pointers(), nil)
nil, nil, EncryptionAlgorithmNone, // CPK-V
nil, // CPK-N
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag,
nil, // Blob ifTags
ac.LeaseAccessConditions.pointers(), nil)
}
// AcquireLease acquires a lease on the blob for write and delete operations. The lease duration must be between
@ -157,7 +236,9 @@ func (b BlobURL) CreateSnapshot(ctx context.Context, metadata Metadata, ac BlobA
func (b BlobURL) AcquireLease(ctx context.Context, proposedID string, duration int32, ac ModifiedAccessConditions) (*BlobAcquireLeaseResponse, error) {
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.pointers()
return b.blobClient.AcquireLease(ctx, nil, &duration, &proposedID,
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag,
nil, // Blob ifTags
nil)
}
// RenewLease renews the blob's previously-acquired lease.
@ -165,7 +246,9 @@ func (b BlobURL) AcquireLease(ctx context.Context, proposedID string, duration i
func (b BlobURL) RenewLease(ctx context.Context, leaseID string, ac ModifiedAccessConditions) (*BlobRenewLeaseResponse, error) {
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.pointers()
return b.blobClient.RenewLease(ctx, leaseID, nil,
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag,
nil, // Blob ifTags
nil)
}
// ReleaseLease releases the blob's previously-acquired lease.
@ -173,7 +256,9 @@ func (b BlobURL) RenewLease(ctx context.Context, leaseID string, ac ModifiedAcce
func (b BlobURL) ReleaseLease(ctx context.Context, leaseID string, ac ModifiedAccessConditions) (*BlobReleaseLeaseResponse, error) {
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.pointers()
return b.blobClient.ReleaseLease(ctx, leaseID, nil,
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag,
nil, // Blob ifTags
nil)
}
// BreakLease breaks the blob's previously-acquired lease (if it exists). Pass the LeaseBreakDefault (-1)
@ -182,7 +267,9 @@ func (b BlobURL) ReleaseLease(ctx context.Context, leaseID string, ac ModifiedAc
func (b BlobURL) BreakLease(ctx context.Context, breakPeriodInSeconds int32, ac ModifiedAccessConditions) (*BlobBreakLeaseResponse, error) {
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.pointers()
return b.blobClient.BreakLease(ctx, nil, leasePeriodPointer(breakPeriodInSeconds),
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag,
nil, // Blob ifTags
nil)
}
// ChangeLease changes the blob's lease ID.
@ -190,7 +277,9 @@ func (b BlobURL) BreakLease(ctx context.Context, breakPeriodInSeconds int32, ac
func (b BlobURL) ChangeLease(ctx context.Context, leaseID string, proposedID string, ac ModifiedAccessConditions) (*BlobChangeLeaseResponse, error) {
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.pointers()
return b.blobClient.ChangeLease(ctx, leaseID, proposedID,
nil, ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
nil, ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag,
nil, // Blob ifTags
nil)
}
// LeaseBreakNaturally tells ContainerURL's or BlobURL's BreakLease method to break the lease using service semantics.
@ -205,17 +294,22 @@ func leasePeriodPointer(period int32) (p *int32) {
// StartCopyFromURL copies the data at the source URL to a blob.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/copy-blob.
func (b BlobURL) StartCopyFromURL(ctx context.Context, source url.URL, metadata Metadata, srcac ModifiedAccessConditions, dstac BlobAccessConditions) (*BlobStartCopyFromURLResponse, error) {
func (b BlobURL) StartCopyFromURL(ctx context.Context, source url.URL, metadata Metadata, srcac ModifiedAccessConditions, dstac BlobAccessConditions, tier AccessTierType, blobTagsMap BlobTagsMap) (*BlobStartCopyFromURLResponse, error) {
srcIfModifiedSince, srcIfUnmodifiedSince, srcIfMatchETag, srcIfNoneMatchETag := srcac.pointers()
dstIfModifiedSince, dstIfUnmodifiedSince, dstIfMatchETag, dstIfNoneMatchETag := dstac.ModifiedAccessConditions.pointers()
dstLeaseID := dstac.LeaseAccessConditions.pointers()
blobTagsString := SerializeBlobTagsHeader(blobTagsMap)
return b.blobClient.StartCopyFromURL(ctx, source.String(), nil, metadata,
AccessTierNone, RehydratePriorityNone, srcIfModifiedSince, srcIfUnmodifiedSince,
tier, RehydratePriorityNone, srcIfModifiedSince, srcIfUnmodifiedSince,
srcIfMatchETag, srcIfNoneMatchETag,
nil, // source ifTags
dstIfModifiedSince, dstIfUnmodifiedSince,
dstIfMatchETag, dstIfNoneMatchETag,
dstLeaseID, nil)
nil, // Blob ifTags
dstLeaseID,
nil,
blobTagsString, // Blob tags
nil)
}
// AbortCopyFromURL stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.

View file

@ -13,7 +13,7 @@ const (
BlockBlobMaxUploadBlobBytes = 256 * 1024 * 1024 // 256MB
// BlockBlobMaxStageBlockBytes indicates the maximum number of bytes that can be sent in a call to StageBlock.
BlockBlobMaxStageBlockBytes = 100 * 1024 * 1024 // 100MB
BlockBlobMaxStageBlockBytes = 4000 * 1024 * 1024 // 4000MiB
// BlockBlobMaxBlocks indicates the maximum number of blocks allowed in a block blob.
BlockBlobMaxBlocks = 50000
@ -45,6 +45,14 @@ func (bb BlockBlobURL) WithSnapshot(snapshot string) BlockBlobURL {
return NewBlockBlobURL(p.URL(), bb.blobClient.Pipeline())
}
// WithVersionID creates a new BlockBlobURL object identical to the source but with the specified version id.
// Pass "" to remove the version ID, returning a URL to the base blob.
func (bb BlockBlobURL) WithVersionID(versionId string) BlockBlobURL {
p := NewBlobURLParts(bb.URL())
p.VersionID = versionId
return NewBlockBlobURL(p.URL(), bb.blobClient.Pipeline())
}
func (bb BlockBlobURL) GetAccountInfo(ctx context.Context) (*BlobGetAccountInfoResponse, error) {
return bb.blobClient.GetAccountInfo(ctx)
}
@ -56,18 +64,23 @@ func (bb BlockBlobURL) GetAccountInfo(ctx context.Context) (*BlobGetAccountInfoR
// This method panics if the stream is not at position 0.
// Note that the http client closes the body stream after the request is sent to the service.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-blob.
func (bb BlockBlobURL) Upload(ctx context.Context, body io.ReadSeeker, h BlobHTTPHeaders, metadata Metadata, ac BlobAccessConditions) (*BlockBlobUploadResponse, error) {
func (bb BlockBlobURL) Upload(ctx context.Context, body io.ReadSeeker, h BlobHTTPHeaders, metadata Metadata, ac BlobAccessConditions, tier AccessTierType, blobTagsMap BlobTagsMap) (*BlockBlobUploadResponse, error) {
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
count, err := validateSeekableStreamAt0AndGetCount(body)
blobTagsString := SerializeBlobTagsHeader(blobTagsMap)
if err != nil {
return nil, err
}
return bb.bbClient.Upload(ctx, body, count, nil, nil,
&h.ContentType, &h.ContentEncoding, &h.ContentLanguage, h.ContentMD5,
&h.CacheControl, metadata, ac.LeaseAccessConditions.pointers(), &h.ContentDisposition,
nil, nil, EncryptionAlgorithmNone, // CPK
AccessTierNone, ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag,
nil)
nil, nil, EncryptionAlgorithmNone, // CPK-V
nil, // CPK-N
tier, ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag,
nil, // Blob ifTags
nil,
blobTagsString, // Blob tags
)
}
// StageBlock uploads the specified block to the block blob's "staging area" to be later committed by a call to CommitBlockList.
@ -79,7 +92,8 @@ func (bb BlockBlobURL) StageBlock(ctx context.Context, base64BlockID string, bod
return nil, err
}
return bb.bbClient.StageBlock(ctx, base64BlockID, count, body, transactionalMD5, nil, nil, ac.pointers(),
nil, nil, EncryptionAlgorithmNone, // CPK
nil, nil, EncryptionAlgorithmNone, // CPK-V
nil, // CPK-N
nil)
}
@ -90,6 +104,7 @@ func (bb BlockBlobURL) StageBlockFromURL(ctx context.Context, base64BlockID stri
sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatchETag, sourceIfNoneMatchETag := sourceAccessConditions.pointers()
return bb.bbClient.StageBlockFromURL(ctx, base64BlockID, 0, sourceURL.String(), httpRange{offset: offset, count: count}.pointers(), nil, nil, nil,
nil, nil, EncryptionAlgorithmNone, // CPK
nil, // CPK-N
destinationAccessConditions.pointers(), sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatchETag, sourceIfNoneMatchETag, nil)
}
@ -99,36 +114,46 @@ func (bb BlockBlobURL) StageBlockFromURL(ctx context.Context, base64BlockID stri
// by uploading only those blocks that have changed, then committing the new and existing
// blocks together. Any blocks not specified in the block list are permanently deleted.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-block-list.
func (bb BlockBlobURL) CommitBlockList(ctx context.Context, base64BlockIDs []string, h BlobHTTPHeaders,
metadata Metadata, ac BlobAccessConditions) (*BlockBlobCommitBlockListResponse, error) {
func (bb BlockBlobURL) CommitBlockList(ctx context.Context, base64BlockIDs []string, h BlobHTTPHeaders, metadata Metadata, ac BlobAccessConditions, tier AccessTierType, blobTagsMap BlobTagsMap) (*BlockBlobCommitBlockListResponse, error) {
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
blobTagsString := SerializeBlobTagsHeader(blobTagsMap)
return bb.bbClient.CommitBlockList(ctx, BlockLookupList{Latest: base64BlockIDs}, nil,
&h.CacheControl, &h.ContentType, &h.ContentEncoding, &h.ContentLanguage, h.ContentMD5, nil, nil,
metadata, ac.LeaseAccessConditions.pointers(), &h.ContentDisposition,
nil, nil, EncryptionAlgorithmNone, // CPK
AccessTierNone,
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
nil, // CPK-N
tier,
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag,
nil, // Blob ifTags
nil,
blobTagsString, // Blob tags
)
}
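
A sketch of the low-level StageBlock/CommitBlockList flow with the new tier and tags arguments; the block-ID scheme, tier, and tag values are illustrative assumptions, not a prescribed pattern:

```go
package blobexample

import (
	"context"
	"encoding/base64"
	"fmt"
	"strings"

	"github.com/Azure/azure-storage-blob-go/azblob"
)

// stageAndCommit stages each chunk as a block and commits the list with the
// tier and tags applied at commit time. Block IDs must be base64 strings of
// equal length; a fixed-width counter is used here only for illustration.
func stageAndCommit(ctx context.Context, blob azblob.BlockBlobURL, chunks []string) error {
	ids := make([]string, 0, len(chunks))
	for i, chunk := range chunks {
		id := base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf("block-%06d", i)))
		if _, err := blob.StageBlock(ctx, id, strings.NewReader(chunk),
			azblob.LeaseAccessConditions{}, nil); err != nil {
			return err
		}
		ids = append(ids, id)
	}
	_, err := blob.CommitBlockList(ctx, ids, azblob.BlobHTTPHeaders{}, azblob.Metadata{},
		azblob.BlobAccessConditions{},
		azblob.AccessTierCool,                          // tier applied at commit time
		azblob.BlobTagsMap{"pipeline": "block-upload"}) // tags applied at commit time
	return err
}
```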
// GetBlockList returns the list of blocks that have been uploaded as part of a block blob using the specified block list filter.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-block-list.
func (bb BlockBlobURL) GetBlockList(ctx context.Context, listType BlockListType, ac LeaseAccessConditions) (*BlockList, error) {
return bb.bbClient.GetBlockList(ctx, listType, nil, nil, ac.pointers(), nil)
return bb.bbClient.GetBlockList(ctx, listType, nil, nil, ac.pointers(),
nil, // Blob ifTags
nil)
}
// CopyFromURL synchronously copies the data at the source URL to a block blob, with sizes up to 256 MB.
// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/copy-blob-from-url.
func (bb BlockBlobURL) CopyFromURL(ctx context.Context, source url.URL, metadata Metadata,
srcac ModifiedAccessConditions, dstac BlobAccessConditions, srcContentMD5 []byte) (*BlobCopyFromURLResponse, error) {
func (bb BlockBlobURL) CopyFromURL(ctx context.Context, source url.URL, metadata Metadata, srcac ModifiedAccessConditions, dstac BlobAccessConditions, srcContentMD5 []byte, tier AccessTierType, blobTagsMap BlobTagsMap) (*BlobCopyFromURLResponse, error) {
srcIfModifiedSince, srcIfUnmodifiedSince, srcIfMatchETag, srcIfNoneMatchETag := srcac.pointers()
dstIfModifiedSince, dstIfUnmodifiedSince, dstIfMatchETag, dstIfNoneMatchETag := dstac.ModifiedAccessConditions.pointers()
dstLeaseID := dstac.LeaseAccessConditions.pointers()
return bb.blobClient.CopyFromURL(ctx, source.String(), nil, metadata, AccessTierNone,
blobTagsString := SerializeBlobTagsHeader(blobTagsMap)
return bb.blobClient.CopyFromURL(ctx, source.String(), nil, metadata, tier,
srcIfModifiedSince, srcIfUnmodifiedSince,
srcIfMatchETag, srcIfNoneMatchETag,
dstIfModifiedSince, dstIfUnmodifiedSince,
dstIfMatchETag, dstIfNoneMatchETag,
dstLeaseID, nil, srcContentMD5)
nil, // Blob ifTags
dstLeaseID, nil, srcContentMD5,
blobTagsString, // Blob tags
nil, // seal Blob
)
}

View file

@ -84,7 +84,9 @@ func (c ContainerURL) NewPageBlobURL(blobName string) PageBlobURL {
// Create creates a new container within a storage account. If a container with the same name already exists, the operation fails.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/create-container.
func (c ContainerURL) Create(ctx context.Context, metadata Metadata, publicAccessType PublicAccessType) (*ContainerCreateResponse, error) {
return c.client.Create(ctx, nil, metadata, publicAccessType, nil)
return c.client.Create(ctx, nil, metadata, publicAccessType, nil,
nil, nil, // container encryption
)
}
// Delete marks the specified container for deletion. The container and any blobs contained within it are later deleted during garbage collection.
@ -273,7 +275,7 @@ func (o *ListBlobsSegmentOptions) pointers() (prefix *string, include []ListBlob
// BlobListingDetails indicates what additional information the service should return with each blob.
type BlobListingDetails struct {
Copy, Metadata, Snapshots, UncommittedBlobs, Deleted bool
Copy, Metadata, Snapshots, UncommittedBlobs, Deleted, Tags, Versions bool
}
// string produces the Include query parameter's value.
@ -295,5 +297,11 @@ func (d *BlobListingDetails) slice() []ListBlobsIncludeItemType {
if d.UncommittedBlobs {
items = append(items, ListBlobsIncludeItemUncommittedblobs)
}
if d.Tags {
items = append(items, ListBlobsIncludeItemTags)
}
if d.Versions {
items = append(items, ListBlobsIncludeItemVersions)
}
return items
}
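
A sketch of requesting the new listing details; only the blob name is printed because the generated model fields carrying version and tag information are not shown in this diff:

```go
package blobexample

import (
	"context"
	"fmt"

	"github.com/Azure/azure-storage-blob-go/azblob"
)

// listWithVersionsAndTags pages through a container listing with the new
// Versions and Tags details requested.
func listWithVersionsAndTags(ctx context.Context, container azblob.ContainerURL) error {
	for marker := (azblob.Marker{}); marker.NotDone(); {
		resp, err := container.ListBlobsFlatSegment(ctx, marker, azblob.ListBlobsSegmentOptions{
			Details: azblob.BlobListingDetails{Versions: true, Tags: true},
		})
		if err != nil {
			return err
		}
		for _, item := range resp.Segment.BlobItems {
			fmt.Println(item.Name)
		}
		marker = resp.NextMarker
	}
	return nil
}
```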

View file

@ -44,19 +44,33 @@ func (pb PageBlobURL) WithSnapshot(snapshot string) PageBlobURL {
return NewPageBlobURL(p.URL(), pb.blobClient.Pipeline())
}
// WithVersionID creates a new PageBlobURL object identical to the source but with the specified version id.
// Pass "" to remove the version ID, returning a URL to the base blob.
func (pb PageBlobURL) WithVersionID(versionId string) PageBlobURL {
p := NewBlobURLParts(pb.URL())
p.VersionID = versionId
return NewPageBlobURL(p.URL(), pb.blobClient.Pipeline())
}
func (pb PageBlobURL) GetAccountInfo(ctx context.Context) (*BlobGetAccountInfoResponse, error) {
return pb.blobClient.GetAccountInfo(ctx)
}
// Create creates a page blob of the specified length. Call PutPage to upload data to a page blob.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-blob.
func (pb PageBlobURL) Create(ctx context.Context, size int64, sequenceNumber int64, h BlobHTTPHeaders, metadata Metadata, ac BlobAccessConditions) (*PageBlobCreateResponse, error) {
func (pb PageBlobURL) Create(ctx context.Context, size int64, sequenceNumber int64, h BlobHTTPHeaders, metadata Metadata, ac BlobAccessConditions, tier PremiumPageBlobAccessTierType, blobTagsMap BlobTagsMap) (*PageBlobCreateResponse, error) {
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
return pb.pbClient.Create(ctx, 0, size, nil, PremiumPageBlobAccessTierNone,
blobTagsString := SerializeBlobTagsHeader(blobTagsMap)
return pb.pbClient.Create(ctx, 0, size, nil, tier,
&h.ContentType, &h.ContentEncoding, &h.ContentLanguage, h.ContentMD5, &h.CacheControl,
metadata, ac.LeaseAccessConditions.pointers(), &h.ContentDisposition,
nil, nil, EncryptionAlgorithmNone, // CPK
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, &sequenceNumber, nil)
nil, nil, EncryptionAlgorithmNone, // CPK-V
nil, // CPK-N
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag,
nil, // Blob ifTags
&sequenceNumber, nil,
blobTagsString, // Blob tags
)
}
// UploadPages writes 1 or more pages to the page blob. The start offset and the stream size must be a multiple of 512 bytes.
@ -74,8 +88,11 @@ func (pb PageBlobURL) UploadPages(ctx context.Context, offset int64, body io.Rea
PageRange{Start: offset, End: offset + count - 1}.pointers(),
ac.LeaseAccessConditions.pointers(),
nil, nil, EncryptionAlgorithmNone, // CPK
nil, // CPK-N
ifSequenceNumberLessThanOrEqual, ifSequenceNumberLessThan, ifSequenceNumberEqual,
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag,
nil, // Blob ifTags
nil)
}
// UploadPagesFromURL copies 1 or more pages from a source URL to the page blob.
@ -89,10 +106,13 @@ func (pb PageBlobURL) UploadPagesFromURL(ctx context.Context, sourceURL url.URL,
ifSequenceNumberLessThanOrEqual, ifSequenceNumberLessThan, ifSequenceNumberEqual := destinationAccessConditions.SequenceNumberAccessConditions.pointers()
return pb.pbClient.UploadPagesFromURL(ctx, sourceURL.String(), *PageRange{Start: sourceOffset, End: sourceOffset + count - 1}.pointers(), 0,
*PageRange{Start: destOffset, End: destOffset + count - 1}.pointers(), transactionalMD5, nil, nil,
nil, nil, EncryptionAlgorithmNone, // CPK
nil, nil, EncryptionAlgorithmNone, // CPK-V
nil, // CPK-N
destinationAccessConditions.LeaseAccessConditions.pointers(),
ifSequenceNumberLessThanOrEqual, ifSequenceNumberLessThan, ifSequenceNumberEqual,
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatchETag, sourceIfNoneMatchETag, nil)
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag,
nil, // Blob ifTags
sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatchETag, sourceIfNoneMatchETag, nil)
}
// ClearPages frees the specified pages from the page blob.
@ -104,6 +124,7 @@ func (pb PageBlobURL) ClearPages(ctx context.Context, offset int64, count int64,
PageRange{Start: offset, End: offset + count - 1}.pointers(),
ac.LeaseAccessConditions.pointers(),
nil, nil, EncryptionAlgorithmNone, // CPK
nil, // CPK-N
ifSequenceNumberLessThanOrEqual, ifSequenceNumberLessThan,
ifSequenceNumberEqual, ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
}
@ -115,7 +136,23 @@ func (pb PageBlobURL) GetPageRanges(ctx context.Context, offset int64, count int
return pb.pbClient.GetPageRanges(ctx, nil, nil,
httpRange{offset: offset, count: count}.pointers(),
ac.LeaseAccessConditions.pointers(),
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag,
nil, // Blob ifTags
nil)
}
// GetManagedDiskPageRangesDiff gets the collection of page ranges that differ between a specified snapshot and this page blob representing managed disk.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-page-ranges.
func (pb PageBlobURL) GetManagedDiskPageRangesDiff(ctx context.Context, offset int64, count int64, prevSnapshot *string, prevSnapshotURL *string, ac BlobAccessConditions) (*PageList, error) {
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
return pb.pbClient.GetPageRangesDiff(ctx, nil, nil, prevSnapshot,
prevSnapshotURL, // Get managed disk diff
httpRange{offset: offset, count: count}.pointers(),
ac.LeaseAccessConditions.pointers(),
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag,
nil, // Blob ifTags
nil)
}
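
A hedged sketch of the managed-disk diff call added above; the previous-snapshot URL is assumed to be supplied by the caller, and the PageList field names follow the generated model:

```go
package blobexample

import (
	"context"
	"fmt"

	"github.com/Azure/azure-storage-blob-go/azblob"
)

// diffManagedDiskSnapshots lists the page ranges that changed since a previous
// managed-disk snapshot, identified here by its URL rather than a timestamp.
func diffManagedDiskSnapshots(ctx context.Context, disk azblob.PageBlobURL, prevSnapshotURL string) error {
	pages, err := disk.GetManagedDiskPageRangesDiff(ctx, 0, azblob.CountToEnd,
		nil, &prevSnapshotURL, azblob.BlobAccessConditions{})
	if err != nil {
		return err
	}
	for _, r := range pages.PageRange {
		fmt.Printf("changed pages: %d-%d\n", r.Start, r.End)
	}
	return nil
}
```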
// GetPageRangesDiff gets the collection of page ranges that differ between a specified snapshot and this page blob.
@ -123,9 +160,11 @@ func (pb PageBlobURL) GetPageRanges(ctx context.Context, offset int64, count int
func (pb PageBlobURL) GetPageRangesDiff(ctx context.Context, offset int64, count int64, prevSnapshot string, ac BlobAccessConditions) (*PageList, error) {
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
return pb.pbClient.GetPageRangesDiff(ctx, nil, nil, &prevSnapshot,
nil, // Get managed disk diff
httpRange{offset: offset, count: count}.pointers(),
ac.LeaseAccessConditions.pointers(),
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag,
nil, // Blob ifTags
nil)
}
@ -135,6 +174,7 @@ func (pb PageBlobURL) Resize(ctx context.Context, size int64, ac BlobAccessCondi
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
return pb.pbClient.Resize(ctx, size, nil, ac.LeaseAccessConditions.pointers(),
nil, nil, EncryptionAlgorithmNone, // CPK
nil, // CPK-N
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
}

View file

@ -116,14 +116,14 @@ type ListContainersSegmentOptions struct {
// TODO: update swagger to generate this type?
}
func (o *ListContainersSegmentOptions) pointers() (prefix *string, include ListContainersIncludeType, maxResults *int32) {
func (o *ListContainersSegmentOptions) pointers() (prefix *string, include []ListContainersIncludeType, maxResults *int32) {
if o.Prefix != "" {
prefix = &o.Prefix
}
if o.MaxResults != 0 {
maxResults = &o.MaxResults
}
include = ListContainersIncludeType(o.Detail.string())
include = []ListContainersIncludeType{ListContainersIncludeType(o.Detail.string())}
return
}
@ -131,15 +131,21 @@ func (o *ListContainersSegmentOptions) pointers() (prefix *string, include ListC
type ListContainersDetail struct {
// Tells the service whether to return metadata for each container.
Metadata bool
// Show containers that have been deleted when the soft-delete feature is enabled.
// Deleted bool
}
// string produces the Include query parameter's value.
func (d *ListContainersDetail) string() string {
items := make([]string, 0, 1)
items := make([]string, 0, 2)
// NOTE: Multiple strings MUST be appended in alphabetic order or signing the string for authentication fails!
if d.Metadata {
items = append(items, string(ListContainersIncludeMetadata))
}
// if d.Deleted {
// items = append(items, string(ListContainersIncludeDeleted))
// }
if len(items) > 0 {
return strings.Join(items, ",")
}
@ -157,3 +163,12 @@ func (bsu ServiceURL) SetProperties(ctx context.Context, properties StorageServi
func (bsu ServiceURL) GetStatistics(ctx context.Context) (*StorageServiceStats, error) {
return bsu.client.GetStatistics(ctx, nil, nil)
}
// FindBlobsByTags operation finds all blobs in the storage account whose tags match a given search expression.
// Filter blobs searches across all containers within a storage account but can be scoped within the expression to a single container.
// https://docs.microsoft.com/en-us/rest/api/storageservices/find-blobs-by-tags
// eg. "dog='germanshepherd' and penguin='emperorpenguin'"
// To specify a container, eg. "@container=containerName and Name = C"
func (bsu ServiceURL) FindBlobsByTags(ctx context.Context, timeout *int32, requestID *string, where *string, marker Marker, maxResults *int32) (*FilterBlobSegment, error) {
return bsu.client.FilterBlobs(ctx, timeout, requestID, where, marker.Val, maxResults)
}
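
A hedged sketch of a tag filter across the account, reusing the expression style from the comment above; the FilterBlobItem field names (Name, ContainerName) are assumptions based on the generated model:

```go
package blobexample

import (
	"context"
	"fmt"

	"github.com/Azure/azure-storage-blob-go/azblob"
)

// findTaggedBlobs runs a single Filter Blobs call with a tag expression and
// prints the matching blobs. Paging via the returned marker is omitted.
func findTaggedBlobs(ctx context.Context, service azblob.ServiceURL) error {
	where := "dog='germanshepherd'"
	resp, err := service.FindBlobsByTags(ctx, nil, nil, &where, azblob.Marker{}, nil)
	if err != nil {
		return err
	}
	for _, blob := range resp.Blobs {
		fmt.Println(blob.ContainerName, blob.Name)
	}
	return nil
}
```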

View file

@ -1,3 +1,3 @@
package azblob
const serviceLibVersion = "0.10"
const serviceLibVersion = "0.11"

View file

@ -1,4 +1,4 @@
// +build linux darwin freebsd openbsd netbsd dragonfly solaris
// +build linux darwin freebsd openbsd netbsd dragonfly solaris illumos
package azblob

View file

@ -76,7 +76,7 @@ func (v AccountSASSignatureValues) NewSASQueryParameters(sharedKeyCredential *Sh
// The AccountSASPermissions type simplifies creating the permissions string for an Azure Storage Account SAS.
// Initialize an instance of this type and then call its String method to set AccountSASSignatureValues's Permissions field.
type AccountSASPermissions struct {
Read, Write, Delete, List, Add, Create, Update, Process bool
Read, Write, Delete, DeletePreviousVersion, List, Add, Create, Update, Process, Tag, FilterByTags bool
}
// String produces the SAS permissions string for an Azure Storage account.
@ -92,6 +92,9 @@ func (p AccountSASPermissions) String() string {
if p.Delete {
buffer.WriteRune('d')
}
if p.DeletePreviousVersion {
buffer.WriteRune('x')
}
if p.List {
buffer.WriteRune('l')
}
@ -107,6 +110,12 @@ func (p AccountSASPermissions) String() string {
if p.Process {
buffer.WriteRune('p')
}
if p.Tag {
buffer.WriteRune('t')
}
if p.FilterByTags {
buffer.WriteRune('f')
}
return buffer.String()
}
@ -131,8 +140,14 @@ func (p *AccountSASPermissions) Parse(s string) error {
p.Update = true
case 'p':
p.Process = true
case 'x':
p.DeletePreviousVersion = true
case 't':
p.Tag = true
case 'f':
p.FilterByTags = true
default:
return fmt.Errorf("Invalid permission character: '%v'", r)
return fmt.Errorf("invalid permission character: '%v'", r)
}
}
return nil

View file

@ -114,6 +114,8 @@ const (
// ServiceCodeResourceNotFound means the specified resource does not exist (404).
ServiceCodeResourceNotFound ServiceCodeType = "ResourceNotFound"
ServiceCodeNoAuthenticationInformation ServiceCodeType = "NoAuthenticationInformation"
// ServiceCodeServerBusy means the server is currently unable to receive requests, ingress/egress is over the account limit, or operations per second is over the account limit; please retry your request later (503).
ServiceCodeServerBusy ServiceCodeType = "ServerBusy"

View file

@ -79,7 +79,7 @@ func (e *storageError) Error() string {
// Temporary returns true if the error occurred due to a temporary condition (including an HTTP status of 500 or 503).
func (e *storageError) Temporary() bool {
if e.response != nil {
if (e.response.StatusCode == http.StatusInternalServerError) || (e.response.StatusCode == http.StatusServiceUnavailable) {
if (e.response.StatusCode == http.StatusInternalServerError) || (e.response.StatusCode == http.StatusServiceUnavailable) || (e.response.StatusCode == http.StatusBadGateway) {
return true
}
}
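A hedged sketch of how a caller might act on these retry hints (the withRetries helper and its backoff are illustrative, not part of the SDK; it assumes the exported StorageError interface exposes ServiceCode() and the net.Error Temporary() method, as used elsewhere in the package):
// Illustrative retry wrapper: treat errors whose Temporary() reports true
// (500, 502, 503) or whose service code is ServerBusy as transient.
func withRetries(attempts int, op func() error) error {
	var err error
	for i := 0; i < attempts; i++ {
		if err = op(); err == nil {
			return nil
		}
		stgErr, ok := err.(StorageError)
		if !ok || !(stgErr.Temporary() || stgErr.ServiceCode() == ServiceCodeServerBusy) {
			return err // not a transient storage error
		}
		time.Sleep(time.Duration(i+1) * time.Second) // naive linear backoff
	}
	return err
}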

637
azblob/zt_blob_tags_test.go Normal file
View file

@ -0,0 +1,637 @@
package azblob
import (
"bytes"
"crypto/md5"
"encoding/base64"
"encoding/binary"
"fmt"
chk "gopkg.in/check.v1"
"io/ioutil"
"log"
"net/url"
"strings"
"time"
)
func (s *aztestsSuite) TestSetBlobTags(c *chk.C) {
bsu := getBSU()
containerURL, _ := createNewContainer(c, bsu)
defer deleteContainer(c, containerURL)
blobURL, _ := getBlockBlobURL(c, containerURL)
blobTagsMap := BlobTagsMap{
"azure": "blob",
"blob": "sdk",
"sdk": "go",
}
blockBlobUploadResp, err := blobURL.Upload(ctx, bytes.NewReader([]byte("data")), BlobHTTPHeaders{}, basicMetadata, BlobAccessConditions{}, DefaultAccessTier, nil)
c.Assert(err, chk.IsNil)
c.Assert(blockBlobUploadResp.StatusCode(), chk.Equals, 201)
blobSetTagsResponse, err := blobURL.SetTags(ctx, nil, nil, nil, nil, nil, nil, blobTagsMap)
c.Assert(err, chk.IsNil)
c.Assert(blobSetTagsResponse.StatusCode(), chk.Equals, 204)
blobGetTagsResponse, err := blobURL.GetTags(ctx, nil, nil, nil, nil, nil)
c.Assert(err, chk.IsNil)
c.Assert(blobGetTagsResponse.StatusCode(), chk.Equals, 200)
c.Assert(blobGetTagsResponse.BlobTagSet, chk.HasLen, 3)
for _, blobTag := range blobGetTagsResponse.BlobTagSet {
c.Assert(blobTagsMap[blobTag.Key], chk.Equals, blobTag.Value)
}
}
func (s *aztestsSuite) TestSetBlobTagsWithVID(c *chk.C) {
bsu := getBSU()
containerURL, _ := createNewContainer(c, bsu)
defer deleteContainer(c, containerURL)
blobURL, _ := getBlockBlobURL(c, containerURL)
blobTagsMap := BlobTagsMap{
"Go": "CPlusPlus",
"Python": "CSharp",
"Javascript": "Android",
}
blockBlobUploadResp, err := blobURL.Upload(ctx, bytes.NewReader([]byte("data")), BlobHTTPHeaders{}, basicMetadata, BlobAccessConditions{}, DefaultAccessTier, nil)
c.Assert(err, chk.IsNil)
c.Assert(blockBlobUploadResp.StatusCode(), chk.Equals, 201)
versionId1 := blockBlobUploadResp.VersionID()
blockBlobUploadResp, err = blobURL.Upload(ctx, bytes.NewReader([]byte("updated_data")), BlobHTTPHeaders{}, basicMetadata, BlobAccessConditions{}, DefaultAccessTier, nil)
c.Assert(err, chk.IsNil)
c.Assert(blockBlobUploadResp.StatusCode(), chk.Equals, 201)
versionId2 := blockBlobUploadResp.VersionID()
blobSetTagsResponse, err := blobURL.SetTags(ctx, nil, &versionId1, nil, nil, nil, nil, blobTagsMap)
c.Assert(err, chk.IsNil)
c.Assert(blobSetTagsResponse.StatusCode(), chk.Equals, 204)
blobGetTagsResponse, err := blobURL.GetTags(ctx, nil, nil, nil, &versionId1, nil)
c.Assert(err, chk.IsNil)
c.Assert(blobGetTagsResponse.StatusCode(), chk.Equals, 200)
c.Assert(blobGetTagsResponse.BlobTagSet, chk.HasLen, 3)
for _, blobTag := range blobGetTagsResponse.BlobTagSet {
c.Assert(blobTagsMap[blobTag.Key], chk.Equals, blobTag.Value)
}
blobGetTagsResponse, err = blobURL.GetTags(ctx, nil, nil, nil, &versionId2, nil)
c.Assert(err, chk.IsNil)
c.Assert(blobGetTagsResponse.StatusCode(), chk.Equals, 200)
c.Assert(blobGetTagsResponse.BlobTagSet, chk.IsNil)
}
func (s *aztestsSuite) TestSetBlobTagsWithVID2(c *chk.C) {
bsu := getBSU()
containerURL, _ := createNewContainer(c, bsu)
defer deleteContainer(c, containerURL)
blobURL, _ := getBlockBlobURL(c, containerURL)
blockBlobUploadResp, err := blobURL.Upload(ctx, bytes.NewReader([]byte("data")), BlobHTTPHeaders{}, basicMetadata, BlobAccessConditions{}, DefaultAccessTier, nil)
c.Assert(err, chk.IsNil)
c.Assert(blockBlobUploadResp.StatusCode(), chk.Equals, 201)
versionId1 := blockBlobUploadResp.VersionID()
blockBlobUploadResp, err = blobURL.Upload(ctx, bytes.NewReader([]byte("updated_data")), BlobHTTPHeaders{}, basicMetadata, BlobAccessConditions{}, DefaultAccessTier, nil)
c.Assert(err, chk.IsNil)
c.Assert(blockBlobUploadResp.StatusCode(), chk.Equals, 201)
versionId2 := blockBlobUploadResp.VersionID()
blobTags1 := BlobTagsMap{
"Go": "CPlusPlus",
"Python": "CSharp",
"Javascript": "Android",
}
blobSetTagsResponse, err := blobURL.SetTags(ctx, nil, &versionId1, nil, nil, nil, nil, blobTags1)
c.Assert(err, chk.IsNil)
c.Assert(blobSetTagsResponse.StatusCode(), chk.Equals, 204)
blobGetTagsResponse, err := blobURL.GetTags(ctx, nil, nil, nil, &versionId1, nil)
c.Assert(err, chk.IsNil)
c.Assert(blobGetTagsResponse.StatusCode(), chk.Equals, 200)
c.Assert(blobGetTagsResponse.BlobTagSet, chk.HasLen, 3)
for _, blobTag := range blobGetTagsResponse.BlobTagSet {
c.Assert(blobTags1[blobTag.Key], chk.Equals, blobTag.Value)
}
blobTags2 := BlobTagsMap{
"a123": "321a",
"b234": "432b",
}
blobSetTagsResponse, err = blobURL.SetTags(ctx, nil, &versionId2, nil, nil, nil, nil, blobTags2)
c.Assert(err, chk.IsNil)
c.Assert(blobSetTagsResponse.StatusCode(), chk.Equals, 204)
blobGetTagsResponse, err = blobURL.GetTags(ctx, nil, nil, nil, &versionId2, nil)
c.Assert(err, chk.IsNil)
c.Assert(blobGetTagsResponse.StatusCode(), chk.Equals, 200)
c.Assert(blobGetTagsResponse.BlobTagSet, chk.NotNil)
for _, blobTag := range blobGetTagsResponse.BlobTagSet {
c.Assert(blobTags2[blobTag.Key], chk.Equals, blobTag.Value)
}
}
func (s *aztestsSuite) TestUploadBlockBlobWithSpecialCharactersInTags(c *chk.C) {
bsu := getBSU()
containerURL, _ := createNewContainer(c, bsu)
defer deleteContainer(c, containerURL)
blobURL, _ := getBlockBlobURL(c, containerURL)
blobTagsMap := BlobTagsMap{
"+-./:=_ ": "firsttag",
"tag2": "+-./:=_",
"+-./:=_1": "+-./:=_",
}
blockBlobUploadResp, err := blobURL.Upload(ctx, bytes.NewReader([]byte("data")), BlobHTTPHeaders{}, basicMetadata, BlobAccessConditions{}, DefaultAccessTier, blobTagsMap)
c.Assert(err, chk.IsNil)
c.Assert(blockBlobUploadResp.StatusCode(), chk.Equals, 201)
blobGetTagsResponse, err := blobURL.GetTags(ctx, nil, nil, nil, nil, nil)
c.Assert(err, chk.IsNil)
c.Assert(blobGetTagsResponse.StatusCode(), chk.Equals, 200)
c.Assert(blobGetTagsResponse.BlobTagSet, chk.HasLen, 3)
for _, blobTag := range blobGetTagsResponse.BlobTagSet {
c.Assert(blobTagsMap[blobTag.Key], chk.Equals, blobTag.Value)
}
}
func (s *aztestsSuite) TestStageBlockWithTags(c *chk.C) {
blockIDIntToBase64 := func(blockID int) string {
binaryBlockID := (&[4]byte{})[:]
binary.LittleEndian.PutUint32(binaryBlockID, uint32(blockID))
return base64.StdEncoding.EncodeToString(binaryBlockID)
}
bsu := getBSU()
containerURL, _ := createNewContainer(c, bsu)
defer delContainer(c, containerURL)
blobURL := containerURL.NewBlockBlobURL(generateBlobName())
data := []string{"Azure ", "Storage ", "Block ", "Blob."}
base64BlockIDs := make([]string, len(data))
for index, d := range data {
base64BlockIDs[index] = blockIDIntToBase64(index)
resp, err := blobURL.StageBlock(ctx, base64BlockIDs[index], strings.NewReader(d), LeaseAccessConditions{}, nil)
if err != nil {
c.Fail()
}
c.Assert(resp.Response().StatusCode, chk.Equals, 201)
c.Assert(resp.Version(), chk.Not(chk.Equals), "")
}
blobTagsMap := BlobTagsMap{
"azure": "blob",
"blob": "sdk",
"sdk": "go",
}
commitResp, err := blobURL.CommitBlockList(ctx, base64BlockIDs, BlobHTTPHeaders{}, Metadata{}, BlobAccessConditions{}, DefaultAccessTier, blobTagsMap)
c.Assert(err, chk.IsNil)
c.Assert(commitResp.VersionID(), chk.NotNil)
versionId := commitResp.VersionID()
contentResp, err := blobURL.Download(ctx, 0, CountToEnd, BlobAccessConditions{}, false)
c.Assert(err, chk.IsNil)
contentData, err := ioutil.ReadAll(contentResp.Body(RetryReaderOptions{}))
c.Assert(contentData, chk.DeepEquals, []uint8(strings.Join(data, "")))
blobGetTagsResp, err := blobURL.GetTags(ctx, nil, nil, nil, &versionId, nil)
c.Assert(err, chk.IsNil)
c.Assert(blobGetTagsResp, chk.NotNil)
c.Assert(blobGetTagsResp.BlobTagSet, chk.HasLen, 3)
for _, blobTag := range blobGetTagsResp.BlobTagSet {
c.Assert(blobTagsMap[blobTag.Key], chk.Equals, blobTag.Value)
}
blobGetTagsResp, err = blobURL.GetTags(ctx, nil, nil, nil, nil, nil)
c.Assert(err, chk.IsNil)
c.Assert(blobGetTagsResp, chk.NotNil)
c.Assert(blobGetTagsResp.BlobTagSet, chk.HasLen, 3)
for _, blobTag := range blobGetTagsResp.BlobTagSet {
c.Assert(blobTagsMap[blobTag.Key], chk.Equals, blobTag.Value)
}
}
func (s *aztestsSuite) TestStageBlockFromURLWithTags(c *chk.C) {
bsu := getBSU()
credential, err := getGenericCredential("")
if err != nil {
c.Fatal("Invalid credential")
}
container, _ := createNewContainer(c, bsu)
defer delContainer(c, container)
testSize := 8 * 1024 * 1024 // 8MB
r, sourceData := getRandomDataAndReader(testSize)
ctx := ctx // Use default Background context
srcBlob := container.NewBlockBlobURL("sourceBlob")
destBlob := container.NewBlockBlobURL("destBlob")
blobTagsMap := BlobTagsMap{
"Go": "CPlusPlus",
"Python": "CSharp",
"Javascript": "Android",
}
uploadSrcResp, err := srcBlob.Upload(ctx, r, BlobHTTPHeaders{}, Metadata{}, BlobAccessConditions{}, DefaultAccessTier, blobTagsMap)
c.Assert(err, chk.IsNil)
c.Assert(uploadSrcResp.Response().StatusCode, chk.Equals, 201)
// Get source blob URL with SAS for StageFromURL.
srcBlobParts := NewBlobURLParts(srcBlob.URL())
srcBlobParts.SAS, err = BlobSASSignatureValues{
Protocol: SASProtocolHTTPS, // Users MUST use HTTPS (not HTTP)
ExpiryTime: time.Now().UTC().Add(48 * time.Hour), // the SAS expires 48 hours from now
ContainerName: srcBlobParts.ContainerName,
BlobName: srcBlobParts.BlobName,
Permissions: BlobSASPermissions{Read: true}.String(),
}.NewSASQueryParameters(credential)
if err != nil {
c.Fatal(err)
}
srcBlobURLWithSAS := srcBlobParts.URL()
blockID1, blockID2 := base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf("%6d", 0))), base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf("%6d", 1)))
stageResp1, err := destBlob.StageBlockFromURL(ctx, blockID1, srcBlobURLWithSAS, 0, 4*1024*1024, LeaseAccessConditions{}, ModifiedAccessConditions{})
c.Assert(err, chk.IsNil)
c.Assert(stageResp1.Response().StatusCode, chk.Equals, 201)
c.Assert(stageResp1.ContentMD5(), chk.Not(chk.Equals), "")
c.Assert(stageResp1.RequestID(), chk.Not(chk.Equals), "")
c.Assert(stageResp1.Version(), chk.Not(chk.Equals), "")
c.Assert(stageResp1.Date().IsZero(), chk.Equals, false)
stageResp2, err := destBlob.StageBlockFromURL(ctx, blockID2, srcBlobURLWithSAS, 4*1024*1024, CountToEnd, LeaseAccessConditions{}, ModifiedAccessConditions{})
c.Assert(err, chk.IsNil)
c.Assert(stageResp2.Response().StatusCode, chk.Equals, 201)
c.Assert(stageResp2.ContentMD5(), chk.Not(chk.Equals), "")
c.Assert(stageResp2.RequestID(), chk.Not(chk.Equals), "")
c.Assert(stageResp2.Version(), chk.Not(chk.Equals), "")
c.Assert(stageResp2.Date().IsZero(), chk.Equals, false)
blockList, err := destBlob.GetBlockList(ctx, BlockListAll, LeaseAccessConditions{})
c.Assert(err, chk.IsNil)
c.Assert(blockList.Response().StatusCode, chk.Equals, 200)
c.Assert(blockList.CommittedBlocks, chk.HasLen, 0)
c.Assert(blockList.UncommittedBlocks, chk.HasLen, 2)
listResp, err := destBlob.CommitBlockList(ctx, []string{blockID1, blockID2}, BlobHTTPHeaders{}, nil, BlobAccessConditions{}, DefaultAccessTier, blobTagsMap)
c.Assert(err, chk.IsNil)
c.Assert(listResp.Response().StatusCode, chk.Equals, 201)
//versionId := listResp.VersionID()
downloadResp, err := destBlob.BlobURL.Download(ctx, 0, CountToEnd, BlobAccessConditions{}, false)
c.Assert(err, chk.IsNil)
destData, err := ioutil.ReadAll(downloadResp.Body(RetryReaderOptions{}))
c.Assert(err, chk.IsNil)
c.Assert(destData, chk.DeepEquals, sourceData)
blobGetTagsResp, err := destBlob.GetTags(ctx, nil, nil, nil, nil, nil)
c.Assert(err, chk.IsNil)
c.Assert(blobGetTagsResp.BlobTagSet, chk.HasLen, 3)
for _, blobTag := range blobGetTagsResp.BlobTagSet {
c.Assert(blobTagsMap[blobTag.Key], chk.Equals, blobTag.Value)
}
}
func (s *aztestsSuite) TestCopyBlockBlobFromURLWithTags(c *chk.C) {
bsu := getBSU()
credential, err := getGenericCredential("")
if err != nil {
c.Fatal("Invalid credential")
}
container, _ := createNewContainer(c, bsu)
defer delContainer(c, container)
testSize := 1 * 1024 * 1024 // 1MB
r, sourceData := getRandomDataAndReader(testSize)
sourceDataMD5Value := md5.Sum(sourceData)
srcBlob := container.NewBlockBlobURL("srcBlob")
destBlob := container.NewBlockBlobURL("destBlob")
blobTagsMap := BlobTagsMap{
"Go": "CPlusPlus",
"Python": "CSharp",
"Javascript": "Android",
}
uploadSrcResp, err := srcBlob.Upload(ctx, r, BlobHTTPHeaders{}, Metadata{}, BlobAccessConditions{}, DefaultAccessTier, blobTagsMap)
c.Assert(err, chk.IsNil)
c.Assert(uploadSrcResp.Response().StatusCode, chk.Equals, 201)
// Get source blob URL with SAS for CopyFromURL.
srcBlobParts := NewBlobURLParts(srcBlob.URL())
srcBlobParts.SAS, err = BlobSASSignatureValues{
Protocol: SASProtocolHTTPS, // Users MUST use HTTPS (not HTTP)
ExpiryTime: time.Now().UTC().Add(48 * time.Hour), // the SAS expires 48 hours from now
ContainerName: srcBlobParts.ContainerName,
BlobName: srcBlobParts.BlobName,
Permissions: BlobSASPermissions{Read: true}.String(),
}.NewSASQueryParameters(credential)
if err != nil {
c.Fatal(err)
}
srcBlobURLWithSAS := srcBlobParts.URL()
resp, err := destBlob.CopyFromURL(ctx, srcBlobURLWithSAS, Metadata{"foo": "bar"}, ModifiedAccessConditions{}, BlobAccessConditions{}, sourceDataMD5Value[:], DefaultAccessTier, nil)
c.Assert(err, chk.IsNil)
c.Assert(resp.Response().StatusCode, chk.Equals, 202)
c.Assert(resp.ETag(), chk.Not(chk.Equals), "")
c.Assert(resp.RequestID(), chk.Not(chk.Equals), "")
c.Assert(resp.Version(), chk.Not(chk.Equals), "")
c.Assert(resp.Date().IsZero(), chk.Equals, false)
c.Assert(resp.CopyID(), chk.Not(chk.Equals), "")
c.Assert(resp.ContentMD5(), chk.DeepEquals, sourceDataMD5Value[:])
c.Assert(string(resp.CopyStatus()), chk.DeepEquals, "success")
downloadResp, err := destBlob.BlobURL.Download(ctx, 0, CountToEnd, BlobAccessConditions{}, false)
c.Assert(err, chk.IsNil)
destData, err := ioutil.ReadAll(downloadResp.Body(RetryReaderOptions{}))
c.Assert(err, chk.IsNil)
c.Assert(destData, chk.DeepEquals, sourceData)
c.Assert(len(downloadResp.NewMetadata()), chk.Equals, 1)
_, badMD5 := getRandomDataAndReader(16)
_, err = destBlob.CopyFromURL(ctx, srcBlobURLWithSAS, Metadata{}, ModifiedAccessConditions{}, BlobAccessConditions{}, badMD5, DefaultAccessTier, blobTagsMap)
c.Assert(err, chk.NotNil)
resp, err = destBlob.CopyFromURL(ctx, srcBlobURLWithSAS, Metadata{}, ModifiedAccessConditions{}, BlobAccessConditions{}, nil, DefaultAccessTier, blobTagsMap)
c.Assert(err, chk.IsNil)
c.Assert(resp.Response().StatusCode, chk.Equals, 202)
c.Assert(resp.XMsContentCrc64(), chk.Not(chk.Equals), "")
}
func (s *aztestsSuite) TestGetPropertiesReturnsTagsCount(c *chk.C) {
bsu := getBSU()
containerURL, _ := createNewContainer(c, bsu)
defer deleteContainer(c, containerURL)
blobURL, _ := getBlockBlobURL(c, containerURL)
blobTagsMap := BlobTagsMap{
"azure": "blob",
"blob": "sdk",
"sdk": "go",
}
blockBlobUploadResp, err := blobURL.Upload(ctx, bytes.NewReader([]byte("data")), BlobHTTPHeaders{}, basicMetadata, BlobAccessConditions{}, DefaultAccessTier, blobTagsMap)
c.Assert(err, chk.IsNil)
c.Assert(blockBlobUploadResp.StatusCode(), chk.Equals, 201)
getPropertiesResponse, err := blobURL.GetProperties(ctx, BlobAccessConditions{})
c.Assert(err, chk.IsNil)
c.Assert(getPropertiesResponse.TagCount(), chk.Equals, int64(3))
downloadResp, err := blobURL.Download(ctx, 0, CountToEnd, BlobAccessConditions{}, false)
c.Assert(err, chk.IsNil)
c.Assert(downloadResp, chk.NotNil)
c.Assert(downloadResp.r.rawResponse.Header.Get("x-ms-tag-count"), chk.Equals, "3")
}
func (s *aztestsSuite) TestSetBlobTagForSnapshot(c *chk.C) {
bsu := getBSU()
containerURL, _ := createNewContainer(c, bsu)
defer deleteContainer(c, containerURL)
blobURL, _ := createNewBlockBlob(c, containerURL)
blobTagsMap := BlobTagsMap{
"Microsoft Azure": "Azure Storage",
"Storage+SDK": "SDK/GO",
"GO ": ".Net",
}
_, err := blobURL.SetTags(ctx, nil, nil, nil, nil, nil, nil, blobTagsMap)
c.Assert(err, chk.IsNil)
resp, err := blobURL.CreateSnapshot(ctx, nil, BlobAccessConditions{})
c.Assert(err, chk.IsNil)
snapshotURL := blobURL.WithSnapshot(resp.Snapshot())
resp2, err := snapshotURL.GetProperties(ctx, BlobAccessConditions{})
c.Assert(err, chk.IsNil)
c.Assert(resp2.TagCount(), chk.Equals, int64(3))
}
func (s *aztestsSuite) TestCreatePageBlobWithTags(c *chk.C) {
bsu := getBSU()
container, _ := createNewContainer(c, bsu)
defer delContainer(c, container)
blobTagsMap := BlobTagsMap{
"azure": "blob",
"blob": "sdk",
"sdk": "go",
}
blob, _ := createNewPageBlob(c, container)
putResp, err := blob.UploadPages(ctx, 0, getReaderToRandomBytes(1024), PageBlobAccessConditions{}, nil)
c.Assert(err, chk.IsNil)
c.Assert(putResp.Response().StatusCode, chk.Equals, 201)
c.Assert(putResp.LastModified().IsZero(), chk.Equals, false)
c.Assert(putResp.ETag(), chk.Not(chk.Equals), ETagNone)
c.Assert(putResp.Version(), chk.Not(chk.Equals), "")
c.Assert(putResp.rawResponse.Header.Get("x-ms-version-id"), chk.NotNil)
setTagResp, err := blob.SetTags(ctx, nil, nil, nil, nil, nil, nil, blobTagsMap)
c.Assert(err, chk.IsNil)
c.Assert(setTagResp.StatusCode(), chk.Equals, 204)
gpResp, err := blob.GetProperties(ctx, BlobAccessConditions{})
c.Assert(err, chk.IsNil)
c.Assert(gpResp, chk.NotNil)
c.Assert(gpResp.rawResponse.Header.Get("x-ms-tag-count"), chk.Equals, "3")
modifiedBlobTags := BlobTagsMap{
"a0z1u2r3e4": "b0l1o2b3",
"b0l1o2b3": "s0d1k2",
}
setTagResp, err = blob.SetTags(ctx, nil, nil, nil, nil, nil, nil, modifiedBlobTags)
c.Assert(err, chk.IsNil)
c.Assert(setTagResp.StatusCode(), chk.Equals, 204)
gpResp, err = blob.GetProperties(ctx, BlobAccessConditions{})
c.Assert(err, chk.IsNil)
c.Assert(gpResp, chk.NotNil)
c.Assert(gpResp.rawResponse.Header.Get("x-ms-tag-count"), chk.Equals, "2")
}
func (s *aztestsSuite) TestSetTagOnPageBlob(c *chk.C) {
bsu := getBSU()
container, _ := createNewContainer(c, bsu)
defer delContainer(c, container)
blob, _ := getPageBlobURL(c, container)
blobTagsMap := BlobTagsMap{
"azure": "blob",
"blob": "sdk",
"sdk": "go",
}
resp, err := blob.Create(ctx, PageBlobPageBytes*10, 0, BlobHTTPHeaders{}, nil, BlobAccessConditions{}, DefaultPremiumBlobAccessTier, blobTagsMap)
c.Assert(err, chk.IsNil)
c.Assert(resp.StatusCode(), chk.Equals, 201)
gpResp, err := blob.GetProperties(ctx, BlobAccessConditions{})
c.Assert(err, chk.IsNil)
c.Assert(gpResp, chk.NotNil)
c.Assert(gpResp.rawResponse.Header.Get("x-ms-tag-count"), chk.Equals, "3")
modifiedBlobTags := BlobTagsMap{
"a0z1u2r3e4": "b0l1o2b3",
"b0l1o2b3": "s0d1k2",
}
setTagResp, err := blob.SetTags(ctx, nil, nil, nil, nil, nil, nil, modifiedBlobTags)
c.Assert(err, chk.IsNil)
c.Assert(setTagResp.StatusCode(), chk.Equals, 204)
gpResp, err = blob.GetProperties(ctx, BlobAccessConditions{})
c.Assert(err, chk.IsNil)
c.Assert(gpResp, chk.NotNil)
c.Assert(gpResp.rawResponse.Header.Get("x-ms-tag-count"), chk.Equals, "2")
}
func (s *aztestsSuite) TestCreateAppendBlobWithTags(c *chk.C) {
bsu := getBSU()
containerURL, _ := createNewContainer(c, bsu)
defer deleteContainer(c, containerURL)
blobURL, _ := createNewAppendBlob(c, containerURL)
blobProp, _ := blobURL.GetProperties(ctx, BlobAccessConditions{})
createResp, err := blobURL.Create(ctx, BlobHTTPHeaders{}, basicMetadata, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfMatch: blobProp.ETag()}}, nil)
c.Assert(err, chk.IsNil)
c.Assert(createResp.VersionID(), chk.NotNil)
blobProp, _ = blobURL.GetProperties(ctx, BlobAccessConditions{})
c.Assert(createResp.VersionID(), chk.Equals, blobProp.VersionID())
c.Assert(createResp.LastModified(), chk.DeepEquals, blobProp.LastModified())
c.Assert(createResp.ETag(), chk.Equals, blobProp.ETag())
c.Assert(blobProp.IsCurrentVersion(), chk.Equals, "true")
}
func (s *aztestsSuite) TestListBlobReturnsTags(c *chk.C) {
bsu := getBSU()
containerURL, _ := createNewContainer(c, bsu)
defer deleteContainer(c, containerURL)
blobURL, blobName := createNewBlockBlob(c, containerURL)
blobTagsMap := BlobTagsMap{
"+-./:=_ ": "firsttag",
"tag2": "+-./:=_",
"+-./:=_1": "+-./:=_",
}
resp, err := blobURL.SetTags(ctx, nil, nil, nil, nil, nil, nil, blobTagsMap)
c.Assert(err, chk.IsNil)
c.Assert(resp.StatusCode(), chk.Equals, 204)
listBlobResp, err := containerURL.ListBlobsFlatSegment(ctx, Marker{}, ListBlobsSegmentOptions{Details: BlobListingDetails{Tags: true}})
c.Assert(err, chk.IsNil)
c.Assert(listBlobResp.Segment.BlobItems[0].Name, chk.Equals, blobName)
c.Assert(listBlobResp.Segment.BlobItems[0].BlobTags.BlobTagSet, chk.HasLen, 3)
for _, blobTag := range listBlobResp.Segment.BlobItems[0].BlobTags.BlobTagSet {
c.Assert(blobTagsMap[blobTag.Key], chk.Equals, blobTag.Value)
}
}
func (s *aztestsSuite) TestFindBlobsByTags(c *chk.C) {
bsu := getBSU()
containerURL1, _ := createNewContainer(c, bsu)
defer deleteContainer(c, containerURL1)
containerURL2, _ := createNewContainer(c, bsu)
defer deleteContainer(c, containerURL2)
containerURL3, _ := createNewContainer(c, bsu)
defer deleteContainer(c, containerURL3)
blobTagsMap1 := BlobTagsMap{
"tag2": "tagsecond",
"tag3": "tagthird",
}
blobTagsMap2 := BlobTagsMap{
"tag1": "firsttag",
"tag2": "secondtag",
"tag3": "thirdtag",
}
blobURL11, _ := getBlockBlobURL(c, containerURL1)
_, err := blobURL11.Upload(ctx, bytes.NewReader([]byte("random data")), BlobHTTPHeaders{}, basicMetadata, BlobAccessConditions{}, DefaultAccessTier, blobTagsMap1)
c.Assert(err, chk.IsNil)
blobURL12, _ := getBlockBlobURL(c, containerURL1)
_, err = blobURL12.Upload(ctx, bytes.NewReader([]byte("another random data")), BlobHTTPHeaders{}, basicMetadata, BlobAccessConditions{}, DefaultAccessTier, blobTagsMap2)
c.Assert(err, chk.IsNil)
blobURL21, _ := getBlockBlobURL(c, containerURL2)
_, err = blobURL21.Upload(ctx, bytes.NewReader([]byte("random data")), BlobHTTPHeaders{}, basicMetadata, BlobAccessConditions{}, DefaultAccessTier, nil)
c.Assert(err, chk.IsNil)
blobURL22, _ := getBlockBlobURL(c, containerURL2)
_, err = blobURL22.Upload(ctx, bytes.NewReader([]byte("another random data")), BlobHTTPHeaders{}, basicMetadata, BlobAccessConditions{}, DefaultAccessTier, blobTagsMap2)
c.Assert(err, chk.IsNil)
blobURL31, _ := getBlockBlobURL(c, containerURL3)
_, err = blobURL31.Upload(ctx, bytes.NewReader([]byte("random data")), BlobHTTPHeaders{}, basicMetadata, BlobAccessConditions{}, DefaultAccessTier, nil)
c.Assert(err, chk.IsNil)
where := "\"tag4\"='fourthtag'"
lResp, err := bsu.FindBlobsByTags(ctx, nil, nil, &where, Marker{}, nil)
c.Assert(err, chk.IsNil)
c.Assert(lResp.Blobs, chk.HasLen, 0)
//where = "\"tag1\"='firsttag'AND\"tag2\"='secondtag'AND\"@container\"='"+ containerName1 + "'"
//TODO: Figure out how to do a composite query based on container.
where = "\"tag1\"='firsttag'AND\"tag2\"='secondtag'"
lResp, err = bsu.FindBlobsByTags(ctx, nil, nil, &where, Marker{}, nil)
c.Assert(err, chk.IsNil)
for _, blob := range lResp.Blobs {
c.Assert(blob.TagValue, chk.Equals, "firsttag")
}
}
func (s *aztestsSuite) TestFilterBlobsUsingAccountSAS(c *chk.C) {
accountName, accountKey := accountInfo()
credential, err := NewSharedKeyCredential(accountName, accountKey)
if err != nil {
c.Fail()
}
sasQueryParams, err := AccountSASSignatureValues{
Protocol: SASProtocolHTTPS,
ExpiryTime: time.Now().UTC().Add(48 * time.Hour),
Permissions: AccountSASPermissions{Read: true, List: true, Write: true, DeletePreviousVersion: true, Tag: true, FilterByTags: true, Create: true}.String(),
Services: AccountSASServices{Blob: true}.String(),
ResourceTypes: AccountSASResourceTypes{Service: true, Container: true, Object: true}.String(),
}.NewSASQueryParameters(credential)
if err != nil {
log.Fatal(err)
}
qp := sasQueryParams.Encode()
urlToSendToSomeone := fmt.Sprintf("https://%s.blob.core.windows.net?%s", accountName, qp)
u, _ := url.Parse(urlToSendToSomeone)
serviceURL := NewServiceURL(*u, NewPipeline(NewAnonymousCredential(), PipelineOptions{}))
containerName := generateContainerName()
containerURL := serviceURL.NewContainerURL(containerName)
_, err = containerURL.Create(ctx, Metadata{}, PublicAccessNone)
defer containerURL.Delete(ctx, ContainerAccessConditions{})
if err != nil {
c.Fatal(err)
}
blobURL := containerURL.NewBlockBlobURL("temp")
_, err = blobURL.Upload(ctx, bytes.NewReader([]byte("random data")), BlobHTTPHeaders{}, basicMetadata, BlobAccessConditions{}, DefaultAccessTier, nil)
if err != nil {
c.Fail()
}
blobTagsMap := BlobTagsMap{"tag1": "firsttag", "tag2": "secondtag", "tag3": "thirdtag"}
setBlobTagsResp, err := blobURL.SetTags(ctx, nil, nil, nil, nil, nil, nil, blobTagsMap)
c.Assert(err, chk.IsNil)
c.Assert(setBlobTagsResp.StatusCode(), chk.Equals, 204)
blobGetTagsResp, err := blobURL.GetTags(ctx, nil, nil, nil, nil, nil)
c.Assert(err, chk.IsNil)
c.Assert(blobGetTagsResp.StatusCode(), chk.Equals, 200)
c.Assert(blobGetTagsResp.BlobTagSet, chk.HasLen, 3)
for _, blobTag := range blobGetTagsResp.BlobTagSet {
c.Assert(blobTagsMap[blobTag.Key], chk.Equals, blobTag.Value)
}
time.Sleep(30 * time.Second)
where := "\"tag1\"='firsttag'AND\"tag2\"='secondtag'AND@container='" + containerName + "'"
_, err = serviceURL.FindBlobsByTags(ctx, nil, nil, &where, Marker{}, nil)
c.Assert(err, chk.IsNil)
}
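Outside the check-based harness, the tag round trip exercised by these tests reduces to roughly the following hedged sketch (the demonstrateBlobTags name, tag values, and simplified error handling are placeholders; the usual context/fmt/strings imports are assumed):
// Upload with tags, read them back, then overwrite them with SetTags.
func demonstrateBlobTags(ctx context.Context, blobURL BlockBlobURL) error {
	tags := BlobTagsMap{"env": "prod", "owner": "storage-team"}
	if _, err := blobURL.Upload(ctx, strings.NewReader("payload"), BlobHTTPHeaders{}, Metadata{}, BlobAccessConditions{}, DefaultAccessTier, tags); err != nil {
		return err
	}

	getResp, err := blobURL.GetTags(ctx, nil, nil, nil, nil, nil)
	if err != nil {
		return err
	}
	for _, tag := range getResp.BlobTagSet {
		fmt.Printf("%s=%s\n", tag.Key, tag.Value)
	}

	_, err = blobURL.SetTags(ctx, nil, nil, nil, nil, nil, nil, BlobTagsMap{"env": "staging"})
	return err
}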

View file

@ -0,0 +1,379 @@
package azblob
import (
"context"
"encoding/base64"
"encoding/binary"
"io/ioutil"
"time"
"crypto/md5"
"bytes"
"strings"
chk "gopkg.in/check.v1" // go get gopkg.in/check.v1
)
func (s *aztestsSuite) TestGetBlobPropertiesUsingVID(c *chk.C) {
bsu := getBSU()
containerURL, _ := createNewContainer(c, bsu)
defer deleteContainer(c, containerURL)
blobURL, _ := createNewAppendBlob(c, containerURL)
blobProp, _ := blobURL.GetProperties(ctx, BlobAccessConditions{})
createResp, err := blobURL.Create(ctx, BlobHTTPHeaders{}, basicMetadata, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfMatch: blobProp.ETag()}}, nil)
c.Assert(err, chk.IsNil)
c.Assert(createResp.VersionID(), chk.NotNil)
blobProp, _ = blobURL.GetProperties(ctx, BlobAccessConditions{})
c.Assert(createResp.VersionID(), chk.Equals, blobProp.VersionID())
c.Assert(createResp.LastModified(), chk.DeepEquals, blobProp.LastModified())
c.Assert(createResp.ETag(), chk.Equals, blobProp.ETag())
c.Assert(blobProp.IsCurrentVersion(), chk.Equals, "true")
}
func (s *aztestsSuite) TestSetBlobMetadataReturnsVID(c *chk.C) {
bsu := getBSU()
containerURL, _ := createNewContainer(c, bsu)
defer deleteContainer(c, containerURL)
blobURL, blobName := createNewBlockBlob(c, containerURL)
metadata := Metadata{"test_key_1": "test_value_1", "test_key_2": "2019"}
resp, err := blobURL.SetMetadata(ctx, metadata, BlobAccessConditions{})
c.Assert(err, chk.IsNil)
c.Assert(resp.VersionID(), chk.NotNil)
listBlobResp, err := containerURL.ListBlobsFlatSegment(ctx, Marker{}, ListBlobsSegmentOptions{Details: BlobListingDetails{Metadata: true}})
c.Assert(err, chk.IsNil)
c.Assert(listBlobResp.Segment.BlobItems[0].Name, chk.Equals, blobName)
c.Assert(listBlobResp.Segment.BlobItems[0].Metadata, chk.HasLen, 2)
c.Assert(listBlobResp.Segment.BlobItems[0].Metadata, chk.DeepEquals, metadata)
}
func (s *aztestsSuite) TestCreateAndDownloadBlobSpecialCharactersWithVID(c *chk.C) {
bsu := getBSU()
containerURL, _ := createNewContainer(c, bsu)
defer deleteContainer(c, containerURL)
data := []rune("-._/()$=',~0123456789")
for i := 0; i < len(data); i++ {
blobName := "abc" + string(data[i])
blobURL := containerURL.NewBlockBlobURL(blobName)
resp, err := blobURL.Upload(ctx, strings.NewReader(string(data[i])), BlobHTTPHeaders{}, Metadata{}, BlobAccessConditions{}, DefaultAccessTier, nil)
c.Assert(err, chk.IsNil)
c.Assert(resp.VersionID(), chk.NotNil)
dResp, err := blobURL.WithVersionID(resp.VersionID()).Download(ctx, 0, CountToEnd, BlobAccessConditions{}, false)
c.Assert(err, chk.IsNil)
d1, err := ioutil.ReadAll(dResp.Body(RetryReaderOptions{}))
c.Assert(dResp.Version(), chk.Not(chk.Equals), "")
c.Assert(string(d1), chk.DeepEquals, string(data[i]))
versionId := dResp.r.rawResponse.Header.Get("x-ms-version-id")
c.Assert(versionId, chk.NotNil)
c.Assert(versionId, chk.Equals, resp.VersionID())
}
}
func (s *aztestsSuite) TestDeleteSpecificBlobVersion(c *chk.C) {
bsu := getBSU()
containerURL, _ := createNewContainer(c, bsu)
defer deleteContainer(c, containerURL)
blobURL, _ := getBlockBlobURL(c, containerURL)
blockBlobUploadResp, err := blobURL.Upload(ctx, bytes.NewReader([]byte("data")), BlobHTTPHeaders{}, basicMetadata, BlobAccessConditions{}, DefaultAccessTier, nil)
c.Assert(err, chk.IsNil)
c.Assert(blockBlobUploadResp.VersionID(), chk.NotNil)
versionID1 := blockBlobUploadResp.VersionID()
blockBlobUploadResp, err = blobURL.Upload(ctx, bytes.NewReader([]byte("updated_data")), BlobHTTPHeaders{}, basicMetadata, BlobAccessConditions{}, DefaultAccessTier, nil)
c.Assert(err, chk.IsNil)
c.Assert(blockBlobUploadResp.VersionID(), chk.NotNil)
listBlobsResp, err := containerURL.ListBlobsFlatSegment(ctx, Marker{}, ListBlobsSegmentOptions{Details: BlobListingDetails{Versions: true}})
c.Assert(err, chk.IsNil)
c.Assert(listBlobsResp.Segment.BlobItems, chk.HasLen, 2)
// Delete the previous blob version.
deleteResp, err := blobURL.WithVersionID(versionID1).Delete(ctx, DeleteSnapshotsOptionNone, BlobAccessConditions{})
c.Assert(err, chk.IsNil)
c.Assert(deleteResp.StatusCode(), chk.Equals, 202)
listBlobsResp, err = containerURL.ListBlobsFlatSegment(ctx, Marker{}, ListBlobsSegmentOptions{Details: BlobListingDetails{Versions: true}})
c.Assert(err, chk.IsNil)
c.Assert(listBlobsResp.Segment.BlobItems, chk.NotNil)
if len(listBlobsResp.Segment.BlobItems) != 1 {
c.Fail()
}
}
func (s *aztestsSuite) TestDeleteSpecificBlobVersionWithBlobSAS(c *chk.C) {
bsu := getBSU()
credential, err := getGenericCredential("")
if err != nil {
c.Fatal(err)
}
containerURL, containerName := createNewContainer(c, bsu)
defer deleteContainer(c, containerURL)
blobURL, blobName := getBlockBlobURL(c, containerURL)
resp, err := blobURL.Upload(ctx, bytes.NewReader([]byte("data")), BlobHTTPHeaders{}, basicMetadata, BlobAccessConditions{}, DefaultAccessTier, nil)
c.Assert(err, chk.IsNil)
versionId := resp.VersionID()
c.Assert(versionId, chk.NotNil)
resp, err = blobURL.Upload(ctx, bytes.NewReader([]byte("updated_data")), BlobHTTPHeaders{}, basicMetadata, BlobAccessConditions{}, DefaultAccessTier, nil)
c.Assert(err, chk.IsNil)
c.Assert(resp.VersionID(), chk.NotNil)
blobParts := NewBlobURLParts(blobURL.URL())
blobParts.VersionID = versionId
blobParts.SAS, err = BlobSASSignatureValues{
Protocol: SASProtocolHTTPS,
ExpiryTime: time.Now().UTC().Add(1 * time.Hour),
ContainerName: containerName,
BlobName: blobName,
Permissions: BlobSASPermissions{Delete: true, DeletePreviousVersion: true}.String(),
}.NewSASQueryParameters(credential)
if err != nil {
c.Fatal(err)
}
sbURL := NewBlockBlobURL(blobParts.URL(), containerURL.client.p)
deleteResp, err := sbURL.Delete(ctx, DeleteSnapshotsOptionNone, BlobAccessConditions{})
c.Assert(err, chk.IsNil)
listBlobResp, err := containerURL.ListBlobsFlatSegment(ctx, Marker{}, ListBlobsSegmentOptions{Details: BlobListingDetails{Versions: true}})
c.Assert(err, chk.IsNil)
for _, blob := range listBlobResp.Segment.BlobItems {
c.Assert(blob.VersionID, chk.Not(chk.Equals), versionId)
}
}
func (s *aztestsSuite) TestDownloadSpecificBlobVersion(c *chk.C) {
bsu := getBSU()
containerURL, _ := createNewContainer(c, bsu)
defer deleteContainer(c, containerURL)
blobURL, _ := getBlockBlobURL(c, containerURL)
blockBlobUploadResp, err := blobURL.Upload(ctx, bytes.NewReader([]byte("data")), BlobHTTPHeaders{}, basicMetadata, BlobAccessConditions{}, DefaultAccessTier, nil)
c.Assert(err, chk.IsNil)
c.Assert(blockBlobUploadResp, chk.NotNil)
versionId1 := blockBlobUploadResp.VersionID()
blockBlobUploadResp, err = blobURL.Upload(ctx, bytes.NewReader([]byte("updated_data")), BlobHTTPHeaders{}, basicMetadata, BlobAccessConditions{}, DefaultAccessTier, nil)
c.Assert(err, chk.IsNil)
c.Assert(blockBlobUploadResp, chk.NotNil)
versionId2 := blockBlobUploadResp.VersionID()
c.Assert(blockBlobUploadResp.VersionID(), chk.NotNil)
// Download the previous blob version.
blobURL = blobURL.WithVersionID(versionId1)
blockBlobDeleteResp, err := blobURL.Download(ctx, 0, CountToEnd, BlobAccessConditions{}, false)
c.Assert(err, chk.IsNil)
data, err := ioutil.ReadAll(blockBlobDeleteResp.Response().Body)
c.Assert(string(data), chk.Equals, "data")
// Download the current blob version.
blobURL = blobURL.WithVersionID(versionId2)
blockBlobDeleteResp, err = blobURL.Download(ctx, 0, CountToEnd, BlobAccessConditions{}, false)
c.Assert(err, chk.IsNil)
data, err = ioutil.ReadAll(blockBlobDeleteResp.Response().Body)
c.Assert(string(data), chk.Equals, "updated_data")
}
func (s *aztestsSuite) TestCreateBlobSnapshotReturnsVID(c *chk.C) {
bsu := getBSU()
containerURL, _ := createNewContainer(c, bsu)
defer delContainer(c, containerURL)
blobURL := containerURL.NewBlockBlobURL(generateBlobName())
uploadResp, err := blobURL.Upload(ctx, bytes.NewReader([]byte("updated_data")), BlobHTTPHeaders{}, basicMetadata, BlobAccessConditions{}, DefaultAccessTier, nil)
c.Assert(err, chk.IsNil)
c.Assert(uploadResp.VersionID(), chk.NotNil)
csResp, err := blobURL.CreateSnapshot(ctx, Metadata{}, BlobAccessConditions{})
c.Assert(err, chk.IsNil)
c.Assert(csResp.VersionID(), chk.NotNil)
lbResp, err := containerURL.ListBlobsFlatSegment(ctx, Marker{}, ListBlobsSegmentOptions{
Details: BlobListingDetails{Versions: true, Snapshots: true},
})
c.Assert(lbResp, chk.NotNil)
if len(lbResp.Segment.BlobItems) < 2 {
c.Fail()
}
_, err = blobURL.Delete(ctx, DeleteSnapshotsOptionInclude, BlobAccessConditions{})
lbResp, err = containerURL.ListBlobsFlatSegment(ctx, Marker{}, ListBlobsSegmentOptions{
Details: BlobListingDetails{Versions: true, Snapshots: true},
})
c.Assert(lbResp, chk.NotNil)
if len(lbResp.Segment.BlobItems) < 2 {
c.Fail()
}
for _, blob := range lbResp.Segment.BlobItems {
c.Assert(blob.Snapshot, chk.Equals, "")
}
}
func (s *aztestsSuite) TestCopyBlobFromURLWithSASReturnsVID(c *chk.C) {
bsu := getBSU()
credential, err := getGenericCredential("")
if err != nil {
c.Fatal("Invalid credential")
}
container, _ := createNewContainer(c, bsu)
defer delContainer(c, container)
testSize := 4 * 1024 * 1024 // 4MB
r, sourceData := getRandomDataAndReader(testSize)
sourceDataMD5Value := md5.Sum(sourceData)
ctx := context.Background()
srcBlob := container.NewBlockBlobURL(generateBlobName())
destBlob := container.NewBlockBlobURL(generateBlobName())
uploadSrcResp, err := srcBlob.Upload(ctx, r, BlobHTTPHeaders{}, Metadata{}, BlobAccessConditions{}, DefaultAccessTier, nil)
c.Assert(err, chk.IsNil)
c.Assert(uploadSrcResp.Response().StatusCode, chk.Equals, 201)
c.Assert(uploadSrcResp.Response().Header.Get("x-ms-version-id"), chk.NotNil)
srcBlobParts := NewBlobURLParts(srcBlob.URL())
srcBlobParts.SAS, err = BlobSASSignatureValues{
Protocol: SASProtocolHTTPS, // Users MUST use HTTPS (not HTTP)
ExpiryTime: time.Now().UTC().Add(48 * time.Hour), // the SAS expires 48 hours from now
ContainerName: srcBlobParts.ContainerName,
BlobName: srcBlobParts.BlobName,
Permissions: BlobSASPermissions{Read: true}.String(),
}.NewSASQueryParameters(credential)
if err != nil {
c.Fatal(err)
}
srcBlobURLWithSAS := srcBlobParts.URL()
resp, err := destBlob.CopyFromURL(ctx, srcBlobURLWithSAS, Metadata{"foo": "bar"}, ModifiedAccessConditions{}, BlobAccessConditions{}, sourceDataMD5Value[:], DefaultAccessTier, nil)
c.Assert(err, chk.IsNil)
c.Assert(resp.Response().StatusCode, chk.Equals, 202)
c.Assert(resp.Version(), chk.Not(chk.Equals), "")
c.Assert(resp.CopyID(), chk.Not(chk.Equals), "")
c.Assert(string(resp.CopyStatus()), chk.DeepEquals, "success")
c.Assert(resp.VersionID(), chk.NotNil)
downloadResp, err := destBlob.BlobURL.Download(ctx, 0, CountToEnd, BlobAccessConditions{}, false)
c.Assert(err, chk.IsNil)
destData, err := ioutil.ReadAll(downloadResp.Body(RetryReaderOptions{}))
c.Assert(err, chk.IsNil)
c.Assert(destData, chk.DeepEquals, sourceData)
c.Assert(downloadResp.Response().Header.Get("x-ms-version-id"), chk.NotNil)
c.Assert(len(downloadResp.NewMetadata()), chk.Equals, 1)
_, badMD5 := getRandomDataAndReader(16)
_, err = destBlob.CopyFromURL(ctx, srcBlobURLWithSAS, Metadata{}, ModifiedAccessConditions{}, BlobAccessConditions{}, badMD5, DefaultAccessTier, nil)
c.Assert(err, chk.NotNil)
resp, err = destBlob.CopyFromURL(ctx, srcBlobURLWithSAS, Metadata{}, ModifiedAccessConditions{}, BlobAccessConditions{}, nil, DefaultAccessTier, nil)
c.Assert(err, chk.IsNil)
c.Assert(resp.Response().StatusCode, chk.Equals, 202)
c.Assert(resp.XMsContentCrc64(), chk.Not(chk.Equals), "")
c.Assert(resp.Response().Header.Get("x-ms-version"), chk.Equals, ServiceVersion)
c.Assert(resp.Response().Header.Get("x-ms-version-id"), chk.NotNil)
}
func (s *aztestsSuite) TestCreateBlockBlobReturnsVID(c *chk.C) {
bsu := getBSU()
containerURL, _ := createNewContainer(c, bsu)
defer delContainer(c, containerURL)
testSize := 2 * 1024 * 1024 // 2MB
r, _ := getRandomDataAndReader(testSize)
ctx := context.Background() // Use default Background context
blobURL := containerURL.NewBlockBlobURL(generateBlobName())
// Upload the block blob.
uploadResp, err := blobURL.Upload(ctx, r, BlobHTTPHeaders{}, Metadata{}, BlobAccessConditions{}, DefaultAccessTier, nil)
c.Assert(err, chk.IsNil)
c.Assert(uploadResp.Response().StatusCode, chk.Equals, 201)
c.Assert(uploadResp.rawResponse.Header.Get("x-ms-version"), chk.Equals, ServiceVersion)
c.Assert(uploadResp.Response().Header.Get("x-ms-version-id"), chk.NotNil)
csResp, err := blobURL.CreateSnapshot(ctx, Metadata{}, BlobAccessConditions{})
c.Assert(err, chk.IsNil)
c.Assert(csResp.Response().StatusCode, chk.Equals, 201)
c.Assert(csResp.Response().Header.Get("x-ms-version-id"), chk.NotNil)
listBlobResp, err := containerURL.ListBlobsFlatSegment(ctx, Marker{}, ListBlobsSegmentOptions{Details: BlobListingDetails{Snapshots: true}})
c.Assert(err, chk.IsNil)
c.Assert(listBlobResp.rawResponse.Header.Get("x-ms-request-id"), chk.NotNil)
if len(listBlobResp.Segment.BlobItems) < 2 {
c.Fail()
}
deleteResp, err := blobURL.Delete(ctx, DeleteSnapshotsOptionOnly, BlobAccessConditions{})
c.Assert(err, chk.IsNil)
c.Assert(deleteResp.Response().StatusCode, chk.Equals, 202)
c.Assert(deleteResp.Response().Header.Get("x-ms-version-id"), chk.NotNil)
listBlobResp, err = containerURL.ListBlobsFlatSegment(ctx, Marker{}, ListBlobsSegmentOptions{Details: BlobListingDetails{Snapshots: true, Versions: true}})
c.Assert(err, chk.IsNil)
c.Assert(listBlobResp.rawResponse.Header.Get("x-ms-request-id"), chk.NotNil)
if len(listBlobResp.Segment.BlobItems) == 0 {
c.Fail()
}
blobs := listBlobResp.Segment.BlobItems
c.Assert(blobs[0].Snapshot, chk.Equals, "")
}
func (s *aztestsSuite) TestPutBlockListReturnsVID(c *chk.C) {
blockIDIntToBase64 := func(blockID int) string {
binaryBlockID := (&[4]byte{})[:]
binary.LittleEndian.PutUint32(binaryBlockID, uint32(blockID))
return base64.StdEncoding.EncodeToString(binaryBlockID)
}
bsu := getBSU()
containerURL, _ := createNewContainer(c, bsu)
defer delContainer(c, containerURL)
blobURL := containerURL.NewBlockBlobURL(generateBlobName())
data := []string{"Azure ", "Storage ", "Block ", "Blob."}
base64BlockIDs := make([]string, len(data))
for index, d := range data {
base64BlockIDs[index] = blockIDIntToBase64(index)
resp, err := blobURL.StageBlock(ctx, base64BlockIDs[index], strings.NewReader(d), LeaseAccessConditions{}, nil)
if err != nil {
c.Fail()
}
c.Assert(resp.Response().StatusCode, chk.Equals, 201)
c.Assert(resp.Version(), chk.Not(chk.Equals), "")
}
commitResp, err := blobURL.CommitBlockList(ctx, base64BlockIDs, BlobHTTPHeaders{}, Metadata{}, BlobAccessConditions{}, DefaultAccessTier, nil)
c.Assert(err, chk.IsNil)
c.Assert(commitResp.VersionID(), chk.NotNil)
contentResp, err := blobURL.Download(ctx, 0, CountToEnd, BlobAccessConditions{}, false)
c.Assert(err, chk.IsNil)
contentData, err := ioutil.ReadAll(contentResp.Body(RetryReaderOptions{}))
c.Assert(contentData, chk.DeepEquals, []uint8(strings.Join(data, "")))
}
func (s *aztestsSuite) TestSyncCopyBlobReturnsVID(c *chk.C) {
}
func (s *aztestsSuite) TestCreatePageBlobReturnsVID(c *chk.C) {
bsu := getBSU()
container, _ := createNewContainer(c, bsu)
defer delContainer(c, container)
blob, _ := createNewPageBlob(c, container)
putResp, err := blob.UploadPages(context.Background(), 0, getReaderToRandomBytes(1024), PageBlobAccessConditions{}, nil)
c.Assert(err, chk.IsNil)
c.Assert(putResp.Response().StatusCode, chk.Equals, 201)
c.Assert(putResp.LastModified().IsZero(), chk.Equals, false)
c.Assert(putResp.ETag(), chk.Not(chk.Equals), ETagNone)
c.Assert(putResp.Version(), chk.Not(chk.Equals), "")
c.Assert(putResp.rawResponse.Header.Get("x-ms-version-id"), chk.NotNil)
gpResp, err := blob.GetProperties(ctx, BlobAccessConditions{})
c.Assert(err, chk.IsNil)
c.Assert(gpResp, chk.NotNil)
}
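The versioning tests above boil down to the following hedged flow (the demonstrateBlobVersions name and simplified error handling are placeholders; the usual context/fmt/ioutil/strings imports are assumed):
// Every successful write returns a version ID; a specific version can then be
// addressed through WithVersionID for download or delete.
func demonstrateBlobVersions(ctx context.Context, blobURL BlockBlobURL) error {
	uploadResp, err := blobURL.Upload(ctx, strings.NewReader("v1"), BlobHTTPHeaders{}, Metadata{}, BlobAccessConditions{}, DefaultAccessTier, nil)
	if err != nil {
		return err
	}
	firstVersion := uploadResp.VersionID()

	if _, err = blobURL.Upload(ctx, strings.NewReader("v2"), BlobHTTPHeaders{}, Metadata{}, BlobAccessConditions{}, DefaultAccessTier, nil); err != nil {
		return err
	}

	// Read the older version back; its body should still contain "v1".
	dlResp, err := blobURL.WithVersionID(firstVersion).Download(ctx, 0, CountToEnd, BlobAccessConditions{}, false)
	if err != nil {
		return err
	}
	oldBody, err := ioutil.ReadAll(dlResp.Body(RetryReaderOptions{}))
	if err != nil {
		return err
	}
	fmt.Printf("previous version contained %q\n", string(oldBody))

	// Remove just that version; the current blob stays in place.
	_, err = blobURL.WithVersionID(firstVersion).Delete(ctx, DeleteSnapshotsOptionNone, BlobAccessConditions{})
	return err
}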

View file

@ -72,7 +72,7 @@ func Example() {
// Create the blob with string (plain text) content.
data := "Hello World!"
_, err = blobURL.Upload(ctx, strings.NewReader(data), BlobHTTPHeaders{ContentType: "text/plain"}, Metadata{}, BlobAccessConditions{})
_, err = blobURL.Upload(ctx, strings.NewReader(data), BlobHTTPHeaders{ContentType: "text/plain"}, Metadata{}, BlobAccessConditions{}, DefaultAccessTier, nil)
if err != nil {
log.Fatal(err)
}
@ -429,8 +429,7 @@ func ExampleContainerURL_SetContainerAccessPolicy() {
blobURL := containerURL.NewBlockBlobURL("HelloWorld.txt") // Blob names can be mixed case
// Create the blob and put some text in it
_, err = blobURL.Upload(ctx, strings.NewReader("Hello World!"), BlobHTTPHeaders{ContentType: "text/plain"},
Metadata{}, BlobAccessConditions{})
_, err = blobURL.Upload(ctx, strings.NewReader("Hello World!"), BlobHTTPHeaders{ContentType: "text/plain"}, Metadata{}, BlobAccessConditions{}, DefaultAccessTier, nil)
if err != nil {
log.Fatal(err)
}
@ -494,7 +493,7 @@ func ExampleBlobAccessConditions() {
}
// Create the blob (unconditionally; succeeds)
upload, err := blobURL.Upload(ctx, strings.NewReader("Text-1"), BlobHTTPHeaders{}, Metadata{}, BlobAccessConditions{})
upload, err := blobURL.Upload(ctx, strings.NewReader("Text-1"), BlobHTTPHeaders{}, Metadata{}, BlobAccessConditions{}, DefaultAccessTier, nil)
showResult(upload, err)
// Download blob content if the blob has been modified since we uploaded it (fails):
@ -506,8 +505,7 @@ func ExampleBlobAccessConditions() {
BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfUnmodifiedSince: time.Now().UTC().Add(time.Hour * -24)}}, false))
// Upload new content if the blob hasn't changed since the version identified by ETag (succeeds):
upload, err = blobURL.Upload(ctx, strings.NewReader("Text-2"), BlobHTTPHeaders{}, Metadata{},
BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfMatch: upload.ETag()}})
upload, err = blobURL.Upload(ctx, strings.NewReader("Text-2"), BlobHTTPHeaders{}, Metadata{}, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfMatch: upload.ETag()}}, DefaultAccessTier, nil)
showResult(upload, err)
// Download content if it has changed since the version identified by ETag (fails):
@ -515,8 +513,7 @@ func ExampleBlobAccessConditions() {
BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfNoneMatch: upload.ETag()}}, false))
// Upload content if the blob doesn't already exist (fails):
showResult(blobURL.Upload(ctx, strings.NewReader("Text-3"), BlobHTTPHeaders{}, Metadata{},
BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfNoneMatch: ETagAny}}))
showResult(blobURL.Upload(ctx, strings.NewReader("Text-3"), BlobHTTPHeaders{}, Metadata{}, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfNoneMatch: ETagAny}}, DefaultAccessTier, nil))
}
// This examples shows how to create a container with metadata and then how to read & update the metadata.
@ -585,8 +582,7 @@ func ExampleMetadata_blobs() {
// NOTE: Metadata key names are always converted to lowercase before being sent to the Storage Service.
// Therefore, you should always use lowercase letters; especially when querying a map for a metadata key.
creatingApp, _ := os.Executable()
_, err = blobURL.Upload(ctx, strings.NewReader("Some text"), BlobHTTPHeaders{},
Metadata{"author": "Jeffrey", "app": creatingApp}, BlobAccessConditions{})
_, err = blobURL.Upload(ctx, strings.NewReader("Some text"), BlobHTTPHeaders{}, Metadata{"author": "Jeffrey", "app": creatingApp}, BlobAccessConditions{}, DefaultAccessTier, nil)
if err != nil {
log.Fatal(err)
}
@ -633,11 +629,10 @@ func ExampleBlobHTTPHeaders() {
ctx := context.Background() // This example uses a never-expiring context
// Create a blob with HTTP headers
_, err = blobURL.Upload(ctx, strings.NewReader("Some text"),
BlobHTTPHeaders{
ContentType: "text/html; charset=utf-8",
ContentDisposition: "attachment",
}, Metadata{}, BlobAccessConditions{})
_, err = blobURL.Upload(ctx, strings.NewReader("Some text"), BlobHTTPHeaders{
ContentType: "text/html; charset=utf-8",
ContentDisposition: "attachment",
}, Metadata{}, BlobAccessConditions{}, DefaultAccessTier, nil)
if err != nil {
log.Fatal(err)
}
@ -716,7 +711,7 @@ func ExampleBlockBlobURL() {
}
// After all the blocks are uploaded, atomically commit them to the blob.
_, err = blobURL.CommitBlockList(ctx, base64BlockIDs, BlobHTTPHeaders{}, Metadata{}, BlobAccessConditions{})
_, err = blobURL.CommitBlockList(ctx, base64BlockIDs, BlobHTTPHeaders{}, Metadata{}, BlobAccessConditions{}, DefaultAccessTier, nil)
if err != nil {
log.Fatal(err)
}
@ -759,7 +754,7 @@ func ExampleAppendBlobURL() {
appendBlobURL := NewAppendBlobURL(*u, NewPipeline(credential, PipelineOptions{}))
ctx := context.Background() // This example uses a never-expiring context
_, err = appendBlobURL.Create(ctx, BlobHTTPHeaders{}, Metadata{}, BlobAccessConditions{})
_, err = appendBlobURL.Create(ctx, BlobHTTPHeaders{}, Metadata{}, BlobAccessConditions{}, nil)
if err != nil {
log.Fatal(err)
}
@ -799,8 +794,7 @@ func ExamplePageBlobURL() {
blobURL := NewPageBlobURL(*u, NewPipeline(credential, PipelineOptions{}))
ctx := context.Background() // This example uses a never-expiring context
_, err = blobURL.Create(ctx, PageBlobPageBytes*4, 0, BlobHTTPHeaders{},
Metadata{}, BlobAccessConditions{})
_, err = blobURL.Create(ctx, PageBlobPageBytes*4, 0, BlobHTTPHeaders{}, Metadata{}, BlobAccessConditions{}, DefaultPremiumBlobAccessTier, nil)
if err != nil {
log.Fatal(err)
}
@ -870,7 +864,7 @@ func Example_blobSnapshots() {
ctx := context.Background() // This example uses a never-expiring context
// Create the original blob:
_, err = baseBlobURL.Upload(ctx, strings.NewReader("Some text"), BlobHTTPHeaders{}, Metadata{}, BlobAccessConditions{})
_, err = baseBlobURL.Upload(ctx, strings.NewReader("Some text"), BlobHTTPHeaders{}, Metadata{}, BlobAccessConditions{}, DefaultAccessTier, nil)
if err != nil {
log.Fatal(err)
}
@ -880,7 +874,7 @@ func Example_blobSnapshots() {
snapshot := createSnapshot.Snapshot()
// Modify the original blob & show it:
_, err = baseBlobURL.Upload(ctx, strings.NewReader("New text"), BlobHTTPHeaders{}, Metadata{}, BlobAccessConditions{})
_, err = baseBlobURL.Upload(ctx, strings.NewReader("New text"), BlobHTTPHeaders{}, Metadata{}, BlobAccessConditions{}, DefaultAccessTier, nil)
if err != nil {
log.Fatal(err)
}
@ -928,7 +922,7 @@ func Example_blobSnapshots() {
}
// Promote read-only snapshot to writable base blob:
_, err = baseBlobURL.StartCopyFromURL(ctx, snapshotBlobURL.URL(), Metadata{}, ModifiedAccessConditions{}, BlobAccessConditions{})
_, err = baseBlobURL.StartCopyFromURL(ctx, snapshotBlobURL.URL(), Metadata{}, ModifiedAccessConditions{}, BlobAccessConditions{}, DefaultAccessTier, nil)
if err != nil {
log.Fatal(err)
}
@ -966,14 +960,12 @@ func Example_progressUploadDownload() {
requestBody := strings.NewReader("Some text to write")
// Wrap the request body in a RequestBodyProgress and pass a callback function for progress reporting.
_, err = blobURL.Upload(ctx,
pipeline.NewRequestBodyProgress(requestBody, func(bytesTransferred int64) {
fmt.Printf("Wrote %d of %d bytes.", bytesTransferred, requestBody.Size())
}),
BlobHTTPHeaders{
ContentType: "text/html; charset=utf-8",
ContentDisposition: "attachment",
}, Metadata{}, BlobAccessConditions{})
_, err = blobURL.Upload(ctx, pipeline.NewRequestBodyProgress(requestBody, func(bytesTransferred int64) {
fmt.Printf("Wrote %d of %d bytes.", bytesTransferred, requestBody.Size())
}), BlobHTTPHeaders{
ContentType: "text/html; charset=utf-8",
ContentDisposition: "attachment",
}, Metadata{}, BlobAccessConditions{}, DefaultAccessTier, nil)
if err != nil {
log.Fatal(err)
}
@ -1013,7 +1005,7 @@ func ExampleBlobURL_startCopy() {
ctx := context.Background() // This example uses a never-expiring context
src, _ := url.Parse("https://cdn2.auth0.com/docs/media/addons/azure_blob.svg")
startCopy, err := blobURL.StartCopyFromURL(ctx, *src, nil, ModifiedAccessConditions{}, BlobAccessConditions{})
startCopy, err := blobURL.StartCopyFromURL(ctx, *src, nil, ModifiedAccessConditions{}, BlobAccessConditions{}, DefaultAccessTier, nil)
if err != nil {
log.Fatal(err)
}
@ -1259,7 +1251,7 @@ func ExampleListBlobsHierarchy() {
blobNames := []string{"a/1", "a/2", "b/1", "boaty_mcboatface"}
for _, blobName := range blobNames {
blobURL := containerURL.NewBlockBlobURL(blobName)
_, err := blobURL.Upload(ctx, strings.NewReader("test"), BlobHTTPHeaders{}, nil, BlobAccessConditions{})
_, err := blobURL.Upload(ctx, strings.NewReader("test"), BlobHTTPHeaders{}, nil, BlobAccessConditions{}, DefaultAccessTier, nil)
if err != nil {
log.Fatal("an error occurred while creating blobs for the example setup")

View file

@ -24,7 +24,7 @@ func (s *aztestsSuite) TestSnapshotSAS(c *chk.C) {
burl := containerURL.NewBlockBlobURL(blobName)
data := "Hello world!"
_, err = burl.Upload(ctx, strings.NewReader(data), BlobHTTPHeaders{ContentType: "text/plain"}, Metadata{}, BlobAccessConditions{})
_, err = burl.Upload(ctx, strings.NewReader(data), BlobHTTPHeaders{ContentType: "text/plain"}, Metadata{}, BlobAccessConditions{}, DefaultAccessTier, nil)
if err != nil {
c.Fatal(err)
}
@ -61,6 +61,7 @@ func (s *aztestsSuite) TestSnapshotSAS(c *chk.C) {
if err != nil {
c.Fatal(err)
}
time.Sleep(time.Second * 2)
//Attach SAS query to block blob URL
p := NewPipeline(NewAnonymousCredential(), PipelineOptions{})
@ -91,7 +92,7 @@ func (s *aztestsSuite) TestSnapshotSAS(c *chk.C) {
//If this succeeds, it means a normal SAS token was created.
fsburl := containerURL.NewBlockBlobURL("failsnap")
_, err = fsburl.Upload(ctx, strings.NewReader(data), BlobHTTPHeaders{ContentType: "text/plain"}, Metadata{}, BlobAccessConditions{})
_, err = fsburl.Upload(ctx, strings.NewReader(data), BlobHTTPHeaders{ContentType: "text/plain"}, Metadata{}, BlobAccessConditions{}, DefaultAccessTier, nil)
if err != nil {
c.Fatal(err) //should succeed to create the blob via normal auth means
}

View file

@ -166,8 +166,7 @@ func createNewContainerWithSuffix(c *chk.C, bsu ServiceURL, suffix string) (cont
func createNewBlockBlob(c *chk.C, container ContainerURL) (blob BlockBlobURL, name string) {
blob, name = getBlockBlobURL(c, container)
cResp, err := blob.Upload(ctx, strings.NewReader(blockBlobDefaultData), BlobHTTPHeaders{},
nil, BlobAccessConditions{})
cResp, err := blob.Upload(ctx, strings.NewReader(blockBlobDefaultData), BlobHTTPHeaders{}, nil, BlobAccessConditions{}, DefaultAccessTier, nil)
c.Assert(err, chk.IsNil)
c.Assert(cResp.StatusCode(), chk.Equals, 201)
@ -178,7 +177,7 @@ func createNewBlockBlob(c *chk.C, container ContainerURL) (blob BlockBlobURL, na
func createNewAppendBlob(c *chk.C, container ContainerURL) (blob AppendBlobURL, name string) {
blob, name = getAppendBlobURL(c, container)
resp, err := blob.Create(ctx, BlobHTTPHeaders{}, nil, BlobAccessConditions{})
resp, err := blob.Create(ctx, BlobHTTPHeaders{}, nil, BlobAccessConditions{}, nil)
c.Assert(err, chk.IsNil)
c.Assert(resp.StatusCode(), chk.Equals, 201)
@ -188,7 +187,7 @@ func createNewAppendBlob(c *chk.C, container ContainerURL) (blob AppendBlobURL,
func createNewPageBlob(c *chk.C, container ContainerURL) (blob PageBlobURL, name string) {
blob, name = getPageBlobURL(c, container)
resp, err := blob.Create(ctx, PageBlobPageBytes*10, 0, BlobHTTPHeaders{}, nil, BlobAccessConditions{})
resp, err := blob.Create(ctx, PageBlobPageBytes*10, 0, BlobHTTPHeaders{}, nil, BlobAccessConditions{}, DefaultPremiumBlobAccessTier, nil)
c.Assert(err, chk.IsNil)
c.Assert(resp.StatusCode(), chk.Equals, 201)
return
@ -197,7 +196,7 @@ func createNewPageBlob(c *chk.C, container ContainerURL) (blob PageBlobURL, name
func createNewPageBlobWithSize(c *chk.C, container ContainerURL, sizeInBytes int64) (blob PageBlobURL, name string) {
blob, name = getPageBlobURL(c, container)
resp, err := blob.Create(ctx, sizeInBytes, 0, BlobHTTPHeaders{}, nil, BlobAccessConditions{})
resp, err := blob.Create(ctx, sizeInBytes, 0, BlobHTTPHeaders{}, nil, BlobAccessConditions{}, DefaultPremiumBlobAccessTier, nil)
c.Assert(err, chk.IsNil)
c.Assert(resp.StatusCode(), chk.Equals, 201)
@ -208,8 +207,7 @@ func createBlockBlobWithPrefix(c *chk.C, container ContainerURL, prefix string)
name = prefix + generateName(blobPrefix)
blob = container.NewBlockBlobURL(name)
cResp, err := blob.Upload(ctx, strings.NewReader(blockBlobDefaultData), BlobHTTPHeaders{},
nil, BlobAccessConditions{})
cResp, err := blob.Upload(ctx, strings.NewReader(blockBlobDefaultData), BlobHTTPHeaders{}, nil, BlobAccessConditions{}, DefaultAccessTier, nil)
c.Assert(err, chk.IsNil)
c.Assert(cResp.StatusCode(), chk.Equals, 201)

View file

@ -20,7 +20,7 @@ func (s *aztestsSuite) TestAppendBlock(c *chk.C) {
blob := container.NewAppendBlobURL(generateBlobName())
resp, err := blob.Create(context.Background(), BlobHTTPHeaders{}, nil, BlobAccessConditions{})
resp, err := blob.Create(context.Background(), BlobHTTPHeaders{}, nil, BlobAccessConditions{}, nil)
c.Assert(err, chk.IsNil)
c.Assert(resp.StatusCode(), chk.Equals, 201)
@ -49,7 +49,7 @@ func (s *aztestsSuite) TestAppendBlockWithMD5(c *chk.C) {
// set up blob to test
blob := container.NewAppendBlobURL(generateBlobName())
resp, err := blob.Create(context.Background(), BlobHTTPHeaders{}, nil, BlobAccessConditions{})
resp, err := blob.Create(context.Background(), BlobHTTPHeaders{}, nil, BlobAccessConditions{}, nil)
c.Assert(err, chk.IsNil)
c.Assert(resp.StatusCode(), chk.Equals, 201)
@ -91,7 +91,7 @@ func (s *aztestsSuite) TestAppendBlockFromURL(c *chk.C) {
destBlob := container.NewAppendBlobURL(generateName("appenddest"))
// Prepare source blob for copy.
cResp1, err := srcBlob.Create(context.Background(), BlobHTTPHeaders{}, nil, BlobAccessConditions{})
cResp1, err := srcBlob.Create(context.Background(), BlobHTTPHeaders{}, nil, BlobAccessConditions{}, nil)
c.Assert(err, chk.IsNil)
c.Assert(cResp1.StatusCode(), chk.Equals, 201)
appendResp, err := srcBlob.AppendBlock(context.Background(), r, AppendBlobAccessConditions{}, nil)
@ -123,7 +123,7 @@ func (s *aztestsSuite) TestAppendBlockFromURL(c *chk.C) {
srcBlobURLWithSAS := srcBlobParts.URL()
// Append block from URL.
cResp2, err := destBlob.Create(context.Background(), BlobHTTPHeaders{}, nil, BlobAccessConditions{})
cResp2, err := destBlob.Create(context.Background(), BlobHTTPHeaders{}, nil, BlobAccessConditions{}, nil)
c.Assert(err, chk.IsNil)
c.Assert(cResp2.StatusCode(), chk.Equals, 201)
appendFromURLResp, err := destBlob.AppendBlockFromURL(ctx, srcBlobURLWithSAS, 0, int64(testSize), AppendBlobAccessConditions{}, ModifiedAccessConditions{}, nil)
@ -163,7 +163,7 @@ func (s *aztestsSuite) TestAppendBlockFromURLWithMD5(c *chk.C) {
destBlob := container.NewAppendBlobURL(generateName("appenddest"))
// Prepare source blob for copy.
cResp1, err := srcBlob.Create(context.Background(), BlobHTTPHeaders{}, nil, BlobAccessConditions{})
cResp1, err := srcBlob.Create(context.Background(), BlobHTTPHeaders{}, nil, BlobAccessConditions{}, nil)
c.Assert(err, chk.IsNil)
c.Assert(cResp1.StatusCode(), chk.Equals, 201)
appendResp, err := srcBlob.AppendBlock(context.Background(), r, AppendBlobAccessConditions{}, nil)
@ -195,7 +195,7 @@ func (s *aztestsSuite) TestAppendBlockFromURLWithMD5(c *chk.C) {
srcBlobURLWithSAS := srcBlobParts.URL()
// Append block from URL.
cResp2, err := destBlob.Create(context.Background(), BlobHTTPHeaders{}, nil, BlobAccessConditions{})
cResp2, err := destBlob.Create(context.Background(), BlobHTTPHeaders{}, nil, BlobAccessConditions{}, nil)
c.Assert(err, chk.IsNil)
c.Assert(cResp2.StatusCode(), chk.Equals, 201)
appendFromURLResp, err := destBlob.AppendBlockFromURL(ctx, srcBlobURLWithSAS, 0, int64(testSize), AppendBlobAccessConditions{}, ModifiedAccessConditions{}, md5Value[:])
@ -229,7 +229,7 @@ func (s *aztestsSuite) TestBlobCreateAppendMetadataNonEmpty(c *chk.C) {
defer deleteContainer(c, containerURL)
blobURL, _ := getAppendBlobURL(c, containerURL)
_, err := blobURL.Create(ctx, BlobHTTPHeaders{}, basicMetadata, BlobAccessConditions{})
_, err := blobURL.Create(ctx, BlobHTTPHeaders{}, basicMetadata, BlobAccessConditions{}, nil)
c.Assert(err, chk.IsNil)
resp, err := blobURL.GetProperties(ctx, BlobAccessConditions{})
@ -243,7 +243,7 @@ func (s *aztestsSuite) TestBlobCreateAppendMetadataEmpty(c *chk.C) {
defer deleteContainer(c, containerURL)
blobURL, _ := getAppendBlobURL(c, containerURL)
_, err := blobURL.Create(ctx, BlobHTTPHeaders{}, Metadata{}, BlobAccessConditions{})
_, err := blobURL.Create(ctx, BlobHTTPHeaders{}, Metadata{}, BlobAccessConditions{}, nil)
c.Assert(err, chk.IsNil)
resp, err := blobURL.GetProperties(ctx, BlobAccessConditions{})
@ -257,7 +257,7 @@ func (s *aztestsSuite) TestBlobCreateAppendMetadataInvalid(c *chk.C) {
defer deleteContainer(c, containerURL)
blobURL, _ := getAppendBlobURL(c, containerURL)
_, err := blobURL.Create(ctx, BlobHTTPHeaders{}, Metadata{"In valid!": "bar"}, BlobAccessConditions{})
_, err := blobURL.Create(ctx, BlobHTTPHeaders{}, Metadata{"In valid!": "bar"}, BlobAccessConditions{}, nil)
c.Assert(strings.Contains(err.Error(), invalidHeaderErrorSubstring), chk.Equals, true)
}
@ -267,7 +267,7 @@ func (s *aztestsSuite) TestBlobCreateAppendHTTPHeaders(c *chk.C) {
defer deleteContainer(c, containerURL)
blobURL, _ := getAppendBlobURL(c, containerURL)
_, err := blobURL.Create(ctx, basicHeaders, nil, BlobAccessConditions{})
_, err := blobURL.Create(ctx, basicHeaders, nil, BlobAccessConditions{}, nil)
c.Assert(err, chk.IsNil)
resp, err := blobURL.GetProperties(ctx, BlobAccessConditions{})
@ -290,8 +290,7 @@ func (s *aztestsSuite) TestBlobCreateAppendIfModifiedSinceTrue(c *chk.C) {
currentTime := getRelativeTimeGMT(-10)
_, err := blobURL.Create(ctx, BlobHTTPHeaders{}, basicMetadata,
BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfModifiedSince: currentTime}})
_, err := blobURL.Create(ctx, BlobHTTPHeaders{}, basicMetadata, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfModifiedSince: currentTime}}, nil)
c.Assert(err, chk.IsNil)
validateAppendBlobPut(c, blobURL)
@ -305,8 +304,7 @@ func (s *aztestsSuite) TestBlobCreateAppendIfModifiedSinceFalse(c *chk.C) {
currentTime := getRelativeTimeGMT(10)
_, err := blobURL.Create(ctx, BlobHTTPHeaders{}, basicMetadata,
BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfModifiedSince: currentTime}})
_, err := blobURL.Create(ctx, BlobHTTPHeaders{}, basicMetadata, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfModifiedSince: currentTime}}, nil)
validateStorageError(c, err, ServiceCodeConditionNotMet)
}
@ -318,8 +316,7 @@ func (s *aztestsSuite) TestBlobCreateAppendIfUnmodifiedSinceTrue(c *chk.C) {
currentTime := getRelativeTimeGMT(10)
_, err := blobURL.Create(ctx, BlobHTTPHeaders{}, basicMetadata,
BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfUnmodifiedSince: currentTime}})
_, err := blobURL.Create(ctx, BlobHTTPHeaders{}, basicMetadata, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfUnmodifiedSince: currentTime}}, nil)
c.Assert(err, chk.IsNil)
validateAppendBlobPut(c, blobURL)
@ -333,8 +330,7 @@ func (s *aztestsSuite) TestBlobCreateAppendIfUnmodifiedSinceFalse(c *chk.C) {
currentTime := getRelativeTimeGMT(-10)
_, err := blobURL.Create(ctx, BlobHTTPHeaders{}, basicMetadata,
BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfUnmodifiedSince: currentTime}})
_, err := blobURL.Create(ctx, BlobHTTPHeaders{}, basicMetadata, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfUnmodifiedSince: currentTime}}, nil)
validateStorageError(c, err, ServiceCodeConditionNotMet)
}
@ -346,8 +342,7 @@ func (s *aztestsSuite) TestBlobCreateAppendIfMatchTrue(c *chk.C) {
resp, _ := blobURL.GetProperties(ctx, BlobAccessConditions{})
_, err := blobURL.Create(ctx, BlobHTTPHeaders{}, basicMetadata,
BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfMatch: resp.ETag()}})
_, err := blobURL.Create(ctx, BlobHTTPHeaders{}, basicMetadata, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfMatch: resp.ETag()}}, nil)
c.Assert(err, chk.IsNil)
validateAppendBlobPut(c, blobURL)
@ -359,8 +354,7 @@ func (s *aztestsSuite) TestBlobCreateAppendIfMatchFalse(c *chk.C) {
defer deleteContainer(c, containerURL)
blobURL, _ := createNewAppendBlob(c, containerURL)
_, err := blobURL.Create(ctx, BlobHTTPHeaders{}, basicMetadata,
BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfMatch: ETag("garbage")}})
_, err := blobURL.Create(ctx, BlobHTTPHeaders{}, basicMetadata, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfMatch: ETag("garbage")}}, nil)
validateStorageError(c, err, ServiceCodeConditionNotMet)
}
@ -370,8 +364,7 @@ func (s *aztestsSuite) TestBlobCreateAppendIfNoneMatchTrue(c *chk.C) {
defer deleteContainer(c, containerURL)
blobURL, _ := createNewAppendBlob(c, containerURL)
_, err := blobURL.Create(ctx, BlobHTTPHeaders{}, basicMetadata,
BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfNoneMatch: ETag("garbage")}})
_, err := blobURL.Create(ctx, BlobHTTPHeaders{}, basicMetadata, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfNoneMatch: ETag("garbage")}}, nil)
c.Assert(err, chk.IsNil)
validateAppendBlobPut(c, blobURL)
@ -385,8 +378,7 @@ func (s *aztestsSuite) TestBlobCreateAppendIfNoneMatchFalse(c *chk.C) {
resp, _ := blobURL.GetProperties(ctx, BlobAccessConditions{})
_, err := blobURL.Create(ctx, BlobHTTPHeaders{}, basicMetadata,
BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfNoneMatch: resp.ETag()}})
_, err := blobURL.Create(ctx, BlobHTTPHeaders{}, basicMetadata, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfNoneMatch: resp.ETag()}}, nil)
validateStorageError(c, err, ServiceCodeConditionNotMet)
}
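The append-blob hunks above all make the same mechanical change: AppendBlobURL.Create takes one extra trailing argument (nil in the tests; assumed to be the new blob-tags map), while AppendBlock is untouched. A hedged sketch of the updated call, with the URL object assumed to be constructed elsewhere:

package blobexamples

import (
	"context"
	"strings"

	"github.com/Azure/azure-storage-blob-go/azblob"
)

// createAndAppend mirrors the v0.11.0 call shapes from the hunks above.
func createAndAppend(ctx context.Context, appendURL azblob.AppendBlobURL) error {
	// headers, metadata, access conditions, plus the new trailing argument
	// (nil here; assumed to be blob tags).
	if _, err := appendURL.Create(ctx, azblob.BlobHTTPHeaders{}, azblob.Metadata{},
		azblob.BlobAccessConditions{}, nil); err != nil {
		return err
	}
	// AppendBlock is unchanged by this diff.
	_, err := appendURL.AppendBlock(ctx, strings.NewReader("appended data"),
		azblob.AppendBlobAccessConditions{}, nil)
	return err
}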

View file

@ -94,7 +94,7 @@ func (s *aztestsSuite) TestBlobStartCopyDestEmpty(c *chk.C) {
blobURL, _ := createNewBlockBlob(c, containerURL)
copyBlobURL, _ := getBlockBlobURL(c, containerURL)
blobCopyResponse, err := copyBlobURL.StartCopyFromURL(ctx, blobURL.URL(), nil, ModifiedAccessConditions{}, BlobAccessConditions{})
blobCopyResponse, err := copyBlobURL.StartCopyFromURL(ctx, blobURL.URL(), nil, ModifiedAccessConditions{}, BlobAccessConditions{}, DefaultAccessTier, nil)
c.Assert(err, chk.IsNil)
waitForCopy(c, copyBlobURL, blobCopyResponse)
@ -115,7 +115,7 @@ func (s *aztestsSuite) TestBlobStartCopyMetadata(c *chk.C) {
blobURL, _ := createNewBlockBlob(c, containerURL)
copyBlobURL, _ := getBlockBlobURL(c, containerURL)
resp, err := copyBlobURL.StartCopyFromURL(ctx, blobURL.URL(), basicMetadata, ModifiedAccessConditions{}, BlobAccessConditions{})
resp, err := copyBlobURL.StartCopyFromURL(ctx, blobURL.URL(), basicMetadata, ModifiedAccessConditions{}, BlobAccessConditions{}, DefaultAccessTier, nil)
c.Assert(err, chk.IsNil)
waitForCopy(c, copyBlobURL, resp)
@ -132,11 +132,10 @@ func (s *aztestsSuite) TestBlobStartCopyMetadataNil(c *chk.C) {
copyBlobURL, _ := getBlockBlobURL(c, containerURL)
// Have the destination start with metadata so we ensure the nil metadata passed later takes effect
_, err := copyBlobURL.Upload(ctx, bytes.NewReader([]byte("data")), BlobHTTPHeaders{},
basicMetadata, BlobAccessConditions{})
_, err := copyBlobURL.Upload(ctx, bytes.NewReader([]byte("data")), BlobHTTPHeaders{}, basicMetadata, BlobAccessConditions{}, DefaultAccessTier, nil)
c.Assert(err, chk.IsNil)
resp, err := copyBlobURL.StartCopyFromURL(ctx, blobURL.URL(), nil, ModifiedAccessConditions{}, BlobAccessConditions{})
resp, err := copyBlobURL.StartCopyFromURL(ctx, blobURL.URL(), nil, ModifiedAccessConditions{}, BlobAccessConditions{}, DefaultAccessTier, nil)
c.Assert(err, chk.IsNil)
waitForCopy(c, copyBlobURL, resp)
@ -154,11 +153,10 @@ func (s *aztestsSuite) TestBlobStartCopyMetadataEmpty(c *chk.C) {
copyBlobURL, _ := getBlockBlobURL(c, containerURL)
// Have the destination start with metadata so we ensure the empty metadata passed later takes effect
_, err := copyBlobURL.Upload(ctx, bytes.NewReader([]byte("data")), BlobHTTPHeaders{},
basicMetadata, BlobAccessConditions{})
_, err := copyBlobURL.Upload(ctx, bytes.NewReader([]byte("data")), BlobHTTPHeaders{}, basicMetadata, BlobAccessConditions{}, DefaultAccessTier, nil)
c.Assert(err, chk.IsNil)
resp, err := copyBlobURL.StartCopyFromURL(ctx, blobURL.URL(), Metadata{}, ModifiedAccessConditions{}, BlobAccessConditions{})
resp, err := copyBlobURL.StartCopyFromURL(ctx, blobURL.URL(), Metadata{}, ModifiedAccessConditions{}, BlobAccessConditions{}, DefaultAccessTier, nil)
c.Assert(err, chk.IsNil)
waitForCopy(c, copyBlobURL, resp)
@ -175,7 +173,7 @@ func (s *aztestsSuite) TestBlobStartCopyMetadataInvalidField(c *chk.C) {
blobURL, _ := createNewBlockBlob(c, containerURL)
copyBlobURL, _ := getBlockBlobURL(c, containerURL)
_, err := copyBlobURL.StartCopyFromURL(ctx, blobURL.URL(), Metadata{"I nvalid.": "bar"}, ModifiedAccessConditions{}, BlobAccessConditions{})
_, err := copyBlobURL.StartCopyFromURL(ctx, blobURL.URL(), Metadata{"I nvalid.": "bar"}, ModifiedAccessConditions{}, BlobAccessConditions{}, DefaultAccessTier, nil)
c.Assert(err, chk.NotNil)
c.Assert(strings.Contains(err.Error(), invalidHeaderErrorSubstring), chk.Equals, true)
}
@ -187,7 +185,7 @@ func (s *aztestsSuite) TestBlobStartCopySourceNonExistant(c *chk.C) {
blobURL, _ := getBlockBlobURL(c, containerURL)
copyBlobURL, _ := getBlockBlobURL(c, containerURL)
_, err := copyBlobURL.StartCopyFromURL(ctx, blobURL.URL(), nil, ModifiedAccessConditions{}, BlobAccessConditions{})
_, err := copyBlobURL.StartCopyFromURL(ctx, blobURL.URL(), nil, ModifiedAccessConditions{}, BlobAccessConditions{}, DefaultAccessTier, nil)
validateStorageError(c, err, ServiceCodeBlobNotFound)
}
@ -211,7 +209,7 @@ func (s *aztestsSuite) TestBlobStartCopySourcePrivate(c *chk.C) {
if bsu.String() == bsu2.String() {
c.Skip("Test not valid because primary and secondary accounts are the same")
}
_, err = copyBlobURL.StartCopyFromURL(ctx, blobURL.URL(), nil, ModifiedAccessConditions{}, BlobAccessConditions{})
_, err = copyBlobURL.StartCopyFromURL(ctx, blobURL.URL(), nil, ModifiedAccessConditions{}, BlobAccessConditions{}, DefaultAccessTier, nil)
validateStorageError(c, err, ServiceCodeCannotVerifyCopySource)
}
@ -250,7 +248,7 @@ func (s *aztestsSuite) TestBlobStartCopyUsingSASSrc(c *chk.C) {
defer deleteContainer(c, copyContainerURL)
copyBlobURL, _ := getBlockBlobURL(c, copyContainerURL)
resp, err := copyBlobURL.StartCopyFromURL(ctx, sasURL, nil, ModifiedAccessConditions{}, BlobAccessConditions{})
resp, err := copyBlobURL.StartCopyFromURL(ctx, sasURL, nil, ModifiedAccessConditions{}, BlobAccessConditions{}, DefaultAccessTier, nil)
c.Assert(err, chk.IsNil)
waitForCopy(c, copyBlobURL, resp)
@ -321,7 +319,7 @@ func (s *aztestsSuite) TestBlobStartCopyUsingSASDest(c *chk.C) {
srcBlobWithSasURL := blobURL.URL()
srcBlobWithSasURL.RawQuery = queryParams.Encode()
resp, err := anonBlobURL.StartCopyFromURL(ctx, srcBlobWithSasURL, nil, ModifiedAccessConditions{}, BlobAccessConditions{})
resp, err := anonBlobURL.StartCopyFromURL(ctx, srcBlobWithSasURL, nil, ModifiedAccessConditions{}, BlobAccessConditions{}, DefaultAccessTier, nil)
c.Assert(err, chk.IsNil)
// Allow copy to happen
@ -346,9 +344,7 @@ func (s *aztestsSuite) TestBlobStartCopySourceIfModifiedSinceTrue(c *chk.C) {
blobURL, _ := createNewBlockBlob(c, containerURL)
destBlobURL, _ := getBlockBlobURL(c, containerURL)
_, err := destBlobURL.StartCopyFromURL(ctx, blobURL.URL(), basicMetadata,
ModifiedAccessConditions{IfModifiedSince: currentTime},
BlobAccessConditions{})
_, err := destBlobURL.StartCopyFromURL(ctx, blobURL.URL(), basicMetadata, ModifiedAccessConditions{IfModifiedSince: currentTime}, BlobAccessConditions{}, DefaultAccessTier, nil)
c.Assert(err, chk.IsNil)
resp, err := destBlobURL.GetProperties(ctx, BlobAccessConditions{})
@ -365,9 +361,7 @@ func (s *aztestsSuite) TestBlobStartCopySourceIfModifiedSinceFalse(c *chk.C) {
currentTime := getRelativeTimeGMT(10)
destBlobURL, _ := getBlockBlobURL(c, containerURL)
_, err := destBlobURL.StartCopyFromURL(ctx, blobURL.URL(), nil,
ModifiedAccessConditions{IfModifiedSince: currentTime},
BlobAccessConditions{})
_, err := destBlobURL.StartCopyFromURL(ctx, blobURL.URL(), nil, ModifiedAccessConditions{IfModifiedSince: currentTime}, BlobAccessConditions{}, DefaultAccessTier, nil)
validateStorageError(c, err, ServiceCodeSourceConditionNotMet)
}
@ -380,9 +374,7 @@ func (s *aztestsSuite) TestBlobStartCopySourceIfUnmodifiedSinceTrue(c *chk.C) {
currentTime := getRelativeTimeGMT(10)
destBlobURL, _ := getBlockBlobURL(c, containerURL)
_, err := destBlobURL.StartCopyFromURL(ctx, blobURL.URL(), basicMetadata,
ModifiedAccessConditions{IfUnmodifiedSince: currentTime},
BlobAccessConditions{})
_, err := destBlobURL.StartCopyFromURL(ctx, blobURL.URL(), basicMetadata, ModifiedAccessConditions{IfUnmodifiedSince: currentTime}, BlobAccessConditions{}, DefaultAccessTier, nil)
c.Assert(err, chk.IsNil)
resp, err := destBlobURL.GetProperties(ctx, BlobAccessConditions{})
@ -399,9 +391,7 @@ func (s *aztestsSuite) TestBlobStartCopySourceIfUnmodifiedSinceFalse(c *chk.C) {
currentTime := getRelativeTimeGMT(-10)
destBlobURL, _ := getBlockBlobURL(c, containerURL)
_, err := destBlobURL.StartCopyFromURL(ctx, blobURL.URL(), nil,
ModifiedAccessConditions{IfUnmodifiedSince: currentTime},
BlobAccessConditions{})
_, err := destBlobURL.StartCopyFromURL(ctx, blobURL.URL(), nil, ModifiedAccessConditions{IfUnmodifiedSince: currentTime}, BlobAccessConditions{}, DefaultAccessTier, nil)
validateStorageError(c, err, ServiceCodeSourceConditionNotMet)
}
@ -416,9 +406,7 @@ func (s *aztestsSuite) TestBlobStartCopySourceIfMatchTrue(c *chk.C) {
etag := resp.ETag()
destBlobURL, _ := getBlockBlobURL(c, containerURL)
_, err = destBlobURL.StartCopyFromURL(ctx, blobURL.URL(), basicMetadata,
ModifiedAccessConditions{IfMatch: etag},
BlobAccessConditions{})
_, err = destBlobURL.StartCopyFromURL(ctx, blobURL.URL(), basicMetadata, ModifiedAccessConditions{IfMatch: etag}, BlobAccessConditions{}, DefaultAccessTier, nil)
c.Assert(err, chk.IsNil)
resp2, err := destBlobURL.GetProperties(ctx, BlobAccessConditions{})
@ -433,9 +421,7 @@ func (s *aztestsSuite) TestBlobStartCopySourceIfMatchFalse(c *chk.C) {
blobURL, _ := createNewBlockBlob(c, containerURL)
destBlobURL, _ := getBlockBlobURL(c, containerURL)
_, err := destBlobURL.StartCopyFromURL(ctx, blobURL.URL(), basicMetadata,
ModifiedAccessConditions{IfMatch: "a"},
BlobAccessConditions{})
_, err := destBlobURL.StartCopyFromURL(ctx, blobURL.URL(), basicMetadata, ModifiedAccessConditions{IfMatch: "a"}, BlobAccessConditions{}, DefaultAccessTier, nil)
validateStorageError(c, err, ServiceCodeSourceConditionNotMet)
}
@ -446,9 +432,7 @@ func (s *aztestsSuite) TestBlobStartCopySourceIfNoneMatchTrue(c *chk.C) {
blobURL, _ := createNewBlockBlob(c, containerURL)
destBlobURL, _ := getBlockBlobURL(c, containerURL)
_, err := destBlobURL.StartCopyFromURL(ctx, blobURL.URL(), basicMetadata,
ModifiedAccessConditions{IfNoneMatch: "a"},
BlobAccessConditions{})
_, err := destBlobURL.StartCopyFromURL(ctx, blobURL.URL(), basicMetadata, ModifiedAccessConditions{IfNoneMatch: "a"}, BlobAccessConditions{}, DefaultAccessTier, nil)
c.Assert(err, chk.IsNil)
resp2, err := destBlobURL.GetProperties(ctx, BlobAccessConditions{})
@ -467,9 +451,7 @@ func (s *aztestsSuite) TestBlobStartCopySourceIfNoneMatchFalse(c *chk.C) {
etag := resp.ETag()
destBlobURL, _ := getBlockBlobURL(c, containerURL)
_, err = destBlobURL.StartCopyFromURL(ctx, blobURL.URL(), nil,
ModifiedAccessConditions{IfNoneMatch: etag},
BlobAccessConditions{})
_, err = destBlobURL.StartCopyFromURL(ctx, blobURL.URL(), nil, ModifiedAccessConditions{IfNoneMatch: etag}, BlobAccessConditions{}, DefaultAccessTier, nil)
validateStorageError(c, err, ServiceCodeSourceConditionNotMet)
}
@ -481,9 +463,7 @@ func (s *aztestsSuite) TestBlobStartCopyDestIfModifiedSinceTrue(c *chk.C) {
blobURL, _ := createNewBlockBlob(c, containerURL)
destBlobURL, _ := createNewBlockBlob(c, containerURL) // The blob must exist to have a last-modified time
_, err := destBlobURL.StartCopyFromURL(ctx, blobURL.URL(), basicMetadata,
ModifiedAccessConditions{},
BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfModifiedSince: currentTime}})
_, err := destBlobURL.StartCopyFromURL(ctx, blobURL.URL(), basicMetadata, ModifiedAccessConditions{}, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfModifiedSince: currentTime}}, DefaultAccessTier, nil)
c.Assert(err, chk.IsNil)
resp, err := destBlobURL.GetProperties(ctx, BlobAccessConditions{})
@ -500,9 +480,7 @@ func (s *aztestsSuite) TestBlobStartCopyDestIfModifiedSinceFalse(c *chk.C) {
destBlobURL, _ := createNewBlockBlob(c, containerURL)
currentTime := getRelativeTimeGMT(10)
_, err := destBlobURL.StartCopyFromURL(ctx, blobURL.URL(), nil,
ModifiedAccessConditions{},
BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfModifiedSince: currentTime}})
_, err := destBlobURL.StartCopyFromURL(ctx, blobURL.URL(), nil, ModifiedAccessConditions{}, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfModifiedSince: currentTime}}, DefaultAccessTier, nil)
validateStorageError(c, err, ServiceCodeTargetConditionNotMet)
}
@ -515,9 +493,7 @@ func (s *aztestsSuite) TestBlobStartCopyDestIfUnmodifiedSinceTrue(c *chk.C) {
destBlobURL, _ := createNewBlockBlob(c, containerURL)
currentTime := getRelativeTimeGMT(10)
_, err := destBlobURL.StartCopyFromURL(ctx, blobURL.URL(), basicMetadata,
ModifiedAccessConditions{},
BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfUnmodifiedSince: currentTime}})
_, err := destBlobURL.StartCopyFromURL(ctx, blobURL.URL(), basicMetadata, ModifiedAccessConditions{}, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfUnmodifiedSince: currentTime}}, DefaultAccessTier, nil)
c.Assert(err, chk.IsNil)
resp, err := destBlobURL.GetProperties(ctx, BlobAccessConditions{})
@ -534,9 +510,7 @@ func (s *aztestsSuite) TestBlobStartCopyDestIfUnmodifiedSinceFalse(c *chk.C) {
currentTime := getRelativeTimeGMT(-10)
destBlobURL, _ := createNewBlockBlob(c, containerURL)
_, err := destBlobURL.StartCopyFromURL(ctx, blobURL.URL(), nil,
ModifiedAccessConditions{},
BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfUnmodifiedSince: currentTime}})
_, err := destBlobURL.StartCopyFromURL(ctx, blobURL.URL(), nil, ModifiedAccessConditions{}, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfUnmodifiedSince: currentTime}}, DefaultAccessTier, nil)
validateStorageError(c, err, ServiceCodeTargetConditionNotMet)
}
@ -550,9 +524,7 @@ func (s *aztestsSuite) TestBlobStartCopyDestIfMatchTrue(c *chk.C) {
resp, _ := destBlobURL.GetProperties(ctx, BlobAccessConditions{})
etag := resp.ETag()
_, err := destBlobURL.StartCopyFromURL(ctx, blobURL.URL(), basicMetadata,
ModifiedAccessConditions{},
BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfMatch: etag}})
_, err := destBlobURL.StartCopyFromURL(ctx, blobURL.URL(), basicMetadata, ModifiedAccessConditions{}, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfMatch: etag}}, DefaultAccessTier, nil)
c.Assert(err, chk.IsNil)
resp, err = destBlobURL.GetProperties(ctx, BlobAccessConditions{})
@ -572,8 +544,7 @@ func (s *aztestsSuite) TestBlobStartCopyDestIfMatchFalse(c *chk.C) {
destBlobURL.SetMetadata(ctx, nil, BlobAccessConditions{}) // SetMetadata changes the blob's etag
_, err := destBlobURL.StartCopyFromURL(ctx, blobURL.URL(), nil, ModifiedAccessConditions{},
BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfMatch: etag}})
_, err := destBlobURL.StartCopyFromURL(ctx, blobURL.URL(), nil, ModifiedAccessConditions{}, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfMatch: etag}}, DefaultAccessTier, nil)
validateStorageError(c, err, ServiceCodeTargetConditionNotMet)
}
@ -589,8 +560,7 @@ func (s *aztestsSuite) TestBlobStartCopyDestIfNoneMatchTrue(c *chk.C) {
destBlobURL.SetMetadata(ctx, nil, BlobAccessConditions{}) // SetMetadata changes the blob's etag
_, err := destBlobURL.StartCopyFromURL(ctx, blobURL.URL(), basicMetadata, ModifiedAccessConditions{},
BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfNoneMatch: etag}})
_, err := destBlobURL.StartCopyFromURL(ctx, blobURL.URL(), basicMetadata, ModifiedAccessConditions{}, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfNoneMatch: etag}}, DefaultAccessTier, nil)
c.Assert(err, chk.IsNil)
resp, err = destBlobURL.GetProperties(ctx, BlobAccessConditions{})
@ -608,8 +578,7 @@ func (s *aztestsSuite) TestBlobStartCopyDestIfNoneMatchFalse(c *chk.C) {
resp, _ := destBlobURL.GetProperties(ctx, BlobAccessConditions{})
etag := resp.ETag()
_, err := destBlobURL.StartCopyFromURL(ctx, blobURL.URL(), nil, ModifiedAccessConditions{},
BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfNoneMatch: etag}})
_, err := destBlobURL.StartCopyFromURL(ctx, blobURL.URL(), nil, ModifiedAccessConditions{}, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfNoneMatch: etag}}, DefaultAccessTier, nil)
validateStorageError(c, err, ServiceCodeTargetConditionNotMet)
}
@ -625,7 +594,7 @@ func (s *aztestsSuite) TestBlobAbortCopyInProgress(c *chk.C) {
for i := range blobData {
blobData[i] = byte('a' + i%26)
}
_, err := blobURL.Upload(ctx, bytes.NewReader(blobData), BlobHTTPHeaders{}, nil, BlobAccessConditions{})
_, err := blobURL.Upload(ctx, bytes.NewReader(blobData), BlobHTTPHeaders{}, nil, BlobAccessConditions{}, DefaultAccessTier, nil)
c.Assert(err, chk.IsNil)
containerURL.SetAccessPolicy(ctx, PublicAccessBlob, nil, ContainerAccessConditions{}) // So that we don't have to create a SAS
@ -641,7 +610,7 @@ func (s *aztestsSuite) TestBlobAbortCopyInProgress(c *chk.C) {
defer deleteContainer(c, copyContainerURL)
resp, err := copyBlobURL.StartCopyFromURL(ctx, blobURL.URL(), nil, ModifiedAccessConditions{}, BlobAccessConditions{})
resp, err := copyBlobURL.StartCopyFromURL(ctx, blobURL.URL(), nil, ModifiedAccessConditions{}, BlobAccessConditions{}, DefaultAccessTier, nil)
c.Assert(err, chk.IsNil)
c.Assert(resp.CopyStatus(), chk.Equals, CopyStatusPending)
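Every StartCopyFromURL call in this file is rewritten the same way: the existing metadata, source-condition, and destination-condition arguments are followed by the new access tier and a trailing nil (assumed blob tags). A hedged sketch of the updated asynchronous copy:

package blobexamples

import (
	"context"

	"github.com/Azure/azure-storage-blob-go/azblob"
)

// startCopy mirrors the updated StartCopyFromURL call shape used above.
func startCopy(ctx context.Context, src, dst azblob.BlockBlobURL) error {
	resp, err := dst.StartCopyFromURL(ctx, src.URL(), azblob.Metadata{},
		azblob.ModifiedAccessConditions{}, azblob.BlobAccessConditions{},
		azblob.DefaultAccessTier, nil)
	if err != nil {
		return err
	}
	// The copy is asynchronous; the tests poll via a waitForCopy helper until
	// CopyStatus is no longer pending.
	_ = resp.CopyStatus()
	return nil
}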
@ -1737,23 +1706,23 @@ func (s *aztestsSuite) TestBlobSetMetadataIfNoneMatchFalse(c *chk.C) {
}
func testBlobsUndeleteImpl(c *chk.C, bsu ServiceURL) error {
containerURL, _ := createNewContainer(c, bsu)
defer deleteContainer(c, containerURL)
blobURL, _ := createNewBlockBlob(c, containerURL)
_, err := blobURL.Delete(ctx, DeleteSnapshotsOptionNone, BlobAccessConditions{})
c.Assert(err, chk.IsNil) // This call will not have errors related to slow update of service properties, so we assert.
_, err = blobURL.Undelete(ctx)
if err != nil { // We want to give the wrapper method a chance to check if it was an error related to the service properties update.
return err
}
resp, err := blobURL.GetProperties(ctx, BlobAccessConditions{})
if err != nil {
return errors.New(string(err.(StorageError).ServiceCode()))
}
c.Assert(resp.BlobType(), chk.Equals, BlobBlockBlob) // We could check any property. This is just to double check it was undeleted.
//containerURL, _ := createNewContainer(c, bsu)
//defer deleteContainer(c, containerURL)
//blobURL, _ := createNewBlockBlob(c, containerURL)
//
//_, err := blobURL.Delete(ctx, DeleteSnapshotsOptionNone, BlobAccessConditions{})
//c.Assert(err, chk.IsNil) // This call will not have errors related to slow update of service properties, so we assert.
//
//_, err = blobURL.Undelete(ctx)
//if err != nil { // We want to give the wrapper method a chance to check if it was an error related to the service properties update.
// return err
//}
//
//resp, err := blobURL.GetProperties(ctx, BlobAccessConditions{})
//if err != nil {
// return errors.New(string(err.(StorageError).ServiceCode()))
//}
//c.Assert(resp.BlobType(), chk.Equals, BlobBlockBlob) // We could check any property. This is just to double check it was undeleted.
return nil
}
@ -1951,8 +1920,8 @@ func (s *aztestsSuite) TestBlobURLPartsSASQueryTimes(c *chk.C) {
func (s *aztestsSuite) TestDownloadBlockBlobUnexpectedEOF(c *chk.C) {
bsu := getBSU()
cURL, _ := createNewContainer(c, bsu)
defer delContainer(c, cURL)
bURL, _ := createNewBlockBlob(c, cURL) // This uploads for us.
resp, err := bURL.Download(ctx, 0, 0, BlobAccessConditions{}, false)
c.Assert(err, chk.IsNil)

View file

@ -48,7 +48,7 @@ func (s *aztestsSuite) TestStageGetBlocks(c *chk.C) {
c.Assert(blockList.CommittedBlocks, chk.HasLen, 0)
c.Assert(blockList.UncommittedBlocks, chk.HasLen, 1)
listResp, err := blob.CommitBlockList(context.Background(), []string{blockID}, BlobHTTPHeaders{}, nil, BlobAccessConditions{})
listResp, err := blob.CommitBlockList(context.Background(), []string{blockID}, BlobHTTPHeaders{}, nil, BlobAccessConditions{}, DefaultAccessTier, nil)
c.Assert(err, chk.IsNil)
c.Assert(listResp.Response().StatusCode, chk.Equals, 201)
c.Assert(listResp.LastModified().IsZero(), chk.Equals, false)
@ -88,7 +88,7 @@ func (s *aztestsSuite) TestStageBlockFromURL(c *chk.C) {
destBlob := container.NewBlockBlobURL(generateBlobName())
// Prepare source blob for copy.
uploadSrcResp, err := srcBlob.Upload(ctx, r, BlobHTTPHeaders{}, Metadata{}, BlobAccessConditions{})
uploadSrcResp, err := srcBlob.Upload(ctx, r, BlobHTTPHeaders{}, Metadata{}, BlobAccessConditions{}, DefaultAccessTier, nil)
c.Assert(err, chk.IsNil)
c.Assert(uploadSrcResp.Response().StatusCode, chk.Equals, 201)
@ -134,7 +134,7 @@ func (s *aztestsSuite) TestStageBlockFromURL(c *chk.C) {
c.Assert(blockList.UncommittedBlocks, chk.HasLen, 2)
// Commit block list.
listResp, err := destBlob.CommitBlockList(context.Background(), []string{blockID1, blockID2}, BlobHTTPHeaders{}, nil, BlobAccessConditions{})
listResp, err := destBlob.CommitBlockList(context.Background(), []string{blockID1, blockID2}, BlobHTTPHeaders{}, nil, BlobAccessConditions{}, DefaultAccessTier, nil)
c.Assert(err, chk.IsNil)
c.Assert(listResp.Response().StatusCode, chk.Equals, 201)
@ -163,7 +163,7 @@ func (s *aztestsSuite) TestCopyBlockBlobFromURL(c *chk.C) {
destBlob := container.NewBlockBlobURL(generateBlobName())
// Prepare source blob for copy.
uploadSrcResp, err := srcBlob.Upload(ctx, r, BlobHTTPHeaders{}, Metadata{}, BlobAccessConditions{})
uploadSrcResp, err := srcBlob.Upload(ctx, r, BlobHTTPHeaders{}, Metadata{}, BlobAccessConditions{}, DefaultAccessTier, nil)
c.Assert(err, chk.IsNil)
c.Assert(uploadSrcResp.Response().StatusCode, chk.Equals, 201)
@ -171,7 +171,7 @@ func (s *aztestsSuite) TestCopyBlockBlobFromURL(c *chk.C) {
srcBlobParts := NewBlobURLParts(srcBlob.URL())
srcBlobParts.SAS, err = BlobSASSignatureValues{
Protocol: SASProtocolHTTPS, // Users MUST use HTTPS (not HTTP)
Protocol: SASProtocolHTTPS, // Users MUST use HTTPS (not HTTP)
ExpiryTime: time.Now().UTC().Add(48 * time.Hour), // 48-hours before expiration
ContainerName: srcBlobParts.ContainerName,
BlobName: srcBlobParts.BlobName,
@ -184,7 +184,7 @@ func (s *aztestsSuite) TestCopyBlockBlobFromURL(c *chk.C) {
srcBlobURLWithSAS := srcBlobParts.URL()
// Invoke copy blob from URL.
resp, err := destBlob.CopyFromURL(ctx, srcBlobURLWithSAS, Metadata{"foo": "bar"}, ModifiedAccessConditions{}, BlobAccessConditions{}, sourceDataMD5Value[:])
resp, err := destBlob.CopyFromURL(ctx, srcBlobURLWithSAS, Metadata{"foo": "bar"}, ModifiedAccessConditions{}, BlobAccessConditions{}, sourceDataMD5Value[:], DefaultAccessTier, nil)
c.Assert(err, chk.IsNil)
c.Assert(resp.Response().StatusCode, chk.Equals, 202)
c.Assert(resp.ETag(), chk.Not(chk.Equals), "")
@ -207,11 +207,11 @@ func (s *aztestsSuite) TestCopyBlockBlobFromURL(c *chk.C) {
// Edge case 1: Provide bad MD5 and make sure the copy fails
_, badMD5 := getRandomDataAndReader(16)
_, err = destBlob.CopyFromURL(ctx, srcBlobURLWithSAS, Metadata{}, ModifiedAccessConditions{}, BlobAccessConditions{}, badMD5)
_, err = destBlob.CopyFromURL(ctx, srcBlobURLWithSAS, Metadata{}, ModifiedAccessConditions{}, BlobAccessConditions{}, badMD5, DefaultAccessTier, nil)
c.Assert(err, chk.NotNil)
// Edge case 2: Not providing any source MD5 should see the CRC getting returned instead
resp, err = destBlob.CopyFromURL(ctx, srcBlobURLWithSAS, Metadata{}, ModifiedAccessConditions{}, BlobAccessConditions{}, nil)
resp, err = destBlob.CopyFromURL(ctx, srcBlobURLWithSAS, Metadata{}, ModifiedAccessConditions{}, BlobAccessConditions{}, nil, DefaultAccessTier, nil)
c.Assert(err, chk.IsNil)
c.Assert(resp.Response().StatusCode, chk.Equals, 202)
c.Assert(resp.XMsContentCrc64(), chk.Not(chk.Equals), "")
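The synchronous CopyFromURL gains the same two trailing arguments. As the edge cases above show, the source-content MD5 is optional; when it is omitted the service returns a CRC64 instead. A hedged sketch:

package blobexamples

import (
	"context"
	"net/url"

	"github.com/Azure/azure-storage-blob-go/azblob"
)

// syncCopy mirrors the updated CopyFromURL call shape used above; srcMD5 may
// be nil to skip source-content verification.
func syncCopy(ctx context.Context, srcWithSAS url.URL, dst azblob.BlockBlobURL, srcMD5 []byte) error {
	resp, err := dst.CopyFromURL(ctx, srcWithSAS, azblob.Metadata{},
		azblob.ModifiedAccessConditions{}, azblob.BlobAccessConditions{},
		srcMD5, azblob.DefaultAccessTier, nil)
	if err != nil {
		return err
	}
	_ = resp.XMsContentCrc64() // populated when no source MD5 was supplied
	return nil
}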
@ -231,7 +231,7 @@ func (s *aztestsSuite) TestBlobSASQueryParamOverrideResponseHeaders(c *chk.C) {
ctx := context.Background() // Use default Background context
blob := container.NewBlockBlobURL(generateBlobName())
uploadResp, err := blob.Upload(ctx, r, BlobHTTPHeaders{}, Metadata{}, BlobAccessConditions{})
uploadResp, err := blob.Upload(ctx, r, BlobHTTPHeaders{}, Metadata{}, BlobAccessConditions{}, DefaultAccessTier, nil)
c.Assert(err, chk.IsNil)
c.Assert(uploadResp.Response().StatusCode, chk.Equals, 201)
@ -303,7 +303,7 @@ func (s *aztestsSuite) TestBlobPutBlobNonEmptyBody(c *chk.C) {
defer deleteContainer(c, containerURL)
blobURL, _ := getBlockBlobURL(c, containerURL)
_, err := blobURL.Upload(ctx, strings.NewReader(blockBlobDefaultData), BlobHTTPHeaders{}, nil, BlobAccessConditions{})
_, err := blobURL.Upload(ctx, strings.NewReader(blockBlobDefaultData), BlobHTTPHeaders{}, nil, BlobAccessConditions{}, DefaultAccessTier, nil)
c.Assert(err, chk.IsNil)
resp, err := blobURL.Download(ctx, 0, 0, BlobAccessConditions{}, false)
@ -318,7 +318,7 @@ func (s *aztestsSuite) TestBlobPutBlobHTTPHeaders(c *chk.C) {
defer deleteContainer(c, containerURL)
blobURL, _ := getBlockBlobURL(c, containerURL)
_, err := blobURL.Upload(ctx, bytes.NewReader(nil), basicHeaders, nil, BlobAccessConditions{})
_, err := blobURL.Upload(ctx, bytes.NewReader(nil), basicHeaders, nil, BlobAccessConditions{}, DefaultAccessTier, nil)
c.Assert(err, chk.IsNil)
resp, err := blobURL.GetProperties(ctx, BlobAccessConditions{})
@ -334,7 +334,7 @@ func (s *aztestsSuite) TestBlobPutBlobMetadataNotEmpty(c *chk.C) {
defer deleteContainer(c, containerURL)
blobURL, _ := getBlockBlobURL(c, containerURL)
_, err := blobURL.Upload(ctx, bytes.NewReader(nil), BlobHTTPHeaders{}, basicMetadata, BlobAccessConditions{})
_, err := blobURL.Upload(ctx, bytes.NewReader(nil), BlobHTTPHeaders{}, basicMetadata, BlobAccessConditions{}, DefaultAccessTier, nil)
c.Assert(err, chk.IsNil)
resp, err := blobURL.GetProperties(ctx, BlobAccessConditions{})
@ -348,7 +348,7 @@ func (s *aztestsSuite) TestBlobPutBlobMetadataEmpty(c *chk.C) {
defer deleteContainer(c, containerURL)
blobURL, _ := getBlockBlobURL(c, containerURL)
_, err := blobURL.Upload(ctx, bytes.NewReader(nil), BlobHTTPHeaders{}, Metadata{}, BlobAccessConditions{})
_, err := blobURL.Upload(ctx, bytes.NewReader(nil), BlobHTTPHeaders{}, Metadata{}, BlobAccessConditions{}, DefaultAccessTier, nil)
c.Assert(err, chk.IsNil)
resp, err := blobURL.GetProperties(ctx, BlobAccessConditions{})
@ -362,7 +362,7 @@ func (s *aztestsSuite) TestBlobPutBlobMetadataInvalid(c *chk.C) {
defer deleteContainer(c, containerURL)
blobURL, _ := getBlockBlobURL(c, containerURL)
_, err := blobURL.Upload(ctx, nil, BlobHTTPHeaders{}, Metadata{"In valid!": "bar"}, BlobAccessConditions{})
_, err := blobURL.Upload(ctx, nil, BlobHTTPHeaders{}, Metadata{"In valid!": "bar"}, BlobAccessConditions{}, DefaultAccessTier, nil)
c.Assert(strings.Contains(err.Error(), validationErrorSubstring), chk.Equals, true)
}
@ -374,8 +374,7 @@ func (s *aztestsSuite) TestBlobPutBlobIfModifiedSinceTrue(c *chk.C) {
currentTime := getRelativeTimeGMT(-10)
_, err := blobURL.Upload(ctx, bytes.NewReader(nil), BlobHTTPHeaders{}, nil,
BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfModifiedSince: currentTime}})
_, err := blobURL.Upload(ctx, bytes.NewReader(nil), BlobHTTPHeaders{}, nil, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfModifiedSince: currentTime}}, DefaultAccessTier, nil)
c.Assert(err, chk.IsNil)
validateUpload(c, blobURL)
@ -389,8 +388,7 @@ func (s *aztestsSuite) TestBlobPutBlobIfModifiedSinceFalse(c *chk.C) {
currentTime := getRelativeTimeGMT(10)
_, err := blobURL.Upload(ctx, bytes.NewReader(nil), BlobHTTPHeaders{}, nil,
BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfModifiedSince: currentTime}})
_, err := blobURL.Upload(ctx, bytes.NewReader(nil), BlobHTTPHeaders{}, nil, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfModifiedSince: currentTime}}, DefaultAccessTier, nil)
validateStorageError(c, err, ServiceCodeConditionNotMet)
}
@ -402,8 +400,7 @@ func (s *aztestsSuite) TestBlobPutBlobIfUnmodifiedSinceTrue(c *chk.C) {
currentTime := getRelativeTimeGMT(10)
_, err := blobURL.Upload(ctx, bytes.NewReader(nil), BlobHTTPHeaders{}, nil,
BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfUnmodifiedSince: currentTime}})
_, err := blobURL.Upload(ctx, bytes.NewReader(nil), BlobHTTPHeaders{}, nil, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfUnmodifiedSince: currentTime}}, DefaultAccessTier, nil)
c.Assert(err, chk.IsNil)
validateUpload(c, blobURL)
@ -417,8 +414,7 @@ func (s *aztestsSuite) TestBlobPutBlobIfUnmodifiedSinceFalse(c *chk.C) {
currentTime := getRelativeTimeGMT(-10)
_, err := blobURL.Upload(ctx, bytes.NewReader(nil), BlobHTTPHeaders{}, nil,
BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfUnmodifiedSince: currentTime}})
_, err := blobURL.Upload(ctx, bytes.NewReader(nil), BlobHTTPHeaders{}, nil, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfUnmodifiedSince: currentTime}}, DefaultAccessTier, nil)
validateStorageError(c, err, ServiceCodeConditionNotMet)
}
@ -431,8 +427,7 @@ func (s *aztestsSuite) TestBlobPutBlobIfMatchTrue(c *chk.C) {
resp, err := blobURL.GetProperties(ctx, BlobAccessConditions{})
c.Assert(err, chk.IsNil)
_, err = blobURL.Upload(ctx, bytes.NewReader(nil), BlobHTTPHeaders{}, nil,
BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfMatch: resp.ETag()}})
_, err = blobURL.Upload(ctx, bytes.NewReader(nil), BlobHTTPHeaders{}, nil, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfMatch: resp.ETag()}}, DefaultAccessTier, nil)
c.Assert(err, chk.IsNil)
validateUpload(c, blobURL)
@ -447,8 +442,7 @@ func (s *aztestsSuite) TestBlobPutBlobIfMatchFalse(c *chk.C) {
_, err := blobURL.GetProperties(ctx, BlobAccessConditions{})
c.Assert(err, chk.IsNil)
_, err = blobURL.Upload(ctx, bytes.NewReader(nil), BlobHTTPHeaders{}, nil,
BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfMatch: ETag("garbage")}})
_, err = blobURL.Upload(ctx, bytes.NewReader(nil), BlobHTTPHeaders{}, nil, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfMatch: ETag("garbage")}}, DefaultAccessTier, nil)
validateStorageError(c, err, ServiceCodeConditionNotMet)
}
@ -461,8 +455,7 @@ func (s *aztestsSuite) TestBlobPutBlobIfNoneMatchTrue(c *chk.C) {
_, err := blobURL.GetProperties(ctx, BlobAccessConditions{})
c.Assert(err, chk.IsNil)
_, err = blobURL.Upload(ctx, bytes.NewReader(nil), BlobHTTPHeaders{}, nil,
BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfNoneMatch: ETag("garbage")}})
_, err = blobURL.Upload(ctx, bytes.NewReader(nil), BlobHTTPHeaders{}, nil, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfNoneMatch: ETag("garbage")}}, DefaultAccessTier, nil)
c.Assert(err, chk.IsNil)
validateUpload(c, blobURL)
@ -477,8 +470,7 @@ func (s *aztestsSuite) TestBlobPutBlobIfNoneMatchFalse(c *chk.C) {
resp, err := blobURL.GetProperties(ctx, BlobAccessConditions{})
c.Assert(err, chk.IsNil)
_, err = blobURL.Upload(ctx, bytes.NewReader(nil), BlobHTTPHeaders{}, nil,
BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfNoneMatch: resp.ETag()}})
_, err = blobURL.Upload(ctx, bytes.NewReader(nil), BlobHTTPHeaders{}, nil, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfNoneMatch: resp.ETag()}}, DefaultAccessTier, nil)
validateStorageError(c, err, ServiceCodeConditionNotMet)
}
@ -486,7 +478,7 @@ var blockID string // a single blockID used in tests when only a single ID is needed
func init() {
u := [64]byte{}
binary.BigEndian.PutUint32((u[len(guuid.UUID{}):]), math.MaxUint32)
binary.BigEndian.PutUint32(u[len(guuid.UUID{}):], math.MaxUint32)
blockID = base64.StdEncoding.EncodeToString(u[:])
}
@ -529,7 +521,7 @@ func (s *aztestsSuite) TestBlobGetBlockListCommitted(c *chk.C) {
_, err := blobURL.StageBlock(ctx, blockID, strings.NewReader(blockBlobDefaultData), LeaseAccessConditions{}, nil)
c.Assert(err, chk.IsNil)
_, err = blobURL.CommitBlockList(ctx, []string{blockID}, BlobHTTPHeaders{}, nil, BlobAccessConditions{})
_, err = blobURL.CommitBlockList(ctx, []string{blockID}, BlobHTTPHeaders{}, nil, BlobAccessConditions{}, DefaultAccessTier, nil)
resp, err := blobURL.GetBlockList(ctx, BlockListCommitted, LeaseAccessConditions{})
c.Assert(err, chk.IsNil)
@ -575,7 +567,7 @@ func (s *aztestsSuite) TestBlobGetBlockListBothNotEmpty(c *chk.C) {
c.Assert(err, chk.IsNil)
_, err = blobURL.StageBlock(ctx, id.next(), strings.NewReader(blockBlobDefaultData), LeaseAccessConditions{}, nil)
c.Assert(err, chk.IsNil)
_, err = blobURL.CommitBlockList(ctx, id.issued(), BlobHTTPHeaders{}, nil, BlobAccessConditions{})
_, err = blobURL.CommitBlockList(ctx, id.issued(), BlobHTTPHeaders{}, nil, BlobAccessConditions{}, DefaultAccessTier, nil)
c.Assert(err, chk.IsNil)
// Put two uncommitted blocks
@ -613,7 +605,7 @@ func (s *aztestsSuite) TestBlobGetBlockListSnapshot(c *chk.C) {
_, err := blobURL.StageBlock(ctx, blockID, strings.NewReader(blockBlobDefaultData), LeaseAccessConditions{}, nil)
c.Assert(err, chk.IsNil)
_, err = blobURL.CommitBlockList(ctx, []string{blockID}, BlobHTTPHeaders{}, nil, BlobAccessConditions{})
_, err = blobURL.CommitBlockList(ctx, []string{blockID}, BlobHTTPHeaders{}, nil, BlobAccessConditions{}, DefaultAccessTier, nil)
c.Assert(err, chk.IsNil)
resp, err := blobURL.CreateSnapshot(ctx, nil, BlobAccessConditions{})
@ -671,7 +663,7 @@ func (s *aztestsSuite) TestBlobPutBlockListInvalidID(c *chk.C) {
containerURL, blobURL, id := setupPutBlockListTest(c)
defer deleteContainer(c, containerURL)
_, err := blobURL.CommitBlockList(ctx, []string{id[:2]}, BlobHTTPHeaders{}, nil, BlobAccessConditions{})
_, err := blobURL.CommitBlockList(ctx, []string{id[:2]}, BlobHTTPHeaders{}, nil, BlobAccessConditions{}, DefaultAccessTier, nil)
validateStorageError(c, err, ServiceCodeInvalidBlockID)
}
@ -679,7 +671,7 @@ func (s *aztestsSuite) TestBlobPutBlockListDuplicateBlocks(c *chk.C) {
containerURL, blobURL, id := setupPutBlockListTest(c)
defer deleteContainer(c, containerURL)
_, err := blobURL.CommitBlockList(ctx, []string{id, id}, BlobHTTPHeaders{}, nil, BlobAccessConditions{})
_, err := blobURL.CommitBlockList(ctx, []string{id, id}, BlobHTTPHeaders{}, nil, BlobAccessConditions{}, DefaultAccessTier, nil)
c.Assert(err, chk.IsNil)
resp, err := blobURL.GetBlockList(ctx, BlockListAll, LeaseAccessConditions{})
@ -691,7 +683,7 @@ func (s *aztestsSuite) TestBlobPutBlockListEmptyList(c *chk.C) {
containerURL, blobURL, _ := setupPutBlockListTest(c)
defer deleteContainer(c, containerURL)
_, err := blobURL.CommitBlockList(ctx, []string{}, BlobHTTPHeaders{}, Metadata{}, BlobAccessConditions{})
_, err := blobURL.CommitBlockList(ctx, []string{}, BlobHTTPHeaders{}, Metadata{}, BlobAccessConditions{}, DefaultAccessTier, nil)
c.Assert(err, chk.IsNil)
resp, err := blobURL.GetBlockList(ctx, BlockListAll, LeaseAccessConditions{})
@ -703,7 +695,7 @@ func (s *aztestsSuite) TestBlobPutBlockListMetadataEmpty(c *chk.C) {
containerURL, blobURL, id := setupPutBlockListTest(c)
defer deleteContainer(c, containerURL)
_, err := blobURL.CommitBlockList(ctx, []string{id}, BlobHTTPHeaders{}, Metadata{}, BlobAccessConditions{})
_, err := blobURL.CommitBlockList(ctx, []string{id}, BlobHTTPHeaders{}, Metadata{}, BlobAccessConditions{}, DefaultAccessTier, nil)
c.Assert(err, chk.IsNil)
resp, err := blobURL.GetProperties(ctx, BlobAccessConditions{})
@ -715,7 +707,7 @@ func (s *aztestsSuite) TestBlobPutBlockListMetadataNonEmpty(c *chk.C) {
containerURL, blobURL, id := setupPutBlockListTest(c)
defer deleteContainer(c, containerURL)
_, err := blobURL.CommitBlockList(ctx, []string{id}, BlobHTTPHeaders{}, basicMetadata, BlobAccessConditions{})
_, err := blobURL.CommitBlockList(ctx, []string{id}, BlobHTTPHeaders{}, basicMetadata, BlobAccessConditions{}, DefaultAccessTier, nil)
c.Assert(err, chk.IsNil)
resp, err := blobURL.GetProperties(ctx, BlobAccessConditions{})
@ -727,7 +719,7 @@ func (s *aztestsSuite) TestBlobPutBlockListHTTPHeaders(c *chk.C) {
containerURL, blobURL, id := setupPutBlockListTest(c)
defer deleteContainer(c, containerURL)
_, err := blobURL.CommitBlockList(ctx, []string{id}, basicHeaders, nil, BlobAccessConditions{})
_, err := blobURL.CommitBlockList(ctx, []string{id}, basicHeaders, nil, BlobAccessConditions{}, DefaultAccessTier, nil)
c.Assert(err, chk.IsNil)
resp, _ := blobURL.GetProperties(ctx, BlobAccessConditions{})
@ -739,10 +731,10 @@ func (s *aztestsSuite) TestBlobPutBlockListHTTPHeadersEmpty(c *chk.C) {
containerURL, blobURL, id := setupPutBlockListTest(c)
defer deleteContainer(c, containerURL)
_, err := blobURL.CommitBlockList(ctx, []string{id}, BlobHTTPHeaders{ContentDisposition: "my_disposition"}, nil, BlobAccessConditions{})
_, err := blobURL.CommitBlockList(ctx, []string{id}, BlobHTTPHeaders{ContentDisposition: "my_disposition"}, nil, BlobAccessConditions{}, DefaultAccessTier, nil)
c.Assert(err, chk.IsNil)
_, err = blobURL.CommitBlockList(ctx, []string{id}, BlobHTTPHeaders{}, nil, BlobAccessConditions{})
_, err = blobURL.CommitBlockList(ctx, []string{id}, BlobHTTPHeaders{}, nil, BlobAccessConditions{}, DefaultAccessTier, nil)
c.Assert(err, chk.IsNil)
resp, err := blobURL.GetProperties(ctx, BlobAccessConditions{})
@ -759,13 +751,12 @@ func validateBlobCommitted(c *chk.C, blobURL BlockBlobURL) {
func (s *aztestsSuite) TestBlobPutBlockListIfModifiedSinceTrue(c *chk.C) {
containerURL, blobURL, id := setupPutBlockListTest(c)
defer deleteContainer(c, containerURL)
_, err := blobURL.CommitBlockList(ctx, []string{id}, BlobHTTPHeaders{}, nil, BlobAccessConditions{}) // The blob must actually exist to have a modified time
_, err := blobURL.CommitBlockList(ctx, []string{id}, BlobHTTPHeaders{}, nil, BlobAccessConditions{}, DefaultAccessTier, nil) // The blob must actually exist to have a modified time
c.Assert(err, chk.IsNil)
currentTime := getRelativeTimeGMT(-10)
_, err = blobURL.CommitBlockList(ctx, []string{id}, BlobHTTPHeaders{}, nil,
BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfModifiedSince: currentTime}})
_, err = blobURL.CommitBlockList(ctx, []string{id}, BlobHTTPHeaders{}, nil, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfModifiedSince: currentTime}}, DefaultAccessTier, nil)
c.Assert(err, chk.IsNil)
validateBlobCommitted(c, blobURL)
@ -777,21 +768,19 @@ func (s *aztestsSuite) TestBlobPutBlockListIfModifiedSinceFalse(c *chk.C) {
currentTime := getRelativeTimeGMT(10)
_, err := blobURL.CommitBlockList(ctx, []string{id}, BlobHTTPHeaders{}, nil,
BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfModifiedSince: currentTime}})
_, err := blobURL.CommitBlockList(ctx, []string{id}, BlobHTTPHeaders{}, nil, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfModifiedSince: currentTime}}, DefaultAccessTier, nil)
validateStorageError(c, err, ServiceCodeConditionNotMet)
}
func (s *aztestsSuite) TestBlobPutBlockListIfUnmodifiedSinceTrue(c *chk.C) {
containerURL, blobURL, id := setupPutBlockListTest(c)
defer deleteContainer(c, containerURL)
_, err := blobURL.CommitBlockList(ctx, []string{id}, BlobHTTPHeaders{}, nil, BlobAccessConditions{}) // The blob must actually exist to have a modified time
_, err := blobURL.CommitBlockList(ctx, []string{id}, BlobHTTPHeaders{}, nil, BlobAccessConditions{}, DefaultAccessTier, nil) // The blob must actually exist to have a modified time
c.Assert(err, chk.IsNil)
currentTime := getRelativeTimeGMT(10)
_, err = blobURL.CommitBlockList(ctx, []string{id}, BlobHTTPHeaders{}, nil,
BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfUnmodifiedSince: currentTime}})
_, err = blobURL.CommitBlockList(ctx, []string{id}, BlobHTTPHeaders{}, nil, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfUnmodifiedSince: currentTime}}, DefaultAccessTier, nil)
c.Assert(err, chk.IsNil)
validateBlobCommitted(c, blobURL)
@ -799,13 +788,12 @@ func (s *aztestsSuite) TestBlobPutBlockListIfUnmodifiedSinceTrue(c *chk.C) {
func (s *aztestsSuite) TestBlobPutBlockListIfUnmodifiedSinceFalse(c *chk.C) {
containerURL, blobURL, id := setupPutBlockListTest(c)
blobURL.CommitBlockList(ctx, []string{id}, BlobHTTPHeaders{}, nil, BlobAccessConditions{}) // The blob must actually exist to have a modified time
blobURL.CommitBlockList(ctx, []string{id}, BlobHTTPHeaders{}, nil, BlobAccessConditions{}, DefaultAccessTier, nil) // The blob must actually exist to have a modified time
defer deleteContainer(c, containerURL)
currentTime := getRelativeTimeGMT(-10)
_, err := blobURL.CommitBlockList(ctx, []string{id}, BlobHTTPHeaders{}, nil,
BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfUnmodifiedSince: currentTime}})
_, err := blobURL.CommitBlockList(ctx, []string{id}, BlobHTTPHeaders{}, nil, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfUnmodifiedSince: currentTime}}, DefaultAccessTier, nil)
validateStorageError(c, err, ServiceCodeConditionNotMet)
}
@ -813,11 +801,10 @@ func (s *aztestsSuite) TestBlobPutBlockListIfUnmodifiedSinceFalse(c *chk.C) {
func (s *aztestsSuite) TestBlobPutBlockListIfMatchTrue(c *chk.C) {
containerURL, blobURL, id := setupPutBlockListTest(c)
defer deleteContainer(c, containerURL)
resp, err := blobURL.CommitBlockList(ctx, []string{id}, BlobHTTPHeaders{}, nil, BlobAccessConditions{}) // The blob must actually exist to have a modified time
resp, err := blobURL.CommitBlockList(ctx, []string{id}, BlobHTTPHeaders{}, nil, BlobAccessConditions{}, DefaultAccessTier, nil) // The blob must actually exist to have a modified time
c.Assert(err, chk.IsNil)
_, err = blobURL.CommitBlockList(ctx, []string{id}, BlobHTTPHeaders{}, nil,
BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfMatch: resp.ETag()}})
_, err = blobURL.CommitBlockList(ctx, []string{id}, BlobHTTPHeaders{}, nil, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfMatch: resp.ETag()}}, DefaultAccessTier, nil)
c.Assert(err, chk.IsNil)
validateBlobCommitted(c, blobURL)
@ -826,11 +813,10 @@ func (s *aztestsSuite) TestBlobPutBlockListIfMatchTrue(c *chk.C) {
func (s *aztestsSuite) TestBlobPutBlockListIfMatchFalse(c *chk.C) {
containerURL, blobURL, id := setupPutBlockListTest(c)
defer deleteContainer(c, containerURL)
_, err := blobURL.CommitBlockList(ctx, []string{id}, BlobHTTPHeaders{}, nil, BlobAccessConditions{}) // The blob must actually exist to have a modified time
_, err := blobURL.CommitBlockList(ctx, []string{id}, BlobHTTPHeaders{}, nil, BlobAccessConditions{}, DefaultAccessTier, nil) // The blob must actually exist to have a modified time
c.Assert(err, chk.IsNil)
_, err = blobURL.CommitBlockList(ctx, []string{id}, BlobHTTPHeaders{}, nil,
BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfMatch: ETag("garbage")}})
_, err = blobURL.CommitBlockList(ctx, []string{id}, BlobHTTPHeaders{}, nil, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfMatch: ETag("garbage")}}, DefaultAccessTier, nil)
validateStorageError(c, err, ServiceCodeConditionNotMet)
}
@ -838,11 +824,10 @@ func (s *aztestsSuite) TestBlobPutBlockListIfMatchFalse(c *chk.C) {
func (s *aztestsSuite) TestBlobPutBlockListIfNoneMatchTrue(c *chk.C) {
containerURL, blobURL, id := setupPutBlockListTest(c)
defer deleteContainer(c, containerURL)
_, err := blobURL.CommitBlockList(ctx, []string{id}, BlobHTTPHeaders{}, nil, BlobAccessConditions{}) // The blob must actually exist to have a modified time
_, err := blobURL.CommitBlockList(ctx, []string{id}, BlobHTTPHeaders{}, nil, BlobAccessConditions{}, DefaultAccessTier, nil) // The blob must actually exist to have a modified time
c.Assert(err, chk.IsNil)
_, err = blobURL.CommitBlockList(ctx, []string{id}, BlobHTTPHeaders{}, nil,
BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfNoneMatch: ETag("garbage")}})
_, err = blobURL.CommitBlockList(ctx, []string{id}, BlobHTTPHeaders{}, nil, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfNoneMatch: ETag("garbage")}}, DefaultAccessTier, nil)
c.Assert(err, chk.IsNil)
validateBlobCommitted(c, blobURL)
@ -851,11 +836,10 @@ func (s *aztestsSuite) TestBlobPutBlockListIfNoneMatchTrue(c *chk.C) {
func (s *aztestsSuite) TestBlobPutBlockListIfNoneMatchFalse(c *chk.C) {
containerURL, blobURL, id := setupPutBlockListTest(c)
defer deleteContainer(c, containerURL)
resp, err := blobURL.CommitBlockList(ctx, []string{id}, BlobHTTPHeaders{}, nil, BlobAccessConditions{}) // The blob must actually exist to have a modified time
resp, err := blobURL.CommitBlockList(ctx, []string{id}, BlobHTTPHeaders{}, nil, BlobAccessConditions{}, DefaultAccessTier, nil) // The blob must actually exist to have a modified time
c.Assert(err, chk.IsNil)
_, err = blobURL.CommitBlockList(ctx, []string{id}, BlobHTTPHeaders{}, nil,
BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfNoneMatch: resp.ETag()}})
_, err = blobURL.CommitBlockList(ctx, []string{id}, BlobHTTPHeaders{}, nil, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfNoneMatch: resp.ETag()}}, DefaultAccessTier, nil)
validateStorageError(c, err, ServiceCodeConditionNotMet)
}
@ -864,7 +848,7 @@ func (s *aztestsSuite) TestBlobPutBlockListValidateData(c *chk.C) {
containerURL, blobURL, id := setupPutBlockListTest(c)
defer deleteContainer(c, containerURL)
_, err := blobURL.CommitBlockList(ctx, []string{id}, BlobHTTPHeaders{}, nil, BlobAccessConditions{})
_, err := blobURL.CommitBlockList(ctx, []string{id}, BlobHTTPHeaders{}, nil, BlobAccessConditions{}, DefaultAccessTier, nil)
resp, err := blobURL.Download(ctx, 0, 0, BlobAccessConditions{}, false)
c.Assert(err, chk.IsNil)
@ -876,7 +860,7 @@ func (s *aztestsSuite) TestBlobPutBlockListModifyBlob(c *chk.C) {
containerURL, blobURL, id := setupPutBlockListTest(c)
defer deleteContainer(c, containerURL)
_, err := blobURL.CommitBlockList(ctx, []string{id}, BlobHTTPHeaders{}, nil, BlobAccessConditions{})
_, err := blobURL.CommitBlockList(ctx, []string{id}, BlobHTTPHeaders{}, nil, BlobAccessConditions{}, DefaultAccessTier, nil)
c.Assert(err, chk.IsNil)
_, err = blobURL.StageBlock(ctx, "0001", bytes.NewReader([]byte("new data")), LeaseAccessConditions{}, nil)
@ -888,7 +872,7 @@ func (s *aztestsSuite) TestBlobPutBlockListModifyBlob(c *chk.C) {
_, err = blobURL.StageBlock(ctx, "0100", bytes.NewReader([]byte("new data")), LeaseAccessConditions{}, nil)
c.Assert(err, chk.IsNil)
_, err = blobURL.CommitBlockList(ctx, []string{"0001", "0011"}, BlobHTTPHeaders{}, nil, BlobAccessConditions{})
_, err = blobURL.CommitBlockList(ctx, []string{"0001", "0011"}, BlobHTTPHeaders{}, nil, BlobAccessConditions{}, DefaultAccessTier, nil)
c.Assert(err, chk.IsNil)
resp, err := blobURL.GetBlockList(ctx, BlockListAll, LeaseAccessConditions{})
@ -898,3 +882,169 @@ func (s *aztestsSuite) TestBlobPutBlockListModifyBlob(c *chk.C) {
c.Assert(resp.CommittedBlocks[1].Name, chk.Equals, "0011")
c.Assert(resp.UncommittedBlocks, chk.HasLen, 0)
}
func (s *aztestsSuite) TestSetTierOnBlobUpload(c *chk.C) {
bsu := getBSU()
containerURL, _ := createNewContainer(c, bsu)
defer deleteContainer(c, containerURL)
for _, tier := range []AccessTierType{AccessTierArchive, AccessTierCool, AccessTierHot} {
blobURL, _ := getBlockBlobURL(c, containerURL)
_, err := blobURL.Upload(ctx, strings.NewReader("Test Data"), basicHeaders, nil, BlobAccessConditions{}, tier, nil)
c.Assert(err, chk.IsNil)
resp, err := blobURL.GetProperties(ctx, BlobAccessConditions{})
c.Assert(err, chk.IsNil)
c.Assert(resp.AccessTier(), chk.Equals, string(tier))
}
}
func (s *aztestsSuite) TestBlobSetTierOnCommit(c *chk.C) {
bsu := getBSU()
containerURL, _ := createNewContainer(c, bsu)
defer deleteContainer(c, containerURL)
for _, tier := range []AccessTierType{AccessTierCool, AccessTierHot} {
blobURL, _ := getBlockBlobURL(c, containerURL)
_, err := blobURL.StageBlock(ctx, blockID, strings.NewReader(blockBlobDefaultData), LeaseAccessConditions{}, nil)
c.Assert(err, chk.IsNil)
_, err = blobURL.CommitBlockList(ctx, []string{blockID}, BlobHTTPHeaders{}, nil, BlobAccessConditions{}, tier, nil)
resp, err := blobURL.GetBlockList(ctx, BlockListCommitted, LeaseAccessConditions{})
c.Assert(err, chk.IsNil)
c.Assert(resp.CommittedBlocks, chk.HasLen, 1)
c.Assert(resp.UncommittedBlocks, chk.HasLen, 0)
}
}
func (s *aztestsSuite) TestSetTierOnCopyBlockBlobFromURL(c *chk.C) {
bsu := getBSU()
container, _ := createNewContainer(c, bsu)
defer delContainer(c, container)
testSize := 1 * 1024 * 1024
r, sourceData := getRandomDataAndReader(testSize)
sourceDataMD5Value := md5.Sum(sourceData)
ctx := context.Background()
srcBlob := container.NewBlockBlobURL(generateBlobName())
// Setting blob tier as "cool"
uploadSrcResp, err := srcBlob.Upload(ctx, r, BlobHTTPHeaders{}, Metadata{}, BlobAccessConditions{}, AccessTierCool, nil)
c.Assert(err, chk.IsNil)
c.Assert(uploadSrcResp.Response().StatusCode, chk.Equals, 201)
// Get source blob URL with SAS for StageFromURL.
srcBlobParts := NewBlobURLParts(srcBlob.URL())
credential, err := getGenericCredential("")
if err != nil {
c.Fatal("Invalid credential")
}
srcBlobParts.SAS, err = BlobSASSignatureValues{
Protocol: SASProtocolHTTPS,
ExpiryTime: time.Now().UTC().Add(2 * time.Hour),
ContainerName: srcBlobParts.ContainerName,
BlobName: srcBlobParts.BlobName,
Permissions: BlobSASPermissions{Read: true}.String(),
}.NewSASQueryParameters(credential)
if err != nil {
c.Fatal(err)
}
srcBlobURLWithSAS := srcBlobParts.URL()
for _, tier := range []AccessTierType{AccessTierArchive, AccessTierCool, AccessTierHot} {
destBlob := container.NewBlockBlobURL(generateBlobName())
resp, err := destBlob.CopyFromURL(ctx, srcBlobURLWithSAS, Metadata{"foo": "bar"}, ModifiedAccessConditions{}, BlobAccessConditions{}, sourceDataMD5Value[:], tier, nil)
c.Assert(err, chk.IsNil)
c.Assert(resp.Response().StatusCode, chk.Equals, 202)
c.Assert(string(resp.CopyStatus()), chk.DeepEquals, "success")
destBlobPropResp, err := destBlob.GetProperties(ctx, BlobAccessConditions{})
c.Assert(err, chk.IsNil)
c.Assert(destBlobPropResp.AccessTier(), chk.Equals, string(tier))
}
}
func (s *aztestsSuite) TestSetTierOnStageBlockFromURL(c *chk.C) {
bsu := getBSU()
credential, err := getGenericCredential("")
if err != nil {
c.Fatal("Invalid credential")
}
container, _ := createNewContainer(c, bsu)
defer delContainer(c, container)
testSize := 8 * 1024 * 1024 // 8MB
r, sourceData := getRandomDataAndReader(testSize)
ctx := context.Background() // Use default Background context
srcBlob := container.NewBlockBlobURL(generateBlobName())
destBlob := container.NewBlockBlobURL(generateBlobName())
tier := AccessTierCool
// Prepare source blob for copy.
uploadSrcResp, err := srcBlob.Upload(ctx, r, BlobHTTPHeaders{}, Metadata{}, BlobAccessConditions{}, tier, nil)
c.Assert(err, chk.IsNil)
c.Assert(uploadSrcResp.Response().StatusCode, chk.Equals, 201)
// Get source blob URL with SAS for StageFromURL.
srcBlobParts := NewBlobURLParts(srcBlob.URL())
srcBlobParts.SAS, err = BlobSASSignatureValues{
Protocol: SASProtocolHTTPS, // Users MUST use HTTPS (not HTTP)
ExpiryTime: time.Now().UTC().Add(48 * time.Hour), // 48-hours before expiration
ContainerName: srcBlobParts.ContainerName,
BlobName: srcBlobParts.BlobName,
Permissions: BlobSASPermissions{Read: true}.String(),
}.NewSASQueryParameters(credential)
if err != nil {
c.Fatal(err)
}
srcBlobURLWithSAS := srcBlobParts.URL()
// Stage blocks from URL.
blockID1, blockID2 := base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf("%6d", 0))), base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf("%6d", 1)))
stageResp1, err := destBlob.StageBlockFromURL(ctx, blockID1, srcBlobURLWithSAS, 0, 4*1024*1024, LeaseAccessConditions{}, ModifiedAccessConditions{})
c.Assert(err, chk.IsNil)
c.Assert(stageResp1.Response().StatusCode, chk.Equals, 201)
c.Assert(stageResp1.ContentMD5(), chk.Not(chk.Equals), "")
c.Assert(stageResp1.RequestID(), chk.Not(chk.Equals), "")
c.Assert(stageResp1.Version(), chk.Not(chk.Equals), "")
c.Assert(stageResp1.Date().IsZero(), chk.Equals, false)
stageResp2, err := destBlob.StageBlockFromURL(ctx, blockID2, srcBlobURLWithSAS, 4*1024*1024, CountToEnd, LeaseAccessConditions{}, ModifiedAccessConditions{})
c.Assert(err, chk.IsNil)
c.Assert(stageResp2.Response().StatusCode, chk.Equals, 201)
c.Assert(stageResp2.ContentMD5(), chk.Not(chk.Equals), "")
c.Assert(stageResp2.RequestID(), chk.Not(chk.Equals), "")
c.Assert(stageResp2.Version(), chk.Not(chk.Equals), "")
c.Assert(stageResp2.Date().IsZero(), chk.Equals, false)
// Check block list.
blockList, err := destBlob.GetBlockList(context.Background(), BlockListAll, LeaseAccessConditions{})
c.Assert(err, chk.IsNil)
c.Assert(blockList.Response().StatusCode, chk.Equals, 200)
c.Assert(blockList.CommittedBlocks, chk.HasLen, 0)
c.Assert(blockList.UncommittedBlocks, chk.HasLen, 2)
// Commit block list.
listResp, err := destBlob.CommitBlockList(context.Background(), []string{blockID1, blockID2}, BlobHTTPHeaders{}, nil, BlobAccessConditions{}, tier, nil)
c.Assert(err, chk.IsNil)
c.Assert(listResp.Response().StatusCode, chk.Equals, 201)
// Check data integrity through downloading.
downloadResp, err := destBlob.BlobURL.Download(ctx, 0, CountToEnd, BlobAccessConditions{}, false)
c.Assert(err, chk.IsNil)
destData, err := ioutil.ReadAll(downloadResp.Body(RetryReaderOptions{}))
c.Assert(err, chk.IsNil)
c.Assert(destData, chk.DeepEquals, sourceData)
// Get properties to validate the tier
destBlobPropResp, err := destBlob.GetProperties(ctx, BlobAccessConditions{})
c.Assert(err, chk.IsNil)
c.Assert(destBlobPropResp.AccessTier(), chk.Equals, string(tier))
}
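// The two tests above build block IDs by base64-encoding a fixed-width index ("%6d"). For a
// given blob the service requires every block ID to be valid base64 and of the same encoded
// length, which the fixed-width format guarantees. A hypothetical helper (not part of this
// package) capturing the pattern:
func blockIDFromIndex(i int) string {
	// Fixed-width formatting keeps all encoded IDs the same length, as the service requires.
	return base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf("%6d", i)))
}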


@ -124,8 +124,7 @@ func (s *aztestsSuite) TestContainerCreateAccessContainer(c *chk.C) {
c.Assert(err, chk.IsNil)
blobURL := containerURL.NewBlockBlobURL(blobPrefix)
blobURL.Upload(ctx, bytes.NewReader([]byte("Content")), BlobHTTPHeaders{},
basicMetadata, BlobAccessConditions{})
blobURL.Upload(ctx, bytes.NewReader([]byte("Content")), BlobHTTPHeaders{}, basicMetadata, BlobAccessConditions{}, DefaultAccessTier, nil)
// Anonymous enumeration should be valid with container access
containerURL2 := NewContainerURL(containerURL.URL(), NewPipeline(NewAnonymousCredential(), PipelineOptions{}))
@ -150,13 +149,12 @@ func (s *aztestsSuite) TestContainerCreateAccessBlob(c *chk.C) {
c.Assert(err, chk.IsNil)
blobURL := containerURL.NewBlockBlobURL(blobPrefix)
blobURL.Upload(ctx, bytes.NewReader([]byte("Content")), BlobHTTPHeaders{},
basicMetadata, BlobAccessConditions{})
blobURL.Upload(ctx, bytes.NewReader([]byte("Content")), BlobHTTPHeaders{}, basicMetadata, BlobAccessConditions{}, DefaultAccessTier, nil)
// Reference the same container URL but with anonymous credentials
containerURL2 := NewContainerURL(containerURL.URL(), NewPipeline(NewAnonymousCredential(), PipelineOptions{}))
_, err = containerURL2.ListBlobsFlatSegment(ctx, Marker{}, ListBlobsSegmentOptions{})
validateStorageError(c, err, ServiceCodeResourceNotFound) // Listing blobs is not publicly accessible
validateStorageError(c, err, ServiceCodeNoAuthenticationInformation) // Listing blobs is not publicly accessible
// Accessing blob specific data should be public
blobURL2 := containerURL2.NewBlockBlobURL(blobPrefix)
@ -173,21 +171,20 @@ func (s *aztestsSuite) TestContainerCreateAccessNone(c *chk.C) {
defer deleteContainer(c, containerURL)
blobURL := containerURL.NewBlockBlobURL(blobPrefix)
blobURL.Upload(ctx, bytes.NewReader([]byte("Content")), BlobHTTPHeaders{},
basicMetadata, BlobAccessConditions{})
blobURL.Upload(ctx, bytes.NewReader([]byte("Content")), BlobHTTPHeaders{}, basicMetadata, BlobAccessConditions{}, DefaultAccessTier, nil)
// Reference the same container URL but with anonymous credentials
containerURL2 := NewContainerURL(containerURL.URL(), NewPipeline(NewAnonymousCredential(), PipelineOptions{}))
// Listing blobs is not public
_, err = containerURL2.ListBlobsFlatSegment(ctx, Marker{}, ListBlobsSegmentOptions{})
validateStorageError(c, err, ServiceCodeResourceNotFound)
validateStorageError(c, err, ServiceCodeNoAuthenticationInformation)
// Blob data is not public
blobURL2 := containerURL2.NewBlockBlobURL(blobPrefix)
_, err = blobURL2.GetProperties(ctx, BlobAccessConditions{})
c.Assert(err, chk.NotNil)
serr := err.(StorageError)
c.Assert(serr.Response().StatusCode, chk.Equals, 404) // HEAD request does not return a status code
c.Assert(serr.Response().StatusCode, chk.Equals, 401) // A HEAD response has no error body, so check the HTTP status code directly
}
func validateContainerDeleted(c *chk.C, containerURL ContainerURL) {
@ -386,7 +383,7 @@ func (s *aztestsSuite) TestContainerListBlobsIncludeTypeCopy(c *chk.C) {
defer deleteContainer(c, containerURL)
blobURL, blobName := createNewBlockBlob(c, containerURL)
blobCopyURL, blobCopyName := createBlockBlobWithPrefix(c, containerURL, "copy")
_, err := blobCopyURL.StartCopyFromURL(ctx, blobURL.URL(), Metadata{}, ModifiedAccessConditions{}, BlobAccessConditions{})
_, err := blobCopyURL.StartCopyFromURL(ctx, blobURL.URL(), Metadata{}, ModifiedAccessConditions{}, BlobAccessConditions{}, DefaultAccessTier, nil)
c.Assert(err, chk.IsNil)
resp, err := containerURL.ListBlobsFlatSegment(ctx, Marker{},
@ -424,16 +421,24 @@ func testContainerListBlobsIncludeTypeDeletedImpl(c *chk.C, bsu ServiceURL) erro
defer deleteContainer(c, containerURL)
blobURL, _ := createNewBlockBlob(c, containerURL)
_, err := blobURL.Delete(ctx, DeleteSnapshotsOptionNone, BlobAccessConditions{})
resp, err := containerURL.ListBlobsFlatSegment(ctx, Marker{},
ListBlobsSegmentOptions{Details: BlobListingDetails{Versions: true, Deleted: true}})
c.Assert(err, chk.IsNil)
c.Assert(resp.Segment.BlobItems, chk.HasLen, 1)
_, err = blobURL.Delete(ctx, DeleteSnapshotsOptionInclude, BlobAccessConditions{})
c.Assert(err, chk.IsNil)
resp, err := containerURL.ListBlobsFlatSegment(ctx, Marker{},
ListBlobsSegmentOptions{Details: BlobListingDetails{Deleted: true}})
resp, err = containerURL.ListBlobsFlatSegment(ctx, Marker{},
ListBlobsSegmentOptions{Details: BlobListingDetails{Versions: true, Deleted: true}})
c.Assert(err, chk.IsNil)
if len(resp.Segment.BlobItems) != 1 {
return errors.New("DeletedBlobNotFound")
}
c.Assert(resp.Segment.BlobItems[0].Deleted, chk.Equals, true)
// TODO: Add a helper to enable/disable blob versioning programmatically.
// BlobItems[0].Deleted is true when versioning is disabled and false when it is enabled.
c.Assert(resp.Segment.BlobItems[0].Deleted, chk.Equals, false)
return nil
}
@ -448,29 +453,29 @@ func testContainerListBlobsIncludeMultipleImpl(c *chk.C, bsu ServiceURL) error {
containerURL, _ := createNewContainer(c, bsu)
defer deleteContainer(c, containerURL)
blobURL, blobName := createBlockBlobWithPrefix(c, containerURL, "z")
blobURL, _ := createBlockBlobWithPrefix(c, containerURL, "z")
_, err := blobURL.CreateSnapshot(ctx, Metadata{}, BlobAccessConditions{})
c.Assert(err, chk.IsNil)
blobURL2, blobName2 := createBlockBlobWithPrefix(c, containerURL, "copy")
resp2, err := blobURL2.StartCopyFromURL(ctx, blobURL.URL(), Metadata{}, ModifiedAccessConditions{}, BlobAccessConditions{})
blobURL2, _ := createBlockBlobWithPrefix(c, containerURL, "copy")
resp2, err := blobURL2.StartCopyFromURL(ctx, blobURL.URL(), Metadata{}, ModifiedAccessConditions{}, BlobAccessConditions{}, DefaultAccessTier, nil)
c.Assert(err, chk.IsNil)
waitForCopy(c, blobURL2, resp2)
blobURL3, blobName3 := createBlockBlobWithPrefix(c, containerURL, "deleted")
blobURL3, _ := createBlockBlobWithPrefix(c, containerURL, "deleted")
_, err = blobURL3.Delete(ctx, DeleteSnapshotsOptionNone, BlobAccessConditions{})
resp, err := containerURL.ListBlobsFlatSegment(ctx, Marker{},
ListBlobsSegmentOptions{Details: BlobListingDetails{Snapshots: true, Copy: true, Deleted: true}})
ListBlobsSegmentOptions{Details: BlobListingDetails{Snapshots: true, Copy: true, Deleted: true, Versions: true}})
c.Assert(err, chk.IsNil)
if len(resp.Segment.BlobItems) != 5 { // If there are fewer blobs in the container than there should be, it will be because one was permanently deleted.
if len(resp.Segment.BlobItems) != 6 {
// If there are fewer blobs in the container than there should be, it will be because one was permanently deleted.
return errors.New("DeletedBlobNotFound")
}
c.Assert(resp.Segment.BlobItems[0].Name, chk.Equals, blobName2)
c.Assert(resp.Segment.BlobItems[1].Name, chk.Equals, blobName2) // With soft delete, the overwritten blob will have a backup snapshot
c.Assert(resp.Segment.BlobItems[2].Name, chk.Equals, blobName3)
c.Assert(resp.Segment.BlobItems[3].Name, chk.Equals, blobName)
c.Assert(resp.Segment.BlobItems[3].Snapshot, chk.NotNil)
c.Assert(resp.Segment.BlobItems[4].Name, chk.Equals, blobName)
//c.Assert(resp.Segment.BlobItems[0].Name, chk.Equals, blobName2)
//c.Assert(resp.Segment.BlobItems[1].Name, chk.Equals, blobName) // With soft delete, the overwritten blob will have a backup snapshot
//c.Assert(resp.Segment.BlobItems[2].Name, chk.Equals, blobName)
return nil
}
@ -577,19 +582,21 @@ func (s *aztestsSuite) TestContainerGetSetPermissionsMultiplePolicies(c *chk.C)
start := generateCurrentTimeWithModerateResolution()
expiry := start.Add(5 * time.Minute)
expiry2 := start.Add(time.Minute)
readWrite := AccessPolicyPermission{Read: true, Write: true}.String()
readOnly := AccessPolicyPermission{Read: true}.String()
permissions := []SignedIdentifier{
{ID: "0000",
AccessPolicy: AccessPolicy{
Start: start,
Expiry: expiry,
Permission: AccessPolicyPermission{Read: true, Write: true}.String(),
Start: &start,
Expiry: &expiry,
Permission: &readWrite,
},
},
{ID: "0001",
AccessPolicy: AccessPolicy{
Start: start,
Expiry: expiry2,
Permission: AccessPolicyPermission{Read: true}.String(),
Start: &start,
Expiry: &expiry2,
Permission: &readOnly,
},
},
}
@ -639,7 +646,7 @@ func (s *aztestsSuite) TestContainerSetPermissionsPublicAccessNone(c *chk.C) {
resp, _ := containerURL.GetAccessPolicy(ctx, LeaseAccessConditions{})
// If we cannot access a blob's data, we will also not be able to enumerate blobs
validateStorageError(c, err, ServiceCodeResourceNotFound)
validateStorageError(c, err, ServiceCodeNoAuthenticationInformation)
c.Assert(resp.BlobPublicAccess(), chk.Equals, PublicAccessNone)
}
@ -683,12 +690,13 @@ func (s *aztestsSuite) TestContainerSetPermissionsACLSinglePolicy(c *chk.C) {
start := time.Now().UTC().Add(-15 * time.Second)
expiry := start.Add(5 * time.Minute).UTC()
listOnly := AccessPolicyPermission{List: true}.String()
permissions := []SignedIdentifier{{
ID: "0000",
AccessPolicy: AccessPolicy{
Start: start,
Expiry: expiry,
Permission: AccessPolicyPermission{List: true}.String(),
Start: &start,
Expiry: &expiry,
Permission: &listOnly,
},
}}
_, err = containerURL.SetAccessPolicy(ctx, PublicAccessNone, permissions, ContainerAccessConditions{})
@ -715,7 +723,7 @@ func (s *aztestsSuite) TestContainerSetPermissionsACLSinglePolicy(c *chk.C) {
anonymousBlobService := NewServiceURL(bsu.URL(), sasPipeline)
anonymousContainer := anonymousBlobService.NewContainerURL(containerName)
_, err = anonymousContainer.ListBlobsFlatSegment(ctx, Marker{}, ListBlobsSegmentOptions{})
validateStorageError(c, err, ServiceCodeResourceNotFound)
validateStorageError(c, err, ServiceCodeNoAuthenticationInformation)
}
func (s *aztestsSuite) TestContainerSetPermissionsACLMoreThanFive(c *chk.C) {
@ -727,13 +735,14 @@ func (s *aztestsSuite) TestContainerSetPermissionsACLMoreThanFive(c *chk.C) {
start := time.Now().UTC()
expiry := start.Add(5 * time.Minute).UTC()
permissions := make([]SignedIdentifier, 6, 6)
listOnly := AccessPolicyPermission{Read: true}.String()
for i := 0; i < 6; i++ {
permissions[i] = SignedIdentifier{
ID: "000" + strconv.Itoa(i),
AccessPolicy: AccessPolicy{
Start: start,
Expiry: expiry,
Permission: AccessPolicyPermission{List: true}.String(),
Start: &start,
Expiry: &expiry,
Permission: &listOnly,
},
}
}
@ -750,14 +759,15 @@ func (s *aztestsSuite) TestContainerSetPermissionsDeleteAndModifyACL(c *chk.C) {
start := generateCurrentTimeWithModerateResolution()
expiry := start.Add(5 * time.Minute).UTC()
listOnly := AccessPolicyPermission{Read: true}.String()
permissions := make([]SignedIdentifier, 2, 2)
for i := 0; i < 2; i++ {
permissions[i] = SignedIdentifier{
ID: "000" + strconv.Itoa(i),
AccessPolicy: AccessPolicy{
Start: start,
Expiry: expiry,
Permission: AccessPolicyPermission{List: true}.String(),
Start: &start,
Expiry: &expiry,
Permission: &listOnly,
},
}
}
@ -788,13 +798,14 @@ func (s *aztestsSuite) TestContainerSetPermissionsDeleteAllPolicies(c *chk.C) {
start := time.Now().UTC()
expiry := start.Add(5 * time.Minute).UTC()
permissions := make([]SignedIdentifier, 2, 2)
listOnly := AccessPolicyPermission{Read: true}.String()
for i := 0; i < 2; i++ {
permissions[i] = SignedIdentifier{
ID: "000" + strconv.Itoa(i),
AccessPolicy: AccessPolicy{
Start: start,
Expiry: expiry,
Permission: AccessPolicyPermission{List: true}.String(),
Start: &start,
Expiry: &expiry,
Permission: &listOnly,
},
}
}
@ -820,13 +831,14 @@ func (s *aztestsSuite) TestContainerSetPermissionsInvalidPolicyTimes(c *chk.C) {
expiry := time.Now().UTC()
start := expiry.Add(5 * time.Minute).UTC()
permissions := make([]SignedIdentifier, 2, 2)
listOnly := AccessPolicyPermission{Read: true}.String()
for i := 0; i < 2; i++ {
permissions[i] = SignedIdentifier{
ID: "000" + strconv.Itoa(i),
AccessPolicy: AccessPolicy{
Start: start,
Expiry: expiry,
Permission: AccessPolicyPermission{List: true}.String(),
Start: &start,
Expiry: &expiry,
Permission: &listOnly,
},
}
}
@ -858,13 +870,14 @@ func (s *aztestsSuite) TestContainerSetPermissionsSignedIdentifierTooLong(c *chk
expiry := time.Now().UTC()
start := expiry.Add(5 * time.Minute).UTC()
permissions := make([]SignedIdentifier, 2, 2)
listOnly := AccessPolicyPermission{Read: true}.String()
for i := 0; i < 2; i++ {
permissions[i] = SignedIdentifier{
ID: id,
AccessPolicy: AccessPolicy{
Start: start,
Expiry: expiry,
Permission: AccessPolicyPermission{List: true}.String(),
Start: &start,
Expiry: &expiry,
Permission: &listOnly,
},
}
}
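// The hunks above reflect AccessPolicy's Start, Expiry and Permission fields becoming
// pointers, so callers now build the permission string once and pass addresses. A minimal
// sketch of the new shape (the helper, identifier ID and times are illustrative only):
func exampleReadOnlyPolicy(ctx context.Context, containerURL ContainerURL) error {
	readOnly := AccessPolicyPermission{Read: true}.String()
	start := time.Now().UTC()
	expiry := start.Add(5 * time.Minute)
	identifiers := []SignedIdentifier{{
		ID: "0000",
		AccessPolicy: AccessPolicy{Start: &start, Expiry: &expiry, Permission: &readOnly},
	}}
	_, err := containerURL.SetAccessPolicy(ctx, PublicAccessNone, identifiers, ContainerAccessConditions{})
	return err
}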


@ -293,7 +293,7 @@ func (s *aztestsSuite) TestBlobCreatePageSizeInvalid(c *chk.C) {
defer deleteContainer(c, containerURL)
blobURL, _ := getPageBlobURL(c, containerURL)
_, err := blobURL.Create(ctx, 1, 0, BlobHTTPHeaders{}, nil, BlobAccessConditions{})
_, err := blobURL.Create(ctx, 1, 0, BlobHTTPHeaders{}, nil, BlobAccessConditions{}, DefaultPremiumBlobAccessTier, nil)
validateStorageError(c, err, ServiceCodeInvalidHeaderValue)
}
@ -303,7 +303,7 @@ func (s *aztestsSuite) TestBlobCreatePageSequenceInvalid(c *chk.C) {
defer deleteContainer(c, containerURL)
blobURL, _ := getPageBlobURL(c, containerURL)
_, err := blobURL.Create(ctx, PageBlobPageBytes, -1, BlobHTTPHeaders{}, nil, BlobAccessConditions{})
_, err := blobURL.Create(ctx, PageBlobPageBytes, -1, BlobHTTPHeaders{}, nil, BlobAccessConditions{}, DefaultPremiumBlobAccessTier, nil)
c.Assert(err, chk.Not(chk.IsNil))
}
@ -313,7 +313,7 @@ func (s *aztestsSuite) TestBlobCreatePageMetadataNonEmpty(c *chk.C) {
defer deleteContainer(c, containerURL)
blobURL, _ := getPageBlobURL(c, containerURL)
_, err := blobURL.Create(ctx, PageBlobPageBytes, 0, BlobHTTPHeaders{}, basicMetadata, BlobAccessConditions{})
_, err := blobURL.Create(ctx, PageBlobPageBytes, 0, BlobHTTPHeaders{}, basicMetadata, BlobAccessConditions{}, DefaultPremiumBlobAccessTier, nil)
resp, err := blobURL.GetProperties(ctx, BlobAccessConditions{})
c.Assert(err, chk.IsNil)
@ -326,7 +326,7 @@ func (s *aztestsSuite) TestBlobCreatePageMetadataEmpty(c *chk.C) {
defer deleteContainer(c, containerURL)
blobURL, _ := getPageBlobURL(c, containerURL)
_, err := blobURL.Create(ctx, PageBlobPageBytes, 0, BlobHTTPHeaders{}, Metadata{}, BlobAccessConditions{})
_, err := blobURL.Create(ctx, PageBlobPageBytes, 0, BlobHTTPHeaders{}, Metadata{}, BlobAccessConditions{}, DefaultPremiumBlobAccessTier, nil)
resp, err := blobURL.GetProperties(ctx, BlobAccessConditions{})
c.Assert(err, chk.IsNil)
@ -339,7 +339,7 @@ func (s *aztestsSuite) TestBlobCreatePageMetadataInvalid(c *chk.C) {
defer deleteContainer(c, containerURL)
blobURL, _ := getPageBlobURL(c, containerURL)
_, err := blobURL.Create(ctx, PageBlobPageBytes, 0, BlobHTTPHeaders{}, Metadata{"In valid1": "bar"}, BlobAccessConditions{})
_, err := blobURL.Create(ctx, PageBlobPageBytes, 0, BlobHTTPHeaders{}, Metadata{"In valid1": "bar"}, BlobAccessConditions{}, PremiumPageBlobAccessTierNone, nil)
c.Assert(strings.Contains(err.Error(), invalidHeaderErrorSubstring), chk.Equals, true)
}
@ -350,7 +350,7 @@ func (s *aztestsSuite) TestBlobCreatePageHTTPHeaders(c *chk.C) {
defer deleteContainer(c, containerURL)
blobURL, _ := getPageBlobURL(c, containerURL)
_, err := blobURL.Create(ctx, PageBlobPageBytes, 0, basicHeaders, nil, BlobAccessConditions{})
_, err := blobURL.Create(ctx, PageBlobPageBytes, 0, basicHeaders, nil, BlobAccessConditions{}, PremiumPageBlobAccessTierNone, nil)
c.Assert(err, chk.IsNil)
resp, err := blobURL.GetProperties(ctx, BlobAccessConditions{})
@ -373,8 +373,7 @@ func (s *aztestsSuite) TestBlobCreatePageIfModifiedSinceTrue(c *chk.C) {
currentTime := getRelativeTimeGMT(-10)
_, err := blobURL.Create(ctx, PageBlobPageBytes, 0, BlobHTTPHeaders{}, basicMetadata,
BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfModifiedSince: currentTime}})
_, err := blobURL.Create(ctx, PageBlobPageBytes, 0, BlobHTTPHeaders{}, basicMetadata, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfModifiedSince: currentTime}}, DefaultPremiumBlobAccessTier, nil)
c.Assert(err, chk.IsNil)
validatePageBlobPut(c, blobURL)
@ -388,8 +387,7 @@ func (s *aztestsSuite) TestBlobCreatePageIfModifiedSinceFalse(c *chk.C) {
currentTime := getRelativeTimeGMT(10)
_, err := blobURL.Create(ctx, PageBlobPageBytes, 0, BlobHTTPHeaders{}, basicMetadata,
BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfModifiedSince: currentTime}})
_, err := blobURL.Create(ctx, PageBlobPageBytes, 0, BlobHTTPHeaders{}, basicMetadata, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfModifiedSince: currentTime}}, DefaultPremiumBlobAccessTier, nil)
validateStorageError(c, err, ServiceCodeConditionNotMet)
}
@ -401,8 +399,7 @@ func (s *aztestsSuite) TestBlobCreatePageIfUnmodifiedSinceTrue(c *chk.C) {
currentTime := getRelativeTimeGMT(10)
_, err := blobURL.Create(ctx, PageBlobPageBytes, 0, BlobHTTPHeaders{}, basicMetadata,
BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfUnmodifiedSince: currentTime}})
_, err := blobURL.Create(ctx, PageBlobPageBytes, 0, BlobHTTPHeaders{}, basicMetadata, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfUnmodifiedSince: currentTime}}, DefaultPremiumBlobAccessTier, nil)
c.Assert(err, chk.IsNil)
validatePageBlobPut(c, blobURL)
@ -416,8 +413,7 @@ func (s *aztestsSuite) TestBlobCreatePageIfUnmodifiedSinceFalse(c *chk.C) {
currentTime := getRelativeTimeGMT(-10)
_, err := blobURL.Create(ctx, PageBlobPageBytes, 0, BlobHTTPHeaders{}, basicMetadata,
BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfUnmodifiedSince: currentTime}})
_, err := blobURL.Create(ctx, PageBlobPageBytes, 0, BlobHTTPHeaders{}, basicMetadata, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfUnmodifiedSince: currentTime}}, DefaultPremiumBlobAccessTier, nil)
validateStorageError(c, err, ServiceCodeConditionNotMet)
}
@ -429,8 +425,7 @@ func (s *aztestsSuite) TestBlobCreatePageIfMatchTrue(c *chk.C) {
resp, _ := blobURL.GetProperties(ctx, BlobAccessConditions{})
_, err := blobURL.Create(ctx, PageBlobPageBytes, 0, BlobHTTPHeaders{}, basicMetadata,
BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfMatch: resp.ETag()}})
_, err := blobURL.Create(ctx, PageBlobPageBytes, 0, BlobHTTPHeaders{}, basicMetadata, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfMatch: resp.ETag()}}, PremiumPageBlobAccessTierNone, nil)
c.Assert(err, chk.IsNil)
validatePageBlobPut(c, blobURL)
@ -442,8 +437,7 @@ func (s *aztestsSuite) TestBlobCreatePageIfMatchFalse(c *chk.C) {
defer deleteContainer(c, containerURL)
blobURL, _ := createNewPageBlob(c, containerURL) // Originally created without metadata
_, err := blobURL.Create(ctx, PageBlobPageBytes, 0, BlobHTTPHeaders{}, basicMetadata,
BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfMatch: ETag("garbage")}})
_, err := blobURL.Create(ctx, PageBlobPageBytes, 0, BlobHTTPHeaders{}, basicMetadata, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfMatch: ETag("garbage")}}, PremiumPageBlobAccessTierNone, nil)
validateStorageError(c, err, ServiceCodeConditionNotMet)
}
@ -453,8 +447,7 @@ func (s *aztestsSuite) TestBlobCreatePageIfNoneMatchTrue(c *chk.C) {
defer deleteContainer(c, containerURL)
blobURL, _ := createNewPageBlob(c, containerURL) // Originally created without metadata
_, err := blobURL.Create(ctx, PageBlobPageBytes, 0, BlobHTTPHeaders{}, basicMetadata,
BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfNoneMatch: ETag("garbage")}})
_, err := blobURL.Create(ctx, PageBlobPageBytes, 0, BlobHTTPHeaders{}, basicMetadata, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfNoneMatch: ETag("garbage")}}, PremiumPageBlobAccessTierNone, nil)
c.Assert(err, chk.IsNil)
validatePageBlobPut(c, blobURL)
@ -468,8 +461,7 @@ func (s *aztestsSuite) TestBlobCreatePageIfNoneMatchFalse(c *chk.C) {
resp, _ := blobURL.GetProperties(ctx, BlobAccessConditions{})
_, err := blobURL.Create(ctx, PageBlobPageBytes, 0, BlobHTTPHeaders{}, basicMetadata,
BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfNoneMatch: resp.ETag()}})
_, err := blobURL.Create(ctx, PageBlobPageBytes, 0, BlobHTTPHeaders{}, basicMetadata, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfNoneMatch: resp.ETag()}}, DefaultPremiumBlobAccessTier, nil)
validateStorageError(c, err, ServiceCodeConditionNotMet)
}


@ -18,6 +18,7 @@ func (s *aztestsSuite) TestGetAccountInfo(c *chk.C) {
// Test on a container
cURL := sa.NewContainerURL(generateContainerName())
defer delContainer(c, cURL)
_, err = cURL.Create(ctx, Metadata{}, PublicAccessNone)
c.Assert(err, chk.IsNil)
cAccInfo, err := cURL.GetAccountInfo(ctx)
@ -26,7 +27,7 @@ func (s *aztestsSuite) TestGetAccountInfo(c *chk.C) {
// test on a block blob URL. They all call the same thing on the base URL, so only one test is needed for that.
bbURL := cURL.NewBlockBlobURL(generateBlobName())
_, err = bbURL.Upload(ctx, strings.NewReader("blah"), BlobHTTPHeaders{}, Metadata{}, BlobAccessConditions{})
_, err = bbURL.Upload(ctx, strings.NewReader("blah"), BlobHTTPHeaders{}, Metadata{}, BlobAccessConditions{}, DefaultAccessTier, nil)
c.Assert(err, chk.IsNil)
bAccInfo, err := bbURL.GetAccountInfo(ctx)
c.Assert(err, chk.IsNil)


@ -28,6 +28,7 @@ func (s *aztestsSuite) TestUserDelegationSASContainer(c *chk.C) {
c.Fatal(err)
}
// Prepare User Delegation SAS query
cSAS, err := BlobSASSignatureValues{
Protocol: SASProtocolHTTPS,
StartTime: currentTime,
@ -35,6 +36,9 @@ func (s *aztestsSuite) TestUserDelegationSASContainer(c *chk.C) {
Permissions: "racwdl",
ContainerName: containerName,
}.NewSASQueryParameters(cudk)
if err != nil {
c.Fatal(err)
}
// Create anonymous pipeline
p = NewPipeline(NewAnonymousCredential(), PipelineOptions{})
@ -52,7 +56,7 @@ func (s *aztestsSuite) TestUserDelegationSASContainer(c *chk.C) {
cSASURL := NewContainerURL(cURL, p)
bblob := cSASURL.NewBlockBlobURL("test")
_, err = bblob.Upload(ctx, strings.NewReader("hello world!"), BlobHTTPHeaders{}, Metadata{}, BlobAccessConditions{})
_, err = bblob.Upload(ctx, strings.NewReader("hello world!"), BlobHTTPHeaders{}, Metadata{}, BlobAccessConditions{}, DefaultAccessTier, nil)
if err != nil {
c.Fatal(err)
}
@ -130,7 +134,7 @@ func (s *aztestsSuite) TestUserDelegationSASBlob(c *chk.C) {
c.Fatal(err)
}
data := "Hello World!"
_, err = blobURL.Upload(ctx, strings.NewReader(data), BlobHTTPHeaders{ContentType: "text/plain"}, Metadata{}, BlobAccessConditions{})
_, err = blobURL.Upload(ctx, strings.NewReader(data), BlobHTTPHeaders{ContentType: "text/plain"}, Metadata{}, BlobAccessConditions{}, DefaultAccessTier, nil)
if err != nil {
c.Fatal(err)
}


@ -47,13 +47,17 @@ func newAppendBlobClient(url url.URL, p pipeline.Pipeline) appendBlobClient {
// see Encryption at Rest for Azure Storage Services. encryptionKeySha256 is the SHA-256 hash of the provided
// encryption key. Must be provided if the x-ms-encryption-key header is provided. encryptionAlgorithm is the algorithm
// used to produce the encryption key hash. Currently, the only accepted value is "AES256". Must be provided if the
// x-ms-encryption-key header is provided. ifModifiedSince is specify this header value to operate only on a blob if it
// has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only on a
// blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value to operate only on
// blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs without a matching value.
// requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics
// logs when storage analytics logging is enabled.
func (client appendBlobClient) AppendBlock(ctx context.Context, body io.ReadSeeker, contentLength int64, timeout *int32, transactionalContentMD5 []byte, transactionalContentCrc64 []byte, leaseID *string, maxSize *int64, appendPosition *int64, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*AppendBlobAppendBlockResponse, error) {
// x-ms-encryption-key header is provided. encryptionScope is optional. Version 2019-07-07 and later. Specifies the
// name of the encryption scope to use to encrypt the data provided in the request. If not specified, encryption is
// performed with the default account encryption scope. For more information, see Encryption at Rest for Azure Storage
// Services. ifModifiedSince is specify this header value to operate only on a blob if it has been modified since the
// specified date/time. ifUnmodifiedSince is specify this header value to operate only on a blob if it has not been
// modified since the specified date/time. ifMatch is specify an ETag value to operate only on blobs with a matching
// value. ifNoneMatch is specify an ETag value to operate only on blobs without a matching value. ifTags is specify a
// SQL where clause on blob tags to operate only on blobs with a matching value. requestID is provides a
// client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
// analytics logging is enabled.
func (client appendBlobClient) AppendBlock(ctx context.Context, body io.ReadSeeker, contentLength int64, timeout *int32, transactionalContentMD5 []byte, transactionalContentCrc64 []byte, leaseID *string, maxSize *int64, appendPosition *int64, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string) (*AppendBlobAppendBlockResponse, error) {
if err := validate([]validation{
{targetValue: body,
constraints: []constraint{{target: "body", name: null, rule: true, chain: nil}}},
@ -62,7 +66,7 @@ func (client appendBlobClient) AppendBlock(ctx context.Context, body io.ReadSeek
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
return nil, err
}
req, err := client.appendBlockPreparer(body, contentLength, timeout, transactionalContentMD5, transactionalContentCrc64, leaseID, maxSize, appendPosition, encryptionKey, encryptionKeySha256, encryptionAlgorithm, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID)
req, err := client.appendBlockPreparer(body, contentLength, timeout, transactionalContentMD5, transactionalContentCrc64, leaseID, maxSize, appendPosition, encryptionKey, encryptionKeySha256, encryptionAlgorithm, encryptionScope, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags, requestID)
if err != nil {
return nil, err
}
@ -74,7 +78,7 @@ func (client appendBlobClient) AppendBlock(ctx context.Context, body io.ReadSeek
}
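// The new ifTags parameter carries a blob-tag filter that the preparer below writes to the
// x-ms-if-tags header, so the operation succeeds only when the blob's tags satisfy the
// expression. The filter is a SQL-like clause over tag names and values; a hypothetical
// example (the exact grammar is defined by the service):
//
//	filter := "\"status\" = 'ready'" // sent as: x-ms-if-tags: "status" = 'ready'
//	// pass &filter as the ifTags argument, or leave it nil to skip the tag check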
// appendBlockPreparer prepares the AppendBlock request.
func (client appendBlobClient) appendBlockPreparer(body io.ReadSeeker, contentLength int64, timeout *int32, transactionalContentMD5 []byte, transactionalContentCrc64 []byte, leaseID *string, maxSize *int64, appendPosition *int64, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
func (client appendBlobClient) appendBlockPreparer(body io.ReadSeeker, contentLength int64, timeout *int32, transactionalContentMD5 []byte, transactionalContentCrc64 []byte, leaseID *string, maxSize *int64, appendPosition *int64, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string) (pipeline.Request, error) {
req, err := pipeline.NewRequest("PUT", client.url, body)
if err != nil {
return req, pipeline.NewError(err, "failed to create request")
@ -110,6 +114,9 @@ func (client appendBlobClient) appendBlockPreparer(body io.ReadSeeker, contentLe
if encryptionAlgorithm != EncryptionAlgorithmNone {
req.Header.Set("x-ms-encryption-algorithm", string(encryptionAlgorithm))
}
if encryptionScope != nil {
req.Header.Set("x-ms-encryption-scope", *encryptionScope)
}
if ifModifiedSince != nil {
req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123))
}
@ -122,6 +129,9 @@ func (client appendBlobClient) appendBlockPreparer(body io.ReadSeeker, contentLe
if ifNoneMatch != nil {
req.Header.Set("If-None-Match", string(*ifNoneMatch))
}
if ifTags != nil {
req.Header.Set("x-ms-if-tags", *ifTags)
}
req.Header.Set("x-ms-version", ServiceVersion)
if requestID != nil {
req.Header.Set("x-ms-client-request-id", *requestID)
@ -155,31 +165,35 @@ func (client appendBlobClient) appendBlockResponder(resp pipeline.Response) (pip
// information, see Encryption at Rest for Azure Storage Services. encryptionKeySha256 is the SHA-256 hash of the
// provided encryption key. Must be provided if the x-ms-encryption-key header is provided. encryptionAlgorithm is the
// algorithm used to produce the encryption key hash. Currently, the only accepted value is "AES256". Must be provided
// if the x-ms-encryption-key header is provided. leaseID is if specified, the operation only succeeds if the
// resource's lease is active and matches this ID. maxSize is optional conditional header. The max length in bytes
// permitted for the append blob. If the Append Block operation would cause the blob to exceed that limit or if the
// blob size is already greater than the value specified in this header, the request will fail with
// MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed). appendPosition is optional
// conditional header, used only for the Append Block operation. A number indicating the byte offset to compare. Append
// Block will succeed only if the append position is equal to this number. If it is not, the request will fail with the
// AppendPositionConditionNotMet error (HTTP status code 412 - Precondition Failed). ifModifiedSince is specify this
// header value to operate only on a blob if it has been modified since the specified date/time. ifUnmodifiedSince is
// specify this header value to operate only on a blob if it has not been modified since the specified date/time.
// ifMatch is specify an ETag value to operate only on blobs with a matching value. ifNoneMatch is specify an ETag
// value to operate only on blobs without a matching value. sourceIfModifiedSince is specify this header value to
// operate only on a blob if it has been modified since the specified date/time. sourceIfUnmodifiedSince is specify
// this header value to operate only on a blob if it has not been modified since the specified date/time. sourceIfMatch
// is specify an ETag value to operate only on blobs with a matching value. sourceIfNoneMatch is specify an ETag value
// to operate only on blobs without a matching value. requestID is provides a client-generated, opaque value with a 1
// KB character limit that is recorded in the analytics logs when storage analytics logging is enabled.
func (client appendBlobClient) AppendBlockFromURL(ctx context.Context, sourceURL string, contentLength int64, sourceRange *string, sourceContentMD5 []byte, sourceContentcrc64 []byte, timeout *int32, transactionalContentMD5 []byte, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, leaseID *string, maxSize *int64, appendPosition *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, sourceIfModifiedSince *time.Time, sourceIfUnmodifiedSince *time.Time, sourceIfMatch *ETag, sourceIfNoneMatch *ETag, requestID *string) (*AppendBlobAppendBlockFromURLResponse, error) {
// if the x-ms-encryption-key header is provided. encryptionScope is optional. Version 2019-07-07 and later. Specifies
// the name of the encryption scope to use to encrypt the data provided in the request. If not specified, encryption is
// performed with the default account encryption scope. For more information, see Encryption at Rest for Azure Storage
// Services. leaseID is if specified, the operation only succeeds if the resource's lease is active and matches this
// ID. maxSize is optional conditional header. The max length in bytes permitted for the append blob. If the Append
// Block operation would cause the blob to exceed that limit or if the blob size is already greater than the value
// specified in this header, the request will fail with MaxBlobSizeConditionNotMet error (HTTP status code 412 -
// Precondition Failed). appendPosition is optional conditional header, used only for the Append Block operation. A
// number indicating the byte offset to compare. Append Block will succeed only if the append position is equal to this
// number. If it is not, the request will fail with the AppendPositionConditionNotMet error (HTTP status code 412 -
// Precondition Failed). ifModifiedSince is specify this header value to operate only on a blob if it has been modified
// since the specified date/time. ifUnmodifiedSince is specify this header value to operate only on a blob if it has
// not been modified since the specified date/time. ifMatch is specify an ETag value to operate only on blobs with a
// matching value. ifNoneMatch is specify an ETag value to operate only on blobs without a matching value. ifTags is
// specify a SQL where clause on blob tags to operate only on blobs with a matching value. sourceIfModifiedSince is
// specify this header value to operate only on a blob if it has been modified since the specified date/time.
// sourceIfUnmodifiedSince is specify this header value to operate only on a blob if it has not been modified since the
// specified date/time. sourceIfMatch is specify an ETag value to operate only on blobs with a matching value.
// sourceIfNoneMatch is specify an ETag value to operate only on blobs without a matching value. requestID is provides
// a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
// analytics logging is enabled.
func (client appendBlobClient) AppendBlockFromURL(ctx context.Context, sourceURL string, contentLength int64, sourceRange *string, sourceContentMD5 []byte, sourceContentcrc64 []byte, timeout *int32, transactionalContentMD5 []byte, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, leaseID *string, maxSize *int64, appendPosition *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, sourceIfModifiedSince *time.Time, sourceIfUnmodifiedSince *time.Time, sourceIfMatch *ETag, sourceIfNoneMatch *ETag, requestID *string) (*AppendBlobAppendBlockFromURLResponse, error) {
if err := validate([]validation{
{targetValue: timeout,
constraints: []constraint{{target: "timeout", name: null, rule: false,
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
return nil, err
}
req, err := client.appendBlockFromURLPreparer(sourceURL, contentLength, sourceRange, sourceContentMD5, sourceContentcrc64, timeout, transactionalContentMD5, encryptionKey, encryptionKeySha256, encryptionAlgorithm, leaseID, maxSize, appendPosition, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatch, sourceIfNoneMatch, requestID)
req, err := client.appendBlockFromURLPreparer(sourceURL, contentLength, sourceRange, sourceContentMD5, sourceContentcrc64, timeout, transactionalContentMD5, encryptionKey, encryptionKeySha256, encryptionAlgorithm, encryptionScope, leaseID, maxSize, appendPosition, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags, sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatch, sourceIfNoneMatch, requestID)
if err != nil {
return nil, err
}
@ -191,7 +205,7 @@ func (client appendBlobClient) AppendBlockFromURL(ctx context.Context, sourceURL
}
// appendBlockFromURLPreparer prepares the AppendBlockFromURL request.
func (client appendBlobClient) appendBlockFromURLPreparer(sourceURL string, contentLength int64, sourceRange *string, sourceContentMD5 []byte, sourceContentcrc64 []byte, timeout *int32, transactionalContentMD5 []byte, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, leaseID *string, maxSize *int64, appendPosition *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, sourceIfModifiedSince *time.Time, sourceIfUnmodifiedSince *time.Time, sourceIfMatch *ETag, sourceIfNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
func (client appendBlobClient) appendBlockFromURLPreparer(sourceURL string, contentLength int64, sourceRange *string, sourceContentMD5 []byte, sourceContentcrc64 []byte, timeout *int32, transactionalContentMD5 []byte, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, leaseID *string, maxSize *int64, appendPosition *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, sourceIfModifiedSince *time.Time, sourceIfUnmodifiedSince *time.Time, sourceIfMatch *ETag, sourceIfNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
req, err := pipeline.NewRequest("PUT", client.url, nil)
if err != nil {
return req, pipeline.NewError(err, "failed to create request")
@ -225,6 +239,9 @@ func (client appendBlobClient) appendBlockFromURLPreparer(sourceURL string, cont
if encryptionAlgorithm != EncryptionAlgorithmNone {
req.Header.Set("x-ms-encryption-algorithm", string(encryptionAlgorithm))
}
if encryptionScope != nil {
req.Header.Set("x-ms-encryption-scope", *encryptionScope)
}
if leaseID != nil {
req.Header.Set("x-ms-lease-id", *leaseID)
}
@ -246,6 +263,9 @@ func (client appendBlobClient) appendBlockFromURLPreparer(sourceURL string, cont
if ifNoneMatch != nil {
req.Header.Set("If-None-Match", string(*ifNoneMatch))
}
if ifTags != nil {
req.Header.Set("x-ms-if-tags", *ifTags)
}
if sourceIfModifiedSince != nil {
req.Header.Set("x-ms-source-if-modified-since", (*sourceIfModifiedSince).In(gmt).Format(time.RFC1123))
}
@ -300,20 +320,24 @@ func (client appendBlobClient) appendBlockFromURLResponder(resp pipeline.Respons
// Rest for Azure Storage Services. encryptionKeySha256 is the SHA-256 hash of the provided encryption key. Must be
// provided if the x-ms-encryption-key header is provided. encryptionAlgorithm is the algorithm used to produce the
// encryption key hash. Currently, the only accepted value is "AES256". Must be provided if the x-ms-encryption-key
// header is provided. ifModifiedSince is specify this header value to operate only on a blob if it has been modified
// since the specified date/time. ifUnmodifiedSince is specify this header value to operate only on a blob if it has
// not been modified since the specified date/time. ifMatch is specify an ETag value to operate only on blobs with a
// matching value. ifNoneMatch is specify an ETag value to operate only on blobs without a matching value. requestID is
// provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when
// storage analytics logging is enabled.
func (client appendBlobClient) Create(ctx context.Context, contentLength int64, timeout *int32, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, blobCacheControl *string, metadata map[string]string, leaseID *string, blobContentDisposition *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*AppendBlobCreateResponse, error) {
// header is provided. encryptionScope is optional. Version 2019-07-07 and later. Specifies the name of the encryption
// scope to use to encrypt the data provided in the request. If not specified, encryption is performed with the default
// account encryption scope. For more information, see Encryption at Rest for Azure Storage Services. ifModifiedSince
// is specify this header value to operate only on a blob if it has been modified since the specified date/time.
// ifUnmodifiedSince is specify this header value to operate only on a blob if it has not been modified since the
// specified date/time. ifMatch is specify an ETag value to operate only on blobs with a matching value. ifNoneMatch is
// specify an ETag value to operate only on blobs without a matching value. ifTags is specify a SQL where clause on
// blob tags to operate only on blobs with a matching value. requestID is provides a client-generated, opaque value
// with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled.
// blobTagsString is optional. Used to set blob tags in various blob operations.
func (client appendBlobClient) Create(ctx context.Context, contentLength int64, timeout *int32, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, blobCacheControl *string, metadata map[string]string, leaseID *string, blobContentDisposition *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string, blobTagsString *string) (*AppendBlobCreateResponse, error) {
if err := validate([]validation{
{targetValue: timeout,
constraints: []constraint{{target: "timeout", name: null, rule: false,
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
return nil, err
}
req, err := client.createPreparer(contentLength, timeout, blobContentType, blobContentEncoding, blobContentLanguage, blobContentMD5, blobCacheControl, metadata, leaseID, blobContentDisposition, encryptionKey, encryptionKeySha256, encryptionAlgorithm, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID)
req, err := client.createPreparer(contentLength, timeout, blobContentType, blobContentEncoding, blobContentLanguage, blobContentMD5, blobCacheControl, metadata, leaseID, blobContentDisposition, encryptionKey, encryptionKeySha256, encryptionAlgorithm, encryptionScope, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags, requestID, blobTagsString)
if err != nil {
return nil, err
}
@ -325,7 +349,7 @@ func (client appendBlobClient) Create(ctx context.Context, contentLength int64,
}
// createPreparer prepares the Create request.
func (client appendBlobClient) createPreparer(contentLength int64, timeout *int32, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, blobCacheControl *string, metadata map[string]string, leaseID *string, blobContentDisposition *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
func (client appendBlobClient) createPreparer(contentLength int64, timeout *int32, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, blobCacheControl *string, metadata map[string]string, leaseID *string, blobContentDisposition *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string, blobTagsString *string) (pipeline.Request, error) {
req, err := pipeline.NewRequest("PUT", client.url, nil)
if err != nil {
return req, pipeline.NewError(err, "failed to create request")
@ -371,6 +395,9 @@ func (client appendBlobClient) createPreparer(contentLength int64, timeout *int3
if encryptionAlgorithm != EncryptionAlgorithmNone {
req.Header.Set("x-ms-encryption-algorithm", string(encryptionAlgorithm))
}
if encryptionScope != nil {
req.Header.Set("x-ms-encryption-scope", *encryptionScope)
}
if ifModifiedSince != nil {
req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123))
}
@ -383,10 +410,16 @@ func (client appendBlobClient) createPreparer(contentLength int64, timeout *int3
if ifNoneMatch != nil {
req.Header.Set("If-None-Match", string(*ifNoneMatch))
}
if ifTags != nil {
req.Header.Set("x-ms-if-tags", *ifTags)
}
req.Header.Set("x-ms-version", ServiceVersion)
if requestID != nil {
req.Header.Set("x-ms-client-request-id", *requestID)
}
if blobTagsString != nil {
req.Header.Set("x-ms-tags", *blobTagsString)
}
req.Header.Set("x-ms-blob-type", "AppendBlob")
return req, nil
}
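// blobTagsString lets Create set tags in the same request via the x-ms-tags header. The
// service documents the value as query-string-encoded tags, so url.Values is a natural way
// to build it from a map (a sketch; the tag names and values are illustrative):
//
//	tags := url.Values{}
//	tags.Set("project", "alpha")
//	tags.Set("owner", "storage-team")
//	tagsString := tags.Encode() // "owner=storage-team&project=alpha"
//	// pass &tagsString as the blobTagsString argument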
@ -401,3 +434,84 @@ func (client appendBlobClient) createResponder(resp pipeline.Response) (pipeline
resp.Response().Body.Close()
return &AppendBlobCreateResponse{rawResponse: resp.Response()}, err
}
// Seal the Seal operation seals the Append Blob to make it read-only. Seal is supported only on version
// 2019-12-12 or later.
//
// timeout is the timeout parameter is expressed in seconds. For more information, see <a
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
// Timeouts for Blob Service Operations.</a> requestID is provides a client-generated, opaque value with a 1 KB
// character limit that is recorded in the analytics logs when storage analytics logging is enabled. leaseID is if
// specified, the operation only succeeds if the resource's lease is active and matches this ID. ifModifiedSince is
// specify this header value to operate only on a blob if it has been modified since the specified date/time.
// ifUnmodifiedSince is specify this header value to operate only on a blob if it has not been modified since the
// specified date/time. ifMatch is specify an ETag value to operate only on blobs with a matching value. ifNoneMatch is
// specify an ETag value to operate only on blobs without a matching value. appendPosition is optional conditional
// header, used only for the Append Block operation. A number indicating the byte offset to compare. Append Block will
// succeed only if the append position is equal to this number. If it is not, the request will fail with the
// AppendPositionConditionNotMet error (HTTP status code 412 - Precondition Failed).
func (client appendBlobClient) Seal(ctx context.Context, timeout *int32, requestID *string, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, appendPosition *int64) (*AppendBlobSealResponse, error) {
if err := validate([]validation{
{targetValue: timeout,
constraints: []constraint{{target: "timeout", name: null, rule: false,
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
return nil, err
}
req, err := client.sealPreparer(timeout, requestID, leaseID, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, appendPosition)
if err != nil {
return nil, err
}
resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.sealResponder}, req)
if err != nil {
return nil, err
}
return resp.(*AppendBlobSealResponse), err
}
// sealPreparer prepares the Seal request.
func (client appendBlobClient) sealPreparer(timeout *int32, requestID *string, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, appendPosition *int64) (pipeline.Request, error) {
req, err := pipeline.NewRequest("PUT", client.url, nil)
if err != nil {
return req, pipeline.NewError(err, "failed to create request")
}
params := req.URL.Query()
if timeout != nil {
params.Set("timeout", strconv.FormatInt(int64(*timeout), 10))
}
params.Set("comp", "seal")
req.URL.RawQuery = params.Encode()
req.Header.Set("x-ms-version", ServiceVersion)
if requestID != nil {
req.Header.Set("x-ms-client-request-id", *requestID)
}
if leaseID != nil {
req.Header.Set("x-ms-lease-id", *leaseID)
}
if ifModifiedSince != nil {
req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123))
}
if ifUnmodifiedSince != nil {
req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123))
}
if ifMatch != nil {
req.Header.Set("If-Match", string(*ifMatch))
}
if ifNoneMatch != nil {
req.Header.Set("If-None-Match", string(*ifNoneMatch))
}
if appendPosition != nil {
req.Header.Set("x-ms-blob-condition-appendpos", strconv.FormatInt(*appendPosition, 10))
}
return req, nil
}
// sealResponder handles the response to the Seal request.
func (client appendBlobClient) sealResponder(resp pipeline.Response) (pipeline.Response, error) {
err := validateResponse(resp, http.StatusOK)
if resp == nil {
return nil, err
}
io.Copy(ioutil.Discard, resp.Response().Body)
resp.Response().Body.Close()
return &AppendBlobSealResponse{rawResponse: resp.Response()}, err
}
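// A sealed append blob rejects further AppendBlock calls. The public wrapper around Seal is
// not part of this diff, so the sketch below drives the generated client directly, leaving
// every optional parameter nil except an append-position guard (variable names illustrative):
//
//	pos := int64(0) // seal only if nothing has been appended yet
//	_, err := abClient.Seal(ctx, nil, nil, nil, nil, nil, nil, nil, &pos)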

Diff not shown because the file is too large.


@ -57,20 +57,25 @@ func newBlockBlobClient(url url.URL, p pipeline.Pipeline) blockBlobClient {
// Services. encryptionKeySha256 is the SHA-256 hash of the provided encryption key. Must be provided if the
// x-ms-encryption-key header is provided. encryptionAlgorithm is the algorithm used to produce the encryption key
// hash. Currently, the only accepted value is "AES256". Must be provided if the x-ms-encryption-key header is
// provided. tier is optional. Indicates the tier to be set on the blob. ifModifiedSince is specify this header value
// to operate only on a blob if it has been modified since the specified date/time. ifUnmodifiedSince is specify this
// header value to operate only on a blob if it has not been modified since the specified date/time. ifMatch is specify
// an ETag value to operate only on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only
// on blobs without a matching value. requestID is provides a client-generated, opaque value with a 1 KB character
// limit that is recorded in the analytics logs when storage analytics logging is enabled.
func (client blockBlobClient) CommitBlockList(ctx context.Context, blocks BlockLookupList, timeout *int32, blobCacheControl *string, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, transactionalContentMD5 []byte, transactionalContentCrc64 []byte, metadata map[string]string, leaseID *string, blobContentDisposition *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, tier AccessTierType, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*BlockBlobCommitBlockListResponse, error) {
// provided. encryptionScope is optional. Version 2019-07-07 and later. Specifies the name of the encryption scope to
// use to encrypt the data provided in the request. If not specified, encryption is performed with the default account
// encryption scope. For more information, see Encryption at Rest for Azure Storage Services. tier is optional.
// Indicates the tier to be set on the blob. ifModifiedSince is specify this header value to operate only on a blob if
// it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only
// on a blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value to operate
// only on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs without a
// matching value. ifTags is specify a SQL where clause on blob tags to operate only on blobs with a matching value.
// requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics
// logs when storage analytics logging is enabled. blobTagsString is optional. Used to set blob tags in various blob
// operations.
func (client blockBlobClient) CommitBlockList(ctx context.Context, blocks BlockLookupList, timeout *int32, blobCacheControl *string, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, transactionalContentMD5 []byte, transactionalContentCrc64 []byte, metadata map[string]string, leaseID *string, blobContentDisposition *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, tier AccessTierType, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string, blobTagsString *string) (*BlockBlobCommitBlockListResponse, error) {
if err := validate([]validation{
{targetValue: timeout,
constraints: []constraint{{target: "timeout", name: null, rule: false,
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
return nil, err
}
req, err := client.commitBlockListPreparer(blocks, timeout, blobCacheControl, blobContentType, blobContentEncoding, blobContentLanguage, blobContentMD5, transactionalContentMD5, transactionalContentCrc64, metadata, leaseID, blobContentDisposition, encryptionKey, encryptionKeySha256, encryptionAlgorithm, tier, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID)
req, err := client.commitBlockListPreparer(blocks, timeout, blobCacheControl, blobContentType, blobContentEncoding, blobContentLanguage, blobContentMD5, transactionalContentMD5, transactionalContentCrc64, metadata, leaseID, blobContentDisposition, encryptionKey, encryptionKeySha256, encryptionAlgorithm, encryptionScope, tier, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags, requestID, blobTagsString)
if err != nil {
return nil, err
}
@ -82,7 +87,7 @@ func (client blockBlobClient) CommitBlockList(ctx context.Context, blocks BlockL
}
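// The tier parameter threaded through here surfaces directly in the public
// BlockBlobURL.CommitBlockList call used by the tests earlier in this change; a sketch
// (block IDs and tier are illustrative, and the trailing nil presumably leaves the new
// blob-tags argument unset):
//
//	_, err := destBlob.CommitBlockList(ctx, []string{blockID1, blockID2}, BlobHTTPHeaders{},
//		nil, BlobAccessConditions{}, AccessTierCool, nil)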
// commitBlockListPreparer prepares the CommitBlockList request.
func (client blockBlobClient) commitBlockListPreparer(blocks BlockLookupList, timeout *int32, blobCacheControl *string, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, transactionalContentMD5 []byte, transactionalContentCrc64 []byte, metadata map[string]string, leaseID *string, blobContentDisposition *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, tier AccessTierType, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
func (client blockBlobClient) commitBlockListPreparer(blocks BlockLookupList, timeout *int32, blobCacheControl *string, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, transactionalContentMD5 []byte, transactionalContentCrc64 []byte, metadata map[string]string, leaseID *string, blobContentDisposition *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, tier AccessTierType, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string, blobTagsString *string) (pipeline.Request, error) {
req, err := pipeline.NewRequest("PUT", client.url, nil)
if err != nil {
return req, pipeline.NewError(err, "failed to create request")
@ -134,6 +139,9 @@ func (client blockBlobClient) commitBlockListPreparer(blocks BlockLookupList, ti
if encryptionAlgorithm != EncryptionAlgorithmNone {
req.Header.Set("x-ms-encryption-algorithm", string(encryptionAlgorithm))
}
if encryptionScope != nil {
req.Header.Set("x-ms-encryption-scope", *encryptionScope)
}
if tier != AccessTierNone {
req.Header.Set("x-ms-access-tier", string(tier))
}
@ -149,10 +157,16 @@ func (client blockBlobClient) commitBlockListPreparer(blocks BlockLookupList, ti
if ifNoneMatch != nil {
req.Header.Set("If-None-Match", string(*ifNoneMatch))
}
if ifTags != nil {
req.Header.Set("x-ms-if-tags", *ifTags)
}
req.Header.Set("x-ms-version", ServiceVersion)
if requestID != nil {
req.Header.Set("x-ms-client-request-id", *requestID)
}
if blobTagsString != nil {
req.Header.Set("x-ms-tags", *blobTagsString)
}
b, err := xml.Marshal(blocks)
if err != nil {
return req, pipeline.NewError(err, "failed to marshal request body")
@ -186,16 +200,17 @@ func (client blockBlobClient) commitBlockListResponder(resp pipeline.Response) (
// a Snapshot of a Blob.</a> timeout is the timeout parameter is expressed in seconds. For more information, see <a
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
// Timeouts for Blob Service Operations.</a> leaseID is if specified, the operation only succeeds if the resource's
// lease is active and matches this ID. requestID is provides a client-generated, opaque value with a 1 KB character
// limit that is recorded in the analytics logs when storage analytics logging is enabled.
func (client blockBlobClient) GetBlockList(ctx context.Context, listType BlockListType, snapshot *string, timeout *int32, leaseID *string, requestID *string) (*BlockList, error) {
// lease is active and matches this ID. ifTags is specify a SQL where clause on blob tags to operate only on blobs with
// a matching value. requestID is provides a client-generated, opaque value with a 1 KB character limit that is
// recorded in the analytics logs when storage analytics logging is enabled.
func (client blockBlobClient) GetBlockList(ctx context.Context, listType BlockListType, snapshot *string, timeout *int32, leaseID *string, ifTags *string, requestID *string) (*BlockList, error) {
if err := validate([]validation{
{targetValue: timeout,
constraints: []constraint{{target: "timeout", name: null, rule: false,
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
return nil, err
}
req, err := client.getBlockListPreparer(listType, snapshot, timeout, leaseID, requestID)
req, err := client.getBlockListPreparer(listType, snapshot, timeout, leaseID, ifTags, requestID)
if err != nil {
return nil, err
}
@ -207,7 +222,7 @@ func (client blockBlobClient) GetBlockList(ctx context.Context, listType BlockLi
}
// getBlockListPreparer prepares the GetBlockList request.
func (client blockBlobClient) getBlockListPreparer(listType BlockListType, snapshot *string, timeout *int32, leaseID *string, requestID *string) (pipeline.Request, error) {
func (client blockBlobClient) getBlockListPreparer(listType BlockListType, snapshot *string, timeout *int32, leaseID *string, ifTags *string, requestID *string) (pipeline.Request, error) {
req, err := pipeline.NewRequest("GET", client.url, nil)
if err != nil {
return req, pipeline.NewError(err, "failed to create request")
@ -225,6 +240,9 @@ func (client blockBlobClient) getBlockListPreparer(listType BlockListType, snaps
if leaseID != nil {
req.Header.Set("x-ms-lease-id", *leaseID)
}
if ifTags != nil {
req.Header.Set("x-ms-if-tags", *ifTags)
}
req.Header.Set("x-ms-version", ServiceVersion)
if requestID != nil {
req.Header.Set("x-ms-client-request-id", *requestID)
@ -273,9 +291,12 @@ func (client blockBlobClient) getBlockListResponder(resp pipeline.Response) (pip
// more information, see Encryption at Rest for Azure Storage Services. encryptionKeySha256 is the SHA-256 hash of the
// provided encryption key. Must be provided if the x-ms-encryption-key header is provided. encryptionAlgorithm is the
// algorithm used to produce the encryption key hash. Currently, the only accepted value is "AES256". Must be provided
// if the x-ms-encryption-key header is provided. requestID is provides a client-generated, opaque value with a 1 KB
// character limit that is recorded in the analytics logs when storage analytics logging is enabled.
func (client blockBlobClient) StageBlock(ctx context.Context, blockID string, contentLength int64, body io.ReadSeeker, transactionalContentMD5 []byte, transactionalContentCrc64 []byte, timeout *int32, leaseID *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, requestID *string) (*BlockBlobStageBlockResponse, error) {
// if the x-ms-encryption-key header is provided. encryptionScope is optional. Version 2019-07-07 and later. Specifies
// the name of the encryption scope to use to encrypt the data provided in the request. If not specified, encryption is
// performed with the default account encryption scope. For more information, see Encryption at Rest for Azure Storage
// Services. requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded in the
// analytics logs when storage analytics logging is enabled.
func (client blockBlobClient) StageBlock(ctx context.Context, blockID string, contentLength int64, body io.ReadSeeker, transactionalContentMD5 []byte, transactionalContentCrc64 []byte, timeout *int32, leaseID *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, requestID *string) (*BlockBlobStageBlockResponse, error) {
if err := validate([]validation{
{targetValue: body,
constraints: []constraint{{target: "body", name: null, rule: true, chain: nil}}},
@ -284,7 +305,7 @@ func (client blockBlobClient) StageBlock(ctx context.Context, blockID string, co
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
return nil, err
}
req, err := client.stageBlockPreparer(blockID, contentLength, body, transactionalContentMD5, transactionalContentCrc64, timeout, leaseID, encryptionKey, encryptionKeySha256, encryptionAlgorithm, requestID)
req, err := client.stageBlockPreparer(blockID, contentLength, body, transactionalContentMD5, transactionalContentCrc64, timeout, leaseID, encryptionKey, encryptionKeySha256, encryptionAlgorithm, encryptionScope, requestID)
if err != nil {
return nil, err
}
@ -296,7 +317,7 @@ func (client blockBlobClient) StageBlock(ctx context.Context, blockID string, co
}
// stageBlockPreparer prepares the StageBlock request.
func (client blockBlobClient) stageBlockPreparer(blockID string, contentLength int64, body io.ReadSeeker, transactionalContentMD5 []byte, transactionalContentCrc64 []byte, timeout *int32, leaseID *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, requestID *string) (pipeline.Request, error) {
func (client blockBlobClient) stageBlockPreparer(blockID string, contentLength int64, body io.ReadSeeker, transactionalContentMD5 []byte, transactionalContentCrc64 []byte, timeout *int32, leaseID *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, requestID *string) (pipeline.Request, error) {
req, err := pipeline.NewRequest("PUT", client.url, body)
if err != nil {
return req, pipeline.NewError(err, "failed to create request")
@ -327,6 +348,9 @@ func (client blockBlobClient) stageBlockPreparer(blockID string, contentLength i
if encryptionAlgorithm != EncryptionAlgorithmNone {
req.Header.Set("x-ms-encryption-algorithm", string(encryptionAlgorithm))
}
if encryptionScope != nil {
req.Header.Set("x-ms-encryption-scope", *encryptionScope)
}
req.Header.Set("x-ms-version", ServiceVersion)
if requestID != nil {
req.Header.Set("x-ms-client-request-id", *requestID)
@ -361,21 +385,24 @@ func (client blockBlobClient) stageBlockResponder(resp pipeline.Response) (pipel
// For more information, see Encryption at Rest for Azure Storage Services. encryptionKeySha256 is the SHA-256 hash of
// the provided encryption key. Must be provided if the x-ms-encryption-key header is provided. encryptionAlgorithm is
// the algorithm used to produce the encryption key hash. Currently, the only accepted value is "AES256". Must be
// provided if the x-ms-encryption-key header is provided. leaseID is if specified, the operation only succeeds if the
// resource's lease is active and matches this ID. sourceIfModifiedSince is specify this header value to operate only
// on a blob if it has been modified since the specified date/time. sourceIfUnmodifiedSince is specify this header
// value to operate only on a blob if it has not been modified since the specified date/time. sourceIfMatch is specify
// an ETag value to operate only on blobs with a matching value. sourceIfNoneMatch is specify an ETag value to operate
// only on blobs without a matching value. requestID is provides a client-generated, opaque value with a 1 KB character
// limit that is recorded in the analytics logs when storage analytics logging is enabled.
func (client blockBlobClient) StageBlockFromURL(ctx context.Context, blockID string, contentLength int64, sourceURL string, sourceRange *string, sourceContentMD5 []byte, sourceContentcrc64 []byte, timeout *int32, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, leaseID *string, sourceIfModifiedSince *time.Time, sourceIfUnmodifiedSince *time.Time, sourceIfMatch *ETag, sourceIfNoneMatch *ETag, requestID *string) (*BlockBlobStageBlockFromURLResponse, error) {
// provided if the x-ms-encryption-key header is provided. encryptionScope is optional. Version 2019-07-07 and later.
// Specifies the name of the encryption scope to use to encrypt the data provided in the request. If not specified,
// encryption is performed with the default account encryption scope. For more information, see Encryption at Rest for
// Azure Storage Services. leaseID is if specified, the operation only succeeds if the resource's lease is active and
// matches this ID. sourceIfModifiedSince is specify this header value to operate only on a blob if it has been
// modified since the specified date/time. sourceIfUnmodifiedSince is specify this header value to operate only on a
// blob if it has not been modified since the specified date/time. sourceIfMatch is specify an ETag value to operate
// only on blobs with a matching value. sourceIfNoneMatch is specify an ETag value to operate only on blobs without a
// matching value. requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded
// in the analytics logs when storage analytics logging is enabled.
func (client blockBlobClient) StageBlockFromURL(ctx context.Context, blockID string, contentLength int64, sourceURL string, sourceRange *string, sourceContentMD5 []byte, sourceContentcrc64 []byte, timeout *int32, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, leaseID *string, sourceIfModifiedSince *time.Time, sourceIfUnmodifiedSince *time.Time, sourceIfMatch *ETag, sourceIfNoneMatch *ETag, requestID *string) (*BlockBlobStageBlockFromURLResponse, error) {
if err := validate([]validation{
{targetValue: timeout,
constraints: []constraint{{target: "timeout", name: null, rule: false,
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
return nil, err
}
req, err := client.stageBlockFromURLPreparer(blockID, contentLength, sourceURL, sourceRange, sourceContentMD5, sourceContentcrc64, timeout, encryptionKey, encryptionKeySha256, encryptionAlgorithm, leaseID, sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatch, sourceIfNoneMatch, requestID)
req, err := client.stageBlockFromURLPreparer(blockID, contentLength, sourceURL, sourceRange, sourceContentMD5, sourceContentcrc64, timeout, encryptionKey, encryptionKeySha256, encryptionAlgorithm, encryptionScope, leaseID, sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatch, sourceIfNoneMatch, requestID)
if err != nil {
return nil, err
}
@ -387,7 +414,7 @@ func (client blockBlobClient) StageBlockFromURL(ctx context.Context, blockID str
}
// stageBlockFromURLPreparer prepares the StageBlockFromURL request.
func (client blockBlobClient) stageBlockFromURLPreparer(blockID string, contentLength int64, sourceURL string, sourceRange *string, sourceContentMD5 []byte, sourceContentcrc64 []byte, timeout *int32, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, leaseID *string, sourceIfModifiedSince *time.Time, sourceIfUnmodifiedSince *time.Time, sourceIfMatch *ETag, sourceIfNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
func (client blockBlobClient) stageBlockFromURLPreparer(blockID string, contentLength int64, sourceURL string, sourceRange *string, sourceContentMD5 []byte, sourceContentcrc64 []byte, timeout *int32, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, leaseID *string, sourceIfModifiedSince *time.Time, sourceIfUnmodifiedSince *time.Time, sourceIfMatch *ETag, sourceIfNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
req, err := pipeline.NewRequest("PUT", client.url, nil)
if err != nil {
return req, pipeline.NewError(err, "failed to create request")
@ -419,6 +446,9 @@ func (client blockBlobClient) stageBlockFromURLPreparer(blockID string, contentL
if encryptionAlgorithm != EncryptionAlgorithmNone {
req.Header.Set("x-ms-encryption-algorithm", string(encryptionAlgorithm))
}
if encryptionScope != nil {
req.Header.Set("x-ms-encryption-scope", *encryptionScope)
}
if leaseID != nil {
req.Header.Set("x-ms-lease-id", *leaseID)
}
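
StageBlock and StageBlockFromURL now accept an encryptionScope that is forwarded as the x-ms-encryption-scope header. A minimal sketch of the raw Put Block request this corresponds to, assuming the standard comp=block and blockid query parameters and placeholder account, container, and blob names; request signing (SAS or shared key) is omitted.

package main

import (
	"encoding/base64"
	"fmt"
	"net/http"
	"net/url"
	"strings"
)

func main() {
	// Placeholder blob URL; request signing is omitted.
	blobURL := "https://myaccount.blob.core.windows.net/mycontainer/myblob"

	// Block IDs are base64-encoded strings of equal length within one blob.
	blockID := base64.StdEncoding.EncodeToString([]byte("block-000001"))
	q := url.Values{}
	q.Set("comp", "block")
	q.Set("blockid", blockID)

	req, err := http.NewRequest("PUT", blobURL+"?"+q.Encode(), strings.NewReader("block payload"))
	if err != nil {
		panic(err)
	}
	req.Header.Set("x-ms-version", "2019-12-12")
	// New in 2019-07-07 and later: encrypt this block under a named scope
	// instead of the account's default encryption scope.
	req.Header.Set("x-ms-encryption-scope", "my-scope")

	fmt.Println(req.URL.String(), req.Header)
}
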
@ -480,14 +510,18 @@ func (client blockBlobClient) stageBlockFromURLResponder(resp pipeline.Response)
// with the root account encryption key. For more information, see Encryption at Rest for Azure Storage Services.
// encryptionKeySha256 is the SHA-256 hash of the provided encryption key. Must be provided if the x-ms-encryption-key
// header is provided. encryptionAlgorithm is the algorithm used to produce the encryption key hash. Currently, the
// only accepted value is "AES256". Must be provided if the x-ms-encryption-key header is provided. tier is optional.
// Indicates the tier to be set on the blob. ifModifiedSince is specify this header value to operate only on a blob if
// it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only
// on a blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value to operate
// only on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs without a
// matching value. requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded
// in the analytics logs when storage analytics logging is enabled.
func (client blockBlobClient) Upload(ctx context.Context, body io.ReadSeeker, contentLength int64, timeout *int32, transactionalContentMD5 []byte, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, blobCacheControl *string, metadata map[string]string, leaseID *string, blobContentDisposition *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, tier AccessTierType, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*BlockBlobUploadResponse, error) {
// only accepted value is "AES256". Must be provided if the x-ms-encryption-key header is provided. encryptionScope is
// optional. Version 2019-07-07 and later. Specifies the name of the encryption scope to use to encrypt the data
// provided in the request. If not specified, encryption is performed with the default account encryption scope. For
// more information, see Encryption at Rest for Azure Storage Services. tier is optional. Indicates the tier to be set
// on the blob. ifModifiedSince is specify this header value to operate only on a blob if it has been modified since
// the specified date/time. ifUnmodifiedSince is specify this header value to operate only on a blob if it has not been
// modified since the specified date/time. ifMatch is specify an ETag value to operate only on blobs with a matching
// value. ifNoneMatch is specify an ETag value to operate only on blobs without a matching value. ifTags is specify a
// SQL where clause on blob tags to operate only on blobs with a matching value. requestID is provides a
// client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
// analytics logging is enabled. blobTagsString is optional. Used to set blob tags in various blob operations.
func (client blockBlobClient) Upload(ctx context.Context, body io.ReadSeeker, contentLength int64, timeout *int32, transactionalContentMD5 []byte, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, blobCacheControl *string, metadata map[string]string, leaseID *string, blobContentDisposition *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, tier AccessTierType, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string, blobTagsString *string) (*BlockBlobUploadResponse, error) {
if err := validate([]validation{
{targetValue: body,
constraints: []constraint{{target: "body", name: null, rule: true, chain: nil}}},
@ -496,7 +530,7 @@ func (client blockBlobClient) Upload(ctx context.Context, body io.ReadSeeker, co
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
return nil, err
}
req, err := client.uploadPreparer(body, contentLength, timeout, transactionalContentMD5, blobContentType, blobContentEncoding, blobContentLanguage, blobContentMD5, blobCacheControl, metadata, leaseID, blobContentDisposition, encryptionKey, encryptionKeySha256, encryptionAlgorithm, tier, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID)
req, err := client.uploadPreparer(body, contentLength, timeout, transactionalContentMD5, blobContentType, blobContentEncoding, blobContentLanguage, blobContentMD5, blobCacheControl, metadata, leaseID, blobContentDisposition, encryptionKey, encryptionKeySha256, encryptionAlgorithm, encryptionScope, tier, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags, requestID, blobTagsString)
if err != nil {
return nil, err
}
@ -508,7 +542,7 @@ func (client blockBlobClient) Upload(ctx context.Context, body io.ReadSeeker, co
}
// uploadPreparer prepares the Upload request.
func (client blockBlobClient) uploadPreparer(body io.ReadSeeker, contentLength int64, timeout *int32, transactionalContentMD5 []byte, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, blobCacheControl *string, metadata map[string]string, leaseID *string, blobContentDisposition *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, tier AccessTierType, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
func (client blockBlobClient) uploadPreparer(body io.ReadSeeker, contentLength int64, timeout *int32, transactionalContentMD5 []byte, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, blobCacheControl *string, metadata map[string]string, leaseID *string, blobContentDisposition *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, tier AccessTierType, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string, blobTagsString *string) (pipeline.Request, error) {
req, err := pipeline.NewRequest("PUT", client.url, body)
if err != nil {
return req, pipeline.NewError(err, "failed to create request")
@ -557,6 +591,9 @@ func (client blockBlobClient) uploadPreparer(body io.ReadSeeker, contentLength i
if encryptionAlgorithm != EncryptionAlgorithmNone {
req.Header.Set("x-ms-encryption-algorithm", string(encryptionAlgorithm))
}
if encryptionScope != nil {
req.Header.Set("x-ms-encryption-scope", *encryptionScope)
}
if tier != AccessTierNone {
req.Header.Set("x-ms-access-tier", string(tier))
}
@ -572,10 +609,16 @@ func (client blockBlobClient) uploadPreparer(body io.ReadSeeker, contentLength i
if ifNoneMatch != nil {
req.Header.Set("If-None-Match", string(*ifNoneMatch))
}
if ifTags != nil {
req.Header.Set("x-ms-if-tags", *ifTags)
}
req.Header.Set("x-ms-version", ServiceVersion)
if requestID != nil {
req.Header.Set("x-ms-client-request-id", *requestID)
}
if blobTagsString != nil {
req.Header.Set("x-ms-tags", *blobTagsString)
}
req.Header.Set("x-ms-blob-type", "BlockBlob")
return req, nil
}
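
uploadPreparer now stamps the tier, the tag set, and the new service version onto a single Put Blob request. A minimal sketch of that wire format, assuming placeholder names and a query-string-encoded tag string; request signing is omitted and the CSV payload is illustrative only.

package main

import (
	"fmt"
	"net/http"
	"net/url"
	"strings"
)

func main() {
	// Placeholder blob URL; request signing is omitted.
	blobURL := "https://myaccount.blob.core.windows.net/mycontainer/report.csv"

	// Tags travel in x-ms-tags as a URL-query-encoded string, e.g. "project=alpha&state=raw".
	tags := url.Values{}
	tags.Set("project", "alpha")
	tags.Set("state", "raw")

	req, err := http.NewRequest("PUT", blobURL, strings.NewReader("col1,col2\n1,2\n"))
	if err != nil {
		panic(err)
	}
	req.Header.Set("x-ms-version", "2019-12-12")
	req.Header.Set("x-ms-blob-type", "BlockBlob")
	req.Header.Set("x-ms-access-tier", "Cool") // tier set at creation, no separate Set Blob Tier call
	req.Header.Set("x-ms-tags", tags.Encode()) // tags set at creation

	fmt.Println(req.Header.Get("x-ms-tags"))
}
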

View file

@ -10,7 +10,7 @@ import (
const (
// ServiceVersion specifies the version of the operations used in this package.
ServiceVersion = "2019-02-02"
ServiceVersion = "2019-12-12"
)
// managementClient is the base client for Azblob.

View file

@ -259,14 +259,18 @@ func (client containerClient) changeLeaseResponder(resp pipeline.Response) (pipe
// Containers, Blobs, and Metadata for more information. access is specifies whether data in the container may be
// accessed publicly and the level of access requestID is provides a client-generated, opaque value with a 1 KB
// character limit that is recorded in the analytics logs when storage analytics logging is enabled.
func (client containerClient) Create(ctx context.Context, timeout *int32, metadata map[string]string, access PublicAccessType, requestID *string) (*ContainerCreateResponse, error) {
// defaultEncryptionScope is optional. Version 2019-07-07 and later. Specifies the default encryption scope to set on
// the container and use for all future writes. preventEncryptionScopeOverride is optional. Version 2019-07-07 and
// newer. If true, prevents any request from specifying a different encryption scope than the scope set on the
// container.
func (client containerClient) Create(ctx context.Context, timeout *int32, metadata map[string]string, access PublicAccessType, requestID *string, defaultEncryptionScope *string, preventEncryptionScopeOverride *bool) (*ContainerCreateResponse, error) {
if err := validate([]validation{
{targetValue: timeout,
constraints: []constraint{{target: "timeout", name: null, rule: false,
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
return nil, err
}
req, err := client.createPreparer(timeout, metadata, access, requestID)
req, err := client.createPreparer(timeout, metadata, access, requestID, defaultEncryptionScope, preventEncryptionScopeOverride)
if err != nil {
return nil, err
}
@ -278,7 +282,7 @@ func (client containerClient) Create(ctx context.Context, timeout *int32, metada
}
// createPreparer prepares the Create request.
func (client containerClient) createPreparer(timeout *int32, metadata map[string]string, access PublicAccessType, requestID *string) (pipeline.Request, error) {
func (client containerClient) createPreparer(timeout *int32, metadata map[string]string, access PublicAccessType, requestID *string, defaultEncryptionScope *string, preventEncryptionScopeOverride *bool) (pipeline.Request, error) {
req, err := pipeline.NewRequest("PUT", client.url, nil)
if err != nil {
return req, pipeline.NewError(err, "failed to create request")
@ -301,6 +305,12 @@ func (client containerClient) createPreparer(timeout *int32, metadata map[string
if requestID != nil {
req.Header.Set("x-ms-client-request-id", *requestID)
}
if defaultEncryptionScope != nil {
req.Header.Set("x-ms-default-encryption-scope", *defaultEncryptionScope)
}
if preventEncryptionScopeOverride != nil {
req.Header.Set("x-ms-deny-encryption-scope-override", strconv.FormatBool(*preventEncryptionScopeOverride))
}
return req, nil
}
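
The container createPreparer gains two headers that pin every future write in the container to a named encryption scope. A minimal sketch, assuming the standard restype=container query parameter, a placeholder scope name, and no authentication.

package main

import (
	"fmt"
	"net/http"
	"strconv"
)

func main() {
	// Placeholder container URL; request signing is omitted.
	containerURL := "https://myaccount.blob.core.windows.net/secure-container"

	req, err := http.NewRequest("PUT", containerURL+"?restype=container", nil)
	if err != nil {
		panic(err)
	}
	req.Header.Set("x-ms-version", "2019-12-12")
	// Blobs written to this container are encrypted under "container-scope" by default.
	req.Header.Set("x-ms-default-encryption-scope", "container-scope")
	// Deny writers from naming a different scope on individual requests.
	req.Header.Set("x-ms-deny-encryption-scope-override", strconv.FormatBool(true))

	fmt.Println(req.Header)
}

With the override denied, a write that names any other encryption scope should be rejected by the service.
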
@ -881,6 +891,70 @@ func (client containerClient) renewLeaseResponder(resp pipeline.Response) (pipel
return &ContainerRenewLeaseResponse{rawResponse: resp.Response()}, err
}
// Restore restores a previously-deleted container.
//
// timeout is the timeout parameter is expressed in seconds. For more information, see <a
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
// Timeouts for Blob Service Operations.</a> requestID is provides a client-generated, opaque value with a 1 KB
// character limit that is recorded in the analytics logs when storage analytics logging is enabled.
// deletedContainerName is optional. Version 2019-12-12 and later. Specifies the name of the deleted container to
// restore. deletedContainerVersion is optional. Version 2019-12-12 and later. Specifies the version of the deleted
// container to restore.
func (client containerClient) Restore(ctx context.Context, timeout *int32, requestID *string, deletedContainerName *string, deletedContainerVersion *string) (*ContainerRestoreResponse, error) {
if err := validate([]validation{
{targetValue: timeout,
constraints: []constraint{{target: "timeout", name: null, rule: false,
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
return nil, err
}
req, err := client.restorePreparer(timeout, requestID, deletedContainerName, deletedContainerVersion)
if err != nil {
return nil, err
}
resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.restoreResponder}, req)
if err != nil {
return nil, err
}
return resp.(*ContainerRestoreResponse), err
}
// restorePreparer prepares the Restore request.
func (client containerClient) restorePreparer(timeout *int32, requestID *string, deletedContainerName *string, deletedContainerVersion *string) (pipeline.Request, error) {
req, err := pipeline.NewRequest("PUT", client.url, nil)
if err != nil {
return req, pipeline.NewError(err, "failed to create request")
}
params := req.URL.Query()
if timeout != nil {
params.Set("timeout", strconv.FormatInt(int64(*timeout), 10))
}
params.Set("restype", "container")
params.Set("comp", "undelete")
req.URL.RawQuery = params.Encode()
req.Header.Set("x-ms-version", ServiceVersion)
if requestID != nil {
req.Header.Set("x-ms-client-request-id", *requestID)
}
if deletedContainerName != nil {
req.Header.Set("x-ms-deleted-container-name", *deletedContainerName)
}
if deletedContainerVersion != nil {
req.Header.Set("x-ms-deleted-container-version", *deletedContainerVersion)
}
return req, nil
}
// restoreResponder handles the response to the Restore request.
func (client containerClient) restoreResponder(resp pipeline.Response) (pipeline.Response, error) {
err := validateResponse(resp, http.StatusOK, http.StatusCreated)
if resp == nil {
return nil, err
}
io.Copy(ioutil.Discard, resp.Response().Body)
resp.Response().Body.Close()
return &ContainerRestoreResponse{rawResponse: resp.Response()}, err
}
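
restorePreparer above shows the full undelete request. The same request expressed as a standalone sketch, with placeholder name and version values; the version is typically read from a container listing that includes deleted items, and authentication is omitted.

package main

import (
	"fmt"
	"net/http"
	"net/url"
)

// newRestoreRequest mirrors restorePreparer: PUT <container>?restype=container&comp=undelete
// with the deleted container's name and version carried as headers.
func newRestoreRequest(containerURL, deletedName, deletedVersion string) (*http.Request, error) {
	req, err := http.NewRequest("PUT", containerURL, nil)
	if err != nil {
		return nil, err
	}
	params := url.Values{}
	params.Set("restype", "container")
	params.Set("comp", "undelete")
	req.URL.RawQuery = params.Encode()

	req.Header.Set("x-ms-version", "2019-12-12")
	req.Header.Set("x-ms-deleted-container-name", deletedName)
	req.Header.Set("x-ms-deleted-container-version", deletedVersion)
	return req, nil
}

func main() {
	// Placeholder values only.
	req, err := newRestoreRequest("https://myaccount.blob.core.windows.net/old-container", "old-container", "01D60F8BB59A4652")
	if err != nil {
		panic(err)
	}
	fmt.Println(req.Method, req.URL.String())
}
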
// SetAccessPolicy sets the permissions for the specified container. The permissions indicate whether blobs in a
// container may be accessed publicly.
//

The diff for this file is not shown because it is too large.

View file

@ -38,23 +38,26 @@ func newPageBlobClient(url url.URL, p pipeline.Pipeline) pageBlobClient {
// Rest for Azure Storage Services. encryptionKeySha256 is the SHA-256 hash of the provided encryption key. Must be
// provided if the x-ms-encryption-key header is provided. encryptionAlgorithm is the algorithm used to produce the
// encryption key hash. Currently, the only accepted value is "AES256". Must be provided if the x-ms-encryption-key
// header is provided. ifSequenceNumberLessThanOrEqualTo is specify this header value to operate only on a blob if it
// has a sequence number less than or equal to the specified. ifSequenceNumberLessThan is specify this header value to
// operate only on a blob if it has a sequence number less than the specified. ifSequenceNumberEqualTo is specify this
// header value to operate only on a blob if it has the specified sequence number. ifModifiedSince is specify this
// header value to operate only on a blob if it has been modified since the specified date/time. ifUnmodifiedSince is
// specify this header value to operate only on a blob if it has not been modified since the specified date/time.
// ifMatch is specify an ETag value to operate only on blobs with a matching value. ifNoneMatch is specify an ETag
// value to operate only on blobs without a matching value. requestID is provides a client-generated, opaque value with
// a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled.
func (client pageBlobClient) ClearPages(ctx context.Context, contentLength int64, timeout *int32, rangeParameter *string, leaseID *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, ifSequenceNumberLessThanOrEqualTo *int64, ifSequenceNumberLessThan *int64, ifSequenceNumberEqualTo *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*PageBlobClearPagesResponse, error) {
// header is provided. encryptionScope is optional. Version 2019-07-07 and later. Specifies the name of the encryption
// scope to use to encrypt the data provided in the request. If not specified, encryption is performed with the default
// account encryption scope. For more information, see Encryption at Rest for Azure Storage Services.
// ifSequenceNumberLessThanOrEqualTo is specify this header value to operate only on a blob if it has a sequence number
// less than or equal to the specified. ifSequenceNumberLessThan is specify this header value to operate only on a blob
// if it has a sequence number less than the specified. ifSequenceNumberEqualTo is specify this header value to operate
// only on a blob if it has the specified sequence number. ifModifiedSince is specify this header value to operate only
// on a blob if it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to
// operate only on a blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value
// to operate only on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs
// without a matching value. requestID is provides a client-generated, opaque value with a 1 KB character limit that is
// recorded in the analytics logs when storage analytics logging is enabled.
func (client pageBlobClient) ClearPages(ctx context.Context, contentLength int64, timeout *int32, rangeParameter *string, leaseID *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, ifSequenceNumberLessThanOrEqualTo *int64, ifSequenceNumberLessThan *int64, ifSequenceNumberEqualTo *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*PageBlobClearPagesResponse, error) {
if err := validate([]validation{
{targetValue: timeout,
constraints: []constraint{{target: "timeout", name: null, rule: false,
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
return nil, err
}
req, err := client.clearPagesPreparer(contentLength, timeout, rangeParameter, leaseID, encryptionKey, encryptionKeySha256, encryptionAlgorithm, ifSequenceNumberLessThanOrEqualTo, ifSequenceNumberLessThan, ifSequenceNumberEqualTo, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID)
req, err := client.clearPagesPreparer(contentLength, timeout, rangeParameter, leaseID, encryptionKey, encryptionKeySha256, encryptionAlgorithm, encryptionScope, ifSequenceNumberLessThanOrEqualTo, ifSequenceNumberLessThan, ifSequenceNumberEqualTo, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID)
if err != nil {
return nil, err
}
@ -66,7 +69,7 @@ func (client pageBlobClient) ClearPages(ctx context.Context, contentLength int64
}
// clearPagesPreparer prepares the ClearPages request.
func (client pageBlobClient) clearPagesPreparer(contentLength int64, timeout *int32, rangeParameter *string, leaseID *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, ifSequenceNumberLessThanOrEqualTo *int64, ifSequenceNumberLessThan *int64, ifSequenceNumberEqualTo *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
func (client pageBlobClient) clearPagesPreparer(contentLength int64, timeout *int32, rangeParameter *string, leaseID *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, ifSequenceNumberLessThanOrEqualTo *int64, ifSequenceNumberLessThan *int64, ifSequenceNumberEqualTo *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
req, err := pipeline.NewRequest("PUT", client.url, nil)
if err != nil {
return req, pipeline.NewError(err, "failed to create request")
@ -93,6 +96,9 @@ func (client pageBlobClient) clearPagesPreparer(contentLength int64, timeout *in
if encryptionAlgorithm != EncryptionAlgorithmNone {
req.Header.Set("x-ms-encryption-algorithm", string(encryptionAlgorithm))
}
if encryptionScope != nil {
req.Header.Set("x-ms-encryption-scope", *encryptionScope)
}
if ifSequenceNumberLessThanOrEqualTo != nil {
req.Header.Set("x-ms-if-sequence-number-le", strconv.FormatInt(*ifSequenceNumberLessThanOrEqualTo, 10))
}
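
clearPagesPreparer also gains the x-ms-encryption-scope header. A minimal sketch of a Put Page clear of the first 512-byte page, assuming the standard comp=page query parameter and the x-ms-page-write/x-ms-range headers, which are not visible in this hunk; names are placeholders and authentication is omitted.

package main

import (
	"fmt"
	"net/http"
)

func main() {
	// Placeholder page blob URL; request signing is omitted.
	blobURL := "https://myaccount.blob.core.windows.net/mycontainer/disk.vhd"

	// An empty body clears the addressed range rather than writing data.
	req, err := http.NewRequest("PUT", blobURL+"?comp=page", nil)
	if err != nil {
		panic(err)
	}
	req.Header.Set("x-ms-version", "2019-12-12")
	req.Header.Set("x-ms-page-write", "clear")          // clear rather than update
	req.Header.Set("x-ms-range", "bytes=0-511")         // page ranges are 512-byte aligned
	req.Header.Set("x-ms-encryption-scope", "my-scope") // same new header as the block blob operations

	fmt.Println(req.Header)
}
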
@ -235,22 +241,26 @@ func (client pageBlobClient) copyIncrementalResponder(resp pipeline.Response) (p
// encryption key. For more information, see Encryption at Rest for Azure Storage Services. encryptionKeySha256 is the
// SHA-256 hash of the provided encryption key. Must be provided if the x-ms-encryption-key header is provided.
// encryptionAlgorithm is the algorithm used to produce the encryption key hash. Currently, the only accepted value is
// "AES256". Must be provided if the x-ms-encryption-key header is provided. ifModifiedSince is specify this header
// value to operate only on a blob if it has been modified since the specified date/time. ifUnmodifiedSince is specify
// this header value to operate only on a blob if it has not been modified since the specified date/time. ifMatch is
// specify an ETag value to operate only on blobs with a matching value. ifNoneMatch is specify an ETag value to
// operate only on blobs without a matching value. blobSequenceNumber is set for page blobs only. The sequence number
// is a user-controlled value that you can use to track requests. The value of the sequence number must be between 0
// and 2^63 - 1. requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded in
// the analytics logs when storage analytics logging is enabled.
func (client pageBlobClient) Create(ctx context.Context, contentLength int64, blobContentLength int64, timeout *int32, tier PremiumPageBlobAccessTierType, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, blobCacheControl *string, metadata map[string]string, leaseID *string, blobContentDisposition *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, blobSequenceNumber *int64, requestID *string) (*PageBlobCreateResponse, error) {
// "AES256". Must be provided if the x-ms-encryption-key header is provided. encryptionScope is optional. Version
// 2019-07-07 and later. Specifies the name of the encryption scope to use to encrypt the data provided in the
// request. If not specified, encryption is performed with the default account encryption scope. For more information,
// see Encryption at Rest for Azure Storage Services. ifModifiedSince is specify this header value to operate only on a
// blob if it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to
// operate only on a blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value
// to operate only on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs
// without a matching value. ifTags is specify a SQL where clause on blob tags to operate only on blobs with a matching
// value. blobSequenceNumber is set for page blobs only. The sequence number is a user-controlled value that you can
// use to track requests. The value of the sequence number must be between 0 and 2^63 - 1. requestID is provides a
// client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
// analytics logging is enabled. blobTagsString is optional. Used to set blob tags in various blob operations.
func (client pageBlobClient) Create(ctx context.Context, contentLength int64, blobContentLength int64, timeout *int32, tier PremiumPageBlobAccessTierType, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, blobCacheControl *string, metadata map[string]string, leaseID *string, blobContentDisposition *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, blobSequenceNumber *int64, requestID *string, blobTagsString *string) (*PageBlobCreateResponse, error) {
if err := validate([]validation{
{targetValue: timeout,
constraints: []constraint{{target: "timeout", name: null, rule: false,
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
return nil, err
}
req, err := client.createPreparer(contentLength, blobContentLength, timeout, tier, blobContentType, blobContentEncoding, blobContentLanguage, blobContentMD5, blobCacheControl, metadata, leaseID, blobContentDisposition, encryptionKey, encryptionKeySha256, encryptionAlgorithm, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, blobSequenceNumber, requestID)
req, err := client.createPreparer(contentLength, blobContentLength, timeout, tier, blobContentType, blobContentEncoding, blobContentLanguage, blobContentMD5, blobCacheControl, metadata, leaseID, blobContentDisposition, encryptionKey, encryptionKeySha256, encryptionAlgorithm, encryptionScope, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags, blobSequenceNumber, requestID, blobTagsString)
if err != nil {
return nil, err
}
@ -262,7 +272,7 @@ func (client pageBlobClient) Create(ctx context.Context, contentLength int64, bl
}
// createPreparer prepares the Create request.
func (client pageBlobClient) createPreparer(contentLength int64, blobContentLength int64, timeout *int32, tier PremiumPageBlobAccessTierType, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, blobCacheControl *string, metadata map[string]string, leaseID *string, blobContentDisposition *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, blobSequenceNumber *int64, requestID *string) (pipeline.Request, error) {
func (client pageBlobClient) createPreparer(contentLength int64, blobContentLength int64, timeout *int32, tier PremiumPageBlobAccessTierType, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, blobCacheControl *string, metadata map[string]string, leaseID *string, blobContentDisposition *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, blobSequenceNumber *int64, requestID *string, blobTagsString *string) (pipeline.Request, error) {
req, err := pipeline.NewRequest("PUT", client.url, nil)
if err != nil {
return req, pipeline.NewError(err, "failed to create request")
@ -311,6 +321,9 @@ func (client pageBlobClient) createPreparer(contentLength int64, blobContentLeng
if encryptionAlgorithm != EncryptionAlgorithmNone {
req.Header.Set("x-ms-encryption-algorithm", string(encryptionAlgorithm))
}
if encryptionScope != nil {
req.Header.Set("x-ms-encryption-scope", *encryptionScope)
}
if ifModifiedSince != nil {
req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123))
}
@ -323,6 +336,9 @@ func (client pageBlobClient) createPreparer(contentLength int64, blobContentLeng
if ifNoneMatch != nil {
req.Header.Set("If-None-Match", string(*ifNoneMatch))
}
if ifTags != nil {
req.Header.Set("x-ms-if-tags", *ifTags)
}
req.Header.Set("x-ms-blob-content-length", strconv.FormatInt(blobContentLength, 10))
if blobSequenceNumber != nil {
req.Header.Set("x-ms-blob-sequence-number", strconv.FormatInt(*blobSequenceNumber, 10))
@ -331,6 +347,9 @@ func (client pageBlobClient) createPreparer(contentLength int64, blobContentLeng
if requestID != nil {
req.Header.Set("x-ms-client-request-id", *requestID)
}
if blobTagsString != nil {
req.Header.Set("x-ms-tags", *blobTagsString)
}
req.Header.Set("x-ms-blob-type", "PageBlob")
return req, nil
}
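
The page blob createPreparer now carries tags at creation time alongside the required page blob headers. A minimal sketch with placeholder names and an assumed 8 MiB capacity; request signing is omitted.

package main

import (
	"fmt"
	"net/http"
	"net/url"
	"strconv"
)

func main() {
	// Placeholder page blob URL; request signing is omitted.
	blobURL := "https://myaccount.blob.core.windows.net/mycontainer/scratch.vhd"

	tags := url.Values{}
	tags.Set("purpose", "scratch")

	req, err := http.NewRequest("PUT", blobURL, nil) // creating a page blob carries no body
	if err != nil {
		panic(err)
	}
	req.Header.Set("x-ms-version", "2019-12-12")
	req.Header.Set("x-ms-blob-type", "PageBlob")
	// Capacity of the page blob; must be a multiple of 512 bytes.
	req.Header.Set("x-ms-blob-content-length", strconv.FormatInt(8*1024*1024, 10))
	req.Header.Set("x-ms-blob-sequence-number", strconv.FormatInt(0, 10))
	// Same creation-time tag header as block blobs.
	req.Header.Set("x-ms-tags", tags.Encode())

	fmt.Println(req.Header)
}
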
@ -359,17 +378,18 @@ func (client pageBlobClient) createResponder(resp pipeline.Response) (pipeline.R
// ifModifiedSince is specify this header value to operate only on a blob if it has been modified since the specified
// date/time. ifUnmodifiedSince is specify this header value to operate only on a blob if it has not been modified
// since the specified date/time. ifMatch is specify an ETag value to operate only on blobs with a matching value.
// ifNoneMatch is specify an ETag value to operate only on blobs without a matching value. requestID is provides a
// client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
// analytics logging is enabled.
func (client pageBlobClient) GetPageRanges(ctx context.Context, snapshot *string, timeout *int32, rangeParameter *string, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*PageList, error) {
// ifNoneMatch is specify an ETag value to operate only on blobs without a matching value. ifTags is specify a SQL
// where clause on blob tags to operate only on blobs with a matching value. requestID is provides a client-generated,
// opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is
// enabled.
func (client pageBlobClient) GetPageRanges(ctx context.Context, snapshot *string, timeout *int32, rangeParameter *string, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string) (*PageList, error) {
if err := validate([]validation{
{targetValue: timeout,
constraints: []constraint{{target: "timeout", name: null, rule: false,
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
return nil, err
}
req, err := client.getPageRangesPreparer(snapshot, timeout, rangeParameter, leaseID, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID)
req, err := client.getPageRangesPreparer(snapshot, timeout, rangeParameter, leaseID, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags, requestID)
if err != nil {
return nil, err
}
@ -381,7 +401,7 @@ func (client pageBlobClient) GetPageRanges(ctx context.Context, snapshot *string
}
// getPageRangesPreparer prepares the GetPageRanges request.
func (client pageBlobClient) getPageRangesPreparer(snapshot *string, timeout *int32, rangeParameter *string, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
func (client pageBlobClient) getPageRangesPreparer(snapshot *string, timeout *int32, rangeParameter *string, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string) (pipeline.Request, error) {
req, err := pipeline.NewRequest("GET", client.url, nil)
if err != nil {
return req, pipeline.NewError(err, "failed to create request")
@ -413,6 +433,9 @@ func (client pageBlobClient) getPageRangesPreparer(snapshot *string, timeout *in
if ifNoneMatch != nil {
req.Header.Set("If-None-Match", string(*ifNoneMatch))
}
if ifTags != nil {
req.Header.Set("x-ms-if-tags", *ifTags)
}
req.Header.Set("x-ms-version", ServiceVersion)
if requestID != nil {
req.Header.Set("x-ms-client-request-id", *requestID)
@ -457,22 +480,25 @@ func (client pageBlobClient) getPageRangesResponder(resp pipeline.Response) (pip
// parameter is a DateTime value that specifies that the response will contain only pages that were changed between
// target blob and previous snapshot. Changed pages include both updated and cleared pages. The target blob may be a
// snapshot, as long as the snapshot specified by prevsnapshot is the older of the two. Note that incremental snapshots
// are currently supported only for blobs created on or after January 1, 2016. rangeParameter is return only the bytes
// of the blob in the specified range. leaseID is if specified, the operation only succeeds if the resource's lease is
// active and matches this ID. ifModifiedSince is specify this header value to operate only on a blob if it has been
// modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only on a blob if
// it has not been modified since the specified date/time. ifMatch is specify an ETag value to operate only on blobs
// with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs without a matching value.
// requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics
// logs when storage analytics logging is enabled.
func (client pageBlobClient) GetPageRangesDiff(ctx context.Context, snapshot *string, timeout *int32, prevsnapshot *string, rangeParameter *string, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*PageList, error) {
// are currently supported only for blobs created on or after January 1, 2016. prevSnapshotURL is optional. This header
// is only supported in service versions 2019-04-19 and after and specifies the URL of a previous snapshot of the
// target blob. The response will only contain pages that were changed between the target blob and its previous
// snapshot. rangeParameter is return only the bytes of the blob in the specified range. leaseID is if specified, the
// operation only succeeds if the resource's lease is active and matches this ID. ifModifiedSince is specify this
// header value to operate only on a blob if it has been modified since the specified date/time. ifUnmodifiedSince is
// specify this header value to operate only on a blob if it has not been modified since the specified date/time.
// ifMatch is specify an ETag value to operate only on blobs with a matching value. ifNoneMatch is specify an ETag
// value to operate only on blobs without a matching value. ifTags is specify a SQL where clause on blob tags to
// operate only on blobs with a matching value. requestID is provides a client-generated, opaque value with a 1 KB
// character limit that is recorded in the analytics logs when storage analytics logging is enabled.
func (client pageBlobClient) GetPageRangesDiff(ctx context.Context, snapshot *string, timeout *int32, prevsnapshot *string, prevSnapshotURL *string, rangeParameter *string, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string) (*PageList, error) {
if err := validate([]validation{
{targetValue: timeout,
constraints: []constraint{{target: "timeout", name: null, rule: false,
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
return nil, err
}
req, err := client.getPageRangesDiffPreparer(snapshot, timeout, prevsnapshot, rangeParameter, leaseID, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID)
req, err := client.getPageRangesDiffPreparer(snapshot, timeout, prevsnapshot, prevSnapshotURL, rangeParameter, leaseID, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags, requestID)
if err != nil {
return nil, err
}
@ -484,7 +510,7 @@ func (client pageBlobClient) GetPageRangesDiff(ctx context.Context, snapshot *st
}
// getPageRangesDiffPreparer prepares the GetPageRangesDiff request.
func (client pageBlobClient) getPageRangesDiffPreparer(snapshot *string, timeout *int32, prevsnapshot *string, rangeParameter *string, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
func (client pageBlobClient) getPageRangesDiffPreparer(snapshot *string, timeout *int32, prevsnapshot *string, prevSnapshotURL *string, rangeParameter *string, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string) (pipeline.Request, error) {
req, err := pipeline.NewRequest("GET", client.url, nil)
if err != nil {
return req, pipeline.NewError(err, "failed to create request")
@ -501,6 +527,9 @@ func (client pageBlobClient) getPageRangesDiffPreparer(snapshot *string, timeout
}
params.Set("comp", "pagelist")
req.URL.RawQuery = params.Encode()
if prevSnapshotURL != nil {
req.Header.Set("x-ms-previous-snapshot-url", *prevSnapshotURL)
}
if rangeParameter != nil {
req.Header.Set("x-ms-range", *rangeParameter)
}
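
getPageRangesDiffPreparer can now address the baseline snapshot by URL, which is how managed-disk diffs are requested. A minimal sketch with placeholder URLs and an assumed tag expression; the snapshot query format is the standard ?snapshot=<timestamp> form and authentication is omitted.

package main

import (
	"fmt"
	"net/http"
	"net/url"
)

func main() {
	// Placeholder URLs; request signing is omitted.
	blobURL := "https://myaccount.blob.core.windows.net/disks/disk1"
	prevSnapshotURL := "https://myaccount.blob.core.windows.net/disks/disk1?snapshot=2020-10-01T00:00:00.0000000Z"

	params := url.Values{}
	params.Set("comp", "pagelist")

	req, err := http.NewRequest("GET", blobURL+"?"+params.Encode(), nil)
	if err != nil {
		panic(err)
	}
	req.Header.Set("x-ms-version", "2019-12-12")
	// Return only the page ranges that changed between this snapshot and the current blob.
	req.Header.Set("x-ms-previous-snapshot-url", prevSnapshotURL)
	// Optional precondition: proceed only if the blob's tags satisfy this expression.
	req.Header.Set("x-ms-if-tags", `"disktype" = 'premium'`)

	fmt.Println(req.URL.String())
}
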
@ -519,6 +548,9 @@ func (client pageBlobClient) getPageRangesDiffPreparer(snapshot *string, timeout
if ifNoneMatch != nil {
req.Header.Set("If-None-Match", string(*ifNoneMatch))
}
if ifTags != nil {
req.Header.Set("x-ms-if-tags", *ifTags)
}
req.Header.Set("x-ms-version", ServiceVersion)
if requestID != nil {
req.Header.Set("x-ms-client-request-id", *requestID)
@ -563,20 +595,23 @@ func (client pageBlobClient) getPageRangesDiffResponder(resp pipeline.Response)
// more information, see Encryption at Rest for Azure Storage Services. encryptionKeySha256 is the SHA-256 hash of the
// provided encryption key. Must be provided if the x-ms-encryption-key header is provided. encryptionAlgorithm is the
// algorithm used to produce the encryption key hash. Currently, the only accepted value is "AES256". Must be provided
// if the x-ms-encryption-key header is provided. ifModifiedSince is specify this header value to operate only on a
// blob if it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to
// operate only on a blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value
// to operate only on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs
// without a matching value. requestID is provides a client-generated, opaque value with a 1 KB character limit that is
// recorded in the analytics logs when storage analytics logging is enabled.
func (client pageBlobClient) Resize(ctx context.Context, blobContentLength int64, timeout *int32, leaseID *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*PageBlobResizeResponse, error) {
// if the x-ms-encryption-key header is provided. encryptionScope is optional. Version 2019-07-07 and later. Specifies
// the name of the encryption scope to use to encrypt the data provided in the request. If not specified, encryption is
// performed with the default account encryption scope. For more information, see Encryption at Rest for Azure Storage
// Services. ifModifiedSince is specify this header value to operate only on a blob if it has been modified since the
// specified date/time. ifUnmodifiedSince is specify this header value to operate only on a blob if it has not been
// modified since the specified date/time. ifMatch is specify an ETag value to operate only on blobs with a matching
// value. ifNoneMatch is specify an ETag value to operate only on blobs without a matching value. requestID is provides
// a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
// analytics logging is enabled.
func (client pageBlobClient) Resize(ctx context.Context, blobContentLength int64, timeout *int32, leaseID *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*PageBlobResizeResponse, error) {
if err := validate([]validation{
{targetValue: timeout,
constraints: []constraint{{target: "timeout", name: null, rule: false,
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
return nil, err
}
req, err := client.resizePreparer(blobContentLength, timeout, leaseID, encryptionKey, encryptionKeySha256, encryptionAlgorithm, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID)
req, err := client.resizePreparer(blobContentLength, timeout, leaseID, encryptionKey, encryptionKeySha256, encryptionAlgorithm, encryptionScope, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID)
if err != nil {
return nil, err
}
@ -588,7 +623,7 @@ func (client pageBlobClient) Resize(ctx context.Context, blobContentLength int64
}
// resizePreparer prepares the Resize request.
func (client pageBlobClient) resizePreparer(blobContentLength int64, timeout *int32, leaseID *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
func (client pageBlobClient) resizePreparer(blobContentLength int64, timeout *int32, leaseID *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
req, err := pipeline.NewRequest("PUT", client.url, nil)
if err != nil {
return req, pipeline.NewError(err, "failed to create request")
@ -611,6 +646,9 @@ func (client pageBlobClient) resizePreparer(blobContentLength int64, timeout *in
if encryptionAlgorithm != EncryptionAlgorithmNone {
req.Header.Set("x-ms-encryption-algorithm", string(encryptionAlgorithm))
}
if encryptionScope != nil {
req.Header.Set("x-ms-encryption-scope", *encryptionScope)
}
if ifModifiedSince != nil {
req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123))
}
@ -738,16 +776,20 @@ func (client pageBlobClient) updateSequenceNumberResponder(resp pipeline.Respons
// Rest for Azure Storage Services. encryptionKeySha256 is the SHA-256 hash of the provided encryption key. Must be
// provided if the x-ms-encryption-key header is provided. encryptionAlgorithm is the algorithm used to produce the
// encryption key hash. Currently, the only accepted value is "AES256". Must be provided if the x-ms-encryption-key
// header is provided. ifSequenceNumberLessThanOrEqualTo is specify this header value to operate only on a blob if it
// has a sequence number less than or equal to the specified. ifSequenceNumberLessThan is specify this header value to
// operate only on a blob if it has a sequence number less than the specified. ifSequenceNumberEqualTo is specify this
// header value to operate only on a blob if it has the specified sequence number. ifModifiedSince is specify this
// header value to operate only on a blob if it has been modified since the specified date/time. ifUnmodifiedSince is
// specify this header value to operate only on a blob if it has not been modified since the specified date/time.
// ifMatch is specify an ETag value to operate only on blobs with a matching value. ifNoneMatch is specify an ETag
// value to operate only on blobs without a matching value. requestID is provides a client-generated, opaque value with
// a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled.
func (client pageBlobClient) UploadPages(ctx context.Context, body io.ReadSeeker, contentLength int64, transactionalContentMD5 []byte, transactionalContentCrc64 []byte, timeout *int32, rangeParameter *string, leaseID *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, ifSequenceNumberLessThanOrEqualTo *int64, ifSequenceNumberLessThan *int64, ifSequenceNumberEqualTo *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*PageBlobUploadPagesResponse, error) {
// header is provided. encryptionScope is optional. Version 2019-07-07 and later. Specifies the name of the encryption
// scope to use to encrypt the data provided in the request. If not specified, encryption is performed with the default
// account encryption scope. For more information, see Encryption at Rest for Azure Storage Services.
// ifSequenceNumberLessThanOrEqualTo is specify this header value to operate only on a blob if it has a sequence number
// less than or equal to the specified. ifSequenceNumberLessThan is specify this header value to operate only on a blob
// if it has a sequence number less than the specified. ifSequenceNumberEqualTo is specify this header value to operate
// only on a blob if it has the specified sequence number. ifModifiedSince is specify this header value to operate only
// on a blob if it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to
// operate only on a blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value
// to operate only on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs
// without a matching value. ifTags is specify a SQL where clause on blob tags to operate only on blobs with a matching
// value. requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded in the
// analytics logs when storage analytics logging is enabled.
func (client pageBlobClient) UploadPages(ctx context.Context, body io.ReadSeeker, contentLength int64, transactionalContentMD5 []byte, transactionalContentCrc64 []byte, timeout *int32, rangeParameter *string, leaseID *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, ifSequenceNumberLessThanOrEqualTo *int64, ifSequenceNumberLessThan *int64, ifSequenceNumberEqualTo *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string) (*PageBlobUploadPagesResponse, error) {
if err := validate([]validation{
{targetValue: body,
constraints: []constraint{{target: "body", name: null, rule: true, chain: nil}}},
@ -756,7 +798,7 @@ func (client pageBlobClient) UploadPages(ctx context.Context, body io.ReadSeeker
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
return nil, err
}
req, err := client.uploadPagesPreparer(body, contentLength, transactionalContentMD5, transactionalContentCrc64, timeout, rangeParameter, leaseID, encryptionKey, encryptionKeySha256, encryptionAlgorithm, ifSequenceNumberLessThanOrEqualTo, ifSequenceNumberLessThan, ifSequenceNumberEqualTo, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID)
req, err := client.uploadPagesPreparer(body, contentLength, transactionalContentMD5, transactionalContentCrc64, timeout, rangeParameter, leaseID, encryptionKey, encryptionKeySha256, encryptionAlgorithm, encryptionScope, ifSequenceNumberLessThanOrEqualTo, ifSequenceNumberLessThan, ifSequenceNumberEqualTo, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags, requestID)
if err != nil {
return nil, err
}
@ -768,7 +810,7 @@ func (client pageBlobClient) UploadPages(ctx context.Context, body io.ReadSeeker
}
// uploadPagesPreparer prepares the UploadPages request.
func (client pageBlobClient) uploadPagesPreparer(body io.ReadSeeker, contentLength int64, transactionalContentMD5 []byte, transactionalContentCrc64 []byte, timeout *int32, rangeParameter *string, leaseID *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, ifSequenceNumberLessThanOrEqualTo *int64, ifSequenceNumberLessThan *int64, ifSequenceNumberEqualTo *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
func (client pageBlobClient) uploadPagesPreparer(body io.ReadSeeker, contentLength int64, transactionalContentMD5 []byte, transactionalContentCrc64 []byte, timeout *int32, rangeParameter *string, leaseID *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, ifSequenceNumberLessThanOrEqualTo *int64, ifSequenceNumberLessThan *int64, ifSequenceNumberEqualTo *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string) (pipeline.Request, error) {
req, err := pipeline.NewRequest("PUT", client.url, body)
if err != nil {
return req, pipeline.NewError(err, "failed to create request")
@ -801,6 +843,9 @@ func (client pageBlobClient) uploadPagesPreparer(body io.ReadSeeker, contentLeng
if encryptionAlgorithm != EncryptionAlgorithmNone {
req.Header.Set("x-ms-encryption-algorithm", string(encryptionAlgorithm))
}
if encryptionScope != nil {
req.Header.Set("x-ms-encryption-scope", *encryptionScope)
}
if ifSequenceNumberLessThanOrEqualTo != nil {
req.Header.Set("x-ms-if-sequence-number-le", strconv.FormatInt(*ifSequenceNumberLessThanOrEqualTo, 10))
}
@ -822,6 +867,9 @@ func (client pageBlobClient) uploadPagesPreparer(body io.ReadSeeker, contentLeng
if ifNoneMatch != nil {
req.Header.Set("If-None-Match", string(*ifNoneMatch))
}
if ifTags != nil {
req.Header.Set("x-ms-if-tags", *ifTags)
}
req.Header.Set("x-ms-version", ServiceVersion)
if requestID != nil {
req.Header.Set("x-ms-client-request-id", *requestID)
@ -857,29 +905,32 @@ func (client pageBlobClient) uploadPagesResponder(resp pipeline.Response) (pipel
// For more information, see Encryption at Rest for Azure Storage Services. encryptionKeySha256 is the SHA-256 hash of
// the provided encryption key. Must be provided if the x-ms-encryption-key header is provided. encryptionAlgorithm is
// the algorithm used to produce the encryption key hash. Currently, the only accepted value is "AES256". Must be
// provided if the x-ms-encryption-key header is provided. leaseID is if specified, the operation only succeeds if the
// resource's lease is active and matches this ID. ifSequenceNumberLessThanOrEqualTo is specify this header value to
// operate only on a blob if it has a sequence number less than or equal to the specified. ifSequenceNumberLessThan is
// specify this header value to operate only on a blob if it has a sequence number less than the specified.
// ifSequenceNumberEqualTo is specify this header value to operate only on a blob if it has the specified sequence
// number. ifModifiedSince is specify this header value to operate only on a blob if it has been modified since the
// specified date/time. ifUnmodifiedSince is specify this header value to operate only on a blob if it has not been
// modified since the specified date/time. ifMatch is specify an ETag value to operate only on blobs with a matching
// value. ifNoneMatch is specify an ETag value to operate only on blobs without a matching value. sourceIfModifiedSince
// is specify this header value to operate only on a blob if it has been modified since the specified date/time.
// sourceIfUnmodifiedSince is specify this header value to operate only on a blob if it has not been modified since the
// specified date/time. sourceIfMatch is specify an ETag value to operate only on blobs with a matching value.
// sourceIfNoneMatch is specify an ETag value to operate only on blobs without a matching value. requestID is provides
// a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
// analytics logging is enabled.
func (client pageBlobClient) UploadPagesFromURL(ctx context.Context, sourceURL string, sourceRange string, contentLength int64, rangeParameter string, sourceContentMD5 []byte, sourceContentcrc64 []byte, timeout *int32, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, leaseID *string, ifSequenceNumberLessThanOrEqualTo *int64, ifSequenceNumberLessThan *int64, ifSequenceNumberEqualTo *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, sourceIfModifiedSince *time.Time, sourceIfUnmodifiedSince *time.Time, sourceIfMatch *ETag, sourceIfNoneMatch *ETag, requestID *string) (*PageBlobUploadPagesFromURLResponse, error) {
// provided if the x-ms-encryption-key header is provided. encryptionScope is optional. Version 2019-07-07 and later.
// Specifies the name of the encryption scope to use to encrypt the data provided in the request. If not specified,
// encryption is performed with the default account encryption scope. For more information, see Encryption at Rest for
// Azure Storage Services. leaseID is if specified, the operation only succeeds if the resource's lease is active and
// matches this ID. ifSequenceNumberLessThanOrEqualTo is specify this header value to operate only on a blob if it has
// a sequence number less than or equal to the specified. ifSequenceNumberLessThan is specify this header value to
// operate only on a blob if it has a sequence number less than the specified. ifSequenceNumberEqualTo is specify this
// header value to operate only on a blob if it has the specified sequence number. ifModifiedSince is specify this
// header value to operate only on a blob if it has been modified since the specified date/time. ifUnmodifiedSince is
// specify this header value to operate only on a blob if it has not been modified since the specified date/time.
// ifMatch is specify an ETag value to operate only on blobs with a matching value. ifNoneMatch is specify an ETag
// value to operate only on blobs without a matching value. ifTags is specify a SQL where clause on blob tags to
// operate only on blobs with a matching value. sourceIfModifiedSince is specify this header value to operate only on a
// blob if it has been modified since the specified date/time. sourceIfUnmodifiedSince is specify this header value to
// operate only on a blob if it has not been modified since the specified date/time. sourceIfMatch is specify an ETag
// value to operate only on blobs with a matching value. sourceIfNoneMatch is specify an ETag value to operate only on
// blobs without a matching value. requestID is provides a client-generated, opaque value with a 1 KB character limit
// that is recorded in the analytics logs when storage analytics logging is enabled.
func (client pageBlobClient) UploadPagesFromURL(ctx context.Context, sourceURL string, sourceRange string, contentLength int64, rangeParameter string, sourceContentMD5 []byte, sourceContentcrc64 []byte, timeout *int32, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, leaseID *string, ifSequenceNumberLessThanOrEqualTo *int64, ifSequenceNumberLessThan *int64, ifSequenceNumberEqualTo *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, sourceIfModifiedSince *time.Time, sourceIfUnmodifiedSince *time.Time, sourceIfMatch *ETag, sourceIfNoneMatch *ETag, requestID *string) (*PageBlobUploadPagesFromURLResponse, error) {
if err := validate([]validation{
{targetValue: timeout,
constraints: []constraint{{target: "timeout", name: null, rule: false,
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
return nil, err
}
req, err := client.uploadPagesFromURLPreparer(sourceURL, sourceRange, contentLength, rangeParameter, sourceContentMD5, sourceContentcrc64, timeout, encryptionKey, encryptionKeySha256, encryptionAlgorithm, leaseID, ifSequenceNumberLessThanOrEqualTo, ifSequenceNumberLessThan, ifSequenceNumberEqualTo, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatch, sourceIfNoneMatch, requestID)
req, err := client.uploadPagesFromURLPreparer(sourceURL, sourceRange, contentLength, rangeParameter, sourceContentMD5, sourceContentcrc64, timeout, encryptionKey, encryptionKeySha256, encryptionAlgorithm, encryptionScope, leaseID, ifSequenceNumberLessThanOrEqualTo, ifSequenceNumberLessThan, ifSequenceNumberEqualTo, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags, sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatch, sourceIfNoneMatch, requestID)
if err != nil {
return nil, err
}
@ -891,7 +942,7 @@ func (client pageBlobClient) UploadPagesFromURL(ctx context.Context, sourceURL s
}
// uploadPagesFromURLPreparer prepares the UploadPagesFromURL request.
func (client pageBlobClient) uploadPagesFromURLPreparer(sourceURL string, sourceRange string, contentLength int64, rangeParameter string, sourceContentMD5 []byte, sourceContentcrc64 []byte, timeout *int32, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, leaseID *string, ifSequenceNumberLessThanOrEqualTo *int64, ifSequenceNumberLessThan *int64, ifSequenceNumberEqualTo *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, sourceIfModifiedSince *time.Time, sourceIfUnmodifiedSince *time.Time, sourceIfMatch *ETag, sourceIfNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
func (client pageBlobClient) uploadPagesFromURLPreparer(sourceURL string, sourceRange string, contentLength int64, rangeParameter string, sourceContentMD5 []byte, sourceContentcrc64 []byte, timeout *int32, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, leaseID *string, ifSequenceNumberLessThanOrEqualTo *int64, ifSequenceNumberLessThan *int64, ifSequenceNumberEqualTo *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, sourceIfModifiedSince *time.Time, sourceIfUnmodifiedSince *time.Time, sourceIfMatch *ETag, sourceIfNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
req, err := pipeline.NewRequest("PUT", client.url, nil)
if err != nil {
return req, pipeline.NewError(err, "failed to create request")
@ -921,6 +972,9 @@ func (client pageBlobClient) uploadPagesFromURLPreparer(sourceURL string, source
if encryptionAlgorithm != EncryptionAlgorithmNone {
req.Header.Set("x-ms-encryption-algorithm", string(encryptionAlgorithm))
}
if encryptionScope != nil {
req.Header.Set("x-ms-encryption-scope", *encryptionScope)
}
if leaseID != nil {
req.Header.Set("x-ms-lease-id", *leaseID)
}
@ -945,6 +999,9 @@ func (client pageBlobClient) uploadPagesFromURLPreparer(sourceURL string, source
if ifNoneMatch != nil {
req.Header.Set("If-None-Match", string(*ifNoneMatch))
}
if ifTags != nil {
req.Header.Set("x-ms-if-tags", *ifTags)
}
if sourceIfModifiedSince != nil {
req.Header.Set("x-ms-source-if-modified-since", (*sourceIfModifiedSince).In(gmt).Format(time.RFC1123))
}
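UploadPagesFromURL carries conditional headers for both the destination blob (If-Match, x-ms-if-tags, the sequence-number checks) and the copy source (the x-ms-source-if-* family). The date-based conditions are formatted as RFC 1123 timestamps in GMT, as the (*t).In(gmt).Format(time.RFC1123) calls above show. A small stand-alone illustration, assuming the package-level gmt variable in the generated code is simply a GMT time.Location:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// The preparers format conditional date headers as RFC 1123 in GMT,
	// e.g. "Wed, 28 Oct 2020 12:00:00 GMT". time.FixedZone("GMT", 0) is an
	// equivalent stand-in for the generated code's gmt location.
	gmt := time.FixedZone("GMT", 0)
	since := time.Date(2020, time.October, 28, 12, 0, 0, 0, time.UTC)
	fmt.Println(since.In(gmt).Format(time.RFC1123))
	// Output: Wed, 28 Oct 2020 12:00:00 GMT
}
```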


@ -25,6 +25,98 @@ func newServiceClient(url url.URL, p pipeline.Pipeline) serviceClient {
return serviceClient{newManagementClient(url, p)}
}
// FilterBlobs the Filter Blobs operation enables callers to list blobs across all containers whose tags match a given
// search expression. Filter blobs searches across all containers within a storage account but can be scoped within
// the expression to a single container.
//
// timeout is the timeout parameter is expressed in seconds. For more information, see <a
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
// Timeouts for Blob Service Operations.</a> requestID is provides a client-generated, opaque value with a 1 KB
// character limit that is recorded in the analytics logs when storage analytics logging is enabled. where is filters
// the results to return only blobs whose tags match the specified expression. marker is a string value
// that identifies the portion of the list of containers to be returned with the next listing operation. The operation
// returns the NextMarker value within the response body if the listing operation did not return all containers
// remaining to be listed with the current page. The NextMarker value can be used as the value for the marker parameter
// in a subsequent call to request the next page of list items. The marker value is opaque to the client. maxresults is
// specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a
// value greater than 5000, the server will return up to 5000 items. Note that if the listing operation crosses a
// partition boundary, then the service will return a continuation token for retrieving the remainder of the results.
// For this reason, it is possible that the service will return fewer results than specified by maxresults, or than the
// default of 5000.
func (client serviceClient) FilterBlobs(ctx context.Context, timeout *int32, requestID *string, where *string, marker *string, maxresults *int32) (*FilterBlobSegment, error) {
if err := validate([]validation{
{targetValue: timeout,
constraints: []constraint{{target: "timeout", name: null, rule: false,
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}},
{targetValue: maxresults,
constraints: []constraint{{target: "maxresults", name: null, rule: false,
chain: []constraint{{target: "maxresults", name: inclusiveMinimum, rule: 1, chain: nil}}}}}}); err != nil {
return nil, err
}
req, err := client.filterBlobsPreparer(timeout, requestID, where, marker, maxresults)
if err != nil {
return nil, err
}
resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.filterBlobsResponder}, req)
if err != nil {
return nil, err
}
return resp.(*FilterBlobSegment), err
}
// filterBlobsPreparer prepares the FilterBlobs request.
func (client serviceClient) filterBlobsPreparer(timeout *int32, requestID *string, where *string, marker *string, maxresults *int32) (pipeline.Request, error) {
req, err := pipeline.NewRequest("GET", client.url, nil)
if err != nil {
return req, pipeline.NewError(err, "failed to create request")
}
params := req.URL.Query()
if timeout != nil {
params.Set("timeout", strconv.FormatInt(int64(*timeout), 10))
}
if where != nil && len(*where) > 0 {
params.Set("where", *where)
}
if marker != nil && len(*marker) > 0 {
params.Set("marker", *marker)
}
if maxresults != nil {
params.Set("maxresults", strconv.FormatInt(int64(*maxresults), 10))
}
params.Set("comp", "blobs")
req.URL.RawQuery = params.Encode()
req.Header.Set("x-ms-version", ServiceVersion)
if requestID != nil {
req.Header.Set("x-ms-client-request-id", *requestID)
}
return req, nil
}
// filterBlobsResponder handles the response to the FilterBlobs request.
func (client serviceClient) filterBlobsResponder(resp pipeline.Response) (pipeline.Response, error) {
err := validateResponse(resp, http.StatusOK)
if resp == nil {
return nil, err
}
result := &FilterBlobSegment{rawResponse: resp.Response()}
if err != nil {
return result, err
}
defer resp.Response().Body.Close()
b, err := ioutil.ReadAll(resp.Response().Body)
if err != nil {
return result, err
}
if len(b) > 0 {
b = removeBOM(b)
err = xml.Unmarshal(b, result)
if err != nil {
return result, NewResponseError(err, resp.Response(), "failed to unmarshal response body")
}
}
return result, nil
}
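Putting the preparer and responder together: FilterBlobs issues a GET against the account URL with comp=blobs plus the optional where, marker and maxresults query parameters, and unmarshals the XML body into a FilterBlobSegment. The snippet below only reconstructs the query string the preparer produces, using a hypothetical account URL and tag expression:

```go
package main

import (
	"fmt"
	"net/url"
	"strconv"
)

func main() {
	// Hypothetical account endpoint; the generated client uses client.url.
	base, err := url.Parse("https://example.blob.core.windows.net/")
	if err != nil {
		panic(err)
	}

	where := "\"Project\" = 'Contoso'"
	maxresults := int32(100)

	// Same parameters filterBlobsPreparer sets before encoding the query.
	params := base.Query()
	params.Set("comp", "blobs")
	params.Set("where", where)
	params.Set("maxresults", strconv.FormatInt(int64(maxresults), 10))
	base.RawQuery = params.Encode()

	fmt.Println(base.String())
	// https://example.blob.core.windows.net/?comp=blobs&maxresults=100&where=%22Project%22+%3D+%27Contoso%27
}
```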
// GetAccountInfo returns the sku name and account kind
func (client serviceClient) GetAccountInfo(ctx context.Context) (*ServiceGetAccountInfoResponse, error) {
req, err := client.getAccountInfoPreparer()
@ -300,7 +392,7 @@ func (client serviceClient) getUserDelegationKeyResponder(resp pipeline.Response
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
// Timeouts for Blob Service Operations.</a> requestID is provides a client-generated, opaque value with a 1 KB
// character limit that is recorded in the analytics logs when storage analytics logging is enabled.
func (client serviceClient) ListContainersSegment(ctx context.Context, prefix *string, marker *string, maxresults *int32, include ListContainersIncludeType, timeout *int32, requestID *string) (*ListContainersSegmentResponse, error) {
func (client serviceClient) ListContainersSegment(ctx context.Context, prefix *string, marker *string, maxresults *int32, include []ListContainersIncludeType, timeout *int32, requestID *string) (*ListContainersSegmentResponse, error) {
if err := validate([]validation{
{targetValue: maxresults,
constraints: []constraint{{target: "maxresults", name: null, rule: false,
@ -322,7 +414,7 @@ func (client serviceClient) ListContainersSegment(ctx context.Context, prefix *s
}
// listContainersSegmentPreparer prepares the ListContainersSegment request.
func (client serviceClient) listContainersSegmentPreparer(prefix *string, marker *string, maxresults *int32, include ListContainersIncludeType, timeout *int32, requestID *string) (pipeline.Request, error) {
func (client serviceClient) listContainersSegmentPreparer(prefix *string, marker *string, maxresults *int32, include []ListContainersIncludeType, timeout *int32, requestID *string) (pipeline.Request, error) {
req, err := pipeline.NewRequest("GET", client.url, nil)
if err != nil {
return req, pipeline.NewError(err, "failed to create request")
@ -337,8 +429,8 @@ func (client serviceClient) listContainersSegmentPreparer(prefix *string, marker
if maxresults != nil {
params.Set("maxresults", strconv.FormatInt(int64(*maxresults), 10))
}
if include != ListContainersIncludeNone {
params.Set("include", string(include))
if include != nil && len(include) > 0 {
params.Set("include", joinConst(include, ","))
}
if timeout != nil {
params.Set("timeout", strconv.FormatInt(int64(*timeout), 10))


@ -5,7 +5,7 @@ package azblob
// UserAgent returns the UserAgent string to use when sending http.Requests.
func UserAgent() string {
return "Azure-SDK-For-Go/0.0.0 azblob/2019-02-02"
return "Azure-SDK-For-Go/0.0.0 azblob/2019-12-12"
}
// Version returns the semantic version (see http://semver.org) of the client.

azure-pipelines.yml (new file)

@ -0,0 +1,28 @@
trigger:
- master
- dev
pool:
vmImage: 'ubuntu-latest'
steps:
- task: GoTool@0
inputs:
version: '1.15'
- script: |
go build ./azblob
displayName: 'Compile the SDK'
- script: |
go test -race -short -cover -v ./azblob
env:
ACCOUNT_NAME: $(ACCOUNT_NAME)
ACCOUNT_KEY: $(ACCOUNT_KEY)
BLOB_STORAGE_ACCOUNT_NAME: $(BLOB_STORAGE_ACCOUNT_NAME)
BLOB_STORAGE_ACCOUNT_KEY: $(BLOB_STORAGE_ACCOUNT_KEY)
PREMIUM_ACCOUNT_NAME: $(PREMIUM_ACCOUNT_NAME)
PREMIUM_ACCOUNT_KEY: $(PREMIUM_ACCOUNT_KEY)
SECONDARY_ACCOUNT_NAME: $(SECONDARY_ACCOUNT_NAME)
SECONDARY_ACCOUNT_KEY: $(SECONDARY_ACCOUNT_KEY)
APPLICATION_ID: $(APPLICATION_ID)
CLIENT_SECRET: $(CLIENT_SECRET)
TENANT_ID: $(TENANT_ID)
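The pipeline passes the storage account credentials to go test through environment variables. A sketch of how a test helper might read them; the function name and messages here are illustrative, not taken from the repository's test code:

```go
package main

import (
	"fmt"
	"os"
)

// accountCredsFromEnv pulls the same variables the pipeline exports for
// `go test ./azblob`.
func accountCredsFromEnv() (name, key string, err error) {
	name, key = os.Getenv("ACCOUNT_NAME"), os.Getenv("ACCOUNT_KEY")
	if name == "" || key == "" {
		return "", "", fmt.Errorf("ACCOUNT_NAME and ACCOUNT_KEY must be set to run the integration tests")
	}
	return name, key, nil
}

func main() {
	if name, _, err := accountCredsFromEnv(); err != nil {
		fmt.Println("skipping:", err)
	} else {
		fmt.Println("running tests against account", name)
	}
}
```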

go.mod

@ -1,13 +1,12 @@
module github.com/Azure/azure-storage-blob-go
go 1.13
go 1.15
require (
github.com/Azure/azure-pipeline-go v0.2.2
github.com/Azure/go-autorest/autorest/adal v0.8.3
github.com/Azure/azure-pipeline-go v0.2.3
github.com/Azure/go-autorest/autorest/adal v0.9.2
github.com/google/uuid v1.1.1
github.com/kr/pretty v0.1.0 // indirect
github.com/pkg/errors v0.9.1 // indirect
golang.org/x/sys v0.0.0-20190412213103-97732733099d
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e // indirect
golang.org/x/sys v0.0.0-20200828194041-157a740278f4
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f
)

go.sum

@ -1,42 +1,42 @@
github.com/Azure/azure-pipeline-go v0.2.2 h1:6oiIS9yaG6XCCzhgAgKFfIWyo4LLCiDhZot6ltoThhY=
github.com/Azure/azure-pipeline-go v0.2.2/go.mod h1:4rQ/NZncSvGqNkkOsNpOU1tgoNuIlp9AfUH5G1tvCHc=
github.com/Azure/go-autorest/autorest v0.9.0 h1:MRvx8gncNaXJqOoLmhNjUAKh33JJF8LyxPhomEtOsjs=
github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI=
github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0=
github.com/Azure/go-autorest/autorest/adal v0.8.3 h1:O1AGG9Xig71FxdX9HO5pGNyZ7TbSyHaVg+5eJO/jSGw=
github.com/Azure/go-autorest/autorest/adal v0.8.3/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q=
github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA=
github.com/Azure/go-autorest/autorest/date v0.2.0 h1:yW+Zlqf26583pE43KhfnhFcdmSWlm5Ew6bxipnr/tbM=
github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g=
github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
github.com/Azure/go-autorest/autorest/mocks v0.3.0 h1:qJumjCaCudz+OcqE9/XtEPfvtOjOmKaui4EOpFI6zZc=
github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM=
github.com/Azure/go-autorest/logger v0.1.0 h1:ruG4BSDXONFRrZZJ2GUXDiUyVpayPmb1GnWeHDdaNKY=
github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc=
github.com/Azure/go-autorest/tracing v0.5.0 h1:TRn4WjSnkcSy5AEG3pnbtFSwNtwzjr4VYyQflFE619k=
github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk=
github.com/Azure/azure-pipeline-go v0.2.3 h1:7U9HBg1JFK3jHl5qmo4CTZKFTVgMwdFHMVtCdfBE21U=
github.com/Azure/azure-pipeline-go v0.2.3/go.mod h1:x841ezTBIMG6O3lAcl8ATHnsOPVl2bqk7S3ta6S6u4k=
github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs=
github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
github.com/Azure/go-autorest/autorest/adal v0.9.2 h1:Aze/GQeAN1RRbGmnUJvUj+tFGBzFdIg3293/A9rbxC4=
github.com/Azure/go-autorest/autorest/adal v0.9.2/go.mod h1:/3SMAM86bP6wC9Ev35peQDUeqFZBMH07vvUOmg4z/fE=
github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw=
github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74=
github.com/Azure/go-autorest/autorest/mocks v0.4.1 h1:K0laFcLE6VLTOwNgSxaGbUcLPuGXlNkbVvq4cW4nIHk=
github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo=
github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM=
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY=
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/mattn/go-ieproxy v0.0.0-20190702010315-6dee0af9227d h1:oNAwILwmgWKFpuU+dXvI6dl9jG2mAWAZLX3r9s0PPiw=
github.com/mattn/go-ieproxy v0.0.0-20190702010315-6dee0af9227d/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc=
github.com/mattn/go-ieproxy v0.0.1 h1:qiyop7gCflfhwCzGyeT0gro3sF9AIg9HU98JORTkqfI=
github.com/mattn/go-ieproxy v0.0.1/go.mod h1:pYabZ6IHcRpFh7vIaLfK7rdcWgFEb3SFJ6/gNWuh88E=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3 h1:0GoQqolDA55aaLxZyTzK/Y2ePZzZTUrRacwib7cNsYQ=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20191112182307-2180aed22343 h1:00ohfJ4K98s3m6BGUoBd8nyfp4Yl0GoIKvw5abItTjI=
golang.org/x/net v0.0.0-20191112182307-2180aed22343/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d h1:+R4KGOnez64A81RvjARKc4UT5/tI9ujCIVX+P5KiHuI=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
golang.org/x/sys v0.0.0-20191112214154-59a1497f0cea/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200828194041-157a740278f4 h1:kCCpuwSAoYJPkNc6x0xT9yTtV4oKtARo4RGBQWOfg9E=
golang.org/x/sys v0.0.0-20200828194041-157a740278f4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU=
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=

Diff not shown due to its large size.