Renamed APIs with significant changes

zezha-msft 2018-04-23 15:28:52 -07:00
Parent 70edea0000
Commit b67c506542
57 changed files with 8554 additions and 5659 deletions

134
2017-07-29/azblob/access_conditions.go Normal file → Executable file

@ -1,67 +1,67 @@
package azblob

import (
    "time"
)

// HTTPAccessConditions identifies standard HTTP access conditions which you optionally set.
type HTTPAccessConditions struct {
    IfModifiedSince   time.Time
    IfUnmodifiedSince time.Time
    IfMatch           ETag
    IfNoneMatch       ETag
}

// pointers is for internal infrastructure. It returns the fields as pointers.
func (ac HTTPAccessConditions) pointers() (ims *time.Time, ius *time.Time, ime *ETag, inme *ETag) {
    if !ac.IfModifiedSince.IsZero() {
        ims = &ac.IfModifiedSince
    }
    if !ac.IfUnmodifiedSince.IsZero() {
        ius = &ac.IfUnmodifiedSince
    }
    if ac.IfMatch != ETagNone {
        ime = &ac.IfMatch
    }
    if ac.IfNoneMatch != ETagNone {
        inme = &ac.IfNoneMatch
    }
    return
}

// ContainerAccessConditions identifies container-specific access conditions which you optionally set.
type ContainerAccessConditions struct {
    HTTPAccessConditions
    LeaseAccessConditions
}

// BlobAccessConditions identifies blob-specific access conditions which you optionally set.
type BlobAccessConditions struct {
    HTTPAccessConditions
    LeaseAccessConditions
    AppendBlobAccessConditions
    PageBlobAccessConditions
}

// LeaseAccessConditions identifies lease access conditions for a container or blob which you optionally set.
type LeaseAccessConditions struct {
    LeaseID string
}

// pointers is for internal infrastructure. It returns the fields as pointers.
func (ac LeaseAccessConditions) pointers() (leaseID *string) {
    if ac.LeaseID != "" {
        leaseID = &ac.LeaseID
    }
    return
}

/*
// getInt32 is for internal infrastructure. It is used with access condition values where
// 0 (the default setting) is meaningful. The library interprets 0 as "do not send the header",
// and the privately-stored field in the access condition object is stored as +1 higher than desired.
// This method returns true if the value is > 0 (explicitly set), and the stored value - 1 (the desired value that was set).
func getInt32(value int32) (bool, int32) {
    return value > 0, value - 1
}
*/
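As a usage sketch (the ETag literal and the blobURL/ctx variables below are hypothetical): a caller populates only the conditions it cares about, and pointers() turns zero values into omitted headers:

    // Sketch: only read the blob if it still matches a previously captured ETag.
    ac := BlobAccessConditions{
        HTTPAccessConditions: HTTPAccessConditions{
            IfMatch: ETag("0x8D4BCC2E4835CD0"), // hypothetical ETag from an earlier response
        },
        // LeaseAccessConditions.LeaseID left "" so no lease header is sent
    }
    props, err := blobURL.GetProperties(ctx, ac) // GetProperties(ctx, ac) as shown later in this commit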


@ -0,0 +1,79 @@
package azblob

import "sync/atomic"

// AtomicMorpherInt32 identifies a method passed to and invoked by the AtomicMorphInt32 function.
// The AtomicMorpher callback is passed a startValue and, based on this value, it returns
// what the new value should be and the result that AtomicMorph should return to its caller.
type AtomicMorpherInt32 func(startVal int32) (val int32, morphResult interface{})

// AtomicMorphInt32 atomically morphs target into the new value (and result) as indicated by the AtomicMorpher callback function.
func AtomicMorphInt32(target *int32, morpher AtomicMorpherInt32) interface{} {
    if target == nil || morpher == nil {
        panic("target and morpher must not be nil")
    }
    for {
        currentVal := atomic.LoadInt32(target)
        desiredVal, morphResult := morpher(currentVal)
        if atomic.CompareAndSwapInt32(target, currentVal, desiredVal) {
            return morphResult
        }
    }
}

// AtomicMorpherUint32 identifies a method passed to and invoked by the AtomicMorphUint32 function.
// The AtomicMorpher callback is passed a startValue and, based on this value, it returns
// what the new value should be and the result that AtomicMorph should return to its caller.
type AtomicMorpherUint32 func(startVal uint32) (val uint32, morphResult interface{})

// AtomicMorphUint32 atomically morphs target into the new value (and result) as indicated by the AtomicMorpher callback function.
func AtomicMorphUint32(target *uint32, morpher AtomicMorpherUint32) interface{} {
    if target == nil || morpher == nil {
        panic("target and morpher must not be nil")
    }
    for {
        currentVal := atomic.LoadUint32(target)
        desiredVal, morphResult := morpher(currentVal)
        if atomic.CompareAndSwapUint32(target, currentVal, desiredVal) {
            return morphResult
        }
    }
}

// AtomicMorpherInt64 identifies a method passed to and invoked by the AtomicMorphInt64 function.
// The AtomicMorpher callback is passed a startValue and, based on this value, it returns
// what the new value should be and the result that AtomicMorph should return to its caller.
type AtomicMorpherInt64 func(startVal int64) (val int64, morphResult interface{})

// AtomicMorphInt64 atomically morphs target into the new value (and result) as indicated by the AtomicMorpher callback function.
func AtomicMorphInt64(target *int64, morpher AtomicMorpherInt64) interface{} {
    if target == nil || morpher == nil {
        panic("target and morpher must not be nil")
    }
    for {
        currentVal := atomic.LoadInt64(target)
        desiredVal, morphResult := morpher(currentVal)
        if atomic.CompareAndSwapInt64(target, currentVal, desiredVal) {
            return morphResult
        }
    }
}

// AtomicMorpherUint64 identifies a method passed to and invoked by the AtomicMorphUint64 function.
// The AtomicMorpher callback is passed a startValue and, based on this value, it returns
// what the new value should be and the result that AtomicMorph should return to its caller.
type AtomicMorpherUint64 func(startVal uint64) (val uint64, morphResult interface{})

// AtomicMorphUint64 atomically morphs target into the new value (and result) as indicated by the AtomicMorpher callback function.
func AtomicMorphUint64(target *uint64, morpher AtomicMorpherUint64) interface{} {
    if target == nil || morpher == nil {
        panic("target and morpher must not be nil")
    }
    for {
        currentVal := atomic.LoadUint64(target)
        desiredVal, morphResult := morpher(currentVal)
        if atomic.CompareAndSwapUint64(target, currentVal, desiredVal) {
            return morphResult
        }
    }
}
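As a usage sketch (the counter and limit are illustrative): AtomicMorphInt32 retries a compare-and-swap until the morpher's proposed value wins, then returns the morpher's second result to the caller:

    // Sketch: atomically increment a counter but cap it at a limit,
    // reporting whether the increment was actually applied.
    var counter int32
    const limit = 10
    applied := AtomicMorphInt32(&counter, func(startVal int32) (int32, interface{}) {
        if startVal >= limit {
            return startVal, false // leave the value unchanged; report "not applied"
        }
        return startVal + 1, true
    }).(bool)
    _ = applied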

522
2017-07-29/azblob/highlevel.go Normal file → Executable file

@ -5,14 +5,14 @@ import (
"encoding/base64" "encoding/base64"
"fmt" "fmt"
"io" "io"
"net"
"net/http" "net/http"
"bytes" "bytes"
"github.com/Azure/azure-pipeline-go/pipeline"
"os" "os"
"sync" "sync"
"time" "time"
"github.com/Azure/azure-pipeline-go/pipeline"
) )
// CommonResponseHeaders returns the headers common to all blob REST API responses. // CommonResponseHeaders returns the headers common to all blob REST API responses.
@ -38,13 +38,13 @@ type CommonResponse interface {
 // UploadToBlockBlobOptions identifies options used by the UploadBufferToBlockBlob and UploadFileToBlockBlob functions.
 type UploadToBlockBlobOptions struct {
-	// BlockSize specifies the block size to use; the default (and maximum size) is BlockBlobMaxPutBlockBytes.
-	BlockSize uint64
-	// Progress is a function that is invoked periodically as bytes are sent in a PutBlock call to the BlockBlobURL.
+	// BlockSize specifies the block size to use; the default (and maximum size) is BlockBlobMaxStageBlockBytes.
+	BlockSize int64
+	// Progress is a function that is invoked periodically as bytes are sent to the BlockBlobURL.
 	Progress pipeline.ProgressReceiver
-	// BlobHTTPHeaders indicates the HTTP headers to be associated with the blob when PutBlockList is called.
+	// BlobHTTPHeaders indicates the HTTP headers to be associated with the blob.
 	BlobHTTPHeaders BlobHTTPHeaders
 	// Metadata indicates the metadata to be associated with the blob when PutBlockList is called.
@ -61,96 +61,69 @@ type UploadToBlockBlobOptions struct {
 func UploadBufferToBlockBlob(ctx context.Context, b []byte,
 	blockBlobURL BlockBlobURL, o UploadToBlockBlobOptions) (CommonResponse, error) {
-	if o.BlockSize < 0 || o.BlockSize > BlockBlobMaxPutBlockBytes {
-		panic(fmt.Sprintf("BlockSize option must be > 0 and <= %d", BlockBlobMaxPutBlockBytes))
+	// Validate parameters and set defaults
+	if o.BlockSize < 0 || o.BlockSize > BlockBlobMaxUploadBlobBytes {
+		panic(fmt.Sprintf("BlockSize option must be > 0 and <= %d", BlockBlobMaxUploadBlobBytes))
 	}
 	if o.BlockSize == 0 {
-		o.BlockSize = BlockBlobMaxPutBlockBytes // Default if unspecified
+		o.BlockSize = BlockBlobMaxUploadBlobBytes // Default if unspecified
 	}
-	size := uint64(len(b))
-	if size <= BlockBlobMaxPutBlobBytes {
-		// If the size can fit in 1 Put Blob call, do it this way
+	size := int64(len(b))
+	if size <= BlockBlobMaxUploadBlobBytes {
+		// If the size can fit in 1 Upload call, do it this way
 		var body io.ReadSeeker = bytes.NewReader(b)
 		if o.Progress != nil {
 			body = pipeline.NewRequestBodyProgress(body, o.Progress)
 		}
-		return blockBlobURL.PutBlob(ctx, body, o.BlobHTTPHeaders, o.Metadata, o.AccessConditions)
+		return blockBlobURL.Upload(ctx, body, o.BlobHTTPHeaders, o.Metadata, o.AccessConditions)
 	}
-	parallelism := o.Parallelism
-	if parallelism == 0 {
-		parallelism = 5 // default parallelism
-	}
-	var numBlocks uint16 = uint16(((size - 1) / o.BlockSize) + 1)
+	var numBlocks = uint16(((size - 1) / o.BlockSize) + 1)
 	if numBlocks > BlockBlobMaxBlocks {
-		panic(fmt.Sprintf("The streamSize is too big or the BlockSize is too small; the number of blocks must be <= %d", BlockBlobMaxBlocks))
+		panic(fmt.Sprintf("The buffer's size is too big or the BlockSize is too small; the number of blocks must be <= %d", BlockBlobMaxBlocks))
 	}
-	ctx, cancel := context.WithCancel(ctx)
-	defer cancel()
-	blockIDList := make([]string, numBlocks) // Base 64 encoded block IDs
-	blockSize := o.BlockSize
-	putBlockChannel := make(chan func() (*BlockBlobsPutBlockResponse, error), parallelism) // Create the channel that releases 'parallelism' goroutines concurrently
-	putBlockResponseChannel := make(chan error, numBlocks)                                 // Holds each Put Block's response
-	// Create the goroutines that process each Put Block (in parallel)
-	for g := uint16(0); g < parallelism; g++ {
-		go func() {
-			for f := range putBlockChannel {
-				_, err := f()
-				putBlockResponseChannel <- err
-			}
-		}()
-	}
-	blobProgress := int64(0)
+	blockIDList := make([]string, numBlocks) // Base-64 encoded block IDs
+	progress := int64(0)
 	progressLock := &sync.Mutex{}
-	// Add each put block to the channel
-	for blockNum := uint16(0); blockNum < numBlocks; blockNum++ {
-		if blockNum == numBlocks-1 { // Last block
-			blockSize = size - (uint64(blockNum) * o.BlockSize) // Remove size of all uploaded blocks from total
-		}
-		offset := uint64(blockNum) * o.BlockSize
-		// Prepare to read the proper block/section of the buffer
-		var body io.ReadSeeker = bytes.NewReader(b[offset : offset+blockSize])
-		capturedBlockNum := blockNum
-		if o.Progress != nil {
-			blockProgress := int64(0)
-			body = pipeline.NewRequestBodyProgress(body,
-				func(bytesTransferred int64) {
-					diff := bytesTransferred - blockProgress
-					blockProgress = bytesTransferred
-					progressLock.Lock()
-					blobProgress += diff
-					o.Progress(blobProgress)
-					progressLock.Unlock()
-				})
-		}
-		// Block IDs are unique values to avoid issue if 2+ clients are uploading blocks
-		// at the same time causing PutBlockList to get a mix of blocks from all the clients.
-		blockIDList[blockNum] = base64.StdEncoding.EncodeToString(newUUID().bytes())
-		putBlockChannel <- func() (*BlockBlobsPutBlockResponse, error) {
-			return blockBlobURL.PutBlock(ctx, blockIDList[capturedBlockNum], body, o.AccessConditions.LeaseAccessConditions)
-		}
-	}
-	close(putBlockChannel)
-	// Wait for the put blocks to complete
-	for blockNum := uint16(0); blockNum < numBlocks; blockNum++ {
-		responseError := <-putBlockResponseChannel
-		if responseError != nil {
-			cancel()                  // As soon as any Put Block fails, cancel all remaining Put Block calls
-			return nil, responseError // No need to process any more responses
-		}
+	err := doBatchTransfer(ctx, batchTransferOptions{
+		operationName: "UploadBufferToBlockBlob",
+		transferSize:  size,
+		chunkSize:     o.BlockSize,
+		parallelism:   o.Parallelism,
+		operation: func(offset int64, count int64) error {
+			// This function is called once per block.
+			// It is passed this block's offset within the buffer and its count of bytes.
+			// Prepare to read the proper block/section of the buffer.
+			var body io.ReadSeeker = bytes.NewReader(b[offset : offset+count])
+			blockNum := offset / o.BlockSize
+			if o.Progress != nil {
+				blockProgress := int64(0)
+				body = pipeline.NewRequestBodyProgress(body,
+					func(bytesTransferred int64) {
+						diff := bytesTransferred - blockProgress
+						blockProgress = bytesTransferred
+						progressLock.Lock() // 1 goroutine at a time gets a progress report
+						progress += diff
+						o.Progress(progress)
+						progressLock.Unlock()
+					})
+			}
+			// Block IDs are unique values to avoid issue if 2+ clients are uploading blocks
+			// at the same time causing PutBlockList to get a mix of blocks from all the clients.
+			blockIDList[blockNum] = base64.StdEncoding.EncodeToString(newUUID().bytes())
+			_, err := blockBlobURL.StageBlock(ctx, blockIDList[blockNum], body, o.AccessConditions.LeaseAccessConditions)
+			return err
+		},
+	})
+	if err != nil {
+		return nil, err
 	}
 	// All put blocks were successful, call Put Block List to finalize the blob
-	return blockBlobURL.PutBlockList(ctx, blockIDList, o.Metadata, o.BlobHTTPHeaders, o.AccessConditions)
+	return blockBlobURL.CommitBlockList(ctx, blockIDList, o.BlobHTTPHeaders, o.Metadata, o.AccessConditions)
 }
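A hedged usage sketch of the renamed helper (the URL, pipeline setup, and sizes are illustrative; NewBlockBlobURL, NewPipeline, and NewAnonymousCredential are assumed from elsewhere in this package):

    // Sketch: upload a 12MB buffer; anything above BlockBlobMaxUploadBlobBytes
    // takes the StageBlock/CommitBlockList path defined above.
    u, _ := url.Parse("https://myaccount.blob.core.windows.net/mycontainer/myblob") // hypothetical
    blockBlobURL := NewBlockBlobURL(*u, NewPipeline(NewAnonymousCredential(), PipelineOptions{}))
    data := make([]byte, 12*1024*1024)
    _, err := UploadBufferToBlockBlob(ctx, data, blockBlobURL, UploadToBlockBlobOptions{
        BlockSize:   4 * 1024 * 1024, // 4MB blocks
        Parallelism: 8,               // up to 8 concurrent StageBlock calls
    })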
 // UploadFileToBlockBlob uploads a file in blocks to a block blob.
@ -172,78 +145,341 @@ func UploadFileToBlockBlob(ctx context.Context, file *os.File,
 	return UploadBufferToBlockBlob(ctx, m, blockBlobURL, o)
 }

-// DownloadStreamOptions is used to configure a call to NewDownloadBlobToStream to download a large stream with intelligent retries.
-type DownloadStreamOptions struct {
-	// Range indicates the starting offset and count of bytes within the blob to download.
-	Range BlobRange
-	// AccessConditions indicates the BlobAccessConditions to use when accessing the blob.
-	AccessConditions BlobAccessConditions
-}
-
-type retryStream struct {
-	ctx      context.Context
-	getBlob  func(ctx context.Context, blobRange BlobRange, ac BlobAccessConditions, rangeGetContentMD5 bool) (*GetResponse, error)
-	o        DownloadStreamOptions
-	response *http.Response
-}
-
-// NewDownloadStream creates a stream over a blob allowing you to download the blob's contents.
-// When network errors occur, the retry stream internally issues new HTTP GET requests for
-// the remaining range of the blob's contents. The getBlob argument identifies the function
-// to invoke when the GetRetryStream needs to make an HTTP GET request as Read methods are called.
-// The callback can wrap the response body (with progress reporting, for example) before returning.
-func NewDownloadStream(ctx context.Context,
-	getBlob func(ctx context.Context, blobRange BlobRange, ac BlobAccessConditions, rangeGetContentMD5 bool) (*GetResponse, error),
-	o DownloadStreamOptions) io.ReadCloser {
-	// BlobAccessConditions may already have an If-Match:etag header
-	if getBlob == nil {
-		panic("getBlob must not be nil")
-	}
-	return &retryStream{ctx: ctx, getBlob: getBlob, o: o, response: nil}
-}
-
-func (s *retryStream) Read(p []byte) (n int, err error) {
-	for {
-		if s.response != nil { // We are working with a successful response
-			n, err := s.response.Body.Read(p) // Read from the stream
-			if err == nil || err == io.EOF {  // We successfully read data or hit EOF
-				s.o.Range.Offset += int64(n) // Increment the start offset in case we need to make a new HTTP request in the future
-				if s.o.Range.Count != 0 {
-					s.o.Range.Count -= int64(n) // Decrement the count in case we need to make a new HTTP request in the future
-				}
-				return n, err // Return the result to the caller
-			}
-			s.Close()
-			s.response = nil // Something went wrong; our stream is no longer good
-			if nerr, ok := err.(net.Error); ok {
-				if !nerr.Timeout() && !nerr.Temporary() {
-					return n, err // Not retryable
-				}
-			} else {
-				return n, err // Not retryable, just return
-			}
-		}
-		// We don't have a response stream to read from, try to get one
-		response, err := s.getBlob(s.ctx, s.o.Range, s.o.AccessConditions, false)
-		if err != nil {
-			return 0, err
-		}
-		// Successful GET; this is the network stream we'll read from
-		s.response = response.Response()
-		// Ensure that future requests are from the same version of the source
-		s.o.AccessConditions.IfMatch = response.ETag()
-		// Loop around and try to read from this stream
-	}
-}
-
-func (s *retryStream) Close() error {
-	if s.response != nil && s.response.Body != nil {
-		return s.response.Body.Close()
-	}
-	return nil
-}
+///////////////////////////////////////////////////////////////////////////////
+
+/*
+const BlobDefaultDownloadBlockSize = int64(4 * 1024 * 1024) // 4MB
+
+// DownloadFromBlobOptions identifies options used by the DownloadBlobToBuffer and DownloadBlobToFile functions.
+type DownloadFromBlobOptions struct {
+	// BlockSize specifies the block size to use for each parallel download; the default size is BlobDefaultDownloadBlockSize.
+	BlockSize int64
+	// Progress is a function that is invoked periodically as bytes are received.
+	Progress pipeline.ProgressReceiver
+	// AccessConditions indicates the access conditions used when making HTTP GET requests against the blob.
+	AccessConditions BlobAccessConditions
+	// Parallelism indicates the maximum number of blocks to download in parallel (0=default)
+	Parallelism uint16
+	// RetryReaderOptionsPerBlock is used when downloading each block.
+	RetryReaderOptionsPerBlock RetryReaderOptions
+}
+
+// downloadBlobToBuffer downloads an Azure blob to a buffer in parallel.
+func downloadBlobToBuffer(ctx context.Context, blobURL BlobURL, offset int64, count int64, ac BlobAccessConditions, b []byte, o DownloadFromBlobOptions,
+	initialDownloadResponse *DownloadResponse) error {
+	// Validate parameters, and set defaults.
+	if o.BlockSize < 0 {
+		panic("BlockSize option must be >= 0")
+	}
+	if o.BlockSize == 0 {
+		o.BlockSize = BlobDefaultDownloadBlockSize
+	}
+
+	if count == -1 { // If size not specified, calculate it
+		if initialDownloadResponse != nil {
+			count = initialDownloadResponse.ContentLength() - offset // if we have the length, use it
+		} else {
+			// If we don't have the length at all, get it
+			dr, err := blobURL.Download(ctx, 0, CountToEnd, ac, false)
+			if err != nil {
+				return err
+			}
+			count = dr.ContentLength() - offset
+		}
+	}
+
+	if int64(len(b)) < count {
+		panic(fmt.Errorf("the buffer's size should be equal to or larger than the requested count of bytes: %d", count))
+	}
+
+	// Prepare and do parallel download.
+	progress := int64(0)
+	progressLock := &sync.Mutex{}
+
+	err := doBatchTransfer(ctx, batchTransferOptions{
+		operationName: "downloadBlobToBuffer",
+		transferSize:  count,
+		chunkSize:     o.BlockSize,
+		parallelism:   o.Parallelism,
+		operation: func(offset int64, count int64) error {
+			dr, err := blobURL.Download(ctx, offset, count, ac, false)
+			body := dr.Body(o.RetryReaderOptionsPerBlock)
+			if o.Progress != nil {
+				rangeProgress := int64(0)
+				body = pipeline.NewResponseBodyProgress(
+					body,
+					func(bytesTransferred int64) {
+						diff := bytesTransferred - rangeProgress
+						rangeProgress = bytesTransferred
+						progressLock.Lock()
+						progress += diff
+						o.Progress(progress)
+						progressLock.Unlock()
+					})
+			}
+			_, err = io.ReadFull(body, b[offset:offset+count])
+			body.Close()
+			return err
+		},
+	})
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+// DownloadBlobToBuffer downloads an Azure blob to a buffer in parallel.
+func DownloadBlobToBuffer(ctx context.Context, blobURL BlobURL, b []byte, o DownloadFromBlobOptions) error {
+	return downloadBlobToBuffer(ctx, blobURL, nil, b, o)
+}
+
+// DownloadBlobToFile downloads an Azure blob to a local file.
+// The file will be created if it doesn't exist, and will be truncated if its size doesn't match.
+func DownloadBlobToFile(ctx context.Context, blobURL BlobURL, ac BlobAccessConditions, file *os.File, o DownloadFromBlobOptions) error {
+	// 1. Validate parameters.
+	if file == nil {
+		panic("file must not be nil")
+	}
+
+	// 2. Try to get the blob's size.
+	props, err := blobURL.GetProperties(ctx, ac)
+	if err != nil {
+		return err
+	}
+	size := props.ContentLength()
+
+	// 3. Compare and try to resize the local file if its size doesn't match the blob's size.
+	stat, err := file.Stat()
+	if err != nil {
+		return err
+	}
+	if stat.Size() != size {
+		if err = file.Truncate(size); err != nil {
+			return err
+		}
+	}
+
+	// 4. Set up mmap and call downloadBlobToBuffer.
+	m, err := newMMF(file, true, 0, int(size))
+	if err != nil {
+		return err
+	}
+	defer m.unmap()
+	return downloadBlobToBuffer(ctx, blobURL, props, m, o)
+}
+*/
+
+///////////////////////////////////////////////////////////////////////////////
+
+// batchTransferOptions identifies options used by doBatchTransfer.
+type batchTransferOptions struct {
+	transferSize  int64
+	chunkSize     int64
+	parallelism   uint16
+	operation     func(offset int64, chunkSize int64) error
+	operationName string
+}
+
+// doBatchTransfer helps to execute operations in a batch manner.
+func doBatchTransfer(ctx context.Context, o batchTransferOptions) error {
+	// Prepare and do parallel operations.
+	numChunks := uint16(((o.transferSize - 1) / o.chunkSize) + 1)
+	operationChannel := make(chan func() error, o.parallelism) // Create the channel that releases 'parallelism' goroutines concurrently
+	operationResponseChannel := make(chan error, numChunks)    // Holds each response
+	ctx, cancel := context.WithCancel(ctx)
+	defer cancel()
+
+	// Create the goroutines that process each operation (in parallel).
+	if o.parallelism == 0 {
+		o.parallelism = 5 // default parallelism
+	}
+	for g := uint16(0); g < o.parallelism; g++ {
+		//grIndex := g
+		go func() {
+			for f := range operationChannel {
+				//fmt.Printf("[%s] gr-%d start action\n", o.operationName, grIndex)
+				err := f()
+				operationResponseChannel <- err
+				//fmt.Printf("[%s] gr-%d end action\n", o.operationName, grIndex)
+			}
+		}()
+	}
+
+	curChunkSize := o.chunkSize
+	// Add each chunk's operation to the channel.
+	for chunkNum := uint16(0); chunkNum < numChunks; chunkNum++ {
+		if chunkNum == numChunks-1 { // Last chunk
+			curChunkSize = o.transferSize - (int64(chunkNum) * o.chunkSize) // Remove size of all transferred chunks from total
+		}
+		offset := int64(chunkNum) * o.chunkSize
+		operationChannel <- func() error {
+			return o.operation(offset, curChunkSize)
+		}
+	}
+	close(operationChannel)
+
+	// Wait for the operations to complete.
+	for chunkNum := uint16(0); chunkNum < numChunks; chunkNum++ {
+		responseError := <-operationResponseChannel
+		if responseError != nil {
+			cancel()             // As soon as any operation fails, cancel all remaining operation calls
+			return responseError // No need to process any more responses
+		}
+	}
+	return nil
+}
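To make the chunk arithmetic concrete, a standalone sketch of the same offset/size computation (values are assumed; no SDK dependency). Note that every closure queued above captures the one curChunkSize variable, which is mutated for the final chunk; the sketch shows the intended sequential arithmetic:

    package main

    import "fmt"

    func main() {
        transferSize, chunkSize := int64(10), int64(4)
        numChunks := uint16(((transferSize - 1) / chunkSize) + 1) // 3 chunks
        curChunkSize := chunkSize
        for chunkNum := uint16(0); chunkNum < numChunks; chunkNum++ {
            if chunkNum == numChunks-1 { // Last chunk carries the remainder
                curChunkSize = transferSize - (int64(chunkNum) * chunkSize)
            }
            offset := int64(chunkNum) * chunkSize
            fmt.Println(offset, curChunkSize) // prints: 0 4, then 4 4, then 8 2
        }
    }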
+
+////////////////////////////////////////////////////////////////////////////////////////////////
+
+// UploadStreamToBlockBlobOptions identifies options used by UploadStreamToBlockBlob.
+type UploadStreamToBlockBlobOptions struct {
+	BufferSize       int
+	MaxBuffers       int
+	BlobHTTPHeaders  BlobHTTPHeaders
+	Metadata         Metadata
+	AccessConditions BlobAccessConditions
+}
+
+func UploadStreamToBlockBlob(ctx context.Context, reader io.Reader, blockBlobURL BlockBlobURL,
+	o UploadStreamToBlockBlobOptions) (*BlockBlobsCommitBlockListResponse, error) {
+	result, err := uploadStream(ctx, reader,
+		UploadStreamOptions{BufferSize: o.BufferSize, MaxBuffers: o.MaxBuffers},
+		&uploadStreamToBlockBlobOptions{b: blockBlobURL, o: o, blockIDPrefix: newUUID()})
+	return result.(*BlockBlobsCommitBlockListResponse), err
+}
+
+type uploadStreamToBlockBlobOptions struct {
+	b             BlockBlobURL
+	o             UploadStreamToBlockBlobOptions
+	blockIDPrefix uuid   // UUID used with all blockIDs
+	maxBlockNum   uint32 // defaults to 0
+	firstBlock    []byte // Used only if maxBlockNum is 0
+}
+
+func (t *uploadStreamToBlockBlobOptions) start(ctx context.Context) (interface{}, error) {
+	return nil, nil
+}
+
+func (t *uploadStreamToBlockBlobOptions) chunk(ctx context.Context, num uint32, buffer []byte) error {
+	if num == 0 && len(buffer) < t.o.BufferSize {
+		// If the whole payload fits in 1 block, don't stage it; end() will upload it with 1 I/O operation
+		t.firstBlock = buffer
+		return nil
+	}
+	// Else, upload a staged block...
+	AtomicMorphUint32(&t.maxBlockNum, func(startVal uint32) (val uint32, morphResult interface{}) {
+		// Atomically remember (in t.maxBlockNum) the maximum block num we've ever seen
+		if startVal < num {
+			return num, nil
+		}
+		return startVal, nil
+	})
+	blockID := newUuidBlockID(t.blockIDPrefix).WithBlockNumber(num).ToBase64()
+	_, err := t.b.StageBlock(ctx, blockID, bytes.NewReader(buffer), LeaseAccessConditions{})
+	return err
+}
+
+func (t *uploadStreamToBlockBlobOptions) end(ctx context.Context) (interface{}, error) {
+	if t.maxBlockNum == 0 {
+		// If the whole payload fits in 1 block (block #0), upload it with 1 I/O operation
+		return t.b.Upload(ctx, bytes.NewReader(t.firstBlock),
+			t.o.BlobHTTPHeaders, t.o.Metadata, t.o.AccessConditions)
+	}
+	// Multiple blocks staged, commit them all now
+	blockID := newUuidBlockID(t.blockIDPrefix)
+	blockIDs := make([]string, t.maxBlockNum)
+	for bn := uint32(0); bn < t.maxBlockNum; bn++ {
+		blockIDs[bn] = blockID.WithBlockNumber(bn).ToBase64()
+	}
+	return t.b.CommitBlockList(ctx, blockIDs, t.o.BlobHTTPHeaders, t.o.Metadata, t.o.AccessConditions)
+}
////////////////////////////////////////////////////////////////////////////////////////////////////
type iTransfer interface {
start(ctx context.Context) (interface{}, error)
chunk(ctx context.Context, num uint32, buffer []byte) error
end(ctx context.Context) (interface{}, error)
}
type UploadStreamOptions struct {
MaxBuffers int
BufferSize int
}
func uploadStream(ctx context.Context, reader io.Reader, o UploadStreamOptions, t iTransfer) (interface{}, error) {
ctx, cancel := context.WithCancel(ctx) // New context so that any failure cancels everything
defer cancel()
wg := sync.WaitGroup{} // Used to know when all outgoing messages have finished processing
type OutgoingMsg struct {
chunkNum uint32
buffer []byte
}
// Create a channel to hold the buffers usable for incoming datsa
incoming := make(chan []byte, o.MaxBuffers)
outgoing := make(chan OutgoingMsg, o.MaxBuffers) // Channel holding outgoing buffers
if result, err := t.start(ctx); err != nil {
return result, err
}
numBuffers := 0 // The number of buffers & out going goroutines created so far
injectBuffer := func() {
// For each Buffer, create it and a goroutine to upload it
incoming <- make([]byte, o.BufferSize) // Add the new buffer to the incoming channel so this goroutine can from the reader into it
numBuffers++
go func() {
for outgoingMsg := range outgoing {
// Upload the outgoing buffer
err := t.chunk(ctx, outgoingMsg.chunkNum, outgoingMsg.buffer)
wg.Done() // Indicate this buffer was sent
if nil != err {
cancel()
}
incoming <- outgoingMsg.buffer // The goroutine reading from the stream can use reuse this buffer now
}
}()
}
injectBuffer() // Create our 1st buffer & outgoing goroutine
// This goroutine grabs a buffer, reads from the stream into the buffer,
// and inserts the buffer into the outgoing channel to be uploaded
for c := uint32(0); true; c++ { // Iterate once per chunk
var buffer []byte
if numBuffers < o.MaxBuffers {
select {
// We're not at max buffers, see if a previously-created buffer is available
case buffer = <-incoming:
break
default:
// No buffer available; inject a new buffer & go routine to process it
injectBuffer()
buffer = <-incoming // Grab the just-injected buffer
}
} else {
// We are at max buffers, block until we get to reuse one
buffer = <-incoming
}
n, err := io.ReadFull(reader, buffer)
if err != nil {
buffer = buffer[:n] // Make slice match the # of read bytes
}
if len(buffer) > 0 {
// Buffer not empty, upload it
wg.Add(1) // We're posting a buffer to be sent
outgoing <- OutgoingMsg{chunkNum: c, buffer: buffer}
}
if err != nil { // The reader is done, no more outgoing buffers
break
}
}
// NOTE: Don't close the incoming channel because the outgoing goroutines post buffers into it when they are done
close(outgoing) // Make all the outgoing goroutines terminate when this channel is empty
wg.Wait() // Wait for all pending outgoing messages to complete
// After all blocks uploaded, commit them to the blob & return the result
return t.end(ctx)
}
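A hedged usage sketch of the streaming path (buffer counts and sizes are illustrative; blockBlobURL and ctx are assumed in scope): the reader is consumed chunk-by-chunk into reusable buffers, staged via chunk(), then committed by end():

    // Sketch: stream stdin into a block blob with 4 reusable 2MB buffers.
    _, err := UploadStreamToBlockBlob(ctx, os.Stdin, blockBlobURL,
        UploadStreamToBlockBlobOptions{
            BufferSize: 2 * 1024 * 1024,
            MaxBuffers: 4,
        })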

28
2017-07-29/azblob/parsing_urls.go Normal file → Executable file

@ -3,22 +3,22 @@ package azblob
 import (
 	"net/url"
 	"strings"
-	"time"
 )

 const (
-	snapshotTimeFormat = "2006-01-02T15:04:05.0000000Z07:00"
+	snapshot           = "snapshot"
+	SnapshotTimeFormat = "2006-01-02T15:04:05.0000000Z07:00"
 )

 // A BlobURLParts object represents the components that make up an Azure Storage Container/Blob URL. You parse an
 // existing URL into its parts by calling NewBlobURLParts(). You construct a URL from parts by calling URL().
 // NOTE: Changing any SAS-related field requires computing a new SAS signature.
 type BlobURLParts struct {
 	Scheme         string // Ex: "https://"
 	Host           string // Ex: "account.blob.core.windows.net"
 	ContainerName  string // "" if no container
 	BlobName       string // "" if no blob
-	Snapshot       time.Time // IsZero is true if not a snapshot
+	Snapshot       string // "" if not a snapshot
 	SAS            SASQueryParameters
 	UnparsedParams string
 }

@ -51,13 +51,13 @@ func NewBlobURLParts(u url.URL) BlobURLParts {
 	// Convert the query parameters to a case-sensitive map & trim whitespace
 	paramsMap := u.Query()

-	up.Snapshot = time.Time{} // Assume no snapshot
-	if snapshotStr, ok := caseInsensitiveValues(paramsMap).Get("snapshot"); ok {
-		up.Snapshot, _ = time.Parse(snapshotTimeFormat, snapshotStr[0])
+	up.Snapshot = "" // Assume no snapshot
+	if snapshotStr, ok := caseInsensitiveValues(paramsMap).Get(snapshot); ok {
+		up.Snapshot = snapshotStr[0]
 		// If we recognized the query parameter, remove it from the map
-		delete(paramsMap, "snapshot")
+		delete(paramsMap, snapshot)
 	}
-	up.SAS = NewSASQueryParameters(paramsMap, true)
+	up.SAS = newSASQueryParameters(paramsMap, true)
 	up.UnparsedParams = paramsMap.Encode()
 	return up
 }

@ -88,11 +88,11 @@ func (up BlobURLParts) URL() url.URL {
 	rawQuery := up.UnparsedParams

 	// Concatenate blob snapshot query parameter (if it exists)
-	if !up.Snapshot.IsZero() {
+	if up.Snapshot != "" {
 		if len(rawQuery) > 0 {
 			rawQuery += "&"
 		}
-		rawQuery += "snapshot=" + up.Snapshot.Format(snapshotTimeFormat)
+		rawQuery += snapshot + "=" + up.Snapshot
 	}
 	sas := up.SAS.Encode()
 	if sas != "" {
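As a usage sketch (the URL is hypothetical): snapshots now round-trip as opaque strings rather than parsed time.Time values:

    u, _ := url.Parse("https://myaccount.blob.core.windows.net/mycontainer/myblob" +
        "?snapshot=2018-03-08T02:29:11.1234567Z")
    parts := NewBlobURLParts(*u)
    fmt.Println(parts.ContainerName, parts.BlobName, parts.Snapshot) // mycontainer myblob 2018-03-08T02:29:11.1234567Z
    parts.Snapshot = ""    // drop the snapshot component
    baseURL := parts.URL() // rebuild the URL without it
    _ = baseURL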


@ -1,58 +0,0 @@
package azblob

import (
    "context"

    "github.com/Azure/azure-pipeline-go/pipeline"
)

/**/
// NewUniqueRequestIDPolicyFactory creates a UniqueRequestIDPolicyFactory object
// that sets the request's x-ms-client-request-id header if it doesn't already exist.
func NewUniqueRequestIDPolicyFactory() pipeline.Factory {
    return pipeline.FactoryFunc(func(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.PolicyFunc {
        // This is the Policy's Do method:
        return func(ctx context.Context, request pipeline.Request) (pipeline.Response, error) {
            id := request.Header.Get(xMsClientRequestID)
            if id == "" { // Add a unique request ID if the caller didn't specify one already
                request.Header.Set(xMsClientRequestID, newUUID().String())
            }
            return next.Do(ctx, request)
        }
    })
}
/**/

const xMsClientRequestID = "x-ms-client-request-id"

/*
// NewUniqueRequestIDPolicyFactory creates a UniqueRequestIDPolicyFactory object
// that sets the request's x-ms-client-request-id header if it doesn't already exist.
func NewUniqueRequestIDPolicyFactory() pipeline.Factory {
    return &uniqueRequestIDPolicyFactory{}
}

// uniqueRequestIDPolicyFactory struct
type uniqueRequestIDPolicyFactory struct {
}

// New creates a UniqueRequestIDPolicy object.
func (f *uniqueRequestIDPolicyFactory) New(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.Policy {
    return &uniqueRequestIDPolicy{next: next}
}

// UniqueRequestIDPolicy ...
type uniqueRequestIDPolicy struct {
    next pipeline.Policy
}

func (p *uniqueRequestIDPolicy) Do(ctx context.Context, request pipeline.Request) (pipeline.Response, error) {
    id := request.Header.Get(xMsClientRequestID)
    if id == "" { // Add a unique request ID if the caller didn't specify one already
        request.Header.Set(xMsClientRequestID, newUUID().String())
    }
    return p.next.Do(ctx, request)
}
*/


@ -1,145 +0,0 @@
package azblob

import (
    "net"
    "net/url"
    "strings"
    "time"
)

// SASTimeFormat represents the format of a SAS start or expiry time. Use it when formatting/parsing a time.Time.
const SASTimeFormat = "2006-01-02T15:04:05Z" //"2017-07-27T00:00:00Z" // ISO 8601

// https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas

// A SASQueryParameters object represents the components that make up an Azure Storage SAS' query parameters.
// You parse a map of query parameters into its fields by calling NewSASQueryParameters(). You add the components
// to a query parameter map by calling AddToValues().
// NOTE: Changing any field requires computing a new SAS signature using a XxxSASSignatureValues type.
//
// This type defines the components used by all Azure Storage resources (Containers, Blobs, Files, & Queues).
type SASQueryParameters struct {
    // All members are immutable or values so copies of this struct are goroutine-safe.
    Version       string    `param:"sv"`
    Services      string    `param:"ss"`
    ResourceTypes string    `param:"srt"`
    Protocol      string    `param:"spr"`
    StartTime     time.Time `param:"st"`
    ExpiryTime    time.Time `param:"se"`
    IPRange       IPRange   `param:"sip"`
    Identifier    string    `param:"si"`
    Resource      string    `param:"sr"`
    Permissions   string    `param:"sp"`
    Signature     string    `param:"sig"`
}

// IPRange represents a SAS IP range's start IP and (optionally) end IP.
type IPRange struct {
    Start net.IP // Not specified if length = 0
    End   net.IP // Not specified if length = 0
}

// String returns a string representation of an IPRange.
func (ipr *IPRange) String() string {
    if len(ipr.Start) == 0 {
        return ""
    }
    start := ipr.Start.String()
    if len(ipr.End) == 0 {
        return start
    }
    return start + "-" + ipr.End.String()
}

// NewSASQueryParameters creates and initializes a SASQueryParameters object based on the
// query parameter map's passed-in values. If deleteSASParametersFromValues is true,
// all SAS-related query parameters are removed from the passed-in map. If
// deleteSASParametersFromValues is false, the passed-in map is unaltered.
func NewSASQueryParameters(values url.Values, deleteSASParametersFromValues bool) SASQueryParameters {
    p := SASQueryParameters{}
    for k, v := range values {
        val := v[0]
        isSASKey := true
        switch strings.ToLower(k) {
        case "sv":
            p.Version = val
        case "ss":
            p.Services = val
        case "srt":
            p.ResourceTypes = val
        case "spr":
            p.Protocol = val
        case "st":
            p.StartTime, _ = time.Parse(SASTimeFormat, val)
        case "se":
            p.ExpiryTime, _ = time.Parse(SASTimeFormat, val)
        case "sip":
            dashIndex := strings.Index(val, "-")
            if dashIndex == -1 {
                p.IPRange.Start = net.ParseIP(val)
            } else {
                p.IPRange.Start = net.ParseIP(val[:dashIndex])
                p.IPRange.End = net.ParseIP(val[dashIndex+1:])
            }
        case "si":
            p.Identifier = val
        case "sr":
            p.Resource = val
        case "sp":
            p.Permissions = val
        case "sig":
            p.Signature = val
        default:
            isSASKey = false // We didn't recognize the query parameter
        }
        if isSASKey && deleteSASParametersFromValues {
            delete(values, k)
        }
    }
    return p
}

// AddToValues adds the SAS components to the specified query parameters map.
func (p *SASQueryParameters) AddToValues(v url.Values) url.Values {
    if p.Version != "" {
        v.Add("sv", p.Version)
    }
    if p.Services != "" {
        v.Add("ss", p.Services)
    }
    if p.ResourceTypes != "" {
        v.Add("srt", p.ResourceTypes)
    }
    if p.Protocol != "" {
        v.Add("spr", p.Protocol)
    }
    if !p.StartTime.IsZero() {
        v.Add("st", p.StartTime.Format(SASTimeFormat))
    }
    if !p.ExpiryTime.IsZero() {
        v.Add("se", p.ExpiryTime.Format(SASTimeFormat))
    }
    if len(p.IPRange.Start) > 0 {
        v.Add("sip", p.IPRange.String())
    }
    if p.Identifier != "" {
        v.Add("si", p.Identifier)
    }
    if p.Resource != "" {
        v.Add("sr", p.Resource)
    }
    if p.Permissions != "" {
        v.Add("sp", p.Permissions)
    }
    if p.Signature != "" {
        v.Add("sig", p.Signature)
    }
    return v
}

// Encode encodes the SAS query parameters into URL-encoded form sorted by key.
func (p *SASQueryParameters) Encode() string {
    v := url.Values{}
    p.AddToValues(v)
    return v.Encode()
}

371
2017-07-29/azblob/sas_service.go Normal file → Executable file

@ -1,165 +1,206 @@
 package azblob

 import (
 	"bytes"
+	"fmt"
 	"strings"
 	"time"
 )

 // BlobSASSignatureValues is used to generate a Shared Access Signature (SAS) for an Azure Storage container or blob.
 type BlobSASSignatureValues struct {
 	Version     string      `param:"sv"`  // If not specified, this defaults to SASVersion
-	Protocol    string      `param:"spr"` // See the SASProtocol* constants
+	Protocol    SASProtocol `param:"spr"` // See the SASProtocol* constants
 	StartTime   time.Time   `param:"st"`  // Not specified if IsZero
 	ExpiryTime  time.Time   `param:"se"`  // Not specified if IsZero
-	Permissions string      `param:"sp"`
+	Permissions string      `param:"sp"` // Create by initializing a ContainerSASPermissions or BlobSASPermissions and then call String()
 	IPRange     IPRange     `param:"sip"`
+	Identifier  string      `param:"si"`
 	ContainerName string
 	BlobName    string // Use "" to create a Container SAS
-	Identifier  string `param:"si"`
 	CacheControl       string // rscc
 	ContentDisposition string // rscd
 	ContentEncoding    string // rsce
 	ContentLanguage    string // rscl
 	ContentType        string // rsct
 }

 // NewSASQueryParameters uses an account's shared key credential to sign this signature values to produce
 // the proper SAS query parameters.
 func (v BlobSASSignatureValues) NewSASQueryParameters(sharedKeyCredential *SharedKeyCredential) SASQueryParameters {
 	if sharedKeyCredential == nil {
 		panic("sharedKeyCredential can't be nil")
 	}

 	resource := "c"
-	if v.BlobName != "" {
+	if v.BlobName == "" {
+		// Make sure the permission characters are in the correct order
+		perms := &ContainerSASPermissions{}
+		if err := perms.Parse(v.Permissions); err != nil {
+			panic(err)
+		}
+		v.Permissions = perms.String()
+	} else {
 		resource = "b"
+		// Make sure the permission characters are in the correct order
+		perms := &BlobSASPermissions{}
+		if err := perms.Parse(v.Permissions); err != nil {
+			panic(err)
+		}
+		v.Permissions = perms.String()
 	}
 	if v.Version == "" {
 		v.Version = SASVersion
 	}
 	startTime, expiryTime := FormatTimesForSASSigning(v.StartTime, v.ExpiryTime)

 	// String to sign: http://msdn.microsoft.com/en-us/library/azure/dn140255.aspx
 	stringToSign := strings.Join([]string{
 		v.Permissions,
 		startTime,
 		expiryTime,
 		getCanonicalName(sharedKeyCredential.AccountName(), v.ContainerName, v.BlobName),
 		v.Identifier,
 		v.IPRange.String(),
-		v.Protocol,
+		string(v.Protocol),
 		v.Version,
 		v.CacheControl,       // rscc
 		v.ContentDisposition, // rscd
 		v.ContentEncoding,    // rsce
 		v.ContentLanguage,    // rscl
 		v.ContentType},       // rsct
 		"\n")
 	signature := sharedKeyCredential.ComputeHMACSHA256(stringToSign)

 	p := SASQueryParameters{
 		// Common SAS parameters
-		Version:     v.Version,
-		Protocol:    v.Protocol,
-		StartTime:   v.StartTime,
-		ExpiryTime:  v.ExpiryTime,
-		Permissions: v.Permissions,
-		IPRange:     v.IPRange,
+		version:     v.Version,
+		protocol:    v.Protocol,
+		startTime:   v.StartTime,
+		expiryTime:  v.ExpiryTime,
+		permissions: v.Permissions,
+		ipRange:     v.IPRange,

 		// Container/Blob-specific SAS parameters
-		Resource:   resource,
-		Identifier: v.Identifier,
+		resource:   resource,
+		identifier: v.Identifier,

 		// Calculated SAS signature
-		Signature: signature,
+		signature: signature,
 	}
 	return p
 }

 // getCanonicalName computes the canonical name for a container or blob resource for SAS signing.
 func getCanonicalName(account string, containerName string, blobName string) string {
 	// Container: "/blob/account/containername"
 	// Blob:      "/blob/account/containername/blobname"
 	elements := []string{"/blob/", account, "/", containerName}
 	if blobName != "" {
 		elements = append(elements, "/", strings.Replace(blobName, "\\", "/", -1))
 	}
 	return strings.Join(elements, "")
 }

 // The ContainerSASPermissions type simplifies creating the permissions string for an Azure Storage container SAS.
 // Initialize an instance of this type and then call its String method to set BlobSASSignatureValues's Permissions field.
 type ContainerSASPermissions struct {
 	Read, Add, Create, Write, Delete, List bool
 }

 // String produces the SAS permissions string for an Azure Storage container.
 // Call this method to set BlobSASSignatureValues's Permissions field.
 func (p ContainerSASPermissions) String() string {
 	var b bytes.Buffer
 	if p.Read {
 		b.WriteRune('r')
 	}
 	if p.Add {
 		b.WriteRune('a')
 	}
 	if p.Create {
 		b.WriteRune('c')
 	}
 	if p.Write {
 		b.WriteRune('w')
 	}
 	if p.Delete {
 		b.WriteRune('d')
 	}
 	if p.List {
 		b.WriteRune('l')
 	}
 	return b.String()
 }

 // Parse initializes the ContainerSASPermissions's fields from a string.
-func (p *ContainerSASPermissions) Parse(s string) {
-	p.Read = strings.ContainsRune(s, 'r')
-	p.Add = strings.ContainsRune(s, 'a')
-	p.Create = strings.ContainsRune(s, 'c')
-	p.Write = strings.ContainsRune(s, 'w')
-	p.Delete = strings.ContainsRune(s, 'd')
-	p.List = strings.ContainsRune(s, 'l')
+func (p *ContainerSASPermissions) Parse(s string) error {
+	*p = ContainerSASPermissions{} // Clear the flags
+	for _, r := range s {
+		switch r {
+		case 'r':
+			p.Read = true
+		case 'a':
+			p.Add = true
+		case 'c':
+			p.Create = true
+		case 'w':
+			p.Write = true
+		case 'd':
+			p.Delete = true
+		case 'l':
+			p.List = true
+		default:
+			return fmt.Errorf("Invalid permission: '%v'", r)
+		}
+	}
+	return nil
 }

 // The BlobSASPermissions type simplifies creating the permissions string for an Azure Storage blob SAS.
 // Initialize an instance of this type and then call its String method to set BlobSASSignatureValues's Permissions field.
 type BlobSASPermissions struct{ Read, Add, Create, Write, Delete bool }

 // String produces the SAS permissions string for an Azure Storage blob.
 // Call this method to set BlobSASSignatureValues's Permissions field.
 func (p BlobSASPermissions) String() string {
 	var b bytes.Buffer
 	if p.Read {
 		b.WriteRune('r')
 	}
 	if p.Add {
 		b.WriteRune('a')
 	}
 	if p.Create {
 		b.WriteRune('c')
 	}
 	if p.Write {
 		b.WriteRune('w')
 	}
 	if p.Delete {
 		b.WriteRune('d')
 	}
 	return b.String()
 }

 // Parse initializes the BlobSASPermissions's fields from a string.
-func (p *BlobSASPermissions) Parse(s string) {
-	p.Read = strings.ContainsRune(s, 'r')
-	p.Add = strings.ContainsRune(s, 'a')
-	p.Create = strings.ContainsRune(s, 'c')
-	p.Write = strings.ContainsRune(s, 'w')
-	p.Delete = strings.ContainsRune(s, 'd')
+func (p *BlobSASPermissions) Parse(s string) error {
+	*p = BlobSASPermissions{} // Clear the flags
+	for _, r := range s {
+		switch r {
+		case 'r':
+			p.Read = true
+		case 'a':
+			p.Add = true
+		case 'c':
+			p.Create = true
+		case 'w':
+			p.Write = true
+		case 'd':
+			p.Delete = true
+		default:
+			return fmt.Errorf("Invalid permission: '%v'", r)
+		}
+	}
+	return nil
 }
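A hedged usage sketch of the signing flow (the container name is hypothetical, and the SASProtocolHTTPS constant and SASQueryParameters.Encode method are assumptions about the surrounding package; treat this as illustrative, not canonical):

    // Sketch: build a read-only, 24-hour container SAS.
    func makeContainerSAS(credential *SharedKeyCredential) string {
        qp := BlobSASSignatureValues{
            Protocol:      SASProtocolHTTPS, // assumed constant from this package
            ExpiryTime:    time.Now().UTC().Add(24 * time.Hour),
            ContainerName: "mycontainer", // hypothetical
            Permissions:   ContainerSASPermissions{Read: true, List: true}.String(),
        }.NewSASQueryParameters(credential)
        return qp.Encode() // append to the container URL's query string
    }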

390
2017-07-29/azblob/service_codes_blob.go Normal file → Executable file

@ -1,195 +1,195 @@
package azblob package azblob
// https://docs.microsoft.com/en-us/rest/api/storageservices/blob-service-error-codes // https://docs.microsoft.com/en-us/rest/api/storageservices/blob-service-error-codes
// ServiceCode values indicate a service failure. // ServiceCode values indicate a service failure.
const ( const (
// ServiceCodeAppendPositionConditionNotMet means the append position condition specified was not met. // ServiceCodeAppendPositionConditionNotMet means the append position condition specified was not met.
ServiceCodeAppendPositionConditionNotMet ServiceCodeType = "AppendPositionConditionNotMet" ServiceCodeAppendPositionConditionNotMet ServiceCodeType = "AppendPositionConditionNotMet"
// ServiceCodeBlobAlreadyExists means the specified blob already exists. // ServiceCodeBlobAlreadyExists means the specified blob already exists.
ServiceCodeBlobAlreadyExists ServiceCodeType = "BlobAlreadyExists" ServiceCodeBlobAlreadyExists ServiceCodeType = "BlobAlreadyExists"
// ServiceCodeBlobNotFound means the specified blob does not exist. // ServiceCodeBlobNotFound means the specified blob does not exist.
ServiceCodeBlobNotFound ServiceCodeType = "BlobNotFound" ServiceCodeBlobNotFound ServiceCodeType = "BlobNotFound"
// ServiceCodeBlobOverwritten means the blob has been recreated since the previous snapshot was taken. // ServiceCodeBlobOverwritten means the blob has been recreated since the previous snapshot was taken.
ServiceCodeBlobOverwritten ServiceCodeType = "BlobOverwritten" ServiceCodeBlobOverwritten ServiceCodeType = "BlobOverwritten"
// ServiceCodeBlobTierInadequateForContentLength means the specified blob tier size limit cannot be less than content length. // ServiceCodeBlobTierInadequateForContentLength means the specified blob tier size limit cannot be less than content length.
ServiceCodeBlobTierInadequateForContentLength ServiceCodeType = "BlobTierInadequateForContentLength" ServiceCodeBlobTierInadequateForContentLength ServiceCodeType = "BlobTierInadequateForContentLength"
// ServiceCodeBlockCountExceedsLimit means the committed block count cannot exceed the maximum limit of 50,000 blocks // ServiceCodeBlockCountExceedsLimit means the committed block count cannot exceed the maximum limit of 50,000 blocks
// or that the uncommitted block count cannot exceed the maximum limit of 100,000 blocks. // or that the uncommitted block count cannot exceed the maximum limit of 100,000 blocks.
ServiceCodeBlockCountExceedsLimit ServiceCodeType = "BlockCountExceedsLimit" ServiceCodeBlockCountExceedsLimit ServiceCodeType = "BlockCountExceedsLimit"
// ServiceCodeBlockListTooLong means the block list may not contain more than 50,000 blocks. // ServiceCodeBlockListTooLong means the block list may not contain more than 50,000 blocks.
ServiceCodeBlockListTooLong ServiceCodeType = "BlockListTooLong" ServiceCodeBlockListTooLong ServiceCodeType = "BlockListTooLong"
// ServiceCodeCannotChangeToLowerTier means that a higher blob tier has already been explicitly set. // ServiceCodeCannotChangeToLowerTier means that a higher blob tier has already been explicitly set.
ServiceCodeCannotChangeToLowerTier ServiceCodeType = "CannotChangeToLowerTier" ServiceCodeCannotChangeToLowerTier ServiceCodeType = "CannotChangeToLowerTier"
// ServiceCodeCannotVerifyCopySource means that the service could not verify the copy source within the specified time. // ServiceCodeCannotVerifyCopySource means that the service could not verify the copy source within the specified time.
// Examine the HTTP status code and message for more information about the failure. // Examine the HTTP status code and message for more information about the failure.
ServiceCodeCannotVerifyCopySource ServiceCodeType = "CannotVerifyCopySource" ServiceCodeCannotVerifyCopySource ServiceCodeType = "CannotVerifyCopySource"
// ServiceCodeContainerAlreadyExists means the specified container already exists. // ServiceCodeContainerAlreadyExists means the specified container already exists.
ServiceCodeContainerAlreadyExists ServiceCodeType = "ContainerAlreadyExists" ServiceCodeContainerAlreadyExists ServiceCodeType = "ContainerAlreadyExists"
// ServiceCodeContainerBeingDeleted means the specified container is being deleted. // ServiceCodeContainerBeingDeleted means the specified container is being deleted.
ServiceCodeContainerBeingDeleted ServiceCodeType = "ContainerBeingDeleted" ServiceCodeContainerBeingDeleted ServiceCodeType = "ContainerBeingDeleted"
// ServiceCodeContainerDisabled means the specified container has been disabled by the administrator. // ServiceCodeContainerDisabled means the specified container has been disabled by the administrator.
ServiceCodeContainerDisabled ServiceCodeType = "ContainerDisabled" ServiceCodeContainerDisabled ServiceCodeType = "ContainerDisabled"
// ServiceCodeContainerNotFound means the specified container does not exist. // ServiceCodeContainerNotFound means the specified container does not exist.
ServiceCodeContainerNotFound ServiceCodeType = "ContainerNotFound" ServiceCodeContainerNotFound ServiceCodeType = "ContainerNotFound"
// ServiceCodeContentLengthLargerThanTierLimit means the blob's content length cannot exceed its tier limit. // ServiceCodeContentLengthLargerThanTierLimit means the blob's content length cannot exceed its tier limit.
ServiceCodeContentLengthLargerThanTierLimit ServiceCodeType = "ContentLengthLargerThanTierLimit" ServiceCodeContentLengthLargerThanTierLimit ServiceCodeType = "ContentLengthLargerThanTierLimit"
// ServiceCodeCopyAcrossAccountsNotSupported means the copy source account and destination account must be the same. // ServiceCodeCopyAcrossAccountsNotSupported means the copy source account and destination account must be the same.
ServiceCodeCopyAcrossAccountsNotSupported ServiceCodeType = "CopyAcrossAccountsNotSupported" ServiceCodeCopyAcrossAccountsNotSupported ServiceCodeType = "CopyAcrossAccountsNotSupported"
// ServiceCodeCopyIDMismatch means the specified copy ID did not match the copy ID for the pending copy operation. // ServiceCodeCopyIDMismatch means the specified copy ID did not match the copy ID for the pending copy operation.
ServiceCodeCopyIDMismatch ServiceCodeType = "CopyIdMismatch" ServiceCodeCopyIDMismatch ServiceCodeType = "CopyIdMismatch"
// ServiceCodeFeatureVersionMismatch means the type of blob in the container is unrecognized by this version or // ServiceCodeFeatureVersionMismatch means the type of blob in the container is unrecognized by this version or
// that the operation for AppendBlob requires at least version 2015-02-21. // that the operation for AppendBlob requires at least version 2015-02-21.
ServiceCodeFeatureVersionMismatch ServiceCodeType = "FeatureVersionMismatch" ServiceCodeFeatureVersionMismatch ServiceCodeType = "FeatureVersionMismatch"
// ServiceCodeIncrementalCopyBlobMismatch means the specified source blob is different than the copy source of the existing incremental copy blob. // ServiceCodeIncrementalCopyBlobMismatch means the specified source blob is different than the copy source of the existing incremental copy blob.
ServiceCodeIncrementalCopyBlobMismatch ServiceCodeType = "IncrementalCopyBlobMismatch" ServiceCodeIncrementalCopyBlobMismatch ServiceCodeType = "IncrementalCopyBlobMismatch"
// ServiceCodeIncrementalCopyOfEralierVersionSnapshotNotAllowed means the specified snapshot is earlier than the last snapshot copied into the incremental copy blob. // ServiceCodeIncrementalCopyOfEralierVersionSnapshotNotAllowed means the specified snapshot is earlier than the last snapshot copied into the incremental copy blob.
ServiceCodeIncrementalCopyOfEralierVersionSnapshotNotAllowed ServiceCodeType = "IncrementalCopyOfEralierVersionSnapshotNotAllowed" ServiceCodeIncrementalCopyOfEralierVersionSnapshotNotAllowed ServiceCodeType = "IncrementalCopyOfEralierVersionSnapshotNotAllowed"
// ServiceCodeIncrementalCopySourceMustBeSnapshot means the source for incremental copy request must be a snapshot. // ServiceCodeIncrementalCopySourceMustBeSnapshot means the source for incremental copy request must be a snapshot.
ServiceCodeIncrementalCopySourceMustBeSnapshot ServiceCodeType = "IncrementalCopySourceMustBeSnapshot" ServiceCodeIncrementalCopySourceMustBeSnapshot ServiceCodeType = "IncrementalCopySourceMustBeSnapshot"
// ServiceCodeInfiniteLeaseDurationRequired means the lease ID matched, but the specified lease must be an infinite-duration lease. // ServiceCodeInfiniteLeaseDurationRequired means the lease ID matched, but the specified lease must be an infinite-duration lease.
ServiceCodeInfiniteLeaseDurationRequired ServiceCodeType = "InfiniteLeaseDurationRequired" ServiceCodeInfiniteLeaseDurationRequired ServiceCodeType = "InfiniteLeaseDurationRequired"
// ServiceCodeInvalidBlobOrBlock means the specified blob or block content is invalid. // ServiceCodeInvalidBlobOrBlock means the specified blob or block content is invalid.
ServiceCodeInvalidBlobOrBlock ServiceCodeType = "InvalidBlobOrBlock" ServiceCodeInvalidBlobOrBlock ServiceCodeType = "InvalidBlobOrBlock"
// ServiceCodeInvalidBlobType means the blob type is invalid for this operation. // ServiceCodeInvalidBlobType means the blob type is invalid for this operation.
ServiceCodeInvalidBlobType ServiceCodeType = "InvalidBlobType" ServiceCodeInvalidBlobType ServiceCodeType = "InvalidBlobType"
// ServiceCodeInvalidBlockID means the specified block ID is invalid. The block ID must be Base64-encoded. // ServiceCodeInvalidBlockID means the specified block ID is invalid. The block ID must be Base64-encoded.
ServiceCodeInvalidBlockID ServiceCodeType = "InvalidBlockId" ServiceCodeInvalidBlockID ServiceCodeType = "InvalidBlockId"
// ServiceCodeInvalidBlockList means the specified block list is invalid. // ServiceCodeInvalidBlockList means the specified block list is invalid.
ServiceCodeInvalidBlockList ServiceCodeType = "InvalidBlockList" ServiceCodeInvalidBlockList ServiceCodeType = "InvalidBlockList"
// ServiceCodeInvalidOperation means an invalid operation was attempted against a blob snapshot. // ServiceCodeInvalidOperation means an invalid operation was attempted against a blob snapshot.
ServiceCodeInvalidOperation ServiceCodeType = "InvalidOperation" ServiceCodeInvalidOperation ServiceCodeType = "InvalidOperation"
// ServiceCodeInvalidPageRange means the page range specified is invalid. // ServiceCodeInvalidPageRange means the page range specified is invalid.
ServiceCodeInvalidPageRange ServiceCodeType = "InvalidPageRange" ServiceCodeInvalidPageRange ServiceCodeType = "InvalidPageRange"
// ServiceCodeInvalidSourceBlobType means the copy source blob type is invalid for this operation. // ServiceCodeInvalidSourceBlobType means the copy source blob type is invalid for this operation.
ServiceCodeInvalidSourceBlobType ServiceCodeType = "InvalidSourceBlobType" ServiceCodeInvalidSourceBlobType ServiceCodeType = "InvalidSourceBlobType"
// ServiceCodeInvalidSourceBlobURL means the source URL for an incremental copy request must be a valid Azure Storage blob URL. // ServiceCodeInvalidSourceBlobURL means the source URL for an incremental copy request must be a valid Azure Storage blob URL.
ServiceCodeInvalidSourceBlobURL ServiceCodeType = "InvalidSourceBlobUrl" ServiceCodeInvalidSourceBlobURL ServiceCodeType = "InvalidSourceBlobUrl"
// ServiceCodeInvalidVersionForPageBlobOperation means that all operations on page blobs require at least version 2009-09-19. // ServiceCodeInvalidVersionForPageBlobOperation means that all operations on page blobs require at least version 2009-09-19.
ServiceCodeInvalidVersionForPageBlobOperation ServiceCodeType = "InvalidVersionForPageBlobOperation" ServiceCodeInvalidVersionForPageBlobOperation ServiceCodeType = "InvalidVersionForPageBlobOperation"
// ServiceCodeLeaseAlreadyPresent means there is already a lease present. // ServiceCodeLeaseAlreadyPresent means there is already a lease present.
ServiceCodeLeaseAlreadyPresent ServiceCodeType = "LeaseAlreadyPresent" ServiceCodeLeaseAlreadyPresent ServiceCodeType = "LeaseAlreadyPresent"
// ServiceCodeLeaseAlreadyBroken means the lease has already been broken and cannot be broken again. // ServiceCodeLeaseAlreadyBroken means the lease has already been broken and cannot be broken again.
ServiceCodeLeaseAlreadyBroken ServiceCodeType = "LeaseAlreadyBroken" ServiceCodeLeaseAlreadyBroken ServiceCodeType = "LeaseAlreadyBroken"
// ServiceCodeLeaseIDMismatchWithBlobOperation means the lease ID specified did not match the lease ID for the blob. // ServiceCodeLeaseIDMismatchWithBlobOperation means the lease ID specified did not match the lease ID for the blob.
ServiceCodeLeaseIDMismatchWithBlobOperation ServiceCodeType = "LeaseIdMismatchWithBlobOperation" ServiceCodeLeaseIDMismatchWithBlobOperation ServiceCodeType = "LeaseIdMismatchWithBlobOperation"
// ServiceCodeLeaseIDMismatchWithContainerOperation means the lease ID specified did not match the lease ID for the container. // ServiceCodeLeaseIDMismatchWithContainerOperation means the lease ID specified did not match the lease ID for the container.
ServiceCodeLeaseIDMismatchWithContainerOperation ServiceCodeType = "LeaseIdMismatchWithContainerOperation" ServiceCodeLeaseIDMismatchWithContainerOperation ServiceCodeType = "LeaseIdMismatchWithContainerOperation"
// ServiceCodeLeaseIDMismatchWithLeaseOperation means the lease ID specified did not match the lease ID for the blob/container. // ServiceCodeLeaseIDMismatchWithLeaseOperation means the lease ID specified did not match the lease ID for the blob/container.
ServiceCodeLeaseIDMismatchWithLeaseOperation ServiceCodeType = "LeaseIdMismatchWithLeaseOperation" ServiceCodeLeaseIDMismatchWithLeaseOperation ServiceCodeType = "LeaseIdMismatchWithLeaseOperation"
// ServiceCodeLeaseIDMissing means there is currently a lease on the blob/container and no lease ID was specified in the request. // ServiceCodeLeaseIDMissing means there is currently a lease on the blob/container and no lease ID was specified in the request.
ServiceCodeLeaseIDMissing ServiceCodeType = "LeaseIdMissing" ServiceCodeLeaseIDMissing ServiceCodeType = "LeaseIdMissing"
// ServiceCodeLeaseIsBreakingAndCannotBeAcquired means the lease ID matched, but the lease is currently in breaking state and cannot be acquired until it is broken. // ServiceCodeLeaseIsBreakingAndCannotBeAcquired means the lease ID matched, but the lease is currently in breaking state and cannot be acquired until it is broken.
ServiceCodeLeaseIsBreakingAndCannotBeAcquired ServiceCodeType = "LeaseIsBreakingAndCannotBeAcquired" ServiceCodeLeaseIsBreakingAndCannotBeAcquired ServiceCodeType = "LeaseIsBreakingAndCannotBeAcquired"
// ServiceCodeLeaseIsBreakingAndCannotBeChanged means the lease ID matched, but the lease is currently in breaking state and cannot be changed. // ServiceCodeLeaseIsBreakingAndCannotBeChanged means the lease ID matched, but the lease is currently in breaking state and cannot be changed.
ServiceCodeLeaseIsBreakingAndCannotBeChanged ServiceCodeType = "LeaseIsBreakingAndCannotBeChanged" ServiceCodeLeaseIsBreakingAndCannotBeChanged ServiceCodeType = "LeaseIsBreakingAndCannotBeChanged"
// ServiceCodeLeaseIsBrokenAndCannotBeRenewed means the lease ID matched, but the lease has been broken explicitly and cannot be renewed. // ServiceCodeLeaseIsBrokenAndCannotBeRenewed means the lease ID matched, but the lease has been broken explicitly and cannot be renewed.
ServiceCodeLeaseIsBrokenAndCannotBeRenewed ServiceCodeType = "LeaseIsBrokenAndCannotBeRenewed" ServiceCodeLeaseIsBrokenAndCannotBeRenewed ServiceCodeType = "LeaseIsBrokenAndCannotBeRenewed"
// ServiceCodeLeaseLost means a lease ID was specified, but the lease for the blob/container has expired. // ServiceCodeLeaseLost means a lease ID was specified, but the lease for the blob/container has expired.
ServiceCodeLeaseLost ServiceCodeType = "LeaseLost" ServiceCodeLeaseLost ServiceCodeType = "LeaseLost"
// ServiceCodeLeaseNotPresentWithBlobOperation means there is currently no lease on the blob. // ServiceCodeLeaseNotPresentWithBlobOperation means there is currently no lease on the blob.
ServiceCodeLeaseNotPresentWithBlobOperation ServiceCodeType = "LeaseNotPresentWithBlobOperation" ServiceCodeLeaseNotPresentWithBlobOperation ServiceCodeType = "LeaseNotPresentWithBlobOperation"
// ServiceCodeLeaseNotPresentWithContainerOperation means there is currently no lease on the container. // ServiceCodeLeaseNotPresentWithContainerOperation means there is currently no lease on the container.
ServiceCodeLeaseNotPresentWithContainerOperation ServiceCodeType = "LeaseNotPresentWithContainerOperation" ServiceCodeLeaseNotPresentWithContainerOperation ServiceCodeType = "LeaseNotPresentWithContainerOperation"
// ServiceCodeLeaseNotPresentWithLeaseOperation means there is currently no lease on the blob/container. // ServiceCodeLeaseNotPresentWithLeaseOperation means there is currently no lease on the blob/container.
ServiceCodeLeaseNotPresentWithLeaseOperation ServiceCodeType = "LeaseNotPresentWithLeaseOperation" ServiceCodeLeaseNotPresentWithLeaseOperation ServiceCodeType = "LeaseNotPresentWithLeaseOperation"
// ServiceCodeMaxBlobSizeConditionNotMet means the max blob size condition specified was not met. // ServiceCodeMaxBlobSizeConditionNotMet means the max blob size condition specified was not met.
ServiceCodeMaxBlobSizeConditionNotMet ServiceCodeType = "MaxBlobSizeConditionNotMet" ServiceCodeMaxBlobSizeConditionNotMet ServiceCodeType = "MaxBlobSizeConditionNotMet"
// ServiceCodeNoPendingCopyOperation means there is currently no pending copy operation. // ServiceCodeNoPendingCopyOperation means there is currently no pending copy operation.
ServiceCodeNoPendingCopyOperation ServiceCodeType = "NoPendingCopyOperation" ServiceCodeNoPendingCopyOperation ServiceCodeType = "NoPendingCopyOperation"
// ServiceCodeOperationNotAllowedOnIncrementalCopyBlob means the specified operation is not allowed on an incremental copy blob. // ServiceCodeOperationNotAllowedOnIncrementalCopyBlob means the specified operation is not allowed on an incremental copy blob.
ServiceCodeOperationNotAllowedOnIncrementalCopyBlob ServiceCodeType = "OperationNotAllowedOnIncrementalCopyBlob" ServiceCodeOperationNotAllowedOnIncrementalCopyBlob ServiceCodeType = "OperationNotAllowedOnIncrementalCopyBlob"
// ServiceCodePendingCopyOperation means there is currently a pending copy operation. // ServiceCodePendingCopyOperation means there is currently a pending copy operation.
ServiceCodePendingCopyOperation ServiceCodeType = "PendingCopyOperation" ServiceCodePendingCopyOperation ServiceCodeType = "PendingCopyOperation"
// ServiceCodePreviousSnapshotCannotBeNewer means the prevsnapshot query parameter value cannot be newer than the snapshot query parameter value. // ServiceCodePreviousSnapshotCannotBeNewer means the prevsnapshot query parameter value cannot be newer than the snapshot query parameter value.
ServiceCodePreviousSnapshotCannotBeNewer ServiceCodeType = "PreviousSnapshotCannotBeNewer" ServiceCodePreviousSnapshotCannotBeNewer ServiceCodeType = "PreviousSnapshotCannotBeNewer"
// ServiceCodePreviousSnapshotNotFound means the previous snapshot is not found. // ServiceCodePreviousSnapshotNotFound means the previous snapshot is not found.
ServiceCodePreviousSnapshotNotFound ServiceCodeType = "PreviousSnapshotNotFound" ServiceCodePreviousSnapshotNotFound ServiceCodeType = "PreviousSnapshotNotFound"
// ServiceCodePreviousSnapshotOperationNotSupported means that differential Get Page Ranges is not supported on the previous snapshot. // ServiceCodePreviousSnapshotOperationNotSupported means that differential Get Page Ranges is not supported on the previous snapshot.
ServiceCodePreviousSnapshotOperationNotSupported ServiceCodeType = "PreviousSnapshotOperationNotSupported" ServiceCodePreviousSnapshotOperationNotSupported ServiceCodeType = "PreviousSnapshotOperationNotSupported"
// ServiceCodeSequenceNumberConditionNotMet means the sequence number condition specified was not met. // ServiceCodeSequenceNumberConditionNotMet means the sequence number condition specified was not met.
ServiceCodeSequenceNumberConditionNotMet ServiceCodeType = "SequenceNumberConditionNotMet" ServiceCodeSequenceNumberConditionNotMet ServiceCodeType = "SequenceNumberConditionNotMet"
// ServiceCodeSequenceNumberIncrementTooLarge means the sequence number increment cannot be performed because it would result in overflow of the sequence number. // ServiceCodeSequenceNumberIncrementTooLarge means the sequence number increment cannot be performed because it would result in overflow of the sequence number.
ServiceCodeSequenceNumberIncrementTooLarge ServiceCodeType = "SequenceNumberIncrementTooLarge" ServiceCodeSequenceNumberIncrementTooLarge ServiceCodeType = "SequenceNumberIncrementTooLarge"
// ServiceCodeSnapshotCountExceeded means the snapshot count against this blob has been exceeded. // ServiceCodeSnapshotCountExceeded means the snapshot count against this blob has been exceeded.
ServiceCodeSnapshotCountExceeded ServiceCodeType = "SnapshotCountExceeded" ServiceCodeSnapshotCountExceeded ServiceCodeType = "SnapshotCountExceeded"
// ServiceCodeSnaphotOperationRateExceeded means the rate of snapshot operations against this blob has been exceeded. // ServiceCodeSnaphotOperationRateExceeded means the rate of snapshot operations against this blob has been exceeded.
ServiceCodeSnaphotOperationRateExceeded ServiceCodeType = "SnaphotOperationRateExceeded" ServiceCodeSnaphotOperationRateExceeded ServiceCodeType = "SnaphotOperationRateExceeded"
// ServiceCodeSnapshotsPresent means this operation is not permitted while the blob has snapshots. // ServiceCodeSnapshotsPresent means this operation is not permitted while the blob has snapshots.
ServiceCodeSnapshotsPresent ServiceCodeType = "SnapshotsPresent" ServiceCodeSnapshotsPresent ServiceCodeType = "SnapshotsPresent"
// ServiceCodeSourceConditionNotMet means the source condition specified using HTTP conditional header(s) is not met. // ServiceCodeSourceConditionNotMet means the source condition specified using HTTP conditional header(s) is not met.
ServiceCodeSourceConditionNotMet ServiceCodeType = "SourceConditionNotMet" ServiceCodeSourceConditionNotMet ServiceCodeType = "SourceConditionNotMet"
// ServiceCodeSystemInUse means this blob is in use by the system. // ServiceCodeSystemInUse means this blob is in use by the system.
ServiceCodeSystemInUse ServiceCodeType = "SystemInUse" ServiceCodeSystemInUse ServiceCodeType = "SystemInUse"
// ServiceCodeTargetConditionNotMet means the target condition specified using HTTP conditional header(s) is not met. // ServiceCodeTargetConditionNotMet means the target condition specified using HTTP conditional header(s) is not met.
ServiceCodeTargetConditionNotMet ServiceCodeType = "TargetConditionNotMet" ServiceCodeTargetConditionNotMet ServiceCodeType = "TargetConditionNotMet"
// ServiceCodeUnauthorizedBlobOverwrite means this request is not authorized to perform blob overwrites. // ServiceCodeUnauthorizedBlobOverwrite means this request is not authorized to perform blob overwrites.
ServiceCodeUnauthorizedBlobOverwrite ServiceCodeType = "UnauthorizedBlobOverwrite" ServiceCodeUnauthorizedBlobOverwrite ServiceCodeType = "UnauthorizedBlobOverwrite"
// ServiceCodeBlobBeingRehydrated means this operation is not permitted because the blob is being rehydrated. // ServiceCodeBlobBeingRehydrated means this operation is not permitted because the blob is being rehydrated.
ServiceCodeBlobBeingRehydrated ServiceCodeType = "BlobBeingRehydrated" ServiceCodeBlobBeingRehydrated ServiceCodeType = "BlobBeingRehydrated"
// ServiceCodeBlobArchived means this operation is not permitted on an archived blob. // ServiceCodeBlobArchived means this operation is not permitted on an archived blob.
ServiceCodeBlobArchived ServiceCodeType = "BlobArchived" ServiceCodeBlobArchived ServiceCodeType = "BlobArchived"
// ServiceCodeBlobNotArchived means this blob is currently not in the archived state. // ServiceCodeBlobNotArchived means this blob is currently not in the archived state.
ServiceCodeBlobNotArchived ServiceCodeType = "BlobNotArchived" ServiceCodeBlobNotArchived ServiceCodeType = "BlobNotArchived"
) )
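These service-code constants are typically consumed by inspecting a failed operation's error. A minimal sketch, assuming the package exposes a StorageError interface with a ServiceCode() accessor (that interface is an assumption of this sketch, not something this diff shows):

func isContainerMissing(err error) bool {
	// StorageError and ServiceCode() are assumed; adjust to the actual error type.
	if serr, ok := err.(StorageError); ok {
		return serr.ServiceCode() == ServiceCodeContainerNotFound
	}
	return false
}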

2017-07-29/azblob/url_append_blob.go Normal file → Executable file

@ -4,11 +4,18 @@ import (
"context" "context"
"io" "io"
"net/url" "net/url"
"time"
"github.com/Azure/azure-pipeline-go/pipeline" "github.com/Azure/azure-pipeline-go/pipeline"
) )
const (
// AppendBlobMaxAppendBlockBytes indicates the maximum number of bytes that can be sent in a call to AppendBlock.
AppendBlobMaxAppendBlockBytes = 4 * 1024 * 1024 // 4MB
// AppendBlobMaxBlocks indicates the maximum number of blocks allowed in an append blob.
AppendBlobMaxBlocks = 50000
)
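A hypothetical chunking helper (not part of this commit) showing how the AppendBlobMaxAppendBlockBytes limit constrains callers: payloads larger than the limit must be split across multiple AppendBlock calls. The appendChunk callback stands in for an AppendBlock invocation.

func splitForAppend(data []byte, appendChunk func([]byte) error) error {
	for len(data) > 0 {
		n := len(data)
		if n > AppendBlobMaxAppendBlockBytes {
			n = AppendBlobMaxAppendBlockBytes // cap each chunk at the service limit
		}
		if err := appendChunk(data[:n]); err != nil {
			return err
		}
		data = data[n:]
	}
	return nil
}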
// AppendBlobURL defines a set of operations applicable to append blobs. // AppendBlobURL defines a set of operations applicable to append blobs.
type AppendBlobURL struct { type AppendBlobURL struct {
BlobURL BlobURL
@ -28,8 +35,8 @@ func (ab AppendBlobURL) WithPipeline(p pipeline.Pipeline) AppendBlobURL {
} }
// WithSnapshot creates a new AppendBlobURL object identical to the source but with the specified snapshot timestamp. // WithSnapshot creates a new AppendBlobURL object identical to the source but with the specified snapshot timestamp.
// Pass time.Time{} to remove the snapshot returning a URL to the base blob. // Pass "" to remove the snapshot returning a URL to the base blob.
func (ab AppendBlobURL) WithSnapshot(snapshot time.Time) AppendBlobURL { func (ab AppendBlobURL) WithSnapshot(snapshot string) AppendBlobURL {
p := NewBlobURLParts(ab.URL()) p := NewBlobURLParts(ab.URL())
p.Snapshot = snapshot p.Snapshot = snapshot
return NewAppendBlobURL(p.URL(), ab.blobClient.Pipeline()) return NewAppendBlobURL(p.URL(), ab.blobClient.Pipeline())
@ -37,22 +44,23 @@ func (ab AppendBlobURL) WithSnapshot(snapshot time.Time) AppendBlobURL {
// Create creates a 0-length append blob. Call AppendBlock to append data to an append blob. // Create creates a 0-length append blob. Call AppendBlock to append data to an append blob.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-blob. // For more information, see https://docs.microsoft.com/rest/api/storageservices/put-blob.
func (ab AppendBlobURL) Create(ctx context.Context, metadata Metadata, h BlobHTTPHeaders, ac BlobAccessConditions) (*BlobsPutResponse, error) { func (ab AppendBlobURL) Create(ctx context.Context, h BlobHTTPHeaders, metadata Metadata, ac BlobAccessConditions) (*AppendBlobsCreateResponse, error) {
ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch := ac.HTTPAccessConditions.pointers() ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch := ac.HTTPAccessConditions.pointers()
return ab.blobClient.Put(ctx, BlobAppendBlob, nil, nil, nil, return ab.abClient.Create(ctx, 0, nil,
&h.ContentType, &h.ContentEncoding, &h.ContentLanguage, h.contentMD5Pointer(), &h.CacheControl, &h.ContentType, &h.ContentEncoding, &h.ContentLanguage, h.ContentMD5,
metadata, ac.LeaseAccessConditions.pointers(), &h.CacheControl, metadata, ac.LeaseAccessConditions.pointers(), &h.ContentDisposition,
&h.ContentDisposition, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, nil)
ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, nil, nil, nil)
} }
// AppendBlock commits a new block of data to the end of the existing append blob. // AppendBlock appends a stream of data as a new block at the end of the existing append blob.
// This method panics if the stream is not at position 0.
// Note that the http client closes the body stream after the request is sent to the service.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/append-block. // For more information, see https://docs.microsoft.com/rest/api/storageservices/append-block.
func (ab AppendBlobURL) AppendBlock(ctx context.Context, body io.ReadSeeker, ac BlobAccessConditions) (*AppendBlobsAppendBlockResponse, error) { func (ab AppendBlobURL) AppendBlock(ctx context.Context, body io.ReadSeeker, ac BlobAccessConditions) (*AppendBlobsAppendBlockResponse, error) {
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.HTTPAccessConditions.pointers() ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.HTTPAccessConditions.pointers()
ifAppendPositionEqual, ifMaxSizeLessThanOrEqual := ac.AppendBlobAccessConditions.pointers() ifAppendPositionEqual, ifMaxSizeLessThanOrEqual := ac.AppendBlobAccessConditions.pointers()
return ab.abClient.AppendBlock(ctx, body, nil, ac.LeaseAccessConditions.pointers(), return ab.abClient.AppendBlock(ctx, validateSeekableStreamAt0AndGetCount(body), body, nil,
ac.LeaseAccessConditions.pointers(),
ifMaxSizeLessThanOrEqual, ifAppendPositionEqual, ifMaxSizeLessThanOrEqual, ifAppendPositionEqual,
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil) ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
} }
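Putting Create and AppendBlock together, a usage sketch against the renamed API. The context and strings imports are assumed, and abURL stands for an AppendBlobURL built elsewhere with NewAppendBlobURL.

func appendHello(ctx context.Context, abURL AppendBlobURL) error {
	// Create a 0-length append blob, then append one block.
	if _, err := abURL.Create(ctx, BlobHTTPHeaders{}, Metadata{}, BlobAccessConditions{}); err != nil {
		return err
	}
	// strings.NewReader satisfies io.ReadSeeker and starts at position 0.
	_, err := abURL.AppendBlock(ctx, strings.NewReader("hello"), BlobAccessConditions{})
	return err
}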

2017-07-29/azblob/url_blob.go Normal file → Executable file

@ -2,12 +2,8 @@ package azblob
import ( import (
"context" "context"
"fmt"
"net/url"
"strconv"
"time"
"github.com/Azure/azure-pipeline-go/pipeline" "github.com/Azure/azure-pipeline-go/pipeline"
"net/url"
) )
// A BlobURL represents a URL to an Azure Storage blob; the blob may be a block blob, append blob, or page blob. // A BlobURL represents a URL to an Azure Storage blob; the blob may be a block blob, append blob, or page blob.
@ -44,8 +40,8 @@ func (b BlobURL) WithPipeline(p pipeline.Pipeline) BlobURL {
} }
// WithSnapshot creates a new BlobURL object identical to the source but with the specified snapshot timestamp. // WithSnapshot creates a new BlobURL object identical to the source but with the specified snapshot timestamp.
// Pass time.Time{} to remove the snapshot returning a URL to the base blob. // Pass "" to remove the snapshot returning a URL to the base blob.
func (b BlobURL) WithSnapshot(snapshot time.Time) BlobURL { func (b BlobURL) WithSnapshot(snapshot string) BlobURL {
p := NewBlobURLParts(b.URL()) p := NewBlobURLParts(b.URL())
p.Snapshot = snapshot p.Snapshot = snapshot
return NewBlobURL(p.URL(), b.blobClient.Pipeline()) return NewBlobURL(p.URL(), b.blobClient.Pipeline())
@ -66,66 +62,30 @@ func (b BlobURL) ToPageBlobURL() PageBlobURL {
return NewPageBlobURL(b.URL(), b.blobClient.Pipeline()) return NewPageBlobURL(b.URL(), b.blobClient.Pipeline())
} }
// StartCopy copies the data at the source URL to a blob. // Download reads a range of bytes from a blob. The response also includes the blob's properties and metadata.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/copy-blob.
func (b BlobURL) StartCopy(ctx context.Context, source url.URL, metadata Metadata, srcac BlobAccessConditions, dstac BlobAccessConditions) (*BlobsCopyResponse, error) {
srcIfModifiedSince, srcIfUnmodifiedSince, srcIfMatchETag, srcIfNoneMatchETag := srcac.HTTPAccessConditions.pointers()
dstIfModifiedSince, dstIfUnmodifiedSince, dstIfMatchETag, dstIfNoneMatchETag := dstac.HTTPAccessConditions.pointers()
srcLeaseID := srcac.LeaseAccessConditions.pointers()
dstLeaseID := dstac.LeaseAccessConditions.pointers()
return b.blobClient.Copy(ctx, source.String(), nil, metadata,
srcIfModifiedSince, srcIfUnmodifiedSince,
srcIfMatchETag, srcIfNoneMatchETag,
dstIfModifiedSince, dstIfUnmodifiedSince,
dstIfMatchETag, dstIfNoneMatchETag,
dstLeaseID, srcLeaseID, nil)
}
// AbortCopy stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/abort-copy-blob.
func (b BlobURL) AbortCopy(ctx context.Context, copyID string, ac LeaseAccessConditions) (*BlobsAbortCopyResponse, error) {
return b.blobClient.AbortCopy(ctx, copyID, "abort", nil, ac.pointers(), nil)
}
// BlobRange defines a range of bytes within a blob, starting at Offset and ending
// at Offset+Count. Use a zero-value BlobRange to indicate the entire blob.
type BlobRange struct {
Offset int64
Count int64
}
func (dr *BlobRange) pointers() *string {
if dr.Offset < 0 {
panic("The blob's range Offset must be >= 0")
}
if dr.Count < 0 {
panic("The blob's range Count must be >= 0")
}
if dr.Offset == 0 && dr.Count == 0 {
return nil
}
endRange := ""
if dr.Count > 0 {
endRange = strconv.FormatInt((dr.Offset+dr.Count)-1, 10)
}
dataRange := fmt.Sprintf("bytes=%v-%s", dr.Offset, endRange)
return &dataRange
}
// GetBlob reads a range of bytes from a blob. The response also includes the blob's properties and metadata.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-blob. // For more information, see https://docs.microsoft.com/rest/api/storageservices/get-blob.
func (b BlobURL) GetBlob(ctx context.Context, blobRange BlobRange, ac BlobAccessConditions, rangeGetContentMD5 bool) (*GetResponse, error) { func (b BlobURL) Download(ctx context.Context, offset int64, count int64, ac BlobAccessConditions, rangeGetContentMD5 bool) (*DownloadResponse, error) {
var xRangeGetContentMD5 *bool var xRangeGetContentMD5 *bool
if rangeGetContentMD5 { if rangeGetContentMD5 {
xRangeGetContentMD5 = &rangeGetContentMD5 xRangeGetContentMD5 = &rangeGetContentMD5
} }
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.HTTPAccessConditions.pointers() ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.HTTPAccessConditions.pointers()
return b.blobClient.Get(ctx, nil, nil, blobRange.pointers(), ac.LeaseAccessConditions.pointers(), xRangeGetContentMD5, dr, err := b.blobClient.Download(ctx, nil, nil,
httpRange{offset: offset, count: count}.pointers(),
ac.LeaseAccessConditions.pointers(), xRangeGetContentMD5,
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil) ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
if err != nil {
return nil, err
}
return &DownloadResponse{
b: b,
r: dr,
ctx: ctx,
getInfo: HTTPGetterInfo{Offset: offset, Count: count, ETag: dr.ETag()},
}, err
} }
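A consumption sketch for Download. It assumes the DownloadResponse wrapper exposes its retry-capable stream via a Body(RetryReaderOptions) accessor; both names are assumptions about the wrapper type, not guaranteed by this diff.

func readRange(ctx context.Context, blob BlobURL, count int64, w io.Writer) error {
	dr, err := blob.Download(ctx, 0, count, BlobAccessConditions{}, false)
	if err != nil {
		return err
	}
	body := dr.Body(RetryReaderOptions{}) // assumed accessor on DownloadResponse
	defer body.Close()
	_, err = io.Copy(w, body)
	return err
}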
// Delete marks the specified blob or snapshot for deletion. The blob is later deleted during garbage collection. // Delete marks the specified blob or snapshot for deletion. The blob is later deleted during garbage collection.
// Note that deleting a blob also deletes all its snapshots. // Note that deleting a blob also deletes all its snapshots.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/delete-blob. // For more information, see https://docs.microsoft.com/rest/api/storageservices/delete-blob.
func (b BlobURL) Delete(ctx context.Context, deleteOptions DeleteSnapshotsOptionType, ac BlobAccessConditions) (*BlobsDeleteResponse, error) { func (b BlobURL) Delete(ctx context.Context, deleteOptions DeleteSnapshotsOptionType, ac BlobAccessConditions) (*BlobsDeleteResponse, error) {
@ -134,26 +94,41 @@ func (b BlobURL) Delete(ctx context.Context, deleteOptions DeleteSnapshotsOption
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil) ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
} }
// GetPropertiesAndMetadata returns the blob's metadata and properties. // Undelete restores the contents and metadata of a soft-deleted blob and any associated soft-deleted snapshots.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/undelete-blob.
func (b BlobURL) Undelete(ctx context.Context) (*BlobsUndeleteResponse, error) {
return b.blobClient.Undelete(ctx, nil, nil)
}
// SetTier operation sets the tier on a blob. The operation is allowed on a page
// blob in a premium storage account and on a block blob in a blob storage account (locally
// redundant storage only). A premium page blob's tier determines the allowed size, IOPS, and
// bandwidth of the blob. A block blob's tier determines Hot/Cool/Archive storage type. This operation
// does not update the blob's ETag.
// For detailed information about block blob level tiering see https://docs.microsoft.com/en-us/azure/storage/blobs/storage-blob-storage-tiers.
func (b BlobURL) SetTier(ctx context.Context, tier AccessTierType) (*BlobsSetTierResponse, error) {
return b.blobClient.SetTier(ctx, tier, nil, nil)
}
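A one-line usage sketch for SetTier. AccessTierArchive is assumed to be among the generated AccessTierType constants; substitute the appropriate value if this commit names it differently.

func archiveBlob(ctx context.Context, blob BlobURL) error {
	// Move the blob to the Archive tier; this does not update the blob's ETag.
	_, err := blob.SetTier(ctx, AccessTierArchive)
	return err
}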
// GetProperties returns the blob's properties.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-blob-properties. // For more information, see https://docs.microsoft.com/rest/api/storageservices/get-blob-properties.
func (b BlobURL) GetPropertiesAndMetadata(ctx context.Context, ac BlobAccessConditions) (*BlobsGetPropertiesResponse, error) { func (b BlobURL) GetProperties(ctx context.Context, ac BlobAccessConditions) (*BlobsGetPropertiesResponse, error) {
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.HTTPAccessConditions.pointers() ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.HTTPAccessConditions.pointers()
return b.blobClient.GetProperties(ctx, nil, nil, ac.LeaseAccessConditions.pointers(), return b.blobClient.GetProperties(ctx, nil, nil, ac.LeaseAccessConditions.pointers(),
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil) ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
// NOTE: GetMetadata actually calls GetProperties since this returns a the properties AND the metadata
} }
// SetProperties changes a blob's HTTP header properties. // SetHTTPHeaders changes a blob's HTTP headers.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/set-blob-properties. // For more information, see https://docs.microsoft.com/rest/api/storageservices/set-blob-properties.
func (b BlobURL) SetProperties(ctx context.Context, h BlobHTTPHeaders, ac BlobAccessConditions) (*BlobsSetPropertiesResponse, error) { func (b BlobURL) SetHTTPHeaders(ctx context.Context, h BlobHTTPHeaders, ac BlobAccessConditions) (*BlobsSetHTTPHeadersResponse, error) {
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.HTTPAccessConditions.pointers() ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.HTTPAccessConditions.pointers()
return b.blobClient.SetProperties(ctx, nil, return b.blobClient.SetHTTPHeaders(ctx, nil,
&h.CacheControl, &h.ContentType, h.contentMD5Pointer(), &h.ContentEncoding, &h.ContentLanguage, &h.CacheControl, &h.ContentType, h.ContentMD5, &h.ContentEncoding, &h.ContentLanguage,
ac.LeaseAccessConditions.pointers(), ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, ac.LeaseAccessConditions.pointers(), ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag,
&h.ContentDisposition, nil, SequenceNumberActionNone, nil, nil) &h.ContentDisposition, nil)
} }
// SetMetadata changes a blob's metadata. // SetMetadata changes a blob's metadata.
// https://docs.microsoft.com/rest/api/storageservices/set-blob-metadata. // https://docs.microsoft.com/rest/api/storageservices/set-blob-metadata.
func (b BlobURL) SetMetadata(ctx context.Context, metadata Metadata, ac BlobAccessConditions) (*BlobsSetMetadataResponse, error) { func (b BlobURL) SetMetadata(ctx context.Context, metadata Metadata, ac BlobAccessConditions) (*BlobsSetMetadataResponse, error) {
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.HTTPAccessConditions.pointers() ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.HTTPAccessConditions.pointers()
@ -163,54 +138,54 @@ func (b BlobURL) SetMetadata(ctx context.Context, metadata Metadata, ac BlobAcce
// CreateSnapshot creates a read-only snapshot of a blob. // CreateSnapshot creates a read-only snapshot of a blob.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/snapshot-blob. // For more information, see https://docs.microsoft.com/rest/api/storageservices/snapshot-blob.
func (b BlobURL) CreateSnapshot(ctx context.Context, metadata Metadata, ac BlobAccessConditions) (*BlobsTakeSnapshotResponse, error) { func (b BlobURL) CreateSnapshot(ctx context.Context, metadata Metadata, ac BlobAccessConditions) (*BlobsCreateSnapshotResponse, error) {
// CreateSnapshot does NOT panic if the user tries to create a snapshot using a URL that already has a snapshot query parameter // CreateSnapshot does NOT panic if the user tries to create a snapshot using a URL that already has a snapshot query parameter
// because checking this would be a performance hit for a VERY unusual path and I don't think the common case should suffer this // because checking this would be a performance hit for a VERY unusual path and I don't think the common case should suffer this
// performance hit. // performance hit.
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.HTTPAccessConditions.pointers() ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.HTTPAccessConditions.pointers()
return b.blobClient.TakeSnapshot(ctx, nil, metadata, ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, ac.LeaseAccessConditions.pointers(), nil) return b.blobClient.CreateSnapshot(ctx, nil, metadata, ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, ac.LeaseAccessConditions.pointers(), nil)
} }
// AcquireLease acquires a lease on the blob for write and delete operations. The lease duration must be between // AcquireLease acquires a lease on the blob for write and delete operations. The lease duration must be between
// 15 to 60 seconds, or infinite (-1). // 15 to 60 seconds, or infinite (-1).
// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-blob. // For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-blob.
func (b BlobURL) AcquireLease(ctx context.Context, proposedID string, duration int32, ac HTTPAccessConditions) (*BlobsLeaseResponse, error) { func (b BlobURL) AcquireLease(ctx context.Context, proposedID string, duration int32, ac HTTPAccessConditions) (*BlobsAcquireLeaseResponse, error) {
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.pointers() ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.pointers()
return b.blobClient.Lease(ctx, LeaseActionAcquire, nil, nil, nil, &duration, &proposedID, return b.blobClient.AcquireLease(ctx, nil, &duration, &proposedID,
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil) ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
} }
// RenewLease renews the blob's previously-acquired lease. // RenewLease renews the blob's previously-acquired lease.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-blob. // For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-blob.
func (b BlobURL) RenewLease(ctx context.Context, leaseID string, ac HTTPAccessConditions) (*BlobsLeaseResponse, error) { func (b BlobURL) RenewLease(ctx context.Context, leaseID string, ac HTTPAccessConditions) (*BlobsRenewLeaseResponse, error) {
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.pointers() ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.pointers()
return b.blobClient.Lease(ctx, LeaseActionRenew, nil, &leaseID, nil, nil, nil, return b.blobClient.RenewLease(ctx, leaseID, nil,
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil) ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
} }
// ReleaseLease releases the blob's previously-acquired lease. // ReleaseLease releases the blob's previously-acquired lease.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-blob. // For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-blob.
func (b BlobURL) ReleaseLease(ctx context.Context, leaseID string, ac HTTPAccessConditions) (*BlobsLeaseResponse, error) { func (b BlobURL) ReleaseLease(ctx context.Context, leaseID string, ac HTTPAccessConditions) (*BlobsReleaseLeaseResponse, error) {
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.pointers() ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.pointers()
return b.blobClient.Lease(ctx, LeaseActionRelease, nil, &leaseID, nil, nil, nil, return b.blobClient.ReleaseLease(ctx, leaseID, nil,
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil) ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
} }
// BreakLease breaks the blob's previously-acquired lease (if it exists). Pass the LeaseBreakDefault (-1) constant to break // BreakLease breaks the blob's previously-acquired lease (if it exists). Pass the LeaseBreakDefault (-1)
// a fixed-duration lease when it expires or an infinite lease immediately. // constant to break a fixed-duration lease when it expires or an infinite lease immediately.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-blob. // For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-blob.
func (b BlobURL) BreakLease(ctx context.Context, leaseID string, breakPeriodInSeconds int32, ac HTTPAccessConditions) (*BlobsLeaseResponse, error) { func (b BlobURL) BreakLease(ctx context.Context, breakPeriodInSeconds int32, ac HTTPAccessConditions) (*BlobsBreakLeaseResponse, error) {
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.pointers() ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.pointers()
return b.blobClient.Lease(ctx, LeaseActionBreak, nil, &leaseID, leasePeriodPointer(breakPeriodInSeconds), nil, nil, return b.blobClient.BreakLease(ctx, nil, leasePeriodPointer(breakPeriodInSeconds),
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil) ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
} }
// ChangeLease changes the blob's lease ID. // ChangeLease changes the blob's lease ID.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-blob. // For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-blob.
func (b BlobURL) ChangeLease(ctx context.Context, leaseID string, proposedID string, ac HTTPAccessConditions) (*BlobsLeaseResponse, error) { func (b BlobURL) ChangeLease(ctx context.Context, leaseID string, proposedID string, ac HTTPAccessConditions) (*BlobsChangeLeaseResponse, error) {
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.pointers() ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.pointers()
return b.blobClient.Lease(ctx, LeaseActionChange, nil, &leaseID, nil, nil, &proposedID, return b.blobClient.ChangeLease(ctx, leaseID, proposedID,
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil) nil, ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
} }
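A lease lifecycle sketch tying the methods above together: acquire a 30-second lease, renew it once, then release it. It assumes the generated lease responses expose a LeaseID() accessor; proposedID must be a GUID string supplied by the caller.

func leaseCycle(ctx context.Context, blob BlobURL, proposedID string) error {
	acq, err := blob.AcquireLease(ctx, proposedID, 30, HTTPAccessConditions{})
	if err != nil {
		return err
	}
	leaseID := acq.LeaseID() // LeaseID() on the generated response is assumed
	if _, err := blob.RenewLease(ctx, leaseID, HTTPAccessConditions{}); err != nil {
		return err
	}
	_, err = blob.ReleaseLease(ctx, leaseID, HTTPAccessConditions{})
	return err
}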
// LeaseBreakNaturally tells ContainerURL's or BlobURL's BreakLease method to break the lease using service semantics. // LeaseBreakNaturally tells ContainerURL's or BlobURL's BreakLease method to break the lease using service semantics.
@ -223,18 +198,24 @@ func leasePeriodPointer(period int32) (p *int32) {
return nil return nil
} }
// Undelete restores the contents and metadata of a soft-deleted blob and any associated soft-deleted snapshots. // StartCopyFromURL copies the data at the source URL to a blob.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/undelete-blob. // For more information, see https://docs.microsoft.com/rest/api/storageservices/copy-blob.
func (b BlobURL) Undelete(ctx context.Context) (*BlobsUndeleteResponse, error) { func (b BlobURL) StartCopyFromURL(ctx context.Context, source url.URL, metadata Metadata, srcac BlobAccessConditions, dstac BlobAccessConditions) (*BlobsStartCopyFromURLResponse, error) {
return b.blobClient.Undelete(ctx, nil, nil) srcIfModifiedSince, srcIfUnmodifiedSince, srcIfMatchETag, srcIfNoneMatchETag := srcac.HTTPAccessConditions.pointers()
dstIfModifiedSince, dstIfUnmodifiedSince, dstIfMatchETag, dstIfNoneMatchETag := dstac.HTTPAccessConditions.pointers()
srcLeaseID := srcac.LeaseAccessConditions.pointers()
dstLeaseID := dstac.LeaseAccessConditions.pointers()
return b.blobClient.StartCopyFromURL(ctx, source.String(), nil, metadata,
srcIfModifiedSince, srcIfUnmodifiedSince,
srcIfMatchETag, srcIfNoneMatchETag,
dstIfModifiedSince, dstIfUnmodifiedSince,
dstIfMatchETag, dstIfNoneMatchETag,
dstLeaseID, srcLeaseID, nil)
} }
// SetBlobTier operation sets the tier on a blob. The operation is allowed on a page // AbortCopyFromURL stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
// blob in a premium storage account and on a block blob in a blob storage account (locally // For more information, see https://docs.microsoft.com/rest/api/storageservices/abort-copy-blob.
// redundant storage only). A premium page blob's tier determines the allowed size, IOPS, and func (b BlobURL) AbortCopyFromURL(ctx context.Context, copyID string, ac LeaseAccessConditions) (*BlobsAbortCopyFromURLResponse, error) {
// bandwidth of the blob. A block blob's tier determines Hot/Cool/Archive storage type. This operation return b.blobClient.AbortCopyFromURL(ctx, copyID, nil, ac.pointers(), nil)
// does not update the blob's ETag.
// For detailed information about block blob level tiering see https://docs.microsoft.com/en-us/azure/storage/blobs/storage-blob-storage-tiers.
func (b BlobURL) SetBlobTier(ctx context.Context, tier AccessTierType) (*BlobsSetBlobTierResponse, error) {
return b.blobClient.SetBlobTier(ctx, tier, nil, nil)
} }
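A copy sketch: start a server-side copy, then poll its status via GetProperties. CopyStatusPending and the CopyStatus() accessors on the generated responses are assumed to match this commit's generated models; time and net/url imports are assumed.

func copyAndWait(ctx context.Context, dst BlobURL, src url.URL) error {
	resp, err := dst.StartCopyFromURL(ctx, src, Metadata{}, BlobAccessConditions{}, BlobAccessConditions{})
	if err != nil {
		return err
	}
	status := resp.CopyStatus() // assumed accessor on the generated response
	for status == CopyStatusPending {
		time.Sleep(2 * time.Second) // simple fixed-interval poll
		props, err := dst.GetProperties(ctx, BlobAccessConditions{})
		if err != nil {
			return err
		}
		status = props.CopyStatus()
	}
	return nil
}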

2017-07-29/azblob/url_block_blob.go Normal file → Executable file

@ -4,17 +4,18 @@ import (
"context" "context"
"io" "io"
"net/url" "net/url"
"time"
"encoding/base64"
"encoding/binary"
"github.com/Azure/azure-pipeline-go/pipeline" "github.com/Azure/azure-pipeline-go/pipeline"
) )
const ( const (
// BlockBlobMaxPutBlobBytes indicates the maximum number of bytes that can be sent in a call to PutBlob. // BlockBlobMaxPutBlobBytes indicates the maximum number of bytes that can be sent in a call to Upload.
BlockBlobMaxPutBlobBytes = 256 * 1024 * 1024 // 256MB BlockBlobMaxUploadBlobBytes = 256 * 1024 * 1024 // 256MB
// BlockBlobMaxPutBlockBytes indicates the maximum number of bytes that can be sent in a call to PutBlock. // BlockBlobMaxStageBlockBytes indicates the maximum number of bytes that can be sent in a call to StageBlock.
BlockBlobMaxPutBlockBytes = 100 * 1024 * 1024 // 100MB BlockBlobMaxStageBlockBytes = 100 * 1024 * 1024 // 100MB
// BlockBlobMaxBlocks indicates the maximum number of blocks allowed in a block blob. // BlockBlobMaxBlocks indicates the maximum number of blocks allowed in a block blob.
BlockBlobMaxBlocks = 50000 BlockBlobMaxBlocks = 50000
@ -42,24 +43,49 @@ func (bb BlockBlobURL) WithPipeline(p pipeline.Pipeline) BlockBlobURL {
} }
// WithSnapshot creates a new BlockBlobURL object identical to the source but with the specified snapshot timestamp. // WithSnapshot creates a new BlockBlobURL object identical to the source but with the specified snapshot timestamp.
// Pass time.Time{} to remove the snapshot returning a URL to the base blob. // Pass "" to remove the snapshot returning a URL to the base blob.
func (bb BlockBlobURL) WithSnapshot(snapshot time.Time) BlockBlobURL { func (bb BlockBlobURL) WithSnapshot(snapshot string) BlockBlobURL {
p := NewBlobURLParts(bb.URL()) p := NewBlobURLParts(bb.URL())
p.Snapshot = snapshot p.Snapshot = snapshot
return NewBlockBlobURL(p.URL(), bb.blobClient.Pipeline()) return NewBlockBlobURL(p.URL(), bb.blobClient.Pipeline())
} }
// PutBlob creates a new block blob, or updates the content of an existing block blob. // Upload creates a new block blob or overwrites an existing block blob.
// Updating an existing block blob overwrites any existing metadata on the blob. Partial updates are not // Updating an existing block blob overwrites any existing metadata on the blob. Partial updates are not
// supported with PutBlob; the content of the existing blob is overwritten with the new content. To // supported with Upload; the content of the existing blob is overwritten with the new content. To
// perform a partial update of a block blob's, use PutBlock and PutBlockList. // perform a partial update of a block blob, use StageBlock and CommitBlockList.
// This method panics if the stream is not at position 0.
// Note that the http client closes the body stream after the request is sent to the service.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-blob. // For more information, see https://docs.microsoft.com/rest/api/storageservices/put-blob.
func (bb BlockBlobURL) PutBlob(ctx context.Context, body io.ReadSeeker, h BlobHTTPHeaders, metadata Metadata, ac BlobAccessConditions) (*BlobsPutResponse, error) { func (bb BlockBlobURL) Upload(ctx context.Context, body io.ReadSeeker, h BlobHTTPHeaders, metadata Metadata, ac BlobAccessConditions) (*BlockBlobsUploadResponse, error) {
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.HTTPAccessConditions.pointers() ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.HTTPAccessConditions.pointers()
return bb.blobClient.Put(ctx, BlobBlockBlob, body, nil, nil, return bb.bbClient.Upload(ctx, validateSeekableStreamAt0AndGetCount(body), body, nil,
&h.ContentType, &h.ContentEncoding, &h.ContentLanguage, h.contentMD5Pointer(), &h.CacheControl, &h.ContentType, &h.ContentEncoding, &h.ContentLanguage, h.ContentMD5,
metadata, ac.LeaseAccessConditions.pointers(), &h.CacheControl, metadata, ac.LeaseAccessConditions.pointers(),
&h.ContentDisposition, ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil, nil, nil) &h.ContentDisposition, ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag,
nil)
}
// StageBlock uploads the specified block to the block blob's "staging area" to be later committed by a call to CommitBlockList.
// Note that the http client closes the body stream after the request is sent to the service.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-block.
func (bb BlockBlobURL) StageBlock(ctx context.Context, base64BlockID string, body io.ReadSeeker, ac LeaseAccessConditions) (*BlockBlobsStageBlockResponse, error) {
return bb.bbClient.StageBlock(ctx, base64BlockID, validateSeekableStreamAt0AndGetCount(body), body, nil, ac.pointers(), nil)
}
// CommitBlockList writes a blob by specifying the list of block IDs that make up the blob.
// In order to be written as part of a blob, a block must have been successfully written
// to the server in a prior StageBlock operation. You can call CommitBlockList to update a blob
// by uploading only those blocks that have changed, then committing the new and existing
// blocks together. Any blocks not specified in the block list are permanently deleted.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-block-list.
func (bb BlockBlobURL) CommitBlockList(ctx context.Context, base64BlockIDs []string, h BlobHTTPHeaders,
metadata Metadata, ac BlobAccessConditions) (*BlockBlobsCommitBlockListResponse, error) {
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.HTTPAccessConditions.pointers()
return bb.bbClient.CommitBlockList(ctx, BlockLookupList{Latest: base64BlockIDs}, nil,
&h.CacheControl, &h.ContentType, &h.ContentEncoding, &h.ContentLanguage, h.ContentMD5,
metadata, ac.LeaseAccessConditions.pointers(), &h.ContentDisposition,
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
} }
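The staged-block flow end to end, using the signatures shown in this diff: stage two blocks, then commit them in order. Block IDs must be Base64 strings of equal length, which the fixed-width source strings below guarantee; the strings import is assumed.

func uploadTwoBlocks(ctx context.Context, bb BlockBlobURL) error {
	ids := []string{
		base64.StdEncoding.EncodeToString([]byte("block-0000")),
		base64.StdEncoding.EncodeToString([]byte("block-0001")),
	}
	if _, err := bb.StageBlock(ctx, ids[0], strings.NewReader("hello, "), LeaseAccessConditions{}); err != nil {
		return err
	}
	if _, err := bb.StageBlock(ctx, ids[1], strings.NewReader("world"), LeaseAccessConditions{}); err != nil {
		return err
	}
	// Committing the list makes the staged blocks the blob's content.
	_, err := bb.CommitBlockList(ctx, ids, BlobHTTPHeaders{}, Metadata{}, BlobAccessConditions{})
	return err
}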
// GetBlockList returns the list of blocks that have been uploaded as part of a block blob using the specified block list filter. // GetBlockList returns the list of blocks that have been uploaded as part of a block blob using the specified block list filter.
@ -68,23 +94,55 @@ func (bb BlockBlobURL) GetBlockList(ctx context.Context, listType BlockListType,
return bb.bbClient.GetBlockList(ctx, listType, nil, nil, ac.pointers(), nil) return bb.bbClient.GetBlockList(ctx, listType, nil, nil, ac.pointers(), nil)
} }
// PutBlock uploads the specified block to the block blob's "staging area" to be later committed by a call to PutBlockList. //////////////////////////////////////////////////////////////////////////////////////////////////////////////
// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-block.
func (bb BlockBlobURL) PutBlock(ctx context.Context, base64BlockID string, body io.ReadSeeker, ac LeaseAccessConditions) (*BlockBlobsPutBlockResponse, error) { type BlockID [64]byte
return bb.bbClient.PutBlock(ctx, base64BlockID, body, nil, ac.pointers(), nil)
func (blockID BlockID) ToBase64() string {
return base64.StdEncoding.EncodeToString(blockID[:])
} }
// PutBlockList writes a blob by specifying the list of block IDs that make up the blob. func (blockID *BlockID) FromBase64(s string) error {
// In order to be written as part of a blob, a block must have been successfully written *blockID = BlockID{} // Zero out the block ID
// to the server in a prior PutBlock operation. You can call PutBlockList to update a blob _, err := base64.StdEncoding.Decode(blockID[:], ([]byte)(s))
// by uploading only those blocks that have changed, then committing the new and existing return err
// blocks together. Any blocks not specified in the block list are permanently deleted. }
// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-block-list.
func (bb BlockBlobURL) PutBlockList(ctx context.Context, base64BlockIDs []string, metadata Metadata, //////////////////////////////////////////////////////////////////////////////////////////////////////////////
h BlobHTTPHeaders, ac BlobAccessConditions) (*BlockBlobsPutBlockListResponse, error) {
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.HTTPAccessConditions.pointers() type uuidBlockID BlockID
return bb.bbClient.PutBlockList(ctx, BlockLookupList{Latest: base64BlockIDs}, nil,
&h.CacheControl, &h.ContentType, &h.ContentEncoding, &h.ContentLanguage, h.contentMD5Pointer(), func (ubi uuidBlockID) UUID() uuid {
metadata, ac.LeaseAccessConditions.pointers(), &h.ContentDisposition, u := uuid{}
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil) copy(u[:], ubi[:len(u)])
return u
}
func (ubi uuidBlockID) Number() uint32 {
return binary.BigEndian.Uint32(ubi[len(uuid{}):])
}
func newUuidBlockID(u uuid) uuidBlockID {
ubi := uuidBlockID{} // Create a new uuidBlockID
copy(ubi[:len(u)], u[:]) // Copy the specified UUID into it
// Block number defaults to 0
return ubi
}
func (ubi *uuidBlockID) SetUUID(u uuid) *uuidBlockID {
copy(ubi[:len(u)], u[:])
return ubi
}
func (ubi uuidBlockID) WithBlockNumber(blockNumber uint32) uuidBlockID {
binary.BigEndian.PutUint32(ubi[len(uuid{}):], blockNumber) // Put block number after UUID
return ubi // Return the passed-in copy
}
func (ubi uuidBlockID) ToBase64() string {
return BlockID(ubi).ToBase64()
}
func (ubi *uuidBlockID) FromBase64(s string) error {
return (*BlockID)(ubi).FromBase64(s)
} }
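A small sketch of the BlockID helper introduced above: fixed-width, numbered block IDs whose Base64 encodings all share one length, as StageBlock requires.

func numberedBlockID(n uint32) string {
	id := BlockID{}                       // 64 zero bytes
	binary.BigEndian.PutUint32(id[:4], n) // encode the block number in the first 4 bytes
	return id.ToBase64()                  // every ID encodes to the same length
}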

2017-07-29/azblob/url_container.go Normal file → Executable file

@ -3,15 +3,15 @@ package azblob
import ( import (
"bytes" "bytes"
"context" "context"
"fmt"
"net/url" "net/url"
"strings"
"github.com/Azure/azure-pipeline-go/pipeline" "github.com/Azure/azure-pipeline-go/pipeline"
) )
// A ContainerURL represents a URL to the Azure Storage container allowing you to manipulate its blobs. // A ContainerURL represents a URL to the Azure Storage container allowing you to manipulate its blobs.
type ContainerURL struct { type ContainerURL struct {
client containerClient client containersClient
} }
// NewContainerURL creates a ContainerURL object using the specified URL and request policy pipeline. // NewContainerURL creates a ContainerURL object using the specified URL and request policy pipeline.
@ -19,7 +19,7 @@ func NewContainerURL(url url.URL, p pipeline.Pipeline) ContainerURL {
if p == nil { if p == nil {
panic("p can't be nil") panic("p can't be nil")
} }
client := newContainerClient(url, p) client := newContainersClient(url, p)
return ContainerURL{client: client} return ContainerURL{client: client}
} }
@ -81,32 +81,33 @@ func (c ContainerURL) NewPageBlobURL(blobName string) PageBlobURL {
// Create creates a new container within a storage account. If a container with the same name already exists, the operation fails. // Create creates a new container within a storage account. If a container with the same name already exists, the operation fails.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/create-container. // For more information, see https://docs.microsoft.com/rest/api/storageservices/create-container.
func (c ContainerURL) Create(ctx context.Context, metadata Metadata, publicAccessType PublicAccessType) (*ContainerCreateResponse, error) { func (c ContainerURL) Create(ctx context.Context, metadata Metadata, publicAccessType PublicAccessType) (*ContainersCreateResponse, error) {
return c.client.Create(ctx, nil, metadata, publicAccessType, nil) return c.client.Create(ctx, nil, metadata, publicAccessType, nil)
} }
// Delete marks the specified container for deletion. The container and any blobs contained within it are later deleted during garbage collection. // Delete marks the specified container for deletion. The container and any blobs contained within it are later deleted during garbage collection.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/delete-container. // For more information, see https://docs.microsoft.com/rest/api/storageservices/delete-container.
func (c ContainerURL) Delete(ctx context.Context, ac ContainerAccessConditions) (*ContainerDeleteResponse, error) { func (c ContainerURL) Delete(ctx context.Context, ac ContainerAccessConditions) (*ContainersDeleteResponse, error) {
if ac.IfMatch != ETagNone || ac.IfNoneMatch != ETagNone { if ac.IfMatch != ETagNone || ac.IfNoneMatch != ETagNone {
panic("the IfMatch and IfNoneMatch access conditions must have their default values because they are ignored by the service") panic("the IfMatch and IfNoneMatch access conditions must have their default values because they are ignored by the service")
} }
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.HTTPAccessConditions.pointers() ifModifiedSince, ifUnmodifiedSince, _, _ := ac.HTTPAccessConditions.pointers()
return c.client.Delete(ctx, nil, nil, ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil) return c.client.Delete(ctx, nil, ac.LeaseAccessConditions.pointers(),
ifModifiedSince, ifUnmodifiedSince, nil)
} }
// GetPropertiesAndMetadata returns the container's metadata and system properties. // GetProperties returns the container's properties.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-container-metadata. // For more information, see https://docs.microsoft.com/rest/api/storageservices/get-container-metadata.
func (c ContainerURL) GetPropertiesAndMetadata(ctx context.Context, ac LeaseAccessConditions) (*ContainerGetPropertiesResponse, error) { func (c ContainerURL) GetProperties(ctx context.Context, ac LeaseAccessConditions) (*ContainersGetPropertiesResponse, error) {
// NOTE: GetMetadata actually calls GetProperties internally because GetProperties returns the metadata AND the properties. // NOTE: GetMetadata actually calls GetProperties internally because GetProperties returns the metadata AND the properties.
// This allows us to not expose a GetProperties method at all simplifying the API. // This allows us to not expose a GetProperties method at all simplifying the API.
return c.client.GetProperties(ctx, nil, ac.pointers(), nil) return c.client.GetProperties(ctx, nil, ac.pointers(), nil)
} }
// SetMetadata sets the container's metadata. // SetMetadata sets the container's metadata.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/set-container-metadata. // For more information, see https://docs.microsoft.com/rest/api/storageservices/set-container-metadata.
func (c ContainerURL) SetMetadata(ctx context.Context, metadata Metadata, ac ContainerAccessConditions) (*ContainerSetMetadataResponse, error) { func (c ContainerURL) SetMetadata(ctx context.Context, metadata Metadata, ac ContainerAccessConditions) (*ContainersSetMetadataResponse, error) {
if !ac.IfUnmodifiedSince.IsZero() || ac.IfMatch != ETagNone || ac.IfNoneMatch != ETagNone { if !ac.IfUnmodifiedSince.IsZero() || ac.IfMatch != ETagNone || ac.IfNoneMatch != ETagNone {
panic("the IfUnmodifiedSince, IfMatch, and IfNoneMatch must have their default values because they are ignored by the blob service") panic("the IfUnmodifiedSince, IfMatch, and IfNoneMatch must have their default values because they are ignored by the blob service")
} }
@ -114,10 +115,10 @@ func (c ContainerURL) SetMetadata(ctx context.Context, metadata Metadata, ac Con
return c.client.SetMetadata(ctx, nil, ac.LeaseAccessConditions.pointers(), metadata, ifModifiedSince, nil) return c.client.SetMetadata(ctx, nil, ac.LeaseAccessConditions.pointers(), metadata, ifModifiedSince, nil)
} }
-// GetPermissions returns the container's permissions. The permissions indicate whether container's blobs may be accessed publicly.
+// GetContainerAccessPolicy returns the container's access policy. The access policy indicates whether the container's blobs may be accessed publicly.
 // For more information, see https://docs.microsoft.com/rest/api/storageservices/get-container-acl.
-func (c ContainerURL) GetPermissions(ctx context.Context, ac LeaseAccessConditions) (*SignedIdentifiers, error) {
-	return c.client.GetACL(ctx, nil, ac.pointers(), nil)
+func (c ContainerURL) GetAccessPolicy(ctx context.Context, ac LeaseAccessConditions) (*SignedIdentifiers, error) {
+	return c.client.GetAccessPolicy(ctx, nil, ac.pointers(), nil)
 }

 // The AccessPolicyPermission type simplifies creating the permissions string for a container's access policy.
@@ -152,77 +153,102 @@ func (p AccessPolicyPermission) String() string {
 }

 // Parse initializes the AccessPolicyPermission's fields from a string.
-func (p *AccessPolicyPermission) Parse(s string) {
-	p.Read = strings.ContainsRune(s, 'r')
-	p.Add = strings.ContainsRune(s, 'a')
-	p.Create = strings.ContainsRune(s, 'c')
-	p.Write = strings.ContainsRune(s, 'w')
-	p.Delete = strings.ContainsRune(s, 'd')
-	p.List = strings.ContainsRune(s, 'l')
+func (p *AccessPolicyPermission) Parse(s string) error {
+	*p = AccessPolicyPermission{} // Clear the flags
+	for _, r := range s {
+		switch r {
+		case 'r':
+			p.Read = true
+		case 'a':
+			p.Add = true
+		case 'c':
+			p.Create = true
+		case 'w':
+			p.Write = true
+		case 'd':
+			p.Delete = true
+		case 'l':
+			p.List = true
+		default:
+			return fmt.Errorf("invalid permission: '%v'", r)
+		}
+	}
+	return nil
 }
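Because Parse now rejects unknown permission runes instead of silently ignoring them, a permissions string can be validated on the way in and reproduced with String on the way out. A minimal sketch (the import path is assumed from this repository's layout):

```go
package main

import (
	"fmt"

	"github.com/Azure/azure-storage-blob-go/2017-07-29/azblob"
)

func main() {
	perm := azblob.AccessPolicyPermission{Read: true, Write: true, List: true}
	s := perm.String() // produces the policy string, e.g. "rwl"

	var parsed azblob.AccessPolicyPermission
	if err := parsed.Parse(s); err != nil {
		fmt.Println("invalid permission string:", err) // e.g. for an unknown rune such as 'x'
		return
	}
	fmt.Printf("%+v\n", parsed)
}
```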
-// SetPermissions sets the container's permissions. The permissions indicate whether blobs in a container may be accessed publicly.
+// SetContainerAccessPolicy sets the container's permissions. The access policy indicates whether blobs in a container may be accessed publicly.
 // For more information, see https://docs.microsoft.com/rest/api/storageservices/set-container-acl.
-func (c ContainerURL) SetPermissions(ctx context.Context, accessType PublicAccessType, permissions []SignedIdentifier,
-	ac ContainerAccessConditions) (*ContainerSetACLResponse, error) {
+func (c ContainerURL) SetAccessPolicy(ctx context.Context, accessType PublicAccessType, si []SignedIdentifier,
+	ac ContainerAccessConditions) (*ContainersSetAccessPolicyResponse, error) {
 	if ac.IfMatch != ETagNone || ac.IfNoneMatch != ETagNone {
 		panic("the IfMatch and IfNoneMatch access conditions must have their default values because they are ignored by the service")
 	}
-	ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.HTTPAccessConditions.pointers()
-	return c.client.SetACL(ctx, permissions, nil, nil, accessType, ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
+	ifModifiedSince, ifUnmodifiedSince, _, _ := ac.HTTPAccessConditions.pointers()
+	return c.client.SetAccessPolicy(ctx, si, nil, ac.LeaseAccessConditions.pointers(),
+		accessType, ifModifiedSince, ifUnmodifiedSince, nil)
 }
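A sketch of publishing a stored access policy with the renamed method. The SignedIdentifier/AccessPolicy field names, the PublicAccessNone constant, and the policy name are assumptions, not confirmed by this diff; context/time/azblob imports are presumed:

```go
func setReadOnlyPolicy(ctx context.Context, containerURL azblob.ContainerURL) error {
	id := azblob.SignedIdentifier{
		ID: "read-only-1", // hypothetical policy name
		AccessPolicy: azblob.AccessPolicy{ // field names assumed from the generated models
			Start:      time.Now().UTC(),
			Expiry:     time.Now().UTC().Add(24 * time.Hour),
			Permission: azblob.AccessPolicyPermission{Read: true, List: true}.String(),
		},
	}
	// Keep the container private; access is granted only through the policy.
	_, err := containerURL.SetAccessPolicy(ctx, azblob.PublicAccessNone,
		[]azblob.SignedIdentifier{id}, azblob.ContainerAccessConditions{})
	return err
}
```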
-// AcquireLease acquires a lease on the container for delete operations. The lease duration must be between 15 to 60 seconds, or infinite (-1).
+// AcquireContainerLease acquires a lease on the container for delete operations. The lease duration must be between 15 and 60 seconds, or infinite (-1).
 // For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-container.
-func (c ContainerURL) AcquireLease(ctx context.Context, proposedID string, duration int32, ac HTTPAccessConditions) (*ContainerLeaseResponse, error) {
+func (c ContainerURL) AcquireLease(ctx context.Context, proposedID string, duration int32, ac HTTPAccessConditions) (*ContainersAcquireLeaseResponse, error) {
 	ifModifiedSince, ifUnmodifiedSince, _, _ := ac.pointers()
-	return c.client.Lease(ctx, LeaseActionAcquire, nil, nil, nil, &duration, &proposedID,
+	return c.client.AcquireLease(ctx, nil, &duration, &proposedID,
 		ifModifiedSince, ifUnmodifiedSince, nil)
 }

-// RenewLease renews the container's previously-acquired lease.
+// RenewContainerLease renews the container's previously acquired lease.
 // For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-container.
-func (c ContainerURL) RenewLease(ctx context.Context, leaseID string, ac HTTPAccessConditions) (*ContainerLeaseResponse, error) {
+func (c ContainerURL) RenewLease(ctx context.Context, leaseID string, ac HTTPAccessConditions) (*ContainersRenewLeaseResponse, error) {
 	ifModifiedSince, ifUnmodifiedSince, _, _ := ac.pointers()
-	return c.client.Lease(ctx, LeaseActionRenew, nil, &leaseID, nil, nil, nil, ifModifiedSince, ifUnmodifiedSince, nil)
+	return c.client.RenewLease(ctx, leaseID, nil, ifModifiedSince, ifUnmodifiedSince, nil)
 }

-// ReleaseLease releases the container's previously-acquired lease.
+// ReleaseContainerLease releases the container's previously acquired lease.
 // For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-container.
-func (c ContainerURL) ReleaseLease(ctx context.Context, leaseID string, ac HTTPAccessConditions) (*ContainerLeaseResponse, error) {
+func (c ContainerURL) ReleaseLease(ctx context.Context, leaseID string, ac HTTPAccessConditions) (*ContainersReleaseLeaseResponse, error) {
 	ifModifiedSince, ifUnmodifiedSince, _, _ := ac.pointers()
-	return c.client.Lease(ctx, LeaseActionRelease, nil, &leaseID, nil, nil, nil, ifModifiedSince, ifUnmodifiedSince, nil)
+	return c.client.ReleaseLease(ctx, leaseID, nil, ifModifiedSince, ifUnmodifiedSince, nil)
 }

-// BreakLease breaks the container's previously-acquired lease (if it exists).
+// BreakContainerLease breaks the container's previously acquired lease (if it exists).
 // For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-container.
-func (c ContainerURL) BreakLease(ctx context.Context, leaseID string, period int32, ac HTTPAccessConditions) (*ContainerLeaseResponse, error) {
+func (c ContainerURL) BreakLease(ctx context.Context, period int32, ac HTTPAccessConditions) (*ContainersBreakLeaseResponse, error) {
 	ifModifiedSince, ifUnmodifiedSince, _, _ := ac.pointers()
-	return c.client.Lease(ctx, LeaseActionBreak, nil, &leaseID, leasePeriodPointer(period), nil, nil, ifModifiedSince, ifUnmodifiedSince, nil)
+	return c.client.BreakLease(ctx, nil, leasePeriodPointer(period), ifModifiedSince, ifUnmodifiedSince, nil)
 }

-// ChangeLease changes the container's lease ID.
+// ChangeContainerLease changes the container's lease ID.
 // For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-container.
-func (c ContainerURL) ChangeLease(ctx context.Context, leaseID string, proposedID string, ac HTTPAccessConditions) (*ContainerLeaseResponse, error) {
+func (c ContainerURL) ChangeLease(ctx context.Context, leaseID string, proposedID string, ac HTTPAccessConditions) (*ContainersChangeLeaseResponse, error) {
 	ifModifiedSince, ifUnmodifiedSince, _, _ := ac.pointers()
-	return c.client.Lease(ctx, LeaseActionChange, nil, &leaseID, nil, nil, &proposedID, ifModifiedSince, ifUnmodifiedSince, nil)
+	return c.client.ChangeLease(ctx, leaseID, proposedID, nil, ifModifiedSince, ifUnmodifiedSince, nil)
 }
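The per-action lease methods compose into the usual acquire/renew/release cycle. A sketch, assuming the generated response types expose the lease ID via a LeaseID() accessor and that context/azblob imports are in place:

```go
func leaseCycle(ctx context.Context, c azblob.ContainerURL) error {
	// 15 seconds is the minimum finite lease duration; -1 would be infinite.
	proposedID := "694f5dcb-0f7f-4bd6-a654-39805e5d00c2" // any GUID-shaped string
	acq, err := c.AcquireLease(ctx, proposedID, 15, azblob.HTTPAccessConditions{})
	if err != nil {
		return err
	}
	leaseID := acq.LeaseID() // generated response accessor (assumed)
	if _, err = c.RenewLease(ctx, leaseID, azblob.HTTPAccessConditions{}); err != nil {
		return err
	}
	_, err = c.ReleaseLease(ctx, leaseID, azblob.HTTPAccessConditions{})
	return err
}
```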
-// ListBlobs returns a single segment of blobs starting from the specified Marker. Use an empty
+// ListBlobsFlatSegment returns a single segment of blobs starting from the specified Marker. Use an empty
 // Marker to start enumeration from the beginning. Blob names are returned in lexicographic order.
-// After getting a segment, process it, and then call ListBlobs again (passing the previously-returned
-// Marker) to get the next segment.
+// After getting a segment, process it, and then call ListBlobsFlatSegment again (passing the
+// previously-returned Marker) to get the next segment.
 // For more information, see https://docs.microsoft.com/rest/api/storageservices/list-blobs.
-func (c ContainerURL) ListBlobs(ctx context.Context, marker Marker, o ListBlobsOptions) (*ListBlobsResponse, error) {
-	prefix, delimiter, include, maxResults := o.pointers()
-	return c.client.ListBlobs(ctx, prefix, delimiter, marker.val, maxResults, include, nil, nil)
+func (c ContainerURL) ListBlobsFlatSegment(ctx context.Context, marker Marker, o ListBlobsSegmentOptions) (*ListBlobsFlatResponse, error) {
+	prefix, include, maxResults := o.pointers()
+	return c.client.ListBlobFlatSegment(ctx, prefix, marker.val, maxResults, include, nil, nil)
 }

+// ListBlobsHierarchySegment returns a single segment of blobs starting from the specified Marker. Use an empty
+// Marker to start enumeration from the beginning. Blob names are returned in lexicographic order.
+// After getting a segment, process it, and then call ListBlobsHierarchySegment again (passing the
+// previously-returned Marker) to get the next segment.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/list-blobs.
+func (c ContainerURL) ListBlobsHierarchySegment(ctx context.Context, marker Marker, delimiter string, o ListBlobsSegmentOptions) (*ListBlobsHierarchyResponse, error) {
+	prefix, include, maxResults := o.pointers()
+	return c.client.ListBlobHierarchySegment(ctx, delimiter, prefix, marker.val, maxResults, include, nil, nil)
+}
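The marker loop those comments describe looks like this in caller code. NotDone, NextMarker, and Segment.BlobItems are assumed from the Marker type and the generated response model (fmt/context/azblob imports presumed):

```go
func printAllBlobs(ctx context.Context, c azblob.ContainerURL) error {
	for marker := (azblob.Marker{}); marker.NotDone(); {
		resp, err := c.ListBlobsFlatSegment(ctx, marker, azblob.ListBlobsSegmentOptions{
			MaxResults: 100,
			Details:    azblob.BlobListingDetails{Snapshots: true}, // include snapshots in the listing
		})
		if err != nil {
			return err
		}
		for _, blob := range resp.Segment.BlobItems {
			fmt.Println(blob.Name)
		}
		marker = resp.NextMarker // empty once the listing is exhausted
	}
	return nil
}
```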
-// ListBlobsOptions defines options available when calling ListBlobs.
-type ListBlobsOptions struct {
+// ListBlobsSegmentOptions defines options available when calling ListBlobsFlatSegment or ListBlobsHierarchySegment.
+type ListBlobsSegmentOptions struct {
 	Details BlobListingDetails // No IncludeType header is produced if ""
 	Prefix  string             // No Prefix header is produced if ""
-	Delimiter string

 	// SetMaxResults sets the maximum desired results you want the service to return. Note, the
 	// service may return fewer results than requested.
@@ -230,14 +256,11 @@ type ListBlobsOptions struct {
 	MaxResults int32
 }

-func (o *ListBlobsOptions) pointers() (prefix *string, delimiter *string, include ListBlobsIncludeType, maxResults *int32) {
+func (o *ListBlobsSegmentOptions) pointers() (prefix *string, include []ListBlobsIncludeItemType, maxResults *int32) {
 	if o.Prefix != "" {
 		prefix = &o.Prefix
 	}
-	if o.Delimiter != "" {
-		delimiter = &o.Delimiter
-	}
-	include = ListBlobsIncludeType(o.Details.string())
+	include = o.Details.slice()
 	if o.MaxResults != 0 {
 		if o.MaxResults < 0 {
 			panic("MaxResults must be >= 0")
@@ -253,26 +276,23 @@ type BlobListingDetails struct {
 }

-// string produces the Include query parameter's value.
-func (d *BlobListingDetails) string() string {
-	items := make([]string, 0, 4)
+// slice produces the Include query parameter's value.
+func (d *BlobListingDetails) slice() []ListBlobsIncludeItemType {
+	items := []ListBlobsIncludeItemType{}
 	// NOTE: Multiple strings MUST be appended in alphabetic order or signing the string for authentication fails!
 	if d.Copy {
-		items = append(items, string(ListBlobsIncludeCopy))
+		items = append(items, ListBlobsIncludeItemCopy)
 	}
 	if d.Deleted {
-		items = append(items, string(ListBlobsIncludeDeleted))
+		items = append(items, ListBlobsIncludeItemDeleted)
 	}
 	if d.Metadata {
-		items = append(items, string(ListBlobsIncludeMetadata))
+		items = append(items, ListBlobsIncludeItemMetadata)
 	}
 	if d.Snapshots {
-		items = append(items, string(ListBlobsIncludeSnapshots))
+		items = append(items, ListBlobsIncludeItemSnapshots)
 	}
 	if d.UncommittedBlobs {
-		items = append(items, string(ListBlobsIncludeUncommittedblobs))
+		items = append(items, ListBlobsIncludeItemUncommittedblobs)
 	}
-	if len(items) > 0 {
-		return strings.Join(items, ",")
-	}
-	return string(ListBlobsIncludeNone)
+	return items
 }
87
2017-07-29/azblob/url_page_blob.go Normal file → Executable file
@@ -6,7 +6,6 @@ import (
 	"io"
 	"net/url"
 	"strconv"
-	"time"

 	"github.com/Azure/azure-pipeline-go/pipeline"
 )

@@ -16,7 +15,7 @@ const (
 	PageBlobPageBytes = 512

-	// PageBlobMaxPutPagesBytes indicates the maximum number of bytes that can be sent in a call to PutPage.
-	PageBlobMaxPutPagesBytes = 4 * 1024 * 1024 // 4MB
+	// PageBlobMaxUploadPagesBytes indicates the maximum number of bytes that can be sent in a call to UploadPages.
+	PageBlobMaxUploadPagesBytes = 4 * 1024 * 1024 // 4MB
 )

 // PageBlobURL defines a set of operations applicable to page blobs.
@@ -41,76 +40,88 @@ func (pb PageBlobURL) WithPipeline(p pipeline.Pipeline) PageBlobURL {
 }

 // WithSnapshot creates a new PageBlobURL object identical to the source but with the specified snapshot timestamp.
-// Pass time.Time{} to remove the snapshot, returning a URL to the base blob.
-func (pb PageBlobURL) WithSnapshot(snapshot time.Time) PageBlobURL {
+// Pass "" to remove the snapshot, returning a URL to the base blob.
+func (pb PageBlobURL) WithSnapshot(snapshot string) PageBlobURL {
 	p := NewBlobURLParts(pb.URL())
 	p.Snapshot = snapshot
 	return NewPageBlobURL(p.URL(), pb.blobClient.Pipeline())
 }

-// Create creates a page blob of the specified length. Call PutPages to upload data to a page blob.
+// CreatePageBlob creates a page blob of the specified length. Call UploadPages to upload data to a page blob.
 // For more information, see https://docs.microsoft.com/rest/api/storageservices/put-blob.
-func (pb PageBlobURL) Create(ctx context.Context, size int64, sequenceNumber int64, metadata Metadata, h BlobHTTPHeaders, ac BlobAccessConditions) (*BlobsPutResponse, error) {
+func (pb PageBlobURL) Create(ctx context.Context, size int64, sequenceNumber int64, h BlobHTTPHeaders, metadata Metadata, ac BlobAccessConditions) (*PageBlobsCreateResponse, error) {
 	if sequenceNumber < 0 {
 		panic("sequenceNumber must be greater than or equal to 0")
 	}
 	ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.HTTPAccessConditions.pointers()
-	return pb.blobClient.Put(ctx, BlobPageBlob, nil, nil, nil,
-		&h.ContentType, &h.ContentEncoding, &h.ContentLanguage, h.contentMD5Pointer(), &h.CacheControl,
+	return pb.pbClient.Create(ctx, 0, nil,
+		&h.ContentType, &h.ContentEncoding, &h.ContentLanguage, h.ContentMD5, &h.CacheControl,
 		metadata, ac.LeaseAccessConditions.pointers(),
 		&h.ContentDisposition, ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, &size, &sequenceNumber, nil)
 }
-// PutPages writes 1 or more pages to the page blob. The start and end offsets must be a multiple of 512.
+// UploadPages writes one or more pages to the page blob. The start offset and the stream size must be a multiple of 512 bytes.
+// This method panics if the stream is not at position 0.
+// Note that the HTTP client closes the body stream after the request is sent to the service.
 // For more information, see https://docs.microsoft.com/rest/api/storageservices/put-page.
-func (pb PageBlobURL) PutPages(ctx context.Context, pr PageRange, body io.ReadSeeker, ac BlobAccessConditions) (*PageBlobsPutPageResponse, error) {
+func (pb PageBlobURL) UploadPages(ctx context.Context, offset int64, body io.ReadSeeker, ac BlobAccessConditions) (*PageBlobsUploadPagesResponse, error) {
+	count := validateSeekableStreamAt0AndGetCount(body)
 	ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.HTTPAccessConditions.pointers()
 	ifSequenceNumberLessThanOrEqual, ifSequenceNumberLessThan, ifSequenceNumberEqual := ac.PageBlobAccessConditions.pointers()
-	return pb.pbClient.PutPage(ctx, PageWriteUpdate, body, nil, pr.pointers(), ac.LeaseAccessConditions.pointers(),
+	return pb.pbClient.UploadPages(ctx, count, body, nil,
+		PageRange{Start: offset, End: offset + count - 1}.pointers(),
+		ac.LeaseAccessConditions.pointers(),
 		ifSequenceNumberLessThanOrEqual, ifSequenceNumberLessThan, ifSequenceNumberEqual,
 		ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
 }
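Offsets and stream sizes are validated in units of PageBlobPageBytes. A sketch creating a two-page blob and filling its first page; bytes.NewReader satisfies io.ReadSeeker and starts at position 0 (bytes/context/azblob imports presumed):

```go
func writeFirstPage(ctx context.Context, pb azblob.PageBlobURL) error {
	_, err := pb.Create(ctx, 2*azblob.PageBlobPageBytes, 0,
		azblob.BlobHTTPHeaders{}, azblob.Metadata{}, azblob.BlobAccessConditions{})
	if err != nil {
		return err
	}
	page := make([]byte, azblob.PageBlobPageBytes) // exactly one 512-byte page
	_, err = pb.UploadPages(ctx, 0 /* offset: must be 512-aligned */, bytes.NewReader(page),
		azblob.BlobAccessConditions{})
	return err
}
```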
 // ClearPages frees the specified pages from the page blob.
 // For more information, see https://docs.microsoft.com/rest/api/storageservices/put-page.
-func (pb PageBlobURL) ClearPages(ctx context.Context, pr PageRange, ac BlobAccessConditions) (*PageBlobsPutPageResponse, error) {
+func (pb PageBlobURL) ClearPages(ctx context.Context, offset int64, count int64, ac BlobAccessConditions) (*PageBlobsClearPagesResponse, error) {
 	ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.HTTPAccessConditions.pointers()
 	ifSequenceNumberLessThanOrEqual, ifSequenceNumberLessThan, ifSequenceNumberEqual := ac.PageBlobAccessConditions.pointers()
-	return pb.pbClient.PutPage(ctx, PageWriteClear, nil, nil, pr.pointers(), ac.LeaseAccessConditions.pointers(),
+	return pb.pbClient.ClearPages(ctx, 0, nil,
+		PageRange{Start: offset, End: offset + count - 1}.pointers(),
+		ac.LeaseAccessConditions.pointers(),
 		ifSequenceNumberLessThanOrEqual, ifSequenceNumberLessThan,
 		ifSequenceNumberEqual, ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
 }
 // GetPageRanges returns the list of valid page ranges for a page blob or snapshot of a page blob.
 // For more information, see https://docs.microsoft.com/rest/api/storageservices/get-page-ranges.
-func (pb PageBlobURL) GetPageRanges(ctx context.Context, br BlobRange, ac BlobAccessConditions) (*PageList, error) {
+func (pb PageBlobURL) GetPageRanges(ctx context.Context, offset int64, count int64, ac BlobAccessConditions) (*PageList, error) {
 	ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.HTTPAccessConditions.pointers()
-	return pb.pbClient.GetPageRanges(ctx, nil, nil, nil, br.pointers(), ac.LeaseAccessConditions.pointers(),
+	return pb.pbClient.GetPageRanges(ctx, nil, nil,
+		httpRange{offset: offset, count: count}.pointers(),
+		ac.LeaseAccessConditions.pointers(),
 		ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
 }

 // GetPageRangesDiff gets the collection of page ranges that differ between a specified snapshot and this page blob.
 // For more information, see https://docs.microsoft.com/rest/api/storageservices/get-page-ranges.
-func (pb PageBlobURL) GetPageRangesDiff(ctx context.Context, br BlobRange, prevSnapshot time.Time, ac BlobAccessConditions) (*PageList, error) {
+func (pb PageBlobURL) GetPageRangesDiff(ctx context.Context, offset int64, count int64, prevSnapshot string, ac BlobAccessConditions) (*PageList, error) {
 	ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.HTTPAccessConditions.pointers()
-	return pb.pbClient.GetPageRanges(ctx, nil, nil, &prevSnapshot, br.pointers(),
-		ac.LeaseAccessConditions.pointers(), ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
+	return pb.pbClient.GetPageRangesDiff(ctx, nil, nil, &prevSnapshot,
+		httpRange{offset: offset, count: count}.pointers(),
+		ac.LeaseAccessConditions.pointers(),
+		ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag,
+		nil)
 }
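A sketch of reading back the valid ranges with the new offset/count signature; the PageRange slice on PageList is assumed from the generated models (fmt/context/azblob imports presumed):

```go
func printPageRanges(ctx context.Context, pb azblob.PageBlobURL) error {
	// Inspect the first 4 MiB of the blob.
	pl, err := pb.GetPageRanges(ctx, 0, 4*1024*1024, azblob.BlobAccessConditions{})
	if err != nil {
		return err
	}
	for _, pr := range pl.PageRange { // field name assumed from the generated model
		fmt.Printf("valid range: [%d, %d]\n", pr.Start, pr.End)
	}
	return nil
}
```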
 // Resize resizes the page blob to the specified size (which must be a multiple of 512).
 // For more information, see https://docs.microsoft.com/rest/api/storageservices/set-blob-properties.
-func (pb PageBlobURL) Resize(ctx context.Context, length int64, ac BlobAccessConditions) (*BlobsSetPropertiesResponse, error) {
-	if length%PageBlobPageBytes != 0 {
-		panic("Length must be a multiple of PageBlobPageBytes (512)")
+func (pb PageBlobURL) Resize(ctx context.Context, size int64, ac BlobAccessConditions) (*PageBlobsResizeResponse, error) {
+	if size%PageBlobPageBytes != 0 {
+		panic("Size must be a multiple of PageBlobPageBytes (512)")
 	}
 	ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.HTTPAccessConditions.pointers()
-	return pb.blobClient.SetProperties(ctx, nil, nil, nil, nil, nil, nil, ac.LeaseAccessConditions.pointers(),
-		ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil, &length, SequenceNumberActionNone, nil, nil)
+	return pb.pbClient.Resize(ctx, size, nil, ac.LeaseAccessConditions.pointers(),
+		ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
 }

-// SetSequenceNumber sets the page blob's sequence number.
-func (pb PageBlobURL) SetSequenceNumber(ctx context.Context, action SequenceNumberActionType, sequenceNumber int64,
-	h BlobHTTPHeaders, ac BlobAccessConditions) (*BlobsSetPropertiesResponse, error) {
+// UpdateSequenceNumber updates the page blob's sequence number.
+func (pb PageBlobURL) UpdateSequenceNumber(ctx context.Context, action SequenceNumberActionType, sequenceNumber int64,
+	ac BlobAccessConditions) (*PageBlobsUpdateSequenceNumberResponse, error) {
 	if sequenceNumber < 0 {
 		panic("sequenceNumber must be greater than or equal to 0")
 	}
@@ -119,9 +130,9 @@ func (pb PageBlobURL) SetSequenceNumber(ctx context.Context, action SequenceNumb
 		sn = nil
 	}
 	ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch := ac.HTTPAccessConditions.pointers()
-	return pb.blobClient.SetProperties(ctx, nil, &h.CacheControl, &h.ContentType, h.contentMD5Pointer(), &h.ContentEncoding, &h.ContentLanguage,
-		ac.LeaseAccessConditions.pointers(),
-		ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, &h.ContentDisposition, nil, action, sn, nil)
+	return pb.pbClient.UpdateSequenceNumber(ctx, action, nil,
+		ac.LeaseAccessConditions.pointers(), ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch,
+		sn, nil)
 }
 // StartIncrementalCopy begins an operation to start an incremental copy from one page blob's snapshot to this page blob.
@@ -129,12 +140,12 @@ func (pb PageBlobURL) SetSequenceNumb
 // The copied snapshots are complete copies of the original snapshot and can be read or copied from as usual.
 // For more information, see https://docs.microsoft.com/rest/api/storageservices/incremental-copy-blob and
 // https://docs.microsoft.com/en-us/azure/virtual-machines/windows/incremental-snapshots.
-func (pb PageBlobURL) StartIncrementalCopy(ctx context.Context, source url.URL, snapshot time.Time, ac BlobAccessConditions) (*PageBlobsIncrementalCopyResponse, error) {
+func (pb PageBlobURL) StartCopyIncremental(ctx context.Context, source url.URL, snapshot string, ac BlobAccessConditions) (*PageBlobsCopyIncrementalResponse, error) {
 	ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.HTTPAccessConditions.pointers()
 	qp := source.Query()
-	qp.Set("snapshot", snapshot.Format(snapshotTimeFormat))
+	qp.Set("snapshot", snapshot)
 	source.RawQuery = qp.Encode()
-	return pb.pbClient.IncrementalCopy(ctx, source.String(), nil, nil,
+	return pb.pbClient.CopyIncremental(ctx, source.String(), nil, nil,
 		ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
 }

@@ -145,10 +156,10 @@ func (pr PageRange) pointers() *string {
 	if pr.End <= 0 {
 		panic("PageRange's End value must be greater than 0")
 	}
-	if pr.Start%512 != 0 {
+	if pr.Start%PageBlobPageBytes != 0 {
 		panic("PageRange's Start value must be a multiple of 512")
 	}
-	if pr.End%512 != 511 {
+	if pr.End%PageBlobPageBytes != (PageBlobPageBytes - 1) {
 		panic("PageRange's End value must be 1 less than a multiple of 512")
 	}
 	if pr.End <= pr.Start {
@@ -166,25 +177,25 @@ type PageBlobAccessConditions struct {
 	// IfSequenceNumberLessThan=0 means no 'IfSequenceNumberLessThan' header specified.
 	// IfSequenceNumberLessThan>0 means 'IfSequenceNumberLessThan' header specified with its value
 	// IfSequenceNumberLessThan==-1 means 'IfSequenceNumberLessThan' header specified with a value of 0
-	IfSequenceNumberLessThan int32
+	IfSequenceNumberLessThan int64

 	// IfSequenceNumberLessThanOrEqual ensures that the page blob operation succeeds
 	// only if the blob's sequence number is less than or equal to a value.
 	// IfSequenceNumberLessThanOrEqual=0 means no 'IfSequenceNumberLessThanOrEqual' header specified.
 	// IfSequenceNumberLessThanOrEqual>0 means 'IfSequenceNumberLessThanOrEqual' header specified with its value
 	// IfSequenceNumberLessThanOrEqual=-1 means 'IfSequenceNumberLessThanOrEqual' header specified with a value of 0
-	IfSequenceNumberLessThanOrEqual int32
+	IfSequenceNumberLessThanOrEqual int64

 	// IfSequenceNumberEqual ensures that the page blob operation succeeds
 	// only if the blob's sequence number is equal to a value.
 	// IfSequenceNumberEqual=0 means no 'IfSequenceNumberEqual' header specified.
 	// IfSequenceNumberEqual>0 means 'IfSequenceNumberEqual' header specified with its value
 	// IfSequenceNumberEqual=-1 means 'IfSequenceNumberEqual' header specified with a value of 0
-	IfSequenceNumberEqual int32
+	IfSequenceNumberEqual int64
 }
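The -1 sentinel exists because 0 already means "do not send the header"; to require a sequence number of exactly 0, a caller passes -1. A sketch (context/azblob imports presumed):

```go
func clearIfSequenceNumberIsZero(ctx context.Context, pb azblob.PageBlobURL) error {
	ac := azblob.BlobAccessConditions{
		PageBlobAccessConditions: azblob.PageBlobAccessConditions{
			// -1 sentinel: send the 'IfSequenceNumberEqual' header with a value of 0
			// (see the struct comments above).
			IfSequenceNumberEqual: -1,
		},
	}
	_, err := pb.ClearPages(ctx, 0, azblob.PageBlobPageBytes, ac)
	return err
}
```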
 // pointers is for internal infrastructure. It returns the fields as pointers.
-func (ac PageBlobAccessConditions) pointers() (snltoe *int32, snlt *int32, sne *int32) {
+func (ac PageBlobAccessConditions) pointers() (snltoe *int64, snlt *int64, sne *int64) {
 	if ac.IfSequenceNumberLessThan < -1 {
 		panic("IfSequenceNumberLessThan can't be less than -1")
 	}
@@ -195,7 +206,7 @@ func (ac PageBlobAccessConditions) pointers() (snltoe *int32, snlt *int32, sne *
 		panic("IfSequenceNumberEqual can't be less than -1")
 	}

-	var zero int32 // Defaults to 0
+	var zero int64 // Defaults to 0
 	switch ac.IfSequenceNumberLessThan {
 	case -1:
 		snlt = &zero
102
2017-07-29/azblob/url_service.go Normal file → Executable file
@@ -9,51 +9,13 @@ import (
 )

 const (
-	// RootContainerName is the special Azure Storage name used to identify a storage account's root container.
-	RootContainerName = "$root"
+	// ContainerNameRoot is the special Azure Storage name used to identify a storage account's root container.
+	ContainerNameRoot = "$root"
+
+	// ContainerNameLogs is the special Azure Storage name used to identify a storage account's logs container.
+	ContainerNameLogs = "$logs"
 )

-// PipelineOptions is used to configure a request policy pipeline's retry policy and logging.
-type PipelineOptions struct {
-	// Log configures the pipeline's logging infrastructure indicating what information is logged and where.
-	Log pipeline.LogOptions
-
-	// Retry configures the built-in retry policy behavior.
-	Retry RetryOptions
-
-	// RequestLog configures the built-in request logging policy.
-	RequestLog RequestLogOptions
-
-	// Telemetry configures the built-in telemetry policy behavior.
-	Telemetry TelemetryOptions
-}
-
-// NewPipeline creates a Pipeline using the specified credentials and options.
-func NewPipeline(c Credential, o PipelineOptions) pipeline.Pipeline {
-	if c == nil {
-		panic("c can't be nil")
-	}
-
-	// Closest to API goes first; closest to the wire goes last
-	f := []pipeline.Factory{
-		NewTelemetryPolicyFactory(o.Telemetry),
-		NewUniqueRequestIDPolicyFactory(),
-		NewRetryPolicyFactory(o.Retry),
-	}
-
-	if _, ok := c.(*anonymousCredentialPolicyFactory); !ok {
-		// For AnonymousCredential, we optimize out the policy factory since it doesn't do anything
-		// NOTE: The credential's policy factory must appear close to the wire so it can sign any
-		// changes made by other factories (like UniqueRequestIDPolicyFactory)
-		f = append(f, c)
-	}
-
-	f = append(f,
-		pipeline.MethodFactoryMarker(), // indicates at what stage in the pipeline the method factory is invoked
-		NewRequestLogPolicyFactory(o.RequestLog))
-
-	return pipeline.NewPipeline(f, pipeline.Options{HTTPSender: nil, Log: o.Log})
-}
-
 // A ServiceURL represents a URL to the Azure Storage Blob service allowing you to manipulate blob containers.
 type ServiceURL struct {
 	client serviceClient
@@ -94,15 +56,6 @@ func (s ServiceURL) NewContainerURL(containerName string) ContainerURL {
 	return NewContainerURL(containerURL, s.client.Pipeline())
 }

-// NewRootContainerURL creates a new ContainerURL object by concatenating $root (RootContainerName)
-// to the end of ServiceURL's URL. The new ContainerURL uses the same request policy pipeline as the
-// ServiceURL. To change the pipeline, create the ContainerURL and then call its WithPipeline method
-// passing in the desired pipeline object. Or, call NewContainerURL instead of calling the NewContainerURL method.
-func (s ServiceURL) NewRootContainerURL() ContainerURL {
-	containerURL := appendToURLPath(s.URL(), RootContainerName)
-	return NewContainerURL(containerURL, s.client.Pipeline())
-}
-
 // appendToURLPath appends a string to the end of a URL's path (prefixing the string with a '/' if required)
 func appendToURLPath(u url.URL, name string) url.URL {
 	// e.g. "https://ms.com/a/b/?k1=v1&k2=v2#f"
@@ -123,25 +76,25 @@ func appendToURLPath(u url.URL, name string) url.URL {
 	return u
 }

-// ListContainers returns a single segment of containers starting from the specified Marker. Use an empty
+// ListContainersSegment returns a single segment of containers starting from the specified Marker. Use an empty
 // Marker to start enumeration from the beginning. Container names are returned in lexicographic order.
-// After getting a segment, process it, and then call ListContainers again (passing the previously-returned
-// Marker) to get the next segment. For more information, see
+// After getting a segment, process it, and then call ListContainersSegment again (passing the
+// previously-returned Marker) to get the next segment. For more information, see
 // https://docs.microsoft.com/rest/api/storageservices/list-containers2.
-func (s ServiceURL) ListContainers(ctx context.Context, marker Marker, o ListContainersOptions) (*ListContainersResponse, error) {
+func (s ServiceURL) ListContainersSegment(ctx context.Context, marker Marker, o ListContainersSegmentOptions) (*ListContainersResponse, error) {
 	prefix, include, maxResults := o.pointers()
-	return s.client.ListContainers(ctx, prefix, marker.val, maxResults, include, nil, nil)
+	return s.client.ListContainersSegment(ctx, prefix, marker.val, maxResults, include, nil, nil)
 }
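The same marker pattern used for blob listing applies to containers. ContainerItems and NextMarker are assumed from the generated ListContainersResponse (fmt/context/azblob imports presumed):

```go
func printAllContainers(ctx context.Context, s azblob.ServiceURL) error {
	for marker := (azblob.Marker{}); marker.NotDone(); {
		resp, err := s.ListContainersSegment(ctx, marker, azblob.ListContainersSegmentOptions{})
		if err != nil {
			return err
		}
		for _, c := range resp.ContainerItems { // field name assumed from the generated model
			fmt.Println(c.Name)
		}
		marker = resp.NextMarker
	}
	return nil
}
```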
-// ListContainersOptions defines options available when calling ListContainers.
-type ListContainersOptions struct {
+// ListContainersSegmentOptions defines options available when calling ListContainersSegment.
+type ListContainersSegmentOptions struct {
 	Detail     ListContainersDetail // No IncludeType header is produced if ""
 	Prefix     string               // No Prefix header is produced if ""
 	MaxResults int32                // 0 means unspecified
 	// TODO: update swagger to generate this type?
 }

-func (o *ListContainersOptions) pointers() (prefix *string, include ListContainersIncludeType, maxResults *int32) {
+func (o *ListContainersSegmentOptions) pointers() (prefix *string, include ListContainersIncludeType, maxResults *int32) {
 	if o.Prefix != "" {
 		prefix = &o.Prefix
 	}
@@ -155,7 +108,7 @@ func (o *ListContainersSegmentOptions) pointers() (prefix *string, include ListC
 	return
 }

 // ListContainersDetail indicates what additional information the service should return with each container.
 type ListContainersDetail struct {
 	// Tells the service whether to return metadata for each container.
 	Metadata bool
@@ -174,23 +127,18 @@ func (d *ListContainersDetail) string() string {
 	return string(ListContainersIncludeNone)
 }
-// GetProperties operation gets the properties of a storage accounts Blob service, including properties
-// for Storage Analytics and CORS (Cross-Origin Resource Sharing) rules. For more information see
-// https://docs.microsoft.com/en-us/rest/api/storageservices/get-blob-service-properties.
-func (s ServiceURL) GetProperties(ctx context.Context) (*StorageServiceProperties, error) {
-	return s.client.GetProperties(ctx, nil, nil)
+func (bsu ServiceURL) GetProperties(ctx context.Context) (*StorageServiceProperties, error) {
+	return bsu.client.GetProperties(ctx, nil, nil)
 }

-// SetProperties operation sets properties for a storage accounts Blob service endpoint,
-// including properties for Storage Analytics and CORS (Cross-Origin Resource Sharing) rules. For more
-// information see https://docs.microsoft.com/en-us/rest/api/storageservices/set-blob-service-properties.
-func (s ServiceURL) SetProperties(ctx context.Context, properties StorageServiceProperties) (*ServiceSetPropertiesResponse, error) {
-	return s.client.SetProperties(ctx, properties, nil, nil)
+func (bsu ServiceURL) SetProperties(ctx context.Context, properties StorageServiceProperties) (*ServiceSetPropertiesResponse, error) {
+	return bsu.client.SetProperties(ctx, properties, nil, nil)
 }

-// GetStats operation retrieves statistics related to replication for the Blob service. It is only available on the secondary location
-// endpoint when read-access geo-redundant replication is enabled for the storage account.
-// https://docs.microsoft.com/en-us/rest/api/storageservices/get-blob-service-stats
-func (s ServiceURL) GetStats(ctx context.Context) (*StorageServiceStats, error) {
-	return s.client.GetStats(ctx, nil, nil)
+func (bsu ServiceURL) GetStatistics(ctx context.Context) (*StorageServiceStats, error) {
+	return bsu.client.GetStatistics(ctx, nil, nil)
+}
+
+func (bsu ServiceURL) PreflightRequest(ctx context.Context) (*StorageServiceStats, error) {
+	return nil, nil
 }
0
2017-07-29/azblob/version.go Normal file → Executable file
@@ -1,43 +1,55 @@
 package azblob

 import (
 	"context"

 	"github.com/Azure/azure-pipeline-go/pipeline"
 )

 // Credential represent any credential type; it is used to create a credential policy Factory.
 type Credential interface {
 	pipeline.Factory
 	credentialMarker()
 }

-// NewAnonymousCredential creates an anonymous credential for use with HTTP(S)
-// requests that read blobs from public containers or for use with Shared Access
-// Signatures (SAS).
-func NewAnonymousCredential() Credential {
-	return &anonymousCredentialPolicyFactory{}
-}
+type credentialFunc pipeline.FactoryFunc
+
+func (f credentialFunc) New(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.Policy {
+	return f(next, po)
+}
+
+// credentialMarker is a package-internal method that exists just to satisfy the Credential interface.
+func (credentialFunc) credentialMarker() {}
+
+//////////////////////////////
+
+// NewAnonymousCredential creates an anonymous credential for use with HTTP(S) requests that read public resources
+// or for use with Shared Access Signatures (SAS).
+func NewAnonymousCredential() Credential {
+	return anonymousCredentialFactory
+}
+
+var anonymousCredentialFactory Credential = &anonymousCredentialPolicyFactory{} // Singleton

 // anonymousCredentialPolicyFactory is the credential's policy factory.
 type anonymousCredentialPolicyFactory struct {
 }

 // New creates a credential policy object.
-func (f *anonymousCredentialPolicyFactory) New(next pipeline.Policy, config *pipeline.PolicyOptions) pipeline.Policy {
+func (f *anonymousCredentialPolicyFactory) New(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.Policy {
 	return &anonymousCredentialPolicy{next: next}
 }

 // credentialMarker is a package-internal method that exists just to satisfy the Credential interface.
 func (*anonymousCredentialPolicyFactory) credentialMarker() {}

 // anonymousCredentialPolicy is the credential's policy object.
 type anonymousCredentialPolicy struct {
 	next pipeline.Policy
 }

 // Do implements the credential's policy interface.
 func (p anonymousCredentialPolicy) Do(ctx context.Context, request pipeline.Request) (pipeline.Response, error) {
 	// For anonymous credentials, this is effectively a no-op
 	return p.next.Do(ctx, request)
 }
@@ -6,6 +6,7 @@ import (
 	"crypto/hmac"
 	"crypto/sha256"
 	"encoding/base64"
+	"fmt"
 	"net/http"
 	"net/url"
 	"sort"
@@ -39,39 +40,31 @@ func (f SharedKeyCredential) AccountName() string {
 }

 // New creates a credential policy object.
-func (f *SharedKeyCredential) New(next pipeline.Policy, config *pipeline.PolicyOptions) pipeline.Policy {
-	return sharedKeyCredentialPolicy{factory: f, next: next, config: config}
+func (f *SharedKeyCredential) New(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.Policy {
+	return pipeline.PolicyFunc(func(ctx context.Context, request pipeline.Request) (pipeline.Response, error) {
+		// Add a x-ms-date header if it doesn't already exist
+		if d := request.Header.Get(headerXmsDate); d == "" {
+			request.Header[headerXmsDate] = []string{time.Now().UTC().Format(http.TimeFormat)}
+		}
+		stringToSign := f.buildStringToSign(request)
+		signature := f.ComputeHMACSHA256(stringToSign)
+		authHeader := strings.Join([]string{"SharedKey ", f.accountName, ":", signature}, "")
+		request.Header[headerAuthorization] = []string{authHeader}
+
+		response, err := next.Do(ctx, request)
+		if err != nil && response != nil && response.Response() != nil && response.Response().StatusCode == http.StatusForbidden {
+			fmt.Println("HASH:", signature)
+			fmt.Println("S2S\n", stringToSign)
+			// Service failed to authenticate request, log it
+			po.Log(pipeline.LogError, "===== HTTP Forbidden status, String-to-Sign:\n"+stringToSign+"\n===============================\n")
+		}
+		return response, err
+	})
 }

 // credentialMarker is a package-internal method that exists just to satisfy the Credential interface.
 func (*SharedKeyCredential) credentialMarker() {}
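With the policy now built as a closure over the factory, a caller only ever touches the credential and the pipeline. A sketch, assuming the NewSharedKeyCredential constructor and NewServiceURL exist elsewhere in this package (fmt/net/url/azblob imports presumed):

```go
func newSignedServiceURL(accountName, accountKey string) (azblob.ServiceURL, error) {
	credential := azblob.NewSharedKeyCredential(accountName, accountKey) // constructor assumed
	p := azblob.NewPipeline(credential, azblob.PipelineOptions{})
	u, err := url.Parse(fmt.Sprintf("https://%s.blob.core.windows.net", accountName))
	if err != nil {
		return azblob.ServiceURL{}, err
	}
	return azblob.NewServiceURL(*u, p), nil // NewServiceURL assumed
}
```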
-// sharedKeyCredentialPolicy is the credential's policy object.
-type sharedKeyCredentialPolicy struct {
-	factory *SharedKeyCredential
-	next    pipeline.Policy
-	config  *pipeline.PolicyOptions
-}
-
-// Do implements the credential's policy interface.
-func (p sharedKeyCredentialPolicy) Do(ctx context.Context, request pipeline.Request) (pipeline.Response, error) {
-	// Add a x-ms-date header if it doesn't already exist
-	if d := request.Header.Get(headerXmsDate); d == "" {
-		request.Header[headerXmsDate] = []string{time.Now().UTC().Format(http.TimeFormat)}
-	}
-	stringToSign := p.factory.buildStringToSign(request)
-	signature := p.factory.ComputeHMACSHA256(stringToSign)
-	authHeader := strings.Join([]string{"SharedKey ", p.factory.accountName, ":", signature}, "")
-	request.Header[headerAuthorization] = []string{authHeader}
-
-	response, err := p.next.Do(ctx, request)
-	if err != nil && response != nil && response.Response() != nil && response.Response().StatusCode == http.StatusForbidden {
-		// Service failed to authenticate request, log it
-		p.config.Log(pipeline.LogError, "===== HTTP Forbidden status, String-to-Sign:\n"+stringToSign+"\n===============================\n")
-	}
-	return response, err
-}
-
 // Constants ensuring that header names are correctly spelled and consistently cased.
 const (
 	headerAuthorization = "Authorization"
@@ -1,48 +1,39 @@
 package azblob

 import (
 	"context"
 	"sync/atomic"

 	"github.com/Azure/azure-pipeline-go/pipeline"
 )

 // NewTokenCredential creates a token credential for use with role-based
 // access control (RBAC) access to Azure Storage resources.
 func NewTokenCredential(token string) *TokenCredential {
 	f := &TokenCredential{}
 	f.SetToken(token)
 	return f
 }

-// TokenCredential is a pipeline.Factory is the credential's policy factory.
+// TokenCredential is a pipeline.Factory that serves as the credential's policy factory.
 type TokenCredential struct{ token atomic.Value }

 // Token returns the current token value
 func (f *TokenCredential) Token() string { return f.token.Load().(string) }

 // SetToken changes the current token value
 func (f *TokenCredential) SetToken(token string) { f.token.Store(token) }

-// New creates a credential policy object.
-func (f *TokenCredential) New(next pipeline.Policy, config *pipeline.PolicyOptions) pipeline.Policy {
-	return &tokenCredentialPolicy{factory: f, next: next}
-}
-
 // credentialMarker is a package-internal method that exists just to satisfy the Credential interface.
 func (*TokenCredential) credentialMarker() {}

-// tokenCredentialPolicy is the credential's policy object.
-type tokenCredentialPolicy struct {
-	factory *TokenCredential
-	next    pipeline.Policy
-}
-
-// Do implements the credential's policy interface.
-func (p tokenCredentialPolicy) Do(ctx context.Context, request pipeline.Request) (pipeline.Response, error) {
-	if request.URL.Scheme != "https" {
-		panic("Token credentials require a URL using the https protocol scheme.")
-	}
-	request.Header[headerAuthorization] = []string{"Bearer " + p.factory.Token()}
-	return p.next.Do(ctx, request)
-}
+// New creates a credential policy object.
+func (f *TokenCredential) New(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.Policy {
+	return pipeline.PolicyFunc(func(ctx context.Context, request pipeline.Request) (pipeline.Response, error) {
+		if request.URL.Scheme != "https" {
+			panic("Token credentials require a URL using the https protocol scheme.")
+		}
+		request.Header[headerAuthorization] = []string{"Bearer " + f.Token()}
+		return next.Do(ctx, request)
+	})
 }
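Because the token lives in an atomic.Value, it can be rotated while requests are in flight; every request rebuilds its Authorization header from the current value. A sketch (pipeline/azblob imports presumed; the refresh channel is a hypothetical token source):

```go
func pipelineWithRotatingToken(initialToken string, refresh <-chan string) pipeline.Pipeline {
	tc := azblob.NewTokenCredential(initialToken)
	go func() {
		for t := range refresh { // e.g. fed by an OAuth refresh loop (assumed)
			tc.SetToken(t) // atomic store; subsequent requests use the new bearer token
		}
	}()
	return azblob.NewPipeline(tc, azblob.PipelineOptions{})
}
```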
@@ -1,25 +1,27 @@
+// +build linux darwin
+
 package azblob

 import (
 	"os"
 	"syscall"
 )

 type mmf []byte

 func newMMF(file *os.File, writable bool, offset int64, length int) (mmf, error) {
 	prot, flags := syscall.PROT_READ, syscall.MAP_SHARED // Assume read-only
 	if writable {
 		prot, flags = syscall.PROT_READ|syscall.PROT_WRITE, syscall.MAP_SHARED
 	}
 	addr, err := syscall.Mmap(int(file.Fd()), offset, length, prot, flags)
 	return mmf(addr), err
 }

 func (m *mmf) unmap() {
 	err := syscall.Munmap(*m)
 	*m = nil
 	if err != nil {
 		panic(err)
 	}
 }
@@ -1,38 +1,38 @@
 package azblob

 import (
 	"os"
 	"reflect"
 	"syscall"
 	"unsafe"
 )

 type mmf []byte

 func newMMF(file *os.File, writable bool, offset int64, length int) (mmf, error) {
 	prot, access := uint32(syscall.PAGE_READONLY), uint32(syscall.FILE_MAP_READ) // Assume read-only
 	if writable {
 		prot, access = uint32(syscall.PAGE_READWRITE), uint32(syscall.FILE_MAP_WRITE)
 	}
 	hMMF, errno := syscall.CreateFileMapping(syscall.Handle(file.Fd()), nil, prot, uint32(int64(length)>>32), uint32(int64(length)&0xffffffff), nil)
 	if hMMF == 0 {
 		return nil, os.NewSyscallError("CreateFileMapping", errno)
 	}
 	defer syscall.CloseHandle(hMMF)
 	addr, errno := syscall.MapViewOfFile(hMMF, access, uint32(offset>>32), uint32(offset&0xffffffff), uintptr(length))
 	m := mmf{}
 	h := (*reflect.SliceHeader)(unsafe.Pointer(&m))
 	h.Data = addr
 	h.Len = length
 	h.Cap = h.Len
 	return m, nil
 }

 func (m *mmf) unmap() {
 	addr := uintptr(unsafe.Pointer(&(([]byte)(*m)[0])))
 	*m = mmf{}
 	err := syscall.UnmapViewOfFile(addr)
 	if err != nil {
 		panic(err)
 	}
 }
@@ -0,0 +1,46 @@
+package azblob
+
+import (
+	"github.com/Azure/azure-pipeline-go/pipeline"
+)
+
+// PipelineOptions is used to configure a request policy pipeline's retry policy and logging.
+type PipelineOptions struct {
+	// Log configures the pipeline's logging infrastructure indicating what information is logged and where.
+	Log pipeline.LogOptions
+
+	// Retry configures the built-in retry policy behavior.
+	Retry RetryOptions
+
+	// RequestLog configures the built-in request logging policy.
+	RequestLog RequestLogOptions
+
+	// Telemetry configures the built-in telemetry policy behavior.
+	Telemetry TelemetryOptions
+}
+
+// NewPipeline creates a Pipeline using the specified credentials and options.
+func NewPipeline(c Credential, o PipelineOptions) pipeline.Pipeline {
+	if c == nil {
+		panic("c can't be nil")
+	}
+
+	// Closest to API goes first; closest to the wire goes last
+	f := []pipeline.Factory{
+		NewTelemetryPolicyFactory(o.Telemetry),
+		NewUniqueRequestIDPolicyFactory(),
+		NewRetryPolicyFactory(o.Retry),
+	}
+
+	if _, ok := c.(*anonymousCredentialPolicyFactory); !ok {
+		// For AnonymousCredential, we optimize out the policy factory since it doesn't do anything
+		// NOTE: The credential's policy factory must appear close to the wire so it can sign any
+		// changes made by other factories (like UniqueRequestIDPolicyFactory)
+		f = append(f, c)
+	}
+
+	f = append(f,
+		pipeline.MethodFactoryMarker(), // indicates at what stage in the pipeline the method factory is invoked
+		NewRequestLogPolicyFactory(o.RequestLog))
+
+	return pipeline.NewPipeline(f, pipeline.Options{HTTPSender: nil, Log: o.Log})
+}
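Wiring the pipeline to a URL type is a one-liner per resource level. A sketch using the anonymous credential; NewServiceURL is assumed from elsewhere in the package (fmt/net/url/time/azblob imports presumed):

```go
func newAnonymousServiceURL(account string) (azblob.ServiceURL, error) {
	p := azblob.NewPipeline(azblob.NewAnonymousCredential(), azblob.PipelineOptions{
		Retry: azblob.RetryOptions{TryTimeout: time.Minute}, // per-try deadline
	})
	u, err := url.Parse(fmt.Sprintf("https://%s.blob.core.windows.net", account))
	if err != nil {
		return azblob.ServiceURL{}, err
	}
	return azblob.NewServiceURL(*u, p), nil // NewServiceURL assumed
}
```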
@@ -18,6 +18,9 @@ type RequestLogOptions struct {
 	// LogWarningIfTryOverThreshold logs a warning if a tried operation takes longer than the specified
 	// duration (-1=no logging; 0=default threshold).
 	LogWarningIfTryOverThreshold time.Duration
+
+	// TurnOffDefaultLogging turns off the default logging written to Windows Event Log or syslog.
+	//TurnOffDefaultLogging bool
 }
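A sketch of tuning this threshold when constructing PipelineOptions; -1 disables the slow-try warning and 0 keeps the default (pipeline/time/azblob imports presumed):

```go
func newLoggingPipeline() pipeline.Pipeline {
	return azblob.NewPipeline(azblob.NewAnonymousCredential(), azblob.PipelineOptions{
		RequestLog: azblob.RequestLogOptions{
			// Warn whenever a single try exceeds 3 seconds.
			LogWarningIfTryOverThreshold: 3 * time.Second,
		},
	})
}
```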
func (o RequestLogOptions) defaults() RequestLogOptions { func (o RequestLogOptions) defaults() RequestLogOptions {
@ -82,12 +85,12 @@ func NewRequestLogPolicyFactory(o RequestLogOptions) pipeline.Factory {
} }
fmt.Fprintf(b, "==> REQUEST/RESPONSE (Try=%d/%v%s, OpTime=%v) -- ", try, tryDuration, slow, opDuration) fmt.Fprintf(b, "==> REQUEST/RESPONSE (Try=%d/%v%s, OpTime=%v) -- ", try, tryDuration, slow, opDuration)
if err != nil { // This HTTP request did not get a response from the service if err != nil { // This HTTP request did not get a response from the service
fmt.Fprintf(b, "REQUEST ERROR\n") fmt.Fprint(b, "REQUEST ERROR\n")
} else { } else {
if logLevel == pipeline.LogError { if logLevel == pipeline.LogError {
fmt.Fprintf(b, "RESPONSE STATUS CODE ERROR\n") fmt.Fprint(b, "RESPONSE STATUS CODE ERROR\n")
} else { } else {
fmt.Fprintf(b, "RESPONSE SUCCESSFULLY RECEIVED\n") fmt.Fprint(b, "RESPONSE SUCCESSFULLY RECEIVED\n")
} }
} }
@@ -97,7 +100,7 @@ func NewRequestLogPolicyFactory(o RequestLogOptions) pipeline.Factory {
} }
msg := b.String() msg := b.String()
if forceLog { if forceLog /*&& !o.TurnOffDefaultLogging */{
pipeline.ForceLog(logLevel, msg) pipeline.ForceLog(logLevel, msg)
} }
if shouldLog { if shouldLog {

View file

@@ -9,6 +9,8 @@ import (
"time" "time"
"github.com/Azure/azure-pipeline-go/pipeline" "github.com/Azure/azure-pipeline-go/pipeline"
"io/ioutil"
"io"
) )
// RetryPolicy tells the pipeline what kind of retry policy to use. See the RetryPolicy* constants. // RetryPolicy tells the pipeline what kind of retry policy to use. See the RetryPolicy* constants.
@@ -40,8 +42,11 @@ type RetryOptions struct {
TryTimeout time.Duration TryTimeout time.Duration
// RetryDelay specifies the amount of delay to use before retrying an operation (0=default). // RetryDelay specifies the amount of delay to use before retrying an operation (0=default).
// The delay increases (exponentially or linearly) with each retry up to a maximum specified by // When RetryPolicy is specified as RetryPolicyExponential, the delay increases exponentially
// MaxRetryDelay. If you specify 0, then you must also specify 0 for MaxRetryDelay. // with each retry up to a maximum specified by MaxRetryDelay.
// If you specify 0, then you must also specify 0 for MaxRetryDelay.
// If you specify RetryDelay, then you must also specify MaxRetryDelay, and MaxRetryDelay should be
// equal to or greater than RetryDelay.
RetryDelay time.Duration RetryDelay time.Duration
// MaxRetryDelay specifies the maximum delay allowed before retrying an operation (0=default). // MaxRetryDelay specifies the maximum delay allowed before retrying an operation (0=default).
@@ -52,7 +57,12 @@ type RetryOptions struct {
// If RetryReadsFromSecondaryHost is "" (the default) then operations are not retried against another host. // If RetryReadsFromSecondaryHost is "" (the default) then operations are not retried against another host.
// NOTE: Before setting this field, make sure you understand the issues around reading stale & potentially-inconsistent // NOTE: Before setting this field, make sure you understand the issues around reading stale & potentially-inconsistent
// data at this webpage: https://docs.microsoft.com/en-us/azure/storage/common/storage-designing-ha-apps-with-ragrs // data at this webpage: https://docs.microsoft.com/en-us/azure/storage/common/storage-designing-ha-apps-with-ragrs
RetryReadsFromSecondaryHost string RetryReadsFromSecondaryHost string // Comment this out for non-Blob SDKs
}
func (o RetryOptions) retryReadsFromSecondaryHost() string {
return o.RetryReadsFromSecondaryHost // This is for the Blob SDK only
//return "" // This is for non-blob SDKs
} }
func (o RetryOptions) defaults() RetryOptions { func (o RetryOptions) defaults() RetryOptions {
@@ -117,7 +127,7 @@ func (o RetryOptions) calcDelay(try int32) time.Duration { // try is >=1; never
} }
// Introduce some jitter: [0.0, 1.0) / 2 = [0.0, 0.5) + 0.8 = [0.8, 1.3) // Introduce some jitter: [0.0, 1.0) / 2 = [0.0, 0.5) + 0.8 = [0.8, 1.3)
delay *= time.Duration(rand.Float32()/2 + 0.8) // NOTE: We want math/rand; not crypto/rand delay = time.Duration(delay.Seconds() * (rand.Float64()/2 + 0.8) * float64(time.Second)) // NOTE: We want math/rand; not crypto/rand
if delay > o.MaxRetryDelay { if delay > o.MaxRetryDelay {
delay = o.MaxRetryDelay delay = o.MaxRetryDelay
} }
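To make the delay math concrete, here is a standalone sketch (names are mine, not the SDK's) of the jittered exponential backoff used above: ((2^try)-1) * RetryDelay, scaled by a random factor in [0.8, 1.3) and capped at MaxRetryDelay:

package main

import (
	"fmt"
	"math/rand"
	"time"
)

func calcDelaySketch(try int32, retryDelay, maxRetryDelay time.Duration) time.Duration {
	pow2MinusOne := (int64(1) << uint(try)) - 1 // (2^try)-1 without math.Pow
	delay := time.Duration(pow2MinusOne) * retryDelay
	// Jitter exactly as in the committed line: [0.0, 1.0)/2 + 0.8 => [0.8, 1.3)
	delay = time.Duration(delay.Seconds() * (rand.Float64()/2 + 0.8) * float64(time.Second))
	if delay > maxRetryDelay {
		delay = maxRetryDelay
	}
	return delay
}

func main() {
	for try := int32(1); try <= 4; try++ {
		fmt.Printf("try %d: %v\n", try, calcDelaySketch(try, 4*time.Second, 2*time.Minute))
	}
}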
@@ -133,10 +143,10 @@ func NewRetryPolicyFactory(o RetryOptions) pipeline.Factory {
primaryTry := int32(0) // This indicates how many tries we've attempted against the primary DC primaryTry := int32(0) // This indicates how many tries we've attempted against the primary DC
// We only consider retrying against a secondary if we have a read request (GET/HEAD) AND this policy has a Secondary URL it can use // We only consider retrying against a secondary if we have a read request (GET/HEAD) AND this policy has a Secondary URL it can use
considerSecondary := (request.Method == http.MethodGet || request.Method == http.MethodHead) && o.RetryReadsFromSecondaryHost != "" considerSecondary := (request.Method == http.MethodGet || request.Method == http.MethodHead) && o.retryReadsFromSecondaryHost() != ""
// Exponential retry algorithm: ((2 ^ attempt) - 1) * delay * random(0.8, 1.3) // Exponential retry algorithm: ((2 ^ attempt) - 1) * delay * random(0.8, 1.3)
// When to retry: connection failure or an HTTP status code of 500 or greater, except 501 and 505 // When to retry: connection failure or temporary/timeout. NOTE: StorageError considers HTTP 500/503 as temporary & therefore retryable
// If using a secondary: // If using a secondary:
// Even tries go against primary; odd tries go against the secondary // Even tries go against primary; odd tries go against the secondary
// For a primary wait ((2 ^ primaryTries - 1) * delay * random(0.8, 1.3) // For a primary wait ((2 ^ primaryTries - 1) * delay * random(0.8, 1.3)
@@ -161,14 +171,15 @@ func NewRetryPolicyFactory(o RetryOptions) pipeline.Factory {
// Clone the original request to ensure that each try starts with the original (unmutated) request. // Clone the original request to ensure that each try starts with the original (unmutated) request.
requestCopy := request.Copy() requestCopy := request.Copy()
if try > 1 {
// For a retry, seek to the beginning of the Body stream. // For each try, seek to the beginning of the Body stream. We do this even for the 1st try because
if err = requestCopy.RewindBody(); err != nil { // the stream may not be at offset 0 when we first get it and we want the same behavior for the
panic(err) // 1st try as for additional tries.
} if err = requestCopy.RewindBody(); err != nil {
panic(err)
} }
if !tryingPrimary { if !tryingPrimary {
requestCopy.Request.URL.Host = o.RetryReadsFromSecondaryHost requestCopy.Request.URL.Host = o.retryReadsFromSecondaryHost()
} }
// Set the server-side timeout query parameter "timeout=[seconds]" // Set the server-side timeout query parameter "timeout=[seconds]"
@@ -211,7 +222,7 @@ func NewRetryPolicyFactory(o RetryOptions) pipeline.Factory {
action = "Retry: Secondary URL returned 404" action = "Retry: Secondary URL returned 404"
case err != nil: case err != nil:
// NOTE: Protocol Responder returns non-nil if REST API returns invalid status code for the invoked operation // NOTE: Protocol Responder returns non-nil if REST API returns invalid status code for the invoked operation
if nerr, ok := err.(net.Error); ok && (nerr.Temporary() || nerr.Timeout()) { if netErr, ok := err.(net.Error); ok && (netErr.Temporary() || netErr.Timeout()) {
action = "Retry: net.Error and Temporary() or Timeout()" action = "Retry: net.Error and Temporary() or Timeout()"
} else { } else {
action = "NoRetry: unrecognized error" action = "NoRetry: unrecognized error"
@@ -234,6 +245,12 @@ func NewRetryPolicyFactory(o RetryOptions) pipeline.Factory {
} }
break // Don't retry break // Don't retry
} }
if response != nil && response.Response() != nil {
// If we're going to retry and we got a previous response, then flush its body to avoid leaking its TCP connection
body := response.Response().Body
io.Copy(ioutil.Discard, body)
body.Close()
}
// If retrying, cancel the current per-try timeout context // If retrying, cancel the current per-try timeout context
tryCancel() tryCancel()
} }
@@ -262,7 +279,7 @@ func (r *deadlineExceededReadCloser) Read(p []byte) (int, error) {
} }
func (r *deadlineExceededReadCloser) Seek(offset int64, whence int) (int64, error) { func (r *deadlineExceededReadCloser) Seek(offset int64, whence int) (int64, error) {
// For an HTTP request, the ReadCloser MUST also implement seek // For an HTTP request, the ReadCloser MUST also implement seek
// For an HTTP Repsonse, Seek MUST not be called (or this will panic) // For an HTTP response, Seek MUST not be called (or this will panic)
o, err := r.r.(io.Seeker).Seek(offset, whence) o, err := r.r.(io.Seeker).Seek(offset, whence)
return o, improveDeadlineExceeded(err) return o, improveDeadlineExceeded(err)
} }

View file

@@ -1,51 +1,51 @@
package azblob package azblob
import ( import (
"bytes" "bytes"
"context" "context"
"fmt" "fmt"
"os" "os"
"runtime" "runtime"
"github.com/Azure/azure-pipeline-go/pipeline" "github.com/Azure/azure-pipeline-go/pipeline"
) )
// TelemetryOptions configures the telemetry policy's behavior. // TelemetryOptions configures the telemetry policy's behavior.
type TelemetryOptions struct { type TelemetryOptions struct {
// Value is a string prepended to each request's User-Agent and sent to the service. // Value is a string prepended to each request's User-Agent and sent to the service.
// The service records the user-agent in logs for diagnostics and tracking of client requests. // The service records the user-agent in logs for diagnostics and tracking of client requests.
Value string Value string
} }
// NewTelemetryPolicyFactory creates a factory that can create telemetry policy objects // NewTelemetryPolicyFactory creates a factory that can create telemetry policy objects
// which add telemetry information to outgoing HTTP requests. // which add telemetry information to outgoing HTTP requests.
func NewTelemetryPolicyFactory(o TelemetryOptions) pipeline.Factory { func NewTelemetryPolicyFactory(o TelemetryOptions) pipeline.Factory {
b := &bytes.Buffer{} b := &bytes.Buffer{}
b.WriteString(o.Value) b.WriteString(o.Value)
if b.Len() > 0 { if b.Len() > 0 {
b.WriteRune(' ') b.WriteRune(' ')
} }
fmt.Fprintf(b, "Azure-Storage/%s %s", serviceLibVersion, platformInfo) fmt.Fprintf(b, "Azure-Storage/%s %s", serviceLibVersion, platformInfo)
telemetryValue := b.String() telemetryValue := b.String()
return pipeline.FactoryFunc(func(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.PolicyFunc { return pipeline.FactoryFunc(func(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.PolicyFunc {
return func(ctx context.Context, request pipeline.Request) (pipeline.Response, error) { return func(ctx context.Context, request pipeline.Request) (pipeline.Response, error) {
request.Header.Set("User-Agent", telemetryValue) request.Header.Set("User-Agent", telemetryValue)
return next.Do(ctx, request) return next.Do(ctx, request)
} }
}) })
} }
// NOTE: the ONLY function that should write to this variable is this func // NOTE: the ONLY function that should write to this variable is this func
var platformInfo = func() string { var platformInfo = func() string {
// Azure-Storage/version (runtime; os type and version) // Azure-Storage/version (runtime; os type and version)
// Azure-Storage/1.4.0 (NODE-VERSION v4.5.0; Windows_NT 10.0.14393) // Azure-Storage/1.4.0 (NODE-VERSION v4.5.0; Windows_NT 10.0.14393)
operatingSystem := runtime.GOOS // Default OS string operatingSystem := runtime.GOOS // Default OS string
switch operatingSystem { switch operatingSystem {
case "windows": case "windows":
operatingSystem = os.Getenv("OS") // Get more specific OS information operatingSystem = os.Getenv("OS") // Get more specific OS information
case "linux": // accept default OS info case "linux": // accept default OS info
case "freebsd": // accept default OS info case "freebsd": // accept default OS info
} }
return fmt.Sprintf("(%s; %s)", runtime.Version(), operatingSystem) return fmt.Sprintf("(%s; %s)", runtime.Version(), operatingSystem)
}() }()
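As a rough illustration of the string assembled above (the library version and the Value shown are stand-ins, not the SDK's actual constants):

package main

import (
	"bytes"
	"fmt"
	"runtime"
)

func main() {
	b := &bytes.Buffer{}
	b.WriteString("myapp/1.0 ") // TelemetryOptions.Value plus its separating space
	// serviceLibVersion is unexported; "2017-07-29" is a placeholder here.
	fmt.Fprintf(b, "Azure-Storage/%s (%s; %s)", "2017-07-29", runtime.Version(), runtime.GOOS)
	fmt.Println(b.String()) // e.g. "myapp/1.0 Azure-Storage/2017-07-29 (go1.10; linux)"
}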

View file

@@ -0,0 +1,24 @@
package azblob
import (
"context"
"github.com/Azure/azure-pipeline-go/pipeline"
)
// NewUniqueRequestIDPolicyFactory creates a UniqueRequestIDPolicyFactory object
// that sets the request's x-ms-client-request-id header if it doesn't already exist.
func NewUniqueRequestIDPolicyFactory() pipeline.Factory {
return pipeline.FactoryFunc(func(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.PolicyFunc {
// This is Policy's Do method:
return func(ctx context.Context, request pipeline.Request) (pipeline.Response, error) {
id := request.Header.Get(xMsClientRequestID)
if id == "" { // Add a unique request ID if the caller didn't specify one already
request.Header.Set(xMsClientRequestID, newUUID().String())
}
return next.Do(ctx, request)
}
})
}
const xMsClientRequestID = "x-ms-client-request-id"
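The set-only-if-absent behavior can be shown in isolation; in this sketch (header values are hypothetical) a caller-supplied ID survives the policy untouched:

package main

import (
	"fmt"
	"net/http"
)

func main() {
	h := http.Header{}
	h.Set("x-ms-client-request-id", "my-correlation-id") // caller-supplied ID
	if h.Get("x-ms-client-request-id") == "" {           // the policy's check
		h.Set("x-ms-client-request-id", "would-be-newUUID().String()")
	}
	fmt.Println(h.Get("x-ms-client-request-id")) // prints "my-correlation-id"
}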

View file

@@ -0,0 +1,125 @@
package azblob
import (
"context"
"io"
"net"
"net/http"
)
// CountToEnd indicates a count of zero, meaning "from the offset to the resource's end".
const CountToEnd = 0
// HTTPGetter is a function type that refers to a method that performs an HTTP GET operation.
type HTTPGetter func(ctx context.Context, i HTTPGetterInfo) (*http.Response, error)
// HTTPGetterInfo is passed to an HTTPGetter function passing it parameters
// that should be used to make an HTTP GET request.
type HTTPGetterInfo struct {
// Offset specifies the start offset that should be used when
// creating the HTTP GET request's Range header
Offset int64
// Count specifies the count of bytes that should be used to calculate
// the end offset when creating the HTTP GET request's Range header
Count int64
// ETag specifies the resource's etag that should be used when creating
// the HTTP GET request's If-Match header
ETag ETag
}
// RetryReaderOptions contains properties which help decide when to retry.
type RetryReaderOptions struct {
// MaxRetryRequests specifies the maximum number of HTTP GET requests that will be made
// while reading from a RetryReader. A value of zero means that no additional HTTP
// GET requests will be made.
MaxRetryRequests int
doInjectError bool
doInjectErrorRound int
}
// retryReader implements the io.ReadCloser interface.
// retryReader reads from a response body; if the response is nil, or a retryable network error
// occurs while reading, it invokes the user-defined getter with the updated HTTPGetterInfo to
// obtain a fresh response, and then continues the overall read from that new response.
type retryReader struct {
ctx context.Context
response *http.Response
info HTTPGetterInfo
o RetryReaderOptions
getter HTTPGetter
}
// NewRetryReader creates a retry reader.
func NewRetryReader(ctx context.Context, initialResponse *http.Response,
info HTTPGetterInfo, o RetryReaderOptions, getter HTTPGetter) io.ReadCloser {
if getter == nil {
panic("getter must not be nil")
}
return &retryReader{ctx: ctx, getter: getter, info: info, response: initialResponse, o: o}
}
func (s *retryReader) Read(p []byte) (n int, err error) {
try := 0
for ; try <= s.o.MaxRetryRequests; try++ {
//fmt.Println(try) // Uncomment for debugging.
if s.response != nil { // We're working with a successful response
n, err := s.response.Body.Read(p) // Read from the stream
// Injection mechanism for testing.
if s.o.doInjectError && try == s.o.doInjectErrorRound {
err = &net.DNSError{IsTemporary: true}
}
// We successfully read data or reached EOF.
if err == nil || err == io.EOF {
s.info.Offset += int64(n) // Increments the start offset in case we need to make a new HTTP request in the future
if s.info.Count != CountToEnd {
s.info.Count -= int64(n) // Decrement the count in case we need to make a new HTTP request in the future
}
return n, err // Return the result to the caller
}
s.Close()
s.response = nil // Something went wrong; our stream is no longer good
// Check the retry count and error code, and decide whether to retry.
if try == s.o.MaxRetryRequests {
return n, err // No retry, or retry exhausted
} else if netErr, ok := err.(net.Error); ok {
if !netErr.Timeout() && !netErr.Temporary() {
return n, err // Not retryable
}
} else {
return n, err // Not retryable, just return
}
}
// We don't have a response stream to read from, try to get one.
response, err := s.getter(s.ctx, s.info)
if err != nil {
return 0, err
}
// Successful GET; this is the network stream we'll read from.
s.response = response
// Loop around and try to read from this stream.
}
if s.o.doInjectError &&
s.o.doInjectErrorRound <= s.o.MaxRetryRequests &&
s.o.doInjectErrorRound > 0 &&
try < s.o.doInjectErrorRound {
panic("invalid status, internal error, stream read retry is not working properly.")
}
return 0, nil // The compiler is wrong; we never actually get here
}
func (s *retryReader) Close() error {
if s.response != nil && s.response.Body != nil {
return s.response.Body.Close()
}
return nil
}
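A hedged usage sketch follows; the URL and the plain http wiring are hypothetical stand-ins for the SDK's real getter, which re-issues the blob download using the updated Offset, Count, and ETag:

package main

import (
	"context"
	"fmt"
	"io/ioutil"
	"net/http"

	"github.com/Azure/azure-storage-blob-go/2017-07-29/azblob"
)

func main() {
	getter := func(ctx context.Context, i azblob.HTTPGetterInfo) (*http.Response, error) {
		req, err := http.NewRequest("GET", "https://example.com/blob", nil) // hypothetical URL
		if err != nil {
			return nil, err
		}
		req.Header.Set("Range", fmt.Sprintf("bytes=%d-", i.Offset)) // resume from the updated offset
		if i.ETag != "" {
			req.Header.Set("If-Match", string(i.ETag)) // don't resume across a changed resource
		}
		return http.DefaultClient.Do(req.WithContext(ctx))
	}
	// A nil initial response makes the first Read invoke the getter.
	rr := azblob.NewRetryReader(context.Background(), nil,
		azblob.HTTPGetterInfo{Offset: 0, Count: azblob.CountToEnd},
		azblob.RetryReaderOptions{MaxRetryRequests: 3}, getter)
	defer rr.Close()
	data, err := ioutil.ReadAll(rr)
	fmt.Println(len(data), err)
}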

View file

@@ -1,170 +1,217 @@
package azblob package azblob
import ( import (
"bytes" "bytes"
"strings" "fmt"
"time" "strings"
) "time"
)
// SASVersion indicates the SAS version.
const SASVersion = "2015-04-05" // AccountSASSignatureValues is used to generate a Shared Access Signature (SAS) for an Azure Storage account.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/constructing-an-account-sas
const ( type AccountSASSignatureValues struct {
// SASProtocolHTTPS can be specified for a SAS protocol Version string `param:"sv"` // If not specified, this defaults to SASVersion
SASProtocolHTTPS = "https" Protocol SASProtocol `param:"spr"` // See the SASProtocol* constants
StartTime time.Time `param:"st"` // Not specified if IsZero
// SASProtocolHTTPSandHTTP can be specified for a SAS protocol ExpiryTime time.Time `param:"se"` // Not specified if IsZero
SASProtocolHTTPSandHTTP = "https,http" Permissions string `param:"sp"` // Create by initializing a AccountSASPermissions and then call String()
) IPRange IPRange `param:"sip"`
Services string `param:"ss"` // Create by initializing AccountSASServices and then call String()
// FormatTimesForSASSigning converts a time.Time to a snapshotTimeFormat string suitable for a ResourceTypes string `param:"srt"` // Create by initializing AccountSASResourceTypes and then call String()
// SASField's StartTime or ExpiryTime fields. Returns "" if value.IsZero(). }
func FormatTimesForSASSigning(startTime, expiryTime time.Time) (string, string) {
ss := "" // NewSASQueryParameters uses an account's shared key credential to sign this signature values to produce
if !startTime.IsZero() { // the proper SAS query parameters.
ss = startTime.Format(SASTimeFormat) // "yyyy-MM-ddTHH:mm:ssZ" func (v AccountSASSignatureValues) NewSASQueryParameters(sharedKeyCredential *SharedKeyCredential) SASQueryParameters {
} // https://docs.microsoft.com/en-us/rest/api/storageservices/Constructing-an-Account-SAS
se := "" if v.ExpiryTime.IsZero() || v.Permissions == "" || v.ResourceTypes == "" || v.Services == "" {
if !expiryTime.IsZero() { panic("Account SAS is missing at least one of these: ExpiryTime, Permissions, Service, or ResourceType")
se = expiryTime.Format(SASTimeFormat) // "yyyy-MM-ddTHH:mm:ssZ" }
} if v.Version == "" {
return ss, se v.Version = SASVersion
} }
perms := &AccountSASPermissions{}
// AccountSASSignatureValues is used to generate a Shared Access Signature (SAS) for an Azure Storage account. if err := perms.Parse(v.Permissions); err != nil {
type AccountSASSignatureValues struct { panic(err)
Version string `param:"sv"` // If not specified, this defaults to azstorage.SASVersion }
Protocol string `param:"spr"` // See the SASProtocol* constants v.Permissions = perms.String()
StartTime time.Time `param:"st"` // Not specified if IsZero
ExpiryTime time.Time `param:"se"` // Not specified if IsZero startTime, expiryTime := FormatTimesForSASSigning(v.StartTime, v.ExpiryTime)
Permissions string `param:"sp"`
IPRange IPRange `param:"sip"` stringToSign := strings.Join([]string{
Services string `param:"ss"` sharedKeyCredential.AccountName(),
ResourceTypes string `param:"srt"` v.Permissions,
} v.Services,
v.ResourceTypes,
// NewSASQueryParameters uses an account's shared key credential to sign this signature values to produce startTime,
// the proper SAS query parameters. expiryTime,
func (v AccountSASSignatureValues) NewSASQueryParameters(sharedKeyCredential *SharedKeyCredential) SASQueryParameters { v.IPRange.String(),
// https://docs.microsoft.com/en-us/rest/api/storageservices/Constructing-an-Account-SAS string(v.Protocol),
if v.ExpiryTime.IsZero() || v.Permissions == "" || v.ResourceTypes == "" || v.Services == "" { v.Version,
panic("Account SAS is missing at least one of these: ExpiryTime, Permissions, Service, or ResourceType") ""}, // That right, the account SAS requires a terminating extra newline
} "\n")
if v.Version == "" {
v.Version = SASVersion signature := sharedKeyCredential.ComputeHMACSHA256(stringToSign)
} p := SASQueryParameters{
startTime, expiryTime := FormatTimesForSASSigning(v.StartTime, v.ExpiryTime) // Common SAS parameters
version: v.Version,
stringToSign := strings.Join([]string{ protocol: v.Protocol,
sharedKeyCredential.AccountName(), startTime: v.StartTime,
v.Permissions, expiryTime: v.ExpiryTime,
v.Services, permissions: v.Permissions,
v.ResourceTypes, ipRange: v.IPRange,
startTime,
expiryTime, // Account-specific SAS parameters
v.IPRange.String(), services: v.Services,
v.Protocol, resourceTypes: v.ResourceTypes,
v.Version,
""}, // That right, the account SAS requires a terminating extra newline // Calculated SAS signature
"\n") signature: signature,
}
signature := sharedKeyCredential.ComputeHMACSHA256(stringToSign) return p
p := SASQueryParameters{ }
// Common SAS parameters
Version: v.Version, // The AccountSASPermissions type simplifies creating the permissions string for an Azure Storage Account SAS.
Protocol: v.Protocol, // Initialize an instance of this type and then call its String method to set AccountSASSignatureValues's Permissions field.
StartTime: v.StartTime, type AccountSASPermissions struct {
ExpiryTime: v.ExpiryTime, Read, Write, Delete, List, Add, Create, Update, Process bool
Permissions: v.Permissions, }
IPRange: v.IPRange,
// String produces the SAS permissions string for an Azure Storage account.
// Account-specific SAS parameters // Call this method to set AccountSASSignatureValues's Permissions field.
Services: v.Services, func (p AccountSASPermissions) String() string {
ResourceTypes: v.ResourceTypes, var buffer bytes.Buffer
if p.Read {
// Calculated SAS signature buffer.WriteRune('r')
Signature: signature, }
} if p.Write {
return p buffer.WriteRune('w')
} }
if p.Delete {
// The AccountSASPermissions type simplifies creating the permissions string for an Azure Storage Account SAS. buffer.WriteRune('d')
// Initialize an instance of this type and then call its String method to set AccountSASSignatureValues's Permissions field. }
type AccountSASPermissions struct { if p.List {
Read, Write, Delete, List, Add, Create, Update, Process bool buffer.WriteRune('l')
} }
if p.Add {
// String produces the SAS permissions string for an Azure Storage account. buffer.WriteRune('a')
// Call this method to set AccountSASSignatureValues's Permissions field. }
func (p AccountSASPermissions) String() string { if p.Create {
var buffer bytes.Buffer buffer.WriteRune('c')
if p.Read { }
buffer.WriteRune('r') if p.Update {
} buffer.WriteRune('u')
if p.Write { }
buffer.WriteRune('w') if p.Process {
} buffer.WriteRune('p')
if p.Delete { }
buffer.WriteRune('d') return buffer.String()
} }
if p.List {
buffer.WriteRune('l') // Parse initializes the AccountSASPermissions's fields from a string.
} func (p *AccountSASPermissions) Parse(s string) error {
if p.Add { *p = AccountSASPermissions{} // Clear out the flags
buffer.WriteRune('a') for _, r := range s {
} switch r {
if p.Create { case 'r':
buffer.WriteRune('c') p.Read = true
} case 'w':
if p.Update { p.Write = true
buffer.WriteRune('u') case 'd':
} p.Delete = true
if p.Process { case 'l':
buffer.WriteRune('p') p.List = true
} case 'a':
return buffer.String() p.Add = true
} case 'c':
p.Create = true
// The AccountSASServices type simplifies creating the services string for an Azure Storage Account SAS. case 'u':
// Initialize an instance of this type and then call its String method to set AccountSASSignatureValues's Services field. p.Update = true
type AccountSASServices struct { case 'p':
Blob, Queue, File bool p.Process = true
} default:
return fmt.Errorf("Invalid permission character: '%v'", r)
// String produces the SAS services string for an Azure Storage account. }
// Call this method to set AccountSASSignatureValues's Services field. }
func (s AccountSASServices) String() string { return nil
var buffer bytes.Buffer }
if s.Blob {
buffer.WriteRune('b') // The AccountSASServices type simplifies creating the services string for an Azure Storage Account SAS.
} // Initialize an instance of this type and then call its String method to set AccountSASSignatureValues's Services field.
if s.Queue { type AccountSASServices struct {
buffer.WriteRune('q') Blob, Queue, File bool
} }
if s.File {
buffer.WriteRune('f') // String produces the SAS services string for an Azure Storage account.
} // Call this method to set AccountSASSignatureValues's Services field.
return buffer.String() func (s AccountSASServices) String() string {
} var buffer bytes.Buffer
if s.Blob {
// The AccountSASResourceTypes type simplifies creating the resource types string for an Azure Storage Account SAS. buffer.WriteRune('b')
// Initialize an instance of this type and then call its String method to set AccountSASSignatureValues's ResourceTypes field. }
type AccountSASResourceTypes struct { if s.Queue {
Service, Container, Object bool buffer.WriteRune('q')
} }
if s.File {
// String produces the SAS resource types string for an Azure Storage account. buffer.WriteRune('f')
// Call this method to set AccountSASSignatureValues's ResourceTypes field. }
func (rt AccountSASResourceTypes) String() string { return buffer.String()
var buffer bytes.Buffer }
if rt.Service {
buffer.WriteRune('s') // Parse initializes the AccountSASServices' fields from a string.
} func (a *AccountSASServices) Parse(s string) error {
if rt.Container { *a = AccountSASServices{} // Clear out the flags
buffer.WriteRune('c') for _, r := range s {
} switch r {
if rt.Object { case 'b':
buffer.WriteRune('o') a.Blob = true
} case 'q':
return buffer.String() a.Queue = true
} case 'f':
a.File = true
default:
return fmt.Errorf("Invalid service character: '%v'", r)
}
}
return nil
}
// The AccountSASResourceTypes type simplifies creating the resource types string for an Azure Storage Account SAS.
// Initialize an instance of this type and then call its String method to set AccountSASSignatureValues's ResourceTypes field.
type AccountSASResourceTypes struct {
Service, Container, Object bool
}
// String produces the SAS resource types string for an Azure Storage account.
// Call this method to set AccountSASSignatureValues's ResourceTypes field.
func (rt AccountSASResourceTypes) String() string {
var buffer bytes.Buffer
if rt.Service {
buffer.WriteRune('s')
}
if rt.Container {
buffer.WriteRune('c')
}
if rt.Object {
buffer.WriteRune('o')
}
return buffer.String()
}
// Parse initializes the AccountSASResourceType's fields from a string.
func (rt *AccountSASResourceTypes) Parse(s string) error {
*rt = AccountSASResourceTypes{} // Clear out the flags
for _, r := range s {
switch r {
case 's':
rt.Service = true
case 'c':
rt.Container = true
case 'o':
rt.Object = true
default:
return fmt.Errorf("Invalid resource type: '%v'", r)
}
}
return nil
}
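Putting the types above together, a caller might mint an account SAS like this (the account name and base64 key are placeholders):

package main

import (
	"fmt"
	"time"

	"github.com/Azure/azure-storage-blob-go/2017-07-29/azblob"
)

func main() {
	credential := azblob.NewSharedKeyCredential("myaccount", "bXlmYWtla2V5") // placeholders
	sasQueryParams := azblob.AccountSASSignatureValues{
		Protocol:      azblob.SASProtocolHTTPS,
		ExpiryTime:    time.Now().UTC().Add(48 * time.Hour),
		Permissions:   azblob.AccountSASPermissions{Read: true, List: true}.String(),
		Services:      azblob.AccountSASServices{Blob: true}.String(),
		ResourceTypes: azblob.AccountSASResourceTypes{Container: true, Object: true}.String(),
	}.NewSASQueryParameters(credential)
	fmt.Printf("https://myaccount.blob.core.windows.net/?%s\n", sasQueryParams.Encode())
}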

View file

@@ -0,0 +1,211 @@
package azblob
import (
"net"
"net/url"
"strings"
"time"
)
// SASVersion indicates the SAS version.
const SASVersion = ServiceVersion
type SASProtocol string
const (
// SASProtocolHTTPS can be specified for a SAS protocol
SASProtocolHTTPS SASProtocol = "https"
// SASProtocolHTTPSandHTTP can be specified for a SAS protocol
SASProtocolHTTPSandHTTP SASProtocol = "https,http"
)
// FormatTimesForSASSigning converts a time.Time to a snapshotTimeFormat string suitable for a
// SASField's StartTime or ExpiryTime fields. Returns "" if value.IsZero().
func FormatTimesForSASSigning(startTime, expiryTime time.Time) (string, string) {
ss := ""
if !startTime.IsZero() {
ss = startTime.Format(SASTimeFormat) // "yyyy-MM-ddTHH:mm:ssZ"
}
se := ""
if !expiryTime.IsZero() {
se = expiryTime.Format(SASTimeFormat) // "yyyy-MM-ddTHH:mm:ssZ"
}
return ss, se
}
// SASTimeFormat represents the format of a SAS start or expiry time. Use it when formatting/parsing a time.Time.
const SASTimeFormat = "2006-01-02T15:04:05Z" //"2017-07-27T00:00:00Z" // ISO 8601
// https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas
// A SASQueryParameters object represents the components that make up an Azure Storage SAS' query parameters.
// You parse a map of query parameters into its fields by calling newSASQueryParameters(). You add the components
// to a query parameter map by calling addToValues().
// NOTE: Changing any field requires computing a new SAS signature using an XxxSASSignatureValues type.
//
// This type defines the components used by all Azure Storage resources (Containers, Blobs, Files, & Queues).
type SASQueryParameters struct {
// All members are immutable or values so copies of this struct are goroutine-safe.
version string `param:"sv"`
services string `param:"ss"`
resourceTypes string `param:"srt"`
protocol SASProtocol `param:"spr"`
startTime time.Time `param:"st"`
expiryTime time.Time `param:"se"`
ipRange IPRange `param:"sip"`
identifier string `param:"si"`
resource string `param:"sr"`
permissions string `param:"sp"`
signature string `param:"sig"`
}
func (p *SASQueryParameters) Version() string {
return p.version
}
func (p *SASQueryParameters) Services() string {
return p.services
}
func (p *SASQueryParameters) ResourceTypes() string {
return p.resourceTypes
}
func (p *SASQueryParameters) Protocol() SASProtocol {
return p.protocol
}
func (p *SASQueryParameters) StartTime() time.Time {
return p.startTime
}
func (p *SASQueryParameters) ExpiryTime() time.Time {
return p.expiryTime
}
func (p *SASQueryParameters) IPRange() IPRange {
return p.ipRange
}
func (p *SASQueryParameters) Identifier() string {
return p.identifier
}
func (p *SASQueryParameters) Resource() string {
return p.resource
}
func (p *SASQueryParameters) Permissions() string {
return p.permissions
}
func (p *SASQueryParameters) Signature() string {
return p.signature
}
// IPRange represents a SAS IP range's start IP and (optionally) end IP.
type IPRange struct {
Start net.IP // Not specified if length = 0
End net.IP // Not specified if length = 0
}
// String returns a string representation of an IPRange.
func (ipr *IPRange) String() string {
if len(ipr.Start) == 0 {
return ""
}
start := ipr.Start.String()
if len(ipr.End) == 0 {
return start
}
return start + "-" + ipr.End.String()
}
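For example (addresses are arbitrary):

package main

import (
	"fmt"
	"net"

	"github.com/Azure/azure-storage-blob-go/2017-07-29/azblob"
)

func main() {
	ipr := azblob.IPRange{Start: net.ParseIP("168.1.5.60"), End: net.ParseIP("168.1.5.70")}
	fmt.Println(ipr.String()) // "168.1.5.60-168.1.5.70"; with End unset, just "168.1.5.60"
}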
// newSASQueryParameters creates and initializes a SASQueryParameters object based on the
// query parameter map's passed-in values. If deleteSASParametersFromValues is true,
// all SAS-related query parameters are removed from the passed-in map. If
// deleteSASParametersFromValues is false, the passed-in map is unaltered.
func newSASQueryParameters(values url.Values, deleteSASParametersFromValues bool) SASQueryParameters {
p := SASQueryParameters{}
for k, v := range values {
val := v[0]
isSASKey := true
switch strings.ToLower(k) {
case "sv":
p.version = val
case "ss":
p.services = val
case "srt":
p.resourceTypes = val
case "spr":
p.protocol = SASProtocol(val)
case "st":
p.startTime, _ = time.Parse(SASTimeFormat, val)
case "se":
p.expiryTime, _ = time.Parse(SASTimeFormat, val)
case "sip":
dashIndex := strings.Index(val, "-")
if dashIndex == -1 {
p.ipRange.Start = net.ParseIP(val)
} else {
p.ipRange.Start = net.ParseIP(val[:dashIndex])
p.ipRange.End = net.ParseIP(val[dashIndex+1:])
}
case "si":
p.identifier = val
case "sr":
p.resource = val
case "sp":
p.permissions = val
case "sig":
p.signature = val
default:
isSASKey = false // We didn't recognize the query parameter
}
if isSASKey && deleteSASParametersFromValues {
delete(values, k)
}
}
return p
}
// addToValues adds the SAS components to the specified query parameters map.
func (p *SASQueryParameters) addToValues(v url.Values) url.Values {
if p.version != "" {
v.Add("sv", p.version)
}
if p.services != "" {
v.Add("ss", p.services)
}
if p.resourceTypes != "" {
v.Add("srt", p.resourceTypes)
}
if p.protocol != "" {
v.Add("spr", string(p.protocol))
}
if !p.startTime.IsZero() {
v.Add("st", p.startTime.Format(SASTimeFormat))
}
if !p.expiryTime.IsZero() {
v.Add("se", p.expiryTime.Format(SASTimeFormat))
}
if len(p.ipRange.Start) > 0 {
v.Add("sip", p.ipRange.String())
}
if p.identifier != "" {
v.Add("si", p.identifier)
}
if p.resource != "" {
v.Add("sr", p.resource)
}
if p.permissions != "" {
v.Add("sp", p.permissions)
}
if p.signature != "" {
v.Add("sig", p.signature)
}
return v
}
// Encode encodes the SAS query parameters into URL encoded form sorted by key.
func (p *SASQueryParameters) Encode() string {
v := url.Values{}
p.addToValues(v)
return v.Encode()
}

View file

@@ -1,131 +1,131 @@
package azblob package azblob
// https://docs.microsoft.com/en-us/rest/api/storageservices/common-rest-api-error-codes // https://docs.microsoft.com/en-us/rest/api/storageservices/common-rest-api-error-codes
const ( const (
// ServiceCodeNone is the default value. It indicates that the error was related to the service or that the service didn't return a code. // ServiceCodeNone is the default value. It indicates that the error was related to the service or that the service didn't return a code.
ServiceCodeNone ServiceCodeType = "" ServiceCodeNone ServiceCodeType = ""
// ServiceCodeAccountAlreadyExists means the specified account already exists. // ServiceCodeAccountAlreadyExists means the specified account already exists.
ServiceCodeAccountAlreadyExists ServiceCodeType = "AccountAlreadyExists" ServiceCodeAccountAlreadyExists ServiceCodeType = "AccountAlreadyExists"
// ServiceCodeAccountBeingCreated means the specified account is in the process of being created (403). // ServiceCodeAccountBeingCreated means the specified account is in the process of being created (403).
ServiceCodeAccountBeingCreated ServiceCodeType = "AccountBeingCreated" ServiceCodeAccountBeingCreated ServiceCodeType = "AccountBeingCreated"
// ServiceCodeAccountIsDisabled means the specified account is disabled (403). // ServiceCodeAccountIsDisabled means the specified account is disabled (403).
ServiceCodeAccountIsDisabled ServiceCodeType = "AccountIsDisabled" ServiceCodeAccountIsDisabled ServiceCodeType = "AccountIsDisabled"
// ServiceCodeAuthenticationFailed means the server failed to authenticate the request. Make sure the value of the Authorization header is formed correctly including the signature (403). // ServiceCodeAuthenticationFailed means the server failed to authenticate the request. Make sure the value of the Authorization header is formed correctly including the signature (403).
ServiceCodeAuthenticationFailed ServiceCodeType = "AuthenticationFailed" ServiceCodeAuthenticationFailed ServiceCodeType = "AuthenticationFailed"
// ServiceCodeConditionHeadersNotSupported means the condition headers are not supported (400). // ServiceCodeConditionHeadersNotSupported means the condition headers are not supported (400).
ServiceCodeConditionHeadersNotSupported ServiceCodeType = "ConditionHeadersNotSupported" ServiceCodeConditionHeadersNotSupported ServiceCodeType = "ConditionHeadersNotSupported"
// ServiceCodeConditionNotMet means the condition specified in the conditional header(s) was not met for a read/write operation (304/412). // ServiceCodeConditionNotMet means the condition specified in the conditional header(s) was not met for a read/write operation (304/412).
ServiceCodeConditionNotMet ServiceCodeType = "ConditionNotMet" ServiceCodeConditionNotMet ServiceCodeType = "ConditionNotMet"
// ServiceCodeEmptyMetadataKey means the key for one of the metadata key-value pairs is empty (400). // ServiceCodeEmptyMetadataKey means the key for one of the metadata key-value pairs is empty (400).
ServiceCodeEmptyMetadataKey ServiceCodeType = "EmptyMetadataKey" ServiceCodeEmptyMetadataKey ServiceCodeType = "EmptyMetadataKey"
// ServiceCodeInsufficientAccountPermissions means read operations are currently disabled or Write operations are not allowed or The account being accessed does not have sufficient permissions to execute this operation (403). // ServiceCodeInsufficientAccountPermissions means read operations are currently disabled or Write operations are not allowed or The account being accessed does not have sufficient permissions to execute this operation (403).
ServiceCodeInsufficientAccountPermissions ServiceCodeType = "InsufficientAccountPermissions" ServiceCodeInsufficientAccountPermissions ServiceCodeType = "InsufficientAccountPermissions"
// ServiceCodeInternalError means the server encountered an internal error. Please retry the request (500). // ServiceCodeInternalError means the server encountered an internal error. Please retry the request (500).
ServiceCodeInternalError ServiceCodeType = "InternalError" ServiceCodeInternalError ServiceCodeType = "InternalError"
// ServiceCodeInvalidAuthenticationInfo means the authentication information was not provided in the correct format. Verify the value of Authorization header (400). // ServiceCodeInvalidAuthenticationInfo means the authentication information was not provided in the correct format. Verify the value of Authorization header (400).
ServiceCodeInvalidAuthenticationInfo ServiceCodeType = "InvalidAuthenticationInfo" ServiceCodeInvalidAuthenticationInfo ServiceCodeType = "InvalidAuthenticationInfo"
// ServiceCodeInvalidHeaderValue means the value provided for one of the HTTP headers was not in the correct format (400). // ServiceCodeInvalidHeaderValue means the value provided for one of the HTTP headers was not in the correct format (400).
ServiceCodeInvalidHeaderValue ServiceCodeType = "InvalidHeaderValue" ServiceCodeInvalidHeaderValue ServiceCodeType = "InvalidHeaderValue"
// ServiceCodeInvalidHTTPVerb means the HTTP verb specified was not recognized by the server (400). // ServiceCodeInvalidHTTPVerb means the HTTP verb specified was not recognized by the server (400).
ServiceCodeInvalidHTTPVerb ServiceCodeType = "InvalidHttpVerb" ServiceCodeInvalidHTTPVerb ServiceCodeType = "InvalidHttpVerb"
// ServiceCodeInvalidInput means one of the request inputs is not valid (400). // ServiceCodeInvalidInput means one of the request inputs is not valid (400).
ServiceCodeInvalidInput ServiceCodeType = "InvalidInput" ServiceCodeInvalidInput ServiceCodeType = "InvalidInput"
// ServiceCodeInvalidMd5 means the MD5 value specified in the request is invalid. The MD5 value must be 128 bits and Base64-encoded (400). // ServiceCodeInvalidMd5 means the MD5 value specified in the request is invalid. The MD5 value must be 128 bits and Base64-encoded (400).
ServiceCodeInvalidMd5 ServiceCodeType = "InvalidMd5" ServiceCodeInvalidMd5 ServiceCodeType = "InvalidMd5"
// ServiceCodeInvalidMetadata means the specified metadata is invalid. It includes characters that are not permitted (400). // ServiceCodeInvalidMetadata means the specified metadata is invalid. It includes characters that are not permitted (400).
ServiceCodeInvalidMetadata ServiceCodeType = "InvalidMetadata" ServiceCodeInvalidMetadata ServiceCodeType = "InvalidMetadata"
// ServiceCodeInvalidQueryParameterValue means an invalid value was specified for one of the query parameters in the request URI (400). // ServiceCodeInvalidQueryParameterValue means an invalid value was specified for one of the query parameters in the request URI (400).
ServiceCodeInvalidQueryParameterValue ServiceCodeType = "InvalidQueryParameterValue" ServiceCodeInvalidQueryParameterValue ServiceCodeType = "InvalidQueryParameterValue"
// ServiceCodeInvalidRange means the range specified is invalid for the current size of the resource (416). // ServiceCodeInvalidRange means the range specified is invalid for the current size of the resource (416).
ServiceCodeInvalidRange ServiceCodeType = "InvalidRange" ServiceCodeInvalidRange ServiceCodeType = "InvalidRange"
// ServiceCodeInvalidResourceName means the specified resource name contains invalid characters (400). // ServiceCodeInvalidResourceName means the specified resource name contains invalid characters (400).
ServiceCodeInvalidResourceName ServiceCodeType = "InvalidResourceName" ServiceCodeInvalidResourceName ServiceCodeType = "InvalidResourceName"
// ServiceCodeInvalidURI means the requested URI does not represent any resource on the server (400). // ServiceCodeInvalidURI means the requested URI does not represent any resource on the server (400).
ServiceCodeInvalidURI ServiceCodeType = "InvalidUri" ServiceCodeInvalidURI ServiceCodeType = "InvalidUri"
// ServiceCodeInvalidXMLDocument means the specified XML is not syntactically valid (400). // ServiceCodeInvalidXMLDocument means the specified XML is not syntactically valid (400).
ServiceCodeInvalidXMLDocument ServiceCodeType = "InvalidXmlDocument" ServiceCodeInvalidXMLDocument ServiceCodeType = "InvalidXmlDocument"
// ServiceCodeInvalidXMLNodeValue means the value provided for one of the XML nodes in the request body was not in the correct format (400). // ServiceCodeInvalidXMLNodeValue means the value provided for one of the XML nodes in the request body was not in the correct format (400).
ServiceCodeInvalidXMLNodeValue ServiceCodeType = "InvalidXmlNodeValue" ServiceCodeInvalidXMLNodeValue ServiceCodeType = "InvalidXmlNodeValue"
// ServiceCodeMd5Mismatch means the MD5 value specified in the request did not match the MD5 value calculated by the server (400). // ServiceCodeMd5Mismatch means the MD5 value specified in the request did not match the MD5 value calculated by the server (400).
ServiceCodeMd5Mismatch ServiceCodeType = "Md5Mismatch" ServiceCodeMd5Mismatch ServiceCodeType = "Md5Mismatch"
// ServiceCodeMetadataTooLarge means the size of the specified metadata exceeds the maximum size permitted (400). // ServiceCodeMetadataTooLarge means the size of the specified metadata exceeds the maximum size permitted (400).
ServiceCodeMetadataTooLarge ServiceCodeType = "MetadataTooLarge" ServiceCodeMetadataTooLarge ServiceCodeType = "MetadataTooLarge"
// ServiceCodeMissingContentLengthHeader means the Content-Length header was not specified (411). // ServiceCodeMissingContentLengthHeader means the Content-Length header was not specified (411).
ServiceCodeMissingContentLengthHeader ServiceCodeType = "MissingContentLengthHeader" ServiceCodeMissingContentLengthHeader ServiceCodeType = "MissingContentLengthHeader"
// ServiceCodeMissingRequiredQueryParameter means a required query parameter was not specified for this request (400). // ServiceCodeMissingRequiredQueryParameter means a required query parameter was not specified for this request (400).
ServiceCodeMissingRequiredQueryParameter ServiceCodeType = "MissingRequiredQueryParameter" ServiceCodeMissingRequiredQueryParameter ServiceCodeType = "MissingRequiredQueryParameter"
// ServiceCodeMissingRequiredHeader means a required HTTP header was not specified (400). // ServiceCodeMissingRequiredHeader means a required HTTP header was not specified (400).
ServiceCodeMissingRequiredHeader ServiceCodeType = "MissingRequiredHeader" ServiceCodeMissingRequiredHeader ServiceCodeType = "MissingRequiredHeader"
// ServiceCodeMissingRequiredXMLNode means a required XML node was not specified in the request body (400). // ServiceCodeMissingRequiredXMLNode means a required XML node was not specified in the request body (400).
ServiceCodeMissingRequiredXMLNode ServiceCodeType = "MissingRequiredXmlNode" ServiceCodeMissingRequiredXMLNode ServiceCodeType = "MissingRequiredXmlNode"
// ServiceCodeMultipleConditionHeadersNotSupported means multiple condition headers are not supported (400). // ServiceCodeMultipleConditionHeadersNotSupported means multiple condition headers are not supported (400).
ServiceCodeMultipleConditionHeadersNotSupported ServiceCodeType = "MultipleConditionHeadersNotSupported" ServiceCodeMultipleConditionHeadersNotSupported ServiceCodeType = "MultipleConditionHeadersNotSupported"
// ServiceCodeOperationTimedOut means the operation could not be completed within the permitted time (500). // ServiceCodeOperationTimedOut means the operation could not be completed within the permitted time (500).
ServiceCodeOperationTimedOut ServiceCodeType = "OperationTimedOut" ServiceCodeOperationTimedOut ServiceCodeType = "OperationTimedOut"
// ServiceCodeOutOfRangeInput means one of the request inputs is out of range (400). // ServiceCodeOutOfRangeInput means one of the request inputs is out of range (400).
ServiceCodeOutOfRangeInput ServiceCodeType = "OutOfRangeInput" ServiceCodeOutOfRangeInput ServiceCodeType = "OutOfRangeInput"
// ServiceCodeOutOfRangeQueryParameterValue means a query parameter specified in the request URI is outside the permissible range (400). // ServiceCodeOutOfRangeQueryParameterValue means a query parameter specified in the request URI is outside the permissible range (400).
ServiceCodeOutOfRangeQueryParameterValue ServiceCodeType = "OutOfRangeQueryParameterValue" ServiceCodeOutOfRangeQueryParameterValue ServiceCodeType = "OutOfRangeQueryParameterValue"
// ServiceCodeRequestBodyTooLarge means the size of the request body exceeds the maximum size permitted (413). // ServiceCodeRequestBodyTooLarge means the size of the request body exceeds the maximum size permitted (413).
ServiceCodeRequestBodyTooLarge ServiceCodeType = "RequestBodyTooLarge" ServiceCodeRequestBodyTooLarge ServiceCodeType = "RequestBodyTooLarge"
// ServiceCodeResourceTypeMismatch means the specified resource type does not match the type of the existing resource (409). // ServiceCodeResourceTypeMismatch means the specified resource type does not match the type of the existing resource (409).
ServiceCodeResourceTypeMismatch ServiceCodeType = "ResourceTypeMismatch" ServiceCodeResourceTypeMismatch ServiceCodeType = "ResourceTypeMismatch"
// ServiceCodeRequestURLFailedToParse means the url in the request could not be parsed (400). // ServiceCodeRequestURLFailedToParse means the url in the request could not be parsed (400).
ServiceCodeRequestURLFailedToParse ServiceCodeType = "RequestUrlFailedToParse" ServiceCodeRequestURLFailedToParse ServiceCodeType = "RequestUrlFailedToParse"
// ServiceCodeResourceAlreadyExists means the specified resource already exists (409). // ServiceCodeResourceAlreadyExists means the specified resource already exists (409).
ServiceCodeResourceAlreadyExists ServiceCodeType = "ResourceAlreadyExists" ServiceCodeResourceAlreadyExists ServiceCodeType = "ResourceAlreadyExists"
// ServiceCodeResourceNotFound means the specified resource does not exist (404). // ServiceCodeResourceNotFound means the specified resource does not exist (404).
ServiceCodeResourceNotFound ServiceCodeType = "ResourceNotFound" ServiceCodeResourceNotFound ServiceCodeType = "ResourceNotFound"
// ServiceCodeServerBusy means the server is currently unable to receive requests. Please retry your request or Ingress/egress is over the account limit or operations per second is over the account limit (503). // ServiceCodeServerBusy means the server is currently unable to receive requests. Please retry your request or Ingress/egress is over the account limit or operations per second is over the account limit (503).
ServiceCodeServerBusy ServiceCodeType = "ServerBusy" ServiceCodeServerBusy ServiceCodeType = "ServerBusy"
// ServiceCodeUnsupportedHeader means one of the HTTP headers specified in the request is not supported (400). // ServiceCodeUnsupportedHeader means one of the HTTP headers specified in the request is not supported (400).
ServiceCodeUnsupportedHeader ServiceCodeType = "UnsupportedHeader" ServiceCodeUnsupportedHeader ServiceCodeType = "UnsupportedHeader"
// ServiceCodeUnsupportedXMLNode means one of the XML nodes specified in the request body is not supported (400). // ServiceCodeUnsupportedXMLNode means one of the XML nodes specified in the request body is not supported (400).
ServiceCodeUnsupportedXMLNode ServiceCodeType = "UnsupportedXmlNode" ServiceCodeUnsupportedXMLNode ServiceCodeType = "UnsupportedXmlNode"
// ServiceCodeUnsupportedQueryParameter means one of the query parameters specified in the request URI is not supported (400). // ServiceCodeUnsupportedQueryParameter means one of the query parameters specified in the request URI is not supported (400).
ServiceCodeUnsupportedQueryParameter ServiceCodeType = "UnsupportedQueryParameter" ServiceCodeUnsupportedQueryParameter ServiceCodeType = "UnsupportedQueryParameter"
// ServiceCodeUnsupportedHTTPVerb means the resource doesn't support the specified HTTP verb (405). // ServiceCodeUnsupportedHTTPVerb means the resource doesn't support the specified HTTP verb (405).
ServiceCodeUnsupportedHTTPVerb ServiceCodeType = "UnsupportedHttpVerb" ServiceCodeUnsupportedHTTPVerb ServiceCodeType = "UnsupportedHttpVerb"
) )

View file

@@ -1,110 +1,110 @@
package azblob package azblob
import ( import (
"bytes" "bytes"
"encoding/xml" "encoding/xml"
"fmt" "fmt"
"net/http" "net/http"
"sort" "sort"
"github.com/Azure/azure-pipeline-go/pipeline" "github.com/Azure/azure-pipeline-go/pipeline"
) )
func init() { func init() {
// wire up our custom error handling constructor // wire up our custom error handling constructor
responseErrorFactory = newStorageError responseErrorFactory = newStorageError
} }
// ServiceCodeType is a string identifying a specific container or blob error. // ServiceCodeType is a string identifying a storage service error.
// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/blob-service-error-codes // For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/status-and-error-codes2
type ServiceCodeType string type ServiceCodeType string
// StorageError identifies a responder-generated network or response parsing error. // StorageError identifies a responder-generated network or response parsing error.
type StorageError interface { type StorageError interface {
// ResponseError implements error's Error(), net.Error's Temporary() and Timeout() methods & Response(). // ResponseError implements error's Error(), net.Error's Temporary() and Timeout() methods & Response().
ResponseError ResponseError
// ServiceCode returns a service error code. Your code can use this to make error recovery decisions. // ServiceCode returns a service error code. Your code can use this to make error recovery decisions.
ServiceCode() ServiceCodeType ServiceCode() ServiceCodeType
} }
// storageError is the internat struct that implements the public StorageError interface. // storageError is the internal struct that implements the public StorageError interface.
type storageError struct { type storageError struct {
responseError responseError
serviceCode ServiceCodeType serviceCode ServiceCodeType
details map[string]string details map[string]string
} }
// newStorageError creates an error object that implements the error interface. // newStorageError creates an error object that implements the error interface.
func newStorageError(cause error, response *http.Response, description string) error { func newStorageError(cause error, response *http.Response, description string) error {
return &storageError{ return &storageError{
responseError: responseError{ responseError: responseError{
ErrorNode: pipeline.ErrorNode{}.Initialize(cause, 3), ErrorNode: pipeline.ErrorNode{}.Initialize(cause, 3),
response: response, response: response,
description: description, description: description,
}, },
} }
} }
// ServiceCode returns service-error information. The caller may examine these values but should not modify any of them. // ServiceCode returns service-error information. The caller may examine these values but should not modify any of them.
func (e *storageError) ServiceCode() ServiceCodeType { return e.serviceCode } func (e *storageError) ServiceCode() ServiceCodeType { return e.serviceCode }
// Error implements the error interface's Error method to return a string representation of the error. // Error implements the error interface's Error method to return a string representation of the error.
func (e *storageError) Error() string { func (e *storageError) Error() string {
b := &bytes.Buffer{} b := &bytes.Buffer{}
fmt.Fprintf(b, "===== RESPONSE ERROR (ServiceCode=%s) =====\n", e.serviceCode) fmt.Fprintf(b, "===== RESPONSE ERROR (ServiceCode=%s) =====\n", e.serviceCode)
fmt.Fprintf(b, "Description=%s, Details: ", e.description) fmt.Fprintf(b, "Description=%s, Details: ", e.description)
if len(e.details) == 0 { if len(e.details) == 0 {
b.WriteString("(none)\n") b.WriteString("(none)\n")
} else { } else {
b.WriteRune('\n') b.WriteRune('\n')
keys := make([]string, 0, len(e.details)) keys := make([]string, 0, len(e.details))
// Alphabetize the details // Alphabetize the details
for k := range e.details { for k := range e.details {
keys = append(keys, k) keys = append(keys, k)
} }
sort.Strings(keys) sort.Strings(keys)
for _, k := range keys { for _, k := range keys {
fmt.Fprintf(b, " %s: %+v\n", k, e.details[k]) fmt.Fprintf(b, " %s: %+v\n", k, e.details[k])
} }
} }
req := pipeline.Request{Request: e.response.Request}.Copy() // Make a copy of the response's request req := pipeline.Request{Request: e.response.Request}.Copy() // Make a copy of the response's request
pipeline.WriteRequestWithResponse(b, prepareRequestForLogging(req), e.response, nil) pipeline.WriteRequestWithResponse(b, prepareRequestForLogging(req), e.response, nil)
return e.ErrorNode.Error(b.String()) return e.ErrorNode.Error(b.String())
} }
// Temporary returns true if the error occurred due to a temporary condition (including an HTTP status of 500 or 503). // Temporary returns true if the error occurred due to a temporary condition (including an HTTP status of 500 or 503).
func (e *storageError) Temporary() bool { func (e *storageError) Temporary() bool {
if e.response != nil { if e.response != nil {
if (e.response.StatusCode == http.StatusInternalServerError) || (e.response.StatusCode == http.StatusServiceUnavailable) { if (e.response.StatusCode == http.StatusInternalServerError) || (e.response.StatusCode == http.StatusServiceUnavailable) {
return true return true
} }
} }
return e.ErrorNode.Temporary() return e.ErrorNode.Temporary()
} }
// UnmarshalXML performs custom unmarshalling of XML-formatted Azure storage request errors. // UnmarshalXML performs custom unmarshalling of XML-formatted Azure storage request errors.
func (e *storageError) UnmarshalXML(d *xml.Decoder, start xml.StartElement) (err error) { func (e *storageError) UnmarshalXML(d *xml.Decoder, start xml.StartElement) (err error) {
tokName := "" tokName := ""
var t xml.Token var t xml.Token
for t, err = d.Token(); err == nil; t, err = d.Token() { for t, err = d.Token(); err == nil; t, err = d.Token() {
switch tt := t.(type) { switch tt := t.(type) {
case xml.StartElement: case xml.StartElement:
tokName = tt.Name.Local tokName = tt.Name.Local
break break
case xml.CharData: case xml.CharData:
switch tokName { switch tokName {
case "Code": case "Code":
e.serviceCode = ServiceCodeType(tt) e.serviceCode = ServiceCodeType(tt)
case "Message": case "Message":
e.description = string(tt) e.description = string(tt)
default: default:
if e.details == nil { if e.details == nil {
e.details = map[string]string{} e.details = map[string]string{}
} }
e.details[tokName] = string(tt) e.details[tokName] = string(tt)
} }
} }
} }
return nil return nil
} }
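The ServiceCode accessor is the intended hook for recovery decisions; a sketch (the failing call itself is elided) might branch like this:

package main

import (
	"fmt"

	"github.com/Azure/azure-storage-blob-go/2017-07-29/azblob"
)

// handleStorageError is a sketch; err would come from a real service call.
func handleStorageError(err error) {
	if serr, ok := err.(azblob.StorageError); ok {
		switch serr.ServiceCode() {
		case azblob.ServiceCodeResourceAlreadyExists:
			fmt.Println("already exists; treat as success")
		case azblob.ServiceCodeServerBusy:
			fmt.Println("throttled; back off and retry")
		default:
			fmt.Println("unexpected service code:", serr.ServiceCode())
		}
	}
}

func main() {
	handleStorageError(nil) // placeholder; a real error would drive the switch
}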

View file

@@ -0,0 +1,61 @@
package azblob
import (
"errors"
"fmt"
"io"
"strconv"
)
// httpRange defines a range of bytes within an HTTP resource, starting at offset and
// ending at offset+count. A zero-value httpRange indicates the entire resource. An httpRange
// which has an offset but a zero-value count indicates from the offset to the resource's end.
type httpRange struct {
offset int64
count int64
}
func (r httpRange) pointers() *string {
if r.offset == 0 && r.count == 0 { // Do common case first for performance
return nil // No specified range
}
if r.offset < 0 {
panic("The range offset must be >= 0")
}
if r.count < 0 {
panic("The range count must be >= 0")
}
endOffset := "" // if count == 0
if r.count > 0 {
endOffset = strconv.FormatInt((r.offset+r.count)-1, 10)
}
dataRange := fmt.Sprintf("bytes=%v-%s", r.offset, endOffset)
return &dataRange
}
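Because httpRange is unexported, here is a standalone restatement of the rule above: offset=10, count=5 produces the inclusive header "bytes=10-14", while offset=0, count=0 sends no Range header at all:

package main

import (
	"fmt"
	"strconv"
)

func rangeHeader(offset, count int64) string {
	if offset == 0 && count == 0 {
		return "" // entire resource: no Range header is sent
	}
	end := ""
	if count > 0 {
		end = strconv.FormatInt(offset+count-1, 10) // end offset is inclusive
	}
	return fmt.Sprintf("bytes=%v-%s", offset, end)
}

func main() {
	fmt.Println(rangeHeader(10, 5)) // bytes=10-14
	fmt.Println(rangeHeader(10, 0)) // bytes=10-  (offset through the resource's end)
}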
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
func validateSeekableStreamAt0AndGetCount(body io.ReadSeeker) int64 {
if body == nil { // nil bodies are "logically" seekable to 0 and are 0 bytes long
return 0
}
validateSeekableStreamAt0(body)
count, err := body.Seek(0, io.SeekEnd)
if err != nil {
panic("failed to seek stream")
}
body.Seek(0, io.SeekStart)
return count
}
func validateSeekableStreamAt0(body io.ReadSeeker) {
if body == nil { // nil bodies are "logically" seekable to 0
return
}
if pos, err := body.Seek(0, io.SeekCurrent); pos != 0 || err != nil {
if err != nil {
panic(err)
}
panic(errors.New("stream must be set to position 0"))
}
}
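To see why the count helper seeks twice, a small sketch using a bytes.Reader as a stand-in for any io.ReadSeeker: seeking to the end reveals the total length, and seeking back to the start leaves the stream ready for upload.

package main

import (
	"bytes"
	"fmt"
	"io"
)

func main() {
	body := bytes.NewReader([]byte("Hello World!"))
	count, _ := body.Seek(0, io.SeekEnd) // distance to the end = total length
	body.Seek(0, io.SeekStart)           // rewind so the request reads from byte 0
	fmt.Println(count)                   // 12
}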


@ -1,80 +1,80 @@
package azblob package azblob
import ( import (
"crypto/rand" "crypto/rand"
"fmt" "fmt"
"strconv" "strconv"
) )
// The UUID reserved variants. // The UUID reserved variants.
const ( const (
reservedNCS byte = 0x80 reservedNCS byte = 0x80
reservedRFC4122 byte = 0x40 reservedRFC4122 byte = 0x40
reservedMicrosoft byte = 0x20 reservedMicrosoft byte = 0x20
reservedFuture byte = 0x00 reservedFuture byte = 0x00
) )
// A UUID representation compliant with the specification in RFC 4122. // A UUID representation compliant with the specification in RFC 4122.
type uuid [16]byte type uuid [16]byte
// newUUID returns a new uuid using the RFC 4122 version 4 algorithm. // newUUID returns a new uuid using the RFC 4122 version 4 algorithm.
func newUUID() (u uuid) { func newUUID() (u uuid) {
u = uuid{} u = uuid{}
// Set all bits to randomly (or pseudo-randomly) chosen values. // Set all bits to randomly (or pseudo-randomly) chosen values.
_, err := rand.Read(u[:]) _, err := rand.Read(u[:])
if err != nil { if err != nil {
panic("ran.Read failed") panic("ran.Read failed")
} }
u[8] = (u[8] | reservedRFC4122) & 0x7F // u.setVariant(ReservedRFC4122) u[8] = (u[8] | reservedRFC4122) & 0x7F // u.setVariant(ReservedRFC4122)
var version byte = 4 var version byte = 4
u[6] = (u[6] & 0xF) | (version << 4) // u.setVersion(4) u[6] = (u[6] & 0xF) | (version << 4) // u.setVersion(4)
return return
} }
// String returns the canonical string representation of the UUID. // String returns the canonical string representation of the UUID.
func (u uuid) String() string { func (u uuid) String() string {
return fmt.Sprintf("%x-%x-%x-%x-%x", u[0:4], u[4:6], u[6:8], u[8:10], u[10:]) return fmt.Sprintf("%x-%x-%x-%x-%x", u[0:4], u[4:6], u[6:8], u[8:10], u[10:])
} }
// parseUUID parses a string formatted as "03020100-0504-0706-0809-0a0b0c0d0e0f" // parseUUID parses a string formatted as "03020100-0504-0706-0809-0a0b0c0d0e0f"
// or "{03020100-0504-0706-0809-0a0b0c0d0e0f}" into a UUID. // or "{03020100-0504-0706-0809-0a0b0c0d0e0f}" into a UUID.
func parseUUID(uuidStr string) uuid { func parseUUID(uuidStr string) uuid {
char := func(hexString string) byte { char := func(hexString string) byte {
i, _ := strconv.ParseUint(hexString, 16, 8) i, _ := strconv.ParseUint(hexString, 16, 8)
return byte(i) return byte(i)
} }
if uuidStr[0] == '{' { if uuidStr[0] == '{' {
uuidStr = uuidStr[1:] // Skip over the '{' uuidStr = uuidStr[1:] // Skip over the '{'
} }
// 03020100 - 05 04 - 07 06 - 08 09 - 0a 0b 0c 0d 0e 0f // 03020100 - 05 04 - 07 06 - 08 09 - 0a 0b 0c 0d 0e 0f
// 1 11 1 11 11 1 12 22 2 22 22 22 33 33 33 // 1 11 1 11 11 1 12 22 2 22 22 22 33 33 33
// 01234567 8 90 12 3 45 67 8 90 12 3 45 67 89 01 23 45 // 01234567 8 90 12 3 45 67 8 90 12 3 45 67 89 01 23 45
uuidVal := uuid{ uuidVal := uuid{
char(uuidStr[0:2]), char(uuidStr[0:2]),
char(uuidStr[2:4]), char(uuidStr[2:4]),
char(uuidStr[4:6]), char(uuidStr[4:6]),
char(uuidStr[6:8]), char(uuidStr[6:8]),
char(uuidStr[9:11]), char(uuidStr[9:11]),
char(uuidStr[11:13]), char(uuidStr[11:13]),
char(uuidStr[14:16]), char(uuidStr[14:16]),
char(uuidStr[16:18]), char(uuidStr[16:18]),
char(uuidStr[19:21]), char(uuidStr[19:21]),
char(uuidStr[21:23]), char(uuidStr[21:23]),
char(uuidStr[24:26]), char(uuidStr[24:26]),
char(uuidStr[26:28]), char(uuidStr[26:28]),
char(uuidStr[28:30]), char(uuidStr[28:30]),
char(uuidStr[30:32]), char(uuidStr[30:32]),
char(uuidStr[32:34]), char(uuidStr[32:34]),
char(uuidStr[34:36]), char(uuidStr[34:36]),
} }
return uuidVal return uuidVal
} }
func (u uuid) bytes() []byte { func (u uuid) bytes() []byte {
return u[:] return u[:]
} }
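A standalone round-trip sketch of the version-4 layout used above (the same bit arithmetic, not the package's internal type): sixteen random bytes, the RFC 4122 variant set in byte 8, and the version nibble set in byte 6.

package main

import (
	"crypto/rand"
	"fmt"
)

func main() {
	var u [16]byte
	if _, err := rand.Read(u[:]); err != nil {
		panic(err)
	}
	u[8] = (u[8] | 0x40) & 0x7F // variant: RFC 4122
	u[6] = (u[6] & 0x0F) | 0x40 // version: 4
	fmt.Printf("%x-%x-%x-%x-%x\n", u[0:4], u[4:6], u[6:8], u[8:10], u[10:])
}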

178
2017-07-29/azblob/zt_doc.go Normal file → Executable file

@ -1,89 +1,89 @@
// Copyright 2017 Microsoft Corporation. All rights reserved. // Copyright 2017 Microsoft Corporation. All rights reserved.
// Use of this source code is governed by an MIT // Use of this source code is governed by an MIT
// license that can be found in the LICENSE file. // license that can be found in the LICENSE file.
/* /*
Package azblob allows you to manipulate Azure Storage containers and their blobs. Package azblob allows you to manipulate Azure Storage containers and their blobs.
URL Types URL Types
The most common types you'll work with are the XxxURL types. The methods of these types make requests The most common types you'll work with are the XxxURL types. The methods of these types make requests
against the Azure Storage Service. against the Azure Storage Service.
- ServiceURL's methods perform operations on a storage account. - ServiceURL's methods perform operations on a storage account.
- ContainerURL's methods perform operations on an account's container. - ContainerURL's methods perform operations on an account's container.
- BlockBlobURL's methods perform operations on a container's block blob. - BlockBlobURL's methods perform operations on a container's block blob.
- AppendBlobURL's methods perform operations on a container's append blob. - AppendBlobURL's methods perform operations on a container's append blob.
- PageBlobURL's methods perform operations on a container's page blob. - PageBlobURL's methods perform operations on a container's page blob.
- BlobURL's methods perform operations on a container's blob regardless of the blob's type. - BlobURL's methods perform operations on a container's blob regardless of the blob's type.
Internally, each XxxURL object contains a URL and a request pipeline. The URL indicates the endpoint where each HTTP Internally, each XxxURL object contains a URL and a request pipeline. The URL indicates the endpoint where each HTTP
request is sent and the pipeline indicates how the outgoing HTTP request and incoming HTTP response are processed. request is sent and the pipeline indicates how the outgoing HTTP request and incoming HTTP response are processed.
The pipeline specifies things like retry policies, logging, deserializaiton of HTTP response payloads, and more. The pipeline specifies things like retry policies, logging, deserialization of HTTP response payloads, and more.
Pipelines are threadsafe and may be shared by multiple XxxURL objects. When you create a ServiceURL, you pass Pipelines are threadsafe and may be shared by multiple XxxURL objects. When you create a ServiceURL, you pass
an initial pipeline. When you call ServiceURL's NewContainerURL method, the new ContainerURL object has its own an initial pipeline. When you call ServiceURL's NewContainerURL method, the new ContainerURL object has its own
URL but it shares the same pipeline as the parent ServiceURL object. URL but it shares the same pipeline as the parent ServiceURL object.
To work with a blob, call one of ContainerURL's 4 NewXxxBlobURL methods depending on how you want to treat the blob. To work with a blob, call one of ContainerURL's 4 NewXxxBlobURL methods depending on how you want to treat the blob.
To treat the blob as a block blob, append blob, or page blob, call NewBlockBlobURL, NewAppendBlobURL, or NewPageBlobURL To treat the blob as a block blob, append blob, or page blob, call NewBlockBlobURL, NewAppendBlobURL, or NewPageBlobURL
respectively. These three types are all identical except for the methods they expose; each type exposes the methods respectively. These three types are all identical except for the methods they expose; each type exposes the methods
relevant to the type of blob represented. If you're not sure how you want to treat a blob, you can call NewBlobURL; relevant to the type of blob represented. If you're not sure how you want to treat a blob, you can call NewBlobURL;
this returns an object whose methods are relevant to any kind of blob. When you call ContainerURL's NewXxxBlobURL, this returns an object whose methods are relevant to any kind of blob. When you call ContainerURL's NewXxxBlobURL,
the new XxxBlobURL object has its own URL but it shares the same pipeline as the parent ContainerURL object. You the new XxxBlobURL object has its own URL but it shares the same pipeline as the parent ContainerURL object. You
can easily switch between blob types (method sets) by calling a ToXxxBlobURL method. can easily switch between blob types (method sets) by calling a ToXxxBlobURL method.
If you'd like to use a different pipeline with a ServiceURL, ContainerURL, or XxxBlobURL object, then call the XxxURL If you'd like to use a different pipeline with a ServiceURL, ContainerURL, or XxxBlobURL object, then call the XxxURL
object's WithPipeline method passing in the desired pipeline. The WithPipeline methods create a new XxxURL object object's WithPipeline method passing in the desired pipeline. The WithPipeline methods create a new XxxURL object
with the same URL as the original but with the specified pipeline. with the same URL as the original but with the specified pipeline.
Note that XxxURL objects use little memory, are goroutine-safe, and many objects share the same pipeline. This means that Note that XxxURL objects use little memory, are goroutine-safe, and many objects share the same pipeline. This means that
XxxURL objects share a lot of system resources, making them very efficient. XxxURL objects share a lot of system resources, making them very efficient.
All of XxxURL's methods that make HTTP requests return rich error handling information so you can discern network failures, All of XxxURL's methods that make HTTP requests return rich error handling information so you can discern network failures,
transient failures, timeout failures, service failures, etc. See the StorageError interface for more information and an transient failures, timeout failures, service failures, etc. See the StorageError interface for more information and an
example of how to deal with errors. example of how to deal with errors.
URL and Shared Access Signature Manipulation URL and Shared Access Signature Manipulation
The library includes a BlobURLParts type for deconstructing and reconstructing URLs. And you can use the there are types The library includes a BlobURLParts type for deconstructing and reconstructing URLs. And you can use the following types
for generating and parsing Shared Access Signatures (SAS): for generating and parsing Shared Access Signatures (SAS):
- Use the AccountSASSignatureValues type to create a SAS for a storage account - Use the AccountSASSignatureValues type to create a SAS for a storage account.
- Use the BlobSASSignatureValues type to create a SAS for a container or blob. - Use the BlobSASSignatureValues type to create a SAS for a container or blob.
- Use the SASQueryParameters type to turn signature values into query parameters or to parse query parameters. - Use the SASQueryParameters type to turn signature values into query parameters or to parse query parameters.
To generate a SAS, you must use the SharedKeyCredential type. To generate a SAS, you must use the SharedKeyCredential type.
Credentials Credentials
When creating a request pipeline, you must specify one of this package's credential types. When creating a request pipeline, you must specify one of this package's credential types.
- Call the NewAnonymousCredential function for requests that contain a Shared Access Signature (SAS) or for requests to public resources. - Call the NewAnonymousCredential function for requests that contain a Shared Access Signature (SAS).
- Call the NewSharedKeyCredential function (with an account name & key) to access any and all account resources. You must also use this - Call the NewSharedKeyCredential function (with an account name & key) to access any account resources. You must also use this
to generate Shared Access Signatures. to generate Shared Access Signatures.
HTTP Request Policy Factories HTTP Request Policy Factories
This package defines several request policy factories for use with the pipeline package. This package defines several request policy factories for use with the pipeline package.
Most applications will not use these factories directly; instead, the NewPipeline Most applications will not use these factories directly; instead, the NewPipeline
function creates the these factories, initializes them (via the PipelineOptions type) function creates these factories, initializes them (via the PipelineOptions type)
and returns a pipeline object for use by the XxxURL objects. and returns a pipeline object for use by the XxxURL objects.
However, for advanced scenarios, developers can access these policy factory directly However, for advanced scenarios, developers can access these policy factories directly
and even create their own; they can then construct a custom pipeline in order to affect HTTP and even create their own; they can then construct a custom pipeline in order to affect HTTP
requests and responses performed by the XxxURL objects. For example, developers can requests and responses performed by the XxxURL objects. For example, developers can
introduce their own logging, random failures, request recording & playback for fast introduce their own logging, random failures, request recording & playback for fast
testing, HTTP request pacing, alternate retry mechanisms, metering, metrics, etc. The testing, HTTP request pacing, alternate retry mechanisms, metering, metrics, etc. The
possibilities are endless! possibilities are endless!
Below are the request pipeline policy factory functions that are provided with this Below are the request pipeline policy factory functions that are provided with this
package: package:
- NewRetryPolicyFactory Enables rich retry semantics for failed HTTP requests - NewRetryPolicyFactory Enables rich retry semantics for failed HTTP requests.
- NewRequestLogPolicyFactory Enables rich logging support for HTTP requests/responses & failures - NewRequestLogPolicyFactory Enables rich logging support for HTTP requests/responses & failures.
- NewTelemetryPolicyFactory Enables simple modification of the HTTP request's User-Agent header so each request reports the SDK version & language/runtime making the requests - NewTelemetryPolicyFactory Enables simple modification of the HTTP request's User-Agent header so each request reports the SDK version & language/runtime making the requests.
- NewUniqueRequestIDPolicyFactory Adds a x-ms-client-request-id header with a unique UUID value to an HTTP request to help with diagnosing failures - NewUniqueRequestIDPolicyFactory Adds a x-ms-client-request-id header with a unique UUID value to an HTTP request to help with diagnosing failures.
Also, note that all the NewXxxCredential functions return request policy factory objects which get injected into the pipeline. Also, note that all the NewXxxCredential functions return request policy factory objects which get injected into the pipeline.
*/ */
package azblob package azblob
// TokenCredential Use this to access resources using Role-Based Access Control (RBAC). // TokenCredential Use this to access resources using Role-Based Access Control (RBAC).
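To make the URL-hierarchy description above concrete, a short sketch in the style of the examples below (the account name, key, and import path are placeholders; the constructors are the ones the documentation names): one pipeline is created once and then shared by every derived URL object.

package main

import (
	"net/url"

	"github.com/Azure/azure-storage-blob-go/2016-05-31/azblob" // path as imported by this repo's tests
)

func main() {
	u, _ := url.Parse("https://myaccount.blob.core.windows.net")
	credential := azblob.NewSharedKeyCredential("myaccount", "<account key>") // placeholder credentials
	p := azblob.NewPipeline(credential, azblob.PipelineOptions{})
	serviceURL := azblob.NewServiceURL(*u, p)                 // account-level operations
	containerURL := serviceURL.NewContainerURL("mycontainer") // same pipeline, container-level URL
	blobURL := containerURL.NewBlockBlobURL("HelloWorld.txt") // same pipeline again
	_ = blobURL // Avoid compiler's "declared and not used" error
}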

225
2017-07-29/azblob/zt_examples_test.go Normal file → Executable file

@ -20,15 +20,7 @@ import (
// https://godoc.org/github.com/fluhus/godoc-tricks // https://godoc.org/github.com/fluhus/godoc-tricks
func accountInfo() (string, string) { func accountInfo() (string, string) {
return mustGetEnv("ACCOUNT_NAME"), mustGetEnv("ACCOUNT_KEY") return os.Getenv("ACCOUNT_NAME"), os.Getenv("ACCOUNT_KEY")
}
func mustGetEnv(key string) string {
v := os.Getenv(key)
if v == "" {
panic("Env variable '" + key + "' required for integration tests.")
}
return v
} }
// This example shows how to get started using the Azure Storage Blob SDK for Go. // This example shows how to get started using the Azure Storage Blob SDK for Go.
@ -74,20 +66,21 @@ func Example() {
// Create the blob with string (plain text) content. // Create the blob with string (plain text) content.
data := "Hello World!" data := "Hello World!"
_, err = blobURL.PutBlob(ctx, strings.NewReader(data), BlobHTTPHeaders{ContentType: "text/plain"}, Metadata{}, BlobAccessConditions{}) _, err = blobURL.Upload(ctx, strings.NewReader(data), BlobHTTPHeaders{ContentType: "text/plain"}, Metadata{}, BlobAccessConditions{})
if err != nil { if err != nil {
log.Fatal(err) log.Fatal(err)
} }
// Download the blob's contents and verify that it worked correctly // Download the blob's contents and verify that it worked correctly
get, err := blobURL.GetBlob(ctx, BlobRange{}, BlobAccessConditions{}, false) get, err := blobURL.Download(ctx, 0, 0, BlobAccessConditions{}, false)
if err != nil { if err != nil {
log.Fatal(err) log.Fatal(err)
} }
downloadedData := &bytes.Buffer{} downloadedData := &bytes.Buffer{}
downloadedData.ReadFrom(get.Body()) reader := get.Body(RetryReaderOptions{})
get.Body().Close() // The client must close the response body when finished with it downloadedData.ReadFrom(reader)
reader.Close() // The client must close the response body when finished with it
if data != downloadedData.String() { if data != downloadedData.String() {
log.Fatal("downloaded data doesn't match uploaded data") log.Fatal("downloaded data doesn't match uploaded data")
} }
@ -95,7 +88,7 @@ func Example() {
// List the blob(s) in our container; since a container may hold millions of blobs, this is done 1 segment at a time. // List the blob(s) in our container; since a container may hold millions of blobs, this is done 1 segment at a time.
for marker := (Marker{}); marker.NotDone(); { // The parens around Marker{} are required to avoid a compiler error. for marker := (Marker{}); marker.NotDone(); { // The parens around Marker{} are required to avoid a compiler error.
// Get a result segment starting with the blob indicated by the current Marker. // Get a result segment starting with the blob indicated by the current Marker.
listBlob, err := containerURL.ListBlobs(ctx, marker, ListBlobsOptions{}) listBlob, err := containerURL.ListBlobsFlatSegment(ctx, marker, ListBlobsSegmentOptions{})
if err != nil { if err != nil {
log.Fatal(err) log.Fatal(err)
} }
@ -151,7 +144,9 @@ func ExampleNewPipeline() {
// This method is not called for filtered-out severities. // This method is not called for filtered-out severities.
logger.Output(2, m) // This example uses Go's standard logger logger.Output(2, m) // This example uses Go's standard logger
}, },
MinimumLevelToLog: func() pipeline.LogLevel { return pipeline.LogInfo }, // Log all events from informational to more severe ShouldLog: func(level pipeline.LogLevel) bool {
return level <= pipeline.LogWarning // Log all events from warning to more severe
},
}, },
} }
@ -176,7 +171,7 @@ func ExampleNewPipeline() {
// In this example, I reconfigure the retry policies, create a new pipeline, and then create a new // In this example, I reconfigure the retry policies, create a new pipeline, and then create a new
// ContainerURL object that has the same URL as its parent. // ContainerURL object that has the same URL as its parent.
po.Retry = RetryOptions{ po.Retry = RetryOptions{
Policy: RetryPolicyFixed, // Use exponential backoff as opposed to linear Policy: RetryPolicyFixed, // Use fixed time backoff
MaxTries: 4, // Try at most 4 times to perform the operation (set to 1 to disable retries) MaxTries: 4, // Try at most 4 times to perform the operation (set to 1 to disable retries)
TryTimeout: time.Minute * 1, // Maximum time allowed for any single try TryTimeout: time.Minute * 1, // Maximum time allowed for any single try
RetryDelay: time.Second * 5, // Backoff amount for each retry (exponential or linear) RetryDelay: time.Second * 5, // Backoff amount for each retry (exponential or linear)
@ -219,12 +214,12 @@ func ExampleStorageError() {
create, err := containerURL.Create(context.Background(), Metadata{}, PublicAccessNone) create, err := containerURL.Create(context.Background(), Metadata{}, PublicAccessNone)
if err != nil { // An error occurred if err != nil { // An error occurred
if serr, ok := err.(StorageError); ok { // This error is a Service-specific error if stgErr, ok := err.(StorageError); ok { // This error is a Service-specific error
// StorageError also implements net.Error so you could call its Timeout/Temporary methods if you want. // StorageError also implements net.Error so you could call its Timeout/Temporary methods if you want.
switch serr.ServiceCode() { // Compare serviceCode to various ServiceCodeXxx constants switch stgErr.ServiceCode() { // Compare serviceCode to various ServiceCodeXxx constants
case ServiceCodeContainerAlreadyExists: case ServiceCodeContainerAlreadyExists:
// You can also look at the http.Response object that failed. // You can also look at the http.Response object that failed.
if failedResponse := serr.Response(); failedResponse != nil { if failedResponse := stgErr.Response(); failedResponse != nil {
// From the response object, you can get the initiating http.Request object // From the response object, you can get the initiating http.Request object
failedRequest := failedResponse.Request failedRequest := failedResponse.Request
_ = failedRequest // Avoid compiler's "declared and not used" error _ = failedRequest // Avoid compiler's "declared and not used" error
@ -249,7 +244,7 @@ func ExampleBlobURLParts() {
// Let's start with a URL that identifies a snapshot of a blob in a container. // Let's start with a URL that identifies a snapshot of a blob in a container.
// The URL also contains a Shared Access Signature (SAS): // The URL also contains a Shared Access Signature (SAS):
u, _ := url.Parse("https://myaccount.blob.core.windows.net/mycontainer/ReadMe.txt?" + u, _ := url.Parse("https://myaccount.blob.core.windows.net/mycontainer/ReadMe.txt?" +
"snapshot=2011-03-09T01:42:34.9360000Z" + "snapshot=2011-03-09T01:42:34Z&" +
"sv=2015-02-21&sr=b&st=2111-01-09T01:42:34.936Z&se=2222-03-09T01:42:34.936Z&sp=rw&sip=168.1.5.60-168.1.5.70&" + "sv=2015-02-21&sr=b&st=2111-01-09T01:42:34.936Z&se=2222-03-09T01:42:34.936Z&sp=rw&sip=168.1.5.60-168.1.5.70&" +
"spr=https,http&si=myIdentifier&ss=bf&srt=s&sig=92836758923659283652983562==") "spr=https,http&si=myIdentifier&ss=bf&srt=s&sig=92836758923659283652983562==")
@ -259,12 +254,12 @@ func ExampleBlobURLParts() {
// Now, we access the parts (this example prints them). // Now, we access the parts (this example prints them).
fmt.Println(parts.Host, parts.ContainerName, parts.BlobName, parts.Snapshot) fmt.Println(parts.Host, parts.ContainerName, parts.BlobName, parts.Snapshot)
sas := parts.SAS sas := parts.SAS
fmt.Println(sas.Version, sas.Resource, sas.StartTime, sas.ExpiryTime, sas.Permissions, fmt.Println(sas.Version(), sas.Resource(), sas.StartTime(), sas.ExpiryTime(), sas.Permissions(),
sas.IPRange, sas.Protocol, sas.Identifier, sas.Services, sas.Signature) sas.IPRange(), sas.Protocol(), sas.Identifier(), sas.Services(), sas.Signature())
// You can then change some of the fields and construct a new URL: // You can then change some of the fields and construct a new URL:
parts.SAS = SASQueryParameters{} // Remove the SAS query parameters parts.SAS = SASQueryParameters{} // Remove the SAS query parameters
parts.Snapshot = time.Time{} // Remove the snapshot timestamp parts.Snapshot = "" // Remove the snapshot timestamp
parts.ContainerName = "othercontainer" // Change the container name parts.ContainerName = "othercontainer" // Change the container name
// In this example, we'll keep the blob name as is. // In this example, we'll keep the blob name as is.
@ -284,8 +279,8 @@ func ExampleAccountSASSignatureValues() {
// Set the desired SAS signature values and sign them with the shared key credentials to get the SAS query parameters. // Set the desired SAS signature values and sign them with the shared key credentials to get the SAS query parameters.
sasQueryParams := AccountSASSignatureValues{ sasQueryParams := AccountSASSignatureValues{
Protocol: SASProtocolHTTPS, // Users MUST use HTTPS (not HTTP) Protocol: SASProtocolHTTPS, // Users MUST use HTTPS (not HTTP)
ExpiryTime: time.Now().Add(48 * time.Hour), // 48-hours before expiration ExpiryTime: time.Now().UTC().Add(48 * time.Hour), // 48-hours before expiration
Permissions: AccountSASPermissions{Read: true, List: true}.String(), Permissions: AccountSASPermissions{Read: true, List: true}.String(),
Services: AccountSASServices{Blob: true}.String(), Services: AccountSASServices{Blob: true}.String(),
ResourceTypes: AccountSASResourceTypes{Container: true, Object: true}.String(), ResourceTypes: AccountSASResourceTypes{Container: true, Object: true}.String(),
@ -305,10 +300,9 @@ func ExampleAccountSASSignatureValues() {
serviceURL := NewServiceURL(*u, NewPipeline(NewAnonymousCredential(), PipelineOptions{})) serviceURL := NewServiceURL(*u, NewPipeline(NewAnonymousCredential(), PipelineOptions{}))
// Now, you can use this serviceURL just like any other to make requests of the resource. // Now, you can use this serviceURL just like any other to make requests of the resource.
// If you have a SAS query parameter string, you can parse it into its parts: // You can parse a URL into its constituent parts:
values, _ := url.ParseQuery(qp) blobURLParts := NewBlobURLParts(serviceURL.URL())
sasQueryParams = NewSASQueryParameters(values, true) fmt.Printf("SAS expiry time=%v", blobURLParts.SAS.ExpiryTime())
fmt.Printf("SAS expiry time=%v", sasQueryParams.ExpiryTime)
_ = serviceURL // Avoid compiler's "declared and not used" error _ = serviceURL // Avoid compiler's "declared and not used" error
} }
@ -327,8 +321,8 @@ func ExampleBlobSASSignatureValues() {
// Set the desired SAS signature values and sign them with the shared key credentials to get the SAS query parameters. // Set the desired SAS signature values and sign them with the shared key credentials to get the SAS query parameters.
sasQueryParams := BlobSASSignatureValues{ sasQueryParams := BlobSASSignatureValues{
Protocol: SASProtocolHTTPS, // Users MUST use HTTPS (not HTTP) Protocol: SASProtocolHTTPS, // Users MUST use HTTPS (not HTTP)
ExpiryTime: time.Now().Add(48 * time.Hour), // Expires in 48 hours ExpiryTime: time.Now().UTC().Add(48 * time.Hour), // Expires in 48 hours
ContainerName: containerName, ContainerName: containerName,
BlobName: blobName, BlobName: blobName,
@ -355,15 +349,14 @@ func ExampleBlobSASSignatureValues() {
// Now, you can use this blobURL just like any other to make requests of the resource. // Now, you can use this blobURL just like any other to make requests of the resource.
// You can then parse the SAS back out to inspect its fields: // You can then parse the SAS back out to inspect its fields:
values, _ := url.ParseQuery(qp) blobURLParts := NewBlobURLParts(blobURL.URL())
sasQueryParams = NewSASQueryParameters(values, true) fmt.Printf("SAS expiry time=%v", blobURLParts.SAS.ExpiryTime())
fmt.Printf("SAS expiry time=%v", sasQueryParams.ExpiryTime)
_ = blobURL // Avoid compiler's "declared and not used" error _ = blobURL // Avoid compiler's "declared and not used" error
} }
// This example shows how to manipulate a container's permissions. // This example shows how to manipulate a container's permissions.
func ExampleContainerURL_SetPermissions() { func ExampleContainerURL_SetAccessPolicy() {
// From the Azure portal, get your Storage account's name and account key. // From the Azure portal, get your Storage account's name and account key.
accountName, accountKey := accountInfo() accountName, accountKey := accountInfo()
@ -388,7 +381,7 @@ func ExampleContainerURL_SetPermissions() {
blobURL := containerURL.NewBlockBlobURL("HelloWorld.txt") // Blob names can be mixed case blobURL := containerURL.NewBlockBlobURL("HelloWorld.txt") // Blob names can be mixed case
// Create the blob and put some text in it // Create the blob and put some text in it
_, err = blobURL.PutBlob(ctx, strings.NewReader("Hello World!"), BlobHTTPHeaders{ContentType: "text/plain"}, Metadata{}, BlobAccessConditions{}) _, err = blobURL.Upload(ctx, strings.NewReader("Hello World!"), BlobHTTPHeaders{ContentType: "text/plain"}, Metadata{}, BlobAccessConditions{})
if err != nil { if err != nil {
log.Fatal(err) log.Fatal(err)
} }
@ -403,7 +396,7 @@ func ExampleContainerURL_SetPermissions() {
// We expected this error because the service returns an HTTP 404 status code when a blob // We expected this error because the service returns an HTTP 404 status code when a blob
// exists but the requester does not have permission to access it. // exists but the requester does not have permission to access it.
// This is how we change the container's permission to allow public/anonymous access: // This is how we change the container's permission to allow public/anonymous access:
_, err := containerURL.SetPermissions(ctx, PublicAccessBlob, []SignedIdentifier{}, ContainerAccessConditions{}) _, err := containerURL.SetAccessPolicy(ctx, PublicAccessBlob, []SignedIdentifier{}, ContainerAccessConditions{})
if err != nil { if err != nil {
log.Fatal(err) log.Fatal(err)
} }
@ -426,7 +419,7 @@ func ExampleBlobAccessConditions() {
accountName, accountKey := accountInfo() accountName, accountKey := accountInfo()
// Create a BlockBlobURL object that wraps a blob's URL and a default pipeline. // Create a BlockBlobURL object that wraps a blob's URL and a default pipeline.
u, _ := url.Parse(fmt.Sprintf("https://%s.blob.core.windows.net/mycontainer/Data,txt", accountName)) u, _ := url.Parse(fmt.Sprintf("https://%s.blob.core.windows.net/mycontainer/Data.txt", accountName))
blobURL := NewBlockBlobURL(*u, NewPipeline(NewSharedKeyCredential(accountName, accountKey), PipelineOptions{})) blobURL := NewBlockBlobURL(*u, NewPipeline(NewSharedKeyCredential(accountName, accountKey), PipelineOptions{}))
ctx := context.Background() // This example uses a never-expiring context ctx := context.Background() // This example uses a never-expiring context
@ -434,42 +427,42 @@ func ExampleBlobAccessConditions() {
// This helper function displays the results of an operation; it is called frequently below. // This helper function displays the results of an operation; it is called frequently below.
showResult := func(response pipeline.Response, err error) { showResult := func(response pipeline.Response, err error) {
if err != nil { if err != nil {
if serr, ok := err.(StorageError); !ok { if stgErr, ok := err.(StorageError); !ok {
log.Fatal(err) // Network failure log.Fatal(err) // Network failure
} else { } else {
fmt.Print("Failure: " + serr.Response().Status + "\n") fmt.Print("Failure: " + stgErr.Response().Status + "\n")
} }
} else { } else {
if get, ok := response.(*GetResponse); ok { if get, ok := response.(*DownloadResponse); ok {
get.Body().Close() // The client must close the response body when finished with it get.Body(RetryReaderOptions{}).Close() // The client must close the response body when finished with it
} }
fmt.Print("Success: " + response.Response().Status + "\n") fmt.Print("Success: " + response.Response().Status + "\n")
} }
} }
// Create the blob (unconditionally; succeeds) // Create the blob (unconditionally; succeeds)
put, err := blobURL.PutBlob(ctx, strings.NewReader("Text-1"), BlobHTTPHeaders{}, Metadata{}, BlobAccessConditions{}) upload, err := blobURL.Upload(ctx, strings.NewReader("Text-1"), BlobHTTPHeaders{}, Metadata{}, BlobAccessConditions{})
showResult(put, err) showResult(upload, err)
// Download blob content if the blob has been modified since we uploaded it (fails): // Download blob content if the blob has been modified since we uploaded it (fails):
showResult(blobURL.GetBlob(ctx, BlobRange{}, showResult(blobURL.Download(ctx, 0, 0,
BlobAccessConditions{HTTPAccessConditions: HTTPAccessConditions{IfModifiedSince: put.LastModified()}}, false)) BlobAccessConditions{HTTPAccessConditions: HTTPAccessConditions{IfModifiedSince: upload.LastModified()}}, false))
// Download blob content if the blob hasn't been modified in the last 24 hours (fails): // Download blob content if the blob hasn't been modified in the last 24 hours (fails):
showResult(blobURL.GetBlob(ctx, BlobRange{}, showResult(blobURL.Download(ctx, 0, 0,
BlobAccessConditions{HTTPAccessConditions: HTTPAccessConditions{IfUnmodifiedSince: time.Now().UTC().Add(time.Hour * -24)}}, false)) BlobAccessConditions{HTTPAccessConditions: HTTPAccessConditions{IfUnmodifiedSince: time.Now().UTC().Add(time.Hour * -24)}}, false))
// Upload new content if the blob hasn't changed since the version identified by ETag (succeeds): // Upload new content if the blob hasn't changed since the version identified by ETag (succeeds):
put, err = blobURL.PutBlob(ctx, strings.NewReader("Text-2"), BlobHTTPHeaders{}, Metadata{}, upload, err = blobURL.Upload(ctx, strings.NewReader("Text-2"), BlobHTTPHeaders{}, Metadata{},
BlobAccessConditions{HTTPAccessConditions: HTTPAccessConditions{IfMatch: put.ETag()}}) BlobAccessConditions{HTTPAccessConditions: HTTPAccessConditions{IfMatch: upload.ETag()}})
showResult(put, err) showResult(upload, err)
// Download content if it has changed since the version identified by ETag (fails): // Download content if it has changed since the version identified by ETag (fails):
showResult(blobURL.GetBlob(ctx, BlobRange{}, showResult(blobURL.Download(ctx, 0, 0,
BlobAccessConditions{HTTPAccessConditions: HTTPAccessConditions{IfNoneMatch: put.ETag()}}, false)) BlobAccessConditions{HTTPAccessConditions: HTTPAccessConditions{IfNoneMatch: upload.ETag()}}, false))
// Upload content if the blob doesn't already exist (fails): // Upload content if the blob doesn't already exist (fails):
showResult(blobURL.PutBlob(ctx, strings.NewReader("Text-3"), BlobHTTPHeaders{}, Metadata{}, showResult(blobURL.Upload(ctx, strings.NewReader("Text-3"), BlobHTTPHeaders{}, Metadata{},
BlobAccessConditions{HTTPAccessConditions: HTTPAccessConditions{IfNoneMatch: ETagAny}})) BlobAccessConditions{HTTPAccessConditions: HTTPAccessConditions{IfNoneMatch: ETagAny}}))
} }
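One detail worth calling out from the last call above: IfNoneMatch set to ETagAny is the standard HTTP "If-None-Match: *" precondition, i.e. create-only-if-absent. A hypothetical helper (the same Upload signature as in the example; the function name is invented) makes the pattern reusable:

// uploadIfAbsent succeeds only when the blob does not exist yet.
func uploadIfAbsent(ctx context.Context, blobURL BlockBlobURL, text string) error {
	_, err := blobURL.Upload(ctx, strings.NewReader(text), BlobHTTPHeaders{}, Metadata{},
		BlobAccessConditions{HTTPAccessConditions: HTTPAccessConditions{IfNoneMatch: ETagAny}})
	return err // a StorageError when the blob already exists
}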
@ -489,13 +482,13 @@ func ExampleMetadata_containers() {
// NOTE: Metadata key names are always converted to lowercase before being sent to the Storage Service. // NOTE: Metadata key names are always converted to lowercase before being sent to the Storage Service.
// Therefore, you should always use lowercase letters; especially when querying a map for a metadata key. // Therefore, you should always use lowercase letters; especially when querying a map for a metadata key.
creatingApp, _ := os.Executable() creatingApp, _ := os.Executable()
_, err := containerURL.Create(ctx, Metadata{"createdby": "Jeffrey", "app": creatingApp}, PublicAccessNone) _, err := containerURL.Create(ctx, Metadata{"author": "Jeffrey", "app": creatingApp}, PublicAccessNone)
if err != nil { if err != nil {
log.Fatal(err) log.Fatal(err)
} }
// Query the container's metadata // Query the container's metadata
get, err := containerURL.GetPropertiesAndMetadata(ctx, LeaseAccessConditions{}) get, err := containerURL.GetProperties(ctx, LeaseAccessConditions{})
if err != nil { if err != nil {
log.Fatal(err) log.Fatal(err)
} }
@ -507,7 +500,7 @@ func ExampleMetadata_containers() {
} }
// Update the metadata and write it back to the container // Update the metadata and write it back to the container
metadata["createdby"] = "Aidan" // NOTE: The keyname is in all lowercase letters metadata["author"] = "Aidan" // NOTE: The keyname is in all lowercase letters
_, err = containerURL.SetMetadata(ctx, metadata, ContainerAccessConditions{}) _, err = containerURL.SetMetadata(ctx, metadata, ContainerAccessConditions{})
if err != nil { if err != nil {
log.Fatal(err) log.Fatal(err)
@ -533,14 +526,14 @@ func ExampleMetadata_blobs() {
// NOTE: Metadata key names are always converted to lowercase before being sent to the Storage Service. // NOTE: Metadata key names are always converted to lowercase before being sent to the Storage Service.
// Therefore, you should always use lowercase letters; especially when querying a map for a metadata key. // Therefore, you should always use lowercase letters; especially when querying a map for a metadata key.
creatingApp, _ := os.Executable() creatingApp, _ := os.Executable()
_, err := blobURL.PutBlob(ctx, strings.NewReader("Some text"), BlobHTTPHeaders{}, _, err := blobURL.Upload(ctx, strings.NewReader("Some text"), BlobHTTPHeaders{},
Metadata{"createdby": "Jeffrey", "app": creatingApp}, BlobAccessConditions{}) Metadata{"author": "Jeffrey", "app": creatingApp}, BlobAccessConditions{})
if err != nil { if err != nil {
log.Fatal(err) log.Fatal(err)
} }
// Query the blob's properties and metadata // Query the blob's properties and metadata
get, err := blobURL.GetPropertiesAndMetadata(ctx, BlobAccessConditions{}) get, err := blobURL.GetProperties(ctx, BlobAccessConditions{})
if err != nil { if err != nil {
log.Fatal(err) log.Fatal(err)
} }
@ -555,7 +548,7 @@ func ExampleMetadata_blobs() {
} }
// Update the blob's metadata and write it back to the blob // Update the blob's metadata and write it back to the blob
metadata["updatedby"] = "Grant" // Add a new key/value; NOTE: The keyname is in all lowercase letters metadata["editor"] = "Grant" // Add a new key/value; NOTE: The keyname is in all lowercase letters
_, err = blobURL.SetMetadata(ctx, metadata, BlobAccessConditions{}) _, err = blobURL.SetMetadata(ctx, metadata, BlobAccessConditions{})
if err != nil { if err != nil {
log.Fatal(err) log.Fatal(err)
@ -578,7 +571,7 @@ func ExampleBlobHTTPHeaders() {
ctx := context.Background() // This example uses a never-expiring context ctx := context.Background() // This example uses a never-expiring context
// Create a blob with HTTP headers // Create a blob with HTTP headers
_, err := blobURL.PutBlob(ctx, strings.NewReader("Some text"), _, err := blobURL.Upload(ctx, strings.NewReader("Some text"),
BlobHTTPHeaders{ BlobHTTPHeaders{
ContentType: "text/html; charset=utf-8", ContentType: "text/html; charset=utf-8",
ContentDisposition: "attachment", ContentDisposition: "attachment",
@ -588,7 +581,7 @@ func ExampleBlobHTTPHeaders() {
} }
// Fetch the blob's properties, HTTP headers, and metadata // Fetch the blob's properties, HTTP headers, and metadata
get, err := blobURL.GetPropertiesAndMetadata(ctx, BlobAccessConditions{}) get, err := blobURL.GetProperties(ctx, BlobAccessConditions{})
if err != nil { if err != nil {
log.Fatal(err) log.Fatal(err)
} }
@ -602,7 +595,7 @@ func ExampleBlobHTTPHeaders() {
// Update the blob's HTTP Headers and write them back to the blob // Update the blob's HTTP Headers and write them back to the blob
httpHeaders.ContentType = "text/plain" httpHeaders.ContentType = "text/plain"
_, err = blobURL.SetProperties(ctx, httpHeaders, BlobAccessConditions{}) _, err = blobURL.SetHTTPHeaders(ctx, httpHeaders, BlobAccessConditions{})
if err != nil { if err != nil {
log.Fatal(err) log.Fatal(err)
} }
@ -651,14 +644,14 @@ func ExampleBlockBlobURL() {
base64BlockIDs[index] = blockIDIntToBase64(index) // Some people use UUIDs for block IDs base64BlockIDs[index] = blockIDIntToBase64(index) // Some people use UUIDs for block IDs
// Upload a block to this blob specifying the Block ID and its content (up to 100MB); this block is uncommitted. // Upload a block to this blob specifying the Block ID and its content (up to 100MB); this block is uncommitted.
_, err := blobURL.PutBlock(ctx, base64BlockIDs[index], strings.NewReader(word), LeaseAccessConditions{}) _, err := blobURL.StageBlock(ctx, base64BlockIDs[index], strings.NewReader(word), LeaseAccessConditions{})
if err != nil { if err != nil {
log.Fatal(err) log.Fatal(err)
} }
} }
// After all the blocks are uploaded, atomically commit them to the blob. // After all the blocks are uploaded, atomically commit them to the blob.
_, err := blobURL.PutBlockList(ctx, base64BlockIDs, Metadata{}, BlobHTTPHeaders{}, BlobAccessConditions{}) _, err := blobURL.CommitBlockList(ctx, base64BlockIDs, BlobHTTPHeaders{}, Metadata{}, BlobAccessConditions{})
if err != nil { if err != nil {
log.Fatal(err) log.Fatal(err)
} }
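The example calls blockIDIntToBase64 without showing its body; a plausible implementation (an assumption, not necessarily the repo's exact helper) is below. The service requires block IDs to be base64-encoded and, within one blob, all of equal length, which a fixed-width counter guarantees.

import (
	"encoding/base64"
	"encoding/binary"
)

// blockIDIntToBase64 turns an integer counter into a fixed-length, base64-encoded block ID.
func blockIDIntToBase64(blockID int) string {
	binaryBlockID := [4]byte{} // fixed width keeps every ID the same length
	binary.BigEndian.PutUint32(binaryBlockID[:], uint32(blockID))
	return base64.StdEncoding.EncodeToString(binaryBlockID[:])
}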
@ -674,13 +667,14 @@ func ExampleBlockBlobURL() {
// Download the blob in its entirety; download operations do not take blocks into account. // Download the blob in its entirety; download operations do not take blocks into account.
// NOTE: For really large blobs, downloading them like this allocates a lot of memory. // NOTE: For really large blobs, downloading them like this allocates a lot of memory.
get, err := blobURL.GetBlob(ctx, BlobRange{}, BlobAccessConditions{}, false) get, err := blobURL.Download(ctx, 0, 0, BlobAccessConditions{}, false)
if err != nil { if err != nil {
log.Fatal(err) log.Fatal(err)
} }
blobData := &bytes.Buffer{} blobData := &bytes.Buffer{}
blobData.ReadFrom(get.Body()) reader := get.Body(RetryReaderOptions{})
get.Body().Close() // The client must close the response body when finished with it blobData.ReadFrom(reader)
reader.Close() // The client must close the response body when finished with it
fmt.Println(blobData) fmt.Println(blobData)
} }
@ -696,7 +690,7 @@ func ExampleAppendBlobURL() {
appendBlobURL := NewAppendBlobURL(*u, NewPipeline(NewSharedKeyCredential(accountName, accountKey), PipelineOptions{})) appendBlobURL := NewAppendBlobURL(*u, NewPipeline(NewSharedKeyCredential(accountName, accountKey), PipelineOptions{}))
ctx := context.Background() // This example uses a never-expiring context ctx := context.Background() // This example uses a never-expiring context
_, err := appendBlobURL.Create(ctx, Metadata{}, BlobHTTPHeaders{}, BlobAccessConditions{}) _, err := appendBlobURL.Create(ctx, BlobHTTPHeaders{}, Metadata{}, BlobAccessConditions{})
if err != nil { if err != nil {
log.Fatal(err) log.Fatal(err)
} }
@ -709,13 +703,14 @@ func ExampleAppendBlobURL() {
} }
// Download the entire append blob's contents and show it. // Download the entire append blob's contents and show it.
get, err := appendBlobURL.GetBlob(ctx, BlobRange{}, BlobAccessConditions{}, false) get, err := appendBlobURL.Download(ctx, 0, 0, BlobAccessConditions{}, false)
if err != nil { if err != nil {
log.Fatal(err) log.Fatal(err)
} }
b := bytes.Buffer{} b := bytes.Buffer{}
b.ReadFrom(get.Body()) reader := get.Body(RetryReaderOptions{})
get.Body().Close() // The client must close the response body when finished with it b.ReadFrom(reader)
reader.Close() // The client must close the response body when finished with it
fmt.Println(b.String()) fmt.Println(b.String())
} }
@ -730,27 +725,25 @@ func ExamplePageBlobURL() {
NewPipeline(NewSharedKeyCredential(accountName, accountKey), PipelineOptions{})) NewPipeline(NewSharedKeyCredential(accountName, accountKey), PipelineOptions{}))
ctx := context.Background() // This example uses a never-expiring context ctx := context.Background() // This example uses a never-expiring context
_, err := blobURL.Create(ctx, PageBlobPageBytes*4, 0, Metadata{}, BlobHTTPHeaders{}, BlobAccessConditions{}) _, err := blobURL.Create(ctx, PageBlobPageBytes*4, 0, BlobHTTPHeaders{}, Metadata{}, BlobAccessConditions{})
if err != nil { if err != nil {
log.Fatal(err) log.Fatal(err)
} }
page := [PageBlobPageBytes]byte{} page := [PageBlobPageBytes]byte{}
copy(page[:], "Page 0") copy(page[:], "Page 0")
_, err = blobURL.PutPages(ctx, PageRange{Start: 0 * PageBlobPageBytes, End: 1*PageBlobPageBytes - 1}, _, err = blobURL.UploadPages(ctx, 0*PageBlobPageBytes, bytes.NewReader(page[:]), BlobAccessConditions{})
bytes.NewReader(page[:]), BlobAccessConditions{})
if err != nil { if err != nil {
log.Fatal(err) log.Fatal(err)
} }
copy(page[:], "Page 1") copy(page[:], "Page 1")
_, err = blobURL.PutPages(ctx, PageRange{Start: 2 * PageBlobPageBytes, End: 3*PageBlobPageBytes - 1}, _, err = blobURL.UploadPages(ctx, 2*PageBlobPageBytes, bytes.NewReader(page[:]), BlobAccessConditions{})
bytes.NewReader(page[:]), BlobAccessConditions{})
if err != nil { if err != nil {
log.Fatal(err) log.Fatal(err)
} }
getPages, err := blobURL.GetPageRanges(ctx, BlobRange{Offset: 0 * PageBlobPageBytes, Count: 10*PageBlobPageBytes - 1}, BlobAccessConditions{}) getPages, err := blobURL.GetPageRanges(ctx, 0*PageBlobPageBytes, 10*PageBlobPageBytes, BlobAccessConditions{})
if err != nil { if err != nil {
log.Fatal(err) log.Fatal(err)
} }
@ -758,12 +751,12 @@ func ExamplePageBlobURL() {
fmt.Printf("Start=%d, End=%d\n", pr.Start, pr.End) fmt.Printf("Start=%d, End=%d\n", pr.Start, pr.End)
} }
_, err = blobURL.ClearPages(ctx, PageRange{Start: 0 * PageBlobPageBytes, End: 1*PageBlobPageBytes - 1}, BlobAccessConditions{}) _, err = blobURL.ClearPages(ctx, 0*PageBlobPageBytes, 1*PageBlobPageBytes, BlobAccessConditions{})
if err != nil { if err != nil {
log.Fatal(err) log.Fatal(err)
} }
getPages, err = blobURL.GetPageRanges(ctx, BlobRange{Offset: 0 * PageBlobPageBytes, Count: 10*PageBlobPageBytes - 1}, BlobAccessConditions{}) getPages, err = blobURL.GetPageRanges(ctx, 0*PageBlobPageBytes, 10*PageBlobPageBytes, BlobAccessConditions{})
if err != nil { if err != nil {
log.Fatal(err) log.Fatal(err)
} }
@ -771,13 +764,14 @@ func ExamplePageBlobURL() {
fmt.Printf("Start=%d, End=%d\n", pr.Start, pr.End) fmt.Printf("Start=%d, End=%d\n", pr.Start, pr.End)
} }
get, err := blobURL.GetBlob(ctx, BlobRange{}, BlobAccessConditions{}, false) get, err := blobURL.Download(ctx, 0, 0, BlobAccessConditions{}, false)
if err != nil { if err != nil {
log.Fatal(err) log.Fatal(err)
} }
blobData := &bytes.Buffer{} blobData := &bytes.Buffer{}
blobData.ReadFrom(get.Body()) reader := get.Body(RetryReaderOptions{})
get.Body().Close() // The client must close the response body when finished with it blobData.ReadFrom(reader)
reader.Close() // The client must close the response body when finished with it
fmt.Printf("%#v", blobData.Bytes()) fmt.Printf("%#v", blobData.Bytes())
} }
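A note on the arithmetic this example leans on (a sketch of the mapping, reusing the example's own identifiers): the old PageRange{Start, End} pair is an inclusive byte range, while the renamed API takes a bare offset and infers the count from the reader's length; offsets must stay multiples of PageBlobPageBytes (512).

// Old API: explicit inclusive range for page 2.
//   blobURL.PutPages(ctx, PageRange{Start: 2 * PageBlobPageBytes, End: 3*PageBlobPageBytes - 1},
//       bytes.NewReader(page[:]), BlobAccessConditions{})
// Renamed API: offset only; the reader's length supplies the count.
_, err = blobURL.UploadPages(ctx, 2*PageBlobPageBytes, bytes.NewReader(page[:]), BlobAccessConditions{})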
@ -798,7 +792,7 @@ func Example_blobSnapshots() {
ctx := context.Background() // This example uses a never-expiring context ctx := context.Background() // This example uses a never-expiring context
// Create the original blob: // Create the original blob:
_, err := baseBlobURL.PutBlob(ctx, strings.NewReader("Some text"), BlobHTTPHeaders{}, Metadata{}, BlobAccessConditions{}) _, err := baseBlobURL.Upload(ctx, strings.NewReader("Some text"), BlobHTTPHeaders{}, Metadata{}, BlobAccessConditions{})
if err != nil { if err != nil {
log.Fatal(err) log.Fatal(err)
} }
@ -808,33 +802,35 @@ func Example_blobSnapshots() {
snapshot := createSnapshot.Snapshot() snapshot := createSnapshot.Snapshot()
// Modify the original blob & show it: // Modify the original blob & show it:
_, err = baseBlobURL.PutBlob(ctx, strings.NewReader("New text"), BlobHTTPHeaders{}, Metadata{}, BlobAccessConditions{}) _, err = baseBlobURL.Upload(ctx, strings.NewReader("New text"), BlobHTTPHeaders{}, Metadata{}, BlobAccessConditions{})
if err != nil { if err != nil {
log.Fatal(err) log.Fatal(err)
} }
get, err := baseBlobURL.GetBlob(ctx, BlobRange{}, BlobAccessConditions{}, false) get, err := baseBlobURL.Download(ctx, 0, 0, BlobAccessConditions{}, false)
b := bytes.Buffer{} b := bytes.Buffer{}
b.ReadFrom(get.Body()) reader := get.Body(RetryReaderOptions{})
get.Body().Close() // The client must close the response body when finished with it b.ReadFrom(reader)
reader.Close() // The client must close the response body when finished with it
fmt.Println(b.String()) fmt.Println(b.String())
// Show snapshot blob via original blob URI & snapshot time: // Show snapshot blob via original blob URI & snapshot time:
snapshotBlobURL := baseBlobURL.WithSnapshot(snapshot) snapshotBlobURL := baseBlobURL.WithSnapshot(snapshot)
get, err = snapshotBlobURL.GetBlob(ctx, BlobRange{}, BlobAccessConditions{}, false) get, err = snapshotBlobURL.Download(ctx, 0, 0, BlobAccessConditions{}, false)
b.Reset() b.Reset()
b.ReadFrom(get.Body()) reader = get.Body(RetryReaderOptions{})
get.Body().Close() // The client must close the response body when finished with it b.ReadFrom(reader)
reader.Close() // The client must close the response body when finished with it
fmt.Println(b.String()) fmt.Println(b.String())
// FYI: You can get the base blob URL from one of its snapshots by passing time.Time{} to WithSnapshot: // FYI: You can get the base blob URL from one of its snapshots by passing "" to WithSnapshot:
baseBlobURL = snapshotBlobURL.WithSnapshot(time.Time{}) baseBlobURL = snapshotBlobURL.WithSnapshot("")
// Show all blobs in the container with their snapshots: // Show all blobs in the container with their snapshots:
// List the blob(s) in our container; since a container may hold millions of blobs, this is done 1 segment at a time. // List the blob(s) in our container; since a container may hold millions of blobs, this is done 1 segment at a time.
for marker := (Marker{}); marker.NotDone(); { // The parens around Marker{} are required to avoid a compiler error. for marker := (Marker{}); marker.NotDone(); { // The parens around Marker{} are required to avoid a compiler error.
// Get a result segment starting with the blob indicated by the current Marker. // Get a result segment starting with the blob indicated by the current Marker.
listBlobs, err := containerURL.ListBlobs(ctx, marker, ListBlobsOptions{ listBlobs, err := containerURL.ListBlobsFlatSegment(ctx, marker, ListBlobsSegmentOptions{
Details: BlobListingDetails{Snapshots: true}}) Details: BlobListingDetails{Snapshots: true}})
if err != nil { if err != nil {
log.Fatal(err) log.Fatal(err)
@ -846,15 +842,15 @@ func Example_blobSnapshots() {
// Process the blobs returned in this result segment (if the segment is empty, the loop body won't execute) // Process the blobs returned in this result segment (if the segment is empty, the loop body won't execute)
for _, blobInfo := range listBlobs.Blobs.Blob { for _, blobInfo := range listBlobs.Blobs.Blob {
snaptime := "N/A" snaptime := "N/A"
if !blobInfo.Snapshot.IsZero() { if blobInfo.Snapshot != "" {
snaptime = blobInfo.Snapshot.String() snaptime = blobInfo.Snapshot
} }
fmt.Printf("Blob name: %s, Snapshot: %s\n", blobInfo.Name, snaptime) fmt.Printf("Blob name: %s, Snapshot: %s\n", blobInfo.Name, snaptime)
} }
} }
// Promote read-only snapshot to writable base blob: // Promote read-only snapshot to writable base blob:
_, err = baseBlobURL.StartCopy(ctx, snapshotBlobURL.URL(), Metadata{}, BlobAccessConditions{}, BlobAccessConditions{}) _, err = baseBlobURL.StartCopyFromURL(ctx, snapshotBlobURL.URL(), Metadata{}, BlobAccessConditions{}, BlobAccessConditions{})
if err != nil { if err != nil {
log.Fatal(err) log.Fatal(err)
} }
@ -889,7 +885,7 @@ func Example_progressUploadDownload() {
requestBody := strings.NewReader("Some text to write") requestBody := strings.NewReader("Some text to write")
// Wrap the request body in a RequestBodyProgress and pass a callback function for progress reporting. // Wrap the request body in a RequestBodyProgress and pass a callback function for progress reporting.
_, err := blobURL.PutBlob(ctx, _, err := blobURL.Upload(ctx,
pipeline.NewRequestBodyProgress(requestBody, func(bytesTransferred int64) { pipeline.NewRequestBodyProgress(requestBody, func(bytesTransferred int64) {
fmt.Printf("Wrote %d of %d bytes.", bytesTransferred, requestBody.Len()) fmt.Printf("Wrote %d of %d bytes.", bytesTransferred, requestBody.Len())
}), }),
@ -902,14 +898,16 @@ func Example_progressUploadDownload() {
} }
// Here's how to read the blob's data with progress reporting: // Here's how to read the blob's data with progress reporting:
get, err := blobURL.GetBlob(ctx, BlobRange{}, BlobAccessConditions{}, false) get, err := blobURL.Download(ctx, 0, 0, BlobAccessConditions{}, false)
if err != nil { if err != nil {
log.Fatal(err) log.Fatal(err)
} }
// Wrap the response body in a ResponseBodyProgress and pass a callback function for progress reporting. // Wrap the response body in a ResponseBodyProgress and pass a callback function for progress reporting.
responseBody := pipeline.NewResponseBodyProgress(get.Body(), func(bytesTransferred int64) { responseBody := pipeline.NewResponseBodyProgress(get.Body(RetryReaderOptions{}),
fmt.Printf("Read %d of %d bytes.", bytesTransferred, get.ContentLength()) func(bytesTransferred int64) {
}) fmt.Printf("Read %d of %d bytes.", bytesTransferred, get.ContentLength())
})
downloadedData := &bytes.Buffer{} downloadedData := &bytes.Buffer{}
downloadedData.ReadFrom(responseBody) downloadedData.ReadFrom(responseBody)
@ -931,18 +929,16 @@ func ExampleBlobURL_startCopy() {
ctx := context.Background() // This example uses a never-expiring context ctx := context.Background() // This example uses a never-expiring context
src, _ := url.Parse("https://cdn2.auth0.com/docs/media/addons/azure_blob.svg") src, _ := url.Parse("https://cdn2.auth0.com/docs/media/addons/azure_blob.svg")
startCopy, err := blobURL.StartCopy(ctx, *src, nil, BlobAccessConditions{}, BlobAccessConditions{}) startCopy, err := blobURL.StartCopyFromURL(ctx, *src, nil, BlobAccessConditions{}, BlobAccessConditions{})
if err != nil { if err != nil {
log.Fatal(err) log.Fatal(err)
} }
//abortCopy, err := blobURL.AbortCopy(ct, copyID, LeaseAccessConditions{})
copyID := startCopy.CopyID() copyID := startCopy.CopyID()
copyStatus := startCopy.CopyStatus() copyStatus := startCopy.CopyStatus()
for copyStatus == CopyStatusPending { for copyStatus == CopyStatusPending {
time.Sleep(time.Second * 2) time.Sleep(time.Second * 2)
getMetadata, err := blobURL.GetPropertiesAndMetadata(ctx, BlobAccessConditions{}) getMetadata, err := blobURL.GetProperties(ctx, BlobAccessConditions{})
if err != nil { if err != nil {
log.Fatal(err) log.Fatal(err)
} }
@ -1000,18 +996,11 @@ func ExampleNewDownloadStream() {
contentLength := int64(0) // Used to report the total number of bytes being downloaded. contentLength := int64(0) // Used to report the total number of bytes being downloaded.
// The code below creates an intelligent retryable stream around a blob; it returns an io.ReadCloser. // The code below creates an intelligent retryable stream around a blob; it returns an io.ReadCloser.
rs := NewDownloadStream(context.Background(), dr, err := blobURL.Download(context.TODO(), 0, -1, BlobAccessConditions{}, false)
// We pass more tha "blobUrl.GetBlob" here so we can capture the blob's full if err != nil {
// content length on the very first internal call to Read. log.Fatal(err)
func(ctx context.Context, blobRange BlobRange, ac BlobAccessConditions, rangeGetContentMD5 bool) (*GetResponse, error) { }
get, err := blobURL.GetBlob(ctx, blobRange, ac, rangeGetContentMD5) rs := dr.Body(RetryReaderOptions{})
if err == nil && contentLength == 0 {
// If 1st successful Get, record blob's full size for progress reporting
contentLength = get.ContentLength()
}
return get, err
},
DownloadStreamOptions{})
// NewResponseBodyProgress wraps the retryable stream with progress reporting; it returns an io.ReadCloser. // NewResponseBodyProgress wraps the retryable stream with progress reporting; it returns an io.ReadCloser.
stream := pipeline.NewResponseBodyProgress(rs, stream := pipeline.NewResponseBodyProgress(rs,

408
2017-07-29/azblob/zt_policy_retry_test.go Normal file → Executable file

@ -1,204 +1,204 @@
package azblob_test package azblob_test
import ( import (
"context" "context"
"fmt" "fmt"
"io" "io"
"net/http" "net/http"
"net/url" "net/url"
"strings" "strings"
"time" "time"
chk "gopkg.in/check.v1" chk "gopkg.in/check.v1"
"github.com/Azure/azure-pipeline-go/pipeline" "github.com/Azure/azure-pipeline-go/pipeline"
"github.com/Azure/azure-storage-blob-go/2016-05-31/azblob" "github.com/Azure/azure-storage-blob-go/2016-05-31/azblob"
) )
// For testing docs, see: https://labix.org/gocheck // For testing docs, see: https://labix.org/gocheck
// To test a specific test: go test -check.f MyTestSuite // To test a specific test: go test -check.f MyTestSuite
type retryTestScenario int32 type retryTestScenario int32
const ( const (
// Retry until success. Max retries hit. Operation timeout prevents additional retries // Retry until success. Max retries hit. Operation timeout prevents additional retries
retryTestScenarioRetryUntilSuccess retryTestScenario = 1 retryTestScenarioRetryUntilSuccess retryTestScenario = 1
retryTestScenarioRetryUntilOperationCancel retryTestScenario = 2 retryTestScenarioRetryUntilOperationCancel retryTestScenario = 2
retryTestScenarioRetryUntilMaxRetries retryTestScenario = 3 retryTestScenarioRetryUntilMaxRetries retryTestScenario = 3
) )
func (s *aztestsSuite) TestRetryTestScenarioUntilSuccess(c *chk.C) { func (s *aztestsSuite) TestRetryTestScenarioUntilSuccess(c *chk.C) {
testRetryTestScenario(c, retryTestScenarioRetryUntilSuccess) testRetryTestScenario(c, retryTestScenarioRetryUntilSuccess)
} }
func (s *aztestsSuite) TestRetryTestScenarioUntilOperationCancel(c *chk.C) { func (s *aztestsSuite) TestRetryTestScenarioUntilOperationCancel(c *chk.C) {
testRetryTestScenario(c, retryTestScenarioRetryUntilOperationCancel) testRetryTestScenario(c, retryTestScenarioRetryUntilOperationCancel)
} }
func (s *aztestsSuite) TestRetryTestScenarioUntilMaxRetries(c *chk.C) { func (s *aztestsSuite) TestRetryTestScenarioUntilMaxRetries(c *chk.C) {
testRetryTestScenario(c, retryTestScenarioRetryUntilMaxRetries) testRetryTestScenario(c, retryTestScenarioRetryUntilMaxRetries)
} }
func newRetryTestPolicyFactory(c *chk.C, scenario retryTestScenario, maxRetries int32, cancel context.CancelFunc) *retryTestPolicyFactory { func newRetryTestPolicyFactory(c *chk.C, scenario retryTestScenario, maxRetries int32, cancel context.CancelFunc) *retryTestPolicyFactory {
return &retryTestPolicyFactory{c: c, scenario: scenario, maxRetries: maxRetries, cancel: cancel} return &retryTestPolicyFactory{c: c, scenario: scenario, maxRetries: maxRetries, cancel: cancel}
} }
type retryTestPolicyFactory struct { type retryTestPolicyFactory struct {
c *chk.C c *chk.C
scenario retryTestScenario scenario retryTestScenario
maxRetries int32 maxRetries int32
cancel context.CancelFunc cancel context.CancelFunc
try int32 try int32
} }
func (f *retryTestPolicyFactory) New(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.Policy { func (f *retryTestPolicyFactory) New(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.Policy {
f.try = 0 // Reset this for each test f.try = 0 // Reset this for each test
return &retryTestPolicy{factory: f, next: next} return &retryTestPolicy{factory: f, next: next}
} }
type retryTestPolicy struct { type retryTestPolicy struct {
next pipeline.Policy next pipeline.Policy
factory *retryTestPolicyFactory factory *retryTestPolicyFactory
} }
type retryError struct { type retryError struct {
temporary, timeout bool temporary, timeout bool
} }
func (e *retryError) Temporary() bool { return e.temporary } func (e *retryError) Temporary() bool { return e.temporary }
func (e *retryError) Timeout() bool { return e.timeout } func (e *retryError) Timeout() bool { return e.timeout }
func (e *retryError) Error() string { func (e *retryError) Error() string {
return fmt.Sprintf("Temporary=%t, Timeout=%t", e.Temporary(), e.Timeout()) return fmt.Sprintf("Temporary=%t, Timeout=%t", e.Temporary(), e.Timeout())
} }
type httpResponse struct { type httpResponse struct {
response *http.Response response *http.Response
} }
func (r *httpResponse) Response() *http.Response { return r.response } func (r *httpResponse) Response() *http.Response { return r.response }
func (p *retryTestPolicy) Do(ctx context.Context, request pipeline.Request) (response pipeline.Response, err error) { func (p *retryTestPolicy) Do(ctx context.Context, request pipeline.Request) (response pipeline.Response, err error) {
c := p.factory.c c := p.factory.c
p.factory.try++ // Increment the try p.factory.try++ // Increment the try
c.Assert(p.factory.try <= p.factory.maxRetries, chk.Equals, true) // Ensure # of tries < MaxRetries c.Assert(p.factory.try <= p.factory.maxRetries, chk.Equals, true) // Ensure # of tries < MaxRetries
req := request.Request req := request.Request
// Validate the expected pre-conditions for each try // Validate the expected pre-conditions for each try
expectedHost := "PrimaryDC" expectedHost := "PrimaryDC"
if p.factory.try%2 == 0 { if p.factory.try%2 == 0 {
if p.factory.scenario != retryTestScenarioRetryUntilSuccess || p.factory.try <= 4 { if p.factory.scenario != retryTestScenarioRetryUntilSuccess || p.factory.try <= 4 {
expectedHost = "SecondaryDC" expectedHost = "SecondaryDC"
} }
} }
c.Assert(req.URL.Host, chk.Equals, expectedHost) // Ensure we got the expected primary/secondary DC c.Assert(req.URL.Host, chk.Equals, expectedHost) // Ensure we got the expected primary/secondary DC
// Ensure that any headers & query parameters this method adds (later) are removed/reset for each try // Ensure that any headers & query parameters this method adds (later) are removed/reset for each try
c.Assert(req.Header.Get("TestHeader"), chk.Equals, "") // Ensure our "TestHeader" is not in the HTTP request c.Assert(req.Header.Get("TestHeader"), chk.Equals, "") // Ensure our "TestHeader" is not in the HTTP request
values := req.URL.Query() values := req.URL.Query()
c.Assert(len(values["TestQueryParam"]), chk.Equals, 0) // TestQueryParam shouldn't be in the HTTP request c.Assert(len(values["TestQueryParam"]), chk.Equals, 0) // TestQueryParam shouldn't be in the HTTP request
if seeker, ok := req.Body.(io.ReadSeeker); !ok { if seeker, ok := req.Body.(io.ReadSeeker); !ok {
c.Fail() // Body must be an io.ReadSeeker c.Fail() // Body must be an io.ReadSeeker
} else { } else {
pos, err := seeker.Seek(0, io.SeekCurrent) pos, err := seeker.Seek(0, io.SeekCurrent)
c.Assert(err, chk.IsNil) // Ensure that body was seekable c.Assert(err, chk.IsNil) // Ensure that body was seekable
c.Assert(pos, chk.Equals, int64(0)) // Ensure body seeked back to position 0 c.Assert(pos, chk.Equals, int64(0)) // Ensure body seeked back to position 0
} }
// Add a query param & header; these not be here on the next try // Add a query param & header; these not be here on the next try
values["TestQueryParam"] = []string{"TestQueryParamValue"} values["TestQueryParam"] = []string{"TestQueryParamValue"}
req.Header.Set("TestHeader", "TestValue") // Add a header this not exist with each try req.Header.Set("TestHeader", "TestValue") // Add a header this not exist with each try
b := []byte{0} b := []byte{0}
n, err := req.Body.Read(b) n, err := req.Body.Read(b)
c.Assert(n, chk.Equals, 1) // Read failed c.Assert(n, chk.Equals, 1) // Read failed
switch p.factory.scenario { switch p.factory.scenario {
case retryTestScenarioRetryUntilSuccess: case retryTestScenarioRetryUntilSuccess:
switch p.factory.try { switch p.factory.try {
case 1: case 1:
if deadline, ok := ctx.Deadline(); ok { if deadline, ok := ctx.Deadline(); ok {
time.Sleep(time.Until(deadline) + time.Second) // Let the context timeout expire time.Sleep(time.Until(deadline) + time.Second) // Let the context timeout expire
} }
err = ctx.Err() err = ctx.Err()
case 2: case 2:
err = &retryError{temporary: true} err = &retryError{temporary: true}
case 3: case 3:
err = &retryError{timeout: true} err = &retryError{timeout: true}
case 4: case 4:
response = &httpResponse{response: &http.Response{StatusCode: http.StatusNotFound}} response = &httpResponse{response: &http.Response{StatusCode: http.StatusNotFound}}
case 5: case 5:
err = &retryError{temporary: true} // These attempts all fail but we're making sure we never see the secondary DC again err = &retryError{temporary: true} // These attempts all fail but we're making sure we never see the secondary DC again
case 6: case 6:
response = &httpResponse{response: &http.Response{StatusCode: http.StatusOK}} // Stop retries with valid response response = &httpResponse{response: &http.Response{StatusCode: http.StatusOK}} // Stop retries with valid response
default: default:
c.Fail() // Retries should have stopped so we shouldn't get here c.Fail() // Retries should have stopped so we shouldn't get here
} }
case retryTestScenarioRetryUntilOperationCancel: case retryTestScenarioRetryUntilOperationCancel:
switch p.factory.try { switch p.factory.try {
case 1: case 1:
p.factory.cancel() p.factory.cancel()
err = context.Canceled err = context.Canceled
default: default:
c.Fail() // Retries should have stopped so we shouldn't get here c.Fail() // Retries should have stopped so we shouldn't get here
} }
case retryTestScenarioRetryUntilMaxRetries: case retryTestScenarioRetryUntilMaxRetries:
err = &retryError{temporary: true} // Keep retrying until maxRetries is hit err = &retryError{temporary: true} // Keep retrying until maxRetries is hit
} }
return response, err // Return the response & err return response, err // Return the response & err
} }
func testRetryTestScenario(c *chk.C, scenario retryTestScenario) { func testRetryTestScenario(c *chk.C, scenario retryTestScenario) {
u, _ := url.Parse("http://PrimaryDC") u, _ := url.Parse("http://PrimaryDC")
retryOptions := azblob.RetryOptions{ retryOptions := azblob.RetryOptions{
Policy: azblob.RetryPolicyExponential, Policy: azblob.RetryPolicyExponential,
MaxTries: 6, MaxTries: 6,
TryTimeout: 2 * time.Second, TryTimeout: 2 * time.Second,
RetryDelay: 1 * time.Second, RetryDelay: 1 * time.Second,
MaxRetryDelay: 4 * time.Second, MaxRetryDelay: 4 * time.Second,
RetryReadsFromSecondaryHost: "SecondaryDC", RetryReadsFromSecondaryHost: "SecondaryDC",
} }
ctx := context.Background() ctx := context.Background()
ctx, cancel := context.WithTimeout(ctx, 64 /*2^MaxTries(6)*/ *retryOptions.TryTimeout) ctx, cancel := context.WithTimeout(ctx, 64 /*2^MaxTries(6)*/ *retryOptions.TryTimeout)
retrytestPolicyFactory := newRetryTestPolicyFactory(c, scenario, retryOptions.MaxTries, cancel) retrytestPolicyFactory := newRetryTestPolicyFactory(c, scenario, retryOptions.MaxTries, cancel)
factories := [...]pipeline.Factory{ factories := [...]pipeline.Factory{
azblob.NewRetryPolicyFactory(retryOptions), azblob.NewRetryPolicyFactory(retryOptions),
retrytestPolicyFactory, retrytestPolicyFactory,
} }
p := pipeline.NewPipeline(factories[:], pipeline.Options{}) p := pipeline.NewPipeline(factories[:], pipeline.Options{})
request, err := pipeline.NewRequest(http.MethodGet, *u, strings.NewReader("TestData")) request, err := pipeline.NewRequest(http.MethodGet, *u, strings.NewReader("TestData"))
response, err := p.Do(ctx, nil, request) response, err := p.Do(ctx, nil, request)
switch scenario { switch scenario {
case retryTestScenarioRetryUntilSuccess: case retryTestScenarioRetryUntilSuccess:
if err != nil || response == nil || response.Response() == nil || response.Response().StatusCode != http.StatusOK { if err != nil || response == nil || response.Response() == nil || response.Response().StatusCode != http.StatusOK {
c.Fail() // Operation didn't run to success c.Fail() // Operation didn't run to success
} }
case retryTestScenarioRetryUntilMaxRetries: case retryTestScenarioRetryUntilMaxRetries:
c.Assert(err, chk.NotNil) // Ensure we ended with an error c.Assert(err, chk.NotNil) // Ensure we ended with an error
c.Assert(response, chk.IsNil) // Ensure we ended without a valid response c.Assert(response, chk.IsNil) // Ensure we ended without a valid response
c.Assert(retrytestPolicyFactory.try, chk.Equals, retryOptions.MaxTries) // Ensure the operation ends with the exact right number of tries c.Assert(retrytestPolicyFactory.try, chk.Equals, retryOptions.MaxTries) // Ensure the operation ends with the exact right number of tries
case retryTestScenarioRetryUntilOperationCancel: case retryTestScenarioRetryUntilOperationCancel:
c.Assert(err, chk.Equals, context.Canceled) // Ensure we ended due to cancellation c.Assert(err, chk.Equals, context.Canceled) // Ensure we ended due to cancellation
c.Assert(response, chk.IsNil) // Ensure we ended without a valid response c.Assert(response, chk.IsNil) // Ensure we ended without a valid response
c.Assert(retrytestPolicyFactory.try <= retryOptions.MaxTries, chk.Equals, true) // Ensure we didn't end due to reaching max tries c.Assert(retrytestPolicyFactory.try <= retryOptions.MaxTries, chk.Equals, true) // Ensure we didn't end due to reaching max tries
} }
cancel() cancel()
} }
/* /*
Fail primary; retry should be on secondary URL - maybe do this twice Fail primary; retry should be on secondary URL - maybe do this twice
Fail secondary; and never see primary again Fail secondary; and never see primary again
Make sure any mutations are lost on each retry Make sure any mutations are lost on each retry
Make sure body is reset on each retry Make sure body is reset on each retry
Timeout a try; should retry (unless no more) Timeout a try; should retry (unless no more)
timeout an operation; should not retry timeout an operation; should not retry
check timeout query param; should be try timeout check timeout query param; should be try timeout
Return Temporary() = true; should retry (unless max) Return Temporary() = true; should retry (unless max)
Return Timeout() true; should retry (unless max) Return Timeout() true; should retry (unless max)
Secondary try returns 404; no more tries against secondary Secondary try returns 404; no more tries against secondary
error where Temporary() and Timeout() return false; no retry error where Temporary() and Timeout() return false; no retry
error where Temporary() & Timeout don't exist; no retry error where Temporary() & Timeout don't exist; no retry
no error; no retry; return success, nil no error; no retry; return success, nil
*/ */
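Porting note: a sketch of wiring the same retry options into a real client instead of a hand-built pipeline; the account URL and anonymous credential are placeholders, and azblob.NewPipeline/azblob.NewServiceURL are assumed from the package's public surface:

func newServiceWithRetries() azblob.ServiceURL {
    options := azblob.PipelineOptions{
        Retry: azblob.RetryOptions{
            Policy:                      azblob.RetryPolicyExponential,
            MaxTries:                    6,
            TryTimeout:                  2 * time.Second,
            RetryDelay:                  1 * time.Second,
            MaxRetryDelay:               4 * time.Second,
            RetryReadsFromSecondaryHost: "myaccount-secondary.blob.core.windows.net", // placeholder
        },
    }
    p := azblob.NewPipeline(azblob.NewAnonymousCredential(), options)
    u, _ := url.Parse("https://myaccount.blob.core.windows.net") // placeholder account
    return azblob.NewServiceURL(*u, p)
}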

2017-07-29/azblob/zt_test.go Normal file → Executable file

The diff for this file is not shown because of its large size.

2017-07-29/azblob/zt_url_append_blob_test.go Normal file → Executable file
@@ -18,7 +18,7 @@ func (b *AppendBlobURLSuite) TestAppendBlock(c *chk.C) {
    blob := container.NewAppendBlobURL(generateBlobName())

-   resp, err := blob.Create(context.Background(), nil, azblob.BlobHTTPHeaders{}, azblob.BlobAccessConditions{})
+   resp, err := blob.Create(context.Background(), azblob.BlobHTTPHeaders{}, nil, azblob.BlobAccessConditions{})
    c.Assert(err, chk.IsNil)
    c.Assert(resp.StatusCode(), chk.Equals, 201)
@@ -26,7 +26,7 @@ func (b *AppendBlobURLSuite) TestAppendBlock(c *chk.C) {
    c.Assert(err, chk.IsNil)
    c.Assert(appendResp.Response().StatusCode, chk.Equals, 201)
    c.Assert(appendResp.BlobAppendOffset(), chk.Equals, "0")
-   c.Assert(appendResp.BlobCommittedBlockCount(), chk.Equals, "1")
+   c.Assert(appendResp.BlobCommittedBlockCount(), chk.Equals, int32(1))
    c.Assert(appendResp.ETag(), chk.Not(chk.Equals), azblob.ETagNone)
    c.Assert(appendResp.LastModified().IsZero(), chk.Equals, false)
    c.Assert(appendResp.ContentMD5(), chk.Not(chk.Equals), "")
@@ -37,5 +37,5 @@ func (b *AppendBlobURLSuite) TestAppendBlock(c *chk.C) {
    appendResp, err = blob.AppendBlock(context.Background(), getReaderToRandomBytes(1024), azblob.BlobAccessConditions{})
    c.Assert(err, chk.IsNil)
    c.Assert(appendResp.BlobAppendOffset(), chk.Equals, "1024")
-   c.Assert(appendResp.BlobCommittedBlockCount(), chk.Equals, "2")
+   c.Assert(appendResp.BlobCommittedBlockCount(), chk.Equals, int32(2))
}
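Porting note: two append-blob changes are visible above: Create now takes BlobHTTPHeaders before Metadata, and BlobCommittedBlockCount is a typed int32 rather than a string. A minimal sketch, assuming an existing azblob.AppendBlobURL named ab and an imported "strings":

func appendTwice(ctx context.Context, ab azblob.AppendBlobURL) error {
    // Headers now come before metadata (nil here).
    if _, err := ab.Create(ctx, azblob.BlobHTTPHeaders{}, nil, azblob.BlobAccessConditions{}); err != nil {
        return err
    }
    for i := 0; i < 2; i++ {
        resp, err := ab.AppendBlock(ctx, strings.NewReader("some data"), azblob.BlobAccessConditions{})
        if err != nil {
            return err
        }
        _ = resp.BlobCommittedBlockCount() // now int32, not string
    }
    return nil
}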

2017-07-29/azblob/zt_url_blob_test.go Normal file → Executable file
@@ -38,7 +38,7 @@ func (b *BlobURLSuite) TestCreateDelete(c *chk.C) {
    blob := container.NewBlockBlobURL(generateBlobName())

-   putResp, err := blob.PutBlob(context.Background(), nil, azblob.BlobHTTPHeaders{}, nil, azblob.BlobAccessConditions{})
+   putResp, err := blob.Upload(context.Background(), nil, azblob.BlobHTTPHeaders{}, nil, azblob.BlobAccessConditions{})
    c.Assert(err, chk.IsNil)
    c.Assert(putResp.Response().StatusCode, chk.Equals, 201)
    c.Assert(putResp.ETag(), chk.Not(chk.Equals), azblob.ETagNone)
@@ -67,12 +67,12 @@ func (b *BlobURLSuite) TestGetSetProperties(c *chk.C) {
        ContentType:     "mytype",
        ContentLanguage: "martian",
    }
-   setResp, err := blob.SetProperties(context.Background(), properties, azblob.BlobAccessConditions{})
+   setResp, err := blob.SetHTTPHeaders(context.Background(), properties, azblob.BlobAccessConditions{})
    c.Assert(err, chk.IsNil)
    c.Assert(setResp.Response().StatusCode, chk.Equals, 200)
    c.Assert(setResp.ETag(), chk.Not(chk.Equals), azblob.ETagNone)
    c.Assert(setResp.LastModified().IsZero(), chk.Equals, false)
-   c.Assert(setResp.BlobSequenceNumber(), chk.Equals, "")
+   c.Assert(setResp.BlobSequenceNumber(), chk.Equals, int64(-1))
    c.Assert(setResp.RequestID(), chk.Not(chk.Equals), "")
    c.Assert(setResp.Version(), chk.Not(chk.Equals), "")
    c.Assert(setResp.Date().IsZero(), chk.Equals, false)
@@ -128,7 +128,7 @@ func (b *BlobURLSuite) TestGetSetMetadata(c *chk.C) {
    c.Assert(setResp.Version(), chk.Not(chk.Equals), "")
    c.Assert(setResp.Date().IsZero(), chk.Equals, false)

-   getResp, err := blob.GetPropertiesAndMetadata(context.Background(), azblob.BlobAccessConditions{})
+   getResp, err := blob.GetProperties(context.Background(), azblob.BlobAccessConditions{})
    c.Assert(err, chk.IsNil)
    c.Assert(getResp.ETag(), chk.Not(chk.Equals), azblob.ETagNone)
    c.Assert(getResp.LastModified().IsZero(), chk.Equals, false)
@@ -145,11 +145,11 @@ func (b *BlobURLSuite) TestCopy(c *chk.C) {
    defer delContainer(c, container)

    sourceBlob, _ := createNewBlockBlob(c, container)
-   _, err := sourceBlob.PutBlob(context.Background(), getReaderToRandomBytes(2048), azblob.BlobHTTPHeaders{}, nil, azblob.BlobAccessConditions{})
+   _, err := sourceBlob.Upload(context.Background(), getReaderToRandomBytes(2048), azblob.BlobHTTPHeaders{}, nil, azblob.BlobAccessConditions{})
    c.Assert(err, chk.IsNil)

    destBlob, _ := createNewBlockBlob(c, container)
-   copyResp, err := destBlob.StartCopy(context.Background(), sourceBlob.URL(), nil, azblob.BlobAccessConditions{}, azblob.BlobAccessConditions{})
+   copyResp, err := destBlob.StartCopyFromURL(context.Background(), sourceBlob.URL(), nil, azblob.BlobAccessConditions{}, azblob.BlobAccessConditions{})
    c.Assert(err, chk.IsNil)
    c.Assert(copyResp.Response().StatusCode, chk.Equals, 202)
    c.Assert(copyResp.ETag(), chk.Not(chk.Equals), azblob.ETagNone)
@@ -160,7 +160,7 @@ func (b *BlobURLSuite) TestCopy(c *chk.C) {
    c.Assert(copyResp.CopyID(), chk.Not(chk.Equals), "")
    c.Assert(copyResp.CopyStatus(), chk.Not(chk.Equals), "")

-   abortResp, err := destBlob.AbortCopy(context.Background(), copyResp.CopyID(), azblob.LeaseAccessConditions{})
+   abortResp, err := destBlob.AbortCopyFromURL(context.Background(), copyResp.CopyID(), azblob.LeaseAccessConditions{})
    // small copy completes before we have time to abort so check for failure case
    c.Assert(err, chk.NotNil)
    c.Assert(abortResp, chk.IsNil)
@@ -179,21 +179,21 @@ func (b *BlobURLSuite) TestSnapshot(c *chk.C) {
    resp, err := blob.CreateSnapshot(context.Background(), nil, azblob.BlobAccessConditions{})
    c.Assert(err, chk.IsNil)
    c.Assert(resp.Response().StatusCode, chk.Equals, 201)
-   c.Assert(resp.Snapshot().IsZero(), chk.Equals, false)
+   c.Assert(resp.Snapshot() == "", chk.Equals, false)
    c.Assert(resp.ETag(), chk.Not(chk.Equals), azblob.ETagNone)
    c.Assert(resp.LastModified().IsZero(), chk.Equals, false)
    c.Assert(resp.RequestID(), chk.Not(chk.Equals), "")
    c.Assert(resp.Version(), chk.Not(chk.Equals), "")
    c.Assert(resp.Date().IsZero(), chk.Equals, false)

-   blobs, err := container.ListBlobs(context.Background(), azblob.Marker{}, azblob.ListBlobsOptions{Details: azblob.BlobListingDetails{Snapshots: true}})
+   blobs, err := container.ListBlobsFlatSegment(context.Background(), azblob.Marker{}, azblob.ListBlobsSegmentOptions{Details: azblob.BlobListingDetails{Snapshots: true}})
    c.Assert(err, chk.IsNil)
    c.Assert(blobs.Blobs.Blob, chk.HasLen, 2)

    _, err = blob.Delete(context.Background(), azblob.DeleteSnapshotsOptionOnly, azblob.BlobAccessConditions{})
    c.Assert(err, chk.IsNil)

-   blobs, err = container.ListBlobs(context.Background(), azblob.Marker{}, azblob.ListBlobsOptions{Details: azblob.BlobListingDetails{Snapshots: true}})
+   blobs, err = container.ListBlobsFlatSegment(context.Background(), azblob.Marker{}, azblob.ListBlobsSegmentOptions{Details: azblob.BlobListingDetails{Snapshots: true}})
    c.Assert(err, chk.IsNil)
    c.Assert(blobs.Blobs.Blob, chk.HasLen, 1)
}
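Porting note: Snapshot() now returns a plain string (empty means "not a snapshot") instead of a time.Time, and listing moved to ListBlobsFlatSegment with ListBlobsSegmentOptions. A sketch under those assumptions, with names illustrative:

func snapshotAndList(ctx context.Context, blob azblob.BlockBlobURL, container azblob.ContainerURL) error {
    snap, err := blob.CreateSnapshot(ctx, nil, azblob.BlobAccessConditions{})
    if err != nil {
        return err
    }
    _ = snap.Snapshot() // string ID of the new snapshot
    // Include snapshots in the flat listing, as the test above does.
    _, err = container.ListBlobsFlatSegment(ctx, azblob.Marker{},
        azblob.ListBlobsSegmentOptions{Details: azblob.BlobListingDetails{Snapshots: true}})
    return err
}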
@@ -234,28 +234,25 @@ func (b *BlobURLSuite) TestLeaseAcquireRelease(c *chk.C) {
    blob, _ := createNewBlockBlob(c, container)

-   resp, err := blob.AcquireLease(context.Background(), "", 15, azblob.HTTPAccessConditions{})
-   leaseID := resp.LeaseID()
+   acq, err := blob.AcquireLease(context.Background(), "", 15, azblob.HTTPAccessConditions{})
+   leaseID := acq.LeaseID() // FIX
    c.Assert(err, chk.IsNil)
-   c.Assert(resp.Response().StatusCode, chk.Equals, 201)
-   c.Assert(resp.Date().IsZero(), chk.Equals, false)
-   c.Assert(resp.ETag(), chk.Not(chk.Equals), azblob.ETagNone)
-   c.Assert(resp.LastModified().IsZero(), chk.Equals, false)
-   c.Assert(resp.LeaseID(), chk.Equals, leaseID)
-   c.Assert(resp.LeaseTime(), chk.Equals, int32(-1))
-   c.Assert(resp.RequestID(), chk.Not(chk.Equals), "")
-   c.Assert(resp.Version(), chk.Not(chk.Equals), "")
+   c.Assert(acq.StatusCode(), chk.Equals, 201)
+   c.Assert(acq.Date().IsZero(), chk.Equals, false)
+   c.Assert(acq.ETag(), chk.Not(chk.Equals), azblob.ETagNone)
+   c.Assert(acq.LastModified().IsZero(), chk.Equals, false)
+   c.Assert(acq.LeaseID(), chk.Equals, leaseID)
+   c.Assert(acq.RequestID(), chk.Not(chk.Equals), "")
+   c.Assert(acq.Version(), chk.Not(chk.Equals), "")

-   resp, err = blob.ReleaseLease(context.Background(), leaseID, azblob.HTTPAccessConditions{})
+   rel, err := blob.ReleaseLease(context.Background(), leaseID, azblob.HTTPAccessConditions{})
    c.Assert(err, chk.IsNil)
-   c.Assert(resp.Response().StatusCode, chk.Equals, 200)
-   c.Assert(resp.Date().IsZero(), chk.Equals, false)
-   c.Assert(resp.ETag(), chk.Not(chk.Equals), azblob.ETagNone)
-   c.Assert(resp.LastModified().IsZero(), chk.Equals, false)
-   c.Assert(resp.LeaseID(), chk.Equals, "")
-   c.Assert(resp.LeaseTime(), chk.Equals, int32(-1))
-   c.Assert(resp.RequestID(), chk.Not(chk.Equals), "")
-   c.Assert(resp.Version(), chk.Not(chk.Equals), "")
+   c.Assert(rel.StatusCode(), chk.Equals, 200)
+   c.Assert(rel.Date().IsZero(), chk.Equals, false)
+   c.Assert(rel.ETag(), chk.Not(chk.Equals), azblob.ETagNone)
+   c.Assert(rel.LastModified().IsZero(), chk.Equals, false)
+   c.Assert(rel.RequestID(), chk.Not(chk.Equals), "")
+   c.Assert(rel.Version(), chk.Not(chk.Equals), "")
}

@@ -265,45 +262,41 @@ func (b *BlobURLSuite) TestLeaseRenewChangeBreak(c *chk.C) {
    blob, _ := createNewBlockBlob(c, container)

-   resp, err := blob.AcquireLease(context.Background(), newUUID().String(), 15, azblob.HTTPAccessConditions{})
+   acq, err := blob.AcquireLease(context.Background(), newUUID().String(), 15, azblob.HTTPAccessConditions{})
    c.Assert(err, chk.IsNil)
-   leaseID := resp.LeaseID()
+   leaseID := acq.LeaseID()

-   resp, err = blob.ChangeLease(context.Background(), leaseID, newUUID().String(), azblob.HTTPAccessConditions{})
+   chg, err := blob.ChangeLease(context.Background(), leaseID, newUUID().String(), azblob.HTTPAccessConditions{})
    c.Assert(err, chk.IsNil)
-   newID := resp.LeaseID()
-   c.Assert(resp.Response().StatusCode, chk.Equals, 200)
-   c.Assert(resp.Date().IsZero(), chk.Equals, false)
-   c.Assert(resp.ETag(), chk.Not(chk.Equals), azblob.ETagNone)
-   c.Assert(resp.LastModified().IsZero(), chk.Equals, false)
-   c.Assert(resp.LeaseID(), chk.Equals, newID)
-   c.Assert(resp.LeaseTime(), chk.Equals, int32(-1))
-   c.Assert(resp.RequestID(), chk.Not(chk.Equals), "")
-   c.Assert(resp.Version(), chk.Not(chk.Equals), "")
+   newID := chg.LeaseID()
+   c.Assert(chg.StatusCode(), chk.Equals, 200)
+   c.Assert(chg.Date().IsZero(), chk.Equals, false)
+   c.Assert(chg.ETag(), chk.Not(chk.Equals), azblob.ETagNone)
+   c.Assert(chg.LastModified().IsZero(), chk.Equals, false)
+   c.Assert(chg.LeaseID(), chk.Equals, newID)
+   c.Assert(chg.Version(), chk.Not(chk.Equals), "")

-   resp, err = blob.RenewLease(context.Background(), newID, azblob.HTTPAccessConditions{})
+   renew, err := blob.RenewLease(context.Background(), newID, azblob.HTTPAccessConditions{})
    c.Assert(err, chk.IsNil)
-   c.Assert(resp.Response().StatusCode, chk.Equals, 200)
-   c.Assert(resp.Date().IsZero(), chk.Equals, false)
-   c.Assert(resp.ETag(), chk.Not(chk.Equals), azblob.ETagNone)
-   c.Assert(resp.LastModified().IsZero(), chk.Equals, false)
-   c.Assert(resp.LeaseID(), chk.Equals, newID)
-   c.Assert(resp.LeaseTime(), chk.Equals, int32(-1))
-   c.Assert(resp.RequestID(), chk.Not(chk.Equals), "")
-   c.Assert(resp.Version(), chk.Not(chk.Equals), "")
+   c.Assert(renew.StatusCode(), chk.Equals, 200)
+   c.Assert(renew.Date().IsZero(), chk.Equals, false)
+   c.Assert(renew.ETag(), chk.Not(chk.Equals), azblob.ETagNone)
+   c.Assert(renew.LastModified().IsZero(), chk.Equals, false)
+   c.Assert(renew.LeaseID(), chk.Equals, newID)
+   c.Assert(renew.RequestID(), chk.Not(chk.Equals), "")
+   c.Assert(renew.Version(), chk.Not(chk.Equals), "")

-   resp, err = blob.BreakLease(context.Background(), newID, 5, azblob.HTTPAccessConditions{})
+   brk, err := blob.BreakLease(context.Background(), 5, azblob.HTTPAccessConditions{})
    c.Assert(err, chk.IsNil)
-   c.Assert(resp.Response().StatusCode, chk.Equals, 202)
-   c.Assert(resp.Date().IsZero(), chk.Equals, false)
-   c.Assert(resp.ETag(), chk.Not(chk.Equals), azblob.ETagNone)
-   c.Assert(resp.LastModified().IsZero(), chk.Equals, false)
-   c.Assert(resp.LeaseID(), chk.Equals, "")
-   c.Assert(resp.LeaseTime(), chk.Not(chk.Equals), "")
-   c.Assert(resp.RequestID(), chk.Not(chk.Equals), "")
-   c.Assert(resp.Version(), chk.Not(chk.Equals), "")
+   c.Assert(brk.StatusCode(), chk.Equals, 202)
+   c.Assert(brk.Date().IsZero(), chk.Equals, false)
+   c.Assert(brk.ETag(), chk.Not(chk.Equals), azblob.ETagNone)
+   c.Assert(brk.LastModified().IsZero(), chk.Equals, false)
+   c.Assert(brk.LeaseTime(), chk.Not(chk.Equals), "")
+   c.Assert(brk.RequestID(), chk.Not(chk.Equals), "")
+   c.Assert(brk.Version(), chk.Not(chk.Equals), "")

-   resp, err = blob.ReleaseLease(context.Background(), newID, azblob.HTTPAccessConditions{})
+   _, err = blob.ReleaseLease(context.Background(), newID, azblob.HTTPAccessConditions{})
    c.Assert(err, chk.IsNil)
}
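Porting note: each lease verb now returns its own response type, and BreakLease takes only a break period in seconds (no lease ID). A compressed lifecycle sketch against an assumed blob URL:

func leaseLifecycle(ctx context.Context, blob azblob.BlockBlobURL) error {
    acq, err := blob.AcquireLease(ctx, "", 15, azblob.HTTPAccessConditions{}) // "" lets the service choose the ID
    if err != nil {
        return err
    }
    id := acq.LeaseID()
    if _, err = blob.RenewLease(ctx, id, azblob.HTTPAccessConditions{}); err != nil {
        return err
    }
    if _, err = blob.BreakLease(ctx, 5, azblob.HTTPAccessConditions{}); err != nil { // note: no lease ID
        return err
    }
    _, err = blob.ReleaseLease(ctx, id, azblob.HTTPAccessConditions{})
    return err
}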
@@ -314,10 +307,10 @@ func (b *BlobURLSuite) TestGetBlobRange(c *chk.C) {
    blob, _ := createNewBlockBlob(c, container)

    contentR, contentD := getRandomDataAndReader(2048)
-   _, err := blob.PutBlob(context.Background(), contentR, azblob.BlobHTTPHeaders{}, nil, azblob.BlobAccessConditions{})
+   _, err := blob.Upload(context.Background(), contentR, azblob.BlobHTTPHeaders{}, nil, azblob.BlobAccessConditions{})
    c.Assert(err, chk.IsNil)

-   resp, err := blob.GetBlob(context.Background(), azblob.BlobRange{Offset: 0, Count: 1024}, azblob.BlobAccessConditions{}, false)
+   resp, err := blob.Download(context.Background(), 0, 1024, azblob.BlobAccessConditions{}, false)
    c.Assert(err, chk.IsNil)
    c.Assert(resp.ContentLength(), chk.Equals, int64(1024))
@@ -325,7 +318,7 @@ func (b *BlobURLSuite) TestGetBlobRange(c *chk.C) {
    c.Assert(err, chk.IsNil)
    c.Assert(download, chk.DeepEquals, contentD[:1024])

-   resp, err = blob.GetBlob(context.Background(), azblob.BlobRange{Offset: 1024}, azblob.BlobAccessConditions{}, false)
+   resp, err = blob.Download(context.Background(), 1024, 0, azblob.BlobAccessConditions{}, false)
    c.Assert(err, chk.IsNil)
    c.Assert(resp.ContentLength(), chk.Equals, int64(1024))
@@ -335,14 +328,13 @@ func (b *BlobURLSuite) TestGetBlobRange(c *chk.C) {
    c.Assert(download, chk.DeepEquals, contentD[1024:])
    c.Assert(resp.AcceptRanges(), chk.Equals, "bytes")
-   c.Assert(resp.BlobCommittedBlockCount(), chk.Equals, "")
+   c.Assert(resp.BlobCommittedBlockCount(), chk.Equals, int32(-1))
    c.Assert(resp.BlobContentMD5(), chk.Not(chk.Equals), [md5.Size]byte{})
-   c.Assert(resp.BlobSequenceNumber(), chk.Equals, "")
+   c.Assert(resp.BlobSequenceNumber(), chk.Equals, int64(-1))
    c.Assert(resp.BlobType(), chk.Equals, azblob.BlobBlockBlob)
    c.Assert(resp.CacheControl(), chk.Equals, "")
    c.Assert(resp.ContentDisposition(), chk.Equals, "")
    c.Assert(resp.ContentEncoding(), chk.Equals, "")
-   c.Assert(resp.ContentMD5(), chk.Equals, [md5.Size]byte{})
    c.Assert(resp.ContentRange(), chk.Equals, "bytes 1024-2047/2048")
    c.Assert(resp.ContentType(), chk.Equals, "application/octet-stream")
    c.Assert(resp.CopyCompletionTime().IsZero(), chk.Equals, true)
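Porting note: BlobRange is gone; Download takes an offset and a count directly, with a count of 0 meaning "to the end of the blob" (the second call above). A ranged-read sketch, assuming "io/ioutil" is imported and the helper name is illustrative:

func readTail(ctx context.Context, blob azblob.BlockBlobURL) ([]byte, error) {
    // Read from byte 1024 to the end of the blob.
    resp, err := blob.Download(ctx, 1024, 0, azblob.BlobAccessConditions{}, false)
    if err != nil {
        return nil, err
    }
    body := resp.Body(azblob.RetryReaderOptions{})
    defer body.Close()
    return ioutil.ReadAll(body)
}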

2017-07-29/azblob/zt_url_block_blob_test.go Normal file → Executable file
@@ -1,68 +1,68 @@
package azblob_test

import (
    "context"
    "encoding/base64"
    "fmt"

    "github.com/Azure/azure-storage-blob-go/2017-07-29/azblob"
    chk "gopkg.in/check.v1" // go get gopkg.in/check.v1
)

type BlockBlobURLSuite struct{}

var _ = chk.Suite(&BlockBlobURLSuite{})

func (b *BlockBlobURLSuite) TestPutGetBlocks(c *chk.C) {
    bsu := getBSU()
    container, _ := createNewContainer(c, bsu)
    defer delContainer(c, container)

    blob := container.NewBlockBlobURL(generateBlobName())

    blockID := base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf("%6d", 0)))
-   putResp, err := blob.PutBlock(context.Background(), blockID, getReaderToRandomBytes(1024), azblob.LeaseAccessConditions{})
+   putResp, err := blob.StageBlock(context.Background(), blockID, getReaderToRandomBytes(1024), azblob.LeaseAccessConditions{})
    c.Assert(err, chk.IsNil)
    c.Assert(putResp.Response().StatusCode, chk.Equals, 201)
    c.Assert(putResp.ContentMD5(), chk.Not(chk.Equals), "")
    c.Assert(putResp.RequestID(), chk.Not(chk.Equals), "")
    c.Assert(putResp.Version(), chk.Not(chk.Equals), "")
    c.Assert(putResp.Date().IsZero(), chk.Equals, false)

    blockList, err := blob.GetBlockList(context.Background(), azblob.BlockListAll, azblob.LeaseAccessConditions{})
    c.Assert(err, chk.IsNil)
    c.Assert(blockList.Response().StatusCode, chk.Equals, 200)
    c.Assert(blockList.LastModified().IsZero(), chk.Equals, true)
    c.Assert(blockList.ETag(), chk.Equals, azblob.ETagNone)
    c.Assert(blockList.ContentType(), chk.Not(chk.Equals), "")
    c.Assert(blockList.BlobContentLength(), chk.Equals, int64(-1))
    c.Assert(blockList.RequestID(), chk.Not(chk.Equals), "")
    c.Assert(blockList.Version(), chk.Not(chk.Equals), "")
    c.Assert(blockList.Date().IsZero(), chk.Equals, false)
    c.Assert(blockList.CommittedBlocks, chk.HasLen, 0)
    c.Assert(blockList.UncommittedBlocks, chk.HasLen, 1)

-   listResp, err := blob.PutBlockList(context.Background(), []string{blockID}, nil, azblob.BlobHTTPHeaders{}, azblob.BlobAccessConditions{})
+   listResp, err := blob.CommitBlockList(context.Background(), []string{blockID}, azblob.BlobHTTPHeaders{}, nil, azblob.BlobAccessConditions{})
    c.Assert(err, chk.IsNil)
    c.Assert(listResp.Response().StatusCode, chk.Equals, 201)
    c.Assert(listResp.LastModified().IsZero(), chk.Equals, false)
    c.Assert(listResp.ETag(), chk.Not(chk.Equals), azblob.ETagNone)
    c.Assert(listResp.ContentMD5(), chk.Not(chk.Equals), "")
    c.Assert(listResp.RequestID(), chk.Not(chk.Equals), "")
    c.Assert(listResp.Version(), chk.Not(chk.Equals), "")
    c.Assert(listResp.Date().IsZero(), chk.Equals, false)

    blockList, err = blob.GetBlockList(context.Background(), azblob.BlockListAll, azblob.LeaseAccessConditions{})
    c.Assert(err, chk.IsNil)
    c.Assert(blockList.Response().StatusCode, chk.Equals, 200)
    c.Assert(blockList.LastModified().IsZero(), chk.Equals, false)
    c.Assert(blockList.ETag(), chk.Not(chk.Equals), azblob.ETagNone)
    c.Assert(blockList.ContentType(), chk.Not(chk.Equals), "")
    c.Assert(blockList.BlobContentLength(), chk.Equals, int64(1024))
    c.Assert(blockList.RequestID(), chk.Not(chk.Equals), "")
    c.Assert(blockList.Version(), chk.Not(chk.Equals), "")
    c.Assert(blockList.Date().IsZero(), chk.Equals, false)
    c.Assert(blockList.CommittedBlocks, chk.HasLen, 1)
    c.Assert(blockList.UncommittedBlocks, chk.HasLen, 0)
}
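Porting note: PutBlock/PutBlockList became StageBlock/CommitBlockList, and CommitBlockList also moved BlobHTTPHeaders ahead of Metadata. A two-step sketch with an illustrative block ID, assuming "strings", "encoding/base64", and "fmt" are imported:

func stageAndCommit(ctx context.Context, blob azblob.BlockBlobURL) error {
    blockID := base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf("%6d", 0)))
    if _, err := blob.StageBlock(ctx, blockID, strings.NewReader("block data"), azblob.LeaseAccessConditions{}); err != nil {
        return err
    }
    // Headers precede metadata (nil here), mirroring the renamed call above.
    _, err := blob.CommitBlockList(ctx, []string{blockID}, azblob.BlobHTTPHeaders{}, nil, azblob.BlobAccessConditions{})
    return err
}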

2017-07-29/azblob/zt_url_container_test.go Normal file → Executable file
@@ -33,7 +33,7 @@ func (s *ContainerURLSuite) TestCreateDelete(c *chk.C) {
    c.Assert(cResp.RequestID(), chk.Not(chk.Equals), "")
    c.Assert(cResp.Version(), chk.Not(chk.Equals), "")

-   containers, err := sa.ListContainers(context.Background(), azblob.Marker{}, azblob.ListContainersOptions{Prefix: containerPrefix})
+   containers, err := sa.ListContainersSegment(context.Background(), azblob.Marker{}, azblob.ListContainersSegmentOptions{Prefix: containerPrefix})
    c.Assert(err, chk.IsNil)
    c.Assert(containers.Containers, chk.HasLen, 1)
    c.Assert(containers.Containers[0].Name, chk.Equals, containerName)
@@ -45,7 +45,7 @@ func (s *ContainerURLSuite) TestCreateDelete(c *chk.C) {
    c.Assert(dResp.RequestID(), chk.Not(chk.Equals), "")
    c.Assert(dResp.Version(), chk.Not(chk.Equals), "")

-   containers, err = sa.ListContainers(context.Background(), azblob.Marker{}, azblob.ListContainersOptions{Prefix: containerPrefix})
+   containers, err = sa.ListContainersSegment(context.Background(), azblob.Marker{}, azblob.ListContainersSegmentOptions{Prefix: containerPrefix})
    c.Assert(err, chk.IsNil)
    c.Assert(containers.Containers, chk.HasLen, 0)
}
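Porting note: ListContainers is now ListContainersSegment with ListContainersSegmentOptions. A sketch against an assumed azblob.ServiceURL, with the prefix as a placeholder:

func printContainers(ctx context.Context, sa azblob.ServiceURL) error {
    resp, err := sa.ListContainersSegment(ctx, azblob.Marker{}, azblob.ListContainersSegmentOptions{Prefix: "my"})
    if err != nil {
        return err
    }
    for _, ctr := range resp.Containers {
        fmt.Println(ctr.Name)
    }
    return nil
}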
@@ -76,7 +76,7 @@ func (s *ContainerURLSuite) TestGetSetPermissions(c *chk.C) {
            Permission: "rw",
        },
    }}
-   sResp, err := container.SetPermissions(context.Background(), azblob.PublicAccessNone, permissions, azblob.ContainerAccessConditions{})
+   sResp, err := container.SetAccessPolicy(context.Background(), azblob.PublicAccessNone, permissions, azblob.ContainerAccessConditions{})
    c.Assert(err, chk.IsNil)
    c.Assert(sResp.Response().StatusCode, chk.Equals, 200)
    c.Assert(sResp.Date().IsZero(), chk.Equals, false)
@@ -85,7 +85,7 @@ func (s *ContainerURLSuite) TestGetSetPermissions(c *chk.C) {
    c.Assert(sResp.RequestID(), chk.Not(chk.Equals), "")
    c.Assert(sResp.Version(), chk.Not(chk.Equals), "")

-   gResp, err := container.GetPermissions(context.Background(), azblob.LeaseAccessConditions{})
+   gResp, err := container.GetAccessPolicy(context.Background(), azblob.LeaseAccessConditions{})
    c.Assert(err, chk.IsNil)
    c.Assert(gResp.Response().StatusCode, chk.Equals, 200)
    c.Assert(gResp.BlobPublicAccess(), chk.Equals, azblob.PublicAccessNone)
@@ -117,7 +117,7 @@ func (s *ContainerURLSuite) TestGetSetMetadata(c *chk.C) {
    c.Assert(sResp.RequestID(), chk.Not(chk.Equals), "")
    c.Assert(sResp.Version(), chk.Not(chk.Equals), "")

-   gResp, err := container.GetPropertiesAndMetadata(context.Background(), azblob.LeaseAccessConditions{})
+   gResp, err := container.GetProperties(context.Background(), azblob.LeaseAccessConditions{})
    c.Assert(err, chk.IsNil)
    c.Assert(gResp.Response().StatusCode, chk.Equals, 200)
    c.Assert(gResp.Date().IsZero(), chk.Equals, false)
@@ -134,7 +134,7 @@ func (s *ContainerURLSuite) TestListBlobs(c *chk.C) {
    container, _ := createNewContainer(c, bsu)
    defer delContainer(c, container)

-   blobs, err := container.ListBlobs(context.Background(), azblob.Marker{}, azblob.ListBlobsOptions{})
+   blobs, err := container.ListBlobsFlatSegment(context.Background(), azblob.Marker{}, azblob.ListBlobsSegmentOptions{})
    c.Assert(err, chk.IsNil)
    c.Assert(blobs.Response().StatusCode, chk.Equals, 200)
    c.Assert(blobs.ContentType(), chk.Not(chk.Equals), "")
@@ -151,15 +151,14 @@ func (s *ContainerURLSuite) TestListBlobs(c *chk.C) {
    blob := container.NewBlockBlobURL(generateBlobName())

-   _, err = blob.PutBlob(context.Background(), nil, azblob.BlobHTTPHeaders{}, nil, azblob.BlobAccessConditions{})
+   _, err = blob.Upload(context.Background(), nil, azblob.BlobHTTPHeaders{}, nil, azblob.BlobAccessConditions{})
    c.Assert(err, chk.IsNil)

-   blobs, err = container.ListBlobs(context.Background(), azblob.Marker{}, azblob.ListBlobsOptions{})
+   blobs, err = container.ListBlobsFlatSegment(context.Background(), azblob.Marker{}, azblob.ListBlobsSegmentOptions{})
    c.Assert(err, chk.IsNil)
-   c.Assert(blobs.Blobs.BlobPrefix, chk.HasLen, 0)
    c.Assert(blobs.Blobs.Blob, chk.HasLen, 1)
    c.Assert(blobs.Blobs.Blob[0].Name, chk.NotNil)
-   c.Assert(blobs.Blobs.Blob[0].Snapshot.IsZero(), chk.Equals, true)
+   c.Assert(blobs.Blobs.Blob[0].Snapshot == "", chk.Equals, true)
    c.Assert(blobs.Blobs.Blob[0].Metadata, chk.HasLen, 0)
    c.Assert(blobs.Blobs.Blob[0].Properties, chk.NotNil)
    c.Assert(blobs.Blobs.Blob[0].Properties.LastModified, chk.NotNil)
@@ -191,28 +190,25 @@ func (s *ContainerURLSuite) TestLeaseAcquireRelease(c *chk.C) {
    container, _ := createNewContainer(c, bsu)
    defer delContainer(c, container)

-   resp, err := container.AcquireLease(context.Background(), "", 15, azblob.HTTPAccessConditions{})
-   leaseID := resp.LeaseID()
+   acq, err := container.AcquireLease(context.Background(), "", 15, azblob.HTTPAccessConditions{})
+   leaseID := acq.LeaseID()
    c.Assert(err, chk.IsNil)
-   c.Assert(resp.Response().StatusCode, chk.Equals, 201)
-   c.Assert(resp.Date().IsZero(), chk.Equals, false)
-   c.Assert(resp.ETag(), chk.Not(chk.Equals), azblob.ETagNone)
-   c.Assert(resp.LastModified().IsZero(), chk.Equals, false)
-   c.Assert(resp.LeaseID(), chk.Equals, leaseID)
-   c.Assert(resp.LeaseTime(), chk.Equals, int32(-1))
-   c.Assert(resp.RequestID(), chk.Not(chk.Equals), "")
-   c.Assert(resp.Version(), chk.Not(chk.Equals), "")
+   c.Assert(acq.StatusCode(), chk.Equals, 201)
+   c.Assert(acq.Date().IsZero(), chk.Equals, false)
+   c.Assert(acq.ETag(), chk.Not(chk.Equals), azblob.ETagNone)
+   c.Assert(acq.LastModified().IsZero(), chk.Equals, false)
+   c.Assert(acq.LeaseID(), chk.Equals, leaseID)
+   c.Assert(acq.RequestID(), chk.Not(chk.Equals), "")
+   c.Assert(acq.Version(), chk.Not(chk.Equals), "")

-   resp, err = container.ReleaseLease(context.Background(), leaseID, azblob.HTTPAccessConditions{})
+   rel, err := container.ReleaseLease(context.Background(), leaseID, azblob.HTTPAccessConditions{})
    c.Assert(err, chk.IsNil)
-   c.Assert(resp.Response().StatusCode, chk.Equals, 200)
-   c.Assert(resp.Date().IsZero(), chk.Equals, false)
-   c.Assert(resp.ETag(), chk.Not(chk.Equals), azblob.ETagNone)
-   c.Assert(resp.LastModified().IsZero(), chk.Equals, false)
-   c.Assert(resp.LeaseID(), chk.Equals, "")
-   c.Assert(resp.LeaseTime(), chk.Equals, int32(-1))
-   c.Assert(resp.RequestID(), chk.Not(chk.Equals), "")
-   c.Assert(resp.Version(), chk.Not(chk.Equals), "")
+   c.Assert(rel.StatusCode(), chk.Equals, 200)
+   c.Assert(rel.Date().IsZero(), chk.Equals, false)
+   c.Assert(rel.ETag(), chk.Not(chk.Equals), azblob.ETagNone)
+   c.Assert(rel.LastModified().IsZero(), chk.Equals, false)
+   c.Assert(rel.RequestID(), chk.Not(chk.Equals), "")
+   c.Assert(rel.Version(), chk.Not(chk.Equals), "")
}

@@ -220,46 +216,42 @@ func (s *ContainerURLSuite) TestLeaseRenewChangeBreak(c *chk.C) {
    container, _ := createNewContainer(c, bsu)
    defer delContainer(c, container)

-   resp, err := container.AcquireLease(context.Background(), "", 15, azblob.HTTPAccessConditions{})
-   leaseID := resp.LeaseID()
+   al, err := container.AcquireLease(context.Background(), "", 15, azblob.HTTPAccessConditions{})
+   leaseID := al.LeaseID()
    c.Assert(err, chk.IsNil)

    newID := newUUID().String()
-   resp, err = container.ChangeLease(context.Background(), leaseID, newID, azblob.HTTPAccessConditions{})
-   newID = resp.LeaseID()
+   cl, err := container.ChangeLease(context.Background(), leaseID, newID, azblob.HTTPAccessConditions{})
+   newID = cl.LeaseID()
    c.Assert(err, chk.IsNil)
-   c.Assert(resp.Response().StatusCode, chk.Equals, 200)
-   c.Assert(resp.Date().IsZero(), chk.Equals, false)
-   c.Assert(resp.ETag(), chk.Not(chk.Equals), azblob.ETagNone)
-   c.Assert(resp.LastModified().IsZero(), chk.Equals, false)
-   c.Assert(resp.LeaseID(), chk.Equals, newID)
-   c.Assert(resp.LeaseTime(), chk.Equals, int32(-1))
-   c.Assert(resp.RequestID(), chk.Not(chk.Equals), "")
-   c.Assert(resp.Version(), chk.Not(chk.Equals), "")
+   c.Assert(cl.StatusCode(), chk.Equals, 200)
+   c.Assert(cl.Date().IsZero(), chk.Equals, false)
+   c.Assert(cl.ETag(), chk.Not(chk.Equals), azblob.ETagNone)
+   c.Assert(cl.LastModified().IsZero(), chk.Equals, false)
+   c.Assert(cl.LeaseID(), chk.Equals, newID)
+   c.Assert(cl.RequestID(), chk.Not(chk.Equals), "")
+   c.Assert(cl.Version(), chk.Not(chk.Equals), "")

-   resp, err = container.RenewLease(context.Background(), newID, azblob.HTTPAccessConditions{})
+   rl, err := container.RenewLease(context.Background(), newID, azblob.HTTPAccessConditions{})
    c.Assert(err, chk.IsNil)
-   c.Assert(resp.Response().StatusCode, chk.Equals, 200)
-   c.Assert(resp.Date().IsZero(), chk.Equals, false)
-   c.Assert(resp.ETag(), chk.Not(chk.Equals), azblob.ETagNone)
-   c.Assert(resp.LastModified().IsZero(), chk.Equals, false)
-   c.Assert(resp.LeaseID(), chk.Equals, newID)
-   c.Assert(resp.LeaseTime(), chk.Equals, int32(-1))
-   c.Assert(resp.RequestID(), chk.Not(chk.Equals), "")
-   c.Assert(resp.Version(), chk.Not(chk.Equals), "")
+   c.Assert(rl.StatusCode(), chk.Equals, 200)
+   c.Assert(rl.Date().IsZero(), chk.Equals, false)
+   c.Assert(rl.ETag(), chk.Not(chk.Equals), azblob.ETagNone)
+   c.Assert(rl.LastModified().IsZero(), chk.Equals, false)
+   c.Assert(rl.LeaseID(), chk.Equals, newID)
+   c.Assert(rl.RequestID(), chk.Not(chk.Equals), "")
+   c.Assert(rl.Version(), chk.Not(chk.Equals), "")

-   resp, err = container.BreakLease(context.Background(), newID, 5, azblob.HTTPAccessConditions{})
+   bl, err := container.BreakLease(context.Background(), 5, azblob.HTTPAccessConditions{})
    c.Assert(err, chk.IsNil)
-   c.Assert(resp.Response().StatusCode, chk.Equals, 202)
-   c.Assert(resp.Date().IsZero(), chk.Equals, false)
-   c.Assert(resp.ETag(), chk.Not(chk.Equals), azblob.ETagNone)
-   c.Assert(resp.LastModified().IsZero(), chk.Equals, false)
-   c.Assert(resp.LeaseID(), chk.Equals, "")
-   c.Assert(resp.LeaseTime(), chk.Not(chk.Equals), int32(-1))
-   c.Assert(resp.RequestID(), chk.Not(chk.Equals), "")
-   c.Assert(resp.Version(), chk.Not(chk.Equals), "")
+   c.Assert(bl.StatusCode(), chk.Equals, 202)
+   c.Assert(bl.Date().IsZero(), chk.Equals, false)
+   c.Assert(bl.ETag(), chk.Not(chk.Equals), azblob.ETagNone)
+   c.Assert(bl.LastModified().IsZero(), chk.Equals, false)
+   c.Assert(bl.RequestID(), chk.Not(chk.Equals), "")
+   c.Assert(bl.Version(), chk.Not(chk.Equals), "")

-   resp, err = container.ReleaseLease(context.Background(), newID, azblob.HTTPAccessConditions{})
+   _, err = container.ReleaseLease(context.Background(), newID, azblob.HTTPAccessConditions{})
    c.Assert(err, chk.IsNil)
}

@@ -281,7 +273,7 @@ func (s *ContainerURLSuite) TestListBlobsPaged(c *chk.C) {
    iterations := numBlobs / maxResultsPerPage

    for i := 0; i < iterations; i++ {
-       resp, err := container.ListBlobs(context.Background(), marker, azblob.ListBlobsOptions{MaxResults: maxResultsPerPage})
+       resp, err := container.ListBlobsFlatSegment(context.Background(), marker, azblob.ListBlobsSegmentOptions{MaxResults: maxResultsPerPage})
        c.Assert(err, chk.IsNil)
        c.Assert(resp.Blobs.Blob, chk.HasLen, maxResultsPerPage)
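Porting note: the "Segment" suffix signals marker-driven paging. A full-enumeration sketch; Marker.NotDone and the response's NextMarker field are assumed from the package's paging pattern, and the page size is a placeholder:

func printAllBlobs(ctx context.Context, container azblob.ContainerURL) error {
    for marker := (azblob.Marker{}); marker.NotDone(); {
        resp, err := container.ListBlobsFlatSegment(ctx, marker, azblob.ListBlobsSegmentOptions{MaxResults: 3})
        if err != nil {
            return err
        }
        for _, blob := range resp.Blobs.Blob {
            fmt.Println(blob.Name)
        }
        marker = resp.NextMarker // an exhausted marker ends the loop
    }
    return nil
}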
@@ -296,7 +288,7 @@ func (s *ContainerURLSuite) TestSetMetadataCondition(c *chk.C) {
    container, _ := createNewContainer(c, bsu)
    defer delContainer(c, container)

    time.Sleep(time.Second * 3)
-   currTime := time.Now()
+   currTime := time.Now().UTC()
    rResp, err := container.SetMetadata(context.Background(), azblob.Metadata{"foo": "bar"},
        azblob.ContainerAccessConditions{HTTPAccessConditions: azblob.HTTPAccessConditions{IfModifiedSince: currTime}})
    c.Assert(err, chk.NotNil)
@@ -304,7 +296,7 @@ func (s *ContainerURLSuite) TestSetMetadataCondition(c *chk.C) {
    se, ok := err.(azblob.StorageError)
    c.Assert(ok, chk.Equals, true)
    c.Assert(se.Response().StatusCode, chk.Equals, http.StatusPreconditionFailed)

-   gResp, err := container.GetPropertiesAndMetadata(context.Background(), azblob.LeaseAccessConditions{})
+   gResp, err := container.GetProperties(context.Background(), azblob.LeaseAccessConditions{})
    c.Assert(err, chk.IsNil)
    md := gResp.NewMetadata()
    c.Assert(md, chk.HasLen, 0)
@@ -328,7 +320,7 @@ func (s *ContainerURLSuite) TestListBlobsWithPrefix(c *chk.C) {
        createBlockBlobWithPrefix(c, container, prefix)
    }

-   blobs, err := container.ListBlobs(context.Background(), azblob.Marker{}, azblob.ListBlobsOptions{Delimiter: "/"})
+   blobs, err := container.ListBlobsHierarchySegment(context.Background(), azblob.Marker{}, "/", azblob.ListBlobsSegmentOptions{})
    c.Assert(err, chk.IsNil)
    c.Assert(blobs.Blobs.BlobPrefix, chk.HasLen, 3)
    c.Assert(blobs.Blobs.Blob, chk.HasLen, 0)
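Porting note: the Delimiter option moved out of the options struct; it is now an argument to ListBlobsHierarchySegment. A sketch that separates virtual directories from blobs; the Name field on BlobPrefix is assumed from the listing model:

func printHierarchy(ctx context.Context, container azblob.ContainerURL) error {
    resp, err := container.ListBlobsHierarchySegment(ctx, azblob.Marker{}, "/", azblob.ListBlobsSegmentOptions{})
    if err != nil {
        return err
    }
    for _, prefix := range resp.Blobs.BlobPrefix { // virtual directories under "/"
        fmt.Println("dir: ", prefix.Name)
    }
    for _, blob := range resp.Blobs.Blob { // blobs at this level
        fmt.Println("blob:", blob.Name)
    }
    return nil
}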

2017-07-29/azblob/zt_url_page_blob_test.go Normal file → Executable file
@@ -19,18 +19,18 @@ func (b *PageBlobURLSuite) TestPutGetPages(c *chk.C) {
    blob, _ := createNewPageBlob(c, container)

    pageRange := azblob.PageRange{Start: 0, End: 1023}
-   putResp, err := blob.PutPages(context.Background(), pageRange, getReaderToRandomBytes(1024), azblob.BlobAccessConditions{})
+   putResp, err := blob.UploadPages(context.Background(), 0, getReaderToRandomBytes(1024), azblob.BlobAccessConditions{})
    c.Assert(err, chk.IsNil)
    c.Assert(putResp.Response().StatusCode, chk.Equals, 201)
    c.Assert(putResp.LastModified().IsZero(), chk.Equals, false)
    c.Assert(putResp.ETag(), chk.Not(chk.Equals), azblob.ETagNone)
    c.Assert(putResp.ContentMD5(), chk.Not(chk.Equals), "")
-   c.Assert(putResp.BlobSequenceNumber(), chk.Equals, int32(0))
+   c.Assert(putResp.BlobSequenceNumber(), chk.Equals, int64(0))
    c.Assert(putResp.RequestID(), chk.Not(chk.Equals), "")
    c.Assert(putResp.Version(), chk.Not(chk.Equals), "")
    c.Assert(putResp.Date().IsZero(), chk.Equals, false)

-   pageList, err := blob.GetPageRanges(context.Background(), azblob.BlobRange{Offset: 0, Count: 1023}, azblob.BlobAccessConditions{})
+   pageList, err := blob.GetPageRanges(context.Background(), 0, 1023, azblob.BlobAccessConditions{})
    c.Assert(err, chk.IsNil)
    c.Assert(pageList.Response().StatusCode, chk.Equals, 200)
    c.Assert(pageList.LastModified().IsZero(), chk.Equals, false)
@@ -49,26 +49,26 @@ func (b *PageBlobURLSuite) TestClearDiffPages(c *chk.C) {
    defer delContainer(c, container)

    blob, _ := createNewPageBlob(c, container)
-   _, err := blob.PutPages(context.Background(), azblob.PageRange{Start: 0, End: 2047}, getReaderToRandomBytes(2048), azblob.BlobAccessConditions{})
+   _, err := blob.UploadPages(context.Background(), 0, getReaderToRandomBytes(2048), azblob.BlobAccessConditions{})
    c.Assert(err, chk.IsNil)

    snapshotResp, err := blob.CreateSnapshot(context.Background(), nil, azblob.BlobAccessConditions{})
    c.Assert(err, chk.IsNil)

-   _, err = blob.PutPages(context.Background(), azblob.PageRange{Start: 2048, End: 4095}, getReaderToRandomBytes(2048), azblob.BlobAccessConditions{})
+   _, err = blob.UploadPages(context.Background(), 2048, getReaderToRandomBytes(2048), azblob.BlobAccessConditions{})
    c.Assert(err, chk.IsNil)

-   pageList, err := blob.GetPageRangesDiff(context.Background(), azblob.BlobRange{Offset: 0, Count: 4095}, snapshotResp.Snapshot(), azblob.BlobAccessConditions{})
+   pageList, err := blob.GetPageRangesDiff(context.Background(), 0, 4096, snapshotResp.Snapshot(), azblob.BlobAccessConditions{})
    c.Assert(err, chk.IsNil)
    c.Assert(pageList.PageRange, chk.HasLen, 1)
-   c.Assert(pageList.PageRange[0].Start, chk.Equals, int32(2048))
-   c.Assert(pageList.PageRange[0].End, chk.Equals, int32(4095))
+   c.Assert(pageList.PageRange[0].Start, chk.Equals, int64(2048))
+   c.Assert(pageList.PageRange[0].End, chk.Equals, int64(4095))

-   clearResp, err := blob.ClearPages(context.Background(), azblob.PageRange{Start: 2048, End: 4095}, azblob.BlobAccessConditions{})
+   clearResp, err := blob.ClearPages(context.Background(), 2048, 2048, azblob.BlobAccessConditions{})
    c.Assert(err, chk.IsNil)
    c.Assert(clearResp.Response().StatusCode, chk.Equals, 201)

-   pageList, err = blob.GetPageRangesDiff(context.Background(), azblob.BlobRange{Offset: 0, Count: 4095}, snapshotResp.Snapshot(), azblob.BlobAccessConditions{})
+   pageList, err = blob.GetPageRangesDiff(context.Background(), 0, 4095, snapshotResp.Snapshot(), azblob.BlobAccessConditions{})
    c.Assert(err, chk.IsNil)
    c.Assert(pageList.PageRange, chk.HasLen, 0)
}
@@ -77,18 +77,18 @@ func (b *PageBlobURLSuite) TestIncrementalCopy(c *chk.C) {
    bsu := getBSU()
    container, _ := createNewContainer(c, bsu)
    defer delContainer(c, container)
-   _, err := container.SetPermissions(context.Background(), azblob.PublicAccessBlob, nil, azblob.ContainerAccessConditions{})
+   _, err := container.SetAccessPolicy(context.Background(), azblob.PublicAccessBlob, nil, azblob.ContainerAccessConditions{})
    c.Assert(err, chk.IsNil)

    srcBlob, _ := createNewPageBlob(c, container)
-   _, err = srcBlob.PutPages(context.Background(), azblob.PageRange{Start: 0, End: 1023}, getReaderToRandomBytes(1024), azblob.BlobAccessConditions{})
+   _, err = srcBlob.UploadPages(context.Background(), 0, getReaderToRandomBytes(1024), azblob.BlobAccessConditions{})
    c.Assert(err, chk.IsNil)

    snapshotResp, err := srcBlob.CreateSnapshot(context.Background(), nil, azblob.BlobAccessConditions{})
    c.Assert(err, chk.IsNil)

    dstBlob := container.NewPageBlobURL(generateBlobName())

-   resp, err := dstBlob.StartIncrementalCopy(context.Background(), srcBlob.URL(), snapshotResp.Snapshot(), azblob.BlobAccessConditions{})
+   resp, err := dstBlob.StartCopyIncremental(context.Background(), srcBlob.URL(), snapshotResp.Snapshot(), azblob.BlobAccessConditions{})
    c.Assert(err, chk.IsNil)
    c.Assert(resp.Response().StatusCode, chk.Equals, 202)
    c.Assert(resp.LastModified().IsZero(), chk.Equals, false)
@@ -116,7 +116,7 @@ func (b *PageBlobURLSuite) TestResizePageBlob(c *chk.C) {
    c.Assert(err, chk.IsNil)
    c.Assert(resp.Response().StatusCode, chk.Equals, 200)

-   resp2, err := blob.GetPropertiesAndMetadata(ctx, azblob.BlobAccessConditions{})
+   resp2, err := blob.GetProperties(ctx, azblob.BlobAccessConditions{})
    c.Assert(err, chk.IsNil)
    c.Assert(resp2.ContentLength(), chk.Equals, int64(8192))
}
@ -128,15 +128,15 @@ func (b *PageBlobURLSuite) TestPageSequenceNumbers(c *chk.C) {
defer delContainer(c, container) defer delContainer(c, container)
resp, err := blob.SetSequenceNumber(context.Background(), azblob.SequenceNumberActionIncrement, 0, azblob.BlobHTTPHeaders{}, azblob.BlobAccessConditions{}) resp, err := blob.UpdateSequenceNumber(context.Background(), azblob.SequenceNumberActionIncrement, 0, azblob.BlobAccessConditions{})
c.Assert(err, chk.IsNil) c.Assert(err, chk.IsNil)
c.Assert(resp.Response().StatusCode, chk.Equals, 200) c.Assert(resp.Response().StatusCode, chk.Equals, 200)
resp, err = blob.SetSequenceNumber(context.Background(), azblob.SequenceNumberActionMax, 7, azblob.BlobHTTPHeaders{}, azblob.BlobAccessConditions{}) resp, err = blob.UpdateSequenceNumber(context.Background(), azblob.SequenceNumberActionMax, 7, azblob.BlobAccessConditions{})
c.Assert(err, chk.IsNil) c.Assert(err, chk.IsNil)
c.Assert(resp.Response().StatusCode, chk.Equals, 200) c.Assert(resp.Response().StatusCode, chk.Equals, 200)
resp, err = blob.SetSequenceNumber(context.Background(), azblob.SequenceNumberActionUpdate, 11, azblob.BlobHTTPHeaders{}, azblob.BlobAccessConditions{}) resp, err = blob.UpdateSequenceNumber(context.Background(), azblob.SequenceNumberActionUpdate, 11, azblob.BlobAccessConditions{})
c.Assert(err, chk.IsNil) c.Assert(err, chk.IsNil)
c.Assert(resp.Response().StatusCode, chk.Equals, 200) c.Assert(resp.Response().StatusCode, chk.Equals, 200)
} }
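
For reference, a minimal sketch (not part of this commit) of the renamed page-blob calls exercised by these tests; it assumes blob is an existing azblob.PageBlobURL and relies only on the signatures visible in this diff: offsets and counts are now plain integers rather than PageRange/BlobRange structs, and UpdateSequenceNumber drops the BlobHTTPHeaders parameter.

package azblob_test

import (
    "bytes"
    "context"

    "github.com/Azure/azure-storage-blob-go/2017-07-29/azblob"
)

// demoRenamedPageBlobAPIs is a hypothetical sketch of the renamed calls above.
func demoRenamedPageBlobAPIs(ctx context.Context, blob azblob.PageBlobURL) error {
    // Write one 2 KB page-aligned chunk at offset 0 (UploadPages replaces PutPages).
    if _, err := blob.UploadPages(ctx, 0, bytes.NewReader(make([]byte, 2048)), azblob.BlobAccessConditions{}); err != nil {
        return err
    }
    // Clear the second kilobyte: (offset, count) replaces PageRange{Start, End}.
    if _, err := blob.ClearPages(ctx, 1024, 1024, azblob.BlobAccessConditions{}); err != nil {
        return err
    }
    // Bump the sequence number (UpdateSequenceNumber replaces SetSequenceNumber).
    _, err := blob.UpdateSequenceNumber(ctx, azblob.SequenceNumberActionIncrement, 0, azblob.BlobAccessConditions{})
    return err
}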

2017-07-29/azblob/zt_url_service_test.go Normal file → Executable file

@@ -1,109 +1,109 @@
 package azblob_test
 import (
 "context"
 "github.com/Azure/azure-storage-blob-go/2017-07-29/azblob"
 chk "gopkg.in/check.v1" // go get gopkg.in/check.v1
 )
 type StorageAccountSuite struct{}
 var _ = chk.Suite(&StorageAccountSuite{})
 /*func (s *StorageAccountSuite) TestGetSetProperties(c *chk.C) {
 sa := getStorageAccount(c)
 setProps := StorageServiceProperties{}
 resp, err := sa.SetProperties(context.Background(), setProps)
 c.Assert(err, chk.IsNil)
 c.Assert(resp.Response().StatusCode, chk.Equals, 202)
 c.Assert(resp.RequestID(), chk.Not(chk.Equals), "")
 c.Assert(resp.Version(), chk.Not(chk.Equals), "")
 props, err := sa.GetProperties(context.Background())
 c.Assert(err, chk.IsNil)
 c.Assert(props.Response().StatusCode, chk.Equals, 200)
 c.Assert(props.RequestID(), chk.Not(chk.Equals), "")
 c.Assert(props.Version(), chk.Not(chk.Equals), "")
 c.Assert(props.Logging, chk.NotNil)
 c.Assert(props.HourMetrics, chk.NotNil)
 c.Assert(props.MinuteMetrics, chk.NotNil)
 c.Assert(props.Cors, chk.HasLen, 0)
 c.Assert(props.DefaultServiceVersion, chk.IsNil) // TODO: this seems like a bug
 }
 func (s *StorageAccountSuite) TestGetStatus(c *chk.C) {
 sa := getStorageAccount(c)
 if !strings.Contains(sa.URL().Path, "-secondary") {
 c.Skip("only applicable on secondary storage accounts")
 }
 stats, err := sa.GetStats(context.Background())
 c.Assert(err, chk.IsNil)
 c.Assert(stats, chk.NotNil)
 }*/
 func (s *StorageAccountSuite) TestListContainers(c *chk.C) {
 sa := getBSU()
-resp, err := sa.ListContainers(context.Background(), azblob.Marker{}, azblob.ListContainersOptions{Prefix: containerPrefix})
+resp, err := sa.ListContainersSegment(context.Background(), azblob.Marker{}, azblob.ListContainersSegmentOptions{Prefix: containerPrefix})
 c.Assert(err, chk.IsNil)
 c.Assert(resp.Response().StatusCode, chk.Equals, 200)
 c.Assert(resp.RequestID(), chk.Not(chk.Equals), "")
 c.Assert(resp.Version(), chk.Not(chk.Equals), "")
 c.Assert(resp.Containers, chk.HasLen, 0)
 c.Assert(resp.ServiceEndpoint, chk.NotNil)
 container, _ := createNewContainer(c, sa)
 defer delContainer(c, container)
 md := azblob.Metadata{
 "foo": "foovalue",
 "bar": "barvalue",
 }
 _, err = container.SetMetadata(context.Background(), md, azblob.ContainerAccessConditions{})
 c.Assert(err, chk.IsNil)
-resp, err = sa.ListContainers(context.Background(), azblob.Marker{}, azblob.ListContainersOptions{Detail: azblob.ListContainersDetail{Metadata: true}, Prefix: containerPrefix})
+resp, err = sa.ListContainersSegment(context.Background(), azblob.Marker{}, azblob.ListContainersSegmentOptions{Detail: azblob.ListContainersDetail{Metadata: true}, Prefix: containerPrefix})
 c.Assert(err, chk.IsNil)
 c.Assert(resp.Containers, chk.HasLen, 1)
 c.Assert(resp.Containers[0].Name, chk.NotNil)
 c.Assert(resp.Containers[0].Properties, chk.NotNil)
 c.Assert(resp.Containers[0].Properties.LastModified, chk.NotNil)
 c.Assert(resp.Containers[0].Properties.Etag, chk.NotNil)
 c.Assert(resp.Containers[0].Properties.LeaseStatus, chk.Equals, azblob.LeaseStatusUnlocked)
 c.Assert(resp.Containers[0].Properties.LeaseState, chk.Equals, azblob.LeaseStateAvailable)
 c.Assert(string(resp.Containers[0].Properties.LeaseDuration), chk.Equals, "")
 c.Assert(string(resp.Containers[0].Properties.PublicAccess), chk.Equals, string(azblob.PublicAccessNone))
 c.Assert(resp.Containers[0].Metadata, chk.DeepEquals, md)
 }
 func (s *StorageAccountSuite) TestListContainersPaged(c *chk.C) {
 sa := getBSU()
 const numContainers = 4
 const maxResultsPerPage = 2
 const pagedContainersPrefix = "azblobspagedtest"
 containers := make([]azblob.ContainerURL, numContainers)
 for i := 0; i < numContainers; i++ {
 containers[i], _ = createNewContainerWithSuffix(c, sa, pagedContainersPrefix)
 }
 defer func() {
 for i := range containers {
 delContainer(c, containers[i])
 }
 }()
 marker := azblob.Marker{}
 iterations := numContainers / maxResultsPerPage
 for i := 0; i < iterations; i++ {
-resp, err := sa.ListContainers(context.Background(), marker, azblob.ListContainersOptions{MaxResults: maxResultsPerPage, Prefix: containerPrefix + pagedContainersPrefix})
+resp, err := sa.ListContainersSegment(context.Background(), marker, azblob.ListContainersSegmentOptions{MaxResults: maxResultsPerPage, Prefix: containerPrefix + pagedContainersPrefix})
 c.Assert(err, chk.IsNil)
 c.Assert(resp.Containers, chk.HasLen, maxResultsPerPage)
 hasMore := i < iterations-1
 c.Assert(resp.NextMarker.NotDone(), chk.Equals, hasMore)
 marker = resp.NextMarker
 }
 }
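
A hypothetical sketch (not part of this commit) of draining every page of the renamed ListContainersSegment API; it assumes su is an azblob.ServiceURL and mirrors the marker loop in the test above, where NextMarker.NotDone() turns false on the final page.

package azblob_test

import (
    "context"
    "fmt"

    "github.com/Azure/azure-storage-blob-go/2017-07-29/azblob"
)

// listAllContainers walks every result page of ListContainersSegment.
func listAllContainers(ctx context.Context, su azblob.ServiceURL) error {
    for marker := (azblob.Marker{}); marker.NotDone(); {
        resp, err := su.ListContainersSegment(ctx, marker, azblob.ListContainersSegmentOptions{})
        if err != nil {
            return err
        }
        for _, c := range resp.Containers {
            fmt.Println(c.Name)
        }
        // NextMarker is empty on the last page, which ends the loop.
        marker = resp.NextMarker
    }
    return nil
}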


@@ -8,6 +8,7 @@ import (
 "fmt"
 "github.com/Azure/azure-pipeline-go/pipeline"
 "io"
+"io/ioutil"
 "net/http"
 "net/url"
 "time"
@@ -27,8 +28,9 @@ func newAppendBlobsClient(url url.URL, p pipeline.Pipeline) appendBlobsClient {
 // Block operation is permitted only if the blob was created with x-ms-blob-type set to AppendBlob. Append Block is
 // supported only on version 2015-02-21 version or later.
 //
-// body is initial data body will be closed upon successful return. Callers should ensure closure when receiving an
-// error.timeout is the timeout parameter is expressed in seconds. For more information, see <a
+// contentLength is the length of the request. body is initial data body will be closed upon successful return. Callers
+// should ensure closure when receiving an error.timeout is the timeout parameter is expressed in seconds. For more
+// information, see <a
 // href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
 // Timeouts for Blob Service Operations.</a> leaseID is if specified, the operation only succeeds if the container's
 // lease is active and matches this ID. maxSize is optional conditional header. The max length in bytes permitted for
@@ -43,16 +45,14 @@ func newAppendBlobsClient(url url.URL, p pipeline.Pipeline) appendBlobsClient {
 // operate only on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs without a
 // matching value. requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded
 // in the analytics logs when storage analytics logging is enabled.
-func (client appendBlobsClient) AppendBlock(ctx context.Context, body io.ReadSeeker, timeout *int32, leaseID *string, maxSize *int32, appendPosition *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, requestID *string) (*AppendBlobsAppendBlockResponse, error) {
+func (client appendBlobsClient) AppendBlock(ctx context.Context, contentLength int64, body io.ReadSeeker, timeout *int32, leaseID *string, maxSize *int32, appendPosition *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, requestID *string) (*AppendBlobsAppendBlockResponse, error) {
 if err := validate([]validation{
-{targetValue: body,
-constraints: []constraint{{target: "body", name: null, rule: true, chain: nil}}},
 {targetValue: timeout,
 constraints: []constraint{{target: "timeout", name: null, rule: false,
 chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
 return nil, err
 }
-req, err := client.appendBlockPreparer(body, timeout, leaseID, maxSize, appendPosition, ifModifiedSince, ifUnmodifiedSince, ifMatches, ifNoneMatch, requestID)
+req, err := client.appendBlockPreparer(contentLength, body, timeout, leaseID, maxSize, appendPosition, ifModifiedSince, ifUnmodifiedSince, ifMatches, ifNoneMatch, requestID)
 if err != nil {
 return nil, err
 }
@@ -64,7 +64,7 @@ func (client appendBlobsClient) AppendBlock(ctx context.Context, body io.ReadSee
 }
 // appendBlockPreparer prepares the AppendBlock request.
-func (client appendBlobsClient) appendBlockPreparer(body io.ReadSeeker, timeout *int32, leaseID *string, maxSize *int32, appendPosition *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
+func (client appendBlobsClient) appendBlockPreparer(contentLength int64, body io.ReadSeeker, timeout *int32, leaseID *string, maxSize *int32, appendPosition *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
 req, err := pipeline.NewRequest("PUT", client.url, body)
 if err != nil {
 return req, pipeline.NewError(err, "failed to create request")
@@ -75,6 +75,7 @@ func (client appendBlobsClient) appendBlockPreparer(body io.ReadSeeker, timeout
 }
 params.Set("comp", "appendblock")
 req.URL.RawQuery = params.Encode()
+req.Header.Set("Content-Length", fmt.Sprintf("%v", contentLength))
 if leaseID != nil {
 req.Header.Set("x-ms-lease-id", *leaseID)
 }
@@ -109,5 +110,122 @@ func (client appendBlobsClient) appendBlockResponder(resp pipeline.Response) (pi
 if resp == nil {
 return nil, err
 }
+io.Copy(ioutil.Discard, resp.Response().Body)
+resp.Response().Body.Close()
 return &AppendBlobsAppendBlockResponse{rawResponse: resp.Response()}, err
 }
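
Since AppendBlock (like the other regenerated methods) now takes an explicit contentLength, a caller holding only an io.ReadSeeker needs to measure it first. A minimal sketch of one way to do that, assuming the body tolerates seeking twice; bodyLength is a hypothetical helper, not part of this commit:

package azblob

import "io"

// bodyLength measures an io.ReadSeeker by seeking to its end and back.
func bodyLength(body io.ReadSeeker) (int64, error) {
    n, err := body.Seek(0, io.SeekEnd)
    if err != nil {
        return 0, err
    }
    // Rewind so the request can read the data from the beginning.
    _, err = body.Seek(0, io.SeekStart)
    return n, err
}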
// Create the Create Append Blob operation creates a new append blob.
//
// contentLength is the length of the request. timeout is the timeout parameter is expressed in seconds. For more
// information, see <a
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
// Timeouts for Blob Service Operations.</a> blobContentType is optional. Sets the blob's content type. If specified,
// this property is stored with the blob and returned with a read request. blobContentEncoding is optional. Sets the
// blob's content encoding. If specified, this property is stored with the blob and returned with a read request.
// blobContentLanguage is optional. Set the blob's content language. If specified, this property is stored with the
// blob and returned with a read request. blobContentMD5 is optional. An MD5 hash of the blob content. Note that this
// hash is not validated, as the hashes for the individual blocks were validated when each was uploaded.
// blobCacheControl is optional. Sets the blob's cache control. If specified, this property is stored with the blob and
// returned with a read request. metadata is optional. Specifies a user-defined name-value pair associated with the
// blob. If no name-value pairs are specified, the operation will copy the metadata from the source blob or file to the
// destination blob. If one or more name-value pairs are specified, the destination blob is created with the specified
// metadata, and metadata is not copied from the source blob or file. Note that beginning with version 2009-09-19,
// metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, Blobs, and
// Metadata for more information. leaseID is if specified, the operation only succeeds if the container's lease is
// active and matches this ID. blobContentDisposition is optional. Sets the blob's Content-Disposition header.
// ifModifiedSince is specify this header value to operate only on a blob if it has been modified since the specified
// date/time. ifUnmodifiedSince is specify this header value to operate only on a blob if it has not been modified
// since the specified date/time. ifMatches is specify an ETag value to operate only on blobs with a matching value.
// ifNoneMatch is specify an ETag value to operate only on blobs without a matching value. requestID is provides a
// client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
// analytics logging is enabled.
func (client appendBlobsClient) Create(ctx context.Context, contentLength int64, timeout *int32, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, blobCacheControl *string, metadata map[string]string, leaseID *string, blobContentDisposition *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, requestID *string) (*AppendBlobsCreateResponse, error) {
if err := validate([]validation{
{targetValue: timeout,
constraints: []constraint{{target: "timeout", name: null, rule: false,
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}},
{targetValue: metadata,
constraints: []constraint{{target: "metadata", name: null, rule: false,
chain: []constraint{{target: "metadata", name: pattern, rule: `^[a-zA-Z]+$`, chain: nil}}}}}}); err != nil {
return nil, err
}
req, err := client.createPreparer(contentLength, timeout, blobContentType, blobContentEncoding, blobContentLanguage, blobContentMD5, blobCacheControl, metadata, leaseID, blobContentDisposition, ifModifiedSince, ifUnmodifiedSince, ifMatches, ifNoneMatch, requestID)
if err != nil {
return nil, err
}
resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.createResponder}, req)
if err != nil {
return nil, err
}
return resp.(*AppendBlobsCreateResponse), err
}
// createPreparer prepares the Create request.
func (client appendBlobsClient) createPreparer(contentLength int64, timeout *int32, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, blobCacheControl *string, metadata map[string]string, leaseID *string, blobContentDisposition *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
req, err := pipeline.NewRequest("PUT", client.url, nil)
if err != nil {
return req, pipeline.NewError(err, "failed to create request")
}
params := req.URL.Query()
if timeout != nil {
params.Set("timeout", fmt.Sprintf("%v", *timeout))
}
req.URL.RawQuery = params.Encode()
req.Header.Set("Content-Length", fmt.Sprintf("%v", contentLength))
if blobContentType != nil {
req.Header.Set("x-ms-blob-content-type", *blobContentType)
}
if blobContentEncoding != nil {
req.Header.Set("x-ms-blob-content-encoding", *blobContentEncoding)
}
if blobContentLanguage != nil {
req.Header.Set("x-ms-blob-content-language", *blobContentLanguage)
}
if blobContentMD5 != nil {
req.Header.Set("x-ms-blob-content-md5", fmt.Sprintf("%v", blobContentMD5))
}
if blobCacheControl != nil {
req.Header.Set("x-ms-blob-cache-control", *blobCacheControl)
}
if metadata != nil {
for k, v := range metadata {
req.Header.Set("x-ms-meta-"+k, v)
}
}
if leaseID != nil {
req.Header.Set("x-ms-lease-id", *leaseID)
}
if blobContentDisposition != nil {
req.Header.Set("x-ms-blob-content-disposition", *blobContentDisposition)
}
if ifModifiedSince != nil {
req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123))
}
if ifUnmodifiedSince != nil {
req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123))
}
if ifMatches != nil {
req.Header.Set("If-Match", string(*ifMatches))
}
if ifNoneMatch != nil {
req.Header.Set("If-None-Match", string(*ifNoneMatch))
}
req.Header.Set("x-ms-version", ServiceVersion)
if requestID != nil {
req.Header.Set("x-ms-client-request-id", *requestID)
}
req.Header.Set("x-ms-blob-type", "AppendBlob")
return req, nil
}
// createResponder handles the response to the Create request.
func (client appendBlobsClient) createResponder(resp pipeline.Response) (pipeline.Response, error) {
err := validateResponse(resp, http.StatusOK, http.StatusCreated)
if resp == nil {
return nil, err
}
io.Copy(ioutil.Discard, resp.Response().Body)
resp.Response().Body.Close()
return &AppendBlobsCreateResponse{rawResponse: resp.Response()}, err
}
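
The io.Copy(ioutil.Discard, ...) followed by Body.Close() that the regenerated responders now perform is the standard Go idiom for letting the HTTP transport reuse the underlying connection. A standalone sketch of the same pattern; drainAndClose is hypothetical, not part of this commit:

package azblob

import (
    "io"
    "io/ioutil"
    "net/http"
)

// drainAndClose reads a response body to EOF and closes it, enabling keep-alive reuse.
func drainAndClose(resp *http.Response) {
    if resp == nil || resp.Body == nil {
        return
    }
    io.Copy(ioutil.Discard, resp.Body)
    resp.Body.Close()
}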

File diff not shown because of its large size.


@@ -26,6 +26,133 @@ func newBlockBlobsClient(url url.URL, p pipeline.Pipeline) blockBlobsClient {
 return blockBlobsClient{newManagementClient(url, p)}
 }
// CommitBlockList the Commit Block List operation writes a blob by specifying the list of block IDs that make up the
// blob. In order to be written as part of a blob, a block must have been successfully written to the server in a prior
// Put Block operation. You can call Put Block List to update a blob by uploading only those blocks that have changed,
// then committing the new and existing blocks together. You can do this by specifying whether to commit a block from
// the committed block list or from the uncommitted block list, or to commit the most recently uploaded version of the
// block, whichever list it may belong to.
//
// timeout is the timeout parameter is expressed in seconds. For more information, see <a
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
// Timeouts for Blob Service Operations.</a> blobCacheControl is optional. Sets the blob's cache control. If specified,
// this property is stored with the blob and returned with a read request. blobContentType is optional. Sets the blob's
// content type. If specified, this property is stored with the blob and returned with a read request.
// blobContentEncoding is optional. Sets the blob's content encoding. If specified, this property is stored with the
// blob and returned with a read request. blobContentLanguage is optional. Set the blob's content language. If
// specified, this property is stored with the blob and returned with a read request. blobContentMD5 is optional. An
// MD5 hash of the blob content. Note that this hash is not validated, as the hashes for the individual blocks were
// validated when each was uploaded. metadata is optional. Specifies a user-defined name-value pair associated with the
// blob. If no name-value pairs are specified, the operation will copy the metadata from the source blob or file to the
// destination blob. If one or more name-value pairs are specified, the destination blob is created with the specified
// metadata, and metadata is not copied from the source blob or file. Note that beginning with version 2009-09-19,
// metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, Blobs, and
// Metadata for more information. leaseID is if specified, the operation only succeeds if the container's lease is
// active and matches this ID. blobContentDisposition is optional. Sets the blob's Content-Disposition header.
// ifModifiedSince is specify this header value to operate only on a blob if it has been modified since the specified
// date/time. ifUnmodifiedSince is specify this header value to operate only on a blob if it has not been modified
// since the specified date/time. ifMatches is specify an ETag value to operate only on blobs with a matching value.
// ifNoneMatch is specify an ETag value to operate only on blobs without a matching value. requestID is provides a
// client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
// analytics logging is enabled.
func (client blockBlobsClient) CommitBlockList(ctx context.Context, blocks BlockLookupList, timeout *int32, blobCacheControl *string, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, metadata map[string]string, leaseID *string, blobContentDisposition *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, requestID *string) (*BlockBlobsCommitBlockListResponse, error) {
if err := validate([]validation{
{targetValue: timeout,
constraints: []constraint{{target: "timeout", name: null, rule: false,
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}},
{targetValue: metadata,
constraints: []constraint{{target: "metadata", name: null, rule: false,
chain: []constraint{{target: "metadata", name: pattern, rule: `^[a-zA-Z]+$`, chain: nil}}}}}}); err != nil {
return nil, err
}
req, err := client.commitBlockListPreparer(blocks, timeout, blobCacheControl, blobContentType, blobContentEncoding, blobContentLanguage, blobContentMD5, metadata, leaseID, blobContentDisposition, ifModifiedSince, ifUnmodifiedSince, ifMatches, ifNoneMatch, requestID)
if err != nil {
return nil, err
}
resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.commitBlockListResponder}, req)
if err != nil {
return nil, err
}
return resp.(*BlockBlobsCommitBlockListResponse), err
}
// commitBlockListPreparer prepares the CommitBlockList request.
func (client blockBlobsClient) commitBlockListPreparer(blocks BlockLookupList, timeout *int32, blobCacheControl *string, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, metadata map[string]string, leaseID *string, blobContentDisposition *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
req, err := pipeline.NewRequest("PUT", client.url, nil)
if err != nil {
return req, pipeline.NewError(err, "failed to create request")
}
params := req.URL.Query()
if timeout != nil {
params.Set("timeout", fmt.Sprintf("%v", *timeout))
}
params.Set("comp", "blocklist")
req.URL.RawQuery = params.Encode()
if blobCacheControl != nil {
req.Header.Set("x-ms-blob-cache-control", *blobCacheControl)
}
if blobContentType != nil {
req.Header.Set("x-ms-blob-content-type", *blobContentType)
}
if blobContentEncoding != nil {
req.Header.Set("x-ms-blob-content-encoding", *blobContentEncoding)
}
if blobContentLanguage != nil {
req.Header.Set("x-ms-blob-content-language", *blobContentLanguage)
}
if blobContentMD5 != nil {
req.Header.Set("x-ms-blob-content-md5", fmt.Sprintf("%v", blobContentMD5))
}
if metadata != nil {
for k, v := range metadata {
req.Header.Set("x-ms-meta-"+k, v)
}
}
if leaseID != nil {
req.Header.Set("x-ms-lease-id", *leaseID)
}
if blobContentDisposition != nil {
req.Header.Set("x-ms-blob-content-disposition", *blobContentDisposition)
}
if ifModifiedSince != nil {
req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123))
}
if ifUnmodifiedSince != nil {
req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123))
}
if ifMatches != nil {
req.Header.Set("If-Match", string(*ifMatches))
}
if ifNoneMatch != nil {
req.Header.Set("If-None-Match", string(*ifNoneMatch))
}
req.Header.Set("x-ms-version", ServiceVersion)
if requestID != nil {
req.Header.Set("x-ms-client-request-id", *requestID)
}
b, err := xml.Marshal(blocks)
if err != nil {
return req, pipeline.NewError(err, "failed to marshal request body")
}
req.Header.Set("Content-Type", "application/xml")
err = req.SetBody(bytes.NewReader(b))
if err != nil {
return req, pipeline.NewError(err, "failed to set request body")
}
return req, nil
}
// commitBlockListResponder handles the response to the CommitBlockList request.
func (client blockBlobsClient) commitBlockListResponder(resp pipeline.Response) (pipeline.Response, error) {
err := validateResponse(resp, http.StatusOK, http.StatusCreated)
if resp == nil {
return nil, err
}
io.Copy(ioutil.Discard, resp.Response().Body)
resp.Response().Body.Close()
return &BlockBlobsCommitBlockListResponse{rawResponse: resp.Response()}, err
}
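
A hypothetical within-package sketch (not part of this commit) of the stage-then-commit flow these methods enable; it assumes BlockLookupList exposes a Latest []string field matching the <BlockList><Latest> XML document the service expects:

package azblob

import (
    "bytes"
    "context"
)

// stageAndCommit stages each chunk as a block, then commits them in order.
func stageAndCommit(ctx context.Context, client blockBlobsClient, ids []string, chunks [][]byte) error {
    for i, id := range ids {
        // StageBlock now takes the explicit content length of each block body.
        if _, err := client.StageBlock(ctx, id, int64(len(chunks[i])), bytes.NewReader(chunks[i]), nil, nil, nil); err != nil {
            return err
        }
    }
    // Commit the staged blocks; all optional parameters are left nil here.
    _, err := client.CommitBlockList(ctx, BlockLookupList{Latest: ids}, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil)
    return err
}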
 // GetBlockList the Get Block List operation retrieves the list of blocks that have been uploaded as part of a block
 // blob
 //
@@ -38,7 +165,7 @@ func newBlockBlobsClient(url url.URL, p pipeline.Pipeline) blockBlobsClient {
 // Timeouts for Blob Service Operations.</a> leaseID is if specified, the operation only succeeds if the container's
 // lease is active and matches this ID. requestID is provides a client-generated, opaque value with a 1 KB character
 // limit that is recorded in the analytics logs when storage analytics logging is enabled.
-func (client blockBlobsClient) GetBlockList(ctx context.Context, listType BlockListType, snapshot *time.Time, timeout *int32, leaseID *string, requestID *string) (*BlockList, error) {
+func (client blockBlobsClient) GetBlockList(ctx context.Context, listType BlockListType, snapshot *string, timeout *int32, leaseID *string, requestID *string) (*BlockList, error) {
 if err := validate([]validation{
 {targetValue: timeout,
 constraints: []constraint{{target: "timeout", name: null, rule: false,
@@ -57,14 +184,14 @@ func (client blockBlobsClient) GetBlockList(ctx context.Context, listType BlockL
 }
 // getBlockListPreparer prepares the GetBlockList request.
-func (client blockBlobsClient) getBlockListPreparer(listType BlockListType, snapshot *time.Time, timeout *int32, leaseID *string, requestID *string) (pipeline.Request, error) {
+func (client blockBlobsClient) getBlockListPreparer(listType BlockListType, snapshot *string, timeout *int32, leaseID *string, requestID *string) (pipeline.Request, error) {
 req, err := pipeline.NewRequest("GET", client.url, nil)
 if err != nil {
 return req, pipeline.NewError(err, "failed to create request")
 }
 params := req.URL.Query()
-if snapshot != nil {
-params.Set("snapshot", (*snapshot).Format(rfc3339Format))
+if snapshot != nil && len(*snapshot) > 0 {
+params.Set("snapshot", *snapshot)
 }
 params.Set("blocklisttype", fmt.Sprintf("%v", listType))
 if timeout != nil {
@@ -106,39 +233,37 @@ func (client blockBlobsClient) getBlockListResponder(resp pipeline.Response) (pi
 return result, nil
 }
-// PutBlock the Put Block operation creates a new block to be committed as part of a blob
+// StageBlock the Stage Block operation creates a new block to be committed as part of a blob
 //
 // blockID is a valid Base64 string value that identifies the block. Prior to encoding, the string must be less than or
 // equal to 64 bytes in size. For a given blob, the length of the value specified for the blockid parameter must be the
-// same size for each block. body is initial data body will be closed upon successful return. Callers should ensure
-// closure when receiving an error.timeout is the timeout parameter is expressed in seconds. For more information, see
-// <a
+// same size for each block. contentLength is the length of the request. body is initial data body will be closed upon
+// successful return. Callers should ensure closure when receiving an error.timeout is the timeout parameter is
+// expressed in seconds. For more information, see <a
 // href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
 // Timeouts for Blob Service Operations.</a> leaseID is if specified, the operation only succeeds if the container's
 // lease is active and matches this ID. requestID is provides a client-generated, opaque value with a 1 KB character
 // limit that is recorded in the analytics logs when storage analytics logging is enabled.
-func (client blockBlobsClient) PutBlock(ctx context.Context, blockID string, body io.ReadSeeker, timeout *int32, leaseID *string, requestID *string) (*BlockBlobsPutBlockResponse, error) {
+func (client blockBlobsClient) StageBlock(ctx context.Context, blockID string, contentLength int64, body io.ReadSeeker, timeout *int32, leaseID *string, requestID *string) (*BlockBlobsStageBlockResponse, error) {
 if err := validate([]validation{
-{targetValue: body,
-constraints: []constraint{{target: "body", name: null, rule: true, chain: nil}}},
 {targetValue: timeout,
 constraints: []constraint{{target: "timeout", name: null, rule: false,
 chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
 return nil, err
 }
-req, err := client.putBlockPreparer(blockID, body, timeout, leaseID, requestID)
+req, err := client.stageBlockPreparer(blockID, contentLength, body, timeout, leaseID, requestID)
 if err != nil {
 return nil, err
 }
-resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.putBlockResponder}, req)
+resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.stageBlockResponder}, req)
 if err != nil {
 return nil, err
 }
-return resp.(*BlockBlobsPutBlockResponse), err
+return resp.(*BlockBlobsStageBlockResponse), err
 }
-// putBlockPreparer prepares the PutBlock request.
-func (client blockBlobsClient) putBlockPreparer(blockID string, body io.ReadSeeker, timeout *int32, leaseID *string, requestID *string) (pipeline.Request, error) {
+// stageBlockPreparer prepares the StageBlock request.
+func (client blockBlobsClient) stageBlockPreparer(blockID string, contentLength int64, body io.ReadSeeker, timeout *int32, leaseID *string, requestID *string) (pipeline.Request, error) {
 req, err := pipeline.NewRequest("PUT", client.url, body)
 if err != nil {
 return req, pipeline.NewError(err, "failed to create request")
@@ -150,6 +275,7 @@ func (client blockBlobsClient) putBlockPreparer(blockID string, body io.ReadSeek
 }
 params.Set("comp", "block")
 req.URL.RawQuery = params.Encode()
+req.Header.Set("Content-Length", fmt.Sprintf("%v", contentLength))
 if leaseID != nil {
 req.Header.Set("x-ms-lease-id", *leaseID)
 }
@@ -160,32 +286,34 @@ func (client blockBlobsClient) putBlockPreparer(blockID string, body io.ReadSeek
 return req, nil
 }
-// putBlockResponder handles the response to the PutBlock request.
-func (client blockBlobsClient) putBlockResponder(resp pipeline.Response) (pipeline.Response, error) {
+// stageBlockResponder handles the response to the StageBlock request.
+func (client blockBlobsClient) stageBlockResponder(resp pipeline.Response) (pipeline.Response, error) {
 err := validateResponse(resp, http.StatusOK, http.StatusCreated)
 if resp == nil {
 return nil, err
 }
-return &BlockBlobsPutBlockResponse{rawResponse: resp.Response()}, err
+io.Copy(ioutil.Discard, resp.Response().Body)
+resp.Response().Body.Close()
+return &BlockBlobsStageBlockResponse{rawResponse: resp.Response()}, err
 }
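
StageBlock's comment requires base64 block IDs whose pre-encoding values are at most 64 bytes and equally sized for a given blob. A hypothetical helper (not part of this commit) that satisfies both constraints:

package azblob

import (
    "encoding/base64"
    "fmt"
)

// exampleBlockID builds fixed-width, base64-encoded block IDs: every ID encodes
// an 8-byte decimal string, so all IDs for a blob have the same length.
func exampleBlockID(i int) string {
    return base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf("%08d", i)))
}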
-// PutBlockList the Put Block List operation writes a blob by specifying the list of block IDs that make up the blob.
-// In order to be written as part of a blob, a block must have been successfully written to the server in a prior Put
-// Block operation. You can call Put Block List to update a blob by uploading only those blocks that have changed, then
-// committing the new and existing blocks together. You can do this by specifying whether to commit a block from the
-// committed block list or from the uncommitted block list, or to commit the most recently uploaded version of the
-// block, whichever list it may belong to.
+// Upload the Upload Block Blob operation updates the content of an existing block blob. Updating an existing block
+// blob overwrites any existing metadata on the blob. Partial updates are not supported with Put Blob; the content of
+// the existing blob is overwritten with the content of the new blob. To perform a partial update of the content of a
+// block blob, use the Put Block List operation.
 //
-// timeout is the timeout parameter is expressed in seconds. For more information, see <a
+// contentLength is the length of the request. body is initial data body will be closed upon successful return. Callers
+// should ensure closure when receiving an error.timeout is the timeout parameter is expressed in seconds. For more
+// information, see <a
 // href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
-// Timeouts for Blob Service Operations.</a> blobCacheControl is optional. Sets the blob's cache control. If specified,
-// this property is stored with the blob and returned with a read request. blobContentType is optional. Sets the blob's
-// content type. If specified, this property is stored with the blob and returned with a read request.
-// blobContentEncoding is optional. Sets the blob's content encoding. If specified, this property is stored with the
-// blob and returned with a read request. blobContentLanguage is optional. Set the blob's content language. If
-// specified, this property is stored with the blob and returned with a read request. blobContentMD5 is optional. An
-// MD5 hash of the blob content. Note that this hash is not validated, as the hashes for the individual blocks were
-// validated when each was uploaded. metadata is optional. Specifies a user-defined name-value pair associated with the
+// Timeouts for Blob Service Operations.</a> blobContentType is optional. Sets the blob's content type. If specified,
+// this property is stored with the blob and returned with a read request. blobContentEncoding is optional. Sets the
+// blob's content encoding. If specified, this property is stored with the blob and returned with a read request.
+// blobContentLanguage is optional. Set the blob's content language. If specified, this property is stored with the
+// blob and returned with a read request. blobContentMD5 is optional. An MD5 hash of the blob content. Note that this
+// hash is not validated, as the hashes for the individual blocks were validated when each was uploaded.
+// blobCacheControl is optional. Sets the blob's cache control. If specified, this property is stored with the blob and
+// returned with a read request. metadata is optional. Specifies a user-defined name-value pair associated with the
 // blob. If no name-value pairs are specified, the operation will copy the metadata from the source blob or file to the
 // destination blob. If one or more name-value pairs are specified, the destination blob is created with the specified
 // metadata, and metadata is not copied from the source blob or file. Note that beginning with version 2009-09-19,
@@ -198,7 +326,7 @@ func (client blockBlobsClient) putBlockResponder(resp pipeline.Response) (pipeli
 // ifNoneMatch is specify an ETag value to operate only on blobs without a matching value. requestID is provides a
 // client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
 // analytics logging is enabled.
-func (client blockBlobsClient) PutBlockList(ctx context.Context, blocks BlockLookupList, timeout *int32, blobCacheControl *string, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 *string, metadata map[string]string, leaseID *string, blobContentDisposition *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, requestID *string) (*BlockBlobsPutBlockListResponse, error) {
+func (client blockBlobsClient) Upload(ctx context.Context, contentLength int64, body io.ReadSeeker, timeout *int32, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, blobCacheControl *string, metadata map[string]string, leaseID *string, blobContentDisposition *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, requestID *string) (*BlockBlobsUploadResponse, error) {
 if err := validate([]validation{
 {targetValue: timeout,
 constraints: []constraint{{target: "timeout", name: null, rule: false,
@@ -208,20 +336,20 @@ func (client blockBlobsClient) PutBlockList(ctx context.Context, blocks BlockLoo
 chain: []constraint{{target: "metadata", name: pattern, rule: `^[a-zA-Z]+$`, chain: nil}}}}}}); err != nil {
 return nil, err
 }
-req, err := client.putBlockListPreparer(blocks, timeout, blobCacheControl, blobContentType, blobContentEncoding, blobContentLanguage, blobContentMD5, metadata, leaseID, blobContentDisposition, ifModifiedSince, ifUnmodifiedSince, ifMatches, ifNoneMatch, requestID)
+req, err := client.uploadPreparer(contentLength, body, timeout, blobContentType, blobContentEncoding, blobContentLanguage, blobContentMD5, blobCacheControl, metadata, leaseID, blobContentDisposition, ifModifiedSince, ifUnmodifiedSince, ifMatches, ifNoneMatch, requestID)
 if err != nil {
 return nil, err
 }
-resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.putBlockListResponder}, req)
+resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.uploadResponder}, req)
 if err != nil {
 return nil, err
 }
-return resp.(*BlockBlobsPutBlockListResponse), err
+return resp.(*BlockBlobsUploadResponse), err
 }
-// putBlockListPreparer prepares the PutBlockList request.
-func (client blockBlobsClient) putBlockListPreparer(blocks BlockLookupList, timeout *int32, blobCacheControl *string, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 *string, metadata map[string]string, leaseID *string, blobContentDisposition *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
-req, err := pipeline.NewRequest("PUT", client.url, nil)
+// uploadPreparer prepares the Upload request.
+func (client blockBlobsClient) uploadPreparer(contentLength int64, body io.ReadSeeker, timeout *int32, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, blobCacheControl *string, metadata map[string]string, leaseID *string, blobContentDisposition *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
+req, err := pipeline.NewRequest("PUT", client.url, body)
 if err != nil {
 return req, pipeline.NewError(err, "failed to create request")
 }
@@ -229,11 +357,8 @@ func (client blockBlobsClient) putBlockListPreparer(blocks BlockLookupList, time
 if timeout != nil {
 params.Set("timeout", fmt.Sprintf("%v", *timeout))
 }
-params.Set("comp", "blocklist")
 req.URL.RawQuery = params.Encode()
-if blobCacheControl != nil {
-req.Header.Set("x-ms-blob-cache-control", *blobCacheControl)
-}
+req.Header.Set("Content-Length", fmt.Sprintf("%v", contentLength))
 if blobContentType != nil {
 req.Header.Set("x-ms-blob-content-type", *blobContentType)
 }
@@ -244,7 +369,10 @@ func (client blockBlobsClient) putBlockListPreparer(blocks BlockLookupList, time
 req.Header.Set("x-ms-blob-content-language", *blobContentLanguage)
 }
 if blobContentMD5 != nil {
-req.Header.Set("x-ms-blob-content-md5", *blobContentMD5)
+req.Header.Set("x-ms-blob-content-md5", fmt.Sprintf("%v", blobContentMD5))
+}
+if blobCacheControl != nil {
+req.Header.Set("x-ms-blob-cache-control", *blobCacheControl)
 }
 if metadata != nil {
 for k, v := range metadata {
@@ -273,23 +401,17 @@ func (client blockBlobsClient) putBlockListPreparer(blocks BlockLookupList, time
 if requestID != nil {
 req.Header.Set("x-ms-client-request-id", *requestID)
 }
-b, err := xml.Marshal(blocks)
-if err != nil {
-return req, pipeline.NewError(err, "failed to marshal request body")
-}
-req.Header.Set("Content-Type", "application/xml")
-err = req.SetBody(bytes.NewReader(b))
-if err != nil {
-return req, pipeline.NewError(err, "failed to set request body")
-}
+req.Header.Set("x-ms-blob-type", "BlockBlob")
 return req, nil
 }
-// putBlockListResponder handles the response to the PutBlockList request.
-func (client blockBlobsClient) putBlockListResponder(resp pipeline.Response) (pipeline.Response, error) {
+// uploadResponder handles the response to the Upload request.
+func (client blockBlobsClient) uploadResponder(resp pipeline.Response) (pipeline.Response, error) {
 err := validateResponse(resp, http.StatusOK, http.StatusCreated)
 if resp == nil {
 return nil, err
 }
-return &BlockBlobsPutBlockListResponse{rawResponse: resp.Response()}, err
+io.Copy(ioutil.Discard, resp.Response().Body)
+resp.Response().Body.Close()
+return &BlockBlobsUploadResponse{rawResponse: resp.Response()}, err
 }
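
A hypothetical within-package sketch (not part of this commit) contrasting Upload with the stage/commit path above: a single call replaces the entire blob, so it suits payloads small enough for one request.

package azblob

import (
    "bytes"
    "context"
)

// uploadWhole writes the whole blob in one Upload call; every optional
// parameter except the content type is left nil.
func uploadWhole(ctx context.Context, client blockBlobsClient, data []byte) error {
    contentType := "application/octet-stream"
    _, err := client.Upload(ctx, int64(len(data)), bytes.NewReader(data), nil,
        &contentType, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil)
    return err
}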


@@ -4,9 +4,8 @@ package azblob
 // Changes may cause incorrect behavior and will be lost if the code is regenerated.
 import (
-"net/url"
 "github.com/Azure/azure-pipeline-go/pipeline"
+"net/url"
 )
 const (


@@ -1,702 +0,0 @@
package azblob
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
import (
"bytes"
"context"
"encoding/xml"
"fmt"
"github.com/Azure/azure-pipeline-go/pipeline"
"io/ioutil"
"net/http"
"net/url"
"time"
)
// containerClient is the client for the Container methods of the Azblob service.
type containerClient struct {
managementClient
}
// newContainerClient creates an instance of the containerClient client.
func newContainerClient(url url.URL, p pipeline.Pipeline) containerClient {
return containerClient{newManagementClient(url, p)}
}
// Create creates a new container under the specified account. If the container with the same name already exists, the
// operation fails
//
// timeout is the timeout parameter is expressed in seconds. For more information, see <a
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
// Timeouts for Blob Service Operations.</a> metadata is optional. Specifies a user-defined name-value pair associated
// with the blob. If no name-value pairs are specified, the operation will copy the metadata from the source blob or
// file to the destination blob. If one or more name-value pairs are specified, the destination blob is created with
// the specified metadata, and metadata is not copied from the source blob or file. Note that beginning with version
// 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing
// Containers, Blobs, and Metadata for more information. access is specifies whether data in the container may be
// accessed publicly and the level of access requestID is provides a client-generated, opaque value with a 1 KB
// character limit that is recorded in the analytics logs when storage analytics logging is enabled.
func (client containerClient) Create(ctx context.Context, timeout *int32, metadata map[string]string, access PublicAccessType, requestID *string) (*ContainerCreateResponse, error) {
if err := validate([]validation{
{targetValue: timeout,
constraints: []constraint{{target: "timeout", name: null, rule: false,
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}},
{targetValue: metadata,
constraints: []constraint{{target: "metadata", name: null, rule: false,
chain: []constraint{{target: "metadata", name: pattern, rule: `^[a-zA-Z]+$`, chain: nil}}}}}}); err != nil {
return nil, err
}
req, err := client.createPreparer(timeout, metadata, access, requestID)
if err != nil {
return nil, err
}
resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.createResponder}, req)
if err != nil {
return nil, err
}
return resp.(*ContainerCreateResponse), err
}
// createPreparer prepares the Create request.
func (client containerClient) createPreparer(timeout *int32, metadata map[string]string, access PublicAccessType, requestID *string) (pipeline.Request, error) {
req, err := pipeline.NewRequest("PUT", client.url, nil)
if err != nil {
return req, pipeline.NewError(err, "failed to create request")
}
params := req.URL.Query()
if timeout != nil {
params.Set("timeout", fmt.Sprintf("%v", *timeout))
}
params.Set("restype", "container")
req.URL.RawQuery = params.Encode()
if metadata != nil {
for k, v := range metadata {
req.Header.Set("x-ms-meta-"+k, v)
}
}
if access != PublicAccessNone {
req.Header.Set("x-ms-blob-public-access", fmt.Sprintf("%v", access))
}
req.Header.Set("x-ms-version", ServiceVersion)
if requestID != nil {
req.Header.Set("x-ms-client-request-id", *requestID)
}
return req, nil
}
// createResponder handles the response to the Create request.
func (client containerClient) createResponder(resp pipeline.Response) (pipeline.Response, error) {
err := validateResponse(resp, http.StatusOK, http.StatusCreated)
if resp == nil {
return nil, err
}
return &ContainerCreateResponse{rawResponse: resp.Response()}, err
}
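
For orientation, a standalone sketch (not part of this commit, plain net/http only) of the wire request the deleted createPreparer above builds: a PUT to the container URL with restype=container in the query, plus the version header and optional x-ms-meta-* metadata headers.

package azblob_test

import "net/http"

// rawCreateContainerRequest is a hypothetical reconstruction of the request shape.
func rawCreateContainerRequest(containerURL string) (*http.Request, error) {
    req, err := http.NewRequest("PUT", containerURL+"?restype=container", nil)
    if err != nil {
        return nil, err
    }
    req.Header.Set("x-ms-version", "2017-07-29")  // ServiceVersion in this package
    req.Header.Set("x-ms-meta-createdby", "demo") // metadata keys get the x-ms-meta- prefix
    return req, nil
}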
// Delete operation marks the specified container for deletion. The container and any blobs contained within it are
// later deleted during garbage collection.
//
// timeout is expressed in seconds. For more information, see <a
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
// Timeouts for Blob Service Operations.</a> If leaseID is specified, the operation only succeeds if the container's
// lease is active and matches this ID. Specify ifModifiedSince to operate only if the container has been modified
// since the specified date/time. Specify ifUnmodifiedSince to operate only if the container has not been modified
// since the specified date/time. Specify ifMatches (an ETag value) to operate only on a container with a matching
// value. Specify ifNoneMatch (an ETag value) to operate only on a container without a matching value. requestID is a
// client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
// analytics logging is enabled.
func (client containerClient) Delete(ctx context.Context, timeout *int32, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, requestID *string) (*ContainerDeleteResponse, error) {
if err := validate([]validation{
{targetValue: timeout,
constraints: []constraint{{target: "timeout", name: null, rule: false,
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
return nil, err
}
req, err := client.deletePreparer(timeout, leaseID, ifModifiedSince, ifUnmodifiedSince, ifMatches, ifNoneMatch, requestID)
if err != nil {
return nil, err
}
resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.deleteResponder}, req)
if err != nil {
return nil, err
}
return resp.(*ContainerDeleteResponse), err
}
// deletePreparer prepares the Delete request.
func (client containerClient) deletePreparer(timeout *int32, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
req, err := pipeline.NewRequest("DELETE", client.url, nil)
if err != nil {
return req, pipeline.NewError(err, "failed to create request")
}
params := req.URL.Query()
if timeout != nil {
params.Set("timeout", fmt.Sprintf("%v", *timeout))
}
params.Set("restype", "container")
req.URL.RawQuery = params.Encode()
if leaseID != nil {
req.Header.Set("x-ms-lease-id", *leaseID)
}
if ifModifiedSince != nil {
req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123))
}
if ifUnmodifiedSince != nil {
req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123))
}
if ifMatches != nil {
req.Header.Set("If-Match", string(*ifMatches))
}
if ifNoneMatch != nil {
req.Header.Set("If-None-Match", string(*ifNoneMatch))
}
req.Header.Set("x-ms-version", ServiceVersion)
if requestID != nil {
req.Header.Set("x-ms-client-request-id", *requestID)
}
return req, nil
}
// deleteResponder handles the response to the Delete request.
func (client containerClient) deleteResponder(resp pipeline.Response) (pipeline.Response, error) {
err := validateResponse(resp, http.StatusOK, http.StatusAccepted)
if resp == nil {
return nil, err
}
return &ContainerDeleteResponse{rawResponse: resp.Response()}, err
}
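// Example (not part of the generated code): a conditional delete sketch. The
// If-Unmodified-Since value is illustrative; deletePreparer converts it to GMT
// and RFC1123 format before it goes on the wire.
func exampleDeleteContainer(ctx context.Context, client containerClient) error {
	cutoff := time.Now().Add(-1 * time.Hour) // only delete if untouched for an hour
	_, err := client.Delete(ctx, nil, nil, nil, &cutoff, nil, nil, nil)
	return err
}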
// GetACL sends the get acl request.
//
// timeout is expressed in seconds. For more information, see <a
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
// Timeouts for Blob Service Operations.</a> If leaseID is specified, the operation only succeeds if the container's
// lease is active and matches this ID. requestID is a client-generated, opaque value with a 1 KB character
// limit that is recorded in the analytics logs when storage analytics logging is enabled.
func (client containerClient) GetACL(ctx context.Context, timeout *int32, leaseID *string, requestID *string) (*SignedIdentifiers, error) {
if err := validate([]validation{
{targetValue: timeout,
constraints: []constraint{{target: "timeout", name: null, rule: false,
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
return nil, err
}
req, err := client.getACLPreparer(timeout, leaseID, requestID)
if err != nil {
return nil, err
}
resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.getACLResponder}, req)
if err != nil {
return nil, err
}
return resp.(*SignedIdentifiers), err
}
// getACLPreparer prepares the GetACL request.
func (client containerClient) getACLPreparer(timeout *int32, leaseID *string, requestID *string) (pipeline.Request, error) {
req, err := pipeline.NewRequest("GET", client.url, nil)
if err != nil {
return req, pipeline.NewError(err, "failed to create request")
}
params := req.URL.Query()
if timeout != nil {
params.Set("timeout", fmt.Sprintf("%v", *timeout))
}
params.Set("restype", "container")
params.Set("comp", "acl")
req.URL.RawQuery = params.Encode()
if leaseID != nil {
req.Header.Set("x-ms-lease-id", *leaseID)
}
req.Header.Set("x-ms-version", ServiceVersion)
if requestID != nil {
req.Header.Set("x-ms-client-request-id", *requestID)
}
return req, nil
}
// getACLResponder handles the response to the GetACL request.
func (client containerClient) getACLResponder(resp pipeline.Response) (pipeline.Response, error) {
err := validateResponse(resp, http.StatusOK)
if resp == nil {
return nil, err
}
result := &SignedIdentifiers{rawResponse: resp.Response()}
if err != nil {
return result, err
}
defer resp.Response().Body.Close()
b, err := ioutil.ReadAll(resp.Response().Body)
if err != nil {
return result, NewResponseError(err, resp.Response(), "failed to read response body")
}
if len(b) > 0 {
err = xml.Unmarshal(b, result)
if err != nil {
return result, NewResponseError(err, resp.Response(), "failed to unmarshal response body")
}
}
return result, nil
}
// GetMetadata returns all user-defined metadata for the container.
//
// timeout is expressed in seconds. For more information, see <a
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
// Timeouts for Blob Service Operations.</a> If leaseID is specified, the operation only succeeds if the container's
// lease is active and matches this ID. requestID is a client-generated, opaque value with a 1 KB character
// limit that is recorded in the analytics logs when storage analytics logging is enabled.
func (client containerClient) GetMetadata(ctx context.Context, timeout *int32, leaseID *string, requestID *string) (*ContainerGetMetadataResponse, error) {
if err := validate([]validation{
{targetValue: timeout,
constraints: []constraint{{target: "timeout", name: null, rule: false,
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
return nil, err
}
req, err := client.getMetadataPreparer(timeout, leaseID, requestID)
if err != nil {
return nil, err
}
resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.getMetadataResponder}, req)
if err != nil {
return nil, err
}
return resp.(*ContainerGetMetadataResponse), err
}
// getMetadataPreparer prepares the GetMetadata request.
func (client containerClient) getMetadataPreparer(timeout *int32, leaseID *string, requestID *string) (pipeline.Request, error) {
req, err := pipeline.NewRequest("GET", client.url, nil)
if err != nil {
return req, pipeline.NewError(err, "failed to create request")
}
params := req.URL.Query()
if timeout != nil {
params.Set("timeout", fmt.Sprintf("%v", *timeout))
}
params.Set("restype", "container")
params.Set("comp", "metadata")
req.URL.RawQuery = params.Encode()
if leaseID != nil {
req.Header.Set("x-ms-lease-id", *leaseID)
}
req.Header.Set("x-ms-version", ServiceVersion)
if requestID != nil {
req.Header.Set("x-ms-client-request-id", *requestID)
}
return req, nil
}
// getMetadataResponder handles the response to the GetMetadata request.
func (client containerClient) getMetadataResponder(resp pipeline.Response) (pipeline.Response, error) {
err := validateResponse(resp, http.StatusOK)
if resp == nil {
return nil, err
}
return &ContainerGetMetadataResponse{rawResponse: resp.Response()}, err
}
// GetProperties returns all user-defined metadata and system properties for the specified container. The data returned
// does not include the container's list of blobs.
//
// timeout is expressed in seconds. For more information, see <a
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
// Timeouts for Blob Service Operations.</a> If leaseID is specified, the operation only succeeds if the container's
// lease is active and matches this ID. requestID is a client-generated, opaque value with a 1 KB character
// limit that is recorded in the analytics logs when storage analytics logging is enabled.
func (client containerClient) GetProperties(ctx context.Context, timeout *int32, leaseID *string, requestID *string) (*ContainerGetPropertiesResponse, error) {
if err := validate([]validation{
{targetValue: timeout,
constraints: []constraint{{target: "timeout", name: null, rule: false,
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
return nil, err
}
req, err := client.getPropertiesPreparer(timeout, leaseID, requestID)
if err != nil {
return nil, err
}
resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.getPropertiesResponder}, req)
if err != nil {
return nil, err
}
return resp.(*ContainerGetPropertiesResponse), err
}
// getPropertiesPreparer prepares the GetProperties request.
func (client containerClient) getPropertiesPreparer(timeout *int32, leaseID *string, requestID *string) (pipeline.Request, error) {
req, err := pipeline.NewRequest("GET", client.url, nil)
if err != nil {
return req, pipeline.NewError(err, "failed to create request")
}
params := req.URL.Query()
if timeout != nil {
params.Set("timeout", fmt.Sprintf("%v", *timeout))
}
params.Set("restype", "container")
req.URL.RawQuery = params.Encode()
if leaseID != nil {
req.Header.Set("x-ms-lease-id", *leaseID)
}
req.Header.Set("x-ms-version", ServiceVersion)
if requestID != nil {
req.Header.Set("x-ms-client-request-id", *requestID)
}
return req, nil
}
// getPropertiesResponder handles the response to the GetProperties request.
func (client containerClient) getPropertiesResponder(resp pipeline.Response) (pipeline.Response, error) {
err := validateResponse(resp, http.StatusOK)
if resp == nil {
return nil, err
}
return &ContainerGetPropertiesResponse{rawResponse: resp.Response()}, err
}
// Lease establishes and manages a lock on a container for delete operations. The lock duration can be 15 to 60
// seconds, or can be infinite.
//
// action describes which lease action to take. timeout is expressed in seconds. For more
// information, see <a
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
// Timeouts for Blob Service Operations.</a> If leaseID is specified, the operation only succeeds if the container's
// lease is active and matches this ID. breakPeriod applies to a break operation: the proposed duration the lease
// should continue before it is broken, in seconds, between 0 and 60. This break period is only used if it is shorter
// than the time remaining on the lease. If longer, the time remaining on the lease is used. A new lease will not be
// available before the break period has expired, but the lease may be held for longer than the break period. If this
// header does not appear with a break operation, a fixed-duration lease breaks after the remaining lease period
// elapses, and an infinite lease breaks immediately. duration specifies the duration of the lease, in seconds, or
// negative one (-1) for a lease that never expires. A non-infinite lease can be between 15 and 60 seconds. A lease
// duration cannot be changed using renew or change. proposedLeaseID is a proposed lease ID, in GUID string format.
// The Blob service returns 400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid
// Constructor (String) for a list of valid GUID string formats. Specify ifModifiedSince to operate only if the
// container has been modified since the specified date/time. Specify ifUnmodifiedSince to operate only if the
// container has not been modified since the specified date/time. requestID is a client-generated, opaque value with
// a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled.
func (client containerClient) Lease(ctx context.Context, action LeaseActionType, timeout *int32, leaseID *string, breakPeriod *int32, duration *int32, proposedLeaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, requestID *string) (*ContainerLeaseResponse, error) {
if err := validate([]validation{
{targetValue: timeout,
constraints: []constraint{{target: "timeout", name: null, rule: false,
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
return nil, err
}
req, err := client.leasePreparer(action, timeout, leaseID, breakPeriod, duration, proposedLeaseID, ifModifiedSince, ifUnmodifiedSince, requestID)
if err != nil {
return nil, err
}
resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.leaseResponder}, req)
if err != nil {
return nil, err
}
return resp.(*ContainerLeaseResponse), err
}
// leasePreparer prepares the Lease request.
func (client containerClient) leasePreparer(action LeaseActionType, timeout *int32, leaseID *string, breakPeriod *int32, duration *int32, proposedLeaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, requestID *string) (pipeline.Request, error) {
req, err := pipeline.NewRequest("PUT", client.url, nil)
if err != nil {
return req, pipeline.NewError(err, "failed to create request")
}
params := req.URL.Query()
if timeout != nil {
params.Set("timeout", fmt.Sprintf("%v", *timeout))
}
params.Set("comp", "lease")
params.Set("restype", "container")
req.URL.RawQuery = params.Encode()
if leaseID != nil {
req.Header.Set("x-ms-lease-id", *leaseID)
}
req.Header.Set("x-ms-lease-action", fmt.Sprintf("%v", action))
if breakPeriod != nil {
req.Header.Set("x-ms-lease-break-period", fmt.Sprintf("%v", *breakPeriod))
}
if duration != nil {
req.Header.Set("x-ms-lease-duration", fmt.Sprintf("%v", *duration))
}
if proposedLeaseID != nil {
req.Header.Set("x-ms-proposed-lease-id", *proposedLeaseID)
}
if ifModifiedSince != nil {
req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123))
}
if ifUnmodifiedSince != nil {
req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123))
}
req.Header.Set("x-ms-version", ServiceVersion)
if requestID != nil {
req.Header.Set("x-ms-client-request-id", *requestID)
}
return req, nil
}
// leaseResponder handles the response to the Lease request.
func (client containerClient) leaseResponder(resp pipeline.Response) (pipeline.Response, error) {
err := validateResponse(resp, http.StatusOK, http.StatusCreated, http.StatusAccepted)
if resp == nil {
return nil, err
}
return &ContainerLeaseResponse{rawResponse: resp.Response()}, err
}
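// Example (not part of the generated code): a sketch of acquiring a 60-second
// lease. LeaseActionAcquire is assumed to be among the generated
// LeaseActionType constants, and the proposed lease ID is an arbitrary GUID.
func exampleAcquireLease(ctx context.Context, client containerClient) (*ContainerLeaseResponse, error) {
	duration := int32(60)                                // 15-60 seconds, or -1 for a lease that never expires
	proposedID := "11111111-2222-3333-4444-555555555555" // sent as the x-ms-proposed-lease-id header
	return client.Lease(ctx, LeaseActionAcquire, nil, nil, nil, &duration, &proposedID, nil, nil, nil)
}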
// ListBlobs the List Blobs operation returns a list of the blobs under the specified container.
//
// prefix filters the results to return only blobs whose names begin with the specified prefix. When the request
// includes the delimiter parameter, the operation returns a BlobPrefix element in the response body that acts
// as a placeholder for all blobs whose names begin with the same substring up to the appearance of the delimiter
// character. The delimiter may be a single character or a string. marker is a string value that identifies the portion
// of the list of blobs to be returned with the next listing operation. The operation returns the NextMarker value
// within the response body if the listing operation did not return all blobs remaining to be listed with the
// current page. The NextMarker value can be used as the value for the marker parameter in a subsequent call to request
// the next page of list items. The marker value is opaque to the client. maxresults specifies the maximum number of
// blobs to return. If the request does not specify maxresults, or specifies a value greater than 5000, the server
// will return up to 5000 items. Note that if the listing operation crosses a partition boundary, then the service will
// return a continuation token for retrieving the remainder of the results. For this reason, it is possible that the
// service will return fewer results than specified by maxresults, or than the default of 5000. include specifies
// one or more datasets to include in the response. timeout is expressed
// in seconds. For more information, see <a
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
// Timeouts for Blob Service Operations.</a> requestID is a client-generated, opaque value with a 1 KB
// character limit that is recorded in the analytics logs when storage analytics logging is enabled.
func (client containerClient) ListBlobs(ctx context.Context, prefix *string, delimiter *string, marker *string, maxresults *int32, include ListBlobsIncludeType, timeout *int32, requestID *string) (*ListBlobsResponse, error) {
if err := validate([]validation{
{targetValue: maxresults,
constraints: []constraint{{target: "maxresults", name: null, rule: false,
chain: []constraint{{target: "maxresults", name: inclusiveMinimum, rule: 1, chain: nil}}}}},
{targetValue: timeout,
constraints: []constraint{{target: "timeout", name: null, rule: false,
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
return nil, err
}
req, err := client.listBlobsPreparer(prefix, delimiter, marker, maxresults, include, timeout, requestID)
if err != nil {
return nil, err
}
resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.listBlobsResponder}, req)
if err != nil {
return nil, err
}
return resp.(*ListBlobsResponse), err
}
// listBlobsPreparer prepares the ListBlobs request.
func (client containerClient) listBlobsPreparer(prefix *string, delimiter *string, marker *string, maxresults *int32, include ListBlobsIncludeType, timeout *int32, requestID *string) (pipeline.Request, error) {
req, err := pipeline.NewRequest("GET", client.url, nil)
if err != nil {
return req, pipeline.NewError(err, "failed to create request")
}
params := req.URL.Query()
if prefix != nil {
params.Set("prefix", *prefix)
}
if delimiter != nil {
params.Set("delimiter", *delimiter)
}
if marker != nil {
params.Set("marker", *marker)
}
if maxresults != nil {
params.Set("maxresults", fmt.Sprintf("%v", *maxresults))
}
if include != ListBlobsIncludeNone {
params.Set("include", fmt.Sprintf("%v", include))
}
if timeout != nil {
params.Set("timeout", fmt.Sprintf("%v", *timeout))
}
params.Set("restype", "container")
params.Set("comp", "list")
req.URL.RawQuery = params.Encode()
req.Header.Set("x-ms-version", ServiceVersion)
if requestID != nil {
req.Header.Set("x-ms-client-request-id", *requestID)
}
return req, nil
}
// listBlobsResponder handles the response to the ListBlobs request.
func (client containerClient) listBlobsResponder(resp pipeline.Response) (pipeline.Response, error) {
err := validateResponse(resp, http.StatusOK)
if resp == nil {
return nil, err
}
result := &ListBlobsResponse{rawResponse: resp.Response()}
if err != nil {
return result, err
}
defer resp.Response().Body.Close()
b, err := ioutil.ReadAll(resp.Response().Body)
if err != nil {
return result, NewResponseError(err, resp.Response(), "failed to read response body")
}
if len(b) > 0 {
err = xml.Unmarshal(b, result)
if err != nil {
return result, NewResponseError(err, resp.Response(), "failed to unmarshal response body")
}
}
return result, nil
}
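// Example (not part of the generated code): listing one page of at most 100
// blobs whose names start with "logs/". Per the comment above, the response's
// NextMarker can be fed back as marker to fetch the next page; its exact Go
// field shape is not shown here, so this sketch stops at one page.
func exampleListBlobsPage(ctx context.Context, client containerClient) (*ListBlobsResponse, error) {
	prefix := "logs/"
	maxresults := int32(100) // must be at least 1 per the validation above
	return client.ListBlobs(ctx, &prefix, nil, nil, &maxresults, ListBlobsIncludeNone, nil, nil)
}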
// SetACL sends the set acl request.
//
// containerACL is the ACL for the container. timeout is expressed in seconds. For more
// information, see <a
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
// Timeouts for Blob Service Operations.</a> If leaseID is specified, the operation only succeeds if the container's
// lease is active and matches this ID. access specifies whether data in the container may be accessed publicly and
// the level of access. Specify ifModifiedSince to operate only if the container has been modified since the specified
// date/time. Specify ifUnmodifiedSince to operate only if the container has not been modified since the specified
// date/time. Specify ifMatches (an ETag value) to operate only on a container with a matching value. Specify
// ifNoneMatch (an ETag value) to operate only on a container without a matching value. requestID is a
// client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when
// storage analytics logging is enabled.
func (client containerClient) SetACL(ctx context.Context, containerACL []SignedIdentifier, timeout *int32, leaseID *string, access PublicAccessType, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, requestID *string) (*ContainerSetACLResponse, error) {
if err := validate([]validation{
{targetValue: timeout,
constraints: []constraint{{target: "timeout", name: null, rule: false,
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
return nil, err
}
req, err := client.setACLPreparer(containerACL, timeout, leaseID, access, ifModifiedSince, ifUnmodifiedSince, ifMatches, ifNoneMatch, requestID)
if err != nil {
return nil, err
}
resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.setACLResponder}, req)
if err != nil {
return nil, err
}
return resp.(*ContainerSetACLResponse), err
}
// setACLPreparer prepares the SetACL request.
func (client containerClient) setACLPreparer(containerACL []SignedIdentifier, timeout *int32, leaseID *string, access PublicAccessType, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
req, err := pipeline.NewRequest("PUT", client.url, nil)
if err != nil {
return req, pipeline.NewError(err, "failed to create request")
}
params := req.URL.Query()
if timeout != nil {
params.Set("timeout", fmt.Sprintf("%v", *timeout))
}
params.Set("restype", "container")
params.Set("comp", "acl")
req.URL.RawQuery = params.Encode()
if leaseID != nil {
req.Header.Set("x-ms-lease-id", *leaseID)
}
if access != PublicAccessNone {
req.Header.Set("x-ms-blob-public-access", fmt.Sprintf("%v", access))
}
if ifModifiedSince != nil {
req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123))
}
if ifUnmodifiedSince != nil {
req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123))
}
if ifMatches != nil {
req.Header.Set("If-Match", string(*ifMatches))
}
if ifNoneMatch != nil {
req.Header.Set("If-None-Match", string(*ifNoneMatch))
}
req.Header.Set("x-ms-version", ServiceVersion)
if requestID != nil {
req.Header.Set("x-ms-client-request-id", *requestID)
}
b, err := xml.Marshal(SignedIdentifiers{Value: containerACL})
if err != nil {
return req, pipeline.NewError(err, "failed to marshal request body")
}
req.Header.Set("Content-Type", "application/xml")
err = req.SetBody(bytes.NewReader(b))
if err != nil {
return req, pipeline.NewError(err, "failed to set request body")
}
return req, nil
}
// setACLResponder handles the response to the SetACL request.
func (client containerClient) setACLResponder(resp pipeline.Response) (pipeline.Response, error) {
err := validateResponse(resp, http.StatusOK)
if resp == nil {
return nil, err
}
return &ContainerSetACLResponse{rawResponse: resp.Response()}, err
}
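// Example (not part of the generated code): a sketch of setting one stored
// access policy. The SignedIdentifier field names (ID, AccessPolicy) are
// assumed to mirror the internal marshalling types shown later in this commit;
// the AccessPolicy fields (Start, Expiry, Permission) follow the internal
// accessPolicy layout. Treat the names and values as illustrative.
func exampleSetACL(ctx context.Context, client containerClient) error {
	policy := SignedIdentifier{
		ID: "readonlypolicy", // stored access policy identifier, at most 64 characters
		AccessPolicy: AccessPolicy{
			Start:      time.Now(),
			Expiry:     time.Now().Add(24 * time.Hour),
			Permission: "r", // read-only
		},
	}
	_, err := client.SetACL(ctx, []SignedIdentifier{policy}, nil, nil, PublicAccessNone, nil, nil, nil, nil, nil)
	return err
}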
// SetMetadata operation sets one or more user-defined name-value pairs for the specified container.
//
// timeout is expressed in seconds. For more information, see <a
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
// Timeouts for Blob Service Operations.</a> If leaseID is specified, the operation only succeeds if the container's
// lease is active and matches this ID. metadata is optional. Specifies user-defined name-value pairs associated with
// the container. Note that beginning with version 2009-09-19, metadata names must adhere to the naming rules for C#
// identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more information. Specify
// ifModifiedSince to operate only if the container has been modified since the specified date/time. requestID is a
// client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
// analytics logging is enabled.
func (client containerClient) SetMetadata(ctx context.Context, timeout *int32, leaseID *string, metadata map[string]string, ifModifiedSince *time.Time, requestID *string) (*ContainerSetMetadataResponse, error) {
if err := validate([]validation{
{targetValue: timeout,
constraints: []constraint{{target: "timeout", name: null, rule: false,
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}},
{targetValue: metadata,
constraints: []constraint{{target: "metadata", name: null, rule: false,
chain: []constraint{{target: "metadata", name: pattern, rule: `^[a-zA-Z]+$`, chain: nil}}}}}}); err != nil {
return nil, err
}
req, err := client.setMetadataPreparer(timeout, leaseID, metadata, ifModifiedSince, requestID)
if err != nil {
return nil, err
}
resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.setMetadataResponder}, req)
if err != nil {
return nil, err
}
return resp.(*ContainerSetMetadataResponse), err
}
// setMetadataPreparer prepares the SetMetadata request.
func (client containerClient) setMetadataPreparer(timeout *int32, leaseID *string, metadata map[string]string, ifModifiedSince *time.Time, requestID *string) (pipeline.Request, error) {
req, err := pipeline.NewRequest("PUT", client.url, nil)
if err != nil {
return req, pipeline.NewError(err, "failed to create request")
}
params := req.URL.Query()
if timeout != nil {
params.Set("timeout", fmt.Sprintf("%v", *timeout))
}
params.Set("restype", "container")
params.Set("comp", "metadata")
req.URL.RawQuery = params.Encode()
if leaseID != nil {
req.Header.Set("x-ms-lease-id", *leaseID)
}
if metadata != nil {
for k, v := range metadata {
req.Header.Set("x-ms-meta-"+k, v)
}
}
if ifModifiedSince != nil {
req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123))
}
req.Header.Set("x-ms-version", ServiceVersion)
if requestID != nil {
req.Header.Set("x-ms-client-request-id", *requestID)
}
return req, nil
}
// setMetadataResponder handles the response to the SetMetadata request.
func (client containerClient) setMetadataResponder(resp pipeline.Response) (pipeline.Response, error) {
err := validateResponse(resp, http.StatusOK)
if resp == nil {
return nil, err
}
return &ContainerSetMetadataResponse{rawResponse: resp.Response()}, err
}
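// Example (not part of the generated code): replacing the container's
// metadata. Keys must satisfy the ^[a-zA-Z]+$ pattern enforced by the
// validation above and are emitted as x-ms-meta-* headers; the values here
// are illustrative.
func exampleSetContainerMetadata(ctx context.Context, client containerClient) error {
	metadata := map[string]string{
		"owner": "dataplatform",
		"tier":  "hot",
	}
	_, err := client.SetMetadata(ctx, nil, nil, metadata, nil, nil)
	return err
}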

Diff not shown because of its large size.


@ -1,224 +0,0 @@
package azblob
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
import (
"encoding/xml"
"reflect"
"time"
"unsafe"
)
const (
rfc3339Format = "2006-01-02T15:04:05.0000000Z07:00"
)
// used to convert times from UTC to GMT before sending across the wire
var gmt = time.FixedZone("GMT", 0)
// internal type used for marshalling time in RFC1123 format
type timeRFC1123 struct {
time.Time
}
// MarshalText implements the encoding.TextMarshaler interface for timeRFC1123.
func (t timeRFC1123) MarshalText() ([]byte, error) {
return []byte(t.Format(time.RFC1123)), nil
}
// UnmarshalText implements the encoding.TextUnmarshaler interface for timeRFC1123.
func (t *timeRFC1123) UnmarshalText(data []byte) (err error) {
t.Time, err = time.Parse(time.RFC1123, string(data))
return
}
// internal type used for marshalling time in RFC3339 format
type timeRFC3339 struct {
time.Time
}
// MarshalText implements the encoding.TextMarshaler interface for timeRFC3339.
func (t timeRFC3339) MarshalText() ([]byte, error) {
return []byte(t.Format(rfc3339Format)), nil
}
// UnmarshalText implements the encoding.TextUnmarshaler interface for timeRFC3339.
func (t *timeRFC3339) UnmarshalText(data []byte) (err error) {
t.Time, err = time.Parse(rfc3339Format, string(data))
return
}
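// Example (not part of the generated code): a minimal, self-contained sketch of
// how these wrapper types round-trip through the encoding.TextMarshaler and
// encoding.TextUnmarshaler interfaces. The sample instant is illustrative.
func exampleTimeRoundTrip() bool {
	orig := timeRFC1123{Time: time.Date(2018, time.April, 23, 15, 28, 52, 0, gmt)}
	text, err := orig.MarshalText() // yields "Mon, 23 Apr 2018 15:28:52 GMT"
	if err != nil {
		return false
	}
	var parsed timeRFC1123
	if err := parsed.UnmarshalText(text); err != nil {
		return false
	}
	return parsed.Equal(orig.Time) // true: the instant survives the round trip
}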
// internal type used for marshalling
type accessPolicy struct {
Start timeRFC3339 `xml:"Start"`
Expiry timeRFC3339 `xml:"Expiry"`
Permission string `xml:"Permission"`
}
// MarshalXML implements the xml.Marshaler interface for AccessPolicy.
func (ap AccessPolicy) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
if reflect.TypeOf((*AccessPolicy)(nil)).Elem().Size() != reflect.TypeOf((*accessPolicy)(nil)).Elem().Size() {
panic("size mismatch between AccessPolicy and accessPolicy")
}
ap2 := (*accessPolicy)(unsafe.Pointer(&ap))
return e.EncodeElement(*ap2, start)
}
// UnmarshalXML implements the xml.Unmarshaler interface for AccessPolicy.
func (ap *AccessPolicy) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
if reflect.TypeOf((*AccessPolicy)(nil)).Elem().Size() != reflect.TypeOf((*accessPolicy)(nil)).Elem().Size() {
panic("size mismatch between AccessPolicy and accessPolicy")
}
ap2 := (*accessPolicy)(unsafe.Pointer(ap))
err := d.DecodeElement(ap2, &start)
if err != nil {
ap = (*AccessPolicy)(unsafe.Pointer(ap2))
}
return err
}
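// Example (not part of the generated code): the conversion trick used above,
// shown in isolation with hypothetical stand-ins for AccessPolicy/accessPolicy.
// An exported type is aliased to an unexported mirror whose fields carry custom
// marshalling types; because the two structs have identical layouts, a pointer
// to one can be reinterpreted as a pointer to the other without copying, and
// the reflect size check guards against the layouts drifting apart.
type exported struct{ When time.Time }
type mirror struct{ When timeRFC3339 }

func exampleAlias(e *exported) *mirror {
	if reflect.TypeOf(exported{}).Size() != reflect.TypeOf(mirror{}).Size() {
		panic("size mismatch between exported and mirror")
	}
	// Mutations made through the returned *mirror update *e as well.
	return (*mirror)(unsafe.Pointer(e))
}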
// internal type used for marshalling
type blobProperties struct {
LastModified timeRFC1123 `xml:"Last-Modified"`
Etag ETag `xml:"Etag"`
ContentLength *int64 `xml:"Content-Length"`
ContentType *string `xml:"Content-Type"`
ContentEncoding *string `xml:"Content-Encoding"`
ContentLanguage *string `xml:"Content-Language"`
ContentMD5 *string `xml:"Content-MD5"`
ContentDisposition *string `xml:"Content-Disposition"`
CacheControl *string `xml:"Cache-Control"`
BlobSequenceNumber *int32 `xml:"x-ms-blob-sequence-number"`
BlobType BlobType `xml:"BlobType"`
LeaseStatus LeaseStatusType `xml:"LeaseStatus"`
LeaseState LeaseStateType `xml:"LeaseState"`
LeaseDuration LeaseDurationType `xml:"LeaseDuration"`
CopyID *string `xml:"CopyId"`
CopyStatus CopyStatusType `xml:"CopyStatus"`
CopySource *string `xml:"CopySource"`
CopyProgress *string `xml:"CopyProgress"`
CopyCompletionTime *timeRFC1123 `xml:"CopyCompletionTime"`
CopyStatusDescription *string `xml:"CopyStatusDescription"`
ServerEncrypted *bool `xml:"ServerEncrypted"`
IncrementalCopy *bool `xml:"IncrementalCopy"`
DestinationSnapshot *timeRFC3339 `xml:"DestinationSnapshot"`
AccessTier AccessTierType `xml:"AccessTier"`
AccessTierInferred *bool `xml:"AccessTierInferred"`
ArchiveStatus ArchiveStatusType `xml:"ArchiveStatus"`
DeletedTime *timeRFC1123 `xml:"DeletedTime"`
RemainingRetentionDays *int32 `xml:"RemainingRetentionDays"`
}
// MarshalXML implements the xml.Marshaler interface for BlobProperties.
func (bp BlobProperties) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
if reflect.TypeOf((*BlobProperties)(nil)).Elem().Size() != reflect.TypeOf((*blobProperties)(nil)).Elem().Size() {
panic("size mismatch between BlobProperties and blobProperties")
}
bp2 := (*blobProperties)(unsafe.Pointer(&bp))
return e.EncodeElement(*bp2, start)
}
// UnmarshalXML implements the xml.Unmarshaler interface for BlobProperties.
func (bp *BlobProperties) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
if reflect.TypeOf((*BlobProperties)(nil)).Elem().Size() != reflect.TypeOf((*blobProperties)(nil)).Elem().Size() {
panic("size mismatch between BlobProperties and blobProperties")
}
bp2 := (*blobProperties)(unsafe.Pointer(bp))
err := d.DecodeElement(bp2, &start)
if err != nil {
bp = (*BlobProperties)(unsafe.Pointer(bp2))
}
return err
}
// internal type used for marshalling
type blob struct {
Name string `xml:"Name"`
Deleted bool `xml:"Deleted"`
Snapshot timeRFC3339 `xml:"Snapshot"`
Properties BlobProperties `xml:"Properties"`
Metadata Metadata `xml:"Metadata"`
}
// MarshalXML implements the xml.Marshaler interface for Blob.
func (b Blob) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
if reflect.TypeOf((*Blob)(nil)).Elem().Size() != reflect.TypeOf((*blob)(nil)).Elem().Size() {
panic("size mismatch between Blob and blob")
}
b2 := (*blob)(unsafe.Pointer(&b))
return e.EncodeElement(*b2, start)
}
// UnmarshalXML implements the xml.Unmarshaler interface for Blob.
func (b *Blob) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
if reflect.TypeOf((*Blob)(nil)).Elem().Size() != reflect.TypeOf((*blob)(nil)).Elem().Size() {
panic("size mismatch between Blob and blob")
}
b2 := (*blob)(unsafe.Pointer(b))
err := d.DecodeElement(b2, &start)
if err != nil {
b = (*Blob)(unsafe.Pointer(b2))
}
return err
}
// internal type used for marshalling
type containerProperties struct {
LastModified timeRFC1123 `xml:"Last-Modified"`
Etag ETag `xml:"Etag"`
LeaseStatus LeaseStatusType `xml:"LeaseStatus"`
LeaseState LeaseStateType `xml:"LeaseState"`
LeaseDuration LeaseDurationType `xml:"LeaseDuration"`
PublicAccess PublicAccessType `xml:"PublicAccess"`
}
// MarshalXML implements the xml.Marshaler interface for ContainerProperties.
func (cp ContainerProperties) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
if reflect.TypeOf((*ContainerProperties)(nil)).Elem().Size() != reflect.TypeOf((*containerProperties)(nil)).Elem().Size() {
panic("size mismatch between ContainerProperties and containerProperties")
}
cp2 := (*containerProperties)(unsafe.Pointer(&cp))
return e.EncodeElement(*cp2, start)
}
// UnmarshalXML implements the xml.Unmarshaler interface for ContainerProperties.
func (cp *ContainerProperties) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
if reflect.TypeOf((*ContainerProperties)(nil)).Elem().Size() != reflect.TypeOf((*containerProperties)(nil)).Elem().Size() {
panic("size mismatch between ContainerProperties and containerProperties")
}
cp2 := (*containerProperties)(unsafe.Pointer(cp))
err := d.DecodeElement(cp2, &start)
if err != nil {
cp = (*ContainerProperties)(unsafe.Pointer(cp2))
}
return err
}
// internal type used for marshalling
type geoReplication struct {
Status GeoReplicationStatusType `xml:"Status"`
LastSyncTime timeRFC1123 `xml:"LastSyncTime"`
}
// MarshalXML implements the xml.Marshaler interface for GeoReplication.
func (gr GeoReplication) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
if reflect.TypeOf((*GeoReplication)(nil)).Elem().Size() != reflect.TypeOf((*geoReplication)(nil)).Elem().Size() {
panic("size mismatch between GeoReplication and geoReplication")
}
gr2 := (*geoReplication)(unsafe.Pointer(&gr))
return e.EncodeElement(*gr2, start)
}
// UnmarshalXML implements the xml.Unmarshaler interface for GeoReplication.
func (gr *GeoReplication) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
if reflect.TypeOf((*GeoReplication)(nil)).Elem().Size() != reflect.TypeOf((*geoReplication)(nil)).Elem().Size() {
panic("size mismatch between GeoReplication and geoReplication")
}
gr2 := (*geoReplication)(unsafe.Pointer(gr))
err := d.DecodeElement(gr2, &start)
if err != nil {
gr = (*GeoReplication)(unsafe.Pointer(gr2))
}
return err
}

Diff not shown because of its large size.


@ -25,6 +25,313 @@ func newPageBlobsClient(url url.URL, p pipeline.Pipeline) pageBlobsClient {
return pageBlobsClient{newManagementClient(url, p)}
}
// ClearPages the Clear Pages operation clears a set of pages from a page blob.
//
// contentLength is the length of the request. timeout is expressed in seconds. For more
// information, see <a
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
// Timeouts for Blob Service Operations.</a> rangeParameter restricts the operation to the bytes of the blob in the
// specified range. If leaseID is specified, the operation only succeeds if the container's lease is active and matches
// this ID. Specify ifSequenceNumberLessThanOrEqualTo to operate only on a blob if it has a sequence number
// less than or equal to the specified value. Specify ifSequenceNumberLessThan to operate only on a blob
// if it has a sequence number less than the specified value. Specify ifSequenceNumberEqualTo to operate
// only on a blob if it has the specified sequence number. Specify ifModifiedSince to operate only
// on a blob if it has been modified since the specified date/time. Specify ifUnmodifiedSince to
// operate only on a blob if it has not been modified since the specified date/time. Specify ifMatches (an ETag value)
// to operate only on blobs with a matching value. Specify ifNoneMatch (an ETag value) to operate only on blobs
// without a matching value. requestID is a client-generated, opaque value with a 1 KB character limit that is
// recorded in the analytics logs when storage analytics logging is enabled.
func (client pageBlobsClient) ClearPages(ctx context.Context, contentLength int64, timeout *int32, rangeParameter *string, leaseID *string, ifSequenceNumberLessThanOrEqualTo *int64, ifSequenceNumberLessThan *int64, ifSequenceNumberEqualTo *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, requestID *string) (*PageBlobsClearPagesResponse, error) {
if err := validate([]validation{
{targetValue: timeout,
constraints: []constraint{{target: "timeout", name: null, rule: false,
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
return nil, err
}
req, err := client.clearPagesPreparer(contentLength, timeout, rangeParameter, leaseID, ifSequenceNumberLessThanOrEqualTo, ifSequenceNumberLessThan, ifSequenceNumberEqualTo, ifModifiedSince, ifUnmodifiedSince, ifMatches, ifNoneMatch, requestID)
if err != nil {
return nil, err
}
resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.clearPagesResponder}, req)
if err != nil {
return nil, err
}
return resp.(*PageBlobsClearPagesResponse), err
}
// clearPagesPreparer prepares the ClearPages request.
func (client pageBlobsClient) clearPagesPreparer(contentLength int64, timeout *int32, rangeParameter *string, leaseID *string, ifSequenceNumberLessThanOrEqualTo *int64, ifSequenceNumberLessThan *int64, ifSequenceNumberEqualTo *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
req, err := pipeline.NewRequest("PUT", client.url, nil)
if err != nil {
return req, pipeline.NewError(err, "failed to create request")
}
params := req.URL.Query()
if timeout != nil {
params.Set("timeout", fmt.Sprintf("%v", *timeout))
}
params.Set("comp", "page")
req.URL.RawQuery = params.Encode()
req.Header.Set("Content-Length", fmt.Sprintf("%v", contentLength))
if rangeParameter != nil {
req.Header.Set("x-ms-range", *rangeParameter)
}
if leaseID != nil {
req.Header.Set("x-ms-lease-id", *leaseID)
}
if ifSequenceNumberLessThanOrEqualTo != nil {
req.Header.Set("x-ms-if-sequence-number-le", fmt.Sprintf("%v", *ifSequenceNumberLessThanOrEqualTo))
}
if ifSequenceNumberLessThan != nil {
req.Header.Set("x-ms-if-sequence-number-lt", fmt.Sprintf("%v", *ifSequenceNumberLessThan))
}
if ifSequenceNumberEqualTo != nil {
req.Header.Set("x-ms-if-sequence-number-eq", fmt.Sprintf("%v", *ifSequenceNumberEqualTo))
}
if ifModifiedSince != nil {
req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123))
}
if ifUnmodifiedSince != nil {
req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123))
}
if ifMatches != nil {
req.Header.Set("If-Match", string(*ifMatches))
}
if ifNoneMatch != nil {
req.Header.Set("If-None-Match", string(*ifNoneMatch))
}
req.Header.Set("x-ms-version", ServiceVersion)
if requestID != nil {
req.Header.Set("x-ms-client-request-id", *requestID)
}
req.Header.Set("x-ms-page-write", "clear")
return req, nil
}
// clearPagesResponder handles the response to the ClearPages request.
func (client pageBlobsClient) clearPagesResponder(resp pipeline.Response) (pipeline.Response, error) {
err := validateResponse(resp, http.StatusOK, http.StatusCreated)
if resp == nil {
return nil, err
}
io.Copy(ioutil.Discard, resp.Response().Body)
resp.Response().Body.Close()
return &PageBlobsClearPagesResponse{rawResponse: resp.Response()}, err
}
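// Example (not part of the generated code): clearing the first 512-byte page.
// Page ranges must be 512-byte aligned; the "bytes=start-end" form of the
// x-ms-range header follows the standard Azure range syntax. contentLength is
// 0 because a clear operation sends no body.
func exampleClearFirstPage(ctx context.Context, client pageBlobsClient) error {
	pageRange := "bytes=0-511"
	_, err := client.ClearPages(ctx, 0, nil, &pageRange, nil, nil, nil, nil, nil, nil, nil, nil, nil)
	return err
}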
// CopyIncremental the Copy Incremental operation copies a snapshot of the source page blob to a destination page blob.
// The snapshot is copied such that only the differential changes since the previously copied snapshot are
// transferred to the destination. The copied snapshots are complete copies of the original snapshot and can be read or
// copied from as usual. This API is supported since REST version 2016-05-31.
//
// copySource specifies the name of the source page blob snapshot. This value is a URL of up to 2 KB in length that
// specifies a page blob snapshot. The value should be URL-encoded as it would appear in a request URI. The source blob
// must either be public or must be authenticated via a shared access signature. timeout is
// expressed in seconds. For more information, see <a
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
// Timeouts for Blob Service Operations.</a> metadata is optional. Specifies a user-defined name-value pair associated
// with the blob. If no name-value pairs are specified, the operation will copy the metadata from the source blob or
// file to the destination blob. If one or more name-value pairs are specified, the destination blob is created with
// the specified metadata, and metadata is not copied from the source blob or file. Note that beginning with version
// 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing
// Containers, Blobs, and Metadata for more information. Specify ifModifiedSince to operate only
// on a blob if it has been modified since the specified date/time. Specify ifUnmodifiedSince to
// operate only on a blob if it has not been modified since the specified date/time. Specify ifMatches (an ETag value)
// to operate only on blobs with a matching value. Specify ifNoneMatch (an ETag value) to operate only on blobs
// without a matching value. requestID is a client-generated, opaque value with a 1 KB character limit that is
// recorded in the analytics logs when storage analytics logging is enabled.
func (client pageBlobsClient) CopyIncremental(ctx context.Context, copySource string, timeout *int32, metadata map[string]string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, requestID *string) (*PageBlobsCopyIncrementalResponse, error) {
if err := validate([]validation{
{targetValue: timeout,
constraints: []constraint{{target: "timeout", name: null, rule: false,
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}},
{targetValue: metadata,
constraints: []constraint{{target: "metadata", name: null, rule: false,
chain: []constraint{{target: "metadata", name: pattern, rule: `^[a-zA-Z]+$`, chain: nil}}}}}}); err != nil {
return nil, err
}
req, err := client.copyIncrementalPreparer(copySource, timeout, metadata, ifModifiedSince, ifUnmodifiedSince, ifMatches, ifNoneMatch, requestID)
if err != nil {
return nil, err
}
resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.copyIncrementalResponder}, req)
if err != nil {
return nil, err
}
return resp.(*PageBlobsCopyIncrementalResponse), err
}
// copyIncrementalPreparer prepares the CopyIncremental request.
func (client pageBlobsClient) copyIncrementalPreparer(copySource string, timeout *int32, metadata map[string]string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
req, err := pipeline.NewRequest("PUT", client.url, nil)
if err != nil {
return req, pipeline.NewError(err, "failed to create request")
}
params := req.URL.Query()
if timeout != nil {
params.Set("timeout", fmt.Sprintf("%v", *timeout))
}
params.Set("comp", "incrementalcopy")
req.URL.RawQuery = params.Encode()
if metadata != nil {
for k, v := range metadata {
req.Header.Set("x-ms-meta-"+k, v)
}
}
if ifModifiedSince != nil {
req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123))
}
if ifUnmodifiedSince != nil {
req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123))
}
if ifMatches != nil {
req.Header.Set("If-Match", string(*ifMatches))
}
if ifNoneMatch != nil {
req.Header.Set("If-None-Match", string(*ifNoneMatch))
}
req.Header.Set("x-ms-copy-source", copySource)
req.Header.Set("x-ms-version", ServiceVersion)
if requestID != nil {
req.Header.Set("x-ms-client-request-id", *requestID)
}
return req, nil
}
// copyIncrementalResponder handles the response to the CopyIncremental request.
func (client pageBlobsClient) copyIncrementalResponder(resp pipeline.Response) (pipeline.Response, error) {
err := validateResponse(resp, http.StatusOK, http.StatusAccepted)
if resp == nil {
return nil, err
}
io.Copy(ioutil.Discard, resp.Response().Body)
resp.Response().Body.Close()
return &PageBlobsCopyIncrementalResponse{rawResponse: resp.Response()}, err
}
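// Example (not part of the generated code): kicking off an incremental copy
// from a source snapshot. The account, container, blob, and snapshot values in
// the URL are placeholders; the source must be public or carry a SAS.
func exampleCopyIncremental(ctx context.Context, client pageBlobsClient) (*PageBlobsCopyIncrementalResponse, error) {
	source := "https://myaccount.blob.core.windows.net/mycontainer/myblob?snapshot=2018-04-23T15:28:52.0000000Z"
	return client.CopyIncremental(ctx, source, nil, nil, nil, nil, nil, nil, nil)
}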
// Create the Create operation creates a new page blob.
//
// contentLength is the length of the request. timeout is expressed in seconds. For more
// information, see <a
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
// Timeouts for Blob Service Operations.</a> blobContentType is optional. Sets the blob's content type. If specified,
// this property is stored with the blob and returned with a read request. blobContentEncoding is optional. Sets the
// blob's content encoding. If specified, this property is stored with the blob and returned with a read request.
// blobContentLanguage is optional. Sets the blob's content language. If specified, this property is stored with the
// blob and returned with a read request. blobContentMD5 is optional. An MD5 hash of the blob content. Note that this
// hash is not validated, as the hashes for the individual blocks were validated when each was uploaded.
// blobCacheControl is optional. Sets the blob's cache control. If specified, this property is stored with the blob and
// returned with a read request. metadata is optional. Specifies a user-defined name-value pair associated with the
// blob. If no name-value pairs are specified, the operation will copy the metadata from the source blob or file to the
// destination blob. If one or more name-value pairs are specified, the destination blob is created with the specified
// metadata, and metadata is not copied from the source blob or file. Note that beginning with version 2009-09-19,
// metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, Blobs, and
// Metadata for more information. If leaseID is specified, the operation only succeeds if the container's lease is
// active and matches this ID. blobContentDisposition is optional. Sets the blob's Content-Disposition header.
// Specify ifModifiedSince to operate only on a blob if it has been modified since the specified
// date/time. Specify ifUnmodifiedSince to operate only on a blob if it has not been modified
// since the specified date/time. Specify ifMatches (an ETag value) to operate only on blobs with a matching value.
// Specify ifNoneMatch (an ETag value) to operate only on blobs without a matching value. blobContentLength
// specifies the maximum size for the page blob, up to 1 TB. The page blob size must be aligned to a 512-byte
// boundary. blobSequenceNumber is set for page blobs only. The sequence number is a user-controlled value that you can
// use to track requests. The value of the sequence number must be between 0 and 2^63 - 1. requestID is a
// client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
// analytics logging is enabled.
func (client pageBlobsClient) Create(ctx context.Context, contentLength int64, timeout *int32, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, blobCacheControl *string, metadata map[string]string, leaseID *string, blobContentDisposition *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, blobContentLength *int64, blobSequenceNumber *int64, requestID *string) (*PageBlobsCreateResponse, error) {
if err := validate([]validation{
{targetValue: timeout,
constraints: []constraint{{target: "timeout", name: null, rule: false,
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}},
{targetValue: metadata,
constraints: []constraint{{target: "metadata", name: null, rule: false,
chain: []constraint{{target: "metadata", name: pattern, rule: `^[a-zA-Z]+$`, chain: nil}}}}}}); err != nil {
return nil, err
}
req, err := client.createPreparer(contentLength, timeout, blobContentType, blobContentEncoding, blobContentLanguage, blobContentMD5, blobCacheControl, metadata, leaseID, blobContentDisposition, ifModifiedSince, ifUnmodifiedSince, ifMatches, ifNoneMatch, blobContentLength, blobSequenceNumber, requestID)
if err != nil {
return nil, err
}
resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.createResponder}, req)
if err != nil {
return nil, err
}
return resp.(*PageBlobsCreateResponse), err
}
// createPreparer prepares the Create request.
func (client pageBlobsClient) createPreparer(contentLength int64, timeout *int32, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, blobCacheControl *string, metadata map[string]string, leaseID *string, blobContentDisposition *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, blobContentLength *int64, blobSequenceNumber *int64, requestID *string) (pipeline.Request, error) {
req, err := pipeline.NewRequest("PUT", client.url, nil)
if err != nil {
return req, pipeline.NewError(err, "failed to create request")
}
params := req.URL.Query()
if timeout != nil {
params.Set("timeout", fmt.Sprintf("%v", *timeout))
}
req.URL.RawQuery = params.Encode()
req.Header.Set("Content-Length", fmt.Sprintf("%v", contentLength))
if blobContentType != nil {
req.Header.Set("x-ms-blob-content-type", *blobContentType)
}
if blobContentEncoding != nil {
req.Header.Set("x-ms-blob-content-encoding", *blobContentEncoding)
}
if blobContentLanguage != nil {
req.Header.Set("x-ms-blob-content-language", *blobContentLanguage)
}
if blobContentMD5 != nil {
// Content-MD5 carries the hash as a base64-encoded string; fmt.Sprintf("%v", ...) on a
// []byte would emit a decimal dump like "[72 101 108]" instead. Requires the
// encoding/base64 import.
req.Header.Set("x-ms-blob-content-md5", base64.StdEncoding.EncodeToString(blobContentMD5))
}
if blobCacheControl != nil {
req.Header.Set("x-ms-blob-cache-control", *blobCacheControl)
}
if metadata != nil {
for k, v := range metadata {
req.Header.Set("x-ms-meta-"+k, v)
}
}
if leaseID != nil {
req.Header.Set("x-ms-lease-id", *leaseID)
}
if blobContentDisposition != nil {
req.Header.Set("x-ms-blob-content-disposition", *blobContentDisposition)
}
if ifModifiedSince != nil {
req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123))
}
if ifUnmodifiedSince != nil {
req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123))
}
if ifMatches != nil {
req.Header.Set("If-Match", string(*ifMatches))
}
if ifNoneMatch != nil {
req.Header.Set("If-None-Match", string(*ifNoneMatch))
}
if blobContentLength != nil {
req.Header.Set("x-ms-blob-content-length", fmt.Sprintf("%v", *blobContentLength))
}
if blobSequenceNumber != nil {
req.Header.Set("x-ms-blob-sequence-number", fmt.Sprintf("%v", *blobSequenceNumber))
}
req.Header.Set("x-ms-version", ServiceVersion)
if requestID != nil {
req.Header.Set("x-ms-client-request-id", *requestID)
}
req.Header.Set("x-ms-blob-type", "PageBlob")
return req, nil
}
// createResponder handles the response to the Create request.
func (client pageBlobsClient) createResponder(resp pipeline.Response) (pipeline.Response, error) {
err := validateResponse(resp, http.StatusOK, http.StatusCreated)
if resp == nil {
return nil, err
}
io.Copy(ioutil.Discard, resp.Response().Body)
resp.Response().Body.Close()
return &PageBlobsCreateResponse{rawResponse: resp.Response()}, err
}
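// Example (not part of the generated code): creating an empty 1 MiB page blob.
// blobContentLength must be 512-byte aligned; contentLength is 0 because the
// Create request itself carries no body (pages are uploaded separately).
func exampleCreatePageBlob(ctx context.Context, client pageBlobsClient) error {
	size := int64(1024 * 1024) // maximum size of the page blob, in bytes
	_, err := client.Create(ctx, 0, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, &size, nil, nil)
	return err
}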
// GetPageRanges the Get Page Ranges operation returns the list of valid page ranges for a page blob or snapshot of a
// page blob
//
@@ -33,26 +340,22 @@ func newPageBlobsClient(url url.URL, p pipeline.Pipeline) pageBlobsClient {
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob">Creating
// a Snapshot of a Blob.</a> timeout is the timeout parameter is expressed in seconds. For more information, see <a
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
// Timeouts for Blob Service Operations.</a> rangeParameter is return only the bytes of the blob in the specified
// range. leaseID is if specified, the operation only succeeds if the container's lease is active and matches this ID.
// ifModifiedSince is specify this header value to operate only on a blob if it has been modified since the specified
// date/time. ifUnmodifiedSince is specify this header value to operate only on a blob if it has not been modified
// since the specified date/time. ifMatches is specify an ETag value to operate only on blobs with a matching value.
// ifNoneMatch is specify an ETag value to operate only on blobs without a matching value. requestID is provides a
// client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
// analytics logging is enabled.
func (client pageBlobsClient) GetPageRanges(ctx context.Context, snapshot *string, timeout *int32, rangeParameter *string, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, requestID *string) (*PageList, error) {
    if err := validate([]validation{
        {targetValue: timeout,
            constraints: []constraint{{target: "timeout", name: null, rule: false,
                chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
        return nil, err
    }
    req, err := client.getPageRangesPreparer(snapshot, timeout, rangeParameter, leaseID, ifModifiedSince, ifUnmodifiedSince, ifMatches, ifNoneMatch, requestID)
    if err != nil {
        return nil, err
    }
@@ -64,21 +367,18 @@ func (client pageBlobsClient) GetPageRanges(ctx context.Context, snapshot *time.
}
// getPageRangesPreparer prepares the GetPageRanges request.
func (client pageBlobsClient) getPageRangesPreparer(snapshot *string, timeout *int32, rangeParameter *string, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
    req, err := pipeline.NewRequest("GET", client.url, nil)
    if err != nil {
        return req, pipeline.NewError(err, "failed to create request")
    }
    params := req.URL.Query()
    if snapshot != nil && len(*snapshot) > 0 {
        params.Set("snapshot", *snapshot)
    }
    if timeout != nil {
        params.Set("timeout", fmt.Sprintf("%v", *timeout))
    }
    params.Set("comp", "pagelist")
    req.URL.RawQuery = params.Encode()
    if rangeParameter != nil {
@@ -130,64 +430,67 @@ func (client pageBlobsClient) getPageRangesResponder(resp pipeline.Response) (pi
    return result, nil
}
// GetPageRangesDiff [Update] The Get Page Ranges Diff operation returns the list of valid page ranges for a page blob
// that were changed between target blob and previous snapshot.
//
// snapshot is the snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to
// retrieve. For more information on working with blob snapshots, see <a
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob">Creating
// a Snapshot of a Blob.</a> timeout is the timeout parameter is expressed in seconds. For more information, see <a
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
// Timeouts for Blob Service Operations.</a> prevsnapshot is optional in version 2015-07-08 and newer. The prevsnapshot
// parameter is a DateTime value that specifies that the response will contain only pages that were changed between
// target blob and previous snapshot. Changed pages include both updated and cleared pages. The target blob may be a
// snapshot, as long as the snapshot specified by prevsnapshot is the older of the two. Note that incremental snapshots
// are currently supported only for blobs created on or after January 1, 2016. rangeParameter is return only the bytes
// of the blob in the specified range. leaseID is if specified, the operation only succeeds if the container's lease is
// active and matches this ID. ifModifiedSince is specify this header value to operate only on a blob if it has been
// modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only on a blob if
// it has not been modified since the specified date/time. ifMatches is specify an ETag value to operate only on blobs
// with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs without a matching value.
// requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics
// logs when storage analytics logging is enabled.
func (client pageBlobsClient) GetPageRangesDiff(ctx context.Context, snapshot *string, timeout *int32, prevsnapshot *string, rangeParameter *string, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, requestID *string) (*PageList, error) {
    if err := validate([]validation{
        {targetValue: timeout,
            constraints: []constraint{{target: "timeout", name: null, rule: false,
                chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
        return nil, err
    }
    req, err := client.getPageRangesDiffPreparer(snapshot, timeout, prevsnapshot, rangeParameter, leaseID, ifModifiedSince, ifUnmodifiedSince, ifMatches, ifNoneMatch, requestID)
    if err != nil {
        return nil, err
    }
    resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.getPageRangesDiffResponder}, req)
    if err != nil {
        return nil, err
    }
    return resp.(*PageList), err
}
// getPageRangesDiffPreparer prepares the GetPageRangesDiff request.
func (client pageBlobsClient) getPageRangesDiffPreparer(snapshot *string, timeout *int32, prevsnapshot *string, rangeParameter *string, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
    req, err := pipeline.NewRequest("GET", client.url, nil)
    if err != nil {
        return req, pipeline.NewError(err, "failed to create request")
    }
    params := req.URL.Query()
    if snapshot != nil && len(*snapshot) > 0 {
        params.Set("snapshot", *snapshot)
    }
    if timeout != nil {
        params.Set("timeout", fmt.Sprintf("%v", *timeout))
    }
    if prevsnapshot != nil && len(*prevsnapshot) > 0 {
        params.Set("prevsnapshot", *prevsnapshot)
    }
    params.Set("comp", "pagelist")
    req.URL.RawQuery = params.Encode()
    if rangeParameter != nil {
        req.Header.Set("x-ms-range", *rangeParameter)
    }
    if leaseID != nil {
        req.Header.Set("x-ms-lease-id", *leaseID)
    }
    if ifModifiedSince != nil {
        req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123))
@@ -201,7 +504,6 @@ func (client pageBlobsClient) incrementalCopyPreparer(copySource string, timeout
    if ifNoneMatch != nil {
        req.Header.Set("If-None-Match", string(*ifNoneMatch))
    }
    req.Header.Set("x-ms-version", ServiceVersion)
    if requestID != nil {
        req.Header.Set("x-ms-client-request-id", *requestID)
@@ -209,24 +511,194 @@ func (client pageBlobsClient) incrementalCopyPreparer(copySource string, timeout
    return req, nil
}
// getPageRangesDiffResponder handles the response to the GetPageRangesDiff request.
func (client pageBlobsClient) getPageRangesDiffResponder(resp pipeline.Response) (pipeline.Response, error) {
    err := validateResponse(resp, http.StatusOK)
    if resp == nil {
        return nil, err
    }
    result := &PageList{rawResponse: resp.Response()}
    if err != nil {
        return result, err
    }
    defer resp.Response().Body.Close()
    b, err := ioutil.ReadAll(resp.Response().Body)
    if err != nil {
        return result, NewResponseError(err, resp.Response(), "failed to read response body")
    }
    if len(b) > 0 {
        err = xml.Unmarshal(b, result)
        if err != nil {
            return result, NewResponseError(err, resp.Response(), "failed to unmarshal response body")
        }
    }
    return result, nil
}
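
A hedged sketch of the diff call above; the prevsnapshot literal is illustrative only, standing in for the opaque DateTime string the service returned when the snapshot was taken, and the function name is hypothetical:

// exampleGetPageRangesDiff is a hypothetical sketch: report the pages changed
// in the live blob since an earlier snapshot (updated and cleared pages alike).
func exampleGetPageRangesDiff(ctx context.Context, client pageBlobsClient) (*PageList, error) {
    prevsnapshot := "2017-07-29T18:25:43.0000000Z" // illustrative snapshot timestamp
    return client.GetPageRangesDiff(ctx, nil, nil, &prevsnapshot, nil, nil, nil, nil, nil, nil, nil)
}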
// Resize resize the Blob
//
// blobContentLength is this header specifies the maximum size for the page blob, up to 1 TB. The page blob size must
// be aligned to a 512-byte boundary. timeout is the timeout parameter is expressed in seconds. For more information,
// see <a
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
// Timeouts for Blob Service Operations.</a> leaseID is if specified, the operation only succeeds if the container's
// lease is active and matches this ID. ifModifiedSince is specify this header value to operate only on a blob if it
// has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only on a
// blob if it has not been modified since the specified date/time. ifMatches is specify an ETag value to operate only
// on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs without a matching
// value. requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded in the
// analytics logs when storage analytics logging is enabled.
func (client pageBlobsClient) Resize(ctx context.Context, blobContentLength int64, timeout *int32, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, requestID *string) (*PageBlobsResizeResponse, error) {
if err := validate([]validation{
{targetValue: timeout,
constraints: []constraint{{target: "timeout", name: null, rule: false,
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
return nil, err
}
req, err := client.resizePreparer(blobContentLength, timeout, leaseID, ifModifiedSince, ifUnmodifiedSince, ifMatches, ifNoneMatch, requestID)
if err != nil {
return nil, err
}
resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.resizeResponder}, req)
if err != nil {
return nil, err
}
return resp.(*PageBlobsResizeResponse), err
}
// resizePreparer prepares the Resize request.
func (client pageBlobsClient) resizePreparer(blobContentLength int64, timeout *int32, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
req, err := pipeline.NewRequest("PUT", client.url, nil)
if err != nil {
return req, pipeline.NewError(err, "failed to create request")
}
params := req.URL.Query()
if timeout != nil {
params.Set("timeout", fmt.Sprintf("%v", *timeout))
}
params.Set("comp", "properties")
req.URL.RawQuery = params.Encode()
if leaseID != nil {
req.Header.Set("x-ms-lease-id", *leaseID)
}
if ifModifiedSince != nil {
req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123))
}
if ifUnmodifiedSince != nil {
req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123))
}
if ifMatches != nil {
req.Header.Set("If-Match", string(*ifMatches))
}
if ifNoneMatch != nil {
req.Header.Set("If-None-Match", string(*ifNoneMatch))
}
req.Header.Set("x-ms-blob-content-length", fmt.Sprintf("%v", blobContentLength))
req.Header.Set("x-ms-version", ServiceVersion)
if requestID != nil {
req.Header.Set("x-ms-client-request-id", *requestID)
}
return req, nil
}
// resizeResponder handles the response to the Resize request.
func (client pageBlobsClient) resizeResponder(resp pipeline.Response) (pipeline.Response, error) {
err := validateResponse(resp, http.StatusOK)
if resp == nil {
return nil, err
}
io.Copy(ioutil.Discard, resp.Response().Body)
resp.Response().Body.Close()
return &PageBlobsResizeResponse{rawResponse: resp.Response()}, err
}
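
Because the doc comment above requires 512-byte alignment, callers have to round a desired size up before calling Resize. A hedged sketch of that arithmetic, with a hypothetical function name and ctx/client assumed as before:

// exampleResize is a hypothetical sketch: grow a page blob to hold n bytes,
// rounding up to the required 512-byte boundary (n = 1000 becomes 1024).
func exampleResize(ctx context.Context, client pageBlobsClient, n int64) error {
    blobContentLength := ((n + 511) / 512) * 512
    _, err := client.Resize(ctx, blobContentLength, nil, nil, nil, nil, nil, nil, nil)
    return err
}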
// UpdateSequenceNumber update the sequence number of the blob
//
// sequenceNumberAction is required if the x-ms-blob-sequence-number header is set for the request. This property
// applies to page blobs only. This property indicates how the service should modify the blob's sequence number timeout
// is the timeout parameter is expressed in seconds. For more information, see <a
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
// Timeouts for Blob Service Operations.</a> leaseID is if specified, the operation only succeeds if the container's
// lease is active and matches this ID. ifModifiedSince is specify this header value to operate only on a blob if it
// has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only on a
// blob if it has not been modified since the specified date/time. ifMatches is specify an ETag value to operate only
// on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs without a matching
// value. blobSequenceNumber is set for page blobs only. The sequence number is a user-controlled value that you can
// use to track requests. The value of the sequence number must be between 0 and 2^63 - 1. requestID is provides a
// client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
// analytics logging is enabled.
func (client pageBlobsClient) UpdateSequenceNumber(ctx context.Context, sequenceNumberAction SequenceNumberActionType, timeout *int32, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, blobSequenceNumber *int64, requestID *string) (*PageBlobsUpdateSequenceNumberResponse, error) {
if err := validate([]validation{
{targetValue: timeout,
constraints: []constraint{{target: "timeout", name: null, rule: false,
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
return nil, err
}
req, err := client.updateSequenceNumberPreparer(sequenceNumberAction, timeout, leaseID, ifModifiedSince, ifUnmodifiedSince, ifMatches, ifNoneMatch, blobSequenceNumber, requestID)
if err != nil {
return nil, err
}
resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.updateSequenceNumberResponder}, req)
if err != nil {
return nil, err
}
return resp.(*PageBlobsUpdateSequenceNumberResponse), err
}
// updateSequenceNumberPreparer prepares the UpdateSequenceNumber request.
func (client pageBlobsClient) updateSequenceNumberPreparer(sequenceNumberAction SequenceNumberActionType, timeout *int32, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, blobSequenceNumber *int64, requestID *string) (pipeline.Request, error) {
req, err := pipeline.NewRequest("PUT", client.url, nil)
if err != nil {
return req, pipeline.NewError(err, "failed to create request")
}
params := req.URL.Query()
if timeout != nil {
params.Set("timeout", fmt.Sprintf("%v", *timeout))
}
params.Set("comp", "properties")
req.URL.RawQuery = params.Encode()
if leaseID != nil {
req.Header.Set("x-ms-lease-id", *leaseID)
}
if ifModifiedSince != nil {
req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123))
}
if ifUnmodifiedSince != nil {
req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123))
}
if ifMatches != nil {
req.Header.Set("If-Match", string(*ifMatches))
}
if ifNoneMatch != nil {
req.Header.Set("If-None-Match", string(*ifNoneMatch))
}
req.Header.Set("x-ms-sequence-number-action", fmt.Sprintf("%v", sequenceNumberAction))
if blobSequenceNumber != nil {
req.Header.Set("x-ms-blob-sequence-number", fmt.Sprintf("%v", *blobSequenceNumber))
}
req.Header.Set("x-ms-version", ServiceVersion)
if requestID != nil {
req.Header.Set("x-ms-client-request-id", *requestID)
}
return req, nil
}
// updateSequenceNumberResponder handles the response to the UpdateSequenceNumber request.
func (client pageBlobsClient) updateSequenceNumberResponder(resp pipeline.Response) (pipeline.Response, error) {
err := validateResponse(resp, http.StatusOK)
if resp == nil {
return nil, err
}
io.Copy(ioutil.Discard, resp.Response().Body)
resp.Response().Body.Close()
return &PageBlobsUpdateSequenceNumberResponse{rawResponse: resp.Response()}, err
}
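
A hedged sketch of the action/sequence-number pairing described above. SequenceNumberActionUpdate is assumed to be the generated constant whose wire value is "update"; the function name is hypothetical:

// exampleUpdateSequenceNumber is a hypothetical sketch: pin the blob's
// sequence number to an explicit value in [0, 2^63 - 1].
func exampleUpdateSequenceNumber(ctx context.Context, client pageBlobsClient) error {
    seq := int64(42)
    _, err := client.UpdateSequenceNumber(ctx, SequenceNumberActionUpdate, nil, nil, nil, nil, nil, nil, &seq, nil)
    return err
}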
// UploadPages the Upload Pages operation writes a range of pages to a page blob
//
// contentLength is the length of the request. body is initial data body will be closed upon successful return. Callers
// should ensure closure when receiving an error. timeout is the timeout parameter is expressed in seconds. For more
// information, see <a
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
// Timeouts for Blob Service Operations.</a> rangeParameter is return only the bytes of the blob in the specified
// range. leaseID is if specified, the operation only succeeds if the container's lease is active and matches this ID.
@@ -239,26 +711,26 @@ func (client pageBlobsClient) incrementalCopyResponder(resp pipeline.Response) (
// to operate only on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs
// without a matching value. requestID is provides a client-generated, opaque value with a 1 KB character limit that is
// recorded in the analytics logs when storage analytics logging is enabled.
func (client pageBlobsClient) UploadPages(ctx context.Context, contentLength int64, body io.ReadSeeker, timeout *int32, rangeParameter *string, leaseID *string, ifSequenceNumberLessThanOrEqualTo *int64, ifSequenceNumberLessThan *int64, ifSequenceNumberEqualTo *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, requestID *string) (*PageBlobsUploadPagesResponse, error) {
    if err := validate([]validation{
        {targetValue: timeout,
            constraints: []constraint{{target: "timeout", name: null, rule: false,
                chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
        return nil, err
    }
    req, err := client.uploadPagesPreparer(contentLength, body, timeout, rangeParameter, leaseID, ifSequenceNumberLessThanOrEqualTo, ifSequenceNumberLessThan, ifSequenceNumberEqualTo, ifModifiedSince, ifUnmodifiedSince, ifMatches, ifNoneMatch, requestID)
    if err != nil {
        return nil, err
    }
    resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.uploadPagesResponder}, req)
    if err != nil {
        return nil, err
    }
    return resp.(*PageBlobsUploadPagesResponse), err
}

// uploadPagesPreparer prepares the UploadPages request.
func (client pageBlobsClient) uploadPagesPreparer(contentLength int64, body io.ReadSeeker, timeout *int32, rangeParameter *string, leaseID *string, ifSequenceNumberLessThanOrEqualTo *int64, ifSequenceNumberLessThan *int64, ifSequenceNumberEqualTo *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatches *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
    req, err := pipeline.NewRequest("PUT", client.url, body)
    if err != nil {
        return req, pipeline.NewError(err, "failed to create request")
@@ -269,10 +741,10 @@ func (client pageBlobsClient) putPagePreparer(pageWrite PageWriteType, body io.R
    }
    params.Set("comp", "page")
    req.URL.RawQuery = params.Encode()
    req.Header.Set("Content-Length", fmt.Sprintf("%v", contentLength))
    if rangeParameter != nil {
        req.Header.Set("x-ms-range", *rangeParameter)
    }
    if leaseID != nil {
        req.Header.Set("x-ms-lease-id", *leaseID)
    }
@@ -301,14 +773,17 @@ func (client pageBlobsClient) putPagePreparer(pageWrite PageWriteType, body io.R
    if requestID != nil {
        req.Header.Set("x-ms-client-request-id", *requestID)
    }
    req.Header.Set("x-ms-page-write", "update")
    return req, nil
}

// uploadPagesResponder handles the response to the UploadPages request.
func (client pageBlobsClient) uploadPagesResponder(resp pipeline.Response) (pipeline.Response, error) {
    err := validateResponse(resp, http.StatusOK, http.StatusCreated)
    if resp == nil {
        return nil, err
    }
    io.Copy(ioutil.Discard, resp.Response().Body)
    resp.Response().Body.Close()
    return &PageBlobsUploadPagesResponse{rawResponse: resp.Response()}, err
}
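
A hedged sketch of the contentLength/range pairing UploadPages expects: the x-ms-range value must describe exactly contentLength bytes, and bytes.Reader from the standard library supplies the io.ReadSeeker the signature needs. The function name is hypothetical:

// exampleUploadPages is a hypothetical sketch: write one 512-byte page at
// offset 0. The range header is inclusive, so 512 bytes span bytes=0-511.
func exampleUploadPages(ctx context.Context, client pageBlobsClient) error {
    page := make([]byte, 512)
    rangeParameter := "bytes=0-511"
    _, err := client.UploadPages(ctx, int64(len(page)), bytes.NewReader(page), nil, &rangeParameter, nil, nil, nil, nil, nil, nil, nil, nil, nil)
    return err
}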


@@ -9,6 +9,7 @@ import (
    "encoding/xml"
    "fmt"
    "github.com/Azure/azure-pipeline-go/pipeline"
    "io"
    "io/ioutil"
    "net/http"
    "net/url"
@@ -93,33 +94,33 @@ func (client serviceClient) getPropertiesResponder(resp pipeline.Response) (pipe
    return result, nil
}

// GetStatistics retrieves statistics related to replication for the Blob service. It is only available on the
// secondary location endpoint when read-access geo-redundant replication is enabled for the storage account.
//
// timeout is the timeout parameter is expressed in seconds. For more information, see <a
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
// Timeouts for Blob Service Operations.</a> requestID is provides a client-generated, opaque value with a 1 KB
// character limit that is recorded in the analytics logs when storage analytics logging is enabled.
func (client serviceClient) GetStatistics(ctx context.Context, timeout *int32, requestID *string) (*StorageServiceStats, error) {
    if err := validate([]validation{
        {targetValue: timeout,
            constraints: []constraint{{target: "timeout", name: null, rule: false,
                chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
        return nil, err
    }
    req, err := client.getStatisticsPreparer(timeout, requestID)
    if err != nil {
        return nil, err
    }
    resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.getStatisticsResponder}, req)
    if err != nil {
        return nil, err
    }
    return resp.(*StorageServiceStats), err
}

// getStatisticsPreparer prepares the GetStatistics request.
func (client serviceClient) getStatisticsPreparer(timeout *int32, requestID *string) (pipeline.Request, error) {
    req, err := pipeline.NewRequest("GET", client.url, nil)
    if err != nil {
        return req, pipeline.NewError(err, "failed to create request")
@@ -138,8 +139,8 @@ func (client serviceClient) getStatsPreparer(timeout *int32, requestID *string)
    return req, nil
}

// getStatisticsResponder handles the response to the GetStatistics request.
func (client serviceClient) getStatisticsResponder(resp pipeline.Response) (pipeline.Response, error) {
    err := validateResponse(resp, http.StatusOK)
    if resp == nil {
        return nil, err
@@ -162,7 +163,8 @@ func (client serviceClient) getStatsResponder(resp pipeline.Response) (pipeline.
    return result, nil
}
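
As the doc comment above notes, this call only succeeds against the secondary location. A hedged sketch, assuming the client was built from the account's secondary hostname and with a hypothetical function name:

// exampleGetStatistics is a hypothetical sketch: the client must target the
// secondary endpoint (https://<account>-secondary.blob.core.windows.net) of an
// RA-GRS-enabled account, otherwise the service rejects the request.
func exampleGetStatistics(ctx context.Context, client serviceClient) (*StorageServiceStats, error) {
    return client.GetStatistics(ctx, nil, nil)
}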
// ListContainersSegment the List Containers Segment operation returns a list of the containers under the specified
// account
//
// prefix is filters the results to return only containers whose name begins with the specified prefix. marker is a
// string value that identifies the portion of the list of containers to be returned with the next listing operation.
@@ -179,7 +181,7 @@ func (client serviceClient) getStatsResponder(resp pipeline.Response) (pipeline.
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
// Timeouts for Blob Service Operations.</a> requestID is provides a client-generated, opaque value with a 1 KB
// character limit that is recorded in the analytics logs when storage analytics logging is enabled.
func (client serviceClient) ListContainersSegment(ctx context.Context, prefix *string, marker *string, maxresults *int32, include ListContainersIncludeType, timeout *int32, requestID *string) (*ListContainersResponse, error) {
    if err := validate([]validation{
        {targetValue: maxresults,
            constraints: []constraint{{target: "maxresults", name: null, rule: false,
@@ -189,28 +191,28 @@ func (client serviceClient) ListContainers(ctx context.Context, prefix *string,
                chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
        return nil, err
    }
    req, err := client.listContainersSegmentPreparer(prefix, marker, maxresults, include, timeout, requestID)
    if err != nil {
        return nil, err
    }
    resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.listContainersSegmentResponder}, req)
    if err != nil {
        return nil, err
    }
    return resp.(*ListContainersResponse), err
}

// listContainersSegmentPreparer prepares the ListContainersSegment request.
func (client serviceClient) listContainersSegmentPreparer(prefix *string, marker *string, maxresults *int32, include ListContainersIncludeType, timeout *int32, requestID *string) (pipeline.Request, error) {
    req, err := pipeline.NewRequest("GET", client.url, nil)
    if err != nil {
        return req, pipeline.NewError(err, "failed to create request")
    }
    params := req.URL.Query()
    if prefix != nil && len(*prefix) > 0 {
        params.Set("prefix", *prefix)
    }
    if marker != nil && len(*marker) > 0 {
        params.Set("marker", *marker)
    }
    if maxresults != nil {
@@ -231,8 +233,8 @@ func (client serviceClient) listContainersPreparer(prefix *string, marker *strin
    return req, nil
}
// listContainersSegmentResponder handles the response to the ListContainersSegment request.
func (client serviceClient) listContainersSegmentResponder(resp pipeline.Response) (pipeline.Response, error) {
    err := validateResponse(resp, http.StatusOK)
    if resp == nil {
        return nil, err
@@ -267,40 +269,26 @@ func (client serviceClient) SetProperties(ctx context.Context, storageServicePro
    if err := validate([]validation{
        {targetValue: storageServiceProperties,
            constraints: []constraint{{target: "storageServiceProperties.Logging", name: null, rule: false,
                chain: []constraint{{target: "storageServiceProperties.Logging.RetentionPolicy", name: null, rule: true,
                    chain: []constraint{{target: "storageServiceProperties.Logging.RetentionPolicy.Days", name: null, rule: false,
                        chain: []constraint{{target: "storageServiceProperties.Logging.RetentionPolicy.Days", name: inclusiveMinimum, rule: 1, chain: nil}}},
                    }},
                }},
                {target: "storageServiceProperties.HourMetrics", name: null, rule: false,
                    chain: []constraint{{target: "storageServiceProperties.HourMetrics.RetentionPolicy", name: null, rule: false,
                        chain: []constraint{{target: "storageServiceProperties.HourMetrics.RetentionPolicy.Days", name: null, rule: false,
                            chain: []constraint{{target: "storageServiceProperties.HourMetrics.RetentionPolicy.Days", name: inclusiveMinimum, rule: 1, chain: nil}}},
                        }},
                    }},
                {target: "storageServiceProperties.MinuteMetrics", name: null, rule: false,
                    chain: []constraint{{target: "storageServiceProperties.MinuteMetrics.RetentionPolicy", name: null, rule: false,
                        chain: []constraint{{target: "storageServiceProperties.MinuteMetrics.RetentionPolicy.Days", name: null, rule: false,
                            chain: []constraint{{target: "storageServiceProperties.MinuteMetrics.RetentionPolicy.Days", name: inclusiveMinimum, rule: 1, chain: nil}}},
                        }},
                    }},
                {target: "storageServiceProperties.DeleteRetentionPolicy", name: null, rule: false,
                    chain: []constraint{{target: "storageServiceProperties.DeleteRetentionPolicy.Days", name: null, rule: false,
                        chain: []constraint{{target: "storageServiceProperties.DeleteRetentionPolicy.Days", name: inclusiveMinimum, rule: 1, chain: nil}}},
                    }}}},
        {targetValue: timeout,
            constraints: []constraint{{target: "timeout", name: null, rule: false,
@@ -353,5 +341,7 @@ func (client serviceClient) setPropertiesResponder(resp pipeline.Response) (pipe
    if resp == nil {
        return nil, err
    }
    io.Copy(ioutil.Discard, resp.Response().Body)
    resp.Response().Body.Close()
    return &ServiceSetPropertiesResponse{rawResponse: resp.Response()}, err
}
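
The relaxed validation above now only enforces RetentionPolicy Days >= 1 when a policy is present. A hedged sketch of a minimal properties value that passes it; the StorageServiceProperties and RetentionPolicy field names are assumed from the generated models, and the function name is hypothetical:

// exampleSetProperties is a hypothetical sketch: enable blob soft delete for
// seven days. Days >= 1 satisfies the constraint chain above.
func exampleSetProperties(ctx context.Context, client serviceClient) error {
    days := int32(7)
    props := StorageServiceProperties{
        DeleteRetentionPolicy: &RetentionPolicy{Enabled: true, Days: &days}, // assumed field names
    }
    _, err := client.SetProperties(ctx, props, nil, nil)
    return err
}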


@@ -5,11 +5,10 @@ package azblob

import (
    "fmt"
    "reflect"
    "regexp"
    "strings"

    "github.com/Azure/azure-pipeline-go/pipeline"
)

// Constraint stores constraint name, target field name
@@ -72,7 +71,6 @@ func validate(m []validation) error {
            err = validateFloat(v, constraint)
        case reflect.Array, reflect.Slice, reflect.Map:
            err = validateArrayMap(v, constraint)
        default:
            err = createError(v, constraint, fmt.Sprintf("unknown type %v", v.Kind()))
        }
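
For reference, a hedged sketch of how the generated clients drive this dispatcher, mirroring the timeout constraint used throughout this commit; the function name is hypothetical:

// exampleValidateTimeout is a hypothetical sketch: an optional timeout, when
// present, must be >= 0 (null with rule false means nil is acceptable).
func exampleValidateTimeout(timeout *int32) error {
    return validate([]validation{
        {targetValue: timeout,
            constraints: []constraint{{target: "timeout", name: null, rule: false,
                chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}})
}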


@@ -5,7 +5,7 @@ package azblob

// UserAgent returns the UserAgent string to use when sending http.Requests.
func UserAgent() string {
    return "Azure-SDK-For-Go/0.0.0 azblob/2017-07-29"
}

// Version returns the semantic version (see http://semver.org) of the client.

2017-07-29/azblob/zz_response_helpers.go Normal file → Executable file

@@ -1,40 +1,22 @@
package azblob

import (
    "context"
    "io"
    "net/http"
    "time"
)

// BlobHTTPHeaders contains read/writeable blob properties.
type BlobHTTPHeaders struct {
    ContentType        string
    ContentMD5         []byte
    ContentEncoding    string
    ContentLanguage    string
    ContentDisposition string
    CacheControl       string
}

// NewHTTPHeaders returns the user-modifiable properties for this blob.
func (bgpr BlobsGetPropertiesResponse) NewHTTPHeaders() BlobHTTPHeaders {
    return BlobHTTPHeaders{
@@ -47,54 +29,211 @@ func (bgpr BlobsGetPropertiesResponse) NewHTTPHeaders() BlobHTTPHeaders {
    }
}

///////////////////////////////////////////////////////////////////////////////

// NewHTTPHeaders returns the user-modifiable properties for this blob.
func (dr downloadResponse) NewHTTPHeaders() BlobHTTPHeaders {
    return BlobHTTPHeaders{
        ContentType:        dr.ContentType(),
        ContentEncoding:    dr.ContentEncoding(),
        ContentLanguage:    dr.ContentLanguage(),
        ContentDisposition: dr.ContentDisposition(),
        CacheControl:       dr.CacheControl(),
        ContentMD5:         dr.ContentMD5(),
    }
}

///////////////////////////////////////////////////////////////////////////////

// DownloadResponse wraps AutoRest generated downloadResponse and helps to provide info for retry.
type DownloadResponse struct {
    r       *downloadResponse
    ctx     context.Context
    b       BlobURL
    getInfo HTTPGetterInfo
}

// Body constructs a new RetryReader stream for reading data. If a connection fails
// while reading, it will make additional requests to reestablish a connection and
// continue reading. Specifying a RetryReaderOptions value with MaxRetryRequests set to 0
// (the default) returns the original response body and no retries will be performed.
func (r *DownloadResponse) Body(o RetryReaderOptions) io.ReadCloser {
    if o.MaxRetryRequests == 0 { // No additional retries
        return r.Response().Body
    }
    return NewRetryReader(r.ctx, r.Response(), r.getInfo, o,
        func(ctx context.Context, getInfo HTTPGetterInfo) (*http.Response, error) {
            resp, err := r.b.Download(ctx, getInfo.Offset, getInfo.Count,
                BlobAccessConditions{
                    HTTPAccessConditions: HTTPAccessConditions{IfMatch: getInfo.ETag},
                },
                false)
            return resp.Response(), err
        },
    )
}

// Response returns the raw HTTP response object.
func (r DownloadResponse) Response() *http.Response {
    return r.r.Response()
}

// NewHTTPHeaders returns the user-modifiable properties for this blob.
func (r DownloadResponse) NewHTTPHeaders() BlobHTTPHeaders {
    return r.r.NewHTTPHeaders()
}

// BlobContentMD5 returns the value for header x-ms-blob-content-md5.
func (r DownloadResponse) BlobContentMD5() []byte {
    return r.r.BlobContentMD5()
}

// ContentMD5 returns the value for header Content-MD5.
func (r DownloadResponse) ContentMD5() []byte {
    return r.r.ContentMD5()
}

// StatusCode returns the HTTP status code of the response, e.g. 200.
func (r DownloadResponse) StatusCode() int {
    return r.r.StatusCode()
}
// Status returns the HTTP status message of the response, e.g. "200 OK".
func (r DownloadResponse) Status() string {
return r.r.Status()
}
// AcceptRanges returns the value for header Accept-Ranges.
func (r DownloadResponse) AcceptRanges() string {
return r.r.AcceptRanges()
}
// BlobCommittedBlockCount returns the value for header x-ms-blob-committed-block-count.
func (r DownloadResponse) BlobCommittedBlockCount() int32 {
return r.r.BlobCommittedBlockCount()
}
// BlobSequenceNumber returns the value for header x-ms-blob-sequence-number.
func (r DownloadResponse) BlobSequenceNumber() int64 {
return r.r.BlobSequenceNumber()
}
// BlobType returns the value for header x-ms-blob-type.
func (r DownloadResponse) BlobType() BlobType {
return r.r.BlobType()
}
// CacheControl returns the value for header Cache-Control.
func (r DownloadResponse) CacheControl() string {
return r.r.CacheControl()
}
// ContentDisposition returns the value for header Content-Disposition.
func (r DownloadResponse) ContentDisposition() string {
return r.r.ContentDisposition()
}
// ContentEncoding returns the value for header Content-Encoding.
func (r DownloadResponse) ContentEncoding() string {
return r.r.ContentEncoding()
}
// ContentLanguage returns the value for header Content-Language.
func (r DownloadResponse) ContentLanguage() string {
return r.r.ContentLanguage()
}
// ContentLength returns the value for header Content-Length.
func (r DownloadResponse) ContentLength() int64 {
return r.r.ContentLength()
}
// ContentRange returns the value for header Content-Range.
func (r DownloadResponse) ContentRange() string {
return r.r.ContentRange()
}
// ContentType returns the value for header Content-Type.
func (r DownloadResponse) ContentType() string {
return r.r.ContentType()
}
// CopyCompletionTime returns the value for header x-ms-copy-completion-time.
func (r DownloadResponse) CopyCompletionTime() time.Time {
return r.r.CopyCompletionTime()
}
// CopyID returns the value for header x-ms-copy-id.
func (r DownloadResponse) CopyID() string {
return r.r.CopyID()
}
// CopyProgress returns the value for header x-ms-copy-progress.
func (r DownloadResponse) CopyProgress() string {
return r.r.CopyProgress()
}
// CopySource returns the value for header x-ms-copy-source.
func (r DownloadResponse) CopySource() string {
return r.r.CopySource()
}
// CopyStatus returns the value for header x-ms-copy-status.
func (r DownloadResponse) CopyStatus() CopyStatusType {
return r.r.CopyStatus()
}
// CopyStatusDescription returns the value for header x-ms-copy-status-description.
func (r DownloadResponse) CopyStatusDescription() string {
return r.r.CopyStatusDescription()
}
// Date returns the value for header Date.
func (r DownloadResponse) Date() time.Time {
return r.r.Date()
}
// ETag returns the value for header ETag.
func (r DownloadResponse) ETag() ETag {
return r.r.ETag()
}
// IsServerEncrypted returns the value for header x-ms-server-encrypted.
func (r DownloadResponse) IsServerEncrypted() string {
return r.r.IsServerEncrypted()
}
// LastModified returns the value for header Last-Modified.
func (r DownloadResponse) LastModified() time.Time {
return r.r.LastModified()
}
// LeaseDuration returns the value for header x-ms-lease-duration.
func (r DownloadResponse) LeaseDuration() LeaseDurationType {
return r.r.LeaseDuration()
}
// LeaseState returns the value for header x-ms-lease-state.
func (r DownloadResponse) LeaseState() LeaseStateType {
return r.r.LeaseState()
}
// LeaseStatus returns the value for header x-ms-lease-status.
func (r DownloadResponse) LeaseStatus() LeaseStatusType {
return r.r.LeaseStatus()
}
// RequestID returns the value for header x-ms-request-id.
func (r DownloadResponse) RequestID() string {
return r.r.RequestID()
}
// Version returns the value for header x-ms-version.
func (r DownloadResponse) Version() string {
return r.r.Version()
}
// NewMetadata returns user-defined key/value pairs.
func (r DownloadResponse) NewMetadata() Metadata {
return r.r.NewMetadata()
} }
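
Pulling the pieces together, a hedged end-to-end sketch of the retrying body: blobURL is assumed to be a BlobURL, Download is assumed to return a *DownloadResponse, and offset 0 with count 0 is assumed to mean "to the end of the blob", matching the getter shown in Body above. The function name is hypothetical, and ioutil is assumed imported:

// exampleDownloadAllBytes is a hypothetical sketch: read a whole blob through
// a body that reconnects and resumes up to three times on transient failures.
func exampleDownloadAllBytes(ctx context.Context, blobURL BlobURL) ([]byte, error) {
    resp, err := blobURL.Download(ctx, 0, 0, BlobAccessConditions{}, false)
    if err != nil {
        return nil, err
    }
    body := resp.Body(RetryReaderOptions{MaxRetryRequests: 3})
    defer body.Close()
    return ioutil.ReadAll(body)
}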

Gopkg.lock generated Normal file → Executable file

@@ -1,21 +1,21 @@
# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.

[[projects]]
  name = "github.com/Azure/azure-pipeline-go"
  packages = ["pipeline"]
  revision = "0f0dbf237bd47d5688310dbd3fac369353f280d0"
  version = "0.1.5"

[[projects]]
  branch = "v1"
  name = "gopkg.in/check.v1"
  packages = ["."]
  revision = "20d25e2804050c1cd24a7eea1e7a6447dd0e74ec"

[solve-meta]
  analyzer-name = "dep"
  analyzer-version = 1
  inputs-digest = "a6aef008d020b1455ef28ec8264c056f69bc26c0d32f711127c05dde5802f737"
  solver-name = "gps-cdcl"
  solver-version = 1

Gopkg.toml Normal file → Executable file

@@ -1,8 +1,38 @@
# Gopkg.toml example
#
# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md
# for detailed Gopkg.toml documentation.
#
# required = ["github.com/user/thing/cmd/thing"]
# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"]
#
# [[constraint]]
#   name = "github.com/user/project"
#   version = "1.0.0"
#
# [[constraint]]
#   name = "github.com/user/project2"
#   branch = "dev"
#   source = "github.com/myfork/project2"
#
# [[override]]
#   name = "github.com/x/y"
#   version = "2.4.0"
#
# [prune]
#   non-go = false
#   go-tests = true
#   unused-packages = true

[[constraint]]
  name = "github.com/Azure/azure-pipeline-go"
  version = "0.1.5"

[[constraint]]
  branch = "v1"
  name = "gopkg.in/check.v1"

[prune]
  go-tests = true
  unused-packages = true