Vendor the azblob 0.6.0 dependency so customers can upgrade to later blobs packages (#262)
* Vendor azure-storage-blob-go 0.6.0.
* Update internal references.
* Attempt internal vendoring for some pieces, with the public interface still coming from 'azblob'.
* Slight modifications.
* Linter clean-up.
* Skip lint and gocyclo for the vendored azblob.
* Fix additional formatting.
* Skip gocyclo and linting of azblob in integration tests.
* Use gocyclo's ignore flag instead of finding files.
* Even though the vendored and non-vendored azblob ServiceCodeTypes are both strings, they don't "match" when determining whether they are the same type. Aliasing works just fine here (see the sketch below).
* Fix an incorrect comment.
* Update version and changelog.

Co-authored-by: ripark <ripark@microsoft.com>
Co-authored-by: Joel Hendrix <jhendrix@microsoft.com>
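A minimal sketch of that aliasing point, using hypothetical type names standing in for the vendored and non-vendored packages: a Go type alias makes the two names denote the same type, so values assign and compare without conversion, which a separate type definition would not allow.

package main

import "fmt"

// Stand-in for the vendored package's error-code type (a string underneath).
type vendoredServiceCodeType string

// A type *alias* (note the '='): ServiceCodeType and vendoredServiceCodeType
// are the same type. A plain definition ("type ServiceCodeType string") would
// be a distinct type and force explicit conversions.
type ServiceCodeType = vendoredServiceCodeType

func main() {
	var fromVendored vendoredServiceCodeType = "ContainerAlreadyExists"
	var public ServiceCodeType = fromVendored // assigns without conversion
	fmt.Println(public == fromVendored)       // true
}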
This commit is contained in:
Parent: d88c0a040d
Commit: adc9788f66
Makefile (5 changed lines)

@@ -3,7 +3,6 @@ DATE ?= $(shell date +%FT%T%z)
VERSION ?= $(shell git describe --tags --always --dirty --match=v* 2> /dev/null || \
			cat $(CURDIR)/.version 2> /dev/null || echo v0)
BIN = $(GOPATH)/bin
GO_FILES = find . -iname '*.go' -type f | grep -v /vendor/

GO = go
GODOC = godoc

@@ -52,7 +51,7 @@ tidy: ; $(info $(M) running go mod tidy…) @ ## Run tidy

.PHONY: lint
lint: ; $(info $(M) running golangci-lint…) @ ## Run golangci-lint
	$Q $(GOLINT) run
	$Q $(GOLINT) run --skip-dirs "internal/azure-storage-blob-go"

.PHONY: staticcheck
staticcheck: ; $(info $(M) running staticcheck…) @ ## Run staticcheck

@@ -66,7 +65,7 @@ fmt: ; $(info $(M) running gofmt…) @ ## Run gofmt on all source files

.PHONY: cyclo
cyclo: ; $(info $(M) running gocyclo...) @ ## Run gocyclo on all source files
	$Q $(GOCYCLO) -over 19 $$($(GO_FILES))
	$Q $(GOCYCLO) -over 19 -ignore "internal/azure-storage-blob-go" .

terraform.tfstate: azuredeploy.tf $(wildcard terraform.tfvars) .terraform ; $(info $(M) running terraform...) @ ## Run terraform to provision infrastructure needed for testing
	$Q TF_VAR_azure_client_secret="$${ARM_CLIENT_SECRET}" terraform apply -auto-approve
@@ -1,5 +1,9 @@
# Change Log

## `v3.3.19`

- Vendor a copy of `azblob` to avoid compilation errors with mismatched versions [#261](https://github.com/Azure/azure-event-hubs-go/issues/261)

## `v3.3.18`

- Fixing issue where the LeaserCheckpointer could fail with a "ContainerAlreadyExists" error. (#253)
@@ -54,11 +54,11 @@ jobs:
      - script: |
          curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin v1.47.2
          golangci-lint --version
          golangci-lint run
          golangci-lint run --skip-dirs "internal/azure-storage-blob-go"
        workingDirectory: '$(sdkPath)'
        displayName: 'Install and Run GoLintCLI'
      - script: |
          gocyclo -over 19 .
          gocyclo -over 19 -ignore "internal/azure-storage-blob-go" .
        workingDirectory: '$(sdkPath)'
        displayName: 'Cyclo'
      - script: |
go.mod (5 changed lines)

@@ -4,9 +4,9 @@ go 1.13

require (
	github.com/Azure/azure-amqp-common-go/v3 v3.2.3
	github.com/Azure/azure-pipeline-go v0.1.9
	github.com/Azure/azure-pipeline-go v0.2.3
	github.com/Azure/azure-sdk-for-go v65.0.0+incompatible
	github.com/Azure/azure-storage-blob-go v0.6.0
	github.com/Azure/azure-storage-blob-go v0.15.0
	github.com/Azure/go-amqp v0.17.0
	github.com/Azure/go-autorest/autorest v0.11.28
	github.com/Azure/go-autorest/autorest/adal v0.9.21

@@ -20,7 +20,6 @@ require (
	github.com/joho/godotenv v1.3.0
	github.com/jpillora/backoff v1.0.0
	github.com/mitchellh/mapstructure v1.5.0
	github.com/pkg/errors v0.9.1 // indirect
	github.com/sirupsen/logrus v1.2.0
	github.com/stretchr/testify v1.6.1
	golang.org/x/net v0.0.0-20220725212005-46097bf591d3
go.sum (28 changed lines)
|
@ -1,12 +1,11 @@
|
|||
github.com/Azure/azure-amqp-common-go/v3 v3.2.3 h1:uDF62mbd9bypXWi19V1bN5NZEO84JqgmI5G73ibAmrk=
|
||||
github.com/Azure/azure-amqp-common-go/v3 v3.2.3/go.mod h1:7rPmbSfszeovxGfc5fSAXE4ehlXQZHpMja2OtxC2Tas=
|
||||
github.com/Azure/azure-pipeline-go v0.1.8/go.mod h1:XA1kFWRVhSK+KNFiOhfv83Fv8L9achrP7OxIzeTn1Yg=
|
||||
github.com/Azure/azure-pipeline-go v0.1.9 h1:u7JFb9fFTE6Y/j8ae2VK33ePrRqJqoCM/IWkQdAZ+rg=
|
||||
github.com/Azure/azure-pipeline-go v0.1.9/go.mod h1:XA1kFWRVhSK+KNFiOhfv83Fv8L9achrP7OxIzeTn1Yg=
|
||||
github.com/Azure/azure-pipeline-go v0.2.3 h1:7U9HBg1JFK3jHl5qmo4CTZKFTVgMwdFHMVtCdfBE21U=
|
||||
github.com/Azure/azure-pipeline-go v0.2.3/go.mod h1:x841ezTBIMG6O3lAcl8ATHnsOPVl2bqk7S3ta6S6u4k=
|
||||
github.com/Azure/azure-sdk-for-go v65.0.0+incompatible h1:HzKLt3kIwMm4KeJYTdx9EbjRYTySD/t8i1Ee/W5EGXw=
|
||||
github.com/Azure/azure-sdk-for-go v65.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
|
||||
github.com/Azure/azure-storage-blob-go v0.6.0 h1:SEATKb3LIHcaSIX+E6/K4kJpwfuozFEsmt5rS56N6CE=
|
||||
github.com/Azure/azure-storage-blob-go v0.6.0/go.mod h1:oGfmITT1V6x//CswqY2gtAHND+xIP64/qL7a5QJix0Y=
|
||||
github.com/Azure/azure-storage-blob-go v0.15.0 h1:rXtgp8tN1p29GvpGgfJetavIG0V7OgcSXPpwp3tx6qk=
|
||||
github.com/Azure/azure-storage-blob-go v0.15.0/go.mod h1:vbjsVbX0dlxnRc4FFMPsS9BsJWPcne7GB7onqlPvz58=
|
||||
github.com/Azure/go-amqp v0.17.0 h1:HHXa3149nKrI0IZwyM7DRcRy5810t9ZICDutn4BYzj4=
|
||||
github.com/Azure/go-amqp v0.17.0/go.mod h1:9YJ3RhxRT1gquYnzpZO1vcYMMpAdJT+QEg6fwmw9Zlg=
|
||||
github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs=
|
||||
|
@ -65,12 +64,21 @@ github.com/golang-jwt/jwt/v4 v4.4.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w
|
|||
github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.3 h1:x95R7cp+rSeeqAMI2knLtQ0DKlaBhv2NrtrOvafPHRo=
|
||||
github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/uuid v1.2.0 h1:qJYtXnJRWmpe7m/3XlyhrsLrEURqHRM2kxzoxXqyUDs=
|
||||
github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/joho/godotenv v1.3.0 h1:Zjp+RcGpHhGlrMbJzXTrZZPrWj+1vfm90La1wgB6Bhc=
|
||||
github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg=
|
||||
github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA=
|
||||
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI=
|
||||
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
|
||||
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
|
||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||
github.com/mattn/go-ieproxy v0.0.1 h1:qiyop7gCflfhwCzGyeT0gro3sF9AIg9HU98JORTkqfI=
|
||||
github.com/mattn/go-ieproxy v0.0.1/go.mod h1:pYabZ6IHcRpFh7vIaLfK7rdcWgFEb3SFJ6/gNWuh88E=
|
||||
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
|
||||
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
|
||||
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
|
||||
|
@ -91,17 +99,21 @@ golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnf
|
|||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20201016220609-9e8e0b390897/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||
golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa h1:zuSxTR4o9y82ebqCUJYNGJbGPo6sKVl54f/TVDObg1c=
|
||||
golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
|
||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20191112182307-2180aed22343/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||
golang.org/x/net v0.0.0-20210610132358-84b48f89b13b/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20220725212005-46097bf591d3 h1:2yWTtPWWRcISTw3/o+s/Y4UOMnQL71DWyToOANFusCg=
|
||||
golang.org/x/net v0.0.0-20220725212005-46097bf591d3/go.mod h1:AaygXjzTFtRAg2ttMY5RMuhpJ3cNnI0XpyFJD1iQRSM=
|
||||
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191112214154-59a1497f0cea/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
|
@ -111,14 +123,16 @@ golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9sn
|
|||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 h1:JGgROgKl9N8DuW20oFS5gxc+lE67/N3FcwmBPMe7ArY=
|
||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk=
|
||||
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20161208181325-20d25e280405 h1:829vOVxxusYHC+IqBtkX5mbKtsY9fheQiQn0MZRVLfQ=
|
||||
gopkg.in/check.v1 v1.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
|
|
|
@@ -0,0 +1,21 @@
MIT License

Copyright (c) Microsoft Corporation. All rights reserved.

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE
@ -0,0 +1,65 @@
|
|||
package azblob
|
||||
|
||||
import (
|
||||
"time"
|
||||
)
|
||||
|
||||
// ModifiedAccessConditions identifies standard HTTP access conditions which you optionally set.
|
||||
type ModifiedAccessConditions struct {
|
||||
IfModifiedSince time.Time
|
||||
IfUnmodifiedSince time.Time
|
||||
IfMatch ETag
|
||||
IfNoneMatch ETag
|
||||
}
|
||||
|
||||
// pointers is for internal infrastructure. It returns the fields as pointers.
|
||||
func (ac ModifiedAccessConditions) pointers() (ims *time.Time, ius *time.Time, ime *ETag, inme *ETag) {
|
||||
if !ac.IfModifiedSince.IsZero() {
|
||||
ims = &ac.IfModifiedSince
|
||||
}
|
||||
if !ac.IfUnmodifiedSince.IsZero() {
|
||||
ius = &ac.IfUnmodifiedSince
|
||||
}
|
||||
if ac.IfMatch != ETagNone {
|
||||
ime = &ac.IfMatch
|
||||
}
|
||||
if ac.IfNoneMatch != ETagNone {
|
||||
inme = &ac.IfNoneMatch
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// ContainerAccessConditions identifies container-specific access conditions which you optionally set.
|
||||
type ContainerAccessConditions struct {
|
||||
ModifiedAccessConditions
|
||||
LeaseAccessConditions
|
||||
}
|
||||
|
||||
// BlobAccessConditions identifies blob-specific access conditions which you optionally set.
|
||||
type BlobAccessConditions struct {
|
||||
ModifiedAccessConditions
|
||||
LeaseAccessConditions
|
||||
}
|
||||
|
||||
// LeaseAccessConditions identifies lease access conditions for a container or blob which you optionally set.
|
||||
type LeaseAccessConditions struct {
|
||||
LeaseID string
|
||||
}
|
||||
|
||||
// pointers is for internal infrastructure. It returns the fields as pointers.
|
||||
func (ac LeaseAccessConditions) pointers() (leaseID *string) {
|
||||
if ac.LeaseID != "" {
|
||||
leaseID = &ac.LeaseID
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
/*
|
||||
// getInt32 is for internal infrastructure. It is used with access condition values where
|
||||
// 0 (the default setting) is meaningful. The library interprets 0 as do not send the header
|
||||
// and the privately-stored field in the access condition object is stored as +1 higher than desired.
|
||||
// This method returns true if the value is > 0 (explicitly set), together with the stored value - 1 (the desired value).
|
||||
func getInt32(value int32) (bool, int32) {
|
||||
return value > 0, value - 1
|
||||
}
|
||||
*/
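As a usage note (not part of this commit), callers fill the composed condition types above in literally. A hedged sketch, assuming the package is imported as azblob (the upstream module path is shown; this repo uses the vendored copy under internal/) and with placeholder ETag and lease values:

package example

import (
	azblob "github.com/Azure/azure-storage-blob-go/azblob"
)

// exampleConditions builds access conditions that require a matching ETag and
// an active lease; both values here are hypothetical placeholders.
func exampleConditions() azblob.BlobAccessConditions {
	return azblob.BlobAccessConditions{
		ModifiedAccessConditions: azblob.ModifiedAccessConditions{IfMatch: azblob.ETag("0x8D4BCC2E4835CD0")},
		LeaseAccessConditions:    azblob.LeaseAccessConditions{LeaseID: "f81d4fae-7dec-11d0-a765-00a0c91e6bf6"},
	}
}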
|
|
@ -0,0 +1,69 @@
|
|||
package azblob
|
||||
|
||||
import "sync/atomic"
|
||||
|
||||
// AtomicMorpherInt32 identifies a method passed to and invoked by the AtomicMorphInt32 function.
|
||||
// The AtomicMorpher callback is passed a startValue and based on this value it returns
|
||||
// what the new value should be and the result that AtomicMorph should return to its caller.
|
||||
type atomicMorpherInt32 func(startVal int32) (val int32, morphResult interface{})
|
||||
|
||||
const targetAndMorpherMustNotBeNil = "target and morpher must not be nil"
|
||||
|
||||
// AtomicMorph atomically morphs target into a new value (and result) as indicated by the AtomicMorpher callback function.
|
||||
func atomicMorphInt32(target *int32, morpher atomicMorpherInt32) interface{} {
|
||||
for {
|
||||
currentVal := atomic.LoadInt32(target)
|
||||
desiredVal, morphResult := morpher(currentVal)
|
||||
if atomic.CompareAndSwapInt32(target, currentVal, desiredVal) {
|
||||
return morphResult
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// AtomicMorpherUint32 identifies a method passed to and invoked by the AtomicMorph function.
|
||||
// The AtomicMorpher callback is passed a startValue and based on this value it returns
|
||||
// what the new value should be and the result that AtomicMorph should return to its caller.
|
||||
type atomicMorpherUint32 func(startVal uint32) (val uint32, morphResult interface{})
|
||||
|
||||
// AtomicMorph atomically morphs target into a new value (and result) as indicated by the AtomicMorpher callback function.
|
||||
func atomicMorphUint32(target *uint32, morpher atomicMorpherUint32) interface{} {
|
||||
for {
|
||||
currentVal := atomic.LoadUint32(target)
|
||||
desiredVal, morphResult := morpher(currentVal)
|
||||
if atomic.CompareAndSwapUint32(target, currentVal, desiredVal) {
|
||||
return morphResult
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// AtomicMorpherInt64 identifies a method passed to and invoked by the AtomicMorphInt64 function.
|
||||
// The AtomicMorpher callback is passed a startValue and based on this value it returns
|
||||
// what the new value should be and the result that AtomicMorph should return to its caller.
|
||||
type atomicMorpherInt64 func(startVal int64) (val int64, morphResult interface{})
|
||||
|
||||
// AtomicMorph atomically morphs target into a new value (and result) as indicated by the AtomicMorpher callback function.
|
||||
func atomicMorphInt64(target *int64, morpher atomicMorpherInt64) interface{} {
|
||||
for {
|
||||
currentVal := atomic.LoadInt64(target)
|
||||
desiredVal, morphResult := morpher(currentVal)
|
||||
if atomic.CompareAndSwapInt64(target, currentVal, desiredVal) {
|
||||
return morphResult
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// AtomicMorpherUint64 identifies a method passed to and invoked by the AtomicMorphUint64 function.
|
||||
// The AtomicMorpher callback is passed a startValue and based on this value it returns
|
||||
// what the new value should be and the result that AtomicMorph should return to its caller.
|
||||
type atomicMorpherUint64 func(startVal uint64) (val uint64, morphResult interface{})
|
||||
|
||||
// AtomicMorph atomically morphs target into a new value (and result) as indicated by the AtomicMorpher callback function.
|
||||
func atomicMorphUint64(target *uint64, morpher atomicMorpherUint64) interface{} {
|
||||
for {
|
||||
currentVal := atomic.LoadUint64(target)
|
||||
desiredVal, morphResult := morpher(currentVal)
|
||||
if atomic.CompareAndSwapUint64(target, currentVal, desiredVal) {
|
||||
return morphResult
|
||||
}
|
||||
}
|
||||
}
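A small sketch of how these unexported helpers are intended to be called from inside this package (the stream-upload code further down uses exactly this pattern): a lock-free running maximum. trackMax is an illustrative name, not part of the vendored code.

// trackMax atomically raises *max to n when n is larger, using the
// compare-and-swap loop provided by atomicMorphUint32 above.
func trackMax(max *uint32, n uint32) {
	atomicMorphUint32(max, func(start uint32) (uint32, interface{}) {
		if start < n {
			return n, nil // propose the larger value
		}
		return start, nil // keep the current value
	})
}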
|
|
@ -0,0 +1,535 @@
|
|||
package azblob
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/base64"
|
||||
"io"
|
||||
"net/http"
|
||||
|
||||
"bytes"
|
||||
"os"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"errors"
|
||||
|
||||
"github.com/Azure/azure-pipeline-go/pipeline"
|
||||
)
|
||||
|
||||
// CommonResponse returns the headers common to all blob REST API responses.
|
||||
type CommonResponse interface {
|
||||
// ETag returns the value for header ETag.
|
||||
ETag() ETag
|
||||
|
||||
// LastModified returns the value for header Last-Modified.
|
||||
LastModified() time.Time
|
||||
|
||||
// RequestID returns the value for header x-ms-request-id.
|
||||
RequestID() string
|
||||
|
||||
// Date returns the value for header Date.
|
||||
Date() time.Time
|
||||
|
||||
// Version returns the value for header x-ms-version.
|
||||
Version() string
|
||||
|
||||
// Response returns the raw HTTP response object.
|
||||
Response() *http.Response
|
||||
}
|
||||
|
||||
// UploadToBlockBlobOptions identifies options used by the UploadBufferToBlockBlob and UploadFileToBlockBlob functions.
|
||||
type UploadToBlockBlobOptions struct {
|
||||
// BlockSize specifies the block size to use; the default (and maximum size) is BlockBlobMaxStageBlockBytes.
|
||||
BlockSize int64
|
||||
|
||||
// Progress is a function that is invoked periodically as bytes are sent to the BlockBlobURL.
|
||||
// Note that the progress reporting is not always increasing; it can go down when retrying a request.
|
||||
Progress pipeline.ProgressReceiver
|
||||
|
||||
// BlobHTTPHeaders indicates the HTTP headers to be associated with the blob.
|
||||
BlobHTTPHeaders BlobHTTPHeaders
|
||||
|
||||
// Metadata indicates the metadata to be associated with the blob when PutBlockList is called.
|
||||
Metadata Metadata
|
||||
|
||||
// AccessConditions indicates the access conditions for the block blob.
|
||||
AccessConditions BlobAccessConditions
|
||||
|
||||
// Parallelism indicates the maximum number of blocks to upload in parallel (0=default)
|
||||
Parallelism uint16
|
||||
}
|
||||
|
||||
// UploadBufferToBlockBlob uploads a buffer in blocks to a block blob.
|
||||
func UploadBufferToBlockBlob(ctx context.Context, b []byte,
|
||||
blockBlobURL BlockBlobURL, o UploadToBlockBlobOptions) (CommonResponse, error) {
|
||||
bufferSize := int64(len(b))
|
||||
if o.BlockSize == 0 {
|
||||
// If bufferSize > (BlockBlobMaxStageBlockBytes * BlockBlobMaxBlocks), then error
|
||||
if bufferSize > BlockBlobMaxStageBlockBytes*BlockBlobMaxBlocks {
|
||||
return nil, errors.New("Buffer is too large to upload to a block blob")
|
||||
}
|
||||
// If bufferSize <= BlockBlobMaxUploadBlobBytes, then Upload should be used with just 1 I/O request
|
||||
if bufferSize <= BlockBlobMaxUploadBlobBytes {
|
||||
o.BlockSize = BlockBlobMaxUploadBlobBytes // Default if unspecified
|
||||
} else {
|
||||
o.BlockSize = bufferSize / BlockBlobMaxBlocks // buffer / max blocks = block size to use all 50,000 blocks
|
||||
if o.BlockSize < BlobDefaultDownloadBlockSize { // If the block size is smaller than 4MB, round up to 4MB
|
||||
o.BlockSize = BlobDefaultDownloadBlockSize
|
||||
}
|
||||
// StageBlock will be called with blockSize blocks and a parallelism of (BufferSize / BlockSize).
|
||||
}
|
||||
}
|
||||
|
||||
if bufferSize <= BlockBlobMaxUploadBlobBytes {
|
||||
// If the size can fit in 1 Upload call, do it this way
|
||||
var body io.ReadSeeker = bytes.NewReader(b)
|
||||
if o.Progress != nil {
|
||||
body = pipeline.NewRequestBodyProgress(body, o.Progress)
|
||||
}
|
||||
return blockBlobURL.Upload(ctx, body, o.BlobHTTPHeaders, o.Metadata, o.AccessConditions)
|
||||
}
|
||||
|
||||
var numBlocks = uint16(((bufferSize - 1) / o.BlockSize) + 1)
|
||||
|
||||
blockIDList := make([]string, numBlocks) // Base-64 encoded block IDs
|
||||
progress := int64(0)
|
||||
progressLock := &sync.Mutex{}
|
||||
|
||||
err := doBatchTransfer(ctx, batchTransferOptions{
|
||||
operationName: "UploadBufferToBlockBlob",
|
||||
transferSize: bufferSize,
|
||||
chunkSize: o.BlockSize,
|
||||
parallelism: o.Parallelism,
|
||||
operation: func(offset int64, count int64) error {
|
||||
// This function is called once per block.
|
||||
// It is passed this block's offset within the buffer and its count of bytes
|
||||
// Prepare to read the proper block/section of the buffer
|
||||
var body io.ReadSeeker = bytes.NewReader(b[offset : offset+count])
|
||||
blockNum := offset / o.BlockSize
|
||||
if o.Progress != nil {
|
||||
blockProgress := int64(0)
|
||||
body = pipeline.NewRequestBodyProgress(body,
|
||||
func(bytesTransferred int64) {
|
||||
diff := bytesTransferred - blockProgress
|
||||
blockProgress = bytesTransferred
|
||||
progressLock.Lock() // 1 goroutine at a time gets a progress report
|
||||
progress += diff
|
||||
o.Progress(progress)
|
||||
progressLock.Unlock()
|
||||
})
|
||||
}
|
||||
|
||||
// Block IDs are unique values to avoid issue if 2+ clients are uploading blocks
|
||||
// at the same time causing PutBlockList to get a mix of blocks from all the clients.
|
||||
blockIDList[blockNum] = base64.StdEncoding.EncodeToString(newUUID().bytes())
|
||||
_, err := blockBlobURL.StageBlock(ctx, blockIDList[blockNum], body, o.AccessConditions.LeaseAccessConditions, nil)
|
||||
return err
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// All put blocks were successful, call Put Block List to finalize the blob
|
||||
return blockBlobURL.CommitBlockList(ctx, blockIDList, o.BlobHTTPHeaders, o.Metadata, o.AccessConditions)
|
||||
}
|
||||
|
||||
// UploadFileToBlockBlob uploads a file in blocks to a block blob.
|
||||
func UploadFileToBlockBlob(ctx context.Context, file *os.File,
|
||||
blockBlobURL BlockBlobURL, o UploadToBlockBlobOptions) (CommonResponse, error) {
|
||||
|
||||
stat, err := file.Stat()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
m := mmf{} // Default to an empty slice; used for 0-size file
|
||||
if stat.Size() != 0 {
|
||||
m, err = newMMF(file, false, 0, int(stat.Size()))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer m.unmap()
|
||||
}
|
||||
return UploadBufferToBlockBlob(ctx, m, blockBlobURL, o)
|
||||
}
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
const BlobDefaultDownloadBlockSize = int64(4 * 1024 * 1024) // 4MB
|
||||
|
||||
// DownloadFromBlobOptions identifies options used by the DownloadBlobToBuffer and DownloadBlobToFile functions.
|
||||
type DownloadFromBlobOptions struct {
|
||||
// BlockSize specifies the block size to use for each parallel download; the default size is BlobDefaultDownloadBlockSize.
|
||||
BlockSize int64
|
||||
|
||||
// Progress is a function that is invoked periodically as bytes are received.
|
||||
Progress pipeline.ProgressReceiver
|
||||
|
||||
// AccessConditions indicates the access conditions used when making HTTP GET requests against the blob.
|
||||
AccessConditions BlobAccessConditions
|
||||
|
||||
// Parallelism indicates the maximum number of blocks to download in parallel (0=default)
|
||||
Parallelism uint16
|
||||
|
||||
// RetryReaderOptionsPerBlock is used when downloading each block.
|
||||
RetryReaderOptionsPerBlock RetryReaderOptions
|
||||
}
|
||||
|
||||
// downloadBlobToBuffer downloads an Azure blob to a buffer in parallel.
|
||||
func downloadBlobToBuffer(ctx context.Context, blobURL BlobURL, offset int64, count int64,
|
||||
b []byte, o DownloadFromBlobOptions, initialDownloadResponse *DownloadResponse) error {
|
||||
if o.BlockSize == 0 {
|
||||
o.BlockSize = BlobDefaultDownloadBlockSize
|
||||
}
|
||||
|
||||
if count == CountToEnd { // If size not specified, calculate it
|
||||
if initialDownloadResponse != nil {
|
||||
count = initialDownloadResponse.ContentLength() - offset // if we have the length, use it
|
||||
} else {
|
||||
// If we don't have the length at all, get it
|
||||
dr, err := blobURL.Download(ctx, 0, CountToEnd, o.AccessConditions, false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
count = dr.ContentLength() - offset
|
||||
}
|
||||
}
|
||||
|
||||
// Prepare and do parallel download.
|
||||
progress := int64(0)
|
||||
progressLock := &sync.Mutex{}
|
||||
|
||||
err := doBatchTransfer(ctx, batchTransferOptions{
|
||||
operationName: "downloadBlobToBuffer",
|
||||
transferSize: count,
|
||||
chunkSize: o.BlockSize,
|
||||
parallelism: o.Parallelism,
|
||||
operation: func(chunkStart int64, count int64) error {
|
||||
dr, err := blobURL.Download(ctx, chunkStart+offset, count, o.AccessConditions, false)
|
||||
body := dr.Body(o.RetryReaderOptionsPerBlock)
|
||||
if o.Progress != nil {
|
||||
rangeProgress := int64(0)
|
||||
body = pipeline.NewResponseBodyProgress(
|
||||
body,
|
||||
func(bytesTransferred int64) {
|
||||
diff := bytesTransferred - rangeProgress
|
||||
rangeProgress = bytesTransferred
|
||||
progressLock.Lock()
|
||||
progress += diff
|
||||
o.Progress(progress)
|
||||
progressLock.Unlock()
|
||||
})
|
||||
}
|
||||
_, err = io.ReadFull(body, b[chunkStart:chunkStart+count])
|
||||
body.Close()
|
||||
return err
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DownloadBlobToBuffer downloads an Azure blob to a buffer in parallel.
|
||||
// Offset and count are optional, pass 0 for both to download the entire blob.
|
||||
func DownloadBlobToBuffer(ctx context.Context, blobURL BlobURL, offset int64, count int64,
|
||||
b []byte, o DownloadFromBlobOptions) error {
|
||||
return downloadBlobToBuffer(ctx, blobURL, offset, count, b, o, nil)
|
||||
}
|
||||
|
||||
// DownloadBlobToFile downloads an Azure blob to a local file.
|
||||
// The file would be truncated if the size doesn't match.
|
||||
// Offset and count are optional, pass 0 for both to download the entire blob.
|
||||
func DownloadBlobToFile(ctx context.Context, blobURL BlobURL, offset int64, count int64,
|
||||
file *os.File, o DownloadFromBlobOptions) error {
|
||||
// 1. Calculate the size of the destination file
|
||||
var size int64
|
||||
|
||||
if count == CountToEnd {
|
||||
// Try to get Azure blob's size
|
||||
props, err := blobURL.GetProperties(ctx, o.AccessConditions)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
size = props.ContentLength() - offset
|
||||
} else {
|
||||
size = count
|
||||
}
|
||||
|
||||
// 2. Compare and try to resize local file's size if it doesn't match Azure blob's size.
|
||||
stat, err := file.Stat()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if stat.Size() != size {
|
||||
if err = file.Truncate(size); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if size > 0 {
|
||||
// 3. Set mmap and call downloadBlobToBuffer.
|
||||
m, err := newMMF(file, true, 0, int(size))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer m.unmap()
|
||||
return downloadBlobToBuffer(ctx, blobURL, offset, size, m, o, nil)
|
||||
} else { // if the blob's size is 0, there is no need in downloading it
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
// batchTransferOptions identifies options used by doBatchTransfer.
|
||||
type batchTransferOptions struct {
|
||||
transferSize int64
|
||||
chunkSize int64
|
||||
parallelism uint16
|
||||
operation func(offset int64, chunkSize int64) error
|
||||
operationName string
|
||||
}
|
||||
|
||||
// doBatchTransfer helps to execute operations in a batch manner.
|
||||
func doBatchTransfer(ctx context.Context, o batchTransferOptions) error {
|
||||
// Prepare and do parallel operations.
|
||||
numChunks := uint16(((o.transferSize - 1) / o.chunkSize) + 1)
|
||||
operationChannel := make(chan func() error, o.parallelism) // Create the channel that releases 'parallelism' goroutines concurrently
|
||||
operationResponseChannel := make(chan error, numChunks) // Holds each response
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
defer cancel()
|
||||
|
||||
// Create the goroutines that process each operation (in parallel).
|
||||
if o.parallelism == 0 {
|
||||
o.parallelism = 5 // default parallelism
|
||||
}
|
||||
for g := uint16(0); g < o.parallelism; g++ {
|
||||
//grIndex := g
|
||||
go func() {
|
||||
for f := range operationChannel {
|
||||
//fmt.Printf("[%s] gr-%d start action\n", o.operationName, grIndex)
|
||||
err := f()
|
||||
operationResponseChannel <- err
|
||||
//fmt.Printf("[%s] gr-%d end action\n", o.operationName, grIndex)
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
// Add each chunk's operation to the channel.
|
||||
for chunkNum := uint16(0); chunkNum < numChunks; chunkNum++ {
|
||||
curChunkSize := o.chunkSize
|
||||
|
||||
if chunkNum == numChunks-1 { // Last chunk
|
||||
curChunkSize = o.transferSize - (int64(chunkNum) * o.chunkSize) // Remove size of all transferred chunks from total
|
||||
}
|
||||
offset := int64(chunkNum) * o.chunkSize
|
||||
|
||||
operationChannel <- func() error {
|
||||
return o.operation(offset, curChunkSize)
|
||||
}
|
||||
}
|
||||
close(operationChannel)
|
||||
|
||||
// Wait for the operations to complete.
|
||||
for chunkNum := uint16(0); chunkNum < numChunks; chunkNum++ {
|
||||
responseError := <-operationResponseChannel
|
||||
if responseError != nil {
|
||||
cancel() // As soon as any operation fails, cancel all remaining operation calls
|
||||
return responseError // No need to process any more responses
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
type UploadStreamToBlockBlobOptions struct {
|
||||
BufferSize int
|
||||
MaxBuffers int
|
||||
BlobHTTPHeaders BlobHTTPHeaders
|
||||
Metadata Metadata
|
||||
AccessConditions BlobAccessConditions
|
||||
}
|
||||
|
||||
func UploadStreamToBlockBlob(ctx context.Context, reader io.Reader, blockBlobURL BlockBlobURL,
|
||||
o UploadStreamToBlockBlobOptions) (CommonResponse, error) {
|
||||
result, err := uploadStream(ctx, reader,
|
||||
UploadStreamOptions{BufferSize: o.BufferSize, MaxBuffers: o.MaxBuffers},
|
||||
&uploadStreamToBlockBlobOptions{b: blockBlobURL, o: o, blockIDPrefix: newUUID()})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return result.(CommonResponse), nil
|
||||
}
|
||||
|
||||
type uploadStreamToBlockBlobOptions struct {
|
||||
b BlockBlobURL
|
||||
o UploadStreamToBlockBlobOptions
|
||||
blockIDPrefix uuid // UUID used with all blockIDs
|
||||
maxBlockNum uint32 // defaults to 0
|
||||
firstBlock []byte // Used only if maxBlockNum is 0
|
||||
}
|
||||
|
||||
func (t *uploadStreamToBlockBlobOptions) start(ctx context.Context) (interface{}, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (t *uploadStreamToBlockBlobOptions) chunk(ctx context.Context, num uint32, buffer []byte) error {
|
||||
if num == 0 {
|
||||
t.firstBlock = buffer
|
||||
|
||||
// If whole payload fits in 1 block, don't stage it; End will upload it with 1 I/O operation
|
||||
// If the payload is exactly the same size as the buffer, there may be more content coming in.
|
||||
if len(buffer) < t.o.BufferSize {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
// Else, upload a staged block...
|
||||
atomicMorphUint32(&t.maxBlockNum, func(startVal uint32) (val uint32, morphResult interface{}) {
|
||||
// Atomically remember (in t.maxBlockNum) the maximum block num we've ever seen
|
||||
if startVal < num {
|
||||
return num, nil
|
||||
}
|
||||
return startVal, nil
|
||||
})
|
||||
blockID := newUuidBlockID(t.blockIDPrefix).WithBlockNumber(num).ToBase64()
|
||||
_, err := t.b.StageBlock(ctx, blockID, bytes.NewReader(buffer), LeaseAccessConditions{}, nil)
|
||||
return err
|
||||
}
|
||||
|
||||
func (t *uploadStreamToBlockBlobOptions) end(ctx context.Context) (interface{}, error) {
|
||||
// If the first block had the exact same size as the buffer
|
||||
// we would have staged it as a block thinking that there might be more data coming
|
||||
if t.maxBlockNum == 0 && len(t.firstBlock) != t.o.BufferSize {
|
||||
// If whole payload fits in 1 block (block #0), upload it with 1 I/O operation
|
||||
return t.b.Upload(ctx, bytes.NewReader(t.firstBlock),
|
||||
t.o.BlobHTTPHeaders, t.o.Metadata, t.o.AccessConditions)
|
||||
}
|
||||
// Multiple blocks staged, commit them all now
|
||||
blockID := newUuidBlockID(t.blockIDPrefix)
|
||||
blockIDs := make([]string, t.maxBlockNum+1)
|
||||
for bn := uint32(0); bn <= t.maxBlockNum; bn++ {
|
||||
blockIDs[bn] = blockID.WithBlockNumber(bn).ToBase64()
|
||||
}
|
||||
return t.b.CommitBlockList(ctx, blockIDs, t.o.BlobHTTPHeaders, t.o.Metadata, t.o.AccessConditions)
|
||||
}
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
type iTransfer interface {
|
||||
start(ctx context.Context) (interface{}, error)
|
||||
chunk(ctx context.Context, num uint32, buffer []byte) error
|
||||
end(ctx context.Context) (interface{}, error)
|
||||
}
|
||||
|
||||
type UploadStreamOptions struct {
|
||||
MaxBuffers int
|
||||
BufferSize int
|
||||
}
|
||||
|
||||
type firstErr struct {
|
||||
lock sync.Mutex
|
||||
finalError error
|
||||
}
|
||||
|
||||
func (fe *firstErr) set(err error) {
|
||||
fe.lock.Lock()
|
||||
if fe.finalError == nil {
|
||||
fe.finalError = err
|
||||
}
|
||||
fe.lock.Unlock()
|
||||
}
|
||||
|
||||
func (fe *firstErr) get() (err error) {
|
||||
fe.lock.Lock()
|
||||
err = fe.finalError
|
||||
fe.lock.Unlock()
|
||||
return
|
||||
}
|
||||
|
||||
func uploadStream(ctx context.Context, reader io.Reader, o UploadStreamOptions, t iTransfer) (interface{}, error) {
|
||||
firstErr := firstErr{}
|
||||
ctx, cancel := context.WithCancel(ctx) // New context so that any failure cancels everything
|
||||
defer cancel()
|
||||
wg := sync.WaitGroup{} // Used to know when all outgoing messages have finished processing
|
||||
type OutgoingMsg struct {
|
||||
chunkNum uint32
|
||||
buffer []byte
|
||||
}
|
||||
|
||||
// Create a channel to hold the buffers usable for incoming data
|
||||
incoming := make(chan []byte, o.MaxBuffers)
|
||||
outgoing := make(chan OutgoingMsg, o.MaxBuffers) // Channel holding outgoing buffers
|
||||
if result, err := t.start(ctx); err != nil {
|
||||
return result, err
|
||||
}
|
||||
|
||||
numBuffers := 0 // The number of buffers & out going goroutines created so far
|
||||
injectBuffer := func() {
|
||||
// For each Buffer, create it and a goroutine to upload it
|
||||
incoming <- make([]byte, o.BufferSize) // Add the new buffer to the incoming channel so this goroutine can read from the reader into it
|
||||
numBuffers++
|
||||
go func() {
|
||||
for outgoingMsg := range outgoing {
|
||||
// Upload the outgoing buffer
|
||||
err := t.chunk(ctx, outgoingMsg.chunkNum, outgoingMsg.buffer)
|
||||
wg.Done() // Indicate this buffer was sent
|
||||
if nil != err {
|
||||
// NOTE: finalErr could be assigned to multiple times here which is OK,
|
||||
// some error will be returned.
|
||||
firstErr.set(err)
|
||||
cancel()
|
||||
}
|
||||
incoming <- outgoingMsg.buffer // The goroutine reading from the stream can reuse this buffer now
|
||||
}
|
||||
}()
|
||||
}
|
||||
injectBuffer() // Create our 1st buffer & outgoing goroutine
|
||||
|
||||
// This goroutine grabs a buffer, reads from the stream into the buffer,
|
||||
// and inserts the buffer into the outgoing channel to be uploaded
|
||||
for c := uint32(0); true; c++ { // Iterate once per chunk
|
||||
var buffer []byte
|
||||
if numBuffers < o.MaxBuffers {
|
||||
select {
|
||||
// We're not at max buffers, see if a previously-created buffer is available
|
||||
case buffer = <-incoming:
|
||||
break
|
||||
default:
|
||||
// No buffer available; inject a new buffer & go routine to process it
|
||||
injectBuffer()
|
||||
buffer = <-incoming // Grab the just-injected buffer
|
||||
}
|
||||
} else {
|
||||
// We are at max buffers, block until we get to reuse one
|
||||
buffer = <-incoming
|
||||
}
|
||||
n, err := io.ReadFull(reader, buffer)
|
||||
if err != nil { // Less than len(buffer) bytes were read
|
||||
buffer = buffer[:n] // Make slice match the # of read bytes
|
||||
}
|
||||
if len(buffer) > 0 {
|
||||
// Buffer not empty, upload it
|
||||
wg.Add(1) // We're posting a buffer to be sent
|
||||
outgoing <- OutgoingMsg{chunkNum: c, buffer: buffer}
|
||||
}
|
||||
if err != nil { // The reader is done, no more outgoing buffers
|
||||
if err == io.EOF || err == io.ErrUnexpectedEOF {
|
||||
err = nil // This function does NOT return an error if io.ReadFull returns io.EOF or io.ErrUnexpectedEOF
|
||||
} else {
|
||||
firstErr.set(err)
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
// NOTE: Don't close the incoming channel because the outgoing goroutines post buffers into it when they are done
|
||||
close(outgoing) // Make all the outgoing goroutines terminate when this channel is empty
|
||||
wg.Wait() // Wait for all pending outgoing messages to complete
|
||||
err := firstErr.get()
|
||||
if err == nil {
|
||||
// If no error, after all blocks uploaded, commit them to the blob & return the result
|
||||
return t.end(ctx)
|
||||
}
|
||||
return nil, err
|
||||
}
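For orientation, a hedged sketch of driving the high-level upload helper above; uploadBytes is a hypothetical wrapper, the URL is a placeholder, and the pipeline is assumed to be built elsewhere (for example with azblob.NewPipeline and a credential). The upstream module path is used in the import; in this repo the same code lives in the vendored internal copy.

package example

import (
	"context"
	"net/url"

	"github.com/Azure/azure-pipeline-go/pipeline"
	azblob "github.com/Azure/azure-storage-blob-go/azblob"
)

// uploadBytes stages and commits the buffer as a block blob. Block size and
// parallelism are tuning knobs; leaving them zero picks the defaults shown in
// UploadBufferToBlockBlob and doBatchTransfer above.
func uploadBytes(ctx context.Context, p pipeline.Pipeline, rawURL string, data []byte) error {
	u, err := url.Parse(rawURL) // e.g. "https://<account>.blob.core.windows.net/<container>/<blob>"
	if err != nil {
		return err
	}
	blobURL := azblob.NewBlockBlobURL(*u, p)
	_, err = azblob.UploadBufferToBlockBlob(ctx, data, blobURL, azblob.UploadToBlockBlobOptions{
		BlockSize:   4 * 1024 * 1024, // 4MB blocks
		Parallelism: 4,               // up to 4 concurrent StageBlock calls
	})
	return err
}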
|
|
@ -0,0 +1,148 @@
|
|||
package azblob
|
||||
|
||||
import (
|
||||
"net"
|
||||
"net/url"
|
||||
"strings"
|
||||
)
|
||||
|
||||
const (
|
||||
snapshot = "snapshot"
|
||||
SnapshotTimeFormat = "2006-01-02T15:04:05.0000000Z07:00"
|
||||
)
|
||||
|
||||
// A BlobURLParts object represents the components that make up an Azure Storage Container/Blob URL. You parse an
|
||||
// existing URL into its parts by calling NewBlobURLParts(). You construct a URL from parts by calling URL().
|
||||
// NOTE: Changing any SAS-related field requires computing a new SAS signature.
|
||||
type BlobURLParts struct {
|
||||
Scheme string // Ex: "https://"
|
||||
Host string // Ex: "account.blob.core.windows.net", "10.132.141.33", "10.132.141.33:80"
|
||||
IPEndpointStyleInfo IPEndpointStyleInfo
|
||||
ContainerName string // "" if no container
|
||||
BlobName string // "" if no blob
|
||||
Snapshot string // "" if not a snapshot
|
||||
SAS SASQueryParameters
|
||||
UnparsedParams string
|
||||
}
|
||||
|
||||
// IPEndpointStyleInfo is used for IP endpoint style URL when working with Azure storage emulator.
|
||||
// Ex: "https://10.132.141.33/accountname/containername"
|
||||
type IPEndpointStyleInfo struct {
|
||||
AccountName string // "" if not using IP endpoint style
|
||||
}
|
||||
|
||||
// isIPEndpointStyle checks whether the URL's host is an IP; in that case the storage account endpoint is composed as:
|
||||
// http(s)://IP(:port)/storageaccount/container/...
|
||||
// Like url.URL's Host field, host may be either "host" or "host:port".
|
||||
func isIPEndpointStyle(host string) bool {
|
||||
if host == "" {
|
||||
return false
|
||||
}
|
||||
if h, _, err := net.SplitHostPort(host); err == nil {
|
||||
host = h
|
||||
}
|
||||
// For IPv6 there are cases where SplitHostPort fails because it cannot find a port.
|
||||
// In this case, eliminate the '[' and ']' in the URL.
|
||||
// For details about IPv6 URL, please refer to https://tools.ietf.org/html/rfc2732
|
||||
if host[0] == '[' && host[len(host)-1] == ']' {
|
||||
host = host[1 : len(host)-1]
|
||||
}
|
||||
return net.ParseIP(host) != nil
|
||||
}
|
||||
|
||||
// NewBlobURLParts parses a URL initializing BlobURLParts' fields including any SAS-related & snapshot query parameters. Any other
|
||||
// query parameters remain in the UnparsedParams field. This method overwrites all fields in the BlobURLParts object.
|
||||
func NewBlobURLParts(u url.URL) BlobURLParts {
|
||||
up := BlobURLParts{
|
||||
Scheme: u.Scheme,
|
||||
Host: u.Host,
|
||||
}
|
||||
|
||||
// Find the container & blob names (if any)
|
||||
if u.Path != "" {
|
||||
path := u.Path
|
||||
if path[0] == '/' {
|
||||
path = path[1:] // If path starts with a slash, remove it
|
||||
}
|
||||
if isIPEndpointStyle(up.Host) {
|
||||
if accountEndIndex := strings.Index(path, "/"); accountEndIndex == -1 { // Slash not found; path has account name & no container name or blob
|
||||
up.IPEndpointStyleInfo.AccountName = path
|
||||
} else {
|
||||
up.IPEndpointStyleInfo.AccountName = path[:accountEndIndex] // The account name is the part between the slashes
|
||||
path = path[accountEndIndex+1:] // path refers to portion after the account name now (container & blob names)
|
||||
}
|
||||
}
|
||||
|
||||
containerEndIndex := strings.Index(path, "/") // Find the next slash (if it exists)
|
||||
if containerEndIndex == -1 { // Slash not found; path has container name & no blob name
|
||||
up.ContainerName = path
|
||||
} else {
|
||||
up.ContainerName = path[:containerEndIndex] // The container name is the part between the slashes
|
||||
up.BlobName = path[containerEndIndex+1:] // The blob name is after the container slash
|
||||
}
|
||||
}
|
||||
|
||||
// Convert the query parameters to a case-sensitive map & trim whitespace
|
||||
paramsMap := u.Query()
|
||||
|
||||
up.Snapshot = "" // Assume no snapshot
|
||||
if snapshotStr, ok := caseInsensitiveValues(paramsMap).Get(snapshot); ok {
|
||||
up.Snapshot = snapshotStr[0]
|
||||
// If we recognized the query parameter, remove it from the map
|
||||
delete(paramsMap, snapshot)
|
||||
}
|
||||
up.SAS = newSASQueryParameters(paramsMap, true)
|
||||
up.UnparsedParams = paramsMap.Encode()
|
||||
return up
|
||||
}
|
||||
|
||||
type caseInsensitiveValues url.Values // map[string][]string
|
||||
func (values caseInsensitiveValues) Get(key string) ([]string, bool) {
|
||||
key = strings.ToLower(key)
|
||||
for k, v := range values {
|
||||
if strings.ToLower(k) == key {
|
||||
return v, true
|
||||
}
|
||||
}
|
||||
return []string{}, false
|
||||
}
|
||||
|
||||
// URL returns a URL object whose fields are initialized from the BlobURLParts fields. The URL's RawQuery
|
||||
// field contains the SAS, snapshot, and unparsed query parameters.
|
||||
func (up BlobURLParts) URL() url.URL {
|
||||
path := ""
|
||||
if isIPEndpointStyle(up.Host) && up.IPEndpointStyleInfo.AccountName != "" {
|
||||
path += "/" + up.IPEndpointStyleInfo.AccountName
|
||||
}
|
||||
// Concatenate container & blob names (if they exist)
|
||||
if up.ContainerName != "" {
|
||||
path += "/" + up.ContainerName
|
||||
if up.BlobName != "" {
|
||||
path += "/" + up.BlobName
|
||||
}
|
||||
}
|
||||
|
||||
rawQuery := up.UnparsedParams
|
||||
|
||||
// Concatenate blob snapshot query parameter (if it exists)
|
||||
if up.Snapshot != "" {
|
||||
if len(rawQuery) > 0 {
|
||||
rawQuery += "&"
|
||||
}
|
||||
rawQuery += snapshot + "=" + up.Snapshot
|
||||
}
|
||||
sas := up.SAS.Encode()
|
||||
if sas != "" {
|
||||
if len(rawQuery) > 0 {
|
||||
rawQuery += "&"
|
||||
}
|
||||
rawQuery += sas
|
||||
}
|
||||
u := url.URL{
|
||||
Scheme: up.Scheme,
|
||||
Host: up.Host,
|
||||
Path: path,
|
||||
RawQuery: rawQuery,
|
||||
}
|
||||
return u
|
||||
}
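A short illustration of the round trip supported by NewBlobURLParts and URL above; the account, container, and blob names are made up:

package main

import (
	"fmt"
	"net/url"

	azblob "github.com/Azure/azure-storage-blob-go/azblob"
)

func main() {
	raw := "https://myaccount.blob.core.windows.net/mycontainer/dir/data.bin" +
		"?snapshot=2022-01-02T03:04:05.0000000Z&other=param"
	u, _ := url.Parse(raw)

	parts := azblob.NewBlobURLParts(*u)
	fmt.Println(parts.ContainerName)  // mycontainer
	fmt.Println(parts.BlobName)       // dir/data.bin
	fmt.Println(parts.Snapshot)       // 2022-01-02T03:04:05.0000000Z
	fmt.Println(parts.UnparsedParams) // other=param

	rebuilt := parts.URL() // reassembles path, snapshot, SAS, and leftover params
	fmt.Println(rebuilt.String())
}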
|
|
@ -0,0 +1,210 @@
|
|||
package azblob
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// BlobSASSignatureValues is used to generate a Shared Access Signature (SAS) for an Azure Storage container or blob.
|
||||
// For more information, see https://docs.microsoft.com/rest/api/storageservices/constructing-a-service-sas
|
||||
type BlobSASSignatureValues struct {
|
||||
Version string `param:"sv"` // If not specified, this defaults to SASVersion
|
||||
Protocol SASProtocol `param:"spr"` // See the SASProtocol* constants
|
||||
StartTime time.Time `param:"st"` // Not specified if IsZero
|
||||
ExpiryTime time.Time `param:"se"` // Not specified if IsZero
|
||||
Permissions string `param:"sp"` // Create by initializing a ContainerSASPermissions or BlobSASPermissions and then call String()
|
||||
IPRange IPRange `param:"sip"`
|
||||
Identifier string `param:"si"`
|
||||
ContainerName string
|
||||
BlobName string // Use "" to create a Container SAS
|
||||
CacheControl string // rscc
|
||||
ContentDisposition string // rscd
|
||||
ContentEncoding string // rsce
|
||||
ContentLanguage string // rscl
|
||||
ContentType string // rsct
|
||||
}
|
||||
|
||||
// NewSASQueryParameters uses an account's shared key credential to sign this signature values to produce
|
||||
// the proper SAS query parameters.
|
||||
func (v BlobSASSignatureValues) NewSASQueryParameters(sharedKeyCredential *SharedKeyCredential) (SASQueryParameters, error) {
|
||||
resource := "c"
|
||||
if v.BlobName == "" {
|
||||
// Make sure the permission characters are in the correct order
|
||||
perms := &ContainerSASPermissions{}
|
||||
if err := perms.Parse(v.Permissions); err != nil {
|
||||
return SASQueryParameters{}, err
|
||||
}
|
||||
v.Permissions = perms.String()
|
||||
} else {
|
||||
resource = "b"
|
||||
// Make sure the permission characters are in the correct order
|
||||
perms := &BlobSASPermissions{}
|
||||
if err := perms.Parse(v.Permissions); err != nil {
|
||||
return SASQueryParameters{}, err
|
||||
}
|
||||
v.Permissions = perms.String()
|
||||
}
|
||||
if v.Version == "" {
|
||||
v.Version = SASVersion
|
||||
}
|
||||
startTime, expiryTime := FormatTimesForSASSigning(v.StartTime, v.ExpiryTime)
|
||||
|
||||
// String to sign: http://msdn.microsoft.com/en-us/library/azure/dn140255.aspx
|
||||
stringToSign := strings.Join([]string{
|
||||
v.Permissions,
|
||||
startTime,
|
||||
expiryTime,
|
||||
getCanonicalName(sharedKeyCredential.AccountName(), v.ContainerName, v.BlobName),
|
||||
v.Identifier,
|
||||
v.IPRange.String(),
|
||||
string(v.Protocol),
|
||||
v.Version,
|
||||
resource,
|
||||
"", // signed timestamp, @TODO add for snapshot sas feature
|
||||
v.CacheControl, // rscc
|
||||
v.ContentDisposition, // rscd
|
||||
v.ContentEncoding, // rsce
|
||||
v.ContentLanguage, // rscl
|
||||
v.ContentType}, // rsct
|
||||
"\n")
|
||||
signature := sharedKeyCredential.ComputeHMACSHA256(stringToSign)
|
||||
|
||||
p := SASQueryParameters{
|
||||
// Common SAS parameters
|
||||
version: v.Version,
|
||||
protocol: v.Protocol,
|
||||
startTime: v.StartTime,
|
||||
expiryTime: v.ExpiryTime,
|
||||
permissions: v.Permissions,
|
||||
ipRange: v.IPRange,
|
||||
|
||||
// Container/Blob-specific SAS parameters
|
||||
resource: resource,
|
||||
identifier: v.Identifier,
|
||||
cacheControl: v.CacheControl,
|
||||
contentDisposition: v.ContentDisposition,
|
||||
contentEncoding: v.ContentEncoding,
|
||||
contentLanguage: v.ContentLanguage,
|
||||
contentType: v.ContentType,
|
||||
|
||||
// Calculated SAS signature
|
||||
signature: signature,
|
||||
}
|
||||
return p, nil
|
||||
}
|
||||
|
||||
// getCanonicalName computes the canonical name for a container or blob resource for SAS signing.
|
||||
func getCanonicalName(account string, containerName string, blobName string) string {
|
||||
// Container: "/blob/account/containername"
|
||||
// Blob: "/blob/account/containername/blobname"
|
||||
elements := []string{"/blob/", account, "/", containerName}
|
||||
if blobName != "" {
|
||||
elements = append(elements, "/", strings.Replace(blobName, "\\", "/", -1))
|
||||
}
|
||||
return strings.Join(elements, "")
|
||||
}
|
||||
|
||||
// The ContainerSASPermissions type simplifies creating the permissions string for an Azure Storage container SAS.
|
||||
// Initialize an instance of this type and then call its String method to set BlobSASSignatureValues's Permissions field.
|
||||
type ContainerSASPermissions struct {
|
||||
Read, Add, Create, Write, Delete, List bool
|
||||
}
|
||||
|
||||
// String produces the SAS permissions string for an Azure Storage container.
|
||||
// Call this method to set BlobSASSignatureValues's Permissions field.
|
||||
func (p ContainerSASPermissions) String() string {
|
||||
var b bytes.Buffer
|
||||
if p.Read {
|
||||
b.WriteRune('r')
|
||||
}
|
||||
if p.Add {
|
||||
b.WriteRune('a')
|
||||
}
|
||||
if p.Create {
|
||||
b.WriteRune('c')
|
||||
}
|
||||
if p.Write {
|
||||
b.WriteRune('w')
|
||||
}
|
||||
if p.Delete {
|
||||
b.WriteRune('d')
|
||||
}
|
||||
if p.List {
|
||||
b.WriteRune('l')
|
||||
}
|
||||
return b.String()
|
||||
}
|
||||
|
||||
// Parse initializes the ContainerSASPermissions's fields from a string.
|
||||
func (p *ContainerSASPermissions) Parse(s string) error {
|
||||
*p = ContainerSASPermissions{} // Clear the flags
|
||||
for _, r := range s {
|
||||
switch r {
|
||||
case 'r':
|
||||
p.Read = true
|
||||
case 'a':
|
||||
p.Add = true
|
||||
case 'c':
|
||||
p.Create = true
|
||||
case 'w':
|
||||
p.Write = true
|
||||
case 'd':
|
||||
p.Delete = true
|
||||
case 'l':
|
||||
p.List = true
|
||||
default:
|
||||
return fmt.Errorf("Invalid permission: '%v'", r)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// The BlobSASPermissions type simplifies creating the permissions string for an Azure Storage blob SAS.
|
||||
// Initialize an instance of this type and then call its String method to set BlobSASSignatureValues's Permissions field.
|
||||
type BlobSASPermissions struct{ Read, Add, Create, Write, Delete bool }
|
||||
|
||||
// String produces the SAS permissions string for an Azure Storage blob.
|
||||
// Call this method to set BlobSASSignatureValues's Permissions field.
|
||||
func (p BlobSASPermissions) String() string {
|
||||
var b bytes.Buffer
|
||||
if p.Read {
|
||||
b.WriteRune('r')
|
||||
}
|
||||
if p.Add {
|
||||
b.WriteRune('a')
|
||||
}
|
||||
if p.Create {
|
||||
b.WriteRune('c')
|
||||
}
|
||||
if p.Write {
|
||||
b.WriteRune('w')
|
||||
}
|
||||
if p.Delete {
|
||||
b.WriteRune('d')
|
||||
}
|
||||
return b.String()
|
||||
}
|
||||
|
||||
// Parse initializes the BlobSASPermissions's fields from a string.
|
||||
func (p *BlobSASPermissions) Parse(s string) error {
|
||||
*p = BlobSASPermissions{} // Clear the flags
|
||||
for _, r := range s {
|
||||
switch r {
|
||||
case 'r':
|
||||
p.Read = true
|
||||
case 'a':
|
||||
p.Add = true
|
||||
case 'c':
|
||||
p.Create = true
|
||||
case 'w':
|
||||
p.Write = true
|
||||
case 'd':
|
||||
p.Delete = true
|
||||
default:
|
||||
return fmt.Errorf("Invalid permission: '%v'", r)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
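And a hedged sketch of producing a blob-scoped SAS with the types above; blobSAS is a hypothetical helper, and the *SharedKeyCredential is assumed to be constructed elsewhere (its constructor signature varies between azblob releases):

package example

import (
	"time"

	azblob "github.com/Azure/azure-storage-blob-go/azblob"
)

// blobSAS signs read-only access to a single blob for four hours and returns
// the encoded query string to append to the blob URL.
func blobSAS(cred *azblob.SharedKeyCredential, container, blob string) (string, error) {
	values := azblob.BlobSASSignatureValues{
		Protocol:      azblob.SASProtocolHTTPS,
		ExpiryTime:    time.Now().UTC().Add(4 * time.Hour),
		Permissions:   azblob.BlobSASPermissions{Read: true}.String(),
		ContainerName: container,
		BlobName:      blob, // leave empty to sign a container-level SAS instead
	}
	qp, err := values.NewSASQueryParameters(cred)
	if err != nil {
		return "", err
	}
	return qp.Encode(), nil
}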
|
|
@ -0,0 +1,195 @@
|
|||
package azblob
|
||||
|
||||
// https://docs.microsoft.com/en-us/rest/api/storageservices/blob-service-error-codes
|
||||
|
||||
// ServiceCode values indicate a service failure.
|
||||
const (
|
||||
// ServiceCodeAppendPositionConditionNotMet means the append position condition specified was not met.
|
||||
ServiceCodeAppendPositionConditionNotMet ServiceCodeType = "AppendPositionConditionNotMet"
|
||||
|
||||
// ServiceCodeBlobAlreadyExists means the specified blob already exists.
|
||||
ServiceCodeBlobAlreadyExists ServiceCodeType = "BlobAlreadyExists"
|
||||
|
||||
// ServiceCodeBlobNotFound means the specified blob does not exist.
|
||||
ServiceCodeBlobNotFound ServiceCodeType = "BlobNotFound"
|
||||
|
||||
// ServiceCodeBlobOverwritten means the blob has been recreated since the previous snapshot was taken.
|
||||
ServiceCodeBlobOverwritten ServiceCodeType = "BlobOverwritten"
|
||||
|
||||
// ServiceCodeBlobTierInadequateForContentLength means the specified blob tier size limit cannot be less than content length.
|
||||
ServiceCodeBlobTierInadequateForContentLength ServiceCodeType = "BlobTierInadequateForContentLength"
|
||||
|
||||
// ServiceCodeBlockCountExceedsLimit means the committed block count cannot exceed the maximum limit of 50,000 blocks
|
||||
// or that the uncommitted block count cannot exceed the maximum limit of 100,000 blocks.
|
||||
ServiceCodeBlockCountExceedsLimit ServiceCodeType = "BlockCountExceedsLimit"
|
||||
|
||||
// ServiceCodeBlockListTooLong means the block list may not contain more than 50,000 blocks.
|
||||
ServiceCodeBlockListTooLong ServiceCodeType = "BlockListTooLong"
|
||||
|
||||
// ServiceCodeCannotChangeToLowerTier means that a higher blob tier has already been explicitly set.
|
||||
ServiceCodeCannotChangeToLowerTier ServiceCodeType = "CannotChangeToLowerTier"
|
||||
|
||||
// ServiceCodeCannotVerifyCopySource means that the service could not verify the copy source within the specified time.
|
||||
// Examine the HTTP status code and message for more information about the failure.
|
||||
ServiceCodeCannotVerifyCopySource ServiceCodeType = "CannotVerifyCopySource"
|
||||
|
||||
// ServiceCodeContainerAlreadyExists means the specified container already exists.
|
||||
ServiceCodeContainerAlreadyExists ServiceCodeType = "ContainerAlreadyExists"
|
||||
|
||||
// ServiceCodeContainerBeingDeleted means the specified container is being deleted.
|
||||
ServiceCodeContainerBeingDeleted ServiceCodeType = "ContainerBeingDeleted"
|
||||
|
||||
// ServiceCodeContainerDisabled means the specified container has been disabled by the administrator.
|
||||
ServiceCodeContainerDisabled ServiceCodeType = "ContainerDisabled"
|
||||
|
||||
// ServiceCodeContainerNotFound means the specified container does not exist.
|
||||
ServiceCodeContainerNotFound ServiceCodeType = "ContainerNotFound"
|
||||
|
||||
// ServiceCodeContentLengthLargerThanTierLimit means the blob's content length cannot exceed its tier limit.
|
||||
ServiceCodeContentLengthLargerThanTierLimit ServiceCodeType = "ContentLengthLargerThanTierLimit"
|
||||
|
||||
// ServiceCodeCopyAcrossAccountsNotSupported means the copy source account and destination account must be the same.
|
||||
ServiceCodeCopyAcrossAccountsNotSupported ServiceCodeType = "CopyAcrossAccountsNotSupported"
|
||||
|
||||
// ServiceCodeCopyIDMismatch means the specified copy ID did not match the copy ID for the pending copy operation.
|
||||
ServiceCodeCopyIDMismatch ServiceCodeType = "CopyIdMismatch"
|
||||
|
||||
// ServiceCodeFeatureVersionMismatch means the type of blob in the container is unrecognized by this version or
|
||||
// that the operation for AppendBlob requires at least version 2015-02-21.
|
||||
ServiceCodeFeatureVersionMismatch ServiceCodeType = "FeatureVersionMismatch"
|
||||
|
||||
// ServiceCodeIncrementalCopyBlobMismatch means the specified source blob is different than the copy source of the existing incremental copy blob.
|
||||
ServiceCodeIncrementalCopyBlobMismatch ServiceCodeType = "IncrementalCopyBlobMismatch"
|
||||
|
||||
// ServiceCodeIncrementalCopyOfEralierVersionSnapshotNotAllowed means the specified snapshot is earlier than the last snapshot copied into the incremental copy blob.
|
||||
ServiceCodeIncrementalCopyOfEralierVersionSnapshotNotAllowed ServiceCodeType = "IncrementalCopyOfEralierVersionSnapshotNotAllowed"
|
||||
|
||||
// ServiceCodeIncrementalCopySourceMustBeSnapshot means the source for incremental copy request must be a snapshot.
|
||||
ServiceCodeIncrementalCopySourceMustBeSnapshot ServiceCodeType = "IncrementalCopySourceMustBeSnapshot"
|
||||
|
||||
// ServiceCodeInfiniteLeaseDurationRequired means the lease ID matched, but the specified lease must be an infinite-duration lease.
|
||||
ServiceCodeInfiniteLeaseDurationRequired ServiceCodeType = "InfiniteLeaseDurationRequired"
|
||||
|
||||
// ServiceCodeInvalidBlobOrBlock means the specified blob or block content is invalid.
|
||||
ServiceCodeInvalidBlobOrBlock ServiceCodeType = "InvalidBlobOrBlock"
|
||||
|
||||
// ServiceCodeInvalidBlobType means the blob type is invalid for this operation.
|
||||
ServiceCodeInvalidBlobType ServiceCodeType = "InvalidBlobType"
|
||||
|
||||
// ServiceCodeInvalidBlockID means the specified block ID is invalid. The block ID must be Base64-encoded.
|
||||
ServiceCodeInvalidBlockID ServiceCodeType = "InvalidBlockId"
|
||||
|
||||
// ServiceCodeInvalidBlockList means the specified block list is invalid.
|
||||
ServiceCodeInvalidBlockList ServiceCodeType = "InvalidBlockList"
|
||||
|
||||
// ServiceCodeInvalidOperation means an invalid operation against a blob snapshot.
|
||||
ServiceCodeInvalidOperation ServiceCodeType = "InvalidOperation"
|
||||
|
||||
// ServiceCodeInvalidPageRange means the page range specified is invalid.
|
||||
ServiceCodeInvalidPageRange ServiceCodeType = "InvalidPageRange"
|
||||
|
||||
// ServiceCodeInvalidSourceBlobType means the copy source blob type is invalid for this operation.
|
||||
ServiceCodeInvalidSourceBlobType ServiceCodeType = "InvalidSourceBlobType"
|
||||
|
||||
// ServiceCodeInvalidSourceBlobURL means the source URL for incremental copy request must be valid Azure Storage blob URL.
|
||||
ServiceCodeInvalidSourceBlobURL ServiceCodeType = "InvalidSourceBlobUrl"
|
||||
|
||||
// ServiceCodeInvalidVersionForPageBlobOperation means that all operations on page blobs require at least version 2009-09-19.
|
||||
ServiceCodeInvalidVersionForPageBlobOperation ServiceCodeType = "InvalidVersionForPageBlobOperation"
|
||||
|
||||
// ServiceCodeLeaseAlreadyPresent means there is already a lease present.
|
||||
ServiceCodeLeaseAlreadyPresent ServiceCodeType = "LeaseAlreadyPresent"
|
||||
|
||||
// ServiceCodeLeaseAlreadyBroken means the lease has already been broken and cannot be broken again.
|
||||
ServiceCodeLeaseAlreadyBroken ServiceCodeType = "LeaseAlreadyBroken"
|
||||
|
||||
// ServiceCodeLeaseIDMismatchWithBlobOperation means the lease ID specified did not match the lease ID for the blob.
|
||||
ServiceCodeLeaseIDMismatchWithBlobOperation ServiceCodeType = "LeaseIdMismatchWithBlobOperation"
|
||||
|
||||
// ServiceCodeLeaseIDMismatchWithContainerOperation means the lease ID specified did not match the lease ID for the container.
|
||||
ServiceCodeLeaseIDMismatchWithContainerOperation ServiceCodeType = "LeaseIdMismatchWithContainerOperation"
|
||||
|
||||
// ServiceCodeLeaseIDMismatchWithLeaseOperation means the lease ID specified did not match the lease ID for the blob/container.
|
||||
ServiceCodeLeaseIDMismatchWithLeaseOperation ServiceCodeType = "LeaseIdMismatchWithLeaseOperation"
|
||||
|
||||
// ServiceCodeLeaseIDMissing means there is currently a lease on the blob/container and no lease ID was specified in the request.
|
||||
ServiceCodeLeaseIDMissing ServiceCodeType = "LeaseIdMissing"
|
||||
|
||||
// ServiceCodeLeaseIsBreakingAndCannotBeAcquired means the lease ID matched, but the lease is currently in breaking state and cannot be acquired until it is broken.
|
||||
ServiceCodeLeaseIsBreakingAndCannotBeAcquired ServiceCodeType = "LeaseIsBreakingAndCannotBeAcquired"
|
||||
|
||||
// ServiceCodeLeaseIsBreakingAndCannotBeChanged means the lease ID matched, but the lease is currently in breaking state and cannot be changed.
|
||||
ServiceCodeLeaseIsBreakingAndCannotBeChanged ServiceCodeType = "LeaseIsBreakingAndCannotBeChanged"
|
||||
|
||||
// ServiceCodeLeaseIsBrokenAndCannotBeRenewed means the lease ID matched, but the lease has been broken explicitly and cannot be renewed.
|
||||
ServiceCodeLeaseIsBrokenAndCannotBeRenewed ServiceCodeType = "LeaseIsBrokenAndCannotBeRenewed"
|
||||
|
||||
// ServiceCodeLeaseLost means a lease ID was specified, but the lease for the blob/container has expired.
|
||||
ServiceCodeLeaseLost ServiceCodeType = "LeaseLost"
|
||||
|
||||
// ServiceCodeLeaseNotPresentWithBlobOperation means there is currently no lease on the blob.
|
||||
ServiceCodeLeaseNotPresentWithBlobOperation ServiceCodeType = "LeaseNotPresentWithBlobOperation"
|
||||
|
||||
// ServiceCodeLeaseNotPresentWithContainerOperation means there is currently no lease on the container.
|
||||
ServiceCodeLeaseNotPresentWithContainerOperation ServiceCodeType = "LeaseNotPresentWithContainerOperation"
|
||||
|
||||
// ServiceCodeLeaseNotPresentWithLeaseOperation means there is currently no lease on the blob/container.
|
||||
ServiceCodeLeaseNotPresentWithLeaseOperation ServiceCodeType = "LeaseNotPresentWithLeaseOperation"
|
||||
|
||||
// ServiceCodeMaxBlobSizeConditionNotMet means the max blob size condition specified was not met.
|
||||
ServiceCodeMaxBlobSizeConditionNotMet ServiceCodeType = "MaxBlobSizeConditionNotMet"
|
||||
|
||||
// ServiceCodeNoPendingCopyOperation means there is currently no pending copy operation.
|
||||
ServiceCodeNoPendingCopyOperation ServiceCodeType = "NoPendingCopyOperation"
|
||||
|
||||
// ServiceCodeOperationNotAllowedOnIncrementalCopyBlob means the specified operation is not allowed on an incremental copy blob.
|
||||
ServiceCodeOperationNotAllowedOnIncrementalCopyBlob ServiceCodeType = "OperationNotAllowedOnIncrementalCopyBlob"
|
||||
|
||||
// ServiceCodePendingCopyOperation means there is currently a pending copy operation.
|
||||
ServiceCodePendingCopyOperation ServiceCodeType = "PendingCopyOperation"
|
||||
|
||||
// ServiceCodePreviousSnapshotCannotBeNewer means the prevsnapshot query parameter value cannot be newer than snapshot query parameter value.
|
||||
ServiceCodePreviousSnapshotCannotBeNewer ServiceCodeType = "PreviousSnapshotCannotBeNewer"
|
||||
|
||||
// ServiceCodePreviousSnapshotNotFound means the previous snapshot is not found.
|
||||
ServiceCodePreviousSnapshotNotFound ServiceCodeType = "PreviousSnapshotNotFound"
|
||||
|
||||
// ServiceCodePreviousSnapshotOperationNotSupported means that differential Get Page Ranges is not supported on the previous snapshot.
|
||||
ServiceCodePreviousSnapshotOperationNotSupported ServiceCodeType = "PreviousSnapshotOperationNotSupported"
|
||||
|
||||
// ServiceCodeSequenceNumberConditionNotMet means the sequence number condition specified was not met.
|
||||
ServiceCodeSequenceNumberConditionNotMet ServiceCodeType = "SequenceNumberConditionNotMet"
|
||||
|
||||
// ServiceCodeSequenceNumberIncrementTooLarge means the sequence number increment cannot be performed because it would result in overflow of the sequence number.
|
||||
ServiceCodeSequenceNumberIncrementTooLarge ServiceCodeType = "SequenceNumberIncrementTooLarge"
|
||||
|
||||
// ServiceCodeSnapshotCountExceeded means the snapshot count against this blob has been exceeded.
|
||||
ServiceCodeSnapshotCountExceeded ServiceCodeType = "SnapshotCountExceeded"
|
||||
|
||||
// ServiceCodeSnaphotOperationRateExceeded means the rate of snapshot operations against this blob has been exceeded.
|
||||
ServiceCodeSnaphotOperationRateExceeded ServiceCodeType = "SnaphotOperationRateExceeded"
|
||||
|
||||
// ServiceCodeSnapshotsPresent means this operation is not permitted while the blob has snapshots.
|
||||
ServiceCodeSnapshotsPresent ServiceCodeType = "SnapshotsPresent"
|
||||
|
||||
// ServiceCodeSourceConditionNotMet means the source condition specified using HTTP conditional header(s) is not met.
|
||||
ServiceCodeSourceConditionNotMet ServiceCodeType = "SourceConditionNotMet"
|
||||
|
||||
// ServiceCodeSystemInUse means this blob is in use by the system.
|
||||
ServiceCodeSystemInUse ServiceCodeType = "SystemInUse"
|
||||
|
||||
// ServiceCodeTargetConditionNotMet means the target condition specified using HTTP conditional header(s) is not met.
|
||||
ServiceCodeTargetConditionNotMet ServiceCodeType = "TargetConditionNotMet"
|
||||
|
||||
// ServiceCodeUnauthorizedBlobOverwrite means this request is not authorized to perform blob overwrites.
|
||||
ServiceCodeUnauthorizedBlobOverwrite ServiceCodeType = "UnauthorizedBlobOverwrite"
|
||||
|
||||
// ServiceCodeBlobBeingRehydrated means this operation is not permitted because the blob is being rehydrated.
|
||||
ServiceCodeBlobBeingRehydrated ServiceCodeType = "BlobBeingRehydrated"
|
||||
|
||||
// ServiceCodeBlobArchived means this operation is not permitted on an archived blob.
|
||||
ServiceCodeBlobArchived ServiceCodeType = "BlobArchived"
|
||||
|
||||
// ServiceCodeBlobNotArchived means this blob is currently not in the archived state.
|
||||
ServiceCodeBlobNotArchived ServiceCodeType = "BlobNotArchived"
|
||||
)
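These constants are typically compared against the service code carried by a failed request. The sketch below assumes the package's StorageError interface and its ServiceCode() accessor (present in upstream azblob but not part of this diff); the import path is likewise an assumption.

package example

import "github.com/Azure/azure-storage-blob-go/azblob" // adjust for the vendored copy

// isContainerAlreadyExists reports whether err is the service signalling that
// the container already exists, which callers often treat as success.
func isContainerAlreadyExists(err error) bool {
	serr, ok := err.(azblob.StorageError)
	return ok && serr.ServiceCode() == azblob.ServiceCodeContainerAlreadyExists
}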
|
|
@ -0,0 +1,127 @@
|
|||
package azblob
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io"
|
||||
"net/url"
|
||||
|
||||
"github.com/Azure/azure-pipeline-go/pipeline"
|
||||
)
|
||||
|
||||
const (
|
||||
// AppendBlobMaxAppendBlockBytes indicates the maximum number of bytes that can be sent in a call to AppendBlock.
|
||||
AppendBlobMaxAppendBlockBytes = 4 * 1024 * 1024 // 4MB
|
||||
|
||||
// AppendBlobMaxBlocks indicates the maximum number of blocks allowed in an append blob.
|
||||
AppendBlobMaxBlocks = 50000
|
||||
)
|
||||
|
||||
// AppendBlobURL defines a set of operations applicable to append blobs.
|
||||
type AppendBlobURL struct {
|
||||
BlobURL
|
||||
abClient appendBlobClient
|
||||
}
|
||||
|
||||
// NewAppendBlobURL creates an AppendBlobURL object using the specified URL and request policy pipeline.
|
||||
func NewAppendBlobURL(url url.URL, p pipeline.Pipeline) AppendBlobURL {
|
||||
blobClient := newBlobClient(url, p)
|
||||
abClient := newAppendBlobClient(url, p)
|
||||
return AppendBlobURL{BlobURL: BlobURL{blobClient: blobClient}, abClient: abClient}
|
||||
}
|
||||
|
||||
// WithPipeline creates a new AppendBlobURL object identical to the source but with the specified request policy pipeline.
|
||||
func (ab AppendBlobURL) WithPipeline(p pipeline.Pipeline) AppendBlobURL {
|
||||
return NewAppendBlobURL(ab.blobClient.URL(), p)
|
||||
}
|
||||
|
||||
// WithSnapshot creates a new AppendBlobURL object identical to the source but with the specified snapshot timestamp.
|
||||
// Pass "" to remove the snapshot returning a URL to the base blob.
|
||||
func (ab AppendBlobURL) WithSnapshot(snapshot string) AppendBlobURL {
|
||||
p := NewBlobURLParts(ab.URL())
|
||||
p.Snapshot = snapshot
|
||||
return NewAppendBlobURL(p.URL(), ab.blobClient.Pipeline())
|
||||
}
|
||||
|
||||
// Create creates a 0-length append blob. Call AppendBlock to append data to an append blob.
|
||||
// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-blob.
|
||||
func (ab AppendBlobURL) Create(ctx context.Context, h BlobHTTPHeaders, metadata Metadata, ac BlobAccessConditions) (*AppendBlobCreateResponse, error) {
|
||||
ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch := ac.ModifiedAccessConditions.pointers()
|
||||
return ab.abClient.Create(ctx, 0, nil,
|
||||
&h.ContentType, &h.ContentEncoding, &h.ContentLanguage, h.ContentMD5,
|
||||
&h.CacheControl, metadata, ac.LeaseAccessConditions.pointers(), &h.ContentDisposition,
|
||||
ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, nil)
|
||||
}
|
||||
|
||||
// AppendBlock writes a stream of data as a new block at the end of the existing append blob.
|
||||
// This method panics if the stream is not at position 0.
|
||||
// Note that the http client closes the body stream after the request is sent to the service.
|
||||
// For more information, see https://docs.microsoft.com/rest/api/storageservices/append-block.
|
||||
func (ab AppendBlobURL) AppendBlock(ctx context.Context, body io.ReadSeeker, ac AppendBlobAccessConditions, transactionalMD5 []byte) (*AppendBlobAppendBlockResponse, error) {
|
||||
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
|
||||
ifAppendPositionEqual, ifMaxSizeLessThanOrEqual := ac.AppendPositionAccessConditions.pointers()
|
||||
count, err := validateSeekableStreamAt0AndGetCount(body)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return ab.abClient.AppendBlock(ctx, body, count, nil,
|
||||
transactionalMD5, ac.LeaseAccessConditions.pointers(),
|
||||
ifMaxSizeLessThanOrEqual, ifAppendPositionEqual,
|
||||
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
|
||||
}
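A minimal end-to-end sketch of the two calls above: the blob URL and SAS token are placeholders, and the anonymous credential only works against a URL that already carries its own authorization.

package main

import (
	"context"
	"net/url"
	"strings"

	"github.com/Azure/azure-storage-blob-go/azblob" // adjust for the vendored copy
)

func main() {
	// Placeholder URL; a real call needs a SAS token or another credential.
	u, _ := url.Parse("https://myaccount.blob.core.windows.net/mycontainer/mylog.txt?<sas>")
	p := azblob.NewPipeline(azblob.NewAnonymousCredential(), azblob.PipelineOptions{})
	abURL := azblob.NewAppendBlobURL(*u, p)

	ctx := context.Background()
	// Create the 0-length append blob, then append one block to it.
	if _, err := abURL.Create(ctx, azblob.BlobHTTPHeaders{}, azblob.Metadata{}, azblob.BlobAccessConditions{}); err != nil {
		panic(err)
	}
	if _, err := abURL.AppendBlock(ctx, strings.NewReader("first line\n"), azblob.AppendBlobAccessConditions{}, nil); err != nil {
		panic(err)
	}
}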
|
||||
|
||||
// AppendBlockFromURL copies a new block of data from source URL to the end of the existing append blob.
|
||||
// For more information, see https://docs.microsoft.com/rest/api/storageservices/append-block-from-url.
|
||||
func (ab AppendBlobURL) AppendBlockFromURL(ctx context.Context, sourceURL url.URL, offset int64, count int64, ac AppendBlobAccessConditions, transactionalMD5 []byte) (*AppendBlobAppendBlockFromURLResponse, error) {
|
||||
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
|
||||
ifAppendPositionEqual, ifMaxSizeLessThanOrEqual := ac.AppendPositionAccessConditions.pointers()
|
||||
return ab.abClient.AppendBlockFromURL(ctx, sourceURL.String(), 0, httpRange{offset: offset, count: count}.pointers(),
|
||||
transactionalMD5, nil, transactionalMD5, ac.LeaseAccessConditions.pointers(),
|
||||
ifMaxSizeLessThanOrEqual, ifAppendPositionEqual,
|
||||
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
|
||||
}
|
||||
|
||||
type AppendBlobAccessConditions struct {
|
||||
ModifiedAccessConditions
|
||||
LeaseAccessConditions
|
||||
AppendPositionAccessConditions
|
||||
}
|
||||
|
||||
// AppendPositionAccessConditions identifies append blob-specific access conditions which you optionally set.
|
||||
type AppendPositionAccessConditions struct {
|
||||
// IfAppendPositionEqual ensures that the AppendBlock operation succeeds
|
||||
// only if the append position is equal to a value.
|
||||
// IfAppendPositionEqual=0 means no 'IfAppendPositionEqual' header specified.
|
||||
// IfAppendPositionEqual>0 means 'IfAppendPositionEqual' header specified with its value
|
||||
// IfAppendPositionEqual==-1 means 'IfAppendPositionEqual' header specified with a value of 0
|
||||
IfAppendPositionEqual int64
|
||||
|
||||
// IfMaxSizeLessThanOrEqual ensures that the AppendBlock operation succeeds
|
||||
// only if the append blob's size is less than or equal to a value.
|
||||
// IfMaxSizeLessThanOrEqual=0 means no 'IfMaxSizeLessThanOrEqual' header specified.
|
||||
// IfMaxSizeLessThanOrEqual>0 means 'IfMaxSizeLessThanOrEqual' header specified with its value
|
||||
// IfMaxSizeLessThanOrEqual==-1 means 'IfMaxSizeLessThanOrEqual' header specified with a value of 0
|
||||
IfMaxSizeLessThanOrEqual int64
|
||||
}
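A short sketch of how these sentinel values translate into conditional appends; the helper name is hypothetical and the import path assumes the upstream module.

package example

import (
	"context"
	"io"

	"github.com/Azure/azure-storage-blob-go/azblob" // adjust for the vendored copy
)

// appendIfEmpty appends body only if the blob is still empty and would stay
// under maxSize bytes. Because 0 means "no header", the "append position must
// be 0" condition is expressed with the -1 sentinel described above.
func appendIfEmpty(ctx context.Context, ab azblob.AppendBlobURL, body io.ReadSeeker, maxSize int64) error {
	ac := azblob.AppendBlobAccessConditions{
		AppendPositionAccessConditions: azblob.AppendPositionAccessConditions{
			IfAppendPositionEqual:    -1,
			IfMaxSizeLessThanOrEqual: maxSize,
		},
	}
	_, err := ab.AppendBlock(ctx, body, ac, nil)
	return err
}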
|
||||
|
||||
// pointers is for internal infrastructure. It returns the fields as pointers.
|
||||
func (ac AppendPositionAccessConditions) pointers() (iape *int64, imsltoe *int64) {
|
||||
var zero int64 // defaults to 0
|
||||
switch ac.IfAppendPositionEqual {
|
||||
case -1:
|
||||
iape = &zero
|
||||
case 0:
|
||||
iape = nil
|
||||
default:
|
||||
iape = &ac.IfAppendPositionEqual
|
||||
}
|
||||
|
||||
switch ac.IfMaxSizeLessThanOrEqual {
|
||||
case -1:
|
||||
imsltoe = &zero
|
||||
case 0:
|
||||
imsltoe = nil
|
||||
default:
|
||||
imsltoe = &ac.IfMaxSizeLessThanOrEqual
|
||||
}
|
||||
return
|
||||
}
|
|
@ -0,0 +1,216 @@
|
|||
package azblob
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net/url"
|
||||
|
||||
"github.com/Azure/azure-pipeline-go/pipeline"
|
||||
)
|
||||
|
||||
// A BlobURL represents a URL to an Azure Storage blob; the blob may be a block blob, append blob, or page blob.
|
||||
type BlobURL struct {
|
||||
blobClient blobClient
|
||||
}
|
||||
|
||||
// NewBlobURL creates a BlobURL object using the specified URL and request policy pipeline.
|
||||
func NewBlobURL(url url.URL, p pipeline.Pipeline) BlobURL {
|
||||
blobClient := newBlobClient(url, p)
|
||||
return BlobURL{blobClient: blobClient}
|
||||
}
|
||||
|
||||
// URL returns the URL endpoint used by the BlobURL object.
|
||||
func (b BlobURL) URL() url.URL {
|
||||
return b.blobClient.URL()
|
||||
}
|
||||
|
||||
// String returns the URL as a string.
|
||||
func (b BlobURL) String() string {
|
||||
u := b.URL()
|
||||
return u.String()
|
||||
}
|
||||
|
||||
// WithPipeline creates a new BlobURL object identical to the source but with the specified request policy pipeline.
|
||||
func (b BlobURL) WithPipeline(p pipeline.Pipeline) BlobURL {
|
||||
return NewBlobURL(b.blobClient.URL(), p)
|
||||
}
|
||||
|
||||
// WithSnapshot creates a new BlobURL object identical to the source but with the specified snapshot timestamp.
|
||||
// Pass "" to remove the snapshot returning a URL to the base blob.
|
||||
func (b BlobURL) WithSnapshot(snapshot string) BlobURL {
|
||||
p := NewBlobURLParts(b.URL())
|
||||
p.Snapshot = snapshot
|
||||
return NewBlobURL(p.URL(), b.blobClient.Pipeline())
|
||||
}
|
||||
|
||||
// ToAppendBlobURL creates an AppendBlobURL using the source's URL and pipeline.
|
||||
func (b BlobURL) ToAppendBlobURL() AppendBlobURL {
|
||||
return NewAppendBlobURL(b.URL(), b.blobClient.Pipeline())
|
||||
}
|
||||
|
||||
// ToBlockBlobURL creates a BlockBlobURL using the source's URL and pipeline.
|
||||
func (b BlobURL) ToBlockBlobURL() BlockBlobURL {
|
||||
return NewBlockBlobURL(b.URL(), b.blobClient.Pipeline())
|
||||
}
|
||||
|
||||
// ToPageBlobURL creates a PageBlobURL using the source's URL and pipeline.
|
||||
func (b BlobURL) ToPageBlobURL() PageBlobURL {
|
||||
return NewPageBlobURL(b.URL(), b.blobClient.Pipeline())
|
||||
}
|
||||
|
||||
// Download reads a range of bytes from a blob. The response also includes the blob's properties and metadata.
|
||||
// Passing azblob.CountToEnd (0) for count will download the blob from the offset to the end.
|
||||
// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-blob.
|
||||
func (b BlobURL) Download(ctx context.Context, offset int64, count int64, ac BlobAccessConditions, rangeGetContentMD5 bool) (*DownloadResponse, error) {
|
||||
var xRangeGetContentMD5 *bool
|
||||
if rangeGetContentMD5 {
|
||||
xRangeGetContentMD5 = &rangeGetContentMD5
|
||||
}
|
||||
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
|
||||
dr, err := b.blobClient.Download(ctx, nil, nil,
|
||||
httpRange{offset: offset, count: count}.pointers(),
|
||||
ac.LeaseAccessConditions.pointers(), xRangeGetContentMD5,
|
||||
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &DownloadResponse{
|
||||
b: b,
|
||||
r: dr,
|
||||
ctx: ctx,
|
||||
getInfo: HTTPGetterInfo{Offset: offset, Count: count, ETag: dr.ETag()},
|
||||
}, err
|
||||
}
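A sketch of a whole-blob download built on this method. Body and RetryReaderOptions are the upstream azblob response helpers and are assumed here, since they do not appear in this diff.

package example

import (
	"context"
	"io/ioutil"

	"github.com/Azure/azure-storage-blob-go/azblob" // adjust for the vendored copy
)

// downloadAll reads the whole blob into memory. CountToEnd (0) asks for
// everything from the offset to the end; Body wraps the stream in a retrying
// reader.
func downloadAll(ctx context.Context, blob azblob.BlobURL) ([]byte, error) {
	resp, err := blob.Download(ctx, 0, azblob.CountToEnd, azblob.BlobAccessConditions{}, false)
	if err != nil {
		return nil, err
	}
	body := resp.Body(azblob.RetryReaderOptions{MaxRetryRequests: 3})
	defer body.Close()
	return ioutil.ReadAll(body)
}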
|
||||
|
||||
// Delete marks the specified blob or snapshot for deletion. The blob is later deleted during garbage collection.
|
||||
// Note that deleting a blob also deletes all its snapshots.
|
||||
// For more information, see https://docs.microsoft.com/rest/api/storageservices/delete-blob.
|
||||
func (b BlobURL) Delete(ctx context.Context, deleteOptions DeleteSnapshotsOptionType, ac BlobAccessConditions) (*BlobDeleteResponse, error) {
|
||||
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
|
||||
return b.blobClient.Delete(ctx, nil, nil, ac.LeaseAccessConditions.pointers(), deleteOptions,
|
||||
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
|
||||
}
|
||||
|
||||
// Undelete restores the contents and metadata of a soft-deleted blob and any associated soft-deleted snapshots.
|
||||
// For more information, see https://docs.microsoft.com/rest/api/storageservices/undelete-blob.
|
||||
func (b BlobURL) Undelete(ctx context.Context) (*BlobUndeleteResponse, error) {
|
||||
return b.blobClient.Undelete(ctx, nil, nil)
|
||||
}
|
||||
|
||||
// SetTier operation sets the tier on a blob. The operation is allowed on a page
|
||||
// blob in a premium storage account and on a block blob in a blob storage account (locally
|
||||
// redundant storage only). A premium page blob's tier determines the allowed size, IOPS, and
|
||||
// bandwidth of the blob. A block blob's tier determines Hot/Cool/Archive storage type. This operation
|
||||
// does not update the blob's ETag.
|
||||
// For detailed information about block blob level tiering see https://docs.microsoft.com/en-us/azure/storage/blobs/storage-blob-storage-tiers.
|
||||
func (b BlobURL) SetTier(ctx context.Context, tier AccessTierType, lac LeaseAccessConditions) (*BlobSetTierResponse, error) {
|
||||
return b.blobClient.SetTier(ctx, tier, nil, nil, lac.pointers())
|
||||
}
|
||||
|
||||
// GetProperties returns the blob's properties.
|
||||
// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-blob-properties.
|
||||
func (b BlobURL) GetProperties(ctx context.Context, ac BlobAccessConditions) (*BlobGetPropertiesResponse, error) {
|
||||
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
|
||||
return b.blobClient.GetProperties(ctx, nil, nil, ac.LeaseAccessConditions.pointers(),
|
||||
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
|
||||
}
|
||||
|
||||
// SetHTTPHeaders changes a blob's HTTP headers.
|
||||
// For more information, see https://docs.microsoft.com/rest/api/storageservices/set-blob-properties.
|
||||
func (b BlobURL) SetHTTPHeaders(ctx context.Context, h BlobHTTPHeaders, ac BlobAccessConditions) (*BlobSetHTTPHeadersResponse, error) {
|
||||
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
|
||||
return b.blobClient.SetHTTPHeaders(ctx, nil,
|
||||
&h.CacheControl, &h.ContentType, h.ContentMD5, &h.ContentEncoding, &h.ContentLanguage,
|
||||
ac.LeaseAccessConditions.pointers(), ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag,
|
||||
&h.ContentDisposition, nil)
|
||||
}
|
||||
|
||||
// SetMetadata changes a blob's metadata.
|
||||
// https://docs.microsoft.com/rest/api/storageservices/set-blob-metadata.
|
||||
func (b BlobURL) SetMetadata(ctx context.Context, metadata Metadata, ac BlobAccessConditions) (*BlobSetMetadataResponse, error) {
|
||||
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
|
||||
return b.blobClient.SetMetadata(ctx, nil, metadata, ac.LeaseAccessConditions.pointers(),
|
||||
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
|
||||
}
|
||||
|
||||
// CreateSnapshot creates a read-only snapshot of a blob.
|
||||
// For more information, see https://docs.microsoft.com/rest/api/storageservices/snapshot-blob.
|
||||
func (b BlobURL) CreateSnapshot(ctx context.Context, metadata Metadata, ac BlobAccessConditions) (*BlobCreateSnapshotResponse, error) {
|
||||
// CreateSnapshot does NOT panic if the user tries to create a snapshot using a URL that already has a snapshot query parameter
|
||||
// because checking this would be a performance hit for a VERY unusual path and I don't think the common case should suffer this
|
||||
// performance hit.
|
||||
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
|
||||
return b.blobClient.CreateSnapshot(ctx, nil, metadata, ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, ac.LeaseAccessConditions.pointers(), nil)
|
||||
}
|
||||
|
||||
// AcquireLease acquires a lease on the blob for write and delete operations. The lease duration must be between
|
||||
// 15 to 60 seconds, or infinite (-1).
|
||||
// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-blob.
|
||||
func (b BlobURL) AcquireLease(ctx context.Context, proposedID string, duration int32, ac ModifiedAccessConditions) (*BlobAcquireLeaseResponse, error) {
|
||||
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.pointers()
|
||||
return b.blobClient.AcquireLease(ctx, nil, &duration, &proposedID,
|
||||
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
|
||||
}
|
||||
|
||||
// RenewLease renews the blob's previously-acquired lease.
|
||||
// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-blob.
|
||||
func (b BlobURL) RenewLease(ctx context.Context, leaseID string, ac ModifiedAccessConditions) (*BlobRenewLeaseResponse, error) {
|
||||
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.pointers()
|
||||
return b.blobClient.RenewLease(ctx, leaseID, nil,
|
||||
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
|
||||
}
|
||||
|
||||
// ReleaseLease releases the blob's previously-acquired lease.
|
||||
// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-blob.
|
||||
func (b BlobURL) ReleaseLease(ctx context.Context, leaseID string, ac ModifiedAccessConditions) (*BlobReleaseLeaseResponse, error) {
|
||||
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.pointers()
|
||||
return b.blobClient.ReleaseLease(ctx, leaseID, nil,
|
||||
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
|
||||
}
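A sketch of the acquire/release lifecycle these methods implement. LeaseID() on the acquire response is assumed from upstream azblob; the helper and its parameters are hypothetical.

package example

import (
	"context"

	"github.com/Azure/azure-storage-blob-go/azblob" // adjust for the vendored copy
)

// withBlobLease acquires a 15-second lease (the minimum fixed duration), runs
// fn, and releases the lease afterwards. proposedID is typically a
// client-generated GUID. A long-running fn would also need periodic
// RenewLease calls.
func withBlobLease(ctx context.Context, blob azblob.BlobURL, proposedID string, fn func(leaseID string) error) error {
	acq, err := blob.AcquireLease(ctx, proposedID, 15, azblob.ModifiedAccessConditions{})
	if err != nil {
		return err
	}
	leaseID := acq.LeaseID()
	defer blob.ReleaseLease(context.Background(), leaseID, azblob.ModifiedAccessConditions{})
	return fn(leaseID)
}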
|
||||
|
||||
// BreakLease breaks the blob's previously-acquired lease (if it exists). Pass the LeaseBreakDefault (-1)
|
||||
// constant to break a fixed-duration lease when it expires or an infinite lease immediately.
|
||||
// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-blob.
|
||||
func (b BlobURL) BreakLease(ctx context.Context, breakPeriodInSeconds int32, ac ModifiedAccessConditions) (*BlobBreakLeaseResponse, error) {
|
||||
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.pointers()
|
||||
return b.blobClient.BreakLease(ctx, nil, leasePeriodPointer(breakPeriodInSeconds),
|
||||
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
|
||||
}
|
||||
|
||||
// ChangeLease changes the blob's lease ID.
|
||||
// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-blob.
|
||||
func (b BlobURL) ChangeLease(ctx context.Context, leaseID string, proposedID string, ac ModifiedAccessConditions) (*BlobChangeLeaseResponse, error) {
|
||||
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.pointers()
|
||||
return b.blobClient.ChangeLease(ctx, leaseID, proposedID,
|
||||
nil, ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
|
||||
}
|
||||
|
||||
// LeaseBreakNaturally tells ContainerURL's or BlobURL's BreakLease method to break the lease using service semantics.
|
||||
const LeaseBreakNaturally = -1
|
||||
|
||||
func leasePeriodPointer(period int32) (p *int32) {
|
||||
if period != LeaseBreakNaturally {
|
||||
p = &period
|
||||
}
|
||||
return p
|
||||
}
|
||||
|
||||
// StartCopyFromURL copies the data at the source URL to a blob.
|
||||
// For more information, see https://docs.microsoft.com/rest/api/storageservices/copy-blob.
|
||||
func (b BlobURL) StartCopyFromURL(ctx context.Context, source url.URL, metadata Metadata, srcac ModifiedAccessConditions, dstac BlobAccessConditions) (*BlobStartCopyFromURLResponse, error) {
|
||||
srcIfModifiedSince, srcIfUnmodifiedSince, srcIfMatchETag, srcIfNoneMatchETag := srcac.pointers()
|
||||
dstIfModifiedSince, dstIfUnmodifiedSince, dstIfMatchETag, dstIfNoneMatchETag := dstac.ModifiedAccessConditions.pointers()
|
||||
dstLeaseID := dstac.LeaseAccessConditions.pointers()
|
||||
|
||||
return b.blobClient.StartCopyFromURL(ctx, source.String(), nil, metadata,
|
||||
srcIfModifiedSince, srcIfUnmodifiedSince,
|
||||
srcIfMatchETag, srcIfNoneMatchETag,
|
||||
dstIfModifiedSince, dstIfUnmodifiedSince,
|
||||
dstIfMatchETag, dstIfNoneMatchETag,
|
||||
dstLeaseID, nil)
|
||||
}
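Copies can complete asynchronously, so callers usually poll after this call. The sketch assumes the upstream CopyStatus()/CopyStatusPending accessors, which are not part of this diff.

package example

import (
	"context"
	"net/url"
	"time"

	"github.com/Azure/azure-storage-blob-go/azblob" // adjust for the vendored copy
)

// copyAndWait starts a server-side copy and polls until it leaves the
// "pending" state, reading the x-ms-copy-status header via CopyStatus.
func copyAndWait(ctx context.Context, dst azblob.BlobURL, src url.URL) error {
	resp, err := dst.StartCopyFromURL(ctx, src, azblob.Metadata{}, azblob.ModifiedAccessConditions{}, azblob.BlobAccessConditions{})
	if err != nil {
		return err
	}
	for status := resp.CopyStatus(); status == azblob.CopyStatusPending; {
		time.Sleep(2 * time.Second)
		props, err := dst.GetProperties(ctx, azblob.BlobAccessConditions{})
		if err != nil {
			return err
		}
		status = props.CopyStatus()
	}
	return nil
}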
|
||||
|
||||
// AbortCopyFromURL stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
|
||||
// For more information, see https://docs.microsoft.com/rest/api/storageservices/abort-copy-blob.
|
||||
func (b BlobURL) AbortCopyFromURL(ctx context.Context, copyID string, ac LeaseAccessConditions) (*BlobAbortCopyFromURLResponse, error) {
|
||||
return b.blobClient.AbortCopyFromURL(ctx, copyID, nil, ac.pointers(), nil)
|
||||
}
|
|
@ -0,0 +1,161 @@
|
|||
package azblob
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io"
|
||||
"net/url"
|
||||
|
||||
"encoding/base64"
|
||||
"encoding/binary"
|
||||
|
||||
"github.com/Azure/azure-pipeline-go/pipeline"
|
||||
)
|
||||
|
||||
const (
|
||||
// BlockBlobMaxUploadBlobBytes indicates the maximum number of bytes that can be sent in a call to Upload.
|
||||
BlockBlobMaxUploadBlobBytes = 256 * 1024 * 1024 // 256MB
|
||||
|
||||
// BlockBlobMaxStageBlockBytes indicates the maximum number of bytes that can be sent in a call to StageBlock.
|
||||
BlockBlobMaxStageBlockBytes = 100 * 1024 * 1024 // 100MB
|
||||
|
||||
// BlockBlobMaxBlocks indicates the maximum number of blocks allowed in a block blob.
|
||||
BlockBlobMaxBlocks = 50000
|
||||
)
|
||||
|
||||
// BlockBlobURL defines a set of operations applicable to block blobs.
|
||||
type BlockBlobURL struct {
|
||||
BlobURL
|
||||
bbClient blockBlobClient
|
||||
}
|
||||
|
||||
// NewBlockBlobURL creates a BlockBlobURL object using the specified URL and request policy pipeline.
|
||||
func NewBlockBlobURL(url url.URL, p pipeline.Pipeline) BlockBlobURL {
|
||||
blobClient := newBlobClient(url, p)
|
||||
bbClient := newBlockBlobClient(url, p)
|
||||
return BlockBlobURL{BlobURL: BlobURL{blobClient: blobClient}, bbClient: bbClient}
|
||||
}
|
||||
|
||||
// WithPipeline creates a new BlockBlobURL object identical to the source but with the specified request policy pipeline.
|
||||
func (bb BlockBlobURL) WithPipeline(p pipeline.Pipeline) BlockBlobURL {
|
||||
return NewBlockBlobURL(bb.blobClient.URL(), p)
|
||||
}
|
||||
|
||||
// WithSnapshot creates a new BlockBlobURL object identical to the source but with the specified snapshot timestamp.
|
||||
// Pass "" to remove the snapshot returning a URL to the base blob.
|
||||
func (bb BlockBlobURL) WithSnapshot(snapshot string) BlockBlobURL {
|
||||
p := NewBlobURLParts(bb.URL())
|
||||
p.Snapshot = snapshot
|
||||
return NewBlockBlobURL(p.URL(), bb.blobClient.Pipeline())
|
||||
}
|
||||
|
||||
// Upload creates a new block blob or overwrites an existing block blob.
|
||||
// Updating an existing block blob overwrites any existing metadata on the blob. Partial updates are not
|
||||
// supported with Upload; the content of the existing blob is overwritten with the new content. To
|
||||
// perform a partial update of a block blob, use StageBlock and CommitBlockList.
|
||||
// This method panics if the stream is not at position 0.
|
||||
// Note that the http client closes the body stream after the request is sent to the service.
|
||||
// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-blob.
|
||||
func (bb BlockBlobURL) Upload(ctx context.Context, body io.ReadSeeker, h BlobHTTPHeaders, metadata Metadata, ac BlobAccessConditions) (*BlockBlobUploadResponse, error) {
|
||||
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
|
||||
count, err := validateSeekableStreamAt0AndGetCount(body)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return bb.bbClient.Upload(ctx, body, count, nil,
|
||||
&h.ContentType, &h.ContentEncoding, &h.ContentLanguage, h.ContentMD5,
|
||||
&h.CacheControl, metadata, ac.LeaseAccessConditions.pointers(),
|
||||
&h.ContentDisposition, ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag,
|
||||
nil)
|
||||
}
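A sketch of a single-shot upload for small payloads; anything larger than BlockBlobMaxUploadBlobBytes has to go through StageBlock and CommitBlockList instead. The import path is an assumption.

package example

import (
	"bytes"
	"context"

	"github.com/Azure/azure-storage-blob-go/azblob" // adjust for the vendored copy
)

// uploadSmall writes data as a single block blob in one request.
func uploadSmall(ctx context.Context, blob azblob.BlockBlobURL, data []byte, contentType string) error {
	_, err := blob.Upload(ctx, bytes.NewReader(data),
		azblob.BlobHTTPHeaders{ContentType: contentType},
		azblob.Metadata{}, azblob.BlobAccessConditions{})
	return err
}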
|
||||
|
||||
// StageBlock uploads the specified block to the block blob's "staging area" to be later committed by a call to CommitBlockList.
|
||||
// Note that the http client closes the body stream after the request is sent to the service.
|
||||
// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-block.
|
||||
func (bb BlockBlobURL) StageBlock(ctx context.Context, base64BlockID string, body io.ReadSeeker, ac LeaseAccessConditions, transactionalMD5 []byte) (*BlockBlobStageBlockResponse, error) {
|
||||
count, err := validateSeekableStreamAt0AndGetCount(body)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return bb.bbClient.StageBlock(ctx, base64BlockID, count, body, transactionalMD5, nil, ac.pointers(), nil)
|
||||
}
|
||||
|
||||
// StageBlockFromURL copies the specified block from a source URL to the block blob's "staging area" to be later committed by a call to CommitBlockList.
|
||||
// If count is CountToEnd (0), then data is read from specified offset to the end.
|
||||
// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/put-block-from-url.
|
||||
func (bb BlockBlobURL) StageBlockFromURL(ctx context.Context, base64BlockID string, sourceURL url.URL, offset int64, count int64, ac LeaseAccessConditions) (*BlockBlobStageBlockFromURLResponse, error) {
|
||||
return bb.bbClient.StageBlockFromURL(ctx, base64BlockID, 0, sourceURL.String(), httpRange{offset: offset, count: count}.pointers(), nil, nil, ac.pointers(), nil)
|
||||
}
|
||||
|
||||
// CommitBlockList writes a blob by specifying the list of block IDs that make up the blob.
|
||||
// In order to be written as part of a blob, a block must have been successfully written
|
||||
// to the server in a prior PutBlock operation. You can call PutBlockList to update a blob
|
||||
// by uploading only those blocks that have changed, then committing the new and existing
|
||||
// blocks together. Any blocks not specified in the block list are permanently deleted.
|
||||
// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-block-list.
|
||||
func (bb BlockBlobURL) CommitBlockList(ctx context.Context, base64BlockIDs []string, h BlobHTTPHeaders,
|
||||
metadata Metadata, ac BlobAccessConditions) (*BlockBlobCommitBlockListResponse, error) {
|
||||
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
|
||||
return bb.bbClient.CommitBlockList(ctx, BlockLookupList{Latest: base64BlockIDs}, nil,
|
||||
&h.CacheControl, &h.ContentType, &h.ContentEncoding, &h.ContentLanguage, h.ContentMD5,
|
||||
metadata, ac.LeaseAccessConditions.pointers(), &h.ContentDisposition,
|
||||
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
|
||||
}
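A sketch of the stage-then-commit flow described above. The fixed-width, base64-encoded counter used for block IDs is one possible convention, not something this package mandates.

package example

import (
	"bytes"
	"context"
	"encoding/base64"
	"fmt"

	"github.com/Azure/azure-storage-blob-go/azblob" // adjust for the vendored copy
)

// uploadInBlocks stages each chunk under a fixed-width, base64-encoded block
// ID and then commits the IDs in order. All block IDs of a blob must decode
// to the same length, hence the zero-padded counter.
func uploadInBlocks(ctx context.Context, blob azblob.BlockBlobURL, chunks [][]byte) error {
	ids := make([]string, 0, len(chunks))
	for i, chunk := range chunks {
		id := base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf("block-%08d", i)))
		if _, err := blob.StageBlock(ctx, id, bytes.NewReader(chunk), azblob.LeaseAccessConditions{}, nil); err != nil {
			return err
		}
		ids = append(ids, id)
	}
	_, err := blob.CommitBlockList(ctx, ids, azblob.BlobHTTPHeaders{}, azblob.Metadata{}, azblob.BlobAccessConditions{})
	return err
}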
|
||||
|
||||
// GetBlockList returns the list of blocks that have been uploaded as part of a block blob using the specified block list filter.
|
||||
// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-block-list.
|
||||
func (bb BlockBlobURL) GetBlockList(ctx context.Context, listType BlockListType, ac LeaseAccessConditions) (*BlockList, error) {
|
||||
return bb.bbClient.GetBlockList(ctx, listType, nil, nil, ac.pointers(), nil)
|
||||
}
|
||||
|
||||
//////////////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
type BlockID [64]byte
|
||||
|
||||
func (blockID BlockID) ToBase64() string {
|
||||
return base64.StdEncoding.EncodeToString(blockID[:])
|
||||
}
|
||||
|
||||
func (blockID *BlockID) FromBase64(s string) error {
|
||||
*blockID = BlockID{} // Zero out the block ID
|
||||
_, err := base64.StdEncoding.Decode(blockID[:], ([]byte)(s))
|
||||
return err
|
||||
}
|
||||
|
||||
//////////////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
type uuidBlockID BlockID
|
||||
|
||||
func (ubi uuidBlockID) UUID() uuid {
|
||||
u := uuid{}
|
||||
copy(u[:], ubi[:len(u)])
|
||||
return u
|
||||
}
|
||||
|
||||
func (ubi uuidBlockID) Number() uint32 {
|
||||
return binary.BigEndian.Uint32(ubi[len(uuid{}):])
|
||||
}
|
||||
|
||||
func newUuidBlockID(u uuid) uuidBlockID {
|
||||
ubi := uuidBlockID{} // Create a new uuidBlockID
|
||||
copy(ubi[:len(u)], u[:]) // Copy the specified UUID into it
|
||||
// Block number defaults to 0
|
||||
return ubi
|
||||
}
|
||||
|
||||
func (ubi *uuidBlockID) SetUUID(u uuid) *uuidBlockID {
|
||||
copy(ubi[:len(u)], u[:])
|
||||
return ubi
|
||||
}
|
||||
|
||||
func (ubi uuidBlockID) WithBlockNumber(blockNumber uint32) uuidBlockID {
|
||||
binary.BigEndian.PutUint32(ubi[len(uuid{}):], blockNumber) // Put block number after UUID
|
||||
return ubi // Return the passed-in copy
|
||||
}
|
||||
|
||||
func (ubi uuidBlockID) ToBase64() string {
|
||||
return BlockID(ubi).ToBase64()
|
||||
}
|
||||
|
||||
func (ubi *uuidBlockID) FromBase64(s string) error {
|
||||
return (*BlockID)(ubi).FromBase64(s)
|
||||
}
|
|
@ -0,0 +1,295 @@
|
|||
package azblob
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/url"
|
||||
|
||||
"github.com/Azure/azure-pipeline-go/pipeline"
|
||||
)
|
||||
|
||||
// A ContainerURL represents a URL to the Azure Storage container allowing you to manipulate its blobs.
|
||||
type ContainerURL struct {
|
||||
client containerClient
|
||||
}
|
||||
|
||||
// NewContainerURL creates a ContainerURL object using the specified URL and request policy pipeline.
|
||||
func NewContainerURL(url url.URL, p pipeline.Pipeline) ContainerURL {
|
||||
client := newContainerClient(url, p)
|
||||
return ContainerURL{client: client}
|
||||
}
|
||||
|
||||
// URL returns the URL endpoint used by the ContainerURL object.
|
||||
func (c ContainerURL) URL() url.URL {
|
||||
return c.client.URL()
|
||||
}
|
||||
|
||||
// String returns the URL as a string.
|
||||
func (c ContainerURL) String() string {
|
||||
u := c.URL()
|
||||
return u.String()
|
||||
}
|
||||
|
||||
// WithPipeline creates a new ContainerURL object identical to the source but with the specified request policy pipeline.
|
||||
func (c ContainerURL) WithPipeline(p pipeline.Pipeline) ContainerURL {
|
||||
return NewContainerURL(c.URL(), p)
|
||||
}
|
||||
|
||||
// NewBlobURL creates a new BlobURL object by concatenating blobName to the end of
|
||||
// ContainerURL's URL. The new BlobURL uses the same request policy pipeline as the ContainerURL.
|
||||
// To change the pipeline, create the BlobURL and then call its WithPipeline method passing in the
|
||||
// desired pipeline object. Or, call this package's NewBlobURL instead of calling this object's
|
||||
// NewBlobURL method.
|
||||
func (c ContainerURL) NewBlobURL(blobName string) BlobURL {
|
||||
blobURL := appendToURLPath(c.URL(), blobName)
|
||||
return NewBlobURL(blobURL, c.client.Pipeline())
|
||||
}
|
||||
|
||||
// NewAppendBlobURL creates a new AppendBlobURL object by concatenating blobName to the end of
|
||||
// ContainerURL's URL. The new AppendBlobURL uses the same request policy pipeline as the ContainerURL.
|
||||
// To change the pipeline, create the AppendBlobURL and then call its WithPipeline method passing in the
|
||||
// desired pipeline object. Or, call this package's NewAppendBlobURL instead of calling this object's
|
||||
// NewAppendBlobURL method.
|
||||
func (c ContainerURL) NewAppendBlobURL(blobName string) AppendBlobURL {
|
||||
blobURL := appendToURLPath(c.URL(), blobName)
|
||||
return NewAppendBlobURL(blobURL, c.client.Pipeline())
|
||||
}
|
||||
|
||||
// NewBlockBlobURL creates a new BlockBlobURL object by concatenating blobName to the end of
|
||||
// ContainerURL's URL. The new BlockBlobURL uses the same request policy pipeline as the ContainerURL.
|
||||
// To change the pipeline, create the BlockBlobURL and then call its WithPipeline method passing in the
|
||||
// desired pipeline object. Or, call this package's NewBlockBlobURL instead of calling this object's
|
||||
// NewBlockBlobURL method.
|
||||
func (c ContainerURL) NewBlockBlobURL(blobName string) BlockBlobURL {
|
||||
blobURL := appendToURLPath(c.URL(), blobName)
|
||||
return NewBlockBlobURL(blobURL, c.client.Pipeline())
|
||||
}
|
||||
|
||||
// NewPageBlobURL creates a new PageBlobURL object by concatenating blobName to the end of
|
||||
// ContainerURL's URL. The new PageBlobURL uses the same request policy pipeline as the ContainerURL.
|
||||
// To change the pipeline, create the PageBlobURL and then call its WithPipeline method passing in the
|
||||
// desired pipeline object. Or, call this package's NewPageBlobURL instead of calling this object's
|
||||
// NewPageBlobURL method.
|
||||
func (c ContainerURL) NewPageBlobURL(blobName string) PageBlobURL {
|
||||
blobURL := appendToURLPath(c.URL(), blobName)
|
||||
return NewPageBlobURL(blobURL, c.client.Pipeline())
|
||||
}
|
||||
|
||||
// Create creates a new container within a storage account. If a container with the same name already exists, the operation fails.
|
||||
// For more information, see https://docs.microsoft.com/rest/api/storageservices/create-container.
|
||||
func (c ContainerURL) Create(ctx context.Context, metadata Metadata, publicAccessType PublicAccessType) (*ContainerCreateResponse, error) {
|
||||
return c.client.Create(ctx, nil, metadata, publicAccessType, nil)
|
||||
}
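A sketch tying Create to the blob factory methods above; the account URL is a placeholder and the anonymous credential stands in for whatever authorization a real caller would attach to the pipeline.

package example

import (
	"context"
	"net/url"

	"github.com/Azure/azure-storage-blob-go/azblob" // adjust for the vendored copy
)

// newPrivateContainer creates a container with no public access and returns a
// BlockBlobURL inside it.
func newPrivateContainer(ctx context.Context, accountURL, containerName, blobName string) (azblob.BlockBlobURL, error) {
	u, err := url.Parse(accountURL + "/" + containerName)
	if err != nil {
		return azblob.BlockBlobURL{}, err
	}
	p := azblob.NewPipeline(azblob.NewAnonymousCredential(), azblob.PipelineOptions{})
	container := azblob.NewContainerURL(*u, p)
	if _, err := container.Create(ctx, azblob.Metadata{}, azblob.PublicAccessNone); err != nil {
		return azblob.BlockBlobURL{}, err
	}
	return container.NewBlockBlobURL(blobName), nil
}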
|
||||
|
||||
// Delete marks the specified container for deletion. The container and any blobs contained within it are later deleted during garbage collection.
|
||||
// For more information, see https://docs.microsoft.com/rest/api/storageservices/delete-container.
|
||||
func (c ContainerURL) Delete(ctx context.Context, ac ContainerAccessConditions) (*ContainerDeleteResponse, error) {
|
||||
if ac.IfMatch != ETagNone || ac.IfNoneMatch != ETagNone {
|
||||
return nil, errors.New("the IfMatch and IfNoneMatch access conditions must have their default values because they are ignored by the service")
|
||||
}
|
||||
|
||||
ifModifiedSince, ifUnmodifiedSince, _, _ := ac.ModifiedAccessConditions.pointers()
|
||||
return c.client.Delete(ctx, nil, ac.LeaseAccessConditions.pointers(),
|
||||
ifModifiedSince, ifUnmodifiedSince, nil)
|
||||
}
|
||||
|
||||
// GetProperties returns the container's properties.
|
||||
// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-container-metadata.
|
||||
func (c ContainerURL) GetProperties(ctx context.Context, ac LeaseAccessConditions) (*ContainerGetPropertiesResponse, error) {
|
||||
// NOTE: GetProperties returns the container's metadata AND its properties in a single call,
|
||||
// so this package does not expose a separate GetMetadata method; this keeps the API smaller.
|
||||
return c.client.GetProperties(ctx, nil, ac.pointers(), nil)
|
||||
}
|
||||
|
||||
// SetMetadata sets the container's metadata.
|
||||
// For more information, see https://docs.microsoft.com/rest/api/storageservices/set-container-metadata.
|
||||
func (c ContainerURL) SetMetadata(ctx context.Context, metadata Metadata, ac ContainerAccessConditions) (*ContainerSetMetadataResponse, error) {
|
||||
if !ac.IfUnmodifiedSince.IsZero() || ac.IfMatch != ETagNone || ac.IfNoneMatch != ETagNone {
|
||||
return nil, errors.New("the IfUnmodifiedSince, IfMatch, and IfNoneMatch must have their default values because they are ignored by the blob service")
|
||||
}
|
||||
ifModifiedSince, _, _, _ := ac.ModifiedAccessConditions.pointers()
|
||||
return c.client.SetMetadata(ctx, nil, ac.LeaseAccessConditions.pointers(), metadata, ifModifiedSince, nil)
|
||||
}
|
||||
|
||||
// GetAccessPolicy returns the container's access policy. The access policy indicates whether container's blobs may be accessed publicly.
|
||||
// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-container-acl.
|
||||
func (c ContainerURL) GetAccessPolicy(ctx context.Context, ac LeaseAccessConditions) (*SignedIdentifiers, error) {
|
||||
return c.client.GetAccessPolicy(ctx, nil, ac.pointers(), nil)
|
||||
}
|
||||
|
||||
// The AccessPolicyPermission type simplifies creating the permissions string for a container's access policy.
|
||||
// Initialize an instance of this type and then call its String method to set AccessPolicy's Permission field.
|
||||
type AccessPolicyPermission struct {
|
||||
Read, Add, Create, Write, Delete, List bool
|
||||
}
|
||||
|
||||
// String produces the access policy permission string for an Azure Storage container.
|
||||
// Call this method to set AccessPolicy's Permission field.
|
||||
func (p AccessPolicyPermission) String() string {
|
||||
var b bytes.Buffer
|
||||
if p.Read {
|
||||
b.WriteRune('r')
|
||||
}
|
||||
if p.Add {
|
||||
b.WriteRune('a')
|
||||
}
|
||||
if p.Create {
|
||||
b.WriteRune('c')
|
||||
}
|
||||
if p.Write {
|
||||
b.WriteRune('w')
|
||||
}
|
||||
if p.Delete {
|
||||
b.WriteRune('d')
|
||||
}
|
||||
if p.List {
|
||||
b.WriteRune('l')
|
||||
}
|
||||
return b.String()
|
||||
}
|
||||
|
||||
// Parse initializes the AccessPolicyPermission's fields from a string.
|
||||
func (p *AccessPolicyPermission) Parse(s string) error {
|
||||
*p = AccessPolicyPermission{} // Clear the flags
|
||||
for _, r := range s {
|
||||
switch r {
|
||||
case 'r':
|
||||
p.Read = true
|
||||
case 'a':
|
||||
p.Add = true
|
||||
case 'c':
|
||||
p.Create = true
|
||||
case 'w':
|
||||
p.Write = true
|
||||
case 'd':
|
||||
p.Delete = true
|
||||
case 'l':
|
||||
p.List = true
|
||||
default:
|
||||
return fmt.Errorf("invalid permission: '%v'", r)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetAccessPolicy sets the container's permissions. The access policy indicates whether blobs in a container may be accessed publicly.
|
||||
// For more information, see https://docs.microsoft.com/rest/api/storageservices/set-container-acl.
|
||||
func (c ContainerURL) SetAccessPolicy(ctx context.Context, accessType PublicAccessType, si []SignedIdentifier,
|
||||
ac ContainerAccessConditions) (*ContainerSetAccessPolicyResponse, error) {
|
||||
if ac.IfMatch != ETagNone || ac.IfNoneMatch != ETagNone {
|
||||
return nil, errors.New("the IfMatch and IfNoneMatch access conditions must have their default values because they are ignored by the service")
|
||||
}
|
||||
ifModifiedSince, ifUnmodifiedSince, _, _ := ac.ModifiedAccessConditions.pointers()
|
||||
return c.client.SetAccessPolicy(ctx, si, nil, ac.LeaseAccessConditions.pointers(),
|
||||
accessType, ifModifiedSince, ifUnmodifiedSince, nil)
|
||||
}
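A sketch of installing a stored access policy with the AccessPolicyPermission helper above. The SignedIdentifier and AccessPolicy field names are assumed from the upstream generated models, which are not part of this diff.

package example

import (
	"context"
	"time"

	"github.com/Azure/azure-storage-blob-go/azblob" // adjust for the vendored copy
)

// grantReadList installs a single stored access policy that allows read and
// list for one hour, without enabling public access on the container.
func grantReadList(ctx context.Context, container azblob.ContainerURL, policyID string) error {
	perm := azblob.AccessPolicyPermission{Read: true, List: true}.String()
	now := time.Now().UTC()
	si := []azblob.SignedIdentifier{{
		ID: policyID,
		AccessPolicy: azblob.AccessPolicy{
			Start:      now,
			Expiry:     now.Add(time.Hour),
			Permission: perm,
		},
	}}
	_, err := container.SetAccessPolicy(ctx, azblob.PublicAccessNone, si, azblob.ContainerAccessConditions{})
	return err
}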
|
||||
|
||||
// AcquireLease acquires a lease on the container for delete operations. The lease duration must be between 15 to 60 seconds, or infinite (-1).
|
||||
// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-container.
|
||||
func (c ContainerURL) AcquireLease(ctx context.Context, proposedID string, duration int32, ac ModifiedAccessConditions) (*ContainerAcquireLeaseResponse, error) {
|
||||
ifModifiedSince, ifUnmodifiedSince, _, _ := ac.pointers()
|
||||
return c.client.AcquireLease(ctx, nil, &duration, &proposedID,
|
||||
ifModifiedSince, ifUnmodifiedSince, nil)
|
||||
}
|
||||
|
||||
// RenewLease renews the container's previously-acquired lease.
|
||||
// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-container.
|
||||
func (c ContainerURL) RenewLease(ctx context.Context, leaseID string, ac ModifiedAccessConditions) (*ContainerRenewLeaseResponse, error) {
|
||||
ifModifiedSince, ifUnmodifiedSince, _, _ := ac.pointers()
|
||||
return c.client.RenewLease(ctx, leaseID, nil, ifModifiedSince, ifUnmodifiedSince, nil)
|
||||
}
|
||||
|
||||
// ReleaseLease releases the container's previously-acquired lease.
|
||||
// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-container.
|
||||
func (c ContainerURL) ReleaseLease(ctx context.Context, leaseID string, ac ModifiedAccessConditions) (*ContainerReleaseLeaseResponse, error) {
|
||||
ifModifiedSince, ifUnmodifiedSince, _, _ := ac.pointers()
|
||||
return c.client.ReleaseLease(ctx, leaseID, nil, ifModifiedSince, ifUnmodifiedSince, nil)
|
||||
}
|
||||
|
||||
// BreakLease breaks the container's previously-acquired lease (if it exists).
|
||||
// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-container.
|
||||
func (c ContainerURL) BreakLease(ctx context.Context, period int32, ac ModifiedAccessConditions) (*ContainerBreakLeaseResponse, error) {
|
||||
ifModifiedSince, ifUnmodifiedSince, _, _ := ac.pointers()
|
||||
return c.client.BreakLease(ctx, nil, leasePeriodPointer(period), ifModifiedSince, ifUnmodifiedSince, nil)
|
||||
}
|
||||
|
||||
// ChangeLease changes the container's lease ID.
|
||||
// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-container.
|
||||
func (c ContainerURL) ChangeLease(ctx context.Context, leaseID string, proposedID string, ac ModifiedAccessConditions) (*ContainerChangeLeaseResponse, error) {
|
||||
ifModifiedSince, ifUnmodifiedSince, _, _ := ac.pointers()
|
||||
return c.client.ChangeLease(ctx, leaseID, proposedID, nil, ifModifiedSince, ifUnmodifiedSince, nil)
|
||||
}
|
||||
|
||||
// ListBlobsFlatSegment returns a single segment of blobs starting from the specified Marker. Use an empty
|
||||
// Marker to start enumeration from the beginning. Blob names are returned in lexicographic order.
|
||||
// After getting a segment, process it, and then call ListBlobsFlatSegment again (passing the
|
||||
// previously-returned Marker) to get the next segment.
|
||||
// For more information, see https://docs.microsoft.com/rest/api/storageservices/list-blobs.
|
||||
func (c ContainerURL) ListBlobsFlatSegment(ctx context.Context, marker Marker, o ListBlobsSegmentOptions) (*ListBlobsFlatSegmentResponse, error) {
|
||||
prefix, include, maxResults := o.pointers()
|
||||
return c.client.ListBlobFlatSegment(ctx, prefix, marker.Val, maxResults, include, nil, nil)
|
||||
}
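The marker loop the comment describes looks roughly like this; NextMarker, NotDone and Segment.BlobItems are assumed from the upstream azblob response types, which this diff does not show.

package example

import (
	"context"
	"fmt"

	"github.com/Azure/azure-storage-blob-go/azblob" // adjust for the vendored copy
)

// listAll walks every segment of a container's flat blob listing. The zero
// Marker starts enumeration at the beginning.
func listAll(ctx context.Context, container azblob.ContainerURL) error {
	for marker := (azblob.Marker{}); marker.NotDone(); {
		resp, err := container.ListBlobsFlatSegment(ctx, marker, azblob.ListBlobsSegmentOptions{})
		if err != nil {
			return err
		}
		for _, item := range resp.Segment.BlobItems {
			fmt.Println(item.Name)
		}
		marker = resp.NextMarker
	}
	return nil
}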
|
||||
|
||||
// ListBlobsHierarchySegment returns a single segment of blobs starting from the specified Marker. Use an empty
|
||||
// Marker to start enumeration from the beginning. Blob names are returned in lexicographic order.
|
||||
// After getting a segment, process it, and then call ListBlobsHierarchySegment again (passing the
|
||||
// previously-returned Marker) to get the next segment.
|
||||
// For more information, see https://docs.microsoft.com/rest/api/storageservices/list-blobs.
|
||||
func (c ContainerURL) ListBlobsHierarchySegment(ctx context.Context, marker Marker, delimiter string, o ListBlobsSegmentOptions) (*ListBlobsHierarchySegmentResponse, error) {
|
||||
if o.Details.Snapshots {
|
||||
return nil, errors.New("snapshots are not supported in this listing operation")
|
||||
}
|
||||
prefix, include, maxResults := o.pointers()
|
||||
return c.client.ListBlobHierarchySegment(ctx, delimiter, prefix, marker.Val, maxResults, include, nil, nil)
|
||||
}
|
||||
|
||||
// ListBlobsSegmentOptions defines options available when calling ListBlobs.
|
||||
type ListBlobsSegmentOptions struct {
|
||||
Details BlobListingDetails // No IncludeType header is produced if ""
|
||||
Prefix string // No Prefix header is produced if ""
|
||||
|
||||
// MaxResults sets the maximum number of results you want the service to return. Note, the
|
||||
// service may return fewer results than requested.
|
||||
// MaxResults=0 means no 'MaxResults' header specified.
|
||||
MaxResults int32
|
||||
}
|
||||
|
||||
func (o *ListBlobsSegmentOptions) pointers() (prefix *string, include []ListBlobsIncludeItemType, maxResults *int32) {
|
||||
if o.Prefix != "" {
|
||||
prefix = &o.Prefix
|
||||
}
|
||||
include = o.Details.slice()
|
||||
if o.MaxResults != 0 {
|
||||
maxResults = &o.MaxResults
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// BlobListingDetails indicates what additional information the service should return with each blob.
|
||||
type BlobListingDetails struct {
|
||||
Copy, Metadata, Snapshots, UncommittedBlobs, Deleted bool
|
||||
}
|
||||
|
||||
// slice produces the Include query parameter's value.
|
||||
func (d *BlobListingDetails) slice() []ListBlobsIncludeItemType {
|
||||
items := []ListBlobsIncludeItemType{}
|
||||
// NOTE: Multiple strings MUST be appended in alphabetic order or signing the string for authentication fails!
|
||||
if d.Copy {
|
||||
items = append(items, ListBlobsIncludeItemCopy)
|
||||
}
|
||||
if d.Deleted {
|
||||
items = append(items, ListBlobsIncludeItemDeleted)
|
||||
}
|
||||
if d.Metadata {
|
||||
items = append(items, ListBlobsIncludeItemMetadata)
|
||||
}
|
||||
if d.Snapshots {
|
||||
items = append(items, ListBlobsIncludeItemSnapshots)
|
||||
}
|
||||
if d.UncommittedBlobs {
|
||||
items = append(items, ListBlobsIncludeItemUncommittedblobs)
|
||||
}
|
||||
return items
|
||||
}
|
|
@ -0,0 +1,222 @@
|
|||
package azblob
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/url"
|
||||
"strconv"
|
||||
|
||||
"github.com/Azure/azure-pipeline-go/pipeline"
|
||||
)
|
||||
|
||||
const (
|
||||
// PageBlobPageBytes indicates the number of bytes in a page (512).
|
||||
PageBlobPageBytes = 512
|
||||
|
||||
// PageBlobMaxUploadPagesBytes indicates the maximum number of bytes that can be sent in a call to UploadPages.
|
||||
PageBlobMaxUploadPagesBytes = 4 * 1024 * 1024 // 4MB
|
||||
)
|
||||
|
||||
// PageBlobURL defines a set of operations applicable to page blobs.
|
||||
type PageBlobURL struct {
|
||||
BlobURL
|
||||
pbClient pageBlobClient
|
||||
}
|
||||
|
||||
// NewPageBlobURL creates a PageBlobURL object using the specified URL and request policy pipeline.
|
||||
func NewPageBlobURL(url url.URL, p pipeline.Pipeline) PageBlobURL {
|
||||
blobClient := newBlobClient(url, p)
|
||||
pbClient := newPageBlobClient(url, p)
|
||||
return PageBlobURL{BlobURL: BlobURL{blobClient: blobClient}, pbClient: pbClient}
|
||||
}
|
||||
|
||||
// WithPipeline creates a new PageBlobURL object identical to the source but with the specified request policy pipeline.
|
||||
func (pb PageBlobURL) WithPipeline(p pipeline.Pipeline) PageBlobURL {
|
||||
return NewPageBlobURL(pb.blobClient.URL(), p)
|
||||
}
|
||||
|
||||
// WithSnapshot creates a new PageBlobURL object identical to the source but with the specified snapshot timestamp.
|
||||
// Pass "" to remove the snapshot returning a URL to the base blob.
|
||||
func (pb PageBlobURL) WithSnapshot(snapshot string) PageBlobURL {
|
||||
p := NewBlobURLParts(pb.URL())
|
||||
p.Snapshot = snapshot
|
||||
return NewPageBlobURL(p.URL(), pb.blobClient.Pipeline())
|
||||
}
|
||||
|
||||
// Create creates a page blob of the specified length. Call UploadPages to upload data to a page blob.
|
||||
// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-blob.
|
||||
func (pb PageBlobURL) Create(ctx context.Context, size int64, sequenceNumber int64, h BlobHTTPHeaders, metadata Metadata, ac BlobAccessConditions) (*PageBlobCreateResponse, error) {
|
||||
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
|
||||
return pb.pbClient.Create(ctx, 0, size, nil,
|
||||
&h.ContentType, &h.ContentEncoding, &h.ContentLanguage, h.ContentMD5, &h.CacheControl,
|
||||
metadata, ac.LeaseAccessConditions.pointers(),
|
||||
&h.ContentDisposition, ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, &sequenceNumber, nil)
|
||||
}
|
||||
|
||||
// UploadPages writes 1 or more pages to the page blob. The start offset and the stream size must be a multiple of 512 bytes.
|
||||
// This method returns an error if the stream is not at position 0.
|
||||
// Note that the http client closes the body stream after the request is sent to the service.
|
||||
// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-page.
|
||||
func (pb PageBlobURL) UploadPages(ctx context.Context, offset int64, body io.ReadSeeker, ac PageBlobAccessConditions, transactionalMD5 []byte) (*PageBlobUploadPagesResponse, error) {
|
||||
count, err := validateSeekableStreamAt0AndGetCount(body)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
|
||||
ifSequenceNumberLessThanOrEqual, ifSequenceNumberLessThan, ifSequenceNumberEqual := ac.SequenceNumberAccessConditions.pointers()
|
||||
return pb.pbClient.UploadPages(ctx, body, count, transactionalMD5, nil,
|
||||
PageRange{Start: offset, End: offset + count - 1}.pointers(),
|
||||
ac.LeaseAccessConditions.pointers(),
|
||||
ifSequenceNumberLessThanOrEqual, ifSequenceNumberLessThan, ifSequenceNumberEqual,
|
||||
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
|
||||
}
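
// Illustrative usage sketch (not part of the vendored source; the account name,
// key, container and blob names below are placeholders, and the package's
// NewPipeline helper is assumed to be available): creating a page blob and
// uploading one 512-byte page with a page-aligned offset and length.
//
//	cred, _ := NewSharedKeyCredential("myaccount", "bXlrZXk=")
//	p := NewPipeline(cred, PipelineOptions{})
//	u, _ := url.Parse("https://myaccount.blob.core.windows.net/mycontainer/mypageblob")
//	pbURL := NewPageBlobURL(*u, p)
//	ctx := context.Background()
//	_, _ = pbURL.Create(ctx, 4*PageBlobPageBytes, 0, BlobHTTPHeaders{}, Metadata{}, BlobAccessConditions{})
//	page := make([]byte, PageBlobPageBytes)
//	_, _ = pbURL.UploadPages(ctx, 0, bytes.NewReader(page), PageBlobAccessConditions{}, nil)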
|
||||
|
||||
// UploadPagesFromURL copies 1 or more pages from a source URL to the page blob.
|
||||
// The sourceOffset specifies the start offset of source data to copy from.
|
||||
// The destOffset specifies the start offset in the destination page blob where the data will be written.
|
||||
// The count must be a multiple of 512 bytes.
|
||||
// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-page-from-url.
|
||||
func (pb PageBlobURL) UploadPagesFromURL(ctx context.Context, sourceURL url.URL, sourceOffset int64, destOffset int64, count int64, ac PageBlobAccessConditions, transactionalMD5 []byte) (*PageBlobUploadPagesFromURLResponse, error) {
|
||||
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
|
||||
ifSequenceNumberLessThanOrEqual, ifSequenceNumberLessThan, ifSequenceNumberEqual := ac.SequenceNumberAccessConditions.pointers()
|
||||
return pb.pbClient.UploadPagesFromURL(ctx, sourceURL.String(), *PageRange{Start: sourceOffset, End: sourceOffset + count - 1}.pointers(), 0,
|
||||
*PageRange{Start: destOffset, End: destOffset + count - 1}.pointers(), transactionalMD5, nil, ac.LeaseAccessConditions.pointers(),
|
||||
ifSequenceNumberLessThanOrEqual, ifSequenceNumberLessThan, ifSequenceNumberEqual,
|
||||
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
|
||||
}
|
||||
|
||||
// ClearPages frees the specified pages from the page blob.
|
||||
// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-page.
|
||||
func (pb PageBlobURL) ClearPages(ctx context.Context, offset int64, count int64, ac PageBlobAccessConditions) (*PageBlobClearPagesResponse, error) {
|
||||
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
|
||||
ifSequenceNumberLessThanOrEqual, ifSequenceNumberLessThan, ifSequenceNumberEqual := ac.SequenceNumberAccessConditions.pointers()
|
||||
return pb.pbClient.ClearPages(ctx, 0, nil,
|
||||
PageRange{Start: offset, End: offset + count - 1}.pointers(),
|
||||
ac.LeaseAccessConditions.pointers(),
|
||||
ifSequenceNumberLessThanOrEqual, ifSequenceNumberLessThan,
|
||||
ifSequenceNumberEqual, ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
|
||||
}
|
||||
|
||||
// GetPageRanges returns the list of valid page ranges for a page blob or snapshot of a page blob.
|
||||
// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-page-ranges.
|
||||
func (pb PageBlobURL) GetPageRanges(ctx context.Context, offset int64, count int64, ac BlobAccessConditions) (*PageList, error) {
|
||||
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
|
||||
return pb.pbClient.GetPageRanges(ctx, nil, nil,
|
||||
httpRange{offset: offset, count: count}.pointers(),
|
||||
ac.LeaseAccessConditions.pointers(),
|
||||
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
|
||||
}
|
||||
|
||||
// GetPageRangesDiff gets the collection of page ranges that differ between a specified snapshot and this page blob.
|
||||
// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-page-ranges.
|
||||
func (pb PageBlobURL) GetPageRangesDiff(ctx context.Context, offset int64, count int64, prevSnapshot string, ac BlobAccessConditions) (*PageList, error) {
|
||||
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
|
||||
return pb.pbClient.GetPageRangesDiff(ctx, nil, nil, &prevSnapshot,
|
||||
httpRange{offset: offset, count: count}.pointers(),
|
||||
ac.LeaseAccessConditions.pointers(),
|
||||
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag,
|
||||
nil)
|
||||
}
|
||||
|
||||
// Resize resizes the page blob to the specified size (which must be a multiple of 512).
|
||||
// For more information, see https://docs.microsoft.com/rest/api/storageservices/set-blob-properties.
|
||||
func (pb PageBlobURL) Resize(ctx context.Context, size int64, ac BlobAccessConditions) (*PageBlobResizeResponse, error) {
|
||||
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
|
||||
return pb.pbClient.Resize(ctx, size, nil, ac.LeaseAccessConditions.pointers(),
|
||||
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
|
||||
}
|
||||
|
||||
// UpdateSequenceNumber updates the page blob's sequence number.
|
||||
func (pb PageBlobURL) UpdateSequenceNumber(ctx context.Context, action SequenceNumberActionType, sequenceNumber int64,
|
||||
ac BlobAccessConditions) (*PageBlobUpdateSequenceNumberResponse, error) {
|
||||
sn := &sequenceNumber
|
||||
if action == SequenceNumberActionIncrement {
|
||||
sn = nil
|
||||
}
|
||||
ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch := ac.ModifiedAccessConditions.pointers()
|
||||
return pb.pbClient.UpdateSequenceNumber(ctx, action, nil,
|
||||
ac.LeaseAccessConditions.pointers(), ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch,
|
||||
sn, nil)
|
||||
}
|
||||
|
||||
// StartCopyIncremental begins an operation to start an incremental copy from one page blob's snapshot to this page blob.
|
||||
// The snapshot is copied such that only the differential changes from the previously copied snapshot are transferred to the destination.
|
||||
// The copied snapshots are complete copies of the original snapshot and can be read or copied from as usual.
|
||||
// For more information, see https://docs.microsoft.com/rest/api/storageservices/incremental-copy-blob and
|
||||
// https://docs.microsoft.com/en-us/azure/virtual-machines/windows/incremental-snapshots.
|
||||
func (pb PageBlobURL) StartCopyIncremental(ctx context.Context, source url.URL, snapshot string, ac BlobAccessConditions) (*PageBlobCopyIncrementalResponse, error) {
|
||||
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
|
||||
qp := source.Query()
|
||||
qp.Set("snapshot", snapshot)
|
||||
source.RawQuery = qp.Encode()
|
||||
return pb.pbClient.CopyIncremental(ctx, source.String(), nil,
|
||||
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
|
||||
}
|
||||
|
||||
func (pr PageRange) pointers() *string {
|
||||
endOffset := strconv.FormatInt(int64(pr.End), 10)
|
||||
asString := fmt.Sprintf("bytes=%v-%s", pr.Start, endOffset)
|
||||
return &asString
|
||||
}
|
||||
|
||||
type PageBlobAccessConditions struct {
|
||||
ModifiedAccessConditions
|
||||
LeaseAccessConditions
|
||||
SequenceNumberAccessConditions
|
||||
}
|
||||
|
||||
// SequenceNumberAccessConditions identifies page blob-specific access conditions which you optionally set.
|
||||
type SequenceNumberAccessConditions struct {
|
||||
// IfSequenceNumberLessThan ensures that the page blob operation succeeds
|
||||
// only if the blob's sequence number is less than a value.
|
||||
// IfSequenceNumberLessThan=0 means no 'IfSequenceNumberLessThan' header specified.
|
||||
// IfSequenceNumberLessThan>0 means 'IfSequenceNumberLessThan' header specified with its value
|
||||
// IfSequenceNumberLessThan=-1 means 'IfSequenceNumberLessThan' header specified with a value of 0
|
||||
IfSequenceNumberLessThan int64
|
||||
|
||||
// IfSequenceNumberLessThanOrEqual ensures that the page blob operation succeeds
|
||||
// only if the blob's sequence number is less than or equal to a value.
|
||||
// IfSequenceNumberLessThanOrEqual=0 means no 'IfSequenceNumberLessThanOrEqual' header specified.
|
||||
// IfSequenceNumberLessThanOrEqual>0 means 'IfSequenceNumberLessThanOrEqual' header specified with its value
|
||||
// IfSequenceNumberLessThanOrEqual=-1 means 'IfSequenceNumberLessThanOrEqual' header specified with a value of 0
|
||||
IfSequenceNumberLessThanOrEqual int64
|
||||
|
||||
// IfSequenceNumberEqual ensures that the page blob operation succeeds
|
||||
// only if the blob's sequence number is equal to a value.
|
||||
// IfSequenceNumberEqual=0 means no 'IfSequenceNumberEqual' header specified.
|
||||
// IfSequenceNumberEqual>0 means 'IfSequenceNumberEqual' header specified with its value
|
||||
// IfSequenceNumberEqual=-1 means 'IfSequenceNumberEqual' header specified with a value of 0
|
||||
IfSequenceNumberEqual int64
|
||||
}
|
||||
|
||||
// pointers is for internal infrastructure. It returns the fields as pointers.
|
||||
func (ac SequenceNumberAccessConditions) pointers() (snltoe *int64, snlt *int64, sne *int64) {
|
||||
var zero int64 // Defaults to 0
|
||||
switch ac.IfSequenceNumberLessThan {
|
||||
case -1:
|
||||
snlt = &zero
|
||||
case 0:
|
||||
snlt = nil
|
||||
default:
|
||||
snlt = &ac.IfSequenceNumberLessThan
|
||||
}
|
||||
|
||||
switch ac.IfSequenceNumberLessThanOrEqual {
|
||||
case -1:
|
||||
snltoe = &zero
|
||||
case 0:
|
||||
snltoe = nil
|
||||
default:
|
||||
snltoe = &ac.IfSequenceNumberLessThanOrEqual
|
||||
}
|
||||
switch ac.IfSequenceNumberEqual {
|
||||
case -1:
|
||||
sne = &zero
|
||||
case 0:
|
||||
sne = nil
|
||||
default:
|
||||
sne = &ac.IfSequenceNumberEqual
|
||||
}
|
||||
return
|
||||
}
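
// Illustrative sketch (not part of the vendored source): how the sentinel
// values documented above map to the returned pointers (and hence to the
// request headers).
//
//	ac := SequenceNumberAccessConditions{IfSequenceNumberLessThan: -1, IfSequenceNumberEqual: 7}
//	snltoe, snlt, sne := ac.pointers()
//	// snltoe == nil (0 means "don't send the header")
//	// *snlt == 0    (-1 means "send the header with a value of 0")
//	// *sne == 7     (>0 means "send the header with that value")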
|
|
@ -0,0 +1,134 @@
|
|||
package azblob
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net/url"
|
||||
"strings"
|
||||
|
||||
"github.com/Azure/azure-pipeline-go/pipeline"
|
||||
)
|
||||
|
||||
const (
|
||||
// ContainerNameRoot is the special Azure Storage name used to identify a storage account's root container.
|
||||
ContainerNameRoot = "$root"
|
||||
|
||||
// ContainerNameLogs is the special Azure Storage name used to identify a storage account's logs container.
|
||||
ContainerNameLogs = "$logs"
|
||||
)
|
||||
|
||||
// A ServiceURL represents a URL to the Azure Storage Blob service allowing you to manipulate blob containers.
|
||||
type ServiceURL struct {
|
||||
client serviceClient
|
||||
}
|
||||
|
||||
// NewServiceURL creates a ServiceURL object using the specified URL and request policy pipeline.
|
||||
func NewServiceURL(primaryURL url.URL, p pipeline.Pipeline) ServiceURL {
|
||||
client := newServiceClient(primaryURL, p)
|
||||
return ServiceURL{client: client}
|
||||
}
|
||||
|
||||
// URL returns the URL endpoint used by the ServiceURL object.
|
||||
func (s ServiceURL) URL() url.URL {
|
||||
return s.client.URL()
|
||||
}
|
||||
|
||||
// String returns the URL as a string.
|
||||
func (s ServiceURL) String() string {
|
||||
u := s.URL()
|
||||
return u.String()
|
||||
}
|
||||
|
||||
// WithPipeline creates a new ServiceURL object identical to the source but with the specified request policy pipeline.
|
||||
func (s ServiceURL) WithPipeline(p pipeline.Pipeline) ServiceURL {
|
||||
return NewServiceURL(s.URL(), p)
|
||||
}
|
||||
|
||||
// NewContainerURL creates a new ContainerURL object by concatenating containerName to the end of
|
||||
// ServiceURL's URL. The new ContainerURL uses the same request policy pipeline as the ServiceURL.
|
||||
// To change the pipeline, create the ContainerURL and then call its WithPipeline method passing in the
|
||||
// desired pipeline object. Or, call this package's NewContainerURL instead of calling this object's
|
||||
// NewContainerURL method.
|
||||
func (s ServiceURL) NewContainerURL(containerName string) ContainerURL {
|
||||
containerURL := appendToURLPath(s.URL(), containerName)
|
||||
return NewContainerURL(containerURL, s.client.Pipeline())
|
||||
}
|
||||
|
||||
// appendToURLPath appends a string to the end of a URL's path (prefixing the string with a '/' if required)
|
||||
func appendToURLPath(u url.URL, name string) url.URL {
|
||||
// e.g. "https://ms.com/a/b/?k1=v1&k2=v2#f"
|
||||
// When you call url.Parse() this is what you'll get:
|
||||
// Scheme: "https"
|
||||
// Opaque: ""
|
||||
// User: nil
|
||||
// Host: "ms.com"
|
||||
// Path: "/a/b/" This should start with a / and it might or might not have a trailing slash
|
||||
// RawPath: ""
|
||||
// ForceQuery: false
|
||||
// RawQuery: "k1=v1&k2=v2"
|
||||
// Fragment: "f"
|
||||
if len(u.Path) == 0 || u.Path[len(u.Path)-1] != '/' {
|
||||
u.Path += "/" // Append "/" to end before appending name
|
||||
}
|
||||
u.Path += name
|
||||
return u
|
||||
}
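
// Illustrative sketch (not part of the vendored source; the account URL and
// pipeline p are placeholders): NewContainerURL simply appends the container
// name to the service URL's path via appendToURLPath.
//
//	u, _ := url.Parse("https://myaccount.blob.core.windows.net")
//	svc := NewServiceURL(*u, p)
//	c := svc.NewContainerURL("mycontainer")
//	// c.URL().String() -> "https://myaccount.blob.core.windows.net/mycontainer"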
|
||||
|
||||
// ListContainersSegment returns a single segment of containers starting from the specified Marker. Use an empty
|
||||
// Marker to start enumeration from the beginning. Container names are returned in lexicographic order.
|
||||
// After getting a segment, process it, and then call ListContainersSegment again (passing the
|
||||
// previously-returned Marker) to get the next segment. For more information, see
|
||||
// https://docs.microsoft.com/rest/api/storageservices/list-containers2.
|
||||
func (s ServiceURL) ListContainersSegment(ctx context.Context, marker Marker, o ListContainersSegmentOptions) (*ListContainersSegmentResponse, error) {
|
||||
prefix, include, maxResults := o.pointers()
|
||||
return s.client.ListContainersSegment(ctx, prefix, marker.Val, maxResults, include, nil, nil)
|
||||
}
|
||||
|
||||
// ListContainersSegmentOptions defines options available when calling ListContainersSegment.
|
||||
type ListContainersSegmentOptions struct {
|
||||
Detail ListContainersDetail // No IncludeType header is produced if ""
|
||||
Prefix string // No Prefix header is produced if ""
|
||||
MaxResults int32 // 0 means unspecified
|
||||
// TODO: update swagger to generate this type?
|
||||
}
|
||||
|
||||
func (o *ListContainersSegmentOptions) pointers() (prefix *string, include ListContainersIncludeType, maxResults *int32) {
|
||||
if o.Prefix != "" {
|
||||
prefix = &o.Prefix
|
||||
}
|
||||
if o.MaxResults != 0 {
|
||||
maxResults = &o.MaxResults
|
||||
}
|
||||
include = ListContainersIncludeType(o.Detail.string())
|
||||
return
|
||||
}
|
||||
|
||||
// ListContainersDetail indicates what additional information the service should return with each container.
|
||||
type ListContainersDetail struct {
|
||||
// Tells the service whether to return metadata for each container.
|
||||
Metadata bool
|
||||
}
|
||||
|
||||
// string produces the Include query parameter's value.
|
||||
func (d *ListContainersDetail) string() string {
|
||||
items := make([]string, 0, 1)
|
||||
// NOTE: Multiple strings MUST be appended in alphabetic order or signing the string for authentication fails!
|
||||
if d.Metadata {
|
||||
items = append(items, string(ListContainersIncludeMetadata))
|
||||
}
|
||||
if len(items) > 0 {
|
||||
return strings.Join(items, ",")
|
||||
}
|
||||
return string(ListContainersIncludeNone)
|
||||
}
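
// Illustrative sketch (not part of the vendored source; svc and ctx are
// placeholders): enumerating all containers by looping until the returned
// marker is exhausted.
//
//	for marker := (Marker{}); marker.NotDone(); {
//		resp, err := svc.ListContainersSegment(ctx, marker, ListContainersSegmentOptions{})
//		if err != nil {
//			break
//		}
//		// process the containers in resp, then continue with the next segment
//		marker = resp.NextMarker
//	}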
|
||||
|
||||
func (bsu ServiceURL) GetProperties(ctx context.Context) (*StorageServiceProperties, error) {
|
||||
return bsu.client.GetProperties(ctx, nil, nil)
|
||||
}
|
||||
|
||||
func (bsu ServiceURL) SetProperties(ctx context.Context, properties StorageServiceProperties) (*ServiceSetPropertiesResponse, error) {
|
||||
return bsu.client.SetProperties(ctx, properties, nil, nil)
|
||||
}
|
||||
|
||||
func (bsu ServiceURL) GetStatistics(ctx context.Context) (*StorageServiceStats, error) {
|
||||
return bsu.client.GetStatistics(ctx, nil, nil)
|
||||
}
|
|
@ -0,0 +1,3 @@
|
|||
package azblob
|
||||
|
||||
const serviceLibVersion = "0.6"
|
|
@ -0,0 +1,196 @@
|
|||
package azblob
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/hmac"
|
||||
"crypto/sha256"
|
||||
"encoding/base64"
|
||||
"errors"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/Azure/azure-pipeline-go/pipeline"
|
||||
)
|
||||
|
||||
// NewSharedKeyCredential creates an immutable SharedKeyCredential containing the
|
||||
// storage account's name and either its primary or secondary key.
|
||||
func NewSharedKeyCredential(accountName, accountKey string) (*SharedKeyCredential, error) {
|
||||
bytes, err := base64.StdEncoding.DecodeString(accountKey)
|
||||
if err != nil {
|
||||
return &SharedKeyCredential{}, err
|
||||
}
|
||||
return &SharedKeyCredential{accountName: accountName, accountKey: bytes}, nil
|
||||
}
|
||||
|
||||
// SharedKeyCredential contains an account's name and its primary or secondary key.
|
||||
// It is immutable making it shareable and goroutine-safe.
|
||||
type SharedKeyCredential struct {
|
||||
// Only the NewSharedKeyCredential method should set these; all other methods should treat them as read-only
|
||||
accountName string
|
||||
accountKey []byte
|
||||
}
|
||||
|
||||
// AccountName returns the Storage account's name.
|
||||
func (f SharedKeyCredential) AccountName() string {
|
||||
return f.accountName
|
||||
}
|
||||
|
||||
// New creates a credential policy object.
|
||||
func (f *SharedKeyCredential) New(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.Policy {
|
||||
return pipeline.PolicyFunc(func(ctx context.Context, request pipeline.Request) (pipeline.Response, error) {
|
||||
// Add a x-ms-date header if it doesn't already exist
|
||||
if d := request.Header.Get(headerXmsDate); d == "" {
|
||||
request.Header[headerXmsDate] = []string{time.Now().UTC().Format(http.TimeFormat)}
|
||||
}
|
||||
stringToSign, err := f.buildStringToSign(request)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
signature := f.ComputeHMACSHA256(stringToSign)
|
||||
authHeader := strings.Join([]string{"SharedKey ", f.accountName, ":", signature}, "")
|
||||
request.Header[headerAuthorization] = []string{authHeader}
|
||||
|
||||
response, err := next.Do(ctx, request)
|
||||
if err != nil && response != nil && response.Response() != nil && response.Response().StatusCode == http.StatusForbidden {
|
||||
// Service failed to authenticate request, log it
|
||||
po.Log(pipeline.LogError, "===== HTTP Forbidden status, String-to-Sign:\n"+stringToSign+"\n===============================\n")
|
||||
}
|
||||
return response, err
|
||||
})
|
||||
}
|
||||
|
||||
// credentialMarker is a package-internal method that exists just to satisfy the Credential interface.
|
||||
func (*SharedKeyCredential) credentialMarker() {}
|
||||
|
||||
// Constants ensuring that header names are correctly spelled and consistently cased.
|
||||
const (
|
||||
headerAuthorization = "Authorization"
|
||||
headerCacheControl = "Cache-Control"
|
||||
headerContentEncoding = "Content-Encoding"
|
||||
headerContentDisposition = "Content-Disposition"
|
||||
headerContentLanguage = "Content-Language"
|
||||
headerContentLength = "Content-Length"
|
||||
headerContentMD5 = "Content-MD5"
|
||||
headerContentType = "Content-Type"
|
||||
headerDate = "Date"
|
||||
headerIfMatch = "If-Match"
|
||||
headerIfModifiedSince = "If-Modified-Since"
|
||||
headerIfNoneMatch = "If-None-Match"
|
||||
headerIfUnmodifiedSince = "If-Unmodified-Since"
|
||||
headerRange = "Range"
|
||||
headerUserAgent = "User-Agent"
|
||||
headerXmsDate = "x-ms-date"
|
||||
headerXmsVersion = "x-ms-version"
|
||||
)
|
||||
|
||||
// ComputeHMACSHA256 generates a hash signature for an HTTP request or for a SAS.
|
||||
func (f *SharedKeyCredential) ComputeHMACSHA256(message string) (base64String string) {
|
||||
h := hmac.New(sha256.New, f.accountKey)
|
||||
h.Write([]byte(message))
|
||||
return base64.StdEncoding.EncodeToString(h.Sum(nil))
|
||||
}
|
||||
|
||||
func (f *SharedKeyCredential) buildStringToSign(request pipeline.Request) (string, error) {
|
||||
// https://docs.microsoft.com/en-us/rest/api/storageservices/authentication-for-the-azure-storage-services
|
||||
headers := request.Header
|
||||
contentLength := headers.Get(headerContentLength)
|
||||
if contentLength == "0" {
|
||||
contentLength = ""
|
||||
}
|
||||
|
||||
canonicalizedResource, err := f.buildCanonicalizedResource(request.URL)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
stringToSign := strings.Join([]string{
|
||||
request.Method,
|
||||
headers.Get(headerContentEncoding),
|
||||
headers.Get(headerContentLanguage),
|
||||
contentLength,
|
||||
headers.Get(headerContentMD5),
|
||||
headers.Get(headerContentType),
|
||||
"", // Empty date because x-ms-date is expected (as per web page above)
|
||||
headers.Get(headerIfModifiedSince),
|
||||
headers.Get(headerIfMatch),
|
||||
headers.Get(headerIfNoneMatch),
|
||||
headers.Get(headerIfUnmodifiedSince),
|
||||
headers.Get(headerRange),
|
||||
buildCanonicalizedHeader(headers),
|
||||
canonicalizedResource,
|
||||
}, "\n")
|
||||
return stringToSign, nil
|
||||
}
|
||||
|
||||
func buildCanonicalizedHeader(headers http.Header) string {
|
||||
cm := map[string][]string{}
|
||||
for k, v := range headers {
|
||||
headerName := strings.TrimSpace(strings.ToLower(k))
|
||||
if strings.HasPrefix(headerName, "x-ms-") {
|
||||
cm[headerName] = v // NOTE: the value must not have any whitespace around it.
|
||||
}
|
||||
}
|
||||
if len(cm) == 0 {
|
||||
return ""
|
||||
}
|
||||
|
||||
keys := make([]string, 0, len(cm))
|
||||
for key := range cm {
|
||||
keys = append(keys, key)
|
||||
}
|
||||
sort.Strings(keys)
|
||||
ch := bytes.NewBufferString("")
|
||||
for i, key := range keys {
|
||||
if i > 0 {
|
||||
ch.WriteRune('\n')
|
||||
}
|
||||
ch.WriteString(key)
|
||||
ch.WriteRune(':')
|
||||
ch.WriteString(strings.Join(cm[key], ","))
|
||||
}
|
||||
return string(ch.Bytes())
|
||||
}
|
||||
|
||||
func (f *SharedKeyCredential) buildCanonicalizedResource(u *url.URL) (string, error) {
|
||||
// https://docs.microsoft.com/en-us/rest/api/storageservices/authentication-for-the-azure-storage-services
|
||||
cr := bytes.NewBufferString("/")
|
||||
cr.WriteString(f.accountName)
|
||||
|
||||
if len(u.Path) > 0 {
|
||||
// Any portion of the CanonicalizedResource string that is derived from
|
||||
// the resource's URI should be encoded exactly as it is in the URI.
|
||||
// -- https://msdn.microsoft.com/en-gb/library/azure/dd179428.aspx
|
||||
cr.WriteString(u.EscapedPath())
|
||||
} else {
|
||||
// a slash is required to indicate the root path
|
||||
cr.WriteString("/")
|
||||
}
|
||||
|
||||
// params is a map[string][]string; param name is key; params values is []string
|
||||
params, err := url.ParseQuery(u.RawQuery) // Returns URL decoded values
|
||||
if err != nil {
|
||||
return "", errors.New("parsing query parameters must succeed, otherwise there might be serious problems in the SDK/generated code")
|
||||
}
|
||||
|
||||
if len(params) > 0 { // There is at least 1 query parameter
|
||||
paramNames := []string{} // We use this to sort the parameter key names
|
||||
for paramName := range params {
|
||||
paramNames = append(paramNames, paramName) // paramNames must be lowercase
|
||||
}
|
||||
sort.Strings(paramNames)
|
||||
|
||||
for _, paramName := range paramNames {
|
||||
paramValues := params[paramName]
|
||||
sort.Strings(paramValues)
|
||||
|
||||
// Join the sorted key values separated by ','
|
||||
// Then prepend "keyName:"; then add this string to the buffer
|
||||
cr.WriteString("\n" + paramName + ":" + strings.Join(paramValues, ","))
|
||||
}
|
||||
}
|
||||
return string(cr.Bytes()), nil
|
||||
}
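
// Illustrative sketch (not part of the vendored source; the key and the
// string-to-sign are placeholders): the Authorization value is a standard
// HMAC-SHA256 over the string-to-sign, keyed with the base64-decoded account
// key, exactly as ComputeHMACSHA256 does above.
//
//	key, _ := base64.StdEncoding.DecodeString("bXlrZXk=")
//	mac := hmac.New(sha256.New, key)
//	mac.Write([]byte("GET\n\n\n\n\n\n\n\n\n\n\n\nx-ms-date:Mon, 02 Jan 2006 15:04:05 GMT\n/myaccount/mycontainer\ncomp:list"))
//	sig := base64.StdEncoding.EncodeToString(mac.Sum(nil))
//	// Authorization: SharedKey myaccount:<sig>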
|
|
@ -0,0 +1,28 @@
|
|||
//go:build linux || darwin || freebsd || openbsd || netbsd || dragonfly
|
||||
// +build linux darwin freebsd openbsd netbsd dragonfly
|
||||
|
||||
package azblob
|
||||
|
||||
import (
|
||||
"os"
|
||||
"syscall"
|
||||
)
|
||||
|
||||
type mmf []byte
|
||||
|
||||
func newMMF(file *os.File, writable bool, offset int64, length int) (mmf, error) {
|
||||
prot, flags := syscall.PROT_READ, syscall.MAP_SHARED // Assume read-only
|
||||
if writable {
|
||||
prot, flags = syscall.PROT_READ|syscall.PROT_WRITE, syscall.MAP_SHARED
|
||||
}
|
||||
addr, err := syscall.Mmap(int(file.Fd()), offset, length, prot, flags)
|
||||
return mmf(addr), err
|
||||
}
|
||||
|
||||
func (m *mmf) unmap() {
|
||||
err := syscall.Munmap(*m)
|
||||
*m = nil
|
||||
if err != nil {
|
||||
panic("if we are unable to unmap the memory-mapped file, there is serious concern for memory corruption")
|
||||
}
|
||||
}
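
// Illustrative sketch (not part of the vendored source; the file path is a
// placeholder): mapping the first 4096 bytes of a file read-only and releasing
// the mapping. The offset must be page-aligned and the file at least that long.
//
//	f, _ := os.Open("/tmp/data.bin")
//	if m, err := newMMF(f, false, 0, 4096); err == nil {
//		_ = m[0] // read through the mapping
//		m.unmap()
//	}
//	f.Close()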
|
|
@ -0,0 +1,38 @@
|
|||
package azblob
|
||||
|
||||
import (
|
||||
"os"
|
||||
"reflect"
|
||||
"syscall"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
type mmf []byte
|
||||
|
||||
func newMMF(file *os.File, writable bool, offset int64, length int) (mmf, error) {
|
||||
prot, access := uint32(syscall.PAGE_READONLY), uint32(syscall.FILE_MAP_READ) // Assume read-only
|
||||
if writable {
|
||||
prot, access = uint32(syscall.PAGE_READWRITE), uint32(syscall.FILE_MAP_WRITE)
|
||||
}
|
||||
hMMF, errno := syscall.CreateFileMapping(syscall.Handle(file.Fd()), nil, prot, uint32(int64(length)>>32), uint32(int64(length)&0xffffffff), nil)
|
||||
if hMMF == 0 {
|
||||
return nil, os.NewSyscallError("CreateFileMapping", errno)
|
||||
}
|
||||
defer syscall.CloseHandle(hMMF)
|
||||
addr, errno := syscall.MapViewOfFile(hMMF, access, uint32(offset>>32), uint32(offset&0xffffffff), uintptr(length))
|
||||
m := mmf{}
|
||||
h := (*reflect.SliceHeader)(unsafe.Pointer(&m))
|
||||
h.Data = addr
|
||||
h.Len = length
|
||||
h.Cap = h.Len
|
||||
return m, nil
|
||||
}
|
||||
|
||||
func (m *mmf) unmap() {
|
||||
addr := uintptr(unsafe.Pointer(&(([]byte)(*m)[0])))
|
||||
*m = mmf{}
|
||||
err := syscall.UnmapViewOfFile(addr)
|
||||
if err != nil {
|
||||
panic("if we are unable to unmap the memory-mapped file, there is serious concern for memory corruption")
|
||||
}
|
||||
}
|
|
@ -0,0 +1,182 @@
|
|||
package azblob
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"runtime"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/Azure/azure-pipeline-go/pipeline"
|
||||
)
|
||||
|
||||
// RequestLogOptions configures the request logging policy's behavior.
|
||||
type RequestLogOptions struct {
|
||||
// LogWarningIfTryOverThreshold logs a warning if a tried operation takes longer than the specified
|
||||
// duration (-1=no logging; 0=default threshold).
|
||||
LogWarningIfTryOverThreshold time.Duration
|
||||
}
|
||||
|
||||
func (o RequestLogOptions) defaults() RequestLogOptions {
|
||||
if o.LogWarningIfTryOverThreshold == 0 {
|
||||
// It would be good to relate this to https://azure.microsoft.com/en-us/support/legal/sla/storage/v1_2/
|
||||
// But this monitors the time to get the HTTP response; NOT the time to download the response body.
|
||||
o.LogWarningIfTryOverThreshold = 3 * time.Second // Default to 3 seconds
|
||||
}
|
||||
return o
|
||||
}
|
||||
|
||||
// NewRequestLogPolicyFactory creates a RequestLogPolicyFactory object configured using the specified options.
|
||||
func NewRequestLogPolicyFactory(o RequestLogOptions) pipeline.Factory {
|
||||
o = o.defaults() // Force defaults to be calculated
|
||||
return pipeline.FactoryFunc(func(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.PolicyFunc {
|
||||
// These variables are per-policy; shared by multiple calls to Do
|
||||
var try int32
|
||||
operationStart := time.Now() // If this is the 1st try, record the operation state time
|
||||
return func(ctx context.Context, request pipeline.Request) (response pipeline.Response, err error) {
|
||||
try++ // The first try is #1 (not #0)
|
||||
|
||||
// Log the outgoing request as informational
|
||||
if po.ShouldLog(pipeline.LogInfo) {
|
||||
b := &bytes.Buffer{}
|
||||
fmt.Fprintf(b, "==> OUTGOING REQUEST (Try=%d)\n", try)
|
||||
pipeline.WriteRequestWithResponse(b, prepareRequestForLogging(request), nil, nil)
|
||||
po.Log(pipeline.LogInfo, b.String())
|
||||
}
|
||||
|
||||
// Set the time for this particular retry operation and then Do the operation.
|
||||
tryStart := time.Now()
|
||||
response, err = next.Do(ctx, request) // Make the request
|
||||
tryEnd := time.Now()
|
||||
tryDuration := tryEnd.Sub(tryStart)
|
||||
opDuration := tryEnd.Sub(operationStart)
|
||||
|
||||
logLevel, forceLog := pipeline.LogInfo, false // Default logging information
|
||||
|
||||
// If the response took too long, we'll upgrade to warning.
|
||||
if o.LogWarningIfTryOverThreshold > 0 && tryDuration > o.LogWarningIfTryOverThreshold {
|
||||
// Log a warning if the try duration exceeded the specified threshold
|
||||
logLevel, forceLog = pipeline.LogWarning, true
|
||||
}
|
||||
|
||||
if err == nil { // We got a response from the service
|
||||
sc := response.Response().StatusCode
|
||||
if ((sc >= 400 && sc <= 499) && sc != http.StatusNotFound && sc != http.StatusConflict && sc != http.StatusPreconditionFailed && sc != http.StatusRequestedRangeNotSatisfiable) || (sc >= 500 && sc <= 599) {
|
||||
logLevel, forceLog = pipeline.LogError, true // Promote to Error any 4xx (except those listed above) or any 5xx
|
||||
} else {
|
||||
// For other status codes, we leave the level as is.
|
||||
}
|
||||
} else { // This error did not get an HTTP response from the service; upgrade the severity to Error
|
||||
logLevel, forceLog = pipeline.LogError, true
|
||||
}
|
||||
|
||||
if shouldLog := po.ShouldLog(logLevel); forceLog || shouldLog {
|
||||
// We're going to log this; build the string to log
|
||||
b := &bytes.Buffer{}
|
||||
slow := ""
|
||||
if o.LogWarningIfTryOverThreshold > 0 && tryDuration > o.LogWarningIfTryOverThreshold {
|
||||
slow = fmt.Sprintf("[SLOW >%v]", o.LogWarningIfTryOverThreshold)
|
||||
}
|
||||
fmt.Fprintf(b, "==> REQUEST/RESPONSE (Try=%d/%v%s, OpTime=%v) -- ", try, tryDuration, slow, opDuration)
|
||||
if err != nil { // This HTTP request did not get a response from the service
|
||||
fmt.Fprint(b, "REQUEST ERROR\n")
|
||||
} else {
|
||||
if logLevel == pipeline.LogError {
|
||||
fmt.Fprint(b, "RESPONSE STATUS CODE ERROR\n")
|
||||
} else {
|
||||
fmt.Fprint(b, "RESPONSE SUCCESSFULLY RECEIVED\n")
|
||||
}
|
||||
}
|
||||
|
||||
pipeline.WriteRequestWithResponse(b, prepareRequestForLogging(request), response.Response(), err)
|
||||
if logLevel <= pipeline.LogError {
|
||||
b.Write(stack()) // For errors (or lower levels), we append the stack trace (an expensive operation)
|
||||
}
|
||||
msg := b.String()
|
||||
|
||||
if forceLog {
|
||||
pipeline.ForceLog(logLevel, msg)
|
||||
}
|
||||
if shouldLog {
|
||||
po.Log(logLevel, msg)
|
||||
}
|
||||
}
|
||||
return response, err
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
// RedactSigQueryParam redacts the 'sig' query parameter in a URL's raw query to protect the secret.
|
||||
func RedactSigQueryParam(rawQuery string) (bool, string) {
|
||||
rawQuery = strings.ToLower(rawQuery) // lowercase the string so we can look for ?sig= and &sig=
|
||||
sigFound := strings.Contains(rawQuery, "?sig=")
|
||||
if !sigFound {
|
||||
sigFound = strings.Contains(rawQuery, "&sig=")
|
||||
if !sigFound {
|
||||
return sigFound, rawQuery // [?|&]sig= not found; return same rawQuery passed in (no memory allocation)
|
||||
}
|
||||
}
|
||||
// [?|&]sig= found, redact its value
|
||||
values, _ := url.ParseQuery(rawQuery)
|
||||
for name := range values {
|
||||
if strings.EqualFold(name, "sig") {
|
||||
values[name] = []string{"REDACTED"}
|
||||
}
|
||||
}
|
||||
return sigFound, values.Encode()
|
||||
}
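
// Illustrative sketch (not part of the vendored source): redacting a SAS
// signature before writing a URL's query to a log.
//
//	found, q := RedactSigQueryParam("sv=2018-11-09&se=2020-01-01&sig=secret")
//	// found == true; q is the re-encoded (lowercased) query with sig=REDACTED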
|
||||
|
||||
func prepareRequestForLogging(request pipeline.Request) *http.Request {
|
||||
req := request
|
||||
if sigFound, rawQuery := RedactSigQueryParam(req.URL.RawQuery); sigFound {
|
||||
// Make copy so we don't destroy the query parameters we actually need to send in the request
|
||||
req = request.Copy()
|
||||
req.Request.URL.RawQuery = rawQuery
|
||||
}
|
||||
|
||||
return prepareRequestForServiceLogging(req)
|
||||
}
|
||||
|
||||
func stack() []byte {
|
||||
buf := make([]byte, 1024)
|
||||
for {
|
||||
n := runtime.Stack(buf, false)
|
||||
if n < len(buf) {
|
||||
return buf[:n]
|
||||
}
|
||||
buf = make([]byte, 2*len(buf))
|
||||
}
|
||||
}
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////////////////
|
||||
// Redaction is only needed for the blob and file services. For other services,
|
||||
// this method can directly return request.Request.
|
||||
///////////////////////////////////////////////////////////////////////////////////////
|
||||
func prepareRequestForServiceLogging(request pipeline.Request) *http.Request {
|
||||
req := request
|
||||
if exist, key := doesHeaderExistCaseInsensitive(req.Header, xMsCopySourceHeader); exist {
|
||||
req = request.Copy()
|
||||
url, err := url.Parse(req.Header.Get(key))
|
||||
if err == nil {
|
||||
if sigFound, rawQuery := RedactSigQueryParam(url.RawQuery); sigFound {
|
||||
url.RawQuery = rawQuery
|
||||
req.Header.Set(xMsCopySourceHeader, url.String())
|
||||
}
|
||||
}
|
||||
}
|
||||
return req.Request
|
||||
}
|
||||
|
||||
const xMsCopySourceHeader = "x-ms-copy-source"
|
||||
|
||||
func doesHeaderExistCaseInsensitive(header http.Header, key string) (bool, string) {
|
||||
for keyInHeader := range header {
|
||||
if strings.EqualFold(keyInHeader, key) {
|
||||
return true, keyInHeader
|
||||
}
|
||||
}
|
||||
return false, ""
|
||||
}
|
|
@ -0,0 +1,412 @@
|
|||
package azblob
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"math/rand"
|
||||
"net"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/Azure/azure-pipeline-go/pipeline"
|
||||
)
|
||||
|
||||
// RetryPolicy tells the pipeline what kind of retry policy to use. See the RetryPolicy* constants.
|
||||
type RetryPolicy int32
|
||||
|
||||
const (
|
||||
// RetryPolicyExponential tells the pipeline to use an exponential back-off retry policy
|
||||
RetryPolicyExponential RetryPolicy = 0
|
||||
|
||||
// RetryPolicyFixed tells the pipeline to use a fixed back-off retry policy
|
||||
RetryPolicyFixed RetryPolicy = 1
|
||||
)
|
||||
|
||||
// RetryOptions configures the retry policy's behavior.
|
||||
type RetryOptions struct {
|
||||
// Policy tells the pipeline what kind of retry policy to use. See the RetryPolicy* constants.
|
||||
// A value of zero means that you accept our default policy.
|
||||
Policy RetryPolicy
|
||||
|
||||
// MaxTries specifies the maximum number of attempts an operation will be tried before producing an error (0=default).
|
||||
// A value of zero means that you accept our default policy. A value of 1 means 1 try and no retries.
|
||||
MaxTries int32
|
||||
|
||||
// TryTimeout indicates the maximum time allowed for any single try of an HTTP request.
|
||||
// A value of zero means that you accept our default timeout. NOTE: When transferring large amounts
|
||||
// of data, the default TryTimeout will probably not be sufficient. You should override this value
|
||||
// based on the bandwidth available to the host machine and proximity to the Storage service. A good
|
||||
// starting point may be something like (60 seconds per MB of anticipated-payload-size).
|
||||
TryTimeout time.Duration
|
||||
|
||||
// RetryDelay specifies the amount of delay to use before retrying an operation (0=default).
|
||||
// When RetryPolicy is specified as RetryPolicyExponential, the delay increases exponentially
|
||||
// with each retry up to a maximum specified by MaxRetryDelay.
|
||||
// If you specify 0, then you must also specify 0 for MaxRetryDelay.
|
||||
// If you specify RetryDelay, then you must also specify MaxRetryDelay, and MaxRetryDelay should be
|
||||
// equal to or greater than RetryDelay.
|
||||
RetryDelay time.Duration
|
||||
|
||||
// MaxRetryDelay specifies the maximum delay allowed before retrying an operation (0=default).
|
||||
// If you specify 0, then you must also specify 0 for RetryDelay.
|
||||
MaxRetryDelay time.Duration
|
||||
|
||||
// RetryReadsFromSecondaryHost specifies whether the retry policy should retry a read operation against another host.
|
||||
// If RetryReadsFromSecondaryHost is "" (the default) then operations are not retried against another host.
|
||||
// NOTE: Before setting this field, make sure you understand the issues around reading stale & potentially-inconsistent
|
||||
// data at this webpage: https://docs.microsoft.com/en-us/azure/storage/common/storage-designing-ha-apps-with-ragrs
|
||||
RetryReadsFromSecondaryHost string // Comment this out for non-Blob SDKs
|
||||
}
|
||||
|
||||
func (o RetryOptions) retryReadsFromSecondaryHost() string {
|
||||
return o.RetryReadsFromSecondaryHost // This is for the Blob SDK only
|
||||
//return "" // This is for non-blob SDKs
|
||||
}
|
||||
|
||||
func (o RetryOptions) defaults() RetryOptions {
|
||||
// We assume the following:
|
||||
// 1. o.Policy should either be RetryPolicyExponential or RetryPolicyFixed
|
||||
// 2. o.MaxTries >= 0
|
||||
// 3. o.TryTimeout, o.RetryDelay, and o.MaxRetryDelay >=0
|
||||
// 4. o.RetryDelay <= o.MaxRetryDelay
|
||||
// 5. Both o.RetryDelay and o.MaxRetryDelay must be 0 or neither can be 0
|
||||
|
||||
IfDefault := func(current *time.Duration, desired time.Duration) {
|
||||
if *current == time.Duration(0) {
|
||||
*current = desired
|
||||
}
|
||||
}
|
||||
|
||||
// Set defaults if unspecified
|
||||
if o.MaxTries == 0 {
|
||||
o.MaxTries = 4
|
||||
}
|
||||
switch o.Policy {
|
||||
case RetryPolicyExponential:
|
||||
IfDefault(&o.TryTimeout, 1*time.Minute)
|
||||
IfDefault(&o.RetryDelay, 4*time.Second)
|
||||
IfDefault(&o.MaxRetryDelay, 120*time.Second)
|
||||
|
||||
case RetryPolicyFixed:
|
||||
IfDefault(&o.TryTimeout, 1*time.Minute)
|
||||
IfDefault(&o.RetryDelay, 30*time.Second)
|
||||
IfDefault(&o.MaxRetryDelay, 120*time.Second)
|
||||
}
|
||||
return o
|
||||
}
|
||||
|
||||
func (o RetryOptions) calcDelay(try int32) time.Duration { // try is >=1; never 0
|
||||
pow := func(number int64, exponent int32) int64 { // pow is nested helper function
|
||||
var result int64 = 1
|
||||
for n := int32(0); n < exponent; n++ {
|
||||
result *= number
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
delay := time.Duration(0)
|
||||
switch o.Policy {
|
||||
case RetryPolicyExponential:
|
||||
delay = time.Duration(pow(2, try-1)-1) * o.RetryDelay
|
||||
|
||||
case RetryPolicyFixed:
|
||||
if try > 1 { // Any try after the 1st uses the fixed delay
|
||||
delay = o.RetryDelay
|
||||
}
|
||||
}
|
||||
|
||||
// Introduce some jitter: [0.0, 1.0) / 2 = [0.0, 0.5) + 0.8 = [0.8, 1.3)
|
||||
// For casts and rounding - be careful, as per https://github.com/golang/go/issues/20757
|
||||
delay = time.Duration(float32(delay) * (rand.Float32()/2 + 0.8)) // NOTE: We want math/rand; not crypto/rand
|
||||
if delay > o.MaxRetryDelay {
|
||||
delay = o.MaxRetryDelay
|
||||
}
|
||||
return delay
|
||||
}
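
// Illustrative calculation (not part of the vendored source): with the
// exponential defaults above (RetryDelay=4s, MaxRetryDelay=120s), the base
// delays before jitter and capping are:
//
//	try 1: (2^0 - 1) * 4s = 0s
//	try 2: (2^1 - 1) * 4s = 4s
//	try 3: (2^2 - 1) * 4s = 12s
//	try 4: (2^3 - 1) * 4s = 28s
//
// Each delay is then scaled by a random factor in [0.8, 1.3) and capped at MaxRetryDelay.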
|
||||
|
||||
// NewRetryPolicyFactory creates a RetryPolicyFactory object configured using the specified options.
|
||||
func NewRetryPolicyFactory(o RetryOptions) pipeline.Factory {
|
||||
o = o.defaults() // Force defaults to be calculated
|
||||
return pipeline.FactoryFunc(func(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.PolicyFunc {
|
||||
return func(ctx context.Context, request pipeline.Request) (response pipeline.Response, err error) {
|
||||
// Before each try, we'll select either the primary or secondary URL.
|
||||
primaryTry := int32(0) // This indicates how many tries we've attempted against the primary DC
|
||||
|
||||
// We only consider retrying against a secondary if we have a read request (GET/HEAD) AND this policy has a Secondary URL it can use
|
||||
considerSecondary := (request.Method == http.MethodGet || request.Method == http.MethodHead) && o.retryReadsFromSecondaryHost() != ""
|
||||
|
||||
// Exponential retry algorithm: ((2 ^ attempt) - 1) * delay * random(0.8, 1.2)
|
||||
// When to retry: connection failure or temporary/timeout. NOTE: StorageError considers HTTP 500/503 as temporary & is therefore retryable
|
||||
// If using a secondary:
|
||||
// Even tries go against primary; odd tries go against the secondary
|
||||
// For a primary, wait ((2 ^ primaryTries) - 1) * delay * random(0.8, 1.2)
|
||||
// If secondary gets a 404, don't fail, retry but future retries are only against the primary
|
||||
// When retrying against a secondary, ignore the retry count and wait (.1 second * random(0.8, 1.2))
|
||||
for try := int32(1); try <= o.MaxTries; try++ {
|
||||
logf("\n=====> Try=%d\n", try)
|
||||
|
||||
// Determine which endpoint to try. It's primary if there is no secondary or if it is an odd # attempt.
|
||||
tryingPrimary := !considerSecondary || (try%2 == 1)
|
||||
// Select the correct host and delay
|
||||
if tryingPrimary {
|
||||
primaryTry++
|
||||
delay := o.calcDelay(primaryTry)
|
||||
logf("Primary try=%d, Delay=%v\n", primaryTry, delay)
|
||||
time.Sleep(delay) // The 1st try returns 0 delay
|
||||
} else {
|
||||
// For casts and rounding - be careful, as per https://github.com/golang/go/issues/20757
|
||||
delay := time.Duration(float32(time.Second) * (rand.Float32()/2 + 0.8))
|
||||
logf("Secondary try=%d, Delay=%v\n", try-primaryTry, delay)
|
||||
time.Sleep(delay) // Delay with some jitter before trying secondary
|
||||
}
|
||||
|
||||
// Clone the original request to ensure that each try starts with the original (unmutated) request.
|
||||
requestCopy := request.Copy()
|
||||
|
||||
// For each try, seek to the beginning of the Body stream. We do this even for the 1st try because
|
||||
// the stream may not be at offset 0 when we first get it and we want the same behavior for the
|
||||
// 1st try as for additional tries.
|
||||
err = requestCopy.RewindBody()
|
||||
if err != nil {
|
||||
return nil, errors.New("we must be able to seek on the Body Stream, otherwise retries would cause data corruption")
|
||||
}
|
||||
|
||||
if !tryingPrimary {
|
||||
requestCopy.Request.URL.Host = o.retryReadsFromSecondaryHost()
|
||||
}
|
||||
|
||||
// Set the server-side timeout query parameter "timeout=[seconds]"
|
||||
timeout := int32(o.TryTimeout.Seconds()) // Max seconds per try
|
||||
if deadline, ok := ctx.Deadline(); ok { // If user's ctx has a deadline, make the timeout the smaller of the two
|
||||
t := int32(deadline.Sub(time.Now()).Seconds()) // Duration from now until user's ctx reaches its deadline
|
||||
logf("MaxTryTimeout=%d secs, TimeTilDeadline=%d sec\n", timeout, t)
|
||||
if t < timeout {
|
||||
timeout = t
|
||||
}
|
||||
if timeout < 0 {
|
||||
timeout = 0 // If timeout ever goes negative, set it to zero; this can happen while debugging
|
||||
}
|
||||
logf("TryTimeout adjusted to=%d sec\n", timeout)
|
||||
}
|
||||
q := requestCopy.Request.URL.Query()
|
||||
q.Set("timeout", strconv.Itoa(int(timeout+1))) // Add 1 to "round up"
|
||||
requestCopy.Request.URL.RawQuery = q.Encode()
|
||||
logf("Url=%s\n", requestCopy.Request.URL.String())
|
||||
|
||||
// Set the time for this particular retry operation and then Do the operation.
|
||||
tryCtx, tryCancel := context.WithTimeout(ctx, time.Second*time.Duration(timeout))
|
||||
//requestCopy.Body = &deadlineExceededReadCloser{r: requestCopy.Request.Body}
|
||||
response, err = next.Do(tryCtx, requestCopy) // Make the request
|
||||
/*err = improveDeadlineExceeded(err)
|
||||
if err == nil {
|
||||
response.Response().Body = &deadlineExceededReadCloser{r: response.Response().Body}
|
||||
}*/
|
||||
logf("Err=%v, response=%v\n", err, response)
|
||||
|
||||
action := "" // This MUST get changed within the switch code below
|
||||
switch {
|
||||
case ctx.Err() != nil:
|
||||
action = "NoRetry: Op timeout"
|
||||
case !tryingPrimary && response != nil && response.Response().StatusCode == http.StatusNotFound:
|
||||
// If attempt was against the secondary & it returned a StatusNotFound (404), then
|
||||
// the resource was not found. This may be due to replication delay. So, in this
|
||||
// case, we'll never try the secondary again for this operation.
|
||||
considerSecondary = false
|
||||
action = "Retry: Secondary URL returned 404"
|
||||
case err != nil:
|
||||
// NOTE: Protocol Responder returns a non-nil error if the REST API returns an invalid status code for the invoked operation.
|
||||
// Use ServiceCode to verify whether the error originated on the storage service side;
|
||||
// ServiceCode is set only when a storage service error occurred.
|
||||
if stErr, ok := err.(StorageError); ok {
|
||||
if stErr.Temporary() {
|
||||
action = "Retry: StorageError with error service code and Temporary()"
|
||||
} else if stErr.Response() != nil && isSuccessStatusCode(stErr.Response()) { // TODO: This is a temporarily work around, remove this after protocol layer fix the issue that net.Error is wrapped as storageError
|
||||
action = "Retry: StorageError with success status code"
|
||||
} else {
|
||||
action = "NoRetry: StorageError not Temporary() and without retriable status code"
|
||||
}
|
||||
} else if netErr, ok := err.(net.Error); ok {
|
||||
// Use a list of non-retriable net.Errors rather than a list of retriable ones.
|
||||
// Some errors that need to be retried, such as 'connection reset by peer' or 'transport connection broken',
|
||||
// do not implement Temporary(), so the SDK retries in most cases,
|
||||
// unless the error should definitely not be retried.
|
||||
if !isNotRetriable(netErr) {
|
||||
action = "Retry: net.Error and not in the non-retriable list"
|
||||
} else {
|
||||
action = "NoRetry: net.Error and in the non-retriable list"
|
||||
}
|
||||
} else {
|
||||
action = "NoRetry: unrecognized error"
|
||||
}
|
||||
default:
|
||||
action = "NoRetry: successful HTTP request" // no error
|
||||
}
|
||||
|
||||
logf("Action=%s\n", action)
|
||||
// fmt.Println(action + "\n") // This is where we could log the retry operation; action is why we're retrying
|
||||
if action[0] != 'R' { // Retry only if action starts with 'R'
|
||||
if err != nil {
|
||||
tryCancel() // If we're returning an error, cancel this current/last per-retry timeout context
|
||||
} else {
|
||||
// We wrap the last per-try context in a body and overwrite the Response's Body field with our wrapper.
|
||||
// So, when the user closes the Body, our per-try context gets cancelled too.
|
||||
// Another option is for the last policy to do this wrapping for a per-retry context (not for the user's context).
|
||||
if response == nil || response.Response() == nil {
|
||||
// We return an error in the case response or response.Response() is nil,
|
||||
// as the response should not be nil if the request was sent and the operation executed successfully.
|
||||
// Another option is to execute the cancel function when response or response.Response() is nil,
|
||||
// as in that case the current per-try context has nothing left to do.
|
||||
tryCancel()
|
||||
return nil, errors.New("invalid state, response should not be nil when the operation is executed successfully")
|
||||
}
|
||||
response.Response().Body = &contextCancelReadCloser{cf: tryCancel, body: response.Response().Body}
|
||||
}
|
||||
break // Don't retry
|
||||
}
|
||||
if response != nil && response.Response() != nil && response.Response().Body != nil {
|
||||
// If we're going to retry and we got a previous response, then flush its body to avoid leaking its TCP connection
|
||||
body := response.Response().Body
|
||||
io.Copy(ioutil.Discard, body)
|
||||
body.Close()
|
||||
}
|
||||
// If retrying, cancel the current per-try timeout context
|
||||
tryCancel()
|
||||
}
|
||||
return response, err // Not retryable or too many retries; return the last response/error
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
// contextCancelReadCloser helps to invoke context's cancelFunc properly when the ReadCloser is closed.
|
||||
type contextCancelReadCloser struct {
|
||||
cf context.CancelFunc
|
||||
body io.ReadCloser
|
||||
}
|
||||
|
||||
func (rc *contextCancelReadCloser) Read(p []byte) (n int, err error) {
|
||||
return rc.body.Read(p)
|
||||
}
|
||||
|
||||
func (rc *contextCancelReadCloser) Close() error {
|
||||
err := rc.body.Close()
|
||||
if rc.cf != nil {
|
||||
rc.cf()
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// isNotRetriable checks if the provided net.Error isn't retriable.
|
||||
func isNotRetriable(errToParse net.Error) bool {
|
||||
// No error, so this is NOT retriable.
|
||||
if errToParse == nil {
|
||||
return true
|
||||
}
|
||||
|
||||
// The error is either temporary or a timeout so it IS retriable (not not retriable).
|
||||
if errToParse.Temporary() || errToParse.Timeout() {
|
||||
return false
|
||||
}
|
||||
|
||||
genericErr := error(errToParse)
|
||||
|
||||
// From here all the error are neither Temporary() nor Timeout().
|
||||
switch err := errToParse.(type) {
|
||||
case *net.OpError:
|
||||
// The net.Error is also a net.OpError but the inner error is nil, so this is not retriable.
|
||||
if err.Err == nil {
|
||||
return true
|
||||
}
|
||||
genericErr = err.Err
|
||||
}
|
||||
|
||||
switch genericErr.(type) {
|
||||
case *net.AddrError, net.UnknownNetworkError, *net.DNSError, net.InvalidAddrError, *net.ParseError, *net.DNSConfigError:
|
||||
// If the error is one of the ones listed, then it is NOT retriable.
|
||||
return true
|
||||
}
|
||||
|
||||
// If it's an invalid header field name/value error thrown by the http module, then it is NOT retriable.
|
||||
// This could happen when metadata's key or value is invalid. (RoundTrip in transport.go)
|
||||
if strings.Contains(genericErr.Error(), "invalid header field") {
|
||||
return true
|
||||
}
|
||||
|
||||
// Assume the error is retriable.
|
||||
return false
|
||||
}
|
||||
|
||||
var successStatusCodes = []int{http.StatusOK, http.StatusCreated, http.StatusAccepted, http.StatusNoContent, http.StatusPartialContent}
|
||||
|
||||
func isSuccessStatusCode(resp *http.Response) bool {
|
||||
if resp == nil {
|
||||
return false
|
||||
}
|
||||
for _, i := range successStatusCodes {
|
||||
if i == resp.StatusCode {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// According to https://github.com/golang/go/wiki/CompilerOptimizations, the compiler will inline this method and hopefully optimize all calls to it away
|
||||
var logf = func(format string, a ...interface{}) {}
|
||||
|
||||
// Use this version to see the retry method's code path (import "fmt")
|
||||
//var logf = fmt.Printf
|
||||
|
||||
/*
|
||||
type deadlineExceededReadCloser struct {
|
||||
r io.ReadCloser
|
||||
}
|
||||
|
||||
func (r *deadlineExceededReadCloser) Read(p []byte) (int, error) {
|
||||
n, err := 0, io.EOF
|
||||
if r.r != nil {
|
||||
n, err = r.r.Read(p)
|
||||
}
|
||||
return n, improveDeadlineExceeded(err)
|
||||
}
|
||||
func (r *deadlineExceededReadCloser) Seek(offset int64, whence int) (int64, error) {
|
||||
// For an HTTP request, the ReadCloser MUST also implement seek
|
||||
// For an HTTP response, Seek MUST not be called (or this will panic)
|
||||
o, err := r.r.(io.Seeker).Seek(offset, whence)
|
||||
return o, improveDeadlineExceeded(err)
|
||||
}
|
||||
func (r *deadlineExceededReadCloser) Close() error {
|
||||
if c, ok := r.r.(io.Closer); ok {
|
||||
c.Close()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// timeoutError is the internal struct that implements our richer timeout error.
|
||||
type deadlineExceeded struct {
|
||||
responseError
|
||||
}
|
||||
|
||||
var _ net.Error = (*deadlineExceeded)(nil) // Ensure deadlineExceeded implements the net.Error interface at compile time
|
||||
|
||||
// improveDeadlineExceeded creates a timeoutError object that implements the error interface IF cause is a context.DeadlineExceeded error.
|
||||
func improveDeadlineExceeded(cause error) error {
|
||||
// If cause is not DeadlineExceeded, return the same error passed in.
|
||||
if cause != context.DeadlineExceeded {
|
||||
return cause
|
||||
}
|
||||
// Else, convert DeadlineExceeded to our timeoutError which gives a richer string message
|
||||
return &deadlineExceeded{
|
||||
responseError: responseError{
|
||||
ErrorNode: pipeline.ErrorNode{}.Initialize(cause, 3),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// Error implements the error interface's Error method to return a string representation of the error.
|
||||
func (e *deadlineExceeded) Error() string {
|
||||
return e.ErrorNode.Error("context deadline exceeded; when creating a pipeline, consider increasing RetryOptions' TryTimeout field")
|
||||
}
|
||||
*/
|
|
@ -0,0 +1,51 @@
|
|||
package azblob
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"runtime"
|
||||
|
||||
"github.com/Azure/azure-pipeline-go/pipeline"
|
||||
)
|
||||
|
||||
// TelemetryOptions configures the telemetry policy's behavior.
|
||||
type TelemetryOptions struct {
|
||||
// Value is a string prepended to each request's User-Agent and sent to the service.
|
||||
// The service records the user-agent in logs for diagnostics and tracking of client requests.
|
||||
Value string
|
||||
}
|
||||
|
||||
// NewTelemetryPolicyFactory creates a factory that can create telemetry policy objects
|
||||
// which add telemetry information to outgoing HTTP requests.
|
||||
func NewTelemetryPolicyFactory(o TelemetryOptions) pipeline.Factory {
|
||||
b := &bytes.Buffer{}
|
||||
b.WriteString(o.Value)
|
||||
if b.Len() > 0 {
|
||||
b.WriteRune(' ')
|
||||
}
|
||||
fmt.Fprintf(b, "Azure-Storage/%s %s", serviceLibVersion, platformInfo)
|
||||
telemetryValue := b.String()
|
||||
|
||||
return pipeline.FactoryFunc(func(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.PolicyFunc {
|
||||
return func(ctx context.Context, request pipeline.Request) (pipeline.Response, error) {
|
||||
request.Header.Set("User-Agent", telemetryValue)
|
||||
return next.Do(ctx, request)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
// NOTE: the ONLY function that should write to this variable is this func
|
||||
var platformInfo = func() string {
|
||||
// Azure-Storage/version (runtime; os type and version)
|
||||
// Azure-Storage/1.4.0 (NODE-VERSION v4.5.0; Windows_NT 10.0.14393)
|
||||
operatingSystem := runtime.GOOS // Default OS string
|
||||
switch operatingSystem {
|
||||
case "windows":
|
||||
operatingSystem = os.Getenv("OS") // Get more specific OS information
|
||||
case "linux": // accept default OS info
|
||||
case "freebsd": // accept default OS info
|
||||
}
|
||||
return fmt.Sprintf("(%s; %s)", runtime.Version(), operatingSystem)
|
||||
}()
|
|
@ -0,0 +1,24 @@
|
|||
package azblob
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/Azure/azure-pipeline-go/pipeline"
|
||||
)
|
||||
|
||||
// NewUniqueRequestIDPolicyFactory creates a UniqueRequestIDPolicyFactory object
|
||||
// that sets the request's x-ms-client-request-id header if it doesn't already exist.
|
||||
func NewUniqueRequestIDPolicyFactory() pipeline.Factory {
|
||||
return pipeline.FactoryFunc(func(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.PolicyFunc {
|
||||
// This is Policy's Do method:
|
||||
return func(ctx context.Context, request pipeline.Request) (pipeline.Response, error) {
|
||||
id := request.Header.Get(xMsClientRequestID)
|
||||
if id == "" { // Add a unique request ID if the caller didn't specify one already
|
||||
request.Header.Set(xMsClientRequestID, newUUID().String())
|
||||
}
|
||||
return next.Do(ctx, request)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
const xMsClientRequestID = "x-ms-client-request-id"
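
// Illustrative sketch (not part of the vendored source; the exact factory
// ordering mirrors a typical blob pipeline and is an assumption here): these
// policy factories are composed into a pipeline, with per-operation policies
// before the retry factory and per-try policies after it.
//
//	factories := []pipeline.Factory{
//		NewTelemetryPolicyFactory(TelemetryOptions{}),
//		NewUniqueRequestIDPolicyFactory(),
//		NewRetryPolicyFactory(RetryOptions{}),
//		cred, // e.g. a *SharedKeyCredential, which is itself a pipeline.Factory
//		NewRequestLogPolicyFactory(RequestLogOptions{}),
//	}
//	p := pipeline.NewPipeline(factories, pipeline.Options{})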
|
|
@ -0,0 +1,178 @@
|
|||
package azblob
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io"
|
||||
"net"
|
||||
"net/http"
|
||||
"strings"
|
||||
"sync"
|
||||
)
|
||||
|
||||
const CountToEnd = 0
|
||||
|
||||
// HTTPGetter is a function type that refers to a method that performs an HTTP GET operation.
|
||||
type HTTPGetter func(ctx context.Context, i HTTPGetterInfo) (*http.Response, error)
|
||||
|
||||
// HTTPGetterInfo is passed to an HTTPGetter function passing it parameters
|
||||
// that should be used to make an HTTP GET request.
|
||||
type HTTPGetterInfo struct {
|
||||
// Offset specifies the start offset that should be used when
|
||||
// creating the HTTP GET request's Range header
|
||||
Offset int64
|
||||
|
||||
// Count specifies the count of bytes that should be used to calculate
|
||||
// the end offset when creating the HTTP GET request's Range header
|
||||
Count int64
|
||||
|
||||
// ETag specifies the resource's etag that should be used when creating
|
||||
// the HTTP GET request's If-Match header
|
||||
ETag ETag
|
||||
}
|
||||
|
||||
// FailedReadNotifier is a function type that represents the notification function called when a read fails
|
||||
type FailedReadNotifier func(failureCount int, lastError error, offset int64, count int64, willRetry bool)
|
||||
|
||||
// RetryReaderOptions contains properties which can help to decide when to do retry.
|
||||
type RetryReaderOptions struct {
|
||||
// MaxRetryRequests specifies the maximum number of HTTP GET requests that will be made
|
||||
// while reading from a RetryReader. A value of zero means that no additional HTTP
|
||||
// GET requests will be made.
|
||||
MaxRetryRequests int
|
||||
doInjectError bool
|
||||
doInjectErrorRound int
|
||||
|
||||
// NotifyFailedRead is called, if non-nil, after any failure to read. Expected usage is diagnostic logging.
|
||||
NotifyFailedRead FailedReadNotifier
|
||||
|
||||
// TreatEarlyCloseAsError can be set to true to prevent retries after "read on closed response body". By default,
|
||||
// retryReader has the following special behaviour: closing the response body before it is all read is treated as a
|
||||
// retryable error. This is to allow callers to force a retry by closing the body from another goroutine (e.g. if the
|
||||
// read is too slow, caller may want to force a retry in the hope that the retry will be quicker). If
|
||||
// TreatEarlyCloseAsError is true, then retryReader's special behaviour is suppressed, and "read on closed body" is instead
|
||||
// treated as a fatal (non-retryable) error.
|
||||
// Note that setting TreatEarlyCloseAsError only guarantees that Closing will produce a fatal error if the Close happens
|
||||
// from the same "thread" (goroutine) as Read. Concurrent Close calls from other goroutines may instead produce network errors
|
||||
// which will be retried.
|
||||
TreatEarlyCloseAsError bool
|
||||
}
|
||||
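For illustration, a sketch of configuring these options with a diagnostic callback; in the public azblob API such options are typically passed when obtaining a download's body as a retry reader. The log destination is an assumption:

package main

import (
	"log"

	"github.com/Azure/azure-storage-blob-go/azblob"
)

func retryReaderOptions() azblob.RetryReaderOptions {
	return azblob.RetryReaderOptions{
		MaxRetryRequests: 3, // up to 3 additional GETs after the initial response
		NotifyFailedRead: func(failureCount int, lastError error, offset int64, count int64, willRetry bool) {
			log.Printf("read failure #%d at offset=%d count=%d willRetry=%v: %v",
				failureCount, offset, count, willRetry, lastError)
		},
		// Leaving TreatEarlyCloseAsError false keeps the "close to force a retry" behaviour described above.
	}
}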
|
||||
// retryReader implements io.ReadCloser.
|
||||
// retryReader tries to read from the response and, if a retriable network error
|
||||
// occurs during reading, retries according to the retry reader options by executing the
|
||||
// user-defined getter with the current offset/count to obtain a new response, then continues
|
||||
// the overall read from that new response.
|
||||
type retryReader struct {
|
||||
ctx context.Context
|
||||
info HTTPGetterInfo
|
||||
countWasBounded bool
|
||||
o RetryReaderOptions
|
||||
getter HTTPGetter
|
||||
|
||||
// we support Close-ing during Reads (from other goroutines), so we protect the shared state, which is response
|
||||
responseMu *sync.Mutex
|
||||
response *http.Response
|
||||
}
|
||||
|
||||
// NewRetryReader creates a retry reader.
|
||||
func NewRetryReader(ctx context.Context, initialResponse *http.Response,
|
||||
info HTTPGetterInfo, o RetryReaderOptions, getter HTTPGetter) io.ReadCloser {
|
||||
return &retryReader{
|
||||
ctx: ctx,
|
||||
getter: getter,
|
||||
info: info,
|
||||
countWasBounded: info.Count != CountToEnd,
|
||||
response: initialResponse,
|
||||
responseMu: &sync.Mutex{},
|
||||
o: o}
|
||||
}
|
||||
|
||||
func (s *retryReader) setResponse(r *http.Response) {
|
||||
s.responseMu.Lock()
|
||||
defer s.responseMu.Unlock()
|
||||
s.response = r
|
||||
}
|
||||
|
||||
func (s *retryReader) Read(p []byte) (n int, err error) {
|
||||
for try := 0; ; try++ {
|
||||
//fmt.Println(try) // Comment out for debugging.
|
||||
if s.countWasBounded && s.info.Count == CountToEnd {
|
||||
// User specified an original count and the remaining bytes are 0, return 0, EOF
|
||||
return 0, io.EOF
|
||||
}
|
||||
|
||||
s.responseMu.Lock()
|
||||
resp := s.response
|
||||
s.responseMu.Unlock()
|
||||
if resp == nil { // We don't have a response stream to read from, try to get one.
|
||||
newResponse, err := s.getter(s.ctx, s.info)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
// Successful GET; this is the network stream we'll read from.
|
||||
s.setResponse(newResponse)
|
||||
resp = newResponse
|
||||
}
|
||||
n, err := resp.Body.Read(p) // Read from the stream (this will return non-nil err if forceRetry is called, from another goroutine, while it is running)
|
||||
|
||||
// Injection mechanism for testing.
|
||||
if s.o.doInjectError && try == s.o.doInjectErrorRound {
|
||||
err = &net.DNSError{IsTemporary: true}
|
||||
}
|
||||
|
||||
// We successfully read data or reached EOF.
|
||||
if err == nil || err == io.EOF {
|
||||
s.info.Offset += int64(n) // Increments the start offset in case we need to make a new HTTP request in the future
|
||||
if s.info.Count != CountToEnd {
|
||||
s.info.Count -= int64(n) // Decrement the count in case we need to make a new HTTP request in the future
|
||||
}
|
||||
return n, err // Return the result to the caller
|
||||
}
|
||||
s.Close() // Error, close stream
|
||||
s.setResponse(nil) // Our stream is no longer good
|
||||
|
||||
// Check the retry count and error code, and decide whether to retry.
|
||||
retriesExhausted := try >= s.o.MaxRetryRequests
|
||||
_, isNetError := err.(net.Error)
|
||||
willRetry := (isNetError || s.wasRetryableEarlyClose(err)) && !retriesExhausted
|
||||
|
||||
// Notify, for logging purposes, of any failures
|
||||
if s.o.NotifyFailedRead != nil {
|
||||
failureCount := try + 1 // because try is zero-based
|
||||
s.o.NotifyFailedRead(failureCount, err, s.info.Offset, s.info.Count, willRetry)
|
||||
}
|
||||
|
||||
if willRetry {
|
||||
continue
|
||||
// Loop around and try to get and read from new stream.
|
||||
}
|
||||
return n, err // Not retryable, or retries exhausted, so just return
|
||||
}
|
||||
}
|
||||
|
||||
// By default, we allow early Closing, from another concurrent goroutine, to be used to force a retry
|
||||
// Is this safe, to close early from another goroutine? Early close ultimately ends up calling
|
||||
// net.Conn.Close, and that is documented as "Any blocked Read or Write operations will be unblocked and return errors"
|
||||
// which is exactly the behaviour we want.
|
||||
// NOTE: if the caller has forced an early Close from a separate goroutine (separate from the Read)
|
||||
// then there are two different types of error that may happen - either the one we check for here,
|
||||
// or a net.Error (due to closure of connection). Which one happens depends on timing. We only need this routine
|
||||
// to check for one, since the other is a net.Error, which our main Read retry loop is already handling.
|
||||
func (s *retryReader) wasRetryableEarlyClose(err error) bool {
|
||||
if s.o.TreatEarlyCloseAsError {
|
||||
return false // user wants all early closes to be errors, and so not retryable
|
||||
}
|
||||
// unfortunately, http.errReadOnClosedResBody is private, so the best we can do here is to check for its text
|
||||
return strings.HasSuffix(err.Error(), ReadOnClosedBodyMessage)
|
||||
}
|
||||
|
||||
const ReadOnClosedBodyMessage = "read on closed response body"
|
||||
|
||||
func (s *retryReader) Close() error {
|
||||
s.responseMu.Lock()
|
||||
defer s.responseMu.Unlock()
|
||||
if s.response != nil && s.response.Body != nil {
|
||||
return s.response.Body.Close()
|
||||
}
|
||||
return nil
|
||||
}
|
|
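A sketch of wrapping an initial download response in a retry reader. doRangedGet is a hypothetical helper standing in for whatever re-issues the ranged GET; the getter receives the updated offset/count that Read maintains above. The external azblob import path is an assumption:

package main

import (
	"context"
	"io"
	"net/http"

	"github.com/Azure/azure-storage-blob-go/azblob"
)

func wrapWithRetries(ctx context.Context, initial *http.Response,
	doRangedGet func(ctx context.Context, offset, count int64, etag azblob.ETag) (*http.Response, error)) io.ReadCloser {
	info := azblob.HTTPGetterInfo{
		Offset: 0,
		Count:  azblob.CountToEnd, // read to the end of the resource
		ETag:   azblob.ETag(initial.Header.Get("ETag")),
	}
	getter := func(ctx context.Context, i azblob.HTTPGetterInfo) (*http.Response, error) {
		return doRangedGet(ctx, i.Offset, i.Count, i.ETag)
	}
	return azblob.NewRetryReader(ctx, initial, info, azblob.RetryReaderOptions{MaxRetryRequests: 2}, getter)
}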
@ -0,0 +1,218 @@
|
|||
package azblob
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// AccountSASSignatureValues is used to generate a Shared Access Signature (SAS) for an Azure Storage account.
|
||||
// For more information, see https://docs.microsoft.com/rest/api/storageservices/constructing-an-account-sas
|
||||
type AccountSASSignatureValues struct {
|
||||
Version string `param:"sv"` // If not specified, this defaults to SASVersion
|
||||
Protocol SASProtocol `param:"spr"` // See the SASProtocol* constants
|
||||
StartTime time.Time `param:"st"` // Not specified if IsZero
|
||||
ExpiryTime time.Time `param:"se"` // Not specified if IsZero
|
||||
Permissions string `param:"sp"` // Create by initializing a AccountSASPermissions and then call String()
|
||||
IPRange IPRange `param:"sip"`
|
||||
Services string `param:"ss"` // Create by initializing AccountSASServices and then call String()
|
||||
ResourceTypes string `param:"srt"` // Create by initializing AccountSASResourceTypes and then call String()
|
||||
}
|
||||
|
||||
// NewSASQueryParameters uses an account's shared key credential to sign this signature values to produce
|
||||
// the proper SAS query parameters.
|
||||
func (v AccountSASSignatureValues) NewSASQueryParameters(sharedKeyCredential *SharedKeyCredential) (SASQueryParameters, error) {
|
||||
// https://docs.microsoft.com/en-us/rest/api/storageservices/Constructing-an-Account-SAS
|
||||
if v.ExpiryTime.IsZero() || v.Permissions == "" || v.ResourceTypes == "" || v.Services == "" {
|
||||
return SASQueryParameters{}, errors.New("account SAS is missing at least one of these: ExpiryTime, Permissions, Service, or ResourceType")
|
||||
}
|
||||
if v.Version == "" {
|
||||
v.Version = SASVersion
|
||||
}
|
||||
perms := &AccountSASPermissions{}
|
||||
if err := perms.Parse(v.Permissions); err != nil {
|
||||
return SASQueryParameters{}, err
|
||||
}
|
||||
v.Permissions = perms.String()
|
||||
|
||||
startTime, expiryTime := FormatTimesForSASSigning(v.StartTime, v.ExpiryTime)
|
||||
|
||||
stringToSign := strings.Join([]string{
|
||||
sharedKeyCredential.AccountName(),
|
||||
v.Permissions,
|
||||
v.Services,
|
||||
v.ResourceTypes,
|
||||
startTime,
|
||||
expiryTime,
|
||||
v.IPRange.String(),
|
||||
string(v.Protocol),
|
||||
v.Version,
|
||||
""}, // That right, the account SAS requires a terminating extra newline
|
||||
"\n")
|
||||
|
||||
signature := sharedKeyCredential.ComputeHMACSHA256(stringToSign)
|
||||
p := SASQueryParameters{
|
||||
// Common SAS parameters
|
||||
version: v.Version,
|
||||
protocol: v.Protocol,
|
||||
startTime: v.StartTime,
|
||||
expiryTime: v.ExpiryTime,
|
||||
permissions: v.Permissions,
|
||||
ipRange: v.IPRange,
|
||||
|
||||
// Account-specific SAS parameters
|
||||
services: v.Services,
|
||||
resourceTypes: v.ResourceTypes,
|
||||
|
||||
// Calculated SAS signature
|
||||
signature: signature,
|
||||
}
|
||||
return p, nil
|
||||
}
|
||||
|
||||
// The AccountSASPermissions type simplifies creating the permissions string for an Azure Storage Account SAS.
|
||||
// Initialize an instance of this type and then call its String method to set AccountSASSignatureValues's Permissions field.
|
||||
type AccountSASPermissions struct {
|
||||
Read, Write, Delete, List, Add, Create, Update, Process bool
|
||||
}
|
||||
|
||||
// String produces the SAS permissions string for an Azure Storage account.
|
||||
// Call this method to set AccountSASSignatureValues's Permissions field.
|
||||
func (p AccountSASPermissions) String() string {
|
||||
var buffer bytes.Buffer
|
||||
if p.Read {
|
||||
buffer.WriteRune('r')
|
||||
}
|
||||
if p.Write {
|
||||
buffer.WriteRune('w')
|
||||
}
|
||||
if p.Delete {
|
||||
buffer.WriteRune('d')
|
||||
}
|
||||
if p.List {
|
||||
buffer.WriteRune('l')
|
||||
}
|
||||
if p.Add {
|
||||
buffer.WriteRune('a')
|
||||
}
|
||||
if p.Create {
|
||||
buffer.WriteRune('c')
|
||||
}
|
||||
if p.Update {
|
||||
buffer.WriteRune('u')
|
||||
}
|
||||
if p.Process {
|
||||
buffer.WriteRune('p')
|
||||
}
|
||||
return buffer.String()
|
||||
}
|
||||
|
||||
// Parse initializes the AccountSASPermissions's fields from a string.
|
||||
func (p *AccountSASPermissions) Parse(s string) error {
|
||||
*p = AccountSASPermissions{} // Clear out the flags
|
||||
for _, r := range s {
|
||||
switch r {
|
||||
case 'r':
|
||||
p.Read = true
|
||||
case 'w':
|
||||
p.Write = true
|
||||
case 'd':
|
||||
p.Delete = true
|
||||
case 'l':
|
||||
p.List = true
|
||||
case 'a':
|
||||
p.Add = true
|
||||
case 'c':
|
||||
p.Create = true
|
||||
case 'u':
|
||||
p.Update = true
|
||||
case 'p':
|
||||
p.Process = true
|
||||
default:
|
||||
return fmt.Errorf("Invalid permission character: '%v'", r)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// The AccountSASServices type simplifies creating the services string for an Azure Storage Account SAS.
|
||||
// Initialize an instance of this type and then call its String method to set AccountSASSignatureValues's Services field.
|
||||
type AccountSASServices struct {
|
||||
Blob, Queue, File bool
|
||||
}
|
||||
|
||||
// String produces the SAS services string for an Azure Storage account.
|
||||
// Call this method to set AccountSASSignatureValues's Services field.
|
||||
func (s AccountSASServices) String() string {
|
||||
var buffer bytes.Buffer
|
||||
if s.Blob {
|
||||
buffer.WriteRune('b')
|
||||
}
|
||||
if s.Queue {
|
||||
buffer.WriteRune('q')
|
||||
}
|
||||
if s.File {
|
||||
buffer.WriteRune('f')
|
||||
}
|
||||
return buffer.String()
|
||||
}
|
||||
|
||||
// Parse initializes the AccountSASServices' fields from a string.
|
||||
func (a *AccountSASServices) Parse(s string) error {
|
||||
*a = AccountSASServices{} // Clear out the flags
|
||||
for _, r := range s {
|
||||
switch r {
|
||||
case 'b':
|
||||
a.Blob = true
|
||||
case 'q':
|
||||
a.Queue = true
|
||||
case 'f':
|
||||
a.File = true
|
||||
default:
|
||||
return fmt.Errorf("Invalid service character: '%v'", r)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// The AccountSASResourceTypes type simplifies creating the resource types string for an Azure Storage Account SAS.
|
||||
// Initialize an instance of this type and then call its String method to set AccountSASSignatureValues's ResourceTypes field.
|
||||
type AccountSASResourceTypes struct {
|
||||
Service, Container, Object bool
|
||||
}
|
||||
|
||||
// String produces the SAS resource types string for an Azure Storage account.
|
||||
// Call this method to set AccountSASSignatureValues's ResourceTypes field.
|
||||
func (rt AccountSASResourceTypes) String() string {
|
||||
var buffer bytes.Buffer
|
||||
if rt.Service {
|
||||
buffer.WriteRune('s')
|
||||
}
|
||||
if rt.Container {
|
||||
buffer.WriteRune('c')
|
||||
}
|
||||
if rt.Object {
|
||||
buffer.WriteRune('o')
|
||||
}
|
||||
return buffer.String()
|
||||
}
|
||||
|
||||
// Parse initializes the AccountSASResourceType's fields from a string.
|
||||
func (rt *AccountSASResourceTypes) Parse(s string) error {
|
||||
*rt = AccountSASResourceTypes{} // Clear out the flags
|
||||
for _, r := range s {
|
||||
switch r {
|
||||
case 's':
|
||||
rt.Service = true
|
||||
case 'c':
|
||||
rt.Container = true
|
||||
case 'o':
|
||||
rt.Object = true
|
||||
default:
|
||||
return fmt.Errorf("Invalid resource type: '%v'", r)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
|
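Putting the types above together, a sketch of generating an account SAS query string. The account name and key are placeholders, and the two-value form of NewSharedKeyCredential is an assumption (its signature has varied across azblob releases):

package main

import (
	"time"

	"github.com/Azure/azure-storage-blob-go/azblob"
)

func makeAccountSAS() (string, error) {
	cred, err := azblob.NewSharedKeyCredential("myaccount", "bXlhY2NvdW50a2V5") // placeholder key
	if err != nil {
		return "", err
	}
	values := azblob.AccountSASSignatureValues{
		Protocol:      azblob.SASProtocolHTTPS,
		ExpiryTime:    time.Now().UTC().Add(48 * time.Hour),
		Permissions:   azblob.AccountSASPermissions{Read: true, List: true}.String(),
		Services:      azblob.AccountSASServices{Blob: true}.String(),
		ResourceTypes: azblob.AccountSASResourceTypes{Container: true, Object: true}.String(),
	}
	qp, err := values.NewSASQueryParameters(cred)
	if err != nil {
		return "", err
	}
	return qp.Encode(), nil // append after '?' on an account, container, or blob URL
}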
@ -0,0 +1,261 @@
|
|||
package azblob
|
||||
|
||||
import (
|
||||
"net"
|
||||
"net/url"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// SASVersion indicates the SAS version.
|
||||
const SASVersion = ServiceVersion
|
||||
|
||||
type SASProtocol string
|
||||
|
||||
const (
|
||||
// SASProtocolHTTPS can be specified for a SAS protocol
|
||||
SASProtocolHTTPS SASProtocol = "https"
|
||||
|
||||
// SASProtocolHTTPSandHTTP can be specified for a SAS protocol
|
||||
SASProtocolHTTPSandHTTP SASProtocol = "https,http"
|
||||
)
|
||||
|
||||
// FormatTimesForSASSigning converts a time.Time to a snapshotTimeFormat string suitable for a
|
||||
// SASField's StartTime or ExpiryTime fields. Returns "" if value.IsZero().
|
||||
func FormatTimesForSASSigning(startTime, expiryTime time.Time) (string, string) {
|
||||
ss := ""
|
||||
if !startTime.IsZero() {
|
||||
ss = startTime.Format(SASTimeFormat) // "yyyy-MM-ddTHH:mm:ssZ"
|
||||
}
|
||||
se := ""
|
||||
if !expiryTime.IsZero() {
|
||||
se = expiryTime.Format(SASTimeFormat) // "yyyy-MM-ddTHH:mm:ssZ"
|
||||
}
|
||||
return ss, se
|
||||
}
|
||||
|
||||
// SASTimeFormat represents the format of a SAS start or expiry time. Use it when formatting/parsing a time.Time.
|
||||
const SASTimeFormat = "2006-01-02T15:04:05Z" //"2017-07-27T00:00:00Z" // ISO 8601
|
||||
|
||||
// https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas
|
||||
|
||||
// A SASQueryParameters object represents the components that make up an Azure Storage SAS' query parameters.
|
||||
// You parse a map of query parameters into its fields by calling NewSASQueryParameters(). You add the components
|
||||
// to a query parameter map by calling AddToValues().
|
||||
// NOTE: Changing any field requires computing a new SAS signature using a XxxSASSignatureValues type.
|
||||
//
|
||||
// This type defines the components used by all Azure Storage resources (Containers, Blobs, Files, & Queues).
|
||||
type SASQueryParameters struct {
|
||||
// All members are immutable or values so copies of this struct are goroutine-safe.
|
||||
version string `param:"sv"`
|
||||
services string `param:"ss"`
|
||||
resourceTypes string `param:"srt"`
|
||||
protocol SASProtocol `param:"spr"`
|
||||
startTime time.Time `param:"st"`
|
||||
expiryTime time.Time `param:"se"`
|
||||
ipRange IPRange `param:"sip"`
|
||||
identifier string `param:"si"`
|
||||
resource string `param:"sr"`
|
||||
permissions string `param:"sp"`
|
||||
signature string `param:"sig"`
|
||||
cacheControl string `param:"rscc"`
|
||||
contentDisposition string `param:"rscd"`
|
||||
contentEncoding string `param:"rsce"`
|
||||
contentLanguage string `param:"rscl"`
|
||||
contentType string `param:"rsct"`
|
||||
}
|
||||
|
||||
func (p *SASQueryParameters) Version() string {
|
||||
return p.version
|
||||
}
|
||||
|
||||
func (p *SASQueryParameters) Services() string {
|
||||
return p.services
|
||||
}
|
||||
func (p *SASQueryParameters) ResourceTypes() string {
|
||||
return p.resourceTypes
|
||||
}
|
||||
func (p *SASQueryParameters) Protocol() SASProtocol {
|
||||
return p.protocol
|
||||
}
|
||||
func (p *SASQueryParameters) StartTime() time.Time {
|
||||
return p.startTime
|
||||
}
|
||||
func (p *SASQueryParameters) ExpiryTime() time.Time {
|
||||
return p.expiryTime
|
||||
}
|
||||
|
||||
func (p *SASQueryParameters) IPRange() IPRange {
|
||||
return p.ipRange
|
||||
}
|
||||
|
||||
func (p *SASQueryParameters) Identifier() string {
|
||||
return p.identifier
|
||||
}
|
||||
|
||||
func (p *SASQueryParameters) Resource() string {
|
||||
return p.resource
|
||||
}
|
||||
func (p *SASQueryParameters) Permissions() string {
|
||||
return p.permissions
|
||||
}
|
||||
|
||||
func (p *SASQueryParameters) Signature() string {
|
||||
return p.signature
|
||||
}
|
||||
|
||||
func (p *SASQueryParameters) CacheControl() string {
|
||||
return p.cacheControl
|
||||
}
|
||||
|
||||
func (p *SASQueryParameters) ContentDisposition() string {
|
||||
return p.contentDisposition
|
||||
}
|
||||
|
||||
func (p *SASQueryParameters) ContentEncoding() string {
|
||||
return p.contentEncoding
|
||||
}
|
||||
|
||||
func (p *SASQueryParameters) ContentLanguage() string {
|
||||
return p.contentLanguage
|
||||
}
|
||||
|
||||
func (p *SASQueryParameters) ContentType() string {
|
||||
return p.contentType
|
||||
}
|
||||
|
||||
// IPRange represents a SAS IP range's start IP and (optionally) end IP.
|
||||
type IPRange struct {
|
||||
Start net.IP // Not specified if length = 0
|
||||
End net.IP // Not specified if length = 0
|
||||
}
|
||||
|
||||
// String returns a string representation of an IPRange.
|
||||
func (ipr *IPRange) String() string {
|
||||
if len(ipr.Start) == 0 {
|
||||
return ""
|
||||
}
|
||||
start := ipr.Start.String()
|
||||
if len(ipr.End) == 0 {
|
||||
return start
|
||||
}
|
||||
return start + "-" + ipr.End.String()
|
||||
}
|
||||
|
||||
// NewSASQueryParameters creates and initializes a SASQueryParameters object based on the
|
||||
// query parameter map's passed-in values. If deleteSASParametersFromValues is true,
|
||||
// all SAS-related query parameters are removed from the passed-in map. If
|
||||
// deleteSASParametersFromValues is false, the passed-in map is unaltered.
|
||||
func newSASQueryParameters(values url.Values, deleteSASParametersFromValues bool) SASQueryParameters {
|
||||
p := SASQueryParameters{}
|
||||
for k, v := range values {
|
||||
val := v[0]
|
||||
isSASKey := true
|
||||
switch strings.ToLower(k) {
|
||||
case "sv":
|
||||
p.version = val
|
||||
case "ss":
|
||||
p.services = val
|
||||
case "srt":
|
||||
p.resourceTypes = val
|
||||
case "spr":
|
||||
p.protocol = SASProtocol(val)
|
||||
case "st":
|
||||
p.startTime, _ = time.Parse(SASTimeFormat, val)
|
||||
case "se":
|
||||
p.expiryTime, _ = time.Parse(SASTimeFormat, val)
|
||||
case "sip":
|
||||
dashIndex := strings.Index(val, "-")
|
||||
if dashIndex == -1 {
|
||||
p.ipRange.Start = net.ParseIP(val)
|
||||
} else {
|
||||
p.ipRange.Start = net.ParseIP(val[:dashIndex])
|
||||
p.ipRange.End = net.ParseIP(val[dashIndex+1:])
|
||||
}
|
||||
case "si":
|
||||
p.identifier = val
|
||||
case "sr":
|
||||
p.resource = val
|
||||
case "sp":
|
||||
p.permissions = val
|
||||
case "sig":
|
||||
p.signature = val
|
||||
case "rscc":
|
||||
p.cacheControl = val
|
||||
case "rscd":
|
||||
p.contentDisposition = val
|
||||
case "rsce":
|
||||
p.contentEncoding = val
|
||||
case "rscl":
|
||||
p.contentLanguage = val
|
||||
case "rsct":
|
||||
p.contentType = val
|
||||
default:
|
||||
isSASKey = false // We didn't recognize the query parameter
|
||||
}
|
||||
if isSASKey && deleteSASParametersFromValues {
|
||||
delete(values, k)
|
||||
}
|
||||
}
|
||||
return p
|
||||
}
|
||||
|
||||
// addToValues adds the SAS components to the specified query parameters map.
|
||||
func (p *SASQueryParameters) addToValues(v url.Values) url.Values {
|
||||
if p.version != "" {
|
||||
v.Add("sv", p.version)
|
||||
}
|
||||
if p.services != "" {
|
||||
v.Add("ss", p.services)
|
||||
}
|
||||
if p.resourceTypes != "" {
|
||||
v.Add("srt", p.resourceTypes)
|
||||
}
|
||||
if p.protocol != "" {
|
||||
v.Add("spr", string(p.protocol))
|
||||
}
|
||||
if !p.startTime.IsZero() {
|
||||
v.Add("st", p.startTime.Format(SASTimeFormat))
|
||||
}
|
||||
if !p.expiryTime.IsZero() {
|
||||
v.Add("se", p.expiryTime.Format(SASTimeFormat))
|
||||
}
|
||||
if len(p.ipRange.Start) > 0 {
|
||||
v.Add("sip", p.ipRange.String())
|
||||
}
|
||||
if p.identifier != "" {
|
||||
v.Add("si", p.identifier)
|
||||
}
|
||||
if p.resource != "" {
|
||||
v.Add("sr", p.resource)
|
||||
}
|
||||
if p.permissions != "" {
|
||||
v.Add("sp", p.permissions)
|
||||
}
|
||||
if p.signature != "" {
|
||||
v.Add("sig", p.signature)
|
||||
}
|
||||
if p.cacheControl != "" {
|
||||
v.Add("rscc", p.cacheControl)
|
||||
}
|
||||
if p.contentDisposition != "" {
|
||||
v.Add("rscd", p.contentDisposition)
|
||||
}
|
||||
if p.contentEncoding != "" {
|
||||
v.Add("rsce", p.contentEncoding)
|
||||
}
|
||||
if p.contentLanguage != "" {
|
||||
v.Add("rscl", p.contentLanguage)
|
||||
}
|
||||
if p.contentType != "" {
|
||||
v.Add("rsct", p.contentType)
|
||||
}
|
||||
return v
|
||||
}
|
||||
|
||||
// Encode encodes the SAS query parameters into URL encoded form sorted by key.
|
||||
func (p *SASQueryParameters) Encode() string {
|
||||
v := url.Values{}
|
||||
p.addToValues(v)
|
||||
return v.Encode()
|
||||
}
|
|
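Two quick illustrations of the helpers above, showing the exact string forms that end up in the signed SAS (the times and addresses are example values; the external azblob import path is an assumption):

package main

import (
	"fmt"
	"net"
	"time"

	"github.com/Azure/azure-storage-blob-go/azblob"
)

func sasFormattingExamples() {
	start := time.Date(2020, 1, 1, 0, 0, 0, 0, time.UTC)
	st, se := azblob.FormatTimesForSASSigning(start, start.Add(24*time.Hour))
	fmt.Println(st) // 2020-01-01T00:00:00Z
	fmt.Println(se) // 2020-01-02T00:00:00Z

	ipr := azblob.IPRange{Start: net.ParseIP("10.0.0.1"), End: net.ParseIP("10.0.0.255")}
	fmt.Println(ipr.String()) // 10.0.0.1-10.0.0.255 (just the start IP when End is unset)
}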
@ -0,0 +1,131 @@
|
|||
package azblob
|
||||
|
||||
// https://docs.microsoft.com/en-us/rest/api/storageservices/common-rest-api-error-codes
|
||||
|
||||
const (
|
||||
// ServiceCodeNone is the default value. It indicates that the error was related to the service or that the service didn't return a code.
|
||||
ServiceCodeNone ServiceCodeType = ""
|
||||
|
||||
// ServiceCodeAccountAlreadyExists means the specified account already exists.
|
||||
ServiceCodeAccountAlreadyExists ServiceCodeType = "AccountAlreadyExists"
|
||||
|
||||
// ServiceCodeAccountBeingCreated means the specified account is in the process of being created (403).
|
||||
ServiceCodeAccountBeingCreated ServiceCodeType = "AccountBeingCreated"
|
||||
|
||||
// ServiceCodeAccountIsDisabled means the specified account is disabled (403).
|
||||
ServiceCodeAccountIsDisabled ServiceCodeType = "AccountIsDisabled"
|
||||
|
||||
// ServiceCodeAuthenticationFailed means the server failed to authenticate the request. Make sure the value of the Authorization header is formed correctly including the signature (403).
|
||||
ServiceCodeAuthenticationFailed ServiceCodeType = "AuthenticationFailed"
|
||||
|
||||
// ServiceCodeConditionHeadersNotSupported means the condition headers are not supported (400).
|
||||
ServiceCodeConditionHeadersNotSupported ServiceCodeType = "ConditionHeadersNotSupported"
|
||||
|
||||
// ServiceCodeConditionNotMet means the condition specified in the conditional header(s) was not met for a read/write operation (304/412).
|
||||
ServiceCodeConditionNotMet ServiceCodeType = "ConditionNotMet"
|
||||
|
||||
// ServiceCodeEmptyMetadataKey means the key for one of the metadata key-value pairs is empty (400).
|
||||
ServiceCodeEmptyMetadataKey ServiceCodeType = "EmptyMetadataKey"
|
||||
|
||||
// ServiceCodeInsufficientAccountPermissions means read operations are currently disabled or Write operations are not allowed or The account being accessed does not have sufficient permissions to execute this operation (403).
|
||||
ServiceCodeInsufficientAccountPermissions ServiceCodeType = "InsufficientAccountPermissions"
|
||||
|
||||
// ServiceCodeInternalError means the server encountered an internal error. Please retry the request (500).
|
||||
ServiceCodeInternalError ServiceCodeType = "InternalError"
|
||||
|
||||
// ServiceCodeInvalidAuthenticationInfo means the authentication information was not provided in the correct format. Verify the value of Authorization header (400).
|
||||
ServiceCodeInvalidAuthenticationInfo ServiceCodeType = "InvalidAuthenticationInfo"
|
||||
|
||||
// ServiceCodeInvalidHeaderValue means the value provided for one of the HTTP headers was not in the correct format (400).
|
||||
ServiceCodeInvalidHeaderValue ServiceCodeType = "InvalidHeaderValue"
|
||||
|
||||
// ServiceCodeInvalidHTTPVerb means the HTTP verb specified was not recognized by the server (400).
|
||||
ServiceCodeInvalidHTTPVerb ServiceCodeType = "InvalidHttpVerb"
|
||||
|
||||
// ServiceCodeInvalidInput means one of the request inputs is not valid (400).
|
||||
ServiceCodeInvalidInput ServiceCodeType = "InvalidInput"
|
||||
|
||||
// ServiceCodeInvalidMd5 means the MD5 value specified in the request is invalid. The MD5 value must be 128 bits and Base64-encoded (400).
|
||||
ServiceCodeInvalidMd5 ServiceCodeType = "InvalidMd5"
|
||||
|
||||
// ServiceCodeInvalidMetadata means the specified metadata is invalid. It includes characters that are not permitted (400).
|
||||
ServiceCodeInvalidMetadata ServiceCodeType = "InvalidMetadata"
|
||||
|
||||
// ServiceCodeInvalidQueryParameterValue means an invalid value was specified for one of the query parameters in the request URI (400).
|
||||
ServiceCodeInvalidQueryParameterValue ServiceCodeType = "InvalidQueryParameterValue"
|
||||
|
||||
// ServiceCodeInvalidRange means the range specified is invalid for the current size of the resource (416).
|
||||
ServiceCodeInvalidRange ServiceCodeType = "InvalidRange"
|
||||
|
||||
// ServiceCodeInvalidResourceName means the specified resource name contains invalid characters (400).
|
||||
ServiceCodeInvalidResourceName ServiceCodeType = "InvalidResourceName"
|
||||
|
||||
// ServiceCodeInvalidURI means the requested URI does not represent any resource on the server (400).
|
||||
ServiceCodeInvalidURI ServiceCodeType = "InvalidUri"
|
||||
|
||||
// ServiceCodeInvalidXMLDocument means the specified XML is not syntactically valid (400).
|
||||
ServiceCodeInvalidXMLDocument ServiceCodeType = "InvalidXmlDocument"
|
||||
|
||||
// ServiceCodeInvalidXMLNodeValue means the value provided for one of the XML nodes in the request body was not in the correct format (400).
|
||||
ServiceCodeInvalidXMLNodeValue ServiceCodeType = "InvalidXmlNodeValue"
|
||||
|
||||
// ServiceCodeMd5Mismatch means the MD5 value specified in the request did not match the MD5 value calculated by the server (400).
|
||||
ServiceCodeMd5Mismatch ServiceCodeType = "Md5Mismatch"
|
||||
|
||||
// ServiceCodeMetadataTooLarge means the size of the specified metadata exceeds the maximum size permitted (400).
|
||||
ServiceCodeMetadataTooLarge ServiceCodeType = "MetadataTooLarge"
|
||||
|
||||
// ServiceCodeMissingContentLengthHeader means the Content-Length header was not specified (411).
|
||||
ServiceCodeMissingContentLengthHeader ServiceCodeType = "MissingContentLengthHeader"
|
||||
|
||||
// ServiceCodeMissingRequiredQueryParameter means a required query parameter was not specified for this request (400).
|
||||
ServiceCodeMissingRequiredQueryParameter ServiceCodeType = "MissingRequiredQueryParameter"
|
||||
|
||||
// ServiceCodeMissingRequiredHeader means a required HTTP header was not specified (400).
|
||||
ServiceCodeMissingRequiredHeader ServiceCodeType = "MissingRequiredHeader"
|
||||
|
||||
// ServiceCodeMissingRequiredXMLNode means a required XML node was not specified in the request body (400).
|
||||
ServiceCodeMissingRequiredXMLNode ServiceCodeType = "MissingRequiredXmlNode"
|
||||
|
||||
// ServiceCodeMultipleConditionHeadersNotSupported means multiple condition headers are not supported (400).
|
||||
ServiceCodeMultipleConditionHeadersNotSupported ServiceCodeType = "MultipleConditionHeadersNotSupported"
|
||||
|
||||
// ServiceCodeOperationTimedOut means the operation could not be completed within the permitted time (500).
|
||||
ServiceCodeOperationTimedOut ServiceCodeType = "OperationTimedOut"
|
||||
|
||||
// ServiceCodeOutOfRangeInput means one of the request inputs is out of range (400).
|
||||
ServiceCodeOutOfRangeInput ServiceCodeType = "OutOfRangeInput"
|
||||
|
||||
// ServiceCodeOutOfRangeQueryParameterValue means a query parameter specified in the request URI is outside the permissible range (400).
|
||||
ServiceCodeOutOfRangeQueryParameterValue ServiceCodeType = "OutOfRangeQueryParameterValue"
|
||||
|
||||
// ServiceCodeRequestBodyTooLarge means the size of the request body exceeds the maximum size permitted (413).
|
||||
ServiceCodeRequestBodyTooLarge ServiceCodeType = "RequestBodyTooLarge"
|
||||
|
||||
// ServiceCodeResourceTypeMismatch means the specified resource type does not match the type of the existing resource (409).
|
||||
ServiceCodeResourceTypeMismatch ServiceCodeType = "ResourceTypeMismatch"
|
||||
|
||||
// ServiceCodeRequestURLFailedToParse means the url in the request could not be parsed (400).
|
||||
ServiceCodeRequestURLFailedToParse ServiceCodeType = "RequestUrlFailedToParse"
|
||||
|
||||
// ServiceCodeResourceAlreadyExists means the specified resource already exists (409).
|
||||
ServiceCodeResourceAlreadyExists ServiceCodeType = "ResourceAlreadyExists"
|
||||
|
||||
// ServiceCodeResourceNotFound means the specified resource does not exist (404).
|
||||
ServiceCodeResourceNotFound ServiceCodeType = "ResourceNotFound"
|
||||
|
||||
// ServiceCodeServerBusy means the server is currently unable to receive requests. Please retry your request or Ingress/egress is over the account limit or operations per second is over the account limit (503).
|
||||
ServiceCodeServerBusy ServiceCodeType = "ServerBusy"
|
||||
|
||||
// ServiceCodeUnsupportedHeader means one of the HTTP headers specified in the request is not supported (400).
|
||||
ServiceCodeUnsupportedHeader ServiceCodeType = "UnsupportedHeader"
|
||||
|
||||
// ServiceCodeUnsupportedXMLNode means one of the XML nodes specified in the request body is not supported (400).
|
||||
ServiceCodeUnsupportedXMLNode ServiceCodeType = "UnsupportedXmlNode"
|
||||
|
||||
// ServiceCodeUnsupportedQueryParameter means one of the query parameters specified in the request URI is not supported (400).
|
||||
ServiceCodeUnsupportedQueryParameter ServiceCodeType = "UnsupportedQueryParameter"
|
||||
|
||||
// ServiceCodeUnsupportedHTTPVerb means the resource doesn't support the specified HTTP verb (405).
|
||||
ServiceCodeUnsupportedHTTPVerb ServiceCodeType = "UnsupportedHttpVerb"
|
||||
)
|
|
@ -0,0 +1,112 @@
|
|||
package azblob
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/xml"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"sort"
|
||||
|
||||
"github.com/Azure/azure-pipeline-go/pipeline"
|
||||
externalAZBlob "github.com/Azure/azure-storage-blob-go/azblob"
|
||||
)
|
||||
|
||||
func init() {
|
||||
// wire up our custom error handling constructor
|
||||
responseErrorFactory = newStorageError
|
||||
}
|
||||
|
||||
// ServiceCodeType is a string identifying a storage service error.
|
||||
// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/status-and-error-codes2
|
||||
type ServiceCodeType = externalAZBlob.ServiceCodeType
|
||||
|
||||
// StorageError identifies a responder-generated network or response parsing error.
|
||||
type StorageError interface {
|
||||
// ResponseError implements error's Error(), net.Error's Temporary() and Timeout() methods & Response().
|
||||
ResponseError
|
||||
|
||||
// ServiceCode returns a service error code. Your code can use this to make error recovery decisions.
|
||||
ServiceCode() ServiceCodeType
|
||||
}
|
||||
|
||||
// storageError is the internal struct that implements the public StorageError interface.
|
||||
type storageError struct {
|
||||
responseError
|
||||
serviceCode ServiceCodeType
|
||||
details map[string]string
|
||||
}
|
||||
|
||||
// newStorageError creates an error object that implements the error interface.
|
||||
func newStorageError(cause error, response *http.Response, description string) error {
|
||||
return &storageError{
|
||||
responseError: responseError{
|
||||
ErrorNode: pipeline.ErrorNode{}.Initialize(cause, 3),
|
||||
response: response,
|
||||
description: description,
|
||||
},
|
||||
serviceCode: ServiceCodeType(response.Header.Get("x-ms-error-code")),
|
||||
}
|
||||
}
|
||||
|
||||
// ServiceCode returns service-error information. The caller may examine these values but should not modify any of them.
|
||||
func (e *storageError) ServiceCode() ServiceCodeType {
|
||||
return e.serviceCode
|
||||
}
|
||||
|
||||
// Error implements the error interface's Error method to return a string representation of the error.
|
||||
func (e *storageError) Error() string {
|
||||
b := &bytes.Buffer{}
|
||||
fmt.Fprintf(b, "===== RESPONSE ERROR (ServiceCode=%s) =====\n", e.serviceCode)
|
||||
fmt.Fprintf(b, "Description=%s, Details: ", e.description)
|
||||
if len(e.details) == 0 {
|
||||
b.WriteString("(none)\n")
|
||||
} else {
|
||||
b.WriteRune('\n')
|
||||
keys := make([]string, 0, len(e.details))
|
||||
// Alphabetize the details
|
||||
for k := range e.details {
|
||||
keys = append(keys, k)
|
||||
}
|
||||
sort.Strings(keys)
|
||||
for _, k := range keys {
|
||||
fmt.Fprintf(b, " %s: %+v\n", k, e.details[k])
|
||||
}
|
||||
}
|
||||
req := pipeline.Request{Request: e.response.Request}.Copy() // Make a copy of the response's request
|
||||
pipeline.WriteRequestWithResponse(b, prepareRequestForLogging(req), e.response, nil)
|
||||
return e.ErrorNode.Error(b.String())
|
||||
}
|
||||
|
||||
// Temporary returns true if the error occurred due to a temporary condition (including an HTTP status of 500 or 503).
|
||||
func (e *storageError) Temporary() bool {
|
||||
if e.response != nil {
|
||||
if (e.response.StatusCode == http.StatusInternalServerError) || (e.response.StatusCode == http.StatusServiceUnavailable) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return e.ErrorNode.Temporary()
|
||||
}
|
||||
|
||||
// UnmarshalXML performs custom unmarshalling of XML-formatted Azure storage request errors.
|
||||
func (e *storageError) UnmarshalXML(d *xml.Decoder, start xml.StartElement) (err error) {
|
||||
tokName := ""
|
||||
var t xml.Token
|
||||
for t, err = d.Token(); err == nil; t, err = d.Token() {
|
||||
switch tt := t.(type) {
|
||||
case xml.StartElement:
|
||||
tokName = tt.Name.Local
|
||||
break
|
||||
case xml.CharData:
|
||||
switch tokName {
|
||||
case "Message":
|
||||
e.description = string(tt)
|
||||
default:
|
||||
if e.details == nil {
|
||||
e.details = map[string]string{}
|
||||
}
|
||||
e.details[tokName] = string(tt)
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
|
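A sketch of how callers typically consume this error type: assert the error returned by any blob operation to StorageError and branch on its service code. The particular set of codes treated as retryable here is illustrative, not prescriptive:

package main

import "github.com/Azure/azure-storage-blob-go/azblob"

// isRetryableServiceError reports whether err carries a service code that is
// usually worth retrying. err would come from any azblob operation.
func isRetryableServiceError(err error) bool {
	stgErr, ok := err.(azblob.StorageError)
	if !ok {
		return false
	}
	switch stgErr.ServiceCode() {
	case azblob.ServiceCodeServerBusy, azblob.ServiceCodeOperationTimedOut, azblob.ServiceCodeInternalError:
		return true
	}
	return false
}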
@ -0,0 +1,64 @@
|
|||
package azblob
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
// httpRange defines a range of bytes within an HTTP resource, starting at offset and
|
||||
// ending at offset+count. A zero-value httpRange indicates the entire resource. An httpRange
|
||||
// which has an offset but a zero-value count indicates from the offset to the resource's end.
|
||||
type httpRange struct {
|
||||
offset int64
|
||||
count int64
|
||||
}
|
||||
|
||||
func (r httpRange) pointers() *string {
|
||||
if r.offset == 0 && r.count == CountToEnd { // Do common case first for performance
|
||||
return nil // No specified range
|
||||
}
|
||||
endOffset := "" // if count == CountToEnd (0)
|
||||
if r.count > 0 {
|
||||
endOffset = strconv.FormatInt((r.offset+r.count)-1, 10)
|
||||
}
|
||||
dataRange := fmt.Sprintf("bytes=%v-%s", r.offset, endOffset)
|
||||
return &dataRange
|
||||
}
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
func validateSeekableStreamAt0AndGetCount(body io.ReadSeeker) (int64, error) {
|
||||
if body == nil { // nil bodies are "logically" seekable to 0 and are 0 bytes long
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
err := validateSeekableStreamAt0(body)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
count, err := body.Seek(0, io.SeekEnd)
|
||||
if err != nil {
|
||||
return 0, errors.New("body stream must be seekable")
|
||||
}
|
||||
|
||||
body.Seek(0, io.SeekStart)
|
||||
return count, nil
|
||||
}
|
||||
|
||||
// return an error if body is not a valid seekable stream at 0
|
||||
func validateSeekableStreamAt0(body io.ReadSeeker) error {
|
||||
if body == nil { // nil bodies are "logically" seekable to 0
|
||||
return nil
|
||||
}
|
||||
if pos, err := body.Seek(0, io.SeekCurrent); pos != 0 || err != nil {
|
||||
// Help detect programmer error
|
||||
if err != nil {
|
||||
return errors.New("body stream must be seekable")
|
||||
}
|
||||
return errors.New("body stream must be set to position 0")
|
||||
}
|
||||
return nil
|
||||
}
|
|
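Since these helpers are unexported, a short in-package sketch of the pattern the upload paths rely on: measure a seekable body's length, then make sure it is rewound to offset 0 before sending:

package azblob

import (
	"bytes"
	"fmt"
)

func exampleCountBody() {
	body := bytes.NewReader([]byte("hello world"))
	count, err := validateSeekableStreamAt0AndGetCount(body)
	if err != nil {
		panic(err) // not reachable here: the reader is seekable and positioned at 0
	}
	fmt.Println(count) // 11; body has been seeked back to offset 0
}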
@ -0,0 +1,77 @@
|
|||
package azblob
|
||||
|
||||
import (
|
||||
"crypto/rand"
|
||||
"fmt"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
// The UUID reserved variants.
|
||||
const (
|
||||
reservedNCS byte = 0x80
|
||||
reservedRFC4122 byte = 0x40
|
||||
reservedMicrosoft byte = 0x20
|
||||
reservedFuture byte = 0x00
|
||||
)
|
||||
|
||||
// A UUID representation compliant with specification in RFC 4122 document.
|
||||
type uuid [16]byte
|
||||
|
||||
// newUUID returns a new uuid using the RFC 4122 version 4 algorithm.
|
||||
func newUUID() (u uuid) {
|
||||
u = uuid{}
|
||||
// Set all bits to randomly (or pseudo-randomly) chosen values.
|
||||
rand.Read(u[:])
|
||||
u[8] = (u[8] | reservedRFC4122) & 0x7F // u.setVariant(ReservedRFC4122)
|
||||
|
||||
var version byte = 4
|
||||
u[6] = (u[6] & 0xF) | (version << 4) // u.setVersion(4)
|
||||
return
|
||||
}
|
||||
|
||||
// String returns an unparsed version of the generated UUID sequence.
|
||||
func (u uuid) String() string {
|
||||
return fmt.Sprintf("%x-%x-%x-%x-%x", u[0:4], u[4:6], u[6:8], u[8:10], u[10:])
|
||||
}
|
||||
|
||||
// parseUUID parses a string formatted as "03020100-0504-0706-0809-0a0b0c0d0e0f"
|
||||
// or "{03020100-0504-0706-0809-0a0b0c0d0e0f}" into a UUID.
|
||||
func parseUUID(uuidStr string) uuid {
|
||||
char := func(hexString string) byte {
|
||||
i, _ := strconv.ParseUint(hexString, 16, 8)
|
||||
return byte(i)
|
||||
}
|
||||
if uuidStr[0] == '{' {
|
||||
uuidStr = uuidStr[1:] // Skip over the '{'
|
||||
}
|
||||
// 03020100 - 05 04 - 07 06 - 08 09 - 0a 0b 0c 0d 0e 0f
|
||||
// 1 11 1 11 11 1 12 22 2 22 22 22 33 33 33
|
||||
// 01234567 8 90 12 3 45 67 8 90 12 3 45 67 89 01 23 45
|
||||
uuidVal := uuid{
|
||||
char(uuidStr[0:2]),
|
||||
char(uuidStr[2:4]),
|
||||
char(uuidStr[4:6]),
|
||||
char(uuidStr[6:8]),
|
||||
|
||||
char(uuidStr[9:11]),
|
||||
char(uuidStr[11:13]),
|
||||
|
||||
char(uuidStr[14:16]),
|
||||
char(uuidStr[16:18]),
|
||||
|
||||
char(uuidStr[19:21]),
|
||||
char(uuidStr[21:23]),
|
||||
|
||||
char(uuidStr[24:26]),
|
||||
char(uuidStr[26:28]),
|
||||
char(uuidStr[28:30]),
|
||||
char(uuidStr[30:32]),
|
||||
char(uuidStr[32:34]),
|
||||
char(uuidStr[34:36]),
|
||||
}
|
||||
return uuidVal
|
||||
}
|
||||
|
||||
func (u uuid) bytes() []byte {
|
||||
return u[:]
|
||||
}
|
|
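An in-package sketch (uuid and its helpers are unexported) of generating a version-4 UUID and round-tripping it through its canonical string form:

package azblob

import "fmt"

func exampleUUID() {
	u := newUUID()
	s := u.String() // canonical 8-4-4-4-12 hex form, e.g. "3b8f1c2a-..."
	again := parseUUID(s)
	fmt.Println(u == again) // true: parseUUID inverts String for this format
}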
@ -0,0 +1,89 @@
|
|||
// Copyright 2017 Microsoft Corporation. All rights reserved.
|
||||
// Use of this source code is governed by an MIT
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
/*
|
||||
Package azblob allows you to manipulate Azure Storage containers and blob objects.
|
||||
|
||||
URL Types
|
||||
|
||||
The most common types you'll work with are the XxxURL types. The methods of these types make requests
|
||||
against the Azure Storage Service.
|
||||
|
||||
- ServiceURL's methods perform operations on a storage account.
|
||||
- ContainerURL's methods perform operations on an account's container.
|
||||
- BlockBlobURL's methods perform operations on a container's block blob.
|
||||
- AppendBlobURL's methods perform operations on a container's append blob.
|
||||
- PageBlobURL's methods perform operations on a container's page blob.
|
||||
- BlobURL's methods perform operations on a container's blob regardless of the blob's type.
|
||||
|
||||
Internally, each XxxURL object contains a URL and a request pipeline. The URL indicates the endpoint where each HTTP
|
||||
request is sent and the pipeline indicates how the outgoing HTTP request and incoming HTTP response is processed.
|
||||
The pipeline specifies things like retry policies, logging, deserialization of HTTP response payloads, and more.
|
||||
|
||||
Pipelines are threadsafe and may be shared by multiple XxxURL objects. When you create a ServiceURL, you pass
|
||||
an initial pipeline. When you call ServiceURL's NewContainerURL method, the new ContainerURL object has its own
|
||||
URL but it shares the same pipeline as the parent ServiceURL object.
|
||||
|
||||
To work with a blob, call one of ContainerURL's 4 NewXxxBlobURL methods depending on how you want to treat the blob.
|
||||
To treat the blob as a block blob, append blob, or page blob, call NewBlockBlobURL, NewAppendBlobURL, or NewPageBlobURL
|
||||
respectively. These three types are all identical except for the methods they expose; each type exposes the methods
|
||||
relevant to the type of blob represented. If you're not sure how you want to treat a blob, you can call NewBlobURL;
|
||||
this returns an object whose methods are relevant to any kind of blob. When you call ContainerURL's NewXxxBlobURL,
|
||||
the new XxxBlobURL object has its own URL but it shares the same pipeline as the parent ContainerURL object. You
|
||||
can easily switch between blob types (method sets) by calling a ToXxxBlobURL method.
|
||||
|
||||
If you'd like to use a different pipeline with a ServiceURL, ContainerURL, or XxxBlobURL object, then call the XxxURL
|
||||
object's WithPipeline method passing in the desired pipeline. The WithPipeline methods create a new XxxURL object
|
||||
with the same URL as the original but with the specified pipeline.
|
||||
|
||||
Note that XxxURL objects use little memory, are goroutine-safe, and many objects share the same pipeline. This means that
|
||||
XxxURL objects share a lot of system resources making them very efficient.
|
||||
|
||||
All of XxxURL's methods that make HTTP requests return rich error handling information so you can discern network failures,
|
||||
transient failures, timeout failures, service failures, etc. See the StorageError interface for more information and an
|
||||
example of how to deal with errors.
|
||||
|
||||
URL and Shared Access Signature Manipulation
|
||||
|
||||
The library includes a BlobURLParts type for deconstructing and reconstructing URLs. And you can use the following types
|
||||
for generating and parsing Shared Access Signatures (SAS):
|
||||
- Use the AccountSASSignatureValues type to create a SAS for a storage account.
|
||||
- Use the BlobSASSignatureValues type to create a SAS for a container or blob.
|
||||
- Use the SASQueryParameters type to turn signature values into query parameters or to parse query parameters.
|
||||
|
||||
To generate a SAS, you must use the SharedKeyCredential type.
|
||||
|
||||
Credentials
|
||||
|
||||
When creating a request pipeline, you must specify one of this package's credential types.
|
||||
- Call the NewAnonymousCredential function for requests that contain a Shared Access Signature (SAS).
|
||||
- Call the NewSharedKeyCredential function (with an account name & key) to access any account resources. You must also use this
|
||||
to generate Shared Access Signatures.
|
||||
|
||||
HTTP Request Policy Factories
|
||||
|
||||
This package defines several request policy factories for use with the pipeline package.
|
||||
Most applications will not use these factories directly; instead, the NewPipeline
|
||||
function creates these factories, initializes them (via the PipelineOptions type)
|
||||
and returns a pipeline object for use by the XxxURL objects.
|
||||
|
||||
However, for advanced scenarios, developers can access these policy factories directly
|
||||
and even create their own and then construct their own pipeline in order to affect HTTP
|
||||
requests and responses performed by the XxxURL objects. For example, developers can
|
||||
introduce their own logging, random failures, request recording & playback for fast
|
||||
testing, HTTP request pacing, alternate retry mechanisms, metering, metrics, etc. The
|
||||
possibilities are endless!
|
||||
|
||||
Below are the request pipeline policy factory functions that are provided with this
|
||||
package:
|
||||
- NewRetryPolicyFactory Enables rich retry semantics for failed HTTP requests.
|
||||
- NewRequestLogPolicyFactory Enables rich logging support for HTTP requests/responses & failures.
|
||||
- NewTelemetryPolicyFactory Enables simple modification of the HTTP request's User-Agent header so each request reports the SDK version & language/runtime making the requests.
|
||||
- NewUniqueRequestIDPolicyFactory Adds an x-ms-client-request-id header with a unique UUID value to an HTTP request to help with diagnosing failures.
|
||||
|
||||
Also, note that all the NewXxxCredential functions return request policy factory objects which get injected into the pipeline.
|
||||
*/
|
||||
package azblob
|
||||
|
||||
// TokenCredential Use this to access resources using Role-Based Access Control (RBAC).
|
|
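To ground the doc comment above, a minimal end-to-end sketch: build a credential and pipeline, derive the URL types, and upload a small block blob. The account, container, and blob names are placeholders, the external azblob import path is an assumption, and both the two-value NewSharedKeyCredential and the five-argument BlockBlobURL.Upload shown here have shifted across releases, so treat the exact signatures as assumptions:

package main

import (
	"context"
	"net/url"
	"strings"

	"github.com/Azure/azure-storage-blob-go/azblob"
)

func exampleUpload(ctx context.Context) error {
	credential, err := azblob.NewSharedKeyCredential("myaccount", "bXlhY2NvdW50a2V5")
	if err != nil {
		return err
	}
	p := azblob.NewPipeline(credential, azblob.PipelineOptions{})

	u, err := url.Parse("https://myaccount.blob.core.windows.net")
	if err != nil {
		return err
	}
	serviceURL := azblob.NewServiceURL(*u, p)                 // account level
	containerURL := serviceURL.NewContainerURL("mycontainer") // container level, same pipeline
	blobURL := containerURL.NewBlockBlobURL("hello.txt")      // blob level, same pipeline

	_, err = blobURL.Upload(ctx, strings.NewReader("hello"),
		azblob.BlobHTTPHeaders{ContentType: "text/plain"}, azblob.Metadata{}, azblob.BlobAccessConditions{})
	return err
}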
@ -0,0 +1,337 @@
|
|||
package azblob
|
||||
|
||||
// Code generated by Microsoft (R) AutoRest Code Generator.
|
||||
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/base64"
|
||||
"github.com/Azure/azure-pipeline-go/pipeline"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strconv"
|
||||
"time"
|
||||
)
|
||||
|
||||
// appendBlobClient is the client for the AppendBlob methods of the Azblob service.
|
||||
type appendBlobClient struct {
|
||||
managementClient
|
||||
}
|
||||
|
||||
// newAppendBlobClient creates an instance of the appendBlobClient client.
|
||||
func newAppendBlobClient(url url.URL, p pipeline.Pipeline) appendBlobClient {
|
||||
return appendBlobClient{newManagementClient(url, p)}
|
||||
}
|
||||
|
||||
// AppendBlock the Append Block operation commits a new block of data to the end of an existing append blob. The Append
|
||||
// Block operation is permitted only if the blob was created with x-ms-blob-type set to AppendBlob. Append Block is
|
||||
// supported only on version 2015-02-21 or later.
|
||||
//
|
||||
// body is the initial data; body will be closed upon successful return. Callers should ensure closure when receiving an
|
||||
// error.contentLength is the length of the request. timeout is the timeout parameter is expressed in seconds. For more
|
||||
// information, see <a
|
||||
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
|
||||
// Timeouts for Blob Service Operations.</a> transactionalContentMD5 is specify the transactional md5 for the body, to
|
||||
// be validated by the service. leaseID is if specified, the operation only succeeds if the resource's lease is active
|
||||
// and matches this ID. maxSize is optional conditional header. The max length in bytes permitted for the append blob.
|
||||
// If the Append Block operation would cause the blob to exceed that limit or if the blob size is already greater than
|
||||
// the value specified in this header, the request will fail with MaxBlobSizeConditionNotMet error (HTTP status code
|
||||
// 412 - Precondition Failed). appendPosition is optional conditional header, used only for the Append Block operation.
|
||||
// A number indicating the byte offset to compare. Append Block will succeed only if the append position is equal to
|
||||
// this number. If it is not, the request will fail with the AppendPositionConditionNotMet error (HTTP status code 412
|
||||
// - Precondition Failed). ifModifiedSince is specify this header value to operate only on a blob if it has been
|
||||
// modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only on a blob if
|
||||
// it has not been modified since the specified date/time. ifMatch is specify an ETag value to operate only on blobs
|
||||
// with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs without a matching value.
|
||||
// requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics
|
||||
// logs when storage analytics logging is enabled.
|
||||
func (client appendBlobClient) AppendBlock(ctx context.Context, body io.ReadSeeker, contentLength int64, timeout *int32, transactionalContentMD5 []byte, leaseID *string, maxSize *int64, appendPosition *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*AppendBlobAppendBlockResponse, error) {
|
||||
if err := validate([]validation{
|
||||
{targetValue: body,
|
||||
constraints: []constraint{{target: "body", name: null, rule: true, chain: nil}}},
|
||||
{targetValue: timeout,
|
||||
constraints: []constraint{{target: "timeout", name: null, rule: false,
|
||||
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
req, err := client.appendBlockPreparer(body, contentLength, timeout, transactionalContentMD5, leaseID, maxSize, appendPosition, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.appendBlockResponder}, req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resp.(*AppendBlobAppendBlockResponse), err
|
||||
}
|
||||
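An in-package sketch of calling this generated client directly (callers normally go through the exported AppendBlobURL type instead): commit one block, passing nil for every optional and conditional parameter:

package azblob

import (
	"context"
	"strings"
)

func exampleAppendBlock(ctx context.Context, client appendBlobClient) error {
	body := strings.NewReader("log line\n")
	_, err := client.AppendBlock(ctx, body, int64(body.Len()),
		nil, // timeout
		nil, // transactionalContentMD5
		nil, // leaseID
		nil, // maxSize
		nil, // appendPosition
		nil, // ifModifiedSince
		nil, // ifUnmodifiedSince
		nil, // ifMatch
		nil, // ifNoneMatch
		nil, // requestID
	)
	return err
}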
|
||||
// appendBlockPreparer prepares the AppendBlock request.
|
||||
func (client appendBlobClient) appendBlockPreparer(body io.ReadSeeker, contentLength int64, timeout *int32, transactionalContentMD5 []byte, leaseID *string, maxSize *int64, appendPosition *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
|
||||
req, err := pipeline.NewRequest("PUT", client.url, body)
|
||||
if err != nil {
|
||||
return req, pipeline.NewError(err, "failed to create request")
|
||||
}
|
||||
params := req.URL.Query()
|
||||
if timeout != nil {
|
||||
params.Set("timeout", strconv.FormatInt(int64(*timeout), 10))
|
||||
}
|
||||
params.Set("comp", "appendblock")
|
||||
req.URL.RawQuery = params.Encode()
|
||||
req.Header.Set("Content-Length", strconv.FormatInt(contentLength, 10))
|
||||
if transactionalContentMD5 != nil {
|
||||
req.Header.Set("Content-MD5", base64.StdEncoding.EncodeToString(transactionalContentMD5))
|
||||
}
|
||||
if leaseID != nil {
|
||||
req.Header.Set("x-ms-lease-id", *leaseID)
|
||||
}
|
||||
if maxSize != nil {
|
||||
req.Header.Set("x-ms-blob-condition-maxsize", strconv.FormatInt(*maxSize, 10))
|
||||
}
|
||||
if appendPosition != nil {
|
||||
req.Header.Set("x-ms-blob-condition-appendpos", strconv.FormatInt(*appendPosition, 10))
|
||||
}
|
||||
if ifModifiedSince != nil {
|
||||
req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123))
|
||||
}
|
||||
if ifUnmodifiedSince != nil {
|
||||
req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123))
|
||||
}
|
||||
if ifMatch != nil {
|
||||
req.Header.Set("If-Match", string(*ifMatch))
|
||||
}
|
||||
if ifNoneMatch != nil {
|
||||
req.Header.Set("If-None-Match", string(*ifNoneMatch))
|
||||
}
|
||||
req.Header.Set("x-ms-version", ServiceVersion)
|
||||
if requestID != nil {
|
||||
req.Header.Set("x-ms-client-request-id", *requestID)
|
||||
}
|
||||
return req, nil
|
||||
}
|
||||
|
||||
// appendBlockResponder handles the response to the AppendBlock request.
|
||||
func (client appendBlobClient) appendBlockResponder(resp pipeline.Response) (pipeline.Response, error) {
|
||||
err := validateResponse(resp, http.StatusOK, http.StatusCreated)
|
||||
if resp == nil {
|
||||
return nil, err
|
||||
}
|
||||
io.Copy(ioutil.Discard, resp.Response().Body)
|
||||
resp.Response().Body.Close()
|
||||
return &AppendBlobAppendBlockResponse{rawResponse: resp.Response()}, err
|
||||
}
|
||||
|
||||
// AppendBlockFromURL the Append Block operation commits a new block of data to the end of an existing append blob
|
||||
// where the contents are read from a source url. The Append Block operation is permitted only if the blob was created
|
||||
// with x-ms-blob-type set to AppendBlob. Append Block is supported only on version 2015-02-21 or later.
|
||||
//
|
||||
// sourceURL is specify a URL to the copy source. contentLength is the length of the request. sourceRange is bytes of
|
||||
// source data in the specified range. sourceContentMD5 is specify the md5 calculated for the range of bytes that must
|
||||
// be read from the copy source. timeout is the timeout parameter is expressed in seconds. For more information, see <a
|
||||
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
|
||||
// Timeouts for Blob Service Operations.</a> transactionalContentMD5 is specify the transactional md5 for the body, to
|
||||
// be validated by the service. leaseID is if specified, the operation only succeeds if the resource's lease is active
|
||||
// and matches this ID. maxSize is optional conditional header. The max length in bytes permitted for the append blob.
|
||||
// If the Append Block operation would cause the blob to exceed that limit or if the blob size is already greater than
|
||||
// the value specified in this header, the request will fail with MaxBlobSizeConditionNotMet error (HTTP status code
|
||||
// 412 - Precondition Failed). appendPosition is optional conditional header, used only for the Append Block operation.
|
||||
// A number indicating the byte offset to compare. Append Block will succeed only if the append position is equal to
|
||||
// this number. If it is not, the request will fail with the AppendPositionConditionNotMet error (HTTP status code 412
|
||||
// - Precondition Failed). ifModifiedSince is specify this header value to operate only on a blob if it has been
|
||||
// modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only on a blob if
|
||||
// it has not been modified since the specified date/time. ifMatch is specify an ETag value to operate only on blobs
|
||||
// with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs without a matching value.
|
||||
// requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics
|
||||
// logs when storage analytics logging is enabled.
|
||||
func (client appendBlobClient) AppendBlockFromURL(ctx context.Context, sourceURL string, contentLength int64, sourceRange *string, sourceContentMD5 []byte, timeout *int32, transactionalContentMD5 []byte, leaseID *string, maxSize *int64, appendPosition *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*AppendBlobAppendBlockFromURLResponse, error) {
|
||||
if err := validate([]validation{
|
||||
{targetValue: timeout,
|
||||
constraints: []constraint{{target: "timeout", name: null, rule: false,
|
||||
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
req, err := client.appendBlockFromURLPreparer(sourceURL, contentLength, sourceRange, sourceContentMD5, timeout, transactionalContentMD5, leaseID, maxSize, appendPosition, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.appendBlockFromURLResponder}, req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resp.(*AppendBlobAppendBlockFromURLResponse), err
|
||||
}
|
||||
|
||||
// appendBlockFromURLPreparer prepares the AppendBlockFromURL request.
|
||||
func (client appendBlobClient) appendBlockFromURLPreparer(sourceURL string, contentLength int64, sourceRange *string, sourceContentMD5 []byte, timeout *int32, transactionalContentMD5 []byte, leaseID *string, maxSize *int64, appendPosition *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
|
||||
req, err := pipeline.NewRequest("PUT", client.url, nil)
|
||||
if err != nil {
|
||||
return req, pipeline.NewError(err, "failed to create request")
|
||||
}
|
||||
params := req.URL.Query()
|
||||
if timeout != nil {
|
||||
params.Set("timeout", strconv.FormatInt(int64(*timeout), 10))
|
||||
}
|
||||
params.Set("comp", "appendblock")
|
||||
req.URL.RawQuery = params.Encode()
|
||||
req.Header.Set("x-ms-copy-source", sourceURL)
|
||||
if sourceRange != nil {
|
||||
req.Header.Set("x-ms-source-range", *sourceRange)
|
||||
}
|
||||
if sourceContentMD5 != nil {
|
||||
req.Header.Set("x-ms-source-content-md5", base64.StdEncoding.EncodeToString(sourceContentMD5))
|
||||
}
|
||||
req.Header.Set("Content-Length", strconv.FormatInt(contentLength, 10))
|
||||
if transactionalContentMD5 != nil {
|
||||
req.Header.Set("Content-MD5", base64.StdEncoding.EncodeToString(transactionalContentMD5))
|
||||
}
|
||||
if leaseID != nil {
|
||||
req.Header.Set("x-ms-lease-id", *leaseID)
|
||||
}
|
||||
if maxSize != nil {
|
||||
req.Header.Set("x-ms-blob-condition-maxsize", strconv.FormatInt(*maxSize, 10))
|
||||
}
|
||||
if appendPosition != nil {
|
||||
req.Header.Set("x-ms-blob-condition-appendpos", strconv.FormatInt(*appendPosition, 10))
|
||||
}
|
||||
if ifModifiedSince != nil {
|
||||
req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123))
|
||||
}
|
||||
if ifUnmodifiedSince != nil {
|
||||
req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123))
|
||||
}
|
||||
if ifMatch != nil {
|
||||
req.Header.Set("If-Match", string(*ifMatch))
|
||||
}
|
||||
if ifNoneMatch != nil {
|
||||
req.Header.Set("If-None-Match", string(*ifNoneMatch))
|
||||
}
|
||||
req.Header.Set("x-ms-version", ServiceVersion)
|
||||
if requestID != nil {
|
||||
req.Header.Set("x-ms-client-request-id", *requestID)
|
||||
}
|
||||
return req, nil
|
||||
}
|
||||
|
||||
// appendBlockFromURLResponder handles the response to the AppendBlockFromURL request.
func (client appendBlobClient) appendBlockFromURLResponder(resp pipeline.Response) (pipeline.Response, error) {
err := validateResponse(resp, http.StatusOK, http.StatusCreated)
if resp == nil {
return nil, err
}
io.Copy(ioutil.Discard, resp.Response().Body)
resp.Response().Body.Close()
return &AppendBlobAppendBlockFromURLResponse{rawResponse: resp.Response()}, err
}
|
||||
|
||||
// Create the Create Append Blob operation creates a new append blob.
|
||||
//
|
||||
// contentLength is the length of the request. timeout is the timeout parameter is expressed in seconds. For more
|
||||
// information, see <a
|
||||
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
|
||||
// Timeouts for Blob Service Operations.</a> blobContentType is optional. Sets the blob's content type. If specified,
|
||||
// this property is stored with the blob and returned with a read request. blobContentEncoding is optional. Sets the
|
||||
// blob's content encoding. If specified, this property is stored with the blob and returned with a read request.
|
||||
// blobContentLanguage is optional. Set the blob's content language. If specified, this property is stored with the
|
||||
// blob and returned with a read request. blobContentMD5 is optional. An MD5 hash of the blob content. Note that this
|
||||
// hash is not validated, as the hashes for the individual blocks were validated when each was uploaded.
|
||||
// blobCacheControl is optional. Sets the blob's cache control. If specified, this property is stored with the blob and
|
||||
// returned with a read request. metadata is optional. Specifies a user-defined name-value pair associated with the
|
||||
// blob. If no name-value pairs are specified, the operation will copy the metadata from the source blob or file to the
|
||||
// destination blob. If one or more name-value pairs are specified, the destination blob is created with the specified
|
||||
// metadata, and metadata is not copied from the source blob or file. Note that beginning with version 2009-09-19,
|
||||
// metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, Blobs, and
|
||||
// Metadata for more information. leaseID is if specified, the operation only succeeds if the resource's lease is
|
||||
// active and matches this ID. blobContentDisposition is optional. Sets the blob's Content-Disposition header.
|
||||
// ifModifiedSince is specify this header value to operate only on a blob if it has been modified since the specified
|
||||
// date/time. ifUnmodifiedSince is specify this header value to operate only on a blob if it has not been modified
|
||||
// since the specified date/time. ifMatch is specify an ETag value to operate only on blobs with a matching value.
|
||||
// ifNoneMatch is specify an ETag value to operate only on blobs without a matching value. requestID is provides a
|
||||
// client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
|
||||
// analytics logging is enabled.
|
||||
func (client appendBlobClient) Create(ctx context.Context, contentLength int64, timeout *int32, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, blobCacheControl *string, metadata map[string]string, leaseID *string, blobContentDisposition *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*AppendBlobCreateResponse, error) {
|
||||
if err := validate([]validation{
|
||||
{targetValue: timeout,
|
||||
constraints: []constraint{{target: "timeout", name: null, rule: false,
|
||||
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
req, err := client.createPreparer(contentLength, timeout, blobContentType, blobContentEncoding, blobContentLanguage, blobContentMD5, blobCacheControl, metadata, leaseID, blobContentDisposition, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.createResponder}, req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resp.(*AppendBlobCreateResponse), err
|
||||
}
|
||||
|
||||
// createPreparer prepares the Create request.
|
||||
func (client appendBlobClient) createPreparer(contentLength int64, timeout *int32, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, blobCacheControl *string, metadata map[string]string, leaseID *string, blobContentDisposition *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
|
||||
req, err := pipeline.NewRequest("PUT", client.url, nil)
|
||||
if err != nil {
|
||||
return req, pipeline.NewError(err, "failed to create request")
|
||||
}
|
||||
params := req.URL.Query()
|
||||
if timeout != nil {
|
||||
params.Set("timeout", strconv.FormatInt(int64(*timeout), 10))
|
||||
}
|
||||
req.URL.RawQuery = params.Encode()
|
||||
req.Header.Set("Content-Length", strconv.FormatInt(contentLength, 10))
|
||||
if blobContentType != nil {
|
||||
req.Header.Set("x-ms-blob-content-type", *blobContentType)
|
||||
}
|
||||
if blobContentEncoding != nil {
|
||||
req.Header.Set("x-ms-blob-content-encoding", *blobContentEncoding)
|
||||
}
|
||||
if blobContentLanguage != nil {
|
||||
req.Header.Set("x-ms-blob-content-language", *blobContentLanguage)
|
||||
}
|
||||
if blobContentMD5 != nil {
|
||||
req.Header.Set("x-ms-blob-content-md5", base64.StdEncoding.EncodeToString(blobContentMD5))
|
||||
}
|
||||
if blobCacheControl != nil {
|
||||
req.Header.Set("x-ms-blob-cache-control", *blobCacheControl)
|
||||
}
|
||||
if metadata != nil {
|
||||
for k, v := range metadata {
|
||||
req.Header.Set("x-ms-meta-"+k, v)
|
||||
}
|
||||
}
|
||||
if leaseID != nil {
|
||||
req.Header.Set("x-ms-lease-id", *leaseID)
|
||||
}
|
||||
if blobContentDisposition != nil {
|
||||
req.Header.Set("x-ms-blob-content-disposition", *blobContentDisposition)
|
||||
}
|
||||
if ifModifiedSince != nil {
|
||||
req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123))
|
||||
}
|
||||
if ifUnmodifiedSince != nil {
|
||||
req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123))
|
||||
}
|
||||
if ifMatch != nil {
|
||||
req.Header.Set("If-Match", string(*ifMatch))
|
||||
}
|
||||
if ifNoneMatch != nil {
|
||||
req.Header.Set("If-None-Match", string(*ifNoneMatch))
|
||||
}
|
||||
req.Header.Set("x-ms-version", ServiceVersion)
|
||||
if requestID != nil {
|
||||
req.Header.Set("x-ms-client-request-id", *requestID)
|
||||
}
|
||||
req.Header.Set("x-ms-blob-type", "AppendBlob")
|
||||
return req, nil
|
||||
}
|
||||
|
||||
// createResponder handles the response to the Create request.
func (client appendBlobClient) createResponder(resp pipeline.Response) (pipeline.Response, error) {
err := validateResponse(resp, http.StatusOK, http.StatusCreated)
if resp == nil {
return nil, err
}
io.Copy(ioutil.Discard, resp.Response().Body)
resp.Response().Body.Close()
return &AppendBlobCreateResponse{rawResponse: resp.Response()}, err
}
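The generated append-blob client above is normally reached through the exported AppendBlobURL wrapper, but a short sketch helps show how the Create and AppendBlockFromURL signatures above fit together. This is illustrative only and not part of the vendored file: it is written as a standalone snippet in the azblob package, newAppendBlobClient is assumed to exist by analogy with newBlockBlobClient in the next file, and every optional parameter is passed as nil.

package azblob

import (
    "context"
    "net/url"

    "github.com/Azure/azure-pipeline-go/pipeline"
)

// exampleAppendFromURL is a hypothetical helper, not part of the vendored code. It
// creates an empty append blob and then appends `length` bytes read from sourceURL,
// using the signatures shown above. newAppendBlobClient is an assumed constructor,
// analogous to newBlockBlobClient below.
func exampleAppendFromURL(ctx context.Context, blobURL url.URL, p pipeline.Pipeline, sourceURL string, length int64) error {
    client := newAppendBlobClient(blobURL, p) // assumed constructor
    if _, err := client.Create(ctx, 0, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil); err != nil {
        return err
    }
    _, err := client.AppendBlockFromURL(ctx, sourceURL, length, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil)
    return err
}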
(The diff for one additional file is not shown here because of its large size.)
@ -0,0 +1,495 @@
package azblob

// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.

import (
"bytes"
"context"
"encoding/base64"
"encoding/xml"
"io"
"io/ioutil"
"net/http"
"net/url"
"strconv"
"time"

"github.com/Azure/azure-pipeline-go/pipeline"
)

// blockBlobClient is the client for the BlockBlob methods of the Azblob service.
type blockBlobClient struct {
managementClient
}

// newBlockBlobClient creates an instance of the blockBlobClient client.
func newBlockBlobClient(url url.URL, p pipeline.Pipeline) blockBlobClient {
return blockBlobClient{newManagementClient(url, p)}
}
|
||||
|
||||
// CommitBlockList the Commit Block List operation writes a blob by specifying the list of block IDs that make up the
|
||||
// blob. In order to be written as part of a blob, a block must have been successfully written to the server in a prior
|
||||
// Put Block operation. You can call Put Block List to update a blob by uploading only those blocks that have changed,
|
||||
// then committing the new and existing blocks together. You can do this by specifying whether to commit a block from
|
||||
// the committed block list or from the uncommitted block list, or to commit the most recently uploaded version of the
|
||||
// block, whichever list it may belong to.
|
||||
//
|
||||
// timeout is the timeout parameter is expressed in seconds. For more information, see <a
|
||||
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
|
||||
// Timeouts for Blob Service Operations.</a> blobCacheControl is optional. Sets the blob's cache control. If specified,
|
||||
// this property is stored with the blob and returned with a read request. blobContentType is optional. Sets the blob's
|
||||
// content type. If specified, this property is stored with the blob and returned with a read request.
|
||||
// blobContentEncoding is optional. Sets the blob's content encoding. If specified, this property is stored with the
|
||||
// blob and returned with a read request. blobContentLanguage is optional. Set the blob's content language. If
|
||||
// specified, this property is stored with the blob and returned with a read request. blobContentMD5 is optional. An
|
||||
// MD5 hash of the blob content. Note that this hash is not validated, as the hashes for the individual blocks were
|
||||
// validated when each was uploaded. metadata is optional. Specifies a user-defined name-value pair associated with the
|
||||
// blob. If no name-value pairs are specified, the operation will copy the metadata from the source blob or file to the
|
||||
// destination blob. If one or more name-value pairs are specified, the destination blob is created with the specified
|
||||
// metadata, and metadata is not copied from the source blob or file. Note that beginning with version 2009-09-19,
|
||||
// metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, Blobs, and
|
||||
// Metadata for more information. leaseID is if specified, the operation only succeeds if the resource's lease is
|
||||
// active and matches this ID. blobContentDisposition is optional. Sets the blob's Content-Disposition header.
|
||||
// ifModifiedSince is specify this header value to operate only on a blob if it has been modified since the specified
|
||||
// date/time. ifUnmodifiedSince is specify this header value to operate only on a blob if it has not been modified
|
||||
// since the specified date/time. ifMatch is specify an ETag value to operate only on blobs with a matching value.
|
||||
// ifNoneMatch is specify an ETag value to operate only on blobs without a matching value. requestID is provides a
|
||||
// client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
|
||||
// analytics logging is enabled.
|
||||
func (client blockBlobClient) CommitBlockList(ctx context.Context, blocks BlockLookupList, timeout *int32, blobCacheControl *string, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, metadata map[string]string, leaseID *string, blobContentDisposition *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*BlockBlobCommitBlockListResponse, error) {
|
||||
if err := validate([]validation{
|
||||
{targetValue: timeout,
|
||||
constraints: []constraint{{target: "timeout", name: null, rule: false,
|
||||
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
req, err := client.commitBlockListPreparer(blocks, timeout, blobCacheControl, blobContentType, blobContentEncoding, blobContentLanguage, blobContentMD5, metadata, leaseID, blobContentDisposition, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.commitBlockListResponder}, req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resp.(*BlockBlobCommitBlockListResponse), err
|
||||
}
|
||||
|
||||
// commitBlockListPreparer prepares the CommitBlockList request.
|
||||
func (client blockBlobClient) commitBlockListPreparer(blocks BlockLookupList, timeout *int32, blobCacheControl *string, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, metadata map[string]string, leaseID *string, blobContentDisposition *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
|
||||
req, err := pipeline.NewRequest("PUT", client.url, nil)
|
||||
if err != nil {
|
||||
return req, pipeline.NewError(err, "failed to create request")
|
||||
}
|
||||
params := req.URL.Query()
|
||||
if timeout != nil {
|
||||
params.Set("timeout", strconv.FormatInt(int64(*timeout), 10))
|
||||
}
|
||||
params.Set("comp", "blocklist")
|
||||
req.URL.RawQuery = params.Encode()
|
||||
if blobCacheControl != nil {
|
||||
req.Header.Set("x-ms-blob-cache-control", *blobCacheControl)
|
||||
}
|
||||
if blobContentType != nil {
|
||||
req.Header.Set("x-ms-blob-content-type", *blobContentType)
|
||||
}
|
||||
if blobContentEncoding != nil {
|
||||
req.Header.Set("x-ms-blob-content-encoding", *blobContentEncoding)
|
||||
}
|
||||
if blobContentLanguage != nil {
|
||||
req.Header.Set("x-ms-blob-content-language", *blobContentLanguage)
|
||||
}
|
||||
if blobContentMD5 != nil {
|
||||
req.Header.Set("x-ms-blob-content-md5", base64.StdEncoding.EncodeToString(blobContentMD5))
|
||||
}
|
||||
if metadata != nil {
|
||||
for k, v := range metadata {
|
||||
req.Header.Set("x-ms-meta-"+k, v)
|
||||
}
|
||||
}
|
||||
if leaseID != nil {
|
||||
req.Header.Set("x-ms-lease-id", *leaseID)
|
||||
}
|
||||
if blobContentDisposition != nil {
|
||||
req.Header.Set("x-ms-blob-content-disposition", *blobContentDisposition)
|
||||
}
|
||||
if ifModifiedSince != nil {
|
||||
req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123))
|
||||
}
|
||||
if ifUnmodifiedSince != nil {
|
||||
req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123))
|
||||
}
|
||||
if ifMatch != nil {
|
||||
req.Header.Set("If-Match", string(*ifMatch))
|
||||
}
|
||||
if ifNoneMatch != nil {
|
||||
req.Header.Set("If-None-Match", string(*ifNoneMatch))
|
||||
}
|
||||
req.Header.Set("x-ms-version", ServiceVersion)
|
||||
if requestID != nil {
|
||||
req.Header.Set("x-ms-client-request-id", *requestID)
|
||||
}
|
||||
b, err := xml.Marshal(blocks)
|
||||
if err != nil {
|
||||
return req, pipeline.NewError(err, "failed to marshal request body")
|
||||
}
|
||||
req.Header.Set("Content-Type", "application/xml")
|
||||
err = req.SetBody(bytes.NewReader(b))
|
||||
if err != nil {
|
||||
return req, pipeline.NewError(err, "failed to set request body")
|
||||
}
|
||||
return req, nil
|
||||
}
|
||||
|
||||
// commitBlockListResponder handles the response to the CommitBlockList request.
func (client blockBlobClient) commitBlockListResponder(resp pipeline.Response) (pipeline.Response, error) {
err := validateResponse(resp, http.StatusOK, http.StatusCreated)
if resp == nil {
return nil, err
}
io.Copy(ioutil.Discard, resp.Response().Body)
resp.Response().Body.Close()
return &BlockBlobCommitBlockListResponse{rawResponse: resp.Response()}, err
}
|
||||
|
||||
// GetBlockList the Get Block List operation retrieves the list of blocks that have been uploaded as part of a block
|
||||
// blob
|
||||
//
|
||||
// listType is specifies whether to return the list of committed blocks, the list of uncommitted blocks, or both lists
|
||||
// together. snapshot is the snapshot parameter is an opaque DateTime value that, when present, specifies the blob
|
||||
// snapshot to retrieve. For more information on working with blob snapshots, see <a
|
||||
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob">Creating
|
||||
// a Snapshot of a Blob.</a> timeout is the timeout parameter is expressed in seconds. For more information, see <a
|
||||
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
|
||||
// Timeouts for Blob Service Operations.</a> leaseID is if specified, the operation only succeeds if the resource's
|
||||
// lease is active and matches this ID. requestID is provides a client-generated, opaque value with a 1 KB character
|
||||
// limit that is recorded in the analytics logs when storage analytics logging is enabled.
|
||||
func (client blockBlobClient) GetBlockList(ctx context.Context, listType BlockListType, snapshot *string, timeout *int32, leaseID *string, requestID *string) (*BlockList, error) {
|
||||
if err := validate([]validation{
|
||||
{targetValue: timeout,
|
||||
constraints: []constraint{{target: "timeout", name: null, rule: false,
|
||||
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
req, err := client.getBlockListPreparer(listType, snapshot, timeout, leaseID, requestID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.getBlockListResponder}, req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resp.(*BlockList), err
|
||||
}
|
||||
|
||||
// getBlockListPreparer prepares the GetBlockList request.
|
||||
func (client blockBlobClient) getBlockListPreparer(listType BlockListType, snapshot *string, timeout *int32, leaseID *string, requestID *string) (pipeline.Request, error) {
|
||||
req, err := pipeline.NewRequest("GET", client.url, nil)
|
||||
if err != nil {
|
||||
return req, pipeline.NewError(err, "failed to create request")
|
||||
}
|
||||
params := req.URL.Query()
|
||||
if snapshot != nil && len(*snapshot) > 0 {
|
||||
params.Set("snapshot", *snapshot)
|
||||
}
|
||||
params.Set("blocklisttype", string(listType))
|
||||
if timeout != nil {
|
||||
params.Set("timeout", strconv.FormatInt(int64(*timeout), 10))
|
||||
}
|
||||
params.Set("comp", "blocklist")
|
||||
req.URL.RawQuery = params.Encode()
|
||||
if leaseID != nil {
|
||||
req.Header.Set("x-ms-lease-id", *leaseID)
|
||||
}
|
||||
req.Header.Set("x-ms-version", ServiceVersion)
|
||||
if requestID != nil {
|
||||
req.Header.Set("x-ms-client-request-id", *requestID)
|
||||
}
|
||||
return req, nil
|
||||
}
|
||||
|
||||
// getBlockListResponder handles the response to the GetBlockList request.
func (client blockBlobClient) getBlockListResponder(resp pipeline.Response) (pipeline.Response, error) {
err := validateResponse(resp, http.StatusOK)
if resp == nil {
return nil, err
}
result := &BlockList{rawResponse: resp.Response()}
if err != nil {
return result, err
}
defer resp.Response().Body.Close()
b, err := ioutil.ReadAll(resp.Response().Body)
if err != nil {
return result, err
}
if len(b) > 0 {
b = removeBOM(b)
err = xml.Unmarshal(b, result)
if err != nil {
return result, NewResponseError(err, resp.Response(), "failed to unmarshal response body")
}
}
return result, nil
}
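GetBlockList is the one operation in this file whose responder decodes an XML body into a model instead of returning headers only. A small, hypothetical sketch of calling it, under the same standalone-sketch assumptions as the append-blob example earlier (package azblob plus the context, net/url and pipeline imports):

// exampleListCommittedBlocks is illustrative only. The "committed" literal mirrors the
// blocklisttype query parameter set by the preparer above; the named BlockListType
// constants are defined in a models file that is not shown here.
func exampleListCommittedBlocks(ctx context.Context, blobURL url.URL, p pipeline.Pipeline) (*BlockList, error) {
    client := newBlockBlobClient(blobURL, p)
    return client.GetBlockList(ctx, BlockListType("committed"), nil, nil, nil, nil)
}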
|
||||
|
||||
// StageBlock the Stage Block operation creates a new block to be committed as part of a blob
|
||||
//
|
||||
// blockID is a valid Base64 string value that identifies the block. Prior to encoding, the string must be less than or
|
||||
// equal to 64 bytes in size. For a given blob, the length of the value specified for the blockid parameter must be the
|
||||
// same size for each block. contentLength is the length of the request. body is initial data body will be closed upon
|
||||
// successful return. Callers should ensure closure when receiving an error.transactionalContentMD5 is specify the
|
||||
// transactional md5 for the body, to be validated by the service. timeout is the timeout parameter is expressed in
|
||||
// seconds. For more information, see <a
|
||||
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
|
||||
// Timeouts for Blob Service Operations.</a> leaseID is if specified, the operation only succeeds if the resource's
|
||||
// lease is active and matches this ID. requestID is provides a client-generated, opaque value with a 1 KB character
|
||||
// limit that is recorded in the analytics logs when storage analytics logging is enabled.
|
||||
func (client blockBlobClient) StageBlock(ctx context.Context, blockID string, contentLength int64, body io.ReadSeeker, transactionalContentMD5 []byte, timeout *int32, leaseID *string, requestID *string) (*BlockBlobStageBlockResponse, error) {
|
||||
if err := validate([]validation{
|
||||
{targetValue: body,
|
||||
constraints: []constraint{{target: "body", name: null, rule: true, chain: nil}}},
|
||||
{targetValue: timeout,
|
||||
constraints: []constraint{{target: "timeout", name: null, rule: false,
|
||||
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
req, err := client.stageBlockPreparer(blockID, contentLength, body, transactionalContentMD5, timeout, leaseID, requestID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.stageBlockResponder}, req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resp.(*BlockBlobStageBlockResponse), err
|
||||
}
|
||||
|
||||
// stageBlockPreparer prepares the StageBlock request.
|
||||
func (client blockBlobClient) stageBlockPreparer(blockID string, contentLength int64, body io.ReadSeeker, transactionalContentMD5 []byte, timeout *int32, leaseID *string, requestID *string) (pipeline.Request, error) {
|
||||
req, err := pipeline.NewRequest("PUT", client.url, body)
|
||||
if err != nil {
|
||||
return req, pipeline.NewError(err, "failed to create request")
|
||||
}
|
||||
params := req.URL.Query()
|
||||
params.Set("blockid", blockID)
|
||||
if timeout != nil {
|
||||
params.Set("timeout", strconv.FormatInt(int64(*timeout), 10))
|
||||
}
|
||||
params.Set("comp", "block")
|
||||
req.URL.RawQuery = params.Encode()
|
||||
req.Header.Set("Content-Length", strconv.FormatInt(contentLength, 10))
|
||||
if transactionalContentMD5 != nil {
|
||||
req.Header.Set("Content-MD5", base64.StdEncoding.EncodeToString(transactionalContentMD5))
|
||||
}
|
||||
if leaseID != nil {
|
||||
req.Header.Set("x-ms-lease-id", *leaseID)
|
||||
}
|
||||
req.Header.Set("x-ms-version", ServiceVersion)
|
||||
if requestID != nil {
|
||||
req.Header.Set("x-ms-client-request-id", *requestID)
|
||||
}
|
||||
return req, nil
|
||||
}
|
||||
|
||||
// stageBlockResponder handles the response to the StageBlock request.
func (client blockBlobClient) stageBlockResponder(resp pipeline.Response) (pipeline.Response, error) {
err := validateResponse(resp, http.StatusOK, http.StatusCreated)
if resp == nil {
return nil, err
}
io.Copy(ioutil.Discard, resp.Response().Body)
resp.Response().Body.Close()
return &BlockBlobStageBlockResponse{rawResponse: resp.Response()}, err
}
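Putting StageBlock and CommitBlockList together gives the two-phase block upload flow. A hedged sketch follows, under the same standalone assumptions as the earlier sketches plus the bytes and encoding/base64 imports; BlockLookupList and its Latest field come from the models file that is not shown here, so treat that shape as an assumption.

// exampleStageAndCommit is illustrative only. Block IDs must be Base64-encoded and the
// same length for every block in the blob, per the StageBlock comment above.
func exampleStageAndCommit(ctx context.Context, blobURL url.URL, p pipeline.Pipeline, data []byte) error {
    client := newBlockBlobClient(blobURL, p)
    blockID := base64.StdEncoding.EncodeToString([]byte("block-000001"))
    if _, err := client.StageBlock(ctx, blockID, int64(len(data)), bytes.NewReader(data), nil, nil, nil, nil); err != nil {
        return err
    }
    // Committing via the Latest list picks the most recently staged version of each block.
    _, err := client.CommitBlockList(ctx, BlockLookupList{Latest: []string{blockID}}, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil)
    return err
}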
|
||||
|
||||
// StageBlockFromURL the Stage Block operation creates a new block to be committed as part of a blob where the contents
|
||||
// are read from a URL.
|
||||
//
|
||||
// blockID is a valid Base64 string value that identifies the block. Prior to encoding, the string must be less than or
|
||||
// equal to 64 bytes in size. For a given blob, the length of the value specified for the blockid parameter must be the
|
||||
// same size for each block. contentLength is the length of the request. sourceURL is specify a URL to the copy source.
|
||||
// sourceRange is bytes of source data in the specified range. sourceContentMD5 is specify the md5 calculated for the
|
||||
// range of bytes that must be read from the copy source. timeout is the timeout parameter is expressed in seconds. For
|
||||
// more information, see <a
|
||||
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
|
||||
// Timeouts for Blob Service Operations.</a> leaseID is if specified, the operation only succeeds if the resource's
|
||||
// lease is active and matches this ID. requestID is provides a client-generated, opaque value with a 1 KB character
|
||||
// limit that is recorded in the analytics logs when storage analytics logging is enabled.
|
||||
func (client blockBlobClient) StageBlockFromURL(ctx context.Context, blockID string, contentLength int64, sourceURL string, sourceRange *string, sourceContentMD5 []byte, timeout *int32, leaseID *string, requestID *string) (*BlockBlobStageBlockFromURLResponse, error) {
|
||||
if err := validate([]validation{
|
||||
{targetValue: timeout,
|
||||
constraints: []constraint{{target: "timeout", name: null, rule: false,
|
||||
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
req, err := client.stageBlockFromURLPreparer(blockID, contentLength, sourceURL, sourceRange, sourceContentMD5, timeout, leaseID, requestID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.stageBlockFromURLResponder}, req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resp.(*BlockBlobStageBlockFromURLResponse), err
|
||||
}
|
||||
|
||||
// stageBlockFromURLPreparer prepares the StageBlockFromURL request.
|
||||
func (client blockBlobClient) stageBlockFromURLPreparer(blockID string, contentLength int64, sourceURL string, sourceRange *string, sourceContentMD5 []byte, timeout *int32, leaseID *string, requestID *string) (pipeline.Request, error) {
|
||||
req, err := pipeline.NewRequest("PUT", client.url, nil)
|
||||
if err != nil {
|
||||
return req, pipeline.NewError(err, "failed to create request")
|
||||
}
|
||||
params := req.URL.Query()
|
||||
params.Set("blockid", blockID)
|
||||
if timeout != nil {
|
||||
params.Set("timeout", strconv.FormatInt(int64(*timeout), 10))
|
||||
}
|
||||
params.Set("comp", "block")
|
||||
req.URL.RawQuery = params.Encode()
|
||||
req.Header.Set("Content-Length", strconv.FormatInt(contentLength, 10))
|
||||
req.Header.Set("x-ms-copy-source", sourceURL)
|
||||
if sourceRange != nil {
|
||||
req.Header.Set("x-ms-source-range", *sourceRange)
|
||||
}
|
||||
if sourceContentMD5 != nil {
|
||||
req.Header.Set("x-ms-source-content-md5", base64.StdEncoding.EncodeToString(sourceContentMD5))
|
||||
}
|
||||
if leaseID != nil {
|
||||
req.Header.Set("x-ms-lease-id", *leaseID)
|
||||
}
|
||||
req.Header.Set("x-ms-version", ServiceVersion)
|
||||
if requestID != nil {
|
||||
req.Header.Set("x-ms-client-request-id", *requestID)
|
||||
}
|
||||
return req, nil
|
||||
}
|
||||
|
||||
// stageBlockFromURLResponder handles the response to the StageBlockFromURL request.
func (client blockBlobClient) stageBlockFromURLResponder(resp pipeline.Response) (pipeline.Response, error) {
err := validateResponse(resp, http.StatusOK, http.StatusCreated)
if resp == nil {
return nil, err
}
io.Copy(ioutil.Discard, resp.Response().Body)
resp.Response().Body.Close()
return &BlockBlobStageBlockFromURLResponse{rawResponse: resp.Response()}, err
}
|
||||
|
||||
// Upload the Upload Block Blob operation updates the content of an existing block blob. Updating an existing block
|
||||
// blob overwrites any existing metadata on the blob. Partial updates are not supported with Put Blob; the content of
|
||||
// the existing blob is overwritten with the content of the new blob. To perform a partial update of the content of a
|
||||
// block blob, use the Put Block List operation.
|
||||
//
|
||||
// body is initial data body will be closed upon successful return. Callers should ensure closure when receiving an
|
||||
// error.contentLength is the length of the request. timeout is the timeout parameter is expressed in seconds. For more
|
||||
// information, see <a
|
||||
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
|
||||
// Timeouts for Blob Service Operations.</a> blobContentType is optional. Sets the blob's content type. If specified,
|
||||
// this property is stored with the blob and returned with a read request. blobContentEncoding is optional. Sets the
|
||||
// blob's content encoding. If specified, this property is stored with the blob and returned with a read request.
|
||||
// blobContentLanguage is optional. Set the blob's content language. If specified, this property is stored with the
|
||||
// blob and returned with a read request. blobContentMD5 is optional. An MD5 hash of the blob content. Note that this
|
||||
// hash is not validated, as the hashes for the individual blocks were validated when each was uploaded.
|
||||
// blobCacheControl is optional. Sets the blob's cache control. If specified, this property is stored with the blob and
|
||||
// returned with a read request. metadata is optional. Specifies a user-defined name-value pair associated with the
|
||||
// blob. If no name-value pairs are specified, the operation will copy the metadata from the source blob or file to the
|
||||
// destination blob. If one or more name-value pairs are specified, the destination blob is created with the specified
|
||||
// metadata, and metadata is not copied from the source blob or file. Note that beginning with version 2009-09-19,
|
||||
// metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, Blobs, and
|
||||
// Metadata for more information. leaseID is if specified, the operation only succeeds if the resource's lease is
|
||||
// active and matches this ID. blobContentDisposition is optional. Sets the blob's Content-Disposition header.
|
||||
// ifModifiedSince is specify this header value to operate only on a blob if it has been modified since the specified
|
||||
// date/time. ifUnmodifiedSince is specify this header value to operate only on a blob if it has not been modified
|
||||
// since the specified date/time. ifMatch is specify an ETag value to operate only on blobs with a matching value.
|
||||
// ifNoneMatch is specify an ETag value to operate only on blobs without a matching value. requestID is provides a
|
||||
// client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
|
||||
// analytics logging is enabled.
|
||||
func (client blockBlobClient) Upload(ctx context.Context, body io.ReadSeeker, contentLength int64, timeout *int32, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, blobCacheControl *string, metadata map[string]string, leaseID *string, blobContentDisposition *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*BlockBlobUploadResponse, error) {
|
||||
if err := validate([]validation{
|
||||
{targetValue: body,
|
||||
constraints: []constraint{{target: "body", name: null, rule: true, chain: nil}}},
|
||||
{targetValue: timeout,
|
||||
constraints: []constraint{{target: "timeout", name: null, rule: false,
|
||||
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
req, err := client.uploadPreparer(body, contentLength, timeout, blobContentType, blobContentEncoding, blobContentLanguage, blobContentMD5, blobCacheControl, metadata, leaseID, blobContentDisposition, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.uploadResponder}, req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resp.(*BlockBlobUploadResponse), err
|
||||
}
|
||||
|
||||
// uploadPreparer prepares the Upload request.
|
||||
func (client blockBlobClient) uploadPreparer(body io.ReadSeeker, contentLength int64, timeout *int32, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, blobCacheControl *string, metadata map[string]string, leaseID *string, blobContentDisposition *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
|
||||
req, err := pipeline.NewRequest("PUT", client.url, body)
|
||||
if err != nil {
|
||||
return req, pipeline.NewError(err, "failed to create request")
|
||||
}
|
||||
params := req.URL.Query()
|
||||
if timeout != nil {
|
||||
params.Set("timeout", strconv.FormatInt(int64(*timeout), 10))
|
||||
}
|
||||
req.URL.RawQuery = params.Encode()
|
||||
req.Header.Set("Content-Length", strconv.FormatInt(contentLength, 10))
|
||||
if blobContentType != nil {
|
||||
req.Header.Set("x-ms-blob-content-type", *blobContentType)
|
||||
}
|
||||
if blobContentEncoding != nil {
|
||||
req.Header.Set("x-ms-blob-content-encoding", *blobContentEncoding)
|
||||
}
|
||||
if blobContentLanguage != nil {
|
||||
req.Header.Set("x-ms-blob-content-language", *blobContentLanguage)
|
||||
}
|
||||
if blobContentMD5 != nil {
|
||||
req.Header.Set("x-ms-blob-content-md5", base64.StdEncoding.EncodeToString(blobContentMD5))
|
||||
}
|
||||
if blobCacheControl != nil {
|
||||
req.Header.Set("x-ms-blob-cache-control", *blobCacheControl)
|
||||
}
|
||||
if metadata != nil {
|
||||
for k, v := range metadata {
|
||||
req.Header.Set("x-ms-meta-"+k, v)
|
||||
}
|
||||
}
|
||||
if leaseID != nil {
|
||||
req.Header.Set("x-ms-lease-id", *leaseID)
|
||||
}
|
||||
if blobContentDisposition != nil {
|
||||
req.Header.Set("x-ms-blob-content-disposition", *blobContentDisposition)
|
||||
}
|
||||
if ifModifiedSince != nil {
|
||||
req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123))
|
||||
}
|
||||
if ifUnmodifiedSince != nil {
|
||||
req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123))
|
||||
}
|
||||
if ifMatch != nil {
|
||||
req.Header.Set("If-Match", string(*ifMatch))
|
||||
}
|
||||
if ifNoneMatch != nil {
|
||||
req.Header.Set("If-None-Match", string(*ifNoneMatch))
|
||||
}
|
||||
req.Header.Set("x-ms-version", ServiceVersion)
|
||||
if requestID != nil {
|
||||
req.Header.Set("x-ms-client-request-id", *requestID)
|
||||
}
|
||||
req.Header.Set("x-ms-blob-type", "BlockBlob")
|
||||
return req, nil
|
||||
}
|
||||
|
||||
// uploadResponder handles the response to the Upload request.
func (client blockBlobClient) uploadResponder(resp pipeline.Response) (pipeline.Response, error) {
err := validateResponse(resp, http.StatusOK, http.StatusCreated)
if resp == nil {
return nil, err
}
io.Copy(ioutil.Discard, resp.Response().Body)
resp.Response().Body.Close()
return &BlockBlobUploadResponse{rawResponse: resp.Response()}, err
}
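For blobs small enough to send in one request, Upload above replaces the entire blob in a single call rather than staging blocks. A hypothetical sketch, under the same standalone assumptions as the earlier sketches plus the bytes import:

// exampleSingleShotUpload is illustrative only; every optional parameter except the
// content type is left nil.
func exampleSingleShotUpload(ctx context.Context, blobURL url.URL, p pipeline.Pipeline, data []byte) error {
    client := newBlockBlobClient(blobURL, p)
    contentType := "application/octet-stream"
    _, err := client.Upload(ctx, bytes.NewReader(data), int64(len(data)), nil, &contentType, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil)
    return err
}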
@ -0,0 +1,39 @@
package azblob

// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.

import (
"net/url"

"github.com/Azure/azure-pipeline-go/pipeline"
)

const (
// ServiceVersion specifies the version of the operations used in this package.
ServiceVersion = "2018-11-09"
)

// managementClient is the base client for Azblob.
type managementClient struct {
url url.URL
p pipeline.Pipeline
}

// newManagementClient creates an instance of the managementClient client.
func newManagementClient(url url.URL, p pipeline.Pipeline) managementClient {
return managementClient{
url: url,
p: p,
}
}

// URL returns a copy of the URL for this client.
func (mc managementClient) URL() url.URL {
return mc.url
}

// Pipeline returns the pipeline for this client.
func (mc managementClient) Pipeline() pipeline.Pipeline {
return mc.p
}
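managementClient above only carries the URL/pipeline pair that every generated client embeds. A hedged sketch of wiring one up directly with azure-pipeline-go follows; it is illustrative only, and it assumes that a bare pipeline with no extra policy factories is enough for a sketch (the exported azblob types normally assemble a pipeline with retry, telemetry and logging policies instead).

// exampleNewBlockBlobClient is illustrative only (same standalone assumptions as the
// earlier sketches).
func exampleNewBlockBlobClient(rawURL string) (blockBlobClient, error) {
    u, err := url.Parse(rawURL)
    if err != nil {
        return blockBlobClient{}, err
    }
    p := pipeline.NewPipeline([]pipeline.Factory{}, pipeline.Options{})
    return newBlockBlobClient(*u, p), nil
}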
(The diffs for two more files are not shown here because of their large size.)
@ -0,0 +1,881 @@
package azblob

// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.

import (
"context"
"encoding/base64"
"encoding/xml"
"io"
"io/ioutil"
"net/http"
"net/url"
"strconv"
"time"

"github.com/Azure/azure-pipeline-go/pipeline"
)

// pageBlobClient is the client for the PageBlob methods of the Azblob service.
type pageBlobClient struct {
managementClient
}

// newPageBlobClient creates an instance of the pageBlobClient client.
func newPageBlobClient(url url.URL, p pipeline.Pipeline) pageBlobClient {
return pageBlobClient{newManagementClient(url, p)}
}
|
||||
|
||||
// ClearPages the Clear Pages operation clears a set of pages from a page blob
|
||||
//
|
||||
// contentLength is the length of the request. timeout is the timeout parameter is expressed in seconds. For more
|
||||
// information, see <a
|
||||
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
|
||||
// Timeouts for Blob Service Operations.</a> rangeParameter is return only the bytes of the blob in the specified
|
||||
// range. leaseID is if specified, the operation only succeeds if the resource's lease is active and matches this ID.
|
||||
// ifSequenceNumberLessThanOrEqualTo is specify this header value to operate only on a blob if it has a sequence number
|
||||
// less than or equal to the specified. ifSequenceNumberLessThan is specify this header value to operate only on a blob
|
||||
// if it has a sequence number less than the specified. ifSequenceNumberEqualTo is specify this header value to operate
|
||||
// only on a blob if it has the specified sequence number. ifModifiedSince is specify this header value to operate only
|
||||
// on a blob if it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to
|
||||
// operate only on a blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value
|
||||
// to operate only on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs
|
||||
// without a matching value. requestID is provides a client-generated, opaque value with a 1 KB character limit that is
|
||||
// recorded in the analytics logs when storage analytics logging is enabled.
|
||||
func (client pageBlobClient) ClearPages(ctx context.Context, contentLength int64, timeout *int32, rangeParameter *string, leaseID *string, ifSequenceNumberLessThanOrEqualTo *int64, ifSequenceNumberLessThan *int64, ifSequenceNumberEqualTo *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*PageBlobClearPagesResponse, error) {
|
||||
if err := validate([]validation{
|
||||
{targetValue: timeout,
|
||||
constraints: []constraint{{target: "timeout", name: null, rule: false,
|
||||
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
req, err := client.clearPagesPreparer(contentLength, timeout, rangeParameter, leaseID, ifSequenceNumberLessThanOrEqualTo, ifSequenceNumberLessThan, ifSequenceNumberEqualTo, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.clearPagesResponder}, req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resp.(*PageBlobClearPagesResponse), err
|
||||
}
|
||||
|
||||
// clearPagesPreparer prepares the ClearPages request.
|
||||
func (client pageBlobClient) clearPagesPreparer(contentLength int64, timeout *int32, rangeParameter *string, leaseID *string, ifSequenceNumberLessThanOrEqualTo *int64, ifSequenceNumberLessThan *int64, ifSequenceNumberEqualTo *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
|
||||
req, err := pipeline.NewRequest("PUT", client.url, nil)
|
||||
if err != nil {
|
||||
return req, pipeline.NewError(err, "failed to create request")
|
||||
}
|
||||
params := req.URL.Query()
|
||||
if timeout != nil {
|
||||
params.Set("timeout", strconv.FormatInt(int64(*timeout), 10))
|
||||
}
|
||||
params.Set("comp", "page")
|
||||
req.URL.RawQuery = params.Encode()
|
||||
req.Header.Set("Content-Length", strconv.FormatInt(contentLength, 10))
|
||||
if rangeParameter != nil {
|
||||
req.Header.Set("x-ms-range", *rangeParameter)
|
||||
}
|
||||
if leaseID != nil {
|
||||
req.Header.Set("x-ms-lease-id", *leaseID)
|
||||
}
|
||||
if ifSequenceNumberLessThanOrEqualTo != nil {
|
||||
req.Header.Set("x-ms-if-sequence-number-le", strconv.FormatInt(*ifSequenceNumberLessThanOrEqualTo, 10))
|
||||
}
|
||||
if ifSequenceNumberLessThan != nil {
|
||||
req.Header.Set("x-ms-if-sequence-number-lt", strconv.FormatInt(*ifSequenceNumberLessThan, 10))
|
||||
}
|
||||
if ifSequenceNumberEqualTo != nil {
|
||||
req.Header.Set("x-ms-if-sequence-number-eq", strconv.FormatInt(*ifSequenceNumberEqualTo, 10))
|
||||
}
|
||||
if ifModifiedSince != nil {
|
||||
req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123))
|
||||
}
|
||||
if ifUnmodifiedSince != nil {
|
||||
req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123))
|
||||
}
|
||||
if ifMatch != nil {
|
||||
req.Header.Set("If-Match", string(*ifMatch))
|
||||
}
|
||||
if ifNoneMatch != nil {
|
||||
req.Header.Set("If-None-Match", string(*ifNoneMatch))
|
||||
}
|
||||
req.Header.Set("x-ms-version", ServiceVersion)
|
||||
if requestID != nil {
|
||||
req.Header.Set("x-ms-client-request-id", *requestID)
|
||||
}
|
||||
req.Header.Set("x-ms-page-write", "clear")
|
||||
return req, nil
|
||||
}
|
||||
|
||||
// clearPagesResponder handles the response to the ClearPages request.
func (client pageBlobClient) clearPagesResponder(resp pipeline.Response) (pipeline.Response, error) {
err := validateResponse(resp, http.StatusOK, http.StatusCreated)
if resp == nil {
return nil, err
}
io.Copy(ioutil.Discard, resp.Response().Body)
resp.Response().Body.Close()
return &PageBlobClearPagesResponse{rawResponse: resp.Response()}, err
}
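ClearPages sends no body, so its contentLength argument is zero and the page range travels in the x-ms-range header set by the preparer above; page blob ranges are aligned to 512-byte pages. A hypothetical sketch, under the same standalone assumptions as the earlier sketches:

// exampleClearFirstPage is illustrative only; it clears the first 512-byte page of an
// existing page blob.
func exampleClearFirstPage(ctx context.Context, blobURL url.URL, p pipeline.Pipeline) error {
    client := newPageBlobClient(blobURL, p)
    pageRange := "bytes=0-511"
    _, err := client.ClearPages(ctx, 0, nil, &pageRange, nil, nil, nil, nil, nil, nil, nil, nil)
    return err
}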
|
||||
|
||||
// CopyIncremental the Copy Incremental operation copies a snapshot of the source page blob to a destination page blob.
|
||||
// The snapshot is copied such that only the differential changes between the previously copied snapshot are
|
||||
// transferred to the destination. The copied snapshots are complete copies of the original snapshot and can be read or
|
||||
// copied from as usual. This API is supported since REST version 2016-05-31.
|
||||
//
|
||||
// copySource is specifies the name of the source page blob snapshot. This value is a URL of up to 2 KB in length that
|
||||
// specifies a page blob snapshot. The value should be URL-encoded as it would appear in a request URI. The source blob
|
||||
// must either be public or must be authenticated via a shared access signature. timeout is the timeout parameter is
|
||||
// expressed in seconds. For more information, see <a
|
||||
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
|
||||
// Timeouts for Blob Service Operations.</a> ifModifiedSince is specify this header value to operate only on a blob if
|
||||
// it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only
|
||||
// on a blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value to operate
|
||||
// only on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs without a
|
||||
// matching value. requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded
|
||||
// in the analytics logs when storage analytics logging is enabled.
|
||||
func (client pageBlobClient) CopyIncremental(ctx context.Context, copySource string, timeout *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*PageBlobCopyIncrementalResponse, error) {
|
||||
if err := validate([]validation{
|
||||
{targetValue: timeout,
|
||||
constraints: []constraint{{target: "timeout", name: null, rule: false,
|
||||
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
req, err := client.copyIncrementalPreparer(copySource, timeout, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.copyIncrementalResponder}, req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resp.(*PageBlobCopyIncrementalResponse), err
|
||||
}
|
||||
|
||||
// copyIncrementalPreparer prepares the CopyIncremental request.
|
||||
func (client pageBlobClient) copyIncrementalPreparer(copySource string, timeout *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
|
||||
req, err := pipeline.NewRequest("PUT", client.url, nil)
|
||||
if err != nil {
|
||||
return req, pipeline.NewError(err, "failed to create request")
|
||||
}
|
||||
params := req.URL.Query()
|
||||
if timeout != nil {
|
||||
params.Set("timeout", strconv.FormatInt(int64(*timeout), 10))
|
||||
}
|
||||
params.Set("comp", "incrementalcopy")
|
||||
req.URL.RawQuery = params.Encode()
|
||||
if ifModifiedSince != nil {
|
||||
req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123))
|
||||
}
|
||||
if ifUnmodifiedSince != nil {
|
||||
req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123))
|
||||
}
|
||||
if ifMatch != nil {
|
||||
req.Header.Set("If-Match", string(*ifMatch))
|
||||
}
|
||||
if ifNoneMatch != nil {
|
||||
req.Header.Set("If-None-Match", string(*ifNoneMatch))
|
||||
}
|
||||
req.Header.Set("x-ms-copy-source", copySource)
|
||||
req.Header.Set("x-ms-version", ServiceVersion)
|
||||
if requestID != nil {
|
||||
req.Header.Set("x-ms-client-request-id", *requestID)
|
||||
}
|
||||
return req, nil
|
||||
}
|
||||
|
||||
// copyIncrementalResponder handles the response to the CopyIncremental request.
func (client pageBlobClient) copyIncrementalResponder(resp pipeline.Response) (pipeline.Response, error) {
err := validateResponse(resp, http.StatusOK, http.StatusAccepted)
if resp == nil {
return nil, err
}
io.Copy(ioutil.Discard, resp.Response().Body)
resp.Response().Body.Close()
return &PageBlobCopyIncrementalResponse{rawResponse: resp.Response()}, err
}
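CopyIncremental transfers only the pages that changed since the previous snapshot copy, and the copySource must be the URL of a page-blob snapshot, per the comment above. A hypothetical sketch, under the same standalone assumptions as the earlier sketches:

// exampleIncrementalCopy is illustrative only.
func exampleIncrementalCopy(ctx context.Context, destBlobURL url.URL, p pipeline.Pipeline, sourceSnapshotURL string) error {
    client := newPageBlobClient(destBlobURL, p)
    _, err := client.CopyIncremental(ctx, sourceSnapshotURL, nil, nil, nil, nil, nil, nil)
    return err
}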
|
||||
|
||||
// Create the Create operation creates a new page blob.
|
||||
//
|
||||
// contentLength is the length of the request. blobContentLength is this header specifies the maximum size for the page
|
||||
// blob, up to 1 TB. The page blob size must be aligned to a 512-byte boundary. timeout is the timeout parameter is
|
||||
// expressed in seconds. For more information, see <a
|
||||
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
|
||||
// Timeouts for Blob Service Operations.</a> blobContentType is optional. Sets the blob's content type. If specified,
|
||||
// this property is stored with the blob and returned with a read request. blobContentEncoding is optional. Sets the
|
||||
// blob's content encoding. If specified, this property is stored with the blob and returned with a read request.
|
||||
// blobContentLanguage is optional. Set the blob's content language. If specified, this property is stored with the
|
||||
// blob and returned with a read request. blobContentMD5 is optional. An MD5 hash of the blob content. Note that this
|
||||
// hash is not validated, as the hashes for the individual blocks were validated when each was uploaded.
|
||||
// blobCacheControl is optional. Sets the blob's cache control. If specified, this property is stored with the blob and
|
||||
// returned with a read request. metadata is optional. Specifies a user-defined name-value pair associated with the
|
||||
// blob. If no name-value pairs are specified, the operation will copy the metadata from the source blob or file to the
|
||||
// destination blob. If one or more name-value pairs are specified, the destination blob is created with the specified
|
||||
// metadata, and metadata is not copied from the source blob or file. Note that beginning with version 2009-09-19,
|
||||
// metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, Blobs, and
|
||||
// Metadata for more information. leaseID is if specified, the operation only succeeds if the resource's lease is
|
||||
// active and matches this ID. blobContentDisposition is optional. Sets the blob's Content-Disposition header.
|
||||
// ifModifiedSince is specify this header value to operate only on a blob if it has been modified since the specified
|
||||
// date/time. ifUnmodifiedSince is specify this header value to operate only on a blob if it has not been modified
|
||||
// since the specified date/time. ifMatch is specify an ETag value to operate only on blobs with a matching value.
|
||||
// ifNoneMatch is specify an ETag value to operate only on blobs without a matching value. blobSequenceNumber is set
|
||||
// for page blobs only. The sequence number is a user-controlled value that you can use to track requests. The value of
|
||||
// the sequence number must be between 0 and 2^63 - 1. requestID is provides a client-generated, opaque value with a 1
|
||||
// KB character limit that is recorded in the analytics logs when storage analytics logging is enabled.
|
||||
func (client pageBlobClient) Create(ctx context.Context, contentLength int64, blobContentLength int64, timeout *int32, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, blobCacheControl *string, metadata map[string]string, leaseID *string, blobContentDisposition *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, blobSequenceNumber *int64, requestID *string) (*PageBlobCreateResponse, error) {
|
||||
if err := validate([]validation{
|
||||
{targetValue: timeout,
|
||||
constraints: []constraint{{target: "timeout", name: null, rule: false,
|
||||
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
req, err := client.createPreparer(contentLength, blobContentLength, timeout, blobContentType, blobContentEncoding, blobContentLanguage, blobContentMD5, blobCacheControl, metadata, leaseID, blobContentDisposition, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, blobSequenceNumber, requestID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.createResponder}, req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resp.(*PageBlobCreateResponse), err
|
||||
}
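// Example (editor's sketch, not part of the generated code): creating an empty
// 4 MiB page blob. "ctx" is a context.Context and "client" a pageBlobClient
// value obtained from this package's constructor; both are placeholders. All
// optional parameters are passed as nil, and the size must be a multiple of 512.
//
//	_, err := client.Create(ctx, 0, 4*1024*1024, nil, nil, nil, nil, nil, nil,
//		nil, nil, nil, nil, nil, nil, nil, nil, nil)
//	if err != nil {
//		// handle the error, e.g. inspect it as a ResponseError
//	}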
|
||||
|
||||
// createPreparer prepares the Create request.
|
||||
func (client pageBlobClient) createPreparer(contentLength int64, blobContentLength int64, timeout *int32, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, blobCacheControl *string, metadata map[string]string, leaseID *string, blobContentDisposition *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, blobSequenceNumber *int64, requestID *string) (pipeline.Request, error) {
|
||||
req, err := pipeline.NewRequest("PUT", client.url, nil)
|
||||
if err != nil {
|
||||
return req, pipeline.NewError(err, "failed to create request")
|
||||
}
|
||||
params := req.URL.Query()
|
||||
if timeout != nil {
|
||||
params.Set("timeout", strconv.FormatInt(int64(*timeout), 10))
|
||||
}
|
||||
req.URL.RawQuery = params.Encode()
|
||||
req.Header.Set("Content-Length", strconv.FormatInt(contentLength, 10))
|
||||
if blobContentType != nil {
|
||||
req.Header.Set("x-ms-blob-content-type", *blobContentType)
|
||||
}
|
||||
if blobContentEncoding != nil {
|
||||
req.Header.Set("x-ms-blob-content-encoding", *blobContentEncoding)
|
||||
}
|
||||
if blobContentLanguage != nil {
|
||||
req.Header.Set("x-ms-blob-content-language", *blobContentLanguage)
|
||||
}
|
||||
if blobContentMD5 != nil {
|
||||
req.Header.Set("x-ms-blob-content-md5", base64.StdEncoding.EncodeToString(blobContentMD5))
|
||||
}
|
||||
if blobCacheControl != nil {
|
||||
req.Header.Set("x-ms-blob-cache-control", *blobCacheControl)
|
||||
}
|
||||
if metadata != nil {
|
||||
for k, v := range metadata {
|
||||
req.Header.Set("x-ms-meta-"+k, v)
|
||||
}
|
||||
}
|
||||
if leaseID != nil {
|
||||
req.Header.Set("x-ms-lease-id", *leaseID)
|
||||
}
|
||||
if blobContentDisposition != nil {
|
||||
req.Header.Set("x-ms-blob-content-disposition", *blobContentDisposition)
|
||||
}
|
||||
if ifModifiedSince != nil {
|
||||
req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123))
|
||||
}
|
||||
if ifUnmodifiedSince != nil {
|
||||
req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123))
|
||||
}
|
||||
if ifMatch != nil {
|
||||
req.Header.Set("If-Match", string(*ifMatch))
|
||||
}
|
||||
if ifNoneMatch != nil {
|
||||
req.Header.Set("If-None-Match", string(*ifNoneMatch))
|
||||
}
|
||||
req.Header.Set("x-ms-blob-content-length", strconv.FormatInt(blobContentLength, 10))
|
||||
if blobSequenceNumber != nil {
|
||||
req.Header.Set("x-ms-blob-sequence-number", strconv.FormatInt(*blobSequenceNumber, 10))
|
||||
}
|
||||
req.Header.Set("x-ms-version", ServiceVersion)
|
||||
if requestID != nil {
|
||||
req.Header.Set("x-ms-client-request-id", *requestID)
|
||||
}
|
||||
req.Header.Set("x-ms-blob-type", "PageBlob")
|
||||
return req, nil
|
||||
}
|
||||
|
||||
// createResponder handles the response to the Create request.
|
||||
func (client pageBlobClient) createResponder(resp pipeline.Response) (pipeline.Response, error) {
|
||||
err := validateResponse(resp, http.StatusOK, http.StatusCreated)
|
||||
if resp == nil {
|
||||
return nil, err
|
||||
}
|
||||
io.Copy(ioutil.Discard, resp.Response().Body)
|
||||
resp.Response().Body.Close()
|
||||
return &PageBlobCreateResponse{rawResponse: resp.Response()}, err
|
||||
}
|
||||
|
||||
// GetPageRanges the Get Page Ranges operation returns the list of valid page ranges for a page blob or snapshot of a
// page blob
//
// snapshot is the snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to
// retrieve. For more information on working with blob snapshots, see <a
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob">Creating
// a Snapshot of a Blob.</a> timeout is the timeout parameter is expressed in seconds. For more information, see <a
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
// Timeouts for Blob Service Operations.</a> rangeParameter is return only the bytes of the blob in the specified
// range. leaseID is if specified, the operation only succeeds if the resource's lease is active and matches this ID.
// ifModifiedSince is specify this header value to operate only on a blob if it has been modified since the specified
// date/time. ifUnmodifiedSince is specify this header value to operate only on a blob if it has not been modified
// since the specified date/time. ifMatch is specify an ETag value to operate only on blobs with a matching value.
// ifNoneMatch is specify an ETag value to operate only on blobs without a matching value. requestID is provides a
// client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
// analytics logging is enabled.
func (client pageBlobClient) GetPageRanges(ctx context.Context, snapshot *string, timeout *int32, rangeParameter *string, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*PageList, error) {
|
||||
if err := validate([]validation{
|
||||
{targetValue: timeout,
|
||||
constraints: []constraint{{target: "timeout", name: null, rule: false,
|
||||
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
req, err := client.getPageRangesPreparer(snapshot, timeout, rangeParameter, leaseID, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.getPageRangesResponder}, req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resp.(*PageList), err
|
||||
}
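// Example (editor's sketch): listing the valid page ranges of the first 1 MiB
// of a page blob. "ctx" and "client" are placeholders as in the Create sketch
// above; the range string follows the x-ms-range header format. The returned
// *PageList is the unmarshalled <PageList> response body.
//
//	rangeHeader := "bytes=0-1048575"
//	pageList, err := client.GetPageRanges(ctx, nil, nil, &rangeHeader, nil, nil, nil, nil, nil, nil)
//	if err == nil {
//		// iterate the ranges carried by pageList
//	}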
|
||||
|
||||
// getPageRangesPreparer prepares the GetPageRanges request.
|
||||
func (client pageBlobClient) getPageRangesPreparer(snapshot *string, timeout *int32, rangeParameter *string, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
|
||||
req, err := pipeline.NewRequest("GET", client.url, nil)
|
||||
if err != nil {
|
||||
return req, pipeline.NewError(err, "failed to create request")
|
||||
}
|
||||
params := req.URL.Query()
|
||||
if snapshot != nil && len(*snapshot) > 0 {
|
||||
params.Set("snapshot", *snapshot)
|
||||
}
|
||||
if timeout != nil {
|
||||
params.Set("timeout", strconv.FormatInt(int64(*timeout), 10))
|
||||
}
|
||||
params.Set("comp", "pagelist")
|
||||
req.URL.RawQuery = params.Encode()
|
||||
if rangeParameter != nil {
|
||||
req.Header.Set("x-ms-range", *rangeParameter)
|
||||
}
|
||||
if leaseID != nil {
|
||||
req.Header.Set("x-ms-lease-id", *leaseID)
|
||||
}
|
||||
if ifModifiedSince != nil {
|
||||
req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123))
|
||||
}
|
||||
if ifUnmodifiedSince != nil {
|
||||
req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123))
|
||||
}
|
||||
if ifMatch != nil {
|
||||
req.Header.Set("If-Match", string(*ifMatch))
|
||||
}
|
||||
if ifNoneMatch != nil {
|
||||
req.Header.Set("If-None-Match", string(*ifNoneMatch))
|
||||
}
|
||||
req.Header.Set("x-ms-version", ServiceVersion)
|
||||
if requestID != nil {
|
||||
req.Header.Set("x-ms-client-request-id", *requestID)
|
||||
}
|
||||
return req, nil
|
||||
}
|
||||
|
||||
// getPageRangesResponder handles the response to the GetPageRanges request.
|
||||
func (client pageBlobClient) getPageRangesResponder(resp pipeline.Response) (pipeline.Response, error) {
|
||||
err := validateResponse(resp, http.StatusOK)
|
||||
if resp == nil {
|
||||
return nil, err
|
||||
}
|
||||
result := &PageList{rawResponse: resp.Response()}
|
||||
if err != nil {
|
||||
return result, err
|
||||
}
|
||||
defer resp.Response().Body.Close()
|
||||
b, err := ioutil.ReadAll(resp.Response().Body)
|
||||
if err != nil {
|
||||
return result, err
|
||||
}
|
||||
if len(b) > 0 {
|
||||
b = removeBOM(b)
|
||||
err = xml.Unmarshal(b, result)
|
||||
if err != nil {
|
||||
return result, NewResponseError(err, resp.Response(), "failed to unmarshal response body")
|
||||
}
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// GetPageRangesDiff [Update] The Get Page Ranges Diff operation returns the list of valid page ranges for a page blob
// that were changed between target blob and previous snapshot.
//
// snapshot is the snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to
// retrieve. For more information on working with blob snapshots, see <a
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob">Creating
// a Snapshot of a Blob.</a> timeout is the timeout parameter is expressed in seconds. For more information, see <a
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
// Timeouts for Blob Service Operations.</a> prevsnapshot is optional in version 2015-07-08 and newer. The prevsnapshot
// parameter is a DateTime value that specifies that the response will contain only pages that were changed between
// target blob and previous snapshot. Changed pages include both updated and cleared pages. The target blob may be a
// snapshot, as long as the snapshot specified by prevsnapshot is the older of the two. Note that incremental snapshots
// are currently supported only for blobs created on or after January 1, 2016. rangeParameter is return only the bytes
// of the blob in the specified range. leaseID is if specified, the operation only succeeds if the resource's lease is
// active and matches this ID. ifModifiedSince is specify this header value to operate only on a blob if it has been
// modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only on a blob if
// it has not been modified since the specified date/time. ifMatch is specify an ETag value to operate only on blobs
// with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs without a matching value.
// requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics
// logs when storage analytics logging is enabled.
func (client pageBlobClient) GetPageRangesDiff(ctx context.Context, snapshot *string, timeout *int32, prevsnapshot *string, rangeParameter *string, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*PageList, error) {
|
||||
if err := validate([]validation{
|
||||
{targetValue: timeout,
|
||||
constraints: []constraint{{target: "timeout", name: null, rule: false,
|
||||
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
req, err := client.getPageRangesDiffPreparer(snapshot, timeout, prevsnapshot, rangeParameter, leaseID, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.getPageRangesDiffResponder}, req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resp.(*PageList), err
|
||||
}
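// Example (editor's sketch): diffing page ranges against an earlier snapshot.
// "ctx", "client" and "prevSnap" (the DateTime string of an existing snapshot)
// are placeholders; only pages changed since prevSnap are returned.
//
//	pageList, err := client.GetPageRangesDiff(ctx, nil, nil, &prevSnap, nil, nil, nil, nil, nil, nil, nil)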
|
||||
|
||||
// getPageRangesDiffPreparer prepares the GetPageRangesDiff request.
|
||||
func (client pageBlobClient) getPageRangesDiffPreparer(snapshot *string, timeout *int32, prevsnapshot *string, rangeParameter *string, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
|
||||
req, err := pipeline.NewRequest("GET", client.url, nil)
|
||||
if err != nil {
|
||||
return req, pipeline.NewError(err, "failed to create request")
|
||||
}
|
||||
params := req.URL.Query()
|
||||
if snapshot != nil && len(*snapshot) > 0 {
|
||||
params.Set("snapshot", *snapshot)
|
||||
}
|
||||
if timeout != nil {
|
||||
params.Set("timeout", strconv.FormatInt(int64(*timeout), 10))
|
||||
}
|
||||
if prevsnapshot != nil && len(*prevsnapshot) > 0 {
|
||||
params.Set("prevsnapshot", *prevsnapshot)
|
||||
}
|
||||
params.Set("comp", "pagelist")
|
||||
req.URL.RawQuery = params.Encode()
|
||||
if rangeParameter != nil {
|
||||
req.Header.Set("x-ms-range", *rangeParameter)
|
||||
}
|
||||
if leaseID != nil {
|
||||
req.Header.Set("x-ms-lease-id", *leaseID)
|
||||
}
|
||||
if ifModifiedSince != nil {
|
||||
req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123))
|
||||
}
|
||||
if ifUnmodifiedSince != nil {
|
||||
req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123))
|
||||
}
|
||||
if ifMatch != nil {
|
||||
req.Header.Set("If-Match", string(*ifMatch))
|
||||
}
|
||||
if ifNoneMatch != nil {
|
||||
req.Header.Set("If-None-Match", string(*ifNoneMatch))
|
||||
}
|
||||
req.Header.Set("x-ms-version", ServiceVersion)
|
||||
if requestID != nil {
|
||||
req.Header.Set("x-ms-client-request-id", *requestID)
|
||||
}
|
||||
return req, nil
|
||||
}
|
||||
|
||||
// getPageRangesDiffResponder handles the response to the GetPageRangesDiff request.
|
||||
func (client pageBlobClient) getPageRangesDiffResponder(resp pipeline.Response) (pipeline.Response, error) {
|
||||
err := validateResponse(resp, http.StatusOK)
|
||||
if resp == nil {
|
||||
return nil, err
|
||||
}
|
||||
result := &PageList{rawResponse: resp.Response()}
|
||||
if err != nil {
|
||||
return result, err
|
||||
}
|
||||
defer resp.Response().Body.Close()
|
||||
b, err := ioutil.ReadAll(resp.Response().Body)
|
||||
if err != nil {
|
||||
return result, err
|
||||
}
|
||||
if len(b) > 0 {
|
||||
b = removeBOM(b)
|
||||
err = xml.Unmarshal(b, result)
|
||||
if err != nil {
|
||||
return result, NewResponseError(err, resp.Response(), "failed to unmarshal response body")
|
||||
}
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// Resize resizes the Blob
//
// blobContentLength is this header specifies the maximum size for the page blob, up to 1 TB. The page blob size must
// be aligned to a 512-byte boundary. timeout is the timeout parameter is expressed in seconds. For more information,
// see <a
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
// Timeouts for Blob Service Operations.</a> leaseID is if specified, the operation only succeeds if the resource's
// lease is active and matches this ID. ifModifiedSince is specify this header value to operate only on a blob if it
// has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only on a
// blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value to operate only on
// blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs without a matching value.
// requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics
// logs when storage analytics logging is enabled.
func (client pageBlobClient) Resize(ctx context.Context, blobContentLength int64, timeout *int32, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*PageBlobResizeResponse, error) {
|
||||
if err := validate([]validation{
|
||||
{targetValue: timeout,
|
||||
constraints: []constraint{{target: "timeout", name: null, rule: false,
|
||||
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
req, err := client.resizePreparer(blobContentLength, timeout, leaseID, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.resizeResponder}, req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resp.(*PageBlobResizeResponse), err
|
||||
}
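// Example (editor's sketch): growing an existing page blob to 8 MiB. The new
// size must remain 512-byte aligned; "ctx" and "client" are placeholders as in
// the sketches above.
//
//	_, err := client.Resize(ctx, 8*1024*1024, nil, nil, nil, nil, nil, nil, nil)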
|
||||
|
||||
// resizePreparer prepares the Resize request.
|
||||
func (client pageBlobClient) resizePreparer(blobContentLength int64, timeout *int32, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
|
||||
req, err := pipeline.NewRequest("PUT", client.url, nil)
|
||||
if err != nil {
|
||||
return req, pipeline.NewError(err, "failed to create request")
|
||||
}
|
||||
params := req.URL.Query()
|
||||
if timeout != nil {
|
||||
params.Set("timeout", strconv.FormatInt(int64(*timeout), 10))
|
||||
}
|
||||
params.Set("comp", "properties")
|
||||
req.URL.RawQuery = params.Encode()
|
||||
if leaseID != nil {
|
||||
req.Header.Set("x-ms-lease-id", *leaseID)
|
||||
}
|
||||
if ifModifiedSince != nil {
|
||||
req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123))
|
||||
}
|
||||
if ifUnmodifiedSince != nil {
|
||||
req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123))
|
||||
}
|
||||
if ifMatch != nil {
|
||||
req.Header.Set("If-Match", string(*ifMatch))
|
||||
}
|
||||
if ifNoneMatch != nil {
|
||||
req.Header.Set("If-None-Match", string(*ifNoneMatch))
|
||||
}
|
||||
req.Header.Set("x-ms-blob-content-length", strconv.FormatInt(blobContentLength, 10))
|
||||
req.Header.Set("x-ms-version", ServiceVersion)
|
||||
if requestID != nil {
|
||||
req.Header.Set("x-ms-client-request-id", *requestID)
|
||||
}
|
||||
return req, nil
|
||||
}
|
||||
|
||||
// resizeResponder handles the response to the Resize request.
|
||||
func (client pageBlobClient) resizeResponder(resp pipeline.Response) (pipeline.Response, error) {
|
||||
err := validateResponse(resp, http.StatusOK)
|
||||
if resp == nil {
|
||||
return nil, err
|
||||
}
|
||||
io.Copy(ioutil.Discard, resp.Response().Body)
|
||||
resp.Response().Body.Close()
|
||||
return &PageBlobResizeResponse{rawResponse: resp.Response()}, err
|
||||
}
|
||||
|
||||
// UpdateSequenceNumber updates the sequence number of the blob
//
// sequenceNumberAction is required if the x-ms-blob-sequence-number header is set for the request. This property
// applies to page blobs only. This property indicates how the service should modify the blob's sequence number. timeout
// is the timeout parameter is expressed in seconds. For more information, see <a
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
// Timeouts for Blob Service Operations.</a> leaseID is if specified, the operation only succeeds if the resource's
// lease is active and matches this ID. ifModifiedSince is specify this header value to operate only on a blob if it
// has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only on a
// blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value to operate only on
// blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs without a matching value.
// blobSequenceNumber is set for page blobs only. The sequence number is a user-controlled value that you can use to
// track requests. The value of the sequence number must be between 0 and 2^63 - 1. requestID is provides a
// client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
// analytics logging is enabled.
func (client pageBlobClient) UpdateSequenceNumber(ctx context.Context, sequenceNumberAction SequenceNumberActionType, timeout *int32, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, blobSequenceNumber *int64, requestID *string) (*PageBlobUpdateSequenceNumberResponse, error) {
|
||||
if err := validate([]validation{
|
||||
{targetValue: timeout,
|
||||
constraints: []constraint{{target: "timeout", name: null, rule: false,
|
||||
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
req, err := client.updateSequenceNumberPreparer(sequenceNumberAction, timeout, leaseID, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, blobSequenceNumber, requestID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.updateSequenceNumberResponder}, req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resp.(*PageBlobUpdateSequenceNumberResponse), err
|
||||
}
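// Example (editor's sketch): setting an explicit sequence number. It assumes
// the package exposes a SequenceNumberActionUpdate constant for the "update"
// action (not shown in this excerpt); the other actions increment the current
// value or keep the maximum of the two.
//
//	seq := int64(7)
//	_, err := client.UpdateSequenceNumber(ctx, SequenceNumberActionUpdate, nil, nil, nil, nil, nil, nil, &seq, nil)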
|
||||
|
||||
// updateSequenceNumberPreparer prepares the UpdateSequenceNumber request.
|
||||
func (client pageBlobClient) updateSequenceNumberPreparer(sequenceNumberAction SequenceNumberActionType, timeout *int32, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, blobSequenceNumber *int64, requestID *string) (pipeline.Request, error) {
|
||||
req, err := pipeline.NewRequest("PUT", client.url, nil)
|
||||
if err != nil {
|
||||
return req, pipeline.NewError(err, "failed to create request")
|
||||
}
|
||||
params := req.URL.Query()
|
||||
if timeout != nil {
|
||||
params.Set("timeout", strconv.FormatInt(int64(*timeout), 10))
|
||||
}
|
||||
params.Set("comp", "properties")
|
||||
req.URL.RawQuery = params.Encode()
|
||||
if leaseID != nil {
|
||||
req.Header.Set("x-ms-lease-id", *leaseID)
|
||||
}
|
||||
if ifModifiedSince != nil {
|
||||
req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123))
|
||||
}
|
||||
if ifUnmodifiedSince != nil {
|
||||
req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123))
|
||||
}
|
||||
if ifMatch != nil {
|
||||
req.Header.Set("If-Match", string(*ifMatch))
|
||||
}
|
||||
if ifNoneMatch != nil {
|
||||
req.Header.Set("If-None-Match", string(*ifNoneMatch))
|
||||
}
|
||||
req.Header.Set("x-ms-sequence-number-action", string(sequenceNumberAction))
|
||||
if blobSequenceNumber != nil {
|
||||
req.Header.Set("x-ms-blob-sequence-number", strconv.FormatInt(*blobSequenceNumber, 10))
|
||||
}
|
||||
req.Header.Set("x-ms-version", ServiceVersion)
|
||||
if requestID != nil {
|
||||
req.Header.Set("x-ms-client-request-id", *requestID)
|
||||
}
|
||||
return req, nil
|
||||
}
|
||||
|
||||
// updateSequenceNumberResponder handles the response to the UpdateSequenceNumber request.
|
||||
func (client pageBlobClient) updateSequenceNumberResponder(resp pipeline.Response) (pipeline.Response, error) {
|
||||
err := validateResponse(resp, http.StatusOK)
|
||||
if resp == nil {
|
||||
return nil, err
|
||||
}
|
||||
io.Copy(ioutil.Discard, resp.Response().Body)
|
||||
resp.Response().Body.Close()
|
||||
return &PageBlobUpdateSequenceNumberResponse{rawResponse: resp.Response()}, err
|
||||
}
|
||||
|
||||
// UploadPages the Upload Pages operation writes a range of pages to a page blob
//
// body is initial data. body will be closed upon successful return. Callers should ensure closure when receiving an
// error. contentLength is the length of the request. transactionalContentMD5 is specify the transactional md5 for the
// body, to be validated by the service. timeout is the timeout parameter is expressed in seconds. For more
// information, see <a
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
// Timeouts for Blob Service Operations.</a> rangeParameter is return only the bytes of the blob in the specified
// range. leaseID is if specified, the operation only succeeds if the resource's lease is active and matches this ID.
// ifSequenceNumberLessThanOrEqualTo is specify this header value to operate only on a blob if it has a sequence number
// less than or equal to the specified. ifSequenceNumberLessThan is specify this header value to operate only on a blob
// if it has a sequence number less than the specified. ifSequenceNumberEqualTo is specify this header value to operate
// only on a blob if it has the specified sequence number. ifModifiedSince is specify this header value to operate only
// on a blob if it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to
// operate only on a blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value
// to operate only on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs
// without a matching value. requestID is provides a client-generated, opaque value with a 1 KB character limit that is
// recorded in the analytics logs when storage analytics logging is enabled.
func (client pageBlobClient) UploadPages(ctx context.Context, body io.ReadSeeker, contentLength int64, transactionalContentMD5 []byte, timeout *int32, rangeParameter *string, leaseID *string, ifSequenceNumberLessThanOrEqualTo *int64, ifSequenceNumberLessThan *int64, ifSequenceNumberEqualTo *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*PageBlobUploadPagesResponse, error) {
|
||||
if err := validate([]validation{
|
||||
{targetValue: body,
|
||||
constraints: []constraint{{target: "body", name: null, rule: true, chain: nil}}},
|
||||
{targetValue: timeout,
|
||||
constraints: []constraint{{target: "timeout", name: null, rule: false,
|
||||
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
req, err := client.uploadPagesPreparer(body, contentLength, transactionalContentMD5, timeout, rangeParameter, leaseID, ifSequenceNumberLessThanOrEqualTo, ifSequenceNumberLessThan, ifSequenceNumberEqualTo, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.uploadPagesResponder}, req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resp.(*PageBlobUploadPagesResponse), err
|
||||
}
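// Example (editor's sketch): writing one 512-byte page at offset 0. The body
// must be an io.ReadSeeker whose length matches contentLength and the target
// range; bytes.NewReader satisfies that. "ctx" and "client" are placeholders.
//
//	page := make([]byte, 512)
//	rangeHeader := "bytes=0-511"
//	_, err := client.UploadPages(ctx, bytes.NewReader(page), 512, nil, nil, &rangeHeader,
//		nil, nil, nil, nil, nil, nil, nil, nil, nil)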
|
||||
|
||||
// uploadPagesPreparer prepares the UploadPages request.
|
||||
func (client pageBlobClient) uploadPagesPreparer(body io.ReadSeeker, contentLength int64, transactionalContentMD5 []byte, timeout *int32, rangeParameter *string, leaseID *string, ifSequenceNumberLessThanOrEqualTo *int64, ifSequenceNumberLessThan *int64, ifSequenceNumberEqualTo *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
|
||||
req, err := pipeline.NewRequest("PUT", client.url, body)
|
||||
if err != nil {
|
||||
return req, pipeline.NewError(err, "failed to create request")
|
||||
}
|
||||
params := req.URL.Query()
|
||||
if timeout != nil {
|
||||
params.Set("timeout", strconv.FormatInt(int64(*timeout), 10))
|
||||
}
|
||||
params.Set("comp", "page")
|
||||
req.URL.RawQuery = params.Encode()
|
||||
req.Header.Set("Content-Length", strconv.FormatInt(contentLength, 10))
|
||||
if transactionalContentMD5 != nil {
|
||||
req.Header.Set("Content-MD5", base64.StdEncoding.EncodeToString(transactionalContentMD5))
|
||||
}
|
||||
if rangeParameter != nil {
|
||||
req.Header.Set("x-ms-range", *rangeParameter)
|
||||
}
|
||||
if leaseID != nil {
|
||||
req.Header.Set("x-ms-lease-id", *leaseID)
|
||||
}
|
||||
if ifSequenceNumberLessThanOrEqualTo != nil {
|
||||
req.Header.Set("x-ms-if-sequence-number-le", strconv.FormatInt(*ifSequenceNumberLessThanOrEqualTo, 10))
|
||||
}
|
||||
if ifSequenceNumberLessThan != nil {
|
||||
req.Header.Set("x-ms-if-sequence-number-lt", strconv.FormatInt(*ifSequenceNumberLessThan, 10))
|
||||
}
|
||||
if ifSequenceNumberEqualTo != nil {
|
||||
req.Header.Set("x-ms-if-sequence-number-eq", strconv.FormatInt(*ifSequenceNumberEqualTo, 10))
|
||||
}
|
||||
if ifModifiedSince != nil {
|
||||
req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123))
|
||||
}
|
||||
if ifUnmodifiedSince != nil {
|
||||
req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123))
|
||||
}
|
||||
if ifMatch != nil {
|
||||
req.Header.Set("If-Match", string(*ifMatch))
|
||||
}
|
||||
if ifNoneMatch != nil {
|
||||
req.Header.Set("If-None-Match", string(*ifNoneMatch))
|
||||
}
|
||||
req.Header.Set("x-ms-version", ServiceVersion)
|
||||
if requestID != nil {
|
||||
req.Header.Set("x-ms-client-request-id", *requestID)
|
||||
}
|
||||
req.Header.Set("x-ms-page-write", "update")
|
||||
return req, nil
|
||||
}
|
||||
|
||||
// uploadPagesResponder handles the response to the UploadPages request.
|
||||
func (client pageBlobClient) uploadPagesResponder(resp pipeline.Response) (pipeline.Response, error) {
|
||||
err := validateResponse(resp, http.StatusOK, http.StatusCreated)
|
||||
if resp == nil {
|
||||
return nil, err
|
||||
}
|
||||
io.Copy(ioutil.Discard, resp.Response().Body)
|
||||
resp.Response().Body.Close()
|
||||
return &PageBlobUploadPagesResponse{rawResponse: resp.Response()}, err
|
||||
}
|
||||
|
||||
// UploadPagesFromURL the Upload Pages operation writes a range of pages to a page blob where the contents are read
// from a URL
//
// sourceURL is specify a URL to the copy source. sourceRange is bytes of source data in the specified range. The
// length of this range should match the ContentLength header and x-ms-range/Range destination range header.
// contentLength is the length of the request. rangeParameter is the range of bytes to which the source range would be
// written. The range should be 512 aligned and range-end is required. sourceContentMD5 is specify the md5 calculated
// for the range of bytes that must be read from the copy source. timeout is the timeout parameter is expressed in
// seconds. For more information, see <a
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
// Timeouts for Blob Service Operations.</a> leaseID is if specified, the operation only succeeds if the resource's
// lease is active and matches this ID. ifSequenceNumberLessThanOrEqualTo is specify this header value to operate only
// on a blob if it has a sequence number less than or equal to the specified. ifSequenceNumberLessThan is specify this
// header value to operate only on a blob if it has a sequence number less than the specified. ifSequenceNumberEqualTo
// is specify this header value to operate only on a blob if it has the specified sequence number. ifModifiedSince is
// specify this header value to operate only on a blob if it has been modified since the specified date/time.
// ifUnmodifiedSince is specify this header value to operate only on a blob if it has not been modified since the
// specified date/time. ifMatch is specify an ETag value to operate only on blobs with a matching value. ifNoneMatch is
// specify an ETag value to operate only on blobs without a matching value. requestID is provides a client-generated,
// opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is
// enabled.
func (client pageBlobClient) UploadPagesFromURL(ctx context.Context, sourceURL string, sourceRange string, contentLength int64, rangeParameter string, sourceContentMD5 []byte, timeout *int32, leaseID *string, ifSequenceNumberLessThanOrEqualTo *int64, ifSequenceNumberLessThan *int64, ifSequenceNumberEqualTo *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*PageBlobUploadPagesFromURLResponse, error) {
|
||||
if err := validate([]validation{
|
||||
{targetValue: timeout,
|
||||
constraints: []constraint{{target: "timeout", name: null, rule: false,
|
||||
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
req, err := client.uploadPagesFromURLPreparer(sourceURL, sourceRange, contentLength, rangeParameter, sourceContentMD5, timeout, leaseID, ifSequenceNumberLessThanOrEqualTo, ifSequenceNumberLessThan, ifSequenceNumberEqualTo, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.uploadPagesFromURLResponder}, req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resp.(*PageBlobUploadPagesFromURLResponse), err
|
||||
}
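// Example (editor's sketch): server-side copy of one 512-byte page from another
// blob URL. Source and destination ranges must describe the same length and be
// 512-aligned; "ctx", "client" and "srcURL" are placeholders.
//
//	_, err := client.UploadPagesFromURL(ctx, srcURL, "bytes=0-511", 512, "bytes=0-511", nil,
//		nil, nil, nil, nil, nil, nil, nil, nil, nil, nil)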
|
||||
|
||||
// uploadPagesFromURLPreparer prepares the UploadPagesFromURL request.
|
||||
func (client pageBlobClient) uploadPagesFromURLPreparer(sourceURL string, sourceRange string, contentLength int64, rangeParameter string, sourceContentMD5 []byte, timeout *int32, leaseID *string, ifSequenceNumberLessThanOrEqualTo *int64, ifSequenceNumberLessThan *int64, ifSequenceNumberEqualTo *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
|
||||
req, err := pipeline.NewRequest("PUT", client.url, nil)
|
||||
if err != nil {
|
||||
return req, pipeline.NewError(err, "failed to create request")
|
||||
}
|
||||
params := req.URL.Query()
|
||||
if timeout != nil {
|
||||
params.Set("timeout", strconv.FormatInt(int64(*timeout), 10))
|
||||
}
|
||||
params.Set("comp", "page")
|
||||
req.URL.RawQuery = params.Encode()
|
||||
req.Header.Set("x-ms-copy-source", sourceURL)
|
||||
req.Header.Set("x-ms-source-range", sourceRange)
|
||||
if sourceContentMD5 != nil {
|
||||
req.Header.Set("x-ms-source-content-md5", base64.StdEncoding.EncodeToString(sourceContentMD5))
|
||||
}
|
||||
req.Header.Set("Content-Length", strconv.FormatInt(contentLength, 10))
|
||||
req.Header.Set("x-ms-range", rangeParameter)
|
||||
if leaseID != nil {
|
||||
req.Header.Set("x-ms-lease-id", *leaseID)
|
||||
}
|
||||
if ifSequenceNumberLessThanOrEqualTo != nil {
|
||||
req.Header.Set("x-ms-if-sequence-number-le", strconv.FormatInt(*ifSequenceNumberLessThanOrEqualTo, 10))
|
||||
}
|
||||
if ifSequenceNumberLessThan != nil {
|
||||
req.Header.Set("x-ms-if-sequence-number-lt", strconv.FormatInt(*ifSequenceNumberLessThan, 10))
|
||||
}
|
||||
if ifSequenceNumberEqualTo != nil {
|
||||
req.Header.Set("x-ms-if-sequence-number-eq", strconv.FormatInt(*ifSequenceNumberEqualTo, 10))
|
||||
}
|
||||
if ifModifiedSince != nil {
|
||||
req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123))
|
||||
}
|
||||
if ifUnmodifiedSince != nil {
|
||||
req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123))
|
||||
}
|
||||
if ifMatch != nil {
|
||||
req.Header.Set("If-Match", string(*ifMatch))
|
||||
}
|
||||
if ifNoneMatch != nil {
|
||||
req.Header.Set("If-None-Match", string(*ifNoneMatch))
|
||||
}
|
||||
req.Header.Set("x-ms-version", ServiceVersion)
|
||||
if requestID != nil {
|
||||
req.Header.Set("x-ms-client-request-id", *requestID)
|
||||
}
|
||||
req.Header.Set("x-ms-page-write", "update")
|
||||
return req, nil
|
||||
}
|
||||
|
||||
// uploadPagesFromURLResponder handles the response to the UploadPagesFromURL request.
|
||||
func (client pageBlobClient) uploadPagesFromURLResponder(resp pipeline.Response) (pipeline.Response, error) {
|
||||
err := validateResponse(resp, http.StatusOK, http.StatusCreated)
|
||||
if resp == nil {
|
||||
return nil, err
|
||||
}
|
||||
io.Copy(ioutil.Discard, resp.Response().Body)
|
||||
resp.Response().Body.Close()
|
||||
return &PageBlobUploadPagesFromURLResponse{rawResponse: resp.Response()}, err
|
||||
}
@ -0,0 +1,75 @@
package azblob
|
||||
|
||||
// Code generated by Microsoft (R) AutoRest Code Generator.
|
||||
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/xml"
|
||||
"io/ioutil"
|
||||
|
||||
"github.com/Azure/azure-pipeline-go/pipeline"
|
||||
)
|
||||
|
||||
type responder func(resp pipeline.Response) (result pipeline.Response, err error)
|
||||
|
||||
// responderPolicyFactory is a factory capable of creating a responder policy.
type responderPolicyFactory struct {
|
||||
responder responder
|
||||
}
|
||||
|
||||
// New creates a responder policy that runs the factory's responder after the next policy in the pipeline completes.
func (arpf responderPolicyFactory) New(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.Policy {
|
||||
return responderPolicy{next: next, responder: arpf.responder}
|
||||
}
|
||||
|
||||
type responderPolicy struct {
|
||||
next pipeline.Policy
|
||||
responder responder
|
||||
}
|
||||
|
||||
// Do sends the request to the service and validates/deserializes the HTTP response.
|
||||
func (arp responderPolicy) Do(ctx context.Context, request pipeline.Request) (pipeline.Response, error) {
|
||||
resp, err := arp.next.Do(ctx, request)
|
||||
if err != nil {
|
||||
return resp, err
|
||||
}
|
||||
return arp.responder(resp)
|
||||
}
|
||||
|
||||
// validateResponse checks an HTTP response's status code against a legal set of codes.
|
||||
// If the response code is not legal, then validateResponse reads all of the response's body
|
||||
// (containing error information) and returns a response error.
|
||||
func validateResponse(resp pipeline.Response, successStatusCodes ...int) error {
|
||||
if resp == nil {
|
||||
return NewResponseError(nil, nil, "nil response")
|
||||
}
|
||||
responseCode := resp.Response().StatusCode
|
||||
for _, i := range successStatusCodes {
|
||||
if i == responseCode {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
// only close the body in the failure case. in the
|
||||
// success case responders will close the body as required.
|
||||
defer resp.Response().Body.Close()
|
||||
b, err := ioutil.ReadAll(resp.Response().Body)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// the service code, description and details will be populated during unmarshalling
|
||||
responseError := NewResponseError(nil, resp.Response(), resp.Response().Status)
|
||||
if len(b) > 0 {
|
||||
if err = xml.Unmarshal(b, &responseError); err != nil {
|
||||
return NewResponseError(err, resp.Response(), "failed to unmarshal response body")
|
||||
}
|
||||
}
|
||||
return responseError
|
||||
}
|
||||
|
||||
// removes any BOM from the byte slice
|
||||
func removeBOM(b []byte) []byte {
|
||||
// UTF8
|
||||
return bytes.TrimPrefix(b, []byte("\xef\xbb\xbf"))
|
||||
}
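// Example (editor's sketch): removeBOM only strips the UTF-8 byte-order mark,
// so already-clean payloads pass through unchanged.
//
//	removeBOM([]byte("\xef\xbb\xbf<PageList/>")) // -> []byte("<PageList/>")
//	removeBOM([]byte("<PageList/>"))             // -> unchanged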
@ -0,0 +1,96 @@
package azblob
|
||||
|
||||
// Code generated by Microsoft (R) AutoRest Code Generator.
|
||||
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"net"
|
||||
"net/http"
|
||||
|
||||
"github.com/Azure/azure-pipeline-go/pipeline"
|
||||
)
|
||||
|
||||
// if you want to provide custom error handling set this variable to your constructor function
|
||||
var responseErrorFactory func(cause error, response *http.Response, description string) error
|
||||
|
||||
// ResponseError identifies a responder-generated network or response parsing error.
|
||||
type ResponseError interface {
|
||||
// Error exposes the Error(), Temporary() and Timeout() methods.
|
||||
net.Error // Includes the Go error interface
|
||||
// Response returns the HTTP response. You may examine this but you should not modify it.
|
||||
Response() *http.Response
|
||||
}
|
||||
|
||||
// NewResponseError creates an error object that implements the error interface.
|
||||
func NewResponseError(cause error, response *http.Response, description string) error {
|
||||
if responseErrorFactory != nil {
|
||||
return responseErrorFactory(cause, response, description)
|
||||
}
|
||||
return &responseError{
|
||||
ErrorNode: pipeline.ErrorNode{}.Initialize(cause, 3),
|
||||
response: response,
|
||||
description: description,
|
||||
}
|
||||
}
|
||||
|
||||
// responseError is the internal struct that implements the public ResponseError interface.
|
||||
type responseError struct {
|
||||
pipeline.ErrorNode // This is embedded so that responseError "inherits" Error, Temporary, Timeout, and Cause
|
||||
response *http.Response
|
||||
description string
|
||||
}
|
||||
|
||||
// Error implements the error interface's Error method to return a string representation of the error.
|
||||
func (e *responseError) Error() string {
|
||||
b := &bytes.Buffer{}
|
||||
fmt.Fprintf(b, "===== RESPONSE ERROR (Code=%v) =====\n", e.response.StatusCode)
|
||||
fmt.Fprintf(b, "Status=%s, Description: %s\n", e.response.Status, e.description)
|
||||
s := b.String()
|
||||
return e.ErrorNode.Error(s)
|
||||
}
|
||||
|
||||
// Response implements the ResponseError interface's method to return the HTTP response.
|
||||
func (e *responseError) Response() *http.Response {
|
||||
return e.response
|
||||
}
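// Example (editor's sketch): callers can distinguish service failures from
// transport errors by asserting the returned error to the exported
// ResponseError interface and inspecting the raw *http.Response.
//
//	if respErr, ok := err.(ResponseError); ok && respErr.Response() != nil {
//		status := respErr.Response().StatusCode // e.g. 412 for a failed If-Match
//		_ = status
//	}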
|
||||
|
||||
// RFC7807 PROBLEM ------------------------------------------------------------------------------------
|
||||
// RFC7807Problem ... This type can be publicly embedded in another type that wants to add additional members.
|
||||
/*type RFC7807Problem struct {
|
||||
// Mandatory: A (relative) URI reference identifying the problem type (it MAY refer to human-readable documentation).
|
||||
typeURI string // Should default to "about:blank"
|
||||
// Optional: Short, human-readable summary (maybe localized).
|
||||
title string
|
||||
// Optional: HTTP status code generated by the origin server
|
||||
status int
|
||||
// Optional: Human-readable explanation for this problem occurance.
|
||||
// Should help client correct the problem. Clients should NOT parse this string.
|
||||
detail string
|
||||
// Optional: A (relative) URI identifying this specific problem occurence (it may or may not be dereferenced).
|
||||
instance string
|
||||
}
|
||||
// NewRFC7807Problem ...
|
||||
func NewRFC7807Problem(typeURI string, status int, titleFormat string, a ...interface{}) error {
|
||||
return &RFC7807Problem{
|
||||
typeURI: typeURI,
|
||||
status: status,
|
||||
title: fmt.Sprintf(titleFormat, a...),
|
||||
}
|
||||
}
|
||||
// Error returns the error information as a string.
|
||||
func (e *RFC7807Problem) Error() string {
|
||||
return e.title
|
||||
}
|
||||
// TypeURI ...
|
||||
func (e *RFC7807Problem) TypeURI() string {
|
||||
if e.typeURI == "" {
|
||||
e.typeURI = "about:blank"
|
||||
}
|
||||
return e.typeURI
|
||||
}
|
||||
// Members ...
|
||||
func (e *RFC7807Problem) Members() (status int, title, detail, instance string) {
|
||||
return e.status, e.title, e.detail, e.instance
|
||||
}*/
@ -0,0 +1,389 @@
package azblob
|
||||
|
||||
// Code generated by Microsoft (R) AutoRest Code Generator.
|
||||
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/xml"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strconv"
|
||||
|
||||
"github.com/Azure/azure-pipeline-go/pipeline"
|
||||
)
|
||||
|
||||
// serviceClient is the client for the Service methods of the Azblob service.
|
||||
type serviceClient struct {
|
||||
managementClient
|
||||
}
|
||||
|
||||
// newServiceClient creates an instance of the serviceClient client.
|
||||
func newServiceClient(url url.URL, p pipeline.Pipeline) serviceClient {
|
||||
return serviceClient{newManagementClient(url, p)}
|
||||
}
|
||||
|
||||
// GetAccountInfo returns the sku name and account kind
|
||||
func (client serviceClient) GetAccountInfo(ctx context.Context) (*ServiceGetAccountInfoResponse, error) {
|
||||
req, err := client.getAccountInfoPreparer()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.getAccountInfoResponder}, req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resp.(*ServiceGetAccountInfoResponse), err
|
||||
}
|
||||
|
||||
// getAccountInfoPreparer prepares the GetAccountInfo request.
|
||||
func (client serviceClient) getAccountInfoPreparer() (pipeline.Request, error) {
|
||||
req, err := pipeline.NewRequest("GET", client.url, nil)
|
||||
if err != nil {
|
||||
return req, pipeline.NewError(err, "failed to create request")
|
||||
}
|
||||
params := req.URL.Query()
|
||||
params.Set("restype", "account")
|
||||
params.Set("comp", "properties")
|
||||
req.URL.RawQuery = params.Encode()
|
||||
req.Header.Set("x-ms-version", ServiceVersion)
|
||||
return req, nil
|
||||
}
|
||||
|
||||
// getAccountInfoResponder handles the response to the GetAccountInfo request.
|
||||
func (client serviceClient) getAccountInfoResponder(resp pipeline.Response) (pipeline.Response, error) {
|
||||
err := validateResponse(resp, http.StatusOK)
|
||||
if resp == nil {
|
||||
return nil, err
|
||||
}
|
||||
io.Copy(ioutil.Discard, resp.Response().Body)
|
||||
resp.Response().Body.Close()
|
||||
return &ServiceGetAccountInfoResponse{rawResponse: resp.Response()}, err
|
||||
}
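// Example (editor's sketch): querying account information with the internal
// service client. "ctx", "u" (the account's blob service URL) and "p" (a
// pipeline) are assumed to be built elsewhere; newServiceClient is the
// constructor defined above.
//
//	svc := newServiceClient(u, p)
//	info, err := svc.GetAccountInfo(ctx)
//	// on success the SKU name and account kind are carried in the response headers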
|
||||
|
||||
// GetProperties gets the properties of a storage account's Blob service, including properties for Storage Analytics
// and CORS (Cross-Origin Resource Sharing) rules.
//
// timeout is the timeout parameter is expressed in seconds. For more information, see <a
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
// Timeouts for Blob Service Operations.</a> requestID is provides a client-generated, opaque value with a 1 KB
// character limit that is recorded in the analytics logs when storage analytics logging is enabled.
func (client serviceClient) GetProperties(ctx context.Context, timeout *int32, requestID *string) (*StorageServiceProperties, error) {
|
||||
if err := validate([]validation{
|
||||
{targetValue: timeout,
|
||||
constraints: []constraint{{target: "timeout", name: null, rule: false,
|
||||
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
req, err := client.getPropertiesPreparer(timeout, requestID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.getPropertiesResponder}, req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resp.(*StorageServiceProperties), err
|
||||
}
|
||||
|
||||
// getPropertiesPreparer prepares the GetProperties request.
|
||||
func (client serviceClient) getPropertiesPreparer(timeout *int32, requestID *string) (pipeline.Request, error) {
|
||||
req, err := pipeline.NewRequest("GET", client.url, nil)
|
||||
if err != nil {
|
||||
return req, pipeline.NewError(err, "failed to create request")
|
||||
}
|
||||
params := req.URL.Query()
|
||||
if timeout != nil {
|
||||
params.Set("timeout", strconv.FormatInt(int64(*timeout), 10))
|
||||
}
|
||||
params.Set("restype", "service")
|
||||
params.Set("comp", "properties")
|
||||
req.URL.RawQuery = params.Encode()
|
||||
req.Header.Set("x-ms-version", ServiceVersion)
|
||||
if requestID != nil {
|
||||
req.Header.Set("x-ms-client-request-id", *requestID)
|
||||
}
|
||||
return req, nil
|
||||
}
|
||||
|
||||
// getPropertiesResponder handles the response to the GetProperties request.
|
||||
func (client serviceClient) getPropertiesResponder(resp pipeline.Response) (pipeline.Response, error) {
|
||||
err := validateResponse(resp, http.StatusOK)
|
||||
if resp == nil {
|
||||
return nil, err
|
||||
}
|
||||
result := &StorageServiceProperties{rawResponse: resp.Response()}
|
||||
if err != nil {
|
||||
return result, err
|
||||
}
|
||||
defer resp.Response().Body.Close()
|
||||
b, err := ioutil.ReadAll(resp.Response().Body)
|
||||
if err != nil {
|
||||
return result, err
|
||||
}
|
||||
if len(b) > 0 {
|
||||
b = removeBOM(b)
|
||||
err = xml.Unmarshal(b, result)
|
||||
if err != nil {
|
||||
return result, NewResponseError(err, resp.Response(), "failed to unmarshal response body")
|
||||
}
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// GetStatistics retrieves statistics related to replication for the Blob service. It is only available on the
// secondary location endpoint when read-access geo-redundant replication is enabled for the storage account.
//
// timeout is the timeout parameter is expressed in seconds. For more information, see <a
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
// Timeouts for Blob Service Operations.</a> requestID is provides a client-generated, opaque value with a 1 KB
// character limit that is recorded in the analytics logs when storage analytics logging is enabled.
func (client serviceClient) GetStatistics(ctx context.Context, timeout *int32, requestID *string) (*StorageServiceStats, error) {
|
||||
if err := validate([]validation{
|
||||
{targetValue: timeout,
|
||||
constraints: []constraint{{target: "timeout", name: null, rule: false,
|
||||
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
req, err := client.getStatisticsPreparer(timeout, requestID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.getStatisticsResponder}, req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resp.(*StorageServiceStats), err
|
||||
}
|
||||
|
||||
// getStatisticsPreparer prepares the GetStatistics request.
|
||||
func (client serviceClient) getStatisticsPreparer(timeout *int32, requestID *string) (pipeline.Request, error) {
|
||||
req, err := pipeline.NewRequest("GET", client.url, nil)
|
||||
if err != nil {
|
||||
return req, pipeline.NewError(err, "failed to create request")
|
||||
}
|
||||
params := req.URL.Query()
|
||||
if timeout != nil {
|
||||
params.Set("timeout", strconv.FormatInt(int64(*timeout), 10))
|
||||
}
|
||||
params.Set("restype", "service")
|
||||
params.Set("comp", "stats")
|
||||
req.URL.RawQuery = params.Encode()
|
||||
req.Header.Set("x-ms-version", ServiceVersion)
|
||||
if requestID != nil {
|
||||
req.Header.Set("x-ms-client-request-id", *requestID)
|
||||
}
|
||||
return req, nil
|
||||
}
|
||||
|
||||
// getStatisticsResponder handles the response to the GetStatistics request.
|
||||
func (client serviceClient) getStatisticsResponder(resp pipeline.Response) (pipeline.Response, error) {
|
||||
err := validateResponse(resp, http.StatusOK)
|
||||
if resp == nil {
|
||||
return nil, err
|
||||
}
|
||||
result := &StorageServiceStats{rawResponse: resp.Response()}
|
||||
if err != nil {
|
||||
return result, err
|
||||
}
|
||||
defer resp.Response().Body.Close()
|
||||
b, err := ioutil.ReadAll(resp.Response().Body)
|
||||
if err != nil {
|
||||
return result, err
|
||||
}
|
||||
if len(b) > 0 {
|
||||
b = removeBOM(b)
|
||||
err = xml.Unmarshal(b, result)
|
||||
if err != nil {
|
||||
return result, NewResponseError(err, resp.Response(), "failed to unmarshal response body")
|
||||
}
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// ListContainersSegment the List Containers Segment operation returns a list of the containers under the specified
// account
//
// prefix is filters the results to return only containers whose name begins with the specified prefix. marker is a
// string value that identifies the portion of the list of containers to be returned with the next listing operation.
// The operation returns the NextMarker value within the response body if the listing operation did not return all
// containers remaining to be listed with the current page. The NextMarker value can be used as the value for the
// marker parameter in a subsequent call to request the next page of list items. The marker value is opaque to the
// client. maxresults is specifies the maximum number of containers to return. If the request does not specify
// maxresults, or specifies a value greater than 5000, the server will return up to 5000 items. Note that if the
// listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the
// remainder of the results. For this reason, it is possible that the service will return fewer results than specified
// by maxresults, or than the default of 5000. include is include this parameter to specify that the container's
// metadata be returned as part of the response body. timeout is the timeout parameter is expressed in seconds. For
// more information, see <a
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
// Timeouts for Blob Service Operations.</a> requestID is provides a client-generated, opaque value with a 1 KB
// character limit that is recorded in the analytics logs when storage analytics logging is enabled.
func (client serviceClient) ListContainersSegment(ctx context.Context, prefix *string, marker *string, maxresults *int32, include ListContainersIncludeType, timeout *int32, requestID *string) (*ListContainersSegmentResponse, error) {
|
||||
if err := validate([]validation{
|
||||
{targetValue: maxresults,
|
||||
constraints: []constraint{{target: "maxresults", name: null, rule: false,
|
||||
chain: []constraint{{target: "maxresults", name: inclusiveMinimum, rule: 1, chain: nil}}}}},
|
||||
{targetValue: timeout,
|
||||
constraints: []constraint{{target: "timeout", name: null, rule: false,
|
||||
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
req, err := client.listContainersSegmentPreparer(prefix, marker, maxresults, include, timeout, requestID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.listContainersSegmentResponder}, req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resp.(*ListContainersSegmentResponse), err
|
||||
}
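// Example (editor's sketch): paging through containers 100 at a time with the
// metadata include option. The loop continues with the NextMarker returned in
// each response body until it comes back empty; the exact Go field names follow
// the generated models and are not verified here, so treat this as pseudocode.
//
//	max := int32(100)
//	marker := ""
//	seg, err := client.ListContainersSegment(ctx, nil, &marker, &max, ListContainersIncludeMetadata, nil, nil)
//	// on success, read the containers from seg, set marker to the returned NextMarker,
//	// and repeat until NextMarker is empty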
|
||||
|
||||
// listContainersSegmentPreparer prepares the ListContainersSegment request.
|
||||
func (client serviceClient) listContainersSegmentPreparer(prefix *string, marker *string, maxresults *int32, include ListContainersIncludeType, timeout *int32, requestID *string) (pipeline.Request, error) {
|
||||
req, err := pipeline.NewRequest("GET", client.url, nil)
|
||||
if err != nil {
|
||||
return req, pipeline.NewError(err, "failed to create request")
|
||||
}
|
||||
params := req.URL.Query()
|
||||
if prefix != nil && len(*prefix) > 0 {
|
||||
params.Set("prefix", *prefix)
|
||||
}
|
||||
if marker != nil && len(*marker) > 0 {
|
||||
params.Set("marker", *marker)
|
||||
}
|
||||
if maxresults != nil {
|
||||
params.Set("maxresults", strconv.FormatInt(int64(*maxresults), 10))
|
||||
}
|
||||
if include != ListContainersIncludeNone {
|
||||
params.Set("include", string(include))
|
||||
}
|
||||
if timeout != nil {
|
||||
params.Set("timeout", strconv.FormatInt(int64(*timeout), 10))
|
||||
}
|
||||
params.Set("comp", "list")
|
||||
req.URL.RawQuery = params.Encode()
|
||||
req.Header.Set("x-ms-version", ServiceVersion)
|
||||
if requestID != nil {
|
||||
req.Header.Set("x-ms-client-request-id", *requestID)
|
||||
}
|
||||
return req, nil
|
||||
}
|
||||
|
||||
// listContainersSegmentResponder handles the response to the ListContainersSegment request.
|
||||
func (client serviceClient) listContainersSegmentResponder(resp pipeline.Response) (pipeline.Response, error) {
|
||||
err := validateResponse(resp, http.StatusOK)
|
||||
if resp == nil {
|
||||
return nil, err
|
||||
}
|
||||
result := &ListContainersSegmentResponse{rawResponse: resp.Response()}
|
||||
if err != nil {
|
||||
return result, err
|
||||
}
|
||||
defer resp.Response().Body.Close()
|
||||
b, err := ioutil.ReadAll(resp.Response().Body)
|
||||
if err != nil {
|
||||
return result, err
|
||||
}
|
||||
if len(b) > 0 {
|
||||
b = removeBOM(b)
|
||||
err = xml.Unmarshal(b, result)
|
||||
if err != nil {
|
||||
return result, NewResponseError(err, resp.Response(), "failed to unmarshal response body")
|
||||
}
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// SetProperties sets properties for a storage account's Blob service endpoint, including properties for Storage
|
||||
// Analytics and CORS (Cross-Origin Resource Sharing) rules
|
||||
//
|
||||
// storageServiceProperties is the StorageService properties. timeout is expressed in seconds.
|
||||
// For more information, see <a
|
||||
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
|
||||
// Timeouts for Blob Service Operations.</a> requestID provides a client-generated, opaque value with a 1 KB
|
||||
// character limit that is recorded in the analytics logs when storage analytics logging is enabled.
|
||||
func (client serviceClient) SetProperties(ctx context.Context, storageServiceProperties StorageServiceProperties, timeout *int32, requestID *string) (*ServiceSetPropertiesResponse, error) {
|
||||
if err := validate([]validation{
|
||||
{targetValue: storageServiceProperties,
|
||||
constraints: []constraint{{target: "storageServiceProperties.Logging", name: null, rule: false,
|
||||
chain: []constraint{{target: "storageServiceProperties.Logging.RetentionPolicy", name: null, rule: true,
|
||||
chain: []constraint{{target: "storageServiceProperties.Logging.RetentionPolicy.Days", name: null, rule: false,
|
||||
chain: []constraint{{target: "storageServiceProperties.Logging.RetentionPolicy.Days", name: inclusiveMinimum, rule: 1, chain: nil}}},
|
||||
}},
|
||||
}},
|
||||
{target: "storageServiceProperties.HourMetrics", name: null, rule: false,
|
||||
chain: []constraint{{target: "storageServiceProperties.HourMetrics.RetentionPolicy", name: null, rule: false,
|
||||
chain: []constraint{{target: "storageServiceProperties.HourMetrics.RetentionPolicy.Days", name: null, rule: false,
|
||||
chain: []constraint{{target: "storageServiceProperties.HourMetrics.RetentionPolicy.Days", name: inclusiveMinimum, rule: 1, chain: nil}}},
|
||||
}},
|
||||
}},
|
||||
{target: "storageServiceProperties.MinuteMetrics", name: null, rule: false,
|
||||
chain: []constraint{{target: "storageServiceProperties.MinuteMetrics.RetentionPolicy", name: null, rule: false,
|
||||
chain: []constraint{{target: "storageServiceProperties.MinuteMetrics.RetentionPolicy.Days", name: null, rule: false,
|
||||
chain: []constraint{{target: "storageServiceProperties.MinuteMetrics.RetentionPolicy.Days", name: inclusiveMinimum, rule: 1, chain: nil}}},
|
||||
}},
|
||||
}},
|
||||
{target: "storageServiceProperties.DeleteRetentionPolicy", name: null, rule: false,
|
||||
chain: []constraint{{target: "storageServiceProperties.DeleteRetentionPolicy.Days", name: null, rule: false,
|
||||
chain: []constraint{{target: "storageServiceProperties.DeleteRetentionPolicy.Days", name: inclusiveMinimum, rule: 1, chain: nil}}},
|
||||
}}}},
|
||||
{targetValue: timeout,
|
||||
constraints: []constraint{{target: "timeout", name: null, rule: false,
|
||||
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
req, err := client.setPropertiesPreparer(storageServiceProperties, timeout, requestID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.setPropertiesResponder}, req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resp.(*ServiceSetPropertiesResponse), err
|
||||
}
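// Example (not part of the vendored source): a hedged sketch that enables a
// seven-day delete retention policy through the public ServiceURL.SetProperties
// wrapper. Days must be at least 1, mirroring the inclusiveMinimum constraint
// validated above; the account URL is a placeholder and a real credential would
// be required.
package main

import (
	"context"
	"log"
	"net/url"

	"github.com/Azure/azure-storage-blob-go/azblob"
)

func main() {
	u, _ := url.Parse("https://myaccount.blob.core.windows.net") // placeholder account
	serviceURL := azblob.NewServiceURL(*u, azblob.NewPipeline(azblob.NewAnonymousCredential(), azblob.PipelineOptions{}))

	days := int32(7) // values below 1 fail the client-side validation shown above
	_, err := serviceURL.SetProperties(context.Background(), azblob.StorageServiceProperties{
		DeleteRetentionPolicy: &azblob.RetentionPolicy{Enabled: true, Days: &days},
	})
	if err != nil {
		log.Fatal(err)
	}
}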
|
||||
|
||||
// setPropertiesPreparer prepares the SetProperties request.
|
||||
func (client serviceClient) setPropertiesPreparer(storageServiceProperties StorageServiceProperties, timeout *int32, requestID *string) (pipeline.Request, error) {
|
||||
req, err := pipeline.NewRequest("PUT", client.url, nil)
|
||||
if err != nil {
|
||||
return req, pipeline.NewError(err, "failed to create request")
|
||||
}
|
||||
params := req.URL.Query()
|
||||
if timeout != nil {
|
||||
params.Set("timeout", strconv.FormatInt(int64(*timeout), 10))
|
||||
}
|
||||
params.Set("restype", "service")
|
||||
params.Set("comp", "properties")
|
||||
req.URL.RawQuery = params.Encode()
|
||||
req.Header.Set("x-ms-version", ServiceVersion)
|
||||
if requestID != nil {
|
||||
req.Header.Set("x-ms-client-request-id", *requestID)
|
||||
}
|
||||
b, err := xml.Marshal(storageServiceProperties)
|
||||
if err != nil {
|
||||
return req, pipeline.NewError(err, "failed to marshal request body")
|
||||
}
|
||||
req.Header.Set("Content-Type", "application/xml")
|
||||
err = req.SetBody(bytes.NewReader(b))
|
||||
if err != nil {
|
||||
return req, pipeline.NewError(err, "failed to set request body")
|
||||
}
|
||||
return req, nil
|
||||
}
|
||||
|
||||
// setPropertiesResponder handles the response to the SetProperties request.
|
||||
func (client serviceClient) setPropertiesResponder(resp pipeline.Response) (pipeline.Response, error) {
|
||||
err := validateResponse(resp, http.StatusOK, http.StatusAccepted)
|
||||
if resp == nil {
|
||||
return nil, err
|
||||
}
|
||||
io.Copy(ioutil.Discard, resp.Response().Body)
|
||||
resp.Response().Body.Close()
|
||||
return &ServiceSetPropertiesResponse{rawResponse: resp.Response()}, err
|
||||
}
|
|
@ -0,0 +1,368 @@
|
|||
package azblob
|
||||
|
||||
// Code generated by Microsoft (R) AutoRest Code Generator.
|
||||
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"regexp"
|
||||
"strings"
|
||||
|
||||
"github.com/Azure/azure-pipeline-go/pipeline"
|
||||
)
|
||||
|
||||
// constraint stores a constraint name, target field name,
|
||||
// rule, and chain validations.
|
||||
type constraint struct {
|
||||
// Target field name for validation.
|
||||
target string
|
||||
|
||||
// Constraint name e.g. minLength, MaxLength, Pattern, etc.
|
||||
name string
|
||||
|
||||
// Rule for constraint e.g. greater than 10, less than 5 etc.
|
||||
rule interface{}
|
||||
|
||||
// Chain validations for struct type
|
||||
chain []constraint
|
||||
}
|
||||
|
||||
// Validation stores parameter-wise validation.
|
||||
type validation struct {
|
||||
targetValue interface{}
|
||||
constraints []constraint
|
||||
}
|
||||
|
||||
// Constraint list
|
||||
const (
|
||||
empty = "Empty"
|
||||
null = "Null"
|
||||
readOnly = "ReadOnly"
|
||||
pattern = "Pattern"
|
||||
maxLength = "MaxLength"
|
||||
minLength = "MinLength"
|
||||
maxItems = "MaxItems"
|
||||
minItems = "MinItems"
|
||||
multipleOf = "MultipleOf"
|
||||
uniqueItems = "UniqueItems"
|
||||
inclusiveMaximum = "InclusiveMaximum"
|
||||
exclusiveMaximum = "ExclusiveMaximum"
|
||||
exclusiveMinimum = "ExclusiveMinimum"
|
||||
inclusiveMinimum = "InclusiveMinimum"
|
||||
)
|
||||
|
||||
// Validate method validates constraints on parameter
|
||||
// passed in validation array.
|
||||
func validate(m []validation) error {
|
||||
for _, item := range m {
|
||||
v := reflect.ValueOf(item.targetValue)
|
||||
for _, constraint := range item.constraints {
|
||||
var err error
|
||||
switch v.Kind() {
|
||||
case reflect.Ptr:
|
||||
err = validatePtr(v, constraint)
|
||||
case reflect.String:
|
||||
err = validateString(v, constraint)
|
||||
case reflect.Struct:
|
||||
err = validateStruct(v, constraint)
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
err = validateInt(v, constraint)
|
||||
case reflect.Float32, reflect.Float64:
|
||||
err = validateFloat(v, constraint)
|
||||
case reflect.Array, reflect.Slice, reflect.Map:
|
||||
err = validateArrayMap(v, constraint)
|
||||
default:
|
||||
err = createError(v, constraint, fmt.Sprintf("unknown type %v", v.Kind()))
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
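// Example (not part of the vendored source): a hypothetical in-package helper
// showing how the generated clients drive this machinery. rule: false on the
// "null" constraint means a nil pointer is acceptable; when the pointer is set,
// the chained inclusiveMinimum rule requires the value to be at least 1.
func checkPageSize(maxresults *int32) error {
	return validate([]validation{
		{targetValue: maxresults,
			constraints: []constraint{{target: "maxresults", name: null, rule: false,
				chain: []constraint{{target: "maxresults", name: inclusiveMinimum, rule: 1, chain: nil}}}}},
	})
}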
|
||||
|
||||
func validateStruct(x reflect.Value, v constraint, name ...string) error {
|
||||
// Get the field name from the target name, which is in the format a.b.c
|
||||
s := strings.Split(v.target, ".")
|
||||
f := x.FieldByName(s[len(s)-1])
|
||||
if isZero(f) {
|
||||
return createError(x, v, fmt.Sprintf("field %q doesn't exist", v.target))
|
||||
}
|
||||
err := validate([]validation{
|
||||
{
|
||||
targetValue: getInterfaceValue(f),
|
||||
constraints: []constraint{v},
|
||||
},
|
||||
})
|
||||
return err
|
||||
}
|
||||
|
||||
func validatePtr(x reflect.Value, v constraint) error {
|
||||
if v.name == readOnly {
|
||||
if !x.IsNil() {
|
||||
return createError(x.Elem(), v, "readonly parameter; must send as nil or empty in request")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
if x.IsNil() {
|
||||
return checkNil(x, v)
|
||||
}
|
||||
if v.chain != nil {
|
||||
return validate([]validation{
|
||||
{
|
||||
targetValue: getInterfaceValue(x.Elem()),
|
||||
constraints: v.chain,
|
||||
},
|
||||
})
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func validateInt(x reflect.Value, v constraint) error {
|
||||
i := x.Int()
|
||||
r, ok := v.rule.(int)
|
||||
if !ok {
|
||||
return createError(x, v, fmt.Sprintf("rule must be integer value for %v constraint; got: %v", v.name, v.rule))
|
||||
}
|
||||
switch v.name {
|
||||
case multipleOf:
|
||||
if i%int64(r) != 0 {
|
||||
return createError(x, v, fmt.Sprintf("value must be a multiple of %v", r))
|
||||
}
|
||||
case exclusiveMinimum:
|
||||
if i <= int64(r) {
|
||||
return createError(x, v, fmt.Sprintf("value must be greater than %v", r))
|
||||
}
|
||||
case exclusiveMaximum:
|
||||
if i >= int64(r) {
|
||||
return createError(x, v, fmt.Sprintf("value must be less than %v", r))
|
||||
}
|
||||
case inclusiveMinimum:
|
||||
if i < int64(r) {
|
||||
return createError(x, v, fmt.Sprintf("value must be greater than or equal to %v", r))
|
||||
}
|
||||
case inclusiveMaximum:
|
||||
if i > int64(r) {
|
||||
return createError(x, v, fmt.Sprintf("value must be less than or equal to %v", r))
|
||||
}
|
||||
default:
|
||||
return createError(x, v, fmt.Sprintf("constraint %v is not applicable for type integer", v.name))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func validateFloat(x reflect.Value, v constraint) error {
|
||||
f := x.Float()
|
||||
r, ok := v.rule.(float64)
|
||||
if !ok {
|
||||
return createError(x, v, fmt.Sprintf("rule must be float value for %v constraint; got: %v", v.name, v.rule))
|
||||
}
|
||||
switch v.name {
|
||||
case exclusiveMinimum:
|
||||
if f <= r {
|
||||
return createError(x, v, fmt.Sprintf("value must be greater than %v", r))
|
||||
}
|
||||
case exclusiveMaximum:
|
||||
if f >= r {
|
||||
return createError(x, v, fmt.Sprintf("value must be less than %v", r))
|
||||
}
|
||||
case inclusiveMinimum:
|
||||
if f < r {
|
||||
return createError(x, v, fmt.Sprintf("value must be greater than or equal to %v", r))
|
||||
}
|
||||
case inclusiveMaximum:
|
||||
if f > r {
|
||||
return createError(x, v, fmt.Sprintf("value must be less than or equal to %v", r))
|
||||
}
|
||||
default:
|
||||
return createError(x, v, fmt.Sprintf("constraint %s is not applicable for type float", v.name))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func validateString(x reflect.Value, v constraint) error {
|
||||
s := x.String()
|
||||
switch v.name {
|
||||
case empty:
|
||||
if len(s) == 0 {
|
||||
return checkEmpty(x, v)
|
||||
}
|
||||
case pattern:
|
||||
reg, err := regexp.Compile(v.rule.(string))
|
||||
if err != nil {
|
||||
return createError(x, v, err.Error())
|
||||
}
|
||||
if !reg.MatchString(s) {
|
||||
return createError(x, v, fmt.Sprintf("value doesn't match pattern %v", v.rule))
|
||||
}
|
||||
case maxLength:
|
||||
if _, ok := v.rule.(int); !ok {
|
||||
return createError(x, v, fmt.Sprintf("rule must be integer value for %v constraint; got: %v", v.name, v.rule))
|
||||
}
|
||||
if len(s) > v.rule.(int) {
|
||||
return createError(x, v, fmt.Sprintf("value length must be less than %v", v.rule))
|
||||
}
|
||||
case minLength:
|
||||
if _, ok := v.rule.(int); !ok {
|
||||
return createError(x, v, fmt.Sprintf("rule must be integer value for %v constraint; got: %v", v.name, v.rule))
|
||||
}
|
||||
if len(s) < v.rule.(int) {
|
||||
return createError(x, v, fmt.Sprintf("value length must be greater than %v", v.rule))
|
||||
}
|
||||
case readOnly:
|
||||
if len(s) > 0 {
|
||||
return createError(reflect.ValueOf(s), v, "readonly parameter; must send as nil or empty in request")
|
||||
}
|
||||
default:
|
||||
return createError(x, v, fmt.Sprintf("constraint %s is not applicable to string type", v.name))
|
||||
}
|
||||
if v.chain != nil {
|
||||
return validate([]validation{
|
||||
{
|
||||
targetValue: getInterfaceValue(x),
|
||||
constraints: v.chain,
|
||||
},
|
||||
})
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func validateArrayMap(x reflect.Value, v constraint) error {
|
||||
switch v.name {
|
||||
case null:
|
||||
if x.IsNil() {
|
||||
return checkNil(x, v)
|
||||
}
|
||||
case empty:
|
||||
if x.IsNil() || x.Len() == 0 {
|
||||
return checkEmpty(x, v)
|
||||
}
|
||||
case maxItems:
|
||||
if _, ok := v.rule.(int); !ok {
|
||||
return createError(x, v, fmt.Sprintf("rule must be integer for %v constraint; got: %v", v.name, v.rule))
|
||||
}
|
||||
if x.Len() > v.rule.(int) {
|
||||
return createError(x, v, fmt.Sprintf("maximum item limit is %v; got: %v", v.rule, x.Len()))
|
||||
}
|
||||
case minItems:
|
||||
if _, ok := v.rule.(int); !ok {
|
||||
return createError(x, v, fmt.Sprintf("rule must be integer for %v constraint; got: %v", v.name, v.rule))
|
||||
}
|
||||
if x.Len() < v.rule.(int) {
|
||||
return createError(x, v, fmt.Sprintf("minimum item limit is %v; got: %v", v.rule, x.Len()))
|
||||
}
|
||||
case uniqueItems:
|
||||
if x.Kind() == reflect.Array || x.Kind() == reflect.Slice {
|
||||
if !checkForUniqueInArray(x) {
|
||||
return createError(x, v, fmt.Sprintf("all items in parameter %q must be unique; got:%v", v.target, x))
|
||||
}
|
||||
} else if x.Kind() == reflect.Map {
|
||||
if !checkForUniqueInMap(x) {
|
||||
return createError(x, v, fmt.Sprintf("all items in parameter %q must be unique; got:%v", v.target, x))
|
||||
}
|
||||
} else {
|
||||
return createError(x, v, fmt.Sprintf("type must be array, slice or map for constraint %v; got: %v", v.name, x.Kind()))
|
||||
}
|
||||
case readOnly:
|
||||
if x.Len() != 0 {
|
||||
return createError(x, v, "readonly parameter; must send as nil or empty in request")
|
||||
}
|
||||
case pattern:
|
||||
reg, err := regexp.Compile(v.rule.(string))
|
||||
if err != nil {
|
||||
return createError(x, v, err.Error())
|
||||
}
|
||||
keys := x.MapKeys()
|
||||
for _, k := range keys {
|
||||
if !reg.MatchString(k.String()) {
|
||||
return createError(k, v, fmt.Sprintf("map key doesn't match pattern %v", v.rule))
|
||||
}
|
||||
}
|
||||
default:
|
||||
return createError(x, v, fmt.Sprintf("constraint %v is not applicable to array, slice and map type", v.name))
|
||||
}
|
||||
if v.chain != nil {
|
||||
return validate([]validation{
|
||||
{
|
||||
targetValue: getInterfaceValue(x),
|
||||
constraints: v.chain,
|
||||
},
|
||||
})
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func checkNil(x reflect.Value, v constraint) error {
|
||||
if _, ok := v.rule.(bool); !ok {
|
||||
return createError(x, v, fmt.Sprintf("rule must be bool value for %v constraint; got: %v", v.name, v.rule))
|
||||
}
|
||||
if v.rule.(bool) {
|
||||
return createError(x, v, "value can not be null; required parameter")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func checkEmpty(x reflect.Value, v constraint) error {
|
||||
if _, ok := v.rule.(bool); !ok {
|
||||
return createError(x, v, fmt.Sprintf("rule must be bool value for %v constraint; got: %v", v.name, v.rule))
|
||||
}
|
||||
if v.rule.(bool) {
|
||||
return createError(x, v, "value can not be null or empty; required parameter")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func checkForUniqueInArray(x reflect.Value) bool {
|
||||
if x == reflect.Zero(reflect.TypeOf(x)) || x.Len() == 0 {
|
||||
return false
|
||||
}
|
||||
arrOfInterface := make([]interface{}, x.Len())
|
||||
for i := 0; i < x.Len(); i++ {
|
||||
arrOfInterface[i] = x.Index(i).Interface()
|
||||
}
|
||||
m := make(map[interface{}]bool)
|
||||
for _, val := range arrOfInterface {
|
||||
if m[val] {
|
||||
return false
|
||||
}
|
||||
m[val] = true
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func checkForUniqueInMap(x reflect.Value) bool {
|
||||
if x == reflect.Zero(reflect.TypeOf(x)) || x.Len() == 0 {
|
||||
return false
|
||||
}
|
||||
mapOfInterface := make(map[interface{}]interface{}, x.Len())
|
||||
keys := x.MapKeys()
|
||||
for _, k := range keys {
|
||||
mapOfInterface[k.Interface()] = x.MapIndex(k).Interface()
|
||||
}
|
||||
m := make(map[interface{}]bool)
|
||||
for _, val := range mapOfInterface {
|
||||
if m[val] {
|
||||
return false
|
||||
}
|
||||
m[val] = true
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func getInterfaceValue(x reflect.Value) interface{} {
|
||||
if x.Kind() == reflect.Invalid {
|
||||
return nil
|
||||
}
|
||||
return x.Interface()
|
||||
}
|
||||
|
||||
func isZero(x interface{}) bool {
|
||||
return x == reflect.Zero(reflect.TypeOf(x)).Interface()
|
||||
}
|
||||
|
||||
func createError(x reflect.Value, v constraint, message string) error {
|
||||
return pipeline.NewError(nil, fmt.Sprintf("validation failed: parameter=%s constraint=%s value=%#v details: %s",
|
||||
v.target, v.name, getInterfaceValue(x), message))
|
||||
}
|
|
@ -0,0 +1,14 @@
|
|||
package azblob
|
||||
|
||||
// Code generated by Microsoft (R) AutoRest Code Generator.
|
||||
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
|
||||
|
||||
// UserAgent returns the UserAgent string to use when sending http.Requests.
|
||||
func UserAgent() string {
|
||||
return "Azure-SDK-For-Go/0.0.0 azblob/2018-11-09"
|
||||
}
|
||||
|
||||
// Version returns the semantic version (see http://semver.org) of the client.
|
||||
func Version() string {
|
||||
return "0.0.0"
|
||||
}
|
|
@ -0,0 +1,242 @@
|
|||
package azblob
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io"
|
||||
"net/http"
|
||||
"time"
|
||||
)
|
||||
|
||||
// BlobHTTPHeaders contains read/writeable blob properties.
|
||||
type BlobHTTPHeaders struct {
|
||||
ContentType string
|
||||
ContentMD5 []byte
|
||||
ContentEncoding string
|
||||
ContentLanguage string
|
||||
ContentDisposition string
|
||||
CacheControl string
|
||||
}
|
||||
|
||||
// NewHTTPHeaders returns the user-modifiable properties for this blob.
|
||||
func (bgpr BlobGetPropertiesResponse) NewHTTPHeaders() BlobHTTPHeaders {
|
||||
return BlobHTTPHeaders{
|
||||
ContentType: bgpr.ContentType(),
|
||||
ContentEncoding: bgpr.ContentEncoding(),
|
||||
ContentLanguage: bgpr.ContentLanguage(),
|
||||
ContentDisposition: bgpr.ContentDisposition(),
|
||||
CacheControl: bgpr.CacheControl(),
|
||||
ContentMD5: bgpr.ContentMD5(),
|
||||
}
|
||||
}
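// Example (not part of the vendored source): a hedged sketch of the usual
// read-modify-write pattern. SetHTTPHeaders replaces the whole header set, so
// starting from NewHTTPHeaders() keeps the fields that are not being changed.
// The hypothetical package name and the caller-supplied BlobURL are assumptions.
package blobexample

import (
	"context"

	"github.com/Azure/azure-storage-blob-go/azblob"
)

// setCacheControl updates only Cache-Control while preserving the other headers.
func setCacheControl(ctx context.Context, blobURL azblob.BlobURL) error {
	props, err := blobURL.GetProperties(ctx, azblob.BlobAccessConditions{})
	if err != nil {
		return err
	}
	headers := props.NewHTTPHeaders() // current user-modifiable properties
	headers.CacheControl = "max-age=3600"
	_, err = blobURL.SetHTTPHeaders(ctx, headers, azblob.BlobAccessConditions{})
	return err
}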
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
// NewHTTPHeaders returns the user-modifiable properties for this blob.
|
||||
func (dr downloadResponse) NewHTTPHeaders() BlobHTTPHeaders {
|
||||
return BlobHTTPHeaders{
|
||||
ContentType: dr.ContentType(),
|
||||
ContentEncoding: dr.ContentEncoding(),
|
||||
ContentLanguage: dr.ContentLanguage(),
|
||||
ContentDisposition: dr.ContentDisposition(),
|
||||
CacheControl: dr.CacheControl(),
|
||||
ContentMD5: dr.ContentMD5(),
|
||||
}
|
||||
}
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
// DownloadResponse wraps the AutoRest-generated downloadResponse and helps to provide info for retry.
|
||||
type DownloadResponse struct {
|
||||
r *downloadResponse
|
||||
ctx context.Context
|
||||
b BlobURL
|
||||
getInfo HTTPGetterInfo
|
||||
}
|
||||
|
||||
// Body constructs a new RetryReader stream for reading data. If a connection fails
|
||||
// while reading, it will make additional requests to reestablish a connection and
|
||||
// continue reading. Specifying a RetryReaderOptions value with MaxRetryRequests set to 0
|
||||
// (the default) returns the original response body, and no retries will be performed.
|
||||
func (r *DownloadResponse) Body(o RetryReaderOptions) io.ReadCloser {
|
||||
if o.MaxRetryRequests == 0 { // No additional retries
|
||||
return r.Response().Body
|
||||
}
|
||||
return NewRetryReader(r.ctx, r.Response(), r.getInfo, o,
|
||||
func(ctx context.Context, getInfo HTTPGetterInfo) (*http.Response, error) {
|
||||
resp, err := r.b.Download(ctx, getInfo.Offset, getInfo.Count,
|
||||
BlobAccessConditions{
|
||||
ModifiedAccessConditions: ModifiedAccessConditions{IfMatch: getInfo.ETag},
|
||||
},
|
||||
false)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resp.Response(), err
|
||||
},
|
||||
)
|
||||
}
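// Example (not part of the vendored source): a hedged sketch showing the Body
// option in practice; with MaxRetryRequests set to 3, dropped connections while
// reading are re-established transparently by the retry reader described above.
// The hypothetical package name and the caller-supplied BlobURL are assumptions.
package blobexample

import (
	"context"
	"io/ioutil"

	"github.com/Azure/azure-storage-blob-go/azblob"
)

// readBlob downloads a blob and reads the whole body through the retrying reader.
func readBlob(ctx context.Context, blobURL azblob.BlobURL) ([]byte, error) {
	resp, err := blobURL.Download(ctx, 0, azblob.CountToEnd, azblob.BlobAccessConditions{}, false)
	if err != nil {
		return nil, err
	}
	body := resp.Body(azblob.RetryReaderOptions{MaxRetryRequests: 3})
	defer body.Close()
	return ioutil.ReadAll(body)
}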
|
||||
|
||||
// Response returns the raw HTTP response object.
|
||||
func (r DownloadResponse) Response() *http.Response {
|
||||
return r.r.Response()
|
||||
}
|
||||
|
||||
// NewHTTPHeaders returns the user-modifiable properties for this blob.
|
||||
func (r DownloadResponse) NewHTTPHeaders() BlobHTTPHeaders {
|
||||
return r.r.NewHTTPHeaders()
|
||||
}
|
||||
|
||||
// BlobContentMD5 returns the value for header x-ms-blob-content-md5.
|
||||
func (r DownloadResponse) BlobContentMD5() []byte {
|
||||
return r.r.BlobContentMD5()
|
||||
}
|
||||
|
||||
// ContentMD5 returns the value for header Content-MD5.
|
||||
func (r DownloadResponse) ContentMD5() []byte {
|
||||
return r.r.ContentMD5()
|
||||
}
|
||||
|
||||
// StatusCode returns the HTTP status code of the response, e.g. 200.
|
||||
func (r DownloadResponse) StatusCode() int {
|
||||
return r.r.StatusCode()
|
||||
}
|
||||
|
||||
// Status returns the HTTP status message of the response, e.g. "200 OK".
|
||||
func (r DownloadResponse) Status() string {
|
||||
return r.r.Status()
|
||||
}
|
||||
|
||||
// AcceptRanges returns the value for header Accept-Ranges.
|
||||
func (r DownloadResponse) AcceptRanges() string {
|
||||
return r.r.AcceptRanges()
|
||||
}
|
||||
|
||||
// BlobCommittedBlockCount returns the value for header x-ms-blob-committed-block-count.
|
||||
func (r DownloadResponse) BlobCommittedBlockCount() int32 {
|
||||
return r.r.BlobCommittedBlockCount()
|
||||
}
|
||||
|
||||
// BlobSequenceNumber returns the value for header x-ms-blob-sequence-number.
|
||||
func (r DownloadResponse) BlobSequenceNumber() int64 {
|
||||
return r.r.BlobSequenceNumber()
|
||||
}
|
||||
|
||||
// BlobType returns the value for header x-ms-blob-type.
|
||||
func (r DownloadResponse) BlobType() BlobType {
|
||||
return r.r.BlobType()
|
||||
}
|
||||
|
||||
// CacheControl returns the value for header Cache-Control.
|
||||
func (r DownloadResponse) CacheControl() string {
|
||||
return r.r.CacheControl()
|
||||
}
|
||||
|
||||
// ContentDisposition returns the value for header Content-Disposition.
|
||||
func (r DownloadResponse) ContentDisposition() string {
|
||||
return r.r.ContentDisposition()
|
||||
}
|
||||
|
||||
// ContentEncoding returns the value for header Content-Encoding.
|
||||
func (r DownloadResponse) ContentEncoding() string {
|
||||
return r.r.ContentEncoding()
|
||||
}
|
||||
|
||||
// ContentLanguage returns the value for header Content-Language.
|
||||
func (r DownloadResponse) ContentLanguage() string {
|
||||
return r.r.ContentLanguage()
|
||||
}
|
||||
|
||||
// ContentLength returns the value for header Content-Length.
|
||||
func (r DownloadResponse) ContentLength() int64 {
|
||||
return r.r.ContentLength()
|
||||
}
|
||||
|
||||
// ContentRange returns the value for header Content-Range.
|
||||
func (r DownloadResponse) ContentRange() string {
|
||||
return r.r.ContentRange()
|
||||
}
|
||||
|
||||
// ContentType returns the value for header Content-Type.
|
||||
func (r DownloadResponse) ContentType() string {
|
||||
return r.r.ContentType()
|
||||
}
|
||||
|
||||
// CopyCompletionTime returns the value for header x-ms-copy-completion-time.
|
||||
func (r DownloadResponse) CopyCompletionTime() time.Time {
|
||||
return r.r.CopyCompletionTime()
|
||||
}
|
||||
|
||||
// CopyID returns the value for header x-ms-copy-id.
|
||||
func (r DownloadResponse) CopyID() string {
|
||||
return r.r.CopyID()
|
||||
}
|
||||
|
||||
// CopyProgress returns the value for header x-ms-copy-progress.
|
||||
func (r DownloadResponse) CopyProgress() string {
|
||||
return r.r.CopyProgress()
|
||||
}
|
||||
|
||||
// CopySource returns the value for header x-ms-copy-source.
|
||||
func (r DownloadResponse) CopySource() string {
|
||||
return r.r.CopySource()
|
||||
}
|
||||
|
||||
// CopyStatus returns the value for header x-ms-copy-status.
|
||||
func (r DownloadResponse) CopyStatus() CopyStatusType {
|
||||
return r.r.CopyStatus()
|
||||
}
|
||||
|
||||
// CopyStatusDescription returns the value for header x-ms-copy-status-description.
|
||||
func (r DownloadResponse) CopyStatusDescription() string {
|
||||
return r.r.CopyStatusDescription()
|
||||
}
|
||||
|
||||
// Date returns the value for header Date.
|
||||
func (r DownloadResponse) Date() time.Time {
|
||||
return r.r.Date()
|
||||
}
|
||||
|
||||
// ETag returns the value for header ETag.
|
||||
func (r DownloadResponse) ETag() ETag {
|
||||
return r.r.ETag()
|
||||
}
|
||||
|
||||
// IsServerEncrypted returns the value for header x-ms-server-encrypted.
|
||||
func (r DownloadResponse) IsServerEncrypted() string {
|
||||
return r.r.IsServerEncrypted()
|
||||
}
|
||||
|
||||
// LastModified returns the value for header Last-Modified.
|
||||
func (r DownloadResponse) LastModified() time.Time {
|
||||
return r.r.LastModified()
|
||||
}
|
||||
|
||||
// LeaseDuration returns the value for header x-ms-lease-duration.
|
||||
func (r DownloadResponse) LeaseDuration() LeaseDurationType {
|
||||
return r.r.LeaseDuration()
|
||||
}
|
||||
|
||||
// LeaseState returns the value for header x-ms-lease-state.
|
||||
func (r DownloadResponse) LeaseState() LeaseStateType {
|
||||
return r.r.LeaseState()
|
||||
}
|
||||
|
||||
// LeaseStatus returns the value for header x-ms-lease-status.
|
||||
func (r DownloadResponse) LeaseStatus() LeaseStatusType {
|
||||
return r.r.LeaseStatus()
|
||||
}
|
||||
|
||||
// RequestID returns the value for header x-ms-request-id.
|
||||
func (r DownloadResponse) RequestID() string {
|
||||
return r.r.RequestID()
|
||||
}
|
||||
|
||||
// Version returns the value for header x-ms-version.
|
||||
func (r DownloadResponse) Version() string {
|
||||
return r.r.Version()
|
||||
}
|
||||
|
||||
// NewMetadata returns user-defined key/value pairs.
|
||||
func (r DownloadResponse) NewMetadata() Metadata {
|
||||
return r.r.NewMetadata()
|
||||
}
|
|
@ -64,15 +64,14 @@ type (
|
|||
|
||||
// AADSASCredentialWithEnvironmentVars configures the TokenProvider using the environment variables available
|
||||
//
|
||||
// 1. Client Credentials: attempt to authenticate with a Service Principal via "AZURE_TENANT_ID", "AZURE_CLIENT_ID" and
|
||||
// "AZURE_CLIENT_SECRET"
|
||||
// 1. Client Credentials: attempt to authenticate with a Service Principal via "AZURE_TENANT_ID", "AZURE_CLIENT_ID" and
|
||||
// "AZURE_CLIENT_SECRET"
|
||||
//
|
||||
// 2. Client Certificate: attempt to authenticate with a Service Principal via "AZURE_TENANT_ID", "AZURE_CLIENT_ID",
|
||||
// "AZURE_CERTIFICATE_PATH" and "AZURE_CERTIFICATE_PASSWORD"
|
||||
// 2. Client Certificate: attempt to authenticate with a Service Principal via "AZURE_TENANT_ID", "AZURE_CLIENT_ID",
|
||||
// "AZURE_CERTIFICATE_PATH" and "AZURE_CERTIFICATE_PASSWORD"
|
||||
//
|
||||
// 3. Managed Service Identity (MSI): attempt to authenticate via MSI
|
||||
//
|
||||
//
|
||||
// The Azure Environment used can be specified using the name of the Azure Environment set in "AZURE_ENVIRONMENT" var.
|
||||
func AADSASCredentialWithEnvironmentVars() AADSASCredentialOption {
|
||||
return func(config *aad.TokenProviderConfiguration) error {
|
||||
|
|
|
@ -29,6 +29,7 @@ import (
|
|||
"strings"
|
||||
"testing"
|
||||
|
||||
azblobvendor "github.com/Azure/azure-event-hubs-go/v3/internal/azure-storage-blob-go/azblob"
|
||||
"github.com/Azure/azure-storage-blob-go/azblob"
|
||||
"github.com/stretchr/testify/suite"
|
||||
|
||||
|
@ -74,19 +75,19 @@ func (ts *testSuite) TestCredential() {
|
|||
ts.T().Error(err)
|
||||
}
|
||||
|
||||
containerURL := azblob.NewContainerURL(*fooURL, pipeline)
|
||||
containerURL := azblobvendor.NewContainerURL(*fooURL, pipeline)
|
||||
defer func() {
|
||||
if res, err := containerURL.Delete(ctx, azblob.ContainerAccessConditions{}); err != nil {
|
||||
if res, err := containerURL.Delete(ctx, azblobvendor.ContainerAccessConditions{}); err != nil {
|
||||
log.Fatal(err, res)
|
||||
}
|
||||
}()
|
||||
_, err = containerURL.Create(ctx, azblob.Metadata{}, azblob.PublicAccessNone)
|
||||
_, err = containerURL.Create(ctx, azblobvendor.Metadata{}, azblobvendor.PublicAccessNone)
|
||||
if err != nil {
|
||||
ts.T().Error(err)
|
||||
}
|
||||
|
||||
blobURL := containerURL.NewBlobURL(blobName).ToBlockBlobURL()
|
||||
_, err = blobURL.Upload(ctx, strings.NewReader(message), azblob.BlobHTTPHeaders{}, azblob.Metadata{}, azblob.BlobAccessConditions{})
|
||||
_, err = blobURL.Upload(ctx, strings.NewReader(message), azblobvendor.BlobHTTPHeaders{}, azblobvendor.Metadata{}, azblobvendor.BlobAccessConditions{})
|
||||
if err != nil {
|
||||
ts.T().Error(err)
|
||||
}
|
||||
|
|
|
@ -35,7 +35,7 @@ import (
|
|||
"github.com/Azure/azure-amqp-common-go/v3/auth"
|
||||
"github.com/Azure/azure-storage-blob-go/azblob"
|
||||
|
||||
"github.com/Azure/azure-event-hubs-go/v3"
|
||||
eventhub "github.com/Azure/azure-event-hubs-go/v3"
|
||||
"github.com/Azure/azure-event-hubs-go/v3/eph"
|
||||
)
|
||||
|
||||
|
|
|
@ -42,6 +42,7 @@ import (
|
|||
"github.com/Azure/azure-event-hubs-go/v3/eph"
|
||||
"github.com/Azure/azure-event-hubs-go/v3/persist"
|
||||
|
||||
azblobvendor "github.com/Azure/azure-event-hubs-go/v3/internal/azure-storage-blob-go/azblob"
|
||||
"github.com/Azure/azure-storage-blob-go/azblob"
|
||||
"github.com/Azure/go-autorest/autorest/azure"
|
||||
)
|
||||
|
@ -55,8 +56,8 @@ type (
|
|||
processor *eph.EventProcessorHost
|
||||
leaseDuration time.Duration
|
||||
credential Credential
|
||||
containerURL *azblob.ContainerURL
|
||||
serviceURL *azblob.ServiceURL
|
||||
containerURL *azblobvendor.ContainerURL
|
||||
serviceURL *azblobvendor.ServiceURL
|
||||
containerName string
|
||||
accountName string
|
||||
blobPathPrefix string
|
||||
|
@ -70,9 +71,9 @@ type (
|
|||
storageLease struct {
|
||||
*eph.Lease
|
||||
leaser *LeaserCheckpointer
|
||||
Checkpoint *persist.Checkpoint `json:"checkpoint"`
|
||||
State azblob.LeaseStateType `json:"state"`
|
||||
Token string `json:"token"`
|
||||
Checkpoint *persist.Checkpoint `json:"checkpoint"`
|
||||
State azblobvendor.LeaseStateType `json:"state"`
|
||||
Token string `json:"token"`
|
||||
}
|
||||
|
||||
// Credential is a wrapper for the Azure Storage azblob.Credential
|
||||
|
@ -106,7 +107,7 @@ func NewStorageLeaserCheckpointer(credential Credential, accountName, containerN
|
|||
return nil, err
|
||||
}
|
||||
|
||||
svURL := azblob.NewServiceURL(*storageURL, azblob.NewPipeline(credential, azblob.PipelineOptions{}))
|
||||
svURL := azblobvendor.NewServiceURL(*storageURL, azblob.NewPipeline(credential, azblob.PipelineOptions{}))
|
||||
containerURL := svURL.NewContainerURL(containerName)
|
||||
|
||||
ls := &LeaserCheckpointer{
|
||||
|
@ -146,13 +147,13 @@ func (sl *LeaserCheckpointer) StoreExists(ctx context.Context) (bool, error) {
|
|||
defer span.End()
|
||||
|
||||
containerURL := sl.serviceURL.NewContainerURL(sl.containerName)
|
||||
_, err := containerURL.GetProperties(ctx, azblob.LeaseAccessConditions{})
|
||||
_, err := containerURL.GetProperties(ctx, azblobvendor.LeaseAccessConditions{})
|
||||
|
||||
if err == nil {
|
||||
return true, nil
|
||||
}
|
||||
|
||||
var respErr azblob.ResponseError
|
||||
var respErr azblobvendor.ResponseError
|
||||
|
||||
if errors.As(err, &respErr) {
|
||||
if respErr.Response().StatusCode == http.StatusNotFound {
|
||||
|
@ -177,15 +178,15 @@ func (sl *LeaserCheckpointer) EnsureStore(ctx context.Context) error {
|
|||
|
||||
if !ok {
|
||||
containerURL := sl.serviceURL.NewContainerURL(sl.containerName)
|
||||
_, err := containerURL.Create(ctx, azblob.Metadata{}, azblob.PublicAccessNone)
|
||||
_, err := containerURL.Create(ctx, azblobvendor.Metadata{}, azblobvendor.PublicAccessNone)
|
||||
|
||||
if err != nil {
|
||||
var storageErr azblob.StorageError
|
||||
var storageErr azblobvendor.StorageError
|
||||
|
||||
if errors.As(err, &storageErr) {
|
||||
// we're okay if the container has been created - we're basically racing against
|
||||
// other LeaserCheckpointers.
|
||||
if storageErr.ServiceCode() != azblob.ServiceCodeContainerAlreadyExists {
|
||||
if storageErr.ServiceCode() != azblobvendor.ServiceCodeContainerAlreadyExists {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
|
@ -206,7 +207,7 @@ func (sl *LeaserCheckpointer) DeleteStore(ctx context.Context) error {
|
|||
span, ctx := startConsumerSpanFromContext(ctx, "storage.LeaserCheckpointer.DeleteStore")
|
||||
defer span.End()
|
||||
|
||||
_, err := sl.containerURL.Delete(ctx, azblob.ContainerAccessConditions{})
|
||||
_, err := sl.containerURL.Delete(ctx, azblobvendor.ContainerAccessConditions{})
|
||||
return err
|
||||
}
|
||||
|
||||
|
@ -267,7 +268,7 @@ func (sl *LeaserCheckpointer) DeleteLease(ctx context.Context, partitionID strin
|
|||
span, ctx := startConsumerSpanFromContext(ctx, "storage.LeaserCheckpointer.DeleteLease")
|
||||
defer span.End()
|
||||
|
||||
_, err := sl.containerURL.NewBlobURL(sl.blobPathPrefix+partitionID).Delete(ctx, azblob.DeleteSnapshotsOptionInclude, azblob.BlobAccessConditions{})
|
||||
_, err := sl.containerURL.NewBlobURL(sl.blobPathPrefix+partitionID).Delete(ctx, azblobvendor.DeleteSnapshotsOptionInclude, azblobvendor.BlobAccessConditions{})
|
||||
delete(sl.leases, partitionID)
|
||||
return err
|
||||
}
|
||||
|
@ -287,7 +288,7 @@ func (sl *LeaserCheckpointer) AcquireLease(ctx context.Context, partitionID stri
|
|||
return nil, false, nil
|
||||
}
|
||||
|
||||
res, err := blobURL.GetProperties(ctx, azblob.BlobAccessConditions{})
|
||||
res, err := blobURL.GetProperties(ctx, azblobvendor.BlobAccessConditions{})
|
||||
if err != nil {
|
||||
tab.For(ctx).Error(err)
|
||||
return nil, false, err
|
||||
|
@ -300,15 +301,15 @@ func (sl *LeaserCheckpointer) AcquireLease(ctx context.Context, partitionID stri
|
|||
}
|
||||
|
||||
newToken := uuidToken.String()
|
||||
if res.LeaseState() == azblob.LeaseStateLeased {
|
||||
if res.LeaseState() == azblobvendor.LeaseStateLeased {
|
||||
// is leased by someone else due to a race to acquire
|
||||
_, err := blobURL.ChangeLease(ctx, lease.Token, newToken, azblob.ModifiedAccessConditions{})
|
||||
_, err := blobURL.ChangeLease(ctx, lease.Token, newToken, azblobvendor.ModifiedAccessConditions{})
|
||||
if err != nil {
|
||||
tab.For(ctx).Error(err)
|
||||
return nil, false, err
|
||||
}
|
||||
} else {
|
||||
_, err = blobURL.AcquireLease(ctx, newToken, int32(sl.leaseDuration.Round(time.Second).Seconds()), azblob.ModifiedAccessConditions{})
|
||||
_, err = blobURL.AcquireLease(ctx, newToken, int32(sl.leaseDuration.Round(time.Second).Seconds()), azblobvendor.ModifiedAccessConditions{})
|
||||
if err != nil {
|
||||
tab.For(ctx).Error(err)
|
||||
return nil, false, err
|
||||
|
@ -340,7 +341,7 @@ func (sl *LeaserCheckpointer) RenewLease(ctx context.Context, partitionID string
|
|||
return nil, false, errors.New("lease was not found")
|
||||
}
|
||||
|
||||
_, err := blobURL.RenewLease(ctx, lease.Token, azblob.ModifiedAccessConditions{})
|
||||
_, err := blobURL.RenewLease(ctx, lease.Token, azblobvendor.ModifiedAccessConditions{})
|
||||
if err != nil {
|
||||
tab.For(ctx).Error(err)
|
||||
return nil, false, err
|
||||
|
@ -362,7 +363,7 @@ func (sl *LeaserCheckpointer) ReleaseLease(ctx context.Context, partitionID stri
|
|||
return false, errors.New("lease was not found")
|
||||
}
|
||||
|
||||
_, err := blobURL.ReleaseLease(ctx, lease.Token, azblob.ModifiedAccessConditions{})
|
||||
_, err := blobURL.ReleaseLease(ctx, lease.Token, azblobvendor.ModifiedAccessConditions{})
|
||||
if err != nil {
|
||||
tab.For(ctx).Error(err)
|
||||
return false, err
|
||||
|
@ -393,7 +394,7 @@ func (sl *LeaserCheckpointer) updateLease(ctx context.Context, partitionID strin
|
|||
return nil, false, errors.New("lease was not found")
|
||||
}
|
||||
|
||||
_, err := blobURL.RenewLease(ctx, lease.Token, azblob.ModifiedAccessConditions{})
|
||||
_, err := blobURL.RenewLease(ctx, lease.Token, azblobvendor.ModifiedAccessConditions{})
|
||||
if err != nil {
|
||||
tab.For(ctx).Error(err)
|
||||
return nil, false, err
|
||||
|
@ -598,8 +599,8 @@ func (sl *LeaserCheckpointer) uploadLease(ctx context.Context, lease *storageLea
|
|||
return err
|
||||
}
|
||||
reader := bytes.NewReader(jsonLease)
|
||||
_, err = blobURL.ToBlockBlobURL().Upload(ctx, reader, azblob.BlobHTTPHeaders{}, azblob.Metadata{}, azblob.BlobAccessConditions{
|
||||
LeaseAccessConditions: azblob.LeaseAccessConditions{
|
||||
_, err = blobURL.ToBlockBlobURL().Upload(ctx, reader, azblobvendor.BlobHTTPHeaders{}, azblobvendor.Metadata{}, azblobvendor.BlobAccessConditions{
|
||||
LeaseAccessConditions: azblobvendor.LeaseAccessConditions{
|
||||
LeaseID: lease.Token,
|
||||
},
|
||||
})
|
||||
|
@ -622,8 +623,8 @@ func (sl *LeaserCheckpointer) createOrGetLease(ctx context.Context, partitionID
|
|||
return nil, err
|
||||
}
|
||||
reader := bytes.NewReader(jsonLease)
|
||||
res, err := blobURL.ToBlockBlobURL().Upload(ctx, reader, azblob.BlobHTTPHeaders{}, azblob.Metadata{}, azblob.BlobAccessConditions{
|
||||
ModifiedAccessConditions: azblob.ModifiedAccessConditions{
|
||||
res, err := blobURL.ToBlockBlobURL().Upload(ctx, reader, azblobvendor.BlobHTTPHeaders{}, azblobvendor.Metadata{}, azblobvendor.BlobAccessConditions{
|
||||
ModifiedAccessConditions: azblobvendor.ModifiedAccessConditions{
|
||||
IfNoneMatch: "*",
|
||||
},
|
||||
})
|
||||
|
@ -643,14 +644,14 @@ func (sl *LeaserCheckpointer) getLease(ctx context.Context, partitionID string)
|
|||
defer span.End()
|
||||
|
||||
blobURL := sl.containerURL.NewBlobURL(sl.blobPathPrefix + partitionID)
|
||||
res, err := blobURL.Download(ctx, 0, azblob.CountToEnd, azblob.BlobAccessConditions{}, false)
|
||||
res, err := blobURL.Download(ctx, 0, azblobvendor.CountToEnd, azblobvendor.BlobAccessConditions{}, false)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return sl.leaseFromResponse(res)
|
||||
}
|
||||
|
||||
func (sl *LeaserCheckpointer) leaseFromResponse(res *azblob.DownloadResponse) (*storageLease, error) {
|
||||
func (sl *LeaserCheckpointer) leaseFromResponse(res *azblobvendor.DownloadResponse) (*storageLease, error) {
|
||||
b, err := ioutil.ReadAll(res.Response().Body)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
@ -684,7 +685,7 @@ func (s *storageLease) IsExpired(ctx context.Context) bool {
|
|||
if err != nil {
|
||||
return false
|
||||
}
|
||||
return lease.State != azblob.LeaseStateLeased
|
||||
return lease.State != azblobvendor.LeaseStateLeased
|
||||
}
|
||||
|
||||
func (s *storageLease) String() string {
|
||||
|
|
|
@ -2,5 +2,5 @@ package eventhub
|
|||
|
||||
const (
|
||||
// Version is the semantic version number
|
||||
Version = "3.3.18"
|
||||
Version = "3.3.19"
|
||||
)
|
||||
|
|