Merge pull request #27 from XiaoningLiu/10.2.0-preview-release

10.2.0 preview release
This commit is contained in:
Vincent Jiang (LEI) 2018-11-27 18:13:11 -08:00 committed by GitHub
Parent f9deab4603 d84fd99715
Commit 09f6a7ffe0
No known key found for this signature
GPG key ID: 4AEE18F83AFDEB23
39 changed files with 2926 additions and 819 deletions

View File

@ -10,9 +10,9 @@ The Azure Storage development team uses Visual Studio Code. However, any preferr
### Install
- Node.js valid LTS versions (>=6.5.0)
- Browsers like Chrome, Edge or Firefox
- Clone the source code from GitHub
* Node.js valid LTS versions (>=6.5.0)
* Browsers like Chrome, Edge or Firefox
* Clone the source code from GitHub
## Tests
@ -32,24 +32,19 @@ You need to set up CORS rules for your storage account if you need to develop fo
For example, you can create the following CORS settings for debugging. But please customize the settings carefully according to your requirements in a production environment. (A programmatic sketch follows the list below.)
- Allowed origins: *
- Allowed verbs: DELETE,GET,HEAD,MERGE,POST,OPTIONS,PUT
- Allowed headers: *
- Exposed headers: *
- Maximum age (seconds): 86400
* Allowed origins: *
* Allowed verbs: DELETE,GET,HEAD,MERGE,POST,OPTIONS,PUT
* Allowed headers: *
* Exposed headers: *
* Maximum age (seconds): 86400
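These debugging settings can also be applied programmatically. A minimal sketch using the service properties API (assuming a `serviceURL` already wired with a credential pipeline; the rule shape follows `Models.CorsRule`):

```typescript
// Sketch only: permissive CORS rules for local debugging, not for production.
await serviceURL.setProperties(Aborter.none, {
  cors: [
    {
      allowedHeaders: "*",
      allowedMethods: "DELETE,GET,HEAD,MERGE,POST,OPTIONS,PUT",
      allowedOrigins: "*",
      exposedHeaders: "*",
      maxAgeInSeconds: 86400
    }
  ]
});
```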
### Building
This project is based on TypeScript. For Node.js, generate commonJS module formats, build with:
This project is based on TypeScript. To generate CommonJS modules for Node.js and JavaScript bundles for browsers, build with:
```bash
npm run build:cjs
```
Generate JavaScript bundles for browsers:
```bash
npm run build:browser
npm install
npm run build
```
### Running
@ -58,6 +53,7 @@ To actually run tests in Node.js:
```bash
npm install
npm run build
npm run test:node
```
@ -65,9 +61,12 @@ Run tests in Browsers. After installed Chrome, the default testing browser:
```bash
npm install
npm run build
npm run test:browser
```
Browser testing is based on Karma; you can change the default testing browser by modifying the karma.conf.js file.
### Testing Features
As you develop a feature, you'll need to write tests to ensure quality. You should also run existing tests related to your change to catch any unexpected breakage in both Node.js and browsers.
@ -78,16 +77,16 @@ As you develop a feature, you'll need to write tests to ensure quality. You shou
The following are the minimum requirements for any pull request that must be met before contributions can be accepted.
- Make sure you've signed the CLA before you start working on any change.
- Discuss any proposed contribution with the team via a GitHub issue **before** starting development.
- Code must be professional quality
- No style issues
- You should strive to mimic the style with which we have written the library
- Clean, well-commented, well-designed code
- Try to limit the number of commits for a feature to 1-2. If you end up having too many we may ask you to squash your changes into fewer commits.
* Make sure you've signed the CLA before you start working on any change.
* Discuss any proposed contribution with the team via a GitHub issue **before** starting development.
* Code must be professional quality
* No style issues
* You should strive to mimic the style with which we have written the library
* Clean, well-commented, well-designed code
* Try to limit the number of commits for a feature to 1-2. If you end up having too many we may ask you to squash your changes into fewer commits.
- ChangeLog.md needs to be updated describing the new change
- Thoroughly test your feature
* ChangeLog.md needs to be updated describing the new change
* Thoroughly test your feature
### Branching Policy

View File

@ -1,6 +1,7 @@
# Azure Storage SDK V10 for JavaScript
* @azure/storage-blob [![npm version](https://badge.fury.io/js/%40azure%2Fstorage-blob.svg)](https://badge.fury.io/js/%40azure%2Fstorage-blob)
* [API Reference documentation](https://docs.microsoft.com/en-us/javascript/api/overview/azure/storage/client?view=azure-node-preview)
## Introduction
@ -39,7 +40,7 @@ This library depends on following ES6 features which need external polyfills loa
#### Differences between Node.js and browsers
There are differences between Node.js and browsers runtime. When getting start with this SDK, pay attention to APIs or classes marked with *"ONLY AVAILABLE IN NODE.JS RUNTIME"* or *"ONLY AVAILABLE IN BROWSERS"*.
There are differences between the Node.js and browser runtimes. When getting started with this SDK, pay attention to APIs or classes marked with _"ONLY AVAILABLE IN NODE.JS RUNTIME"_ or _"ONLY AVAILABLE IN BROWSERS"_.
##### The following features, interfaces, classes or functions are only available in Node.js
@ -104,10 +105,10 @@ You need to set up [Cross-Origin Resource Sharing (CORS)](https://docs.microsoft
For example, you can create the following CORS settings for debugging. But please customize the settings carefully according to your requirements in a production environment.
* Allowed origins: *
* Allowed origins: \*
* Allowed verbs: DELETE,GET,HEAD,MERGE,POST,OPTIONS,PUT
* Allowed headers: *
* Exposed headers: *
* Allowed headers: \*
* Exposed headers: \*
* Maximum age (seconds): 86400
## SDK Architecture
@ -143,12 +144,12 @@ async function main() {
// Use TokenCredential with OAuth token
const tokenCredential = new TokenCredential("token");
tokenCredential.token = "renewedToken";
tokenCredential.token = "renewedToken"; // Renew the token by updating token field of token credential
// Use AnonymousCredential when url already includes a SAS signature
const anonymousCredential = new AnonymousCredential();
// Use sharedKeyCredential, tokenCredential or tokenCredential to create a pipeline
// Use sharedKeyCredential, tokenCredential or anonymousCredential to create a pipeline
const pipeline = StorageURL.newPipeline(sharedKeyCredential);
// List containers
@ -165,7 +166,7 @@ async function main() {
marker
);
marker = listContainersResponse.marker;
marker = listContainersResponse.nextMarker;
for (const container of listContainersResponse.containerItems) {
console.log(`Container: ${container.name}`);
}
@ -197,13 +198,14 @@ async function main() {
);
// List blobs
marker = undefined;
do {
const listBlobsResponse = await containerURL.listBlobFlatSegment(
Aborter.none,
marker
);
marker = listBlobsResponse.marker;
marker = listBlobsResponse.nextMarker;
for (const blob of listBlobsResponse.segment.blobItems) {
console.log(`Blob: ${blob.name}`);
}
@ -215,7 +217,7 @@ async function main() {
const downloadBlockBlobResponse = await blobURL.download(Aborter.none, 0);
console.log(
"Downloaded blob content",
downloadBlockBlobResponse.readableStreamBody.read(content.length).toString()
await streamToString(downloadBlockBlobResponse.readableStreamBody)
);
// Delete container
@ -224,6 +226,20 @@ async function main() {
console.log("deleted container");
}
// A helper method used to read a Node.js readable stream into string
async function streamToString(readableStream) {
return new Promise((resolve, reject) => {
const chunks = [];
readableStream.on("data", data => {
chunks.push(data.toString());
});
readableStream.on("end", () => {
resolve(chunks.join(""));
});
readableStream.on("error", reject);
});
}
// An async method returns a Promise object, which is compatible with then().catch() coding style.
main()
.then(() => {

View File

@ -1,5 +1,10 @@
# Breaking Changes
2018.11 10.2.0-preview
* Updated names of exported interfaces `IHTTPPipelineLogger` & `IHTTPClient` to `IHttpPipelineLogger` & `IHttpClient`.
* For `setMetadata()` and `setHTTPHeaders()`, `metadata` and `blobHTTPHeaders` are moved from `options` into the top-level parameter list (see the sketch below).
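For illustration, a minimal before/after sketch of the second change (`blobURL`, `metadata` and `blobHTTPHeaders` are placeholders):

```typescript
// 10.1.0-preview: values passed inside the options bag
await blobURL.setMetadata(Aborter.none, { metadata });
await blobURL.setHTTPHeaders(Aborter.none, { blobHTTPHeaders });

// 10.2.0-preview: values promoted to top-level parameters
await blobURL.setMetadata(Aborter.none, metadata);
await blobURL.setHTTPHeaders(Aborter.none, blobHTTPHeaders);
```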
2018.09 10.1.0-preview
* `Aborter.None` is renamed to `Aborter.none` for JavaScript naming conventions.

View File

@ -1,5 +1,19 @@
# Changelog
2018.11 10.2.0-preview
* [Breaking] Updated names of exported interfaces `IHTTPPipelineLogger` & `IHTTPClient` to `IHttpPipelineLogger` & `IHttpClient`.
* [Breaking] For `setMetadata()` and `setHTTPHeaders()`, `metadata` and `blobHTTPHeaders` are moved from `options` into the top-level parameter list.
* Fixed bugs and typos in samples.
* Fixed a bug in `generateAccountSASQueryParameters()` where the generated signature was not valid.
* Fixed a bug in `generateBlobSASQueryParameters()` where cache-control, content-type, content-disposition, content-encoding and content-language were not supported.
* Fixed a bug in SAS generation where the start and expiry time format was incorrect.
* Removed `File` from the `uploadBrowserDataToBlockBlob` parameter type list, because `File` extends `Blob`, which is already in the list.
* Fixed typos in `IRange` comments.
* Removed the unused `marker` field from the options of the `ServiceURL.listContainersSegment` method.
* Fixed a bug where the `timeout` URL parameter was sent in milliseconds instead of seconds.
* Added stream retry when a `BlobURL.download` response stream unexpectedly ends.
2018.09 10.1.0-preview
* Fixed sharedkey authentication error when blob names have spaces.

View File

@ -1,6 +1,7 @@
# Azure Storage SDK V10 for JavaScript - Blob
[![npm version](https://badge.fury.io/js/%40azure%2Fstorage-blob.svg)](https://badge.fury.io/js/%40azure%2Fstorage-blob)
* [![npm version](https://badge.fury.io/js/%40azure%2Fstorage-blob.svg)](https://badge.fury.io/js/%40azure%2Fstorage-blob)
* [API Reference documentation](https://docs.microsoft.com/en-us/javascript/api/%40azure/storage-blob/index?view=azure-node-preview)
## Introduction
@ -39,7 +40,7 @@ This library depends on following ES6 features which need external polyfills loa
#### Differences between Node.js and browsers
There are differences between Node.js and browsers runtime. When getting start with this SDK, pay attention to APIs or classes marked with *"ONLY AVAILABLE IN NODE.JS RUNTIME"* or *"ONLY AVAILABLE IN BROWSERS"*.
There are differences between the Node.js and browser runtimes. When getting started with this SDK, pay attention to APIs or classes marked with _"ONLY AVAILABLE IN NODE.JS RUNTIME"_ or _"ONLY AVAILABLE IN BROWSERS"_.
##### The following features, interfaces, classes or functions are only available in Node.js
@ -104,10 +105,10 @@ You need to set up [Cross-Origin Resource Sharing (CORS)](https://docs.microsoft
For example, you can create the following CORS settings for debugging. But please customize the settings carefully according to your requirements in a production environment.
* Allowed origins: *
* Allowed origins: \*
* Allowed verbs: DELETE,GET,HEAD,MERGE,POST,OPTIONS,PUT
* Allowed headers: *
* Exposed headers: *
* Allowed headers: \*
* Exposed headers: \*
* Maximum age (seconds): 86400
## SDK Architecture
@ -143,12 +144,12 @@ async function main() {
// Use TokenCredential with OAuth token
const tokenCredential = new TokenCredential("token");
tokenCredential.token = "renewedToken";
tokenCredential.token = "renewedToken"; // Renew the token by updating token field of token credential
// Use AnonymousCredential when url already includes a SAS signature
const anonymousCredential = new AnonymousCredential();
// Use sharedKeyCredential, tokenCredential or tokenCredential to create a pipeline
// Use sharedKeyCredential, tokenCredential or anonymousCredential to create a pipeline
const pipeline = StorageURL.newPipeline(sharedKeyCredential);
// List containers
@ -165,7 +166,7 @@ async function main() {
marker
);
marker = listContainersResponse.marker;
marker = listContainersResponse.nextMarker;
for (const container of listContainersResponse.containerItems) {
console.log(`Container: ${container.name}`);
}
@ -197,13 +198,14 @@ async function main() {
);
// List blobs
marker = undefined;
do {
const listBlobsResponse = await containerURL.listBlobFlatSegment(
Aborter.none,
marker
);
marker = listBlobsResponse.marker;
marker = listBlobsResponse.nextMarker;
for (const blob of listBlobsResponse.segment.blobItems) {
console.log(`Blob: ${blob.name}`);
}
@ -215,7 +217,7 @@ async function main() {
const downloadBlockBlobResponse = await blobURL.download(Aborter.none, 0);
console.log(
"Downloaded blob content",
downloadBlockBlobResponse.readableStreamBody.read(content.length).toString()
await streamToString(downloadBlockBlobResponse.readableStreamBody)
);
// Delete container
@ -224,6 +226,20 @@ async function main() {
console.log("deleted container");
}
// A helper method used to read a Node.js readable stream into string
async function streamToString(readableStream) {
return new Promise((resolve, reject) => {
const chunks = [];
readableStream.on("data", data => {
chunks.push(data.toString());
});
readableStream.on("end", () => {
resolve(chunks.join(""));
});
readableStream.on("error", reject);
});
}
// An async method returns a Promise object, which is compatible with then().catch() coding style.
main()
.then(() => {

View File

@ -4,7 +4,7 @@ const zip = require("gulp-zip");
const version = require("./package.json").version;
const zipFileName = `azurestoragejs.blob-${version}.zip`;
gulp.task("zip", () => {
gulp.task("zip", async () => {
gulp
.src([
"browser/azure-storage.blob.js",

View File

@ -0,0 +1,4 @@
// This file is used as a shim of "BlobDownloadResponse" for some browser bundlers
// when trying to bundle "BlobDownloadResponse"
// "BlobDownloadResponse" class is only available in Node.js runtime
export const BlobDownloadResponse = 1;

View File

@ -0,0 +1,465 @@
import { HttpResponse, isNode } from "ms-rest-js";
import { Aborter } from "./Aborter";
import * as Models from "./generated/models";
import { IMetadata } from "./models";
import { IRetriableReadableStreamOptions } from "./utils/RetriableReadableStream";
import {
ReadableStreamGetter,
RetriableReadableStream
} from "./utils/RetriableReadableStream";
/**
* ONLY AVAILABLE IN NODE.JS RUNTIME.
*
* BlobDownloadResponse implements the Models.BlobDownloadResponse interface, and in the Node.js runtime it will
* automatically retry when the internal read stream unexpectedly ends. (This kind of unexpected end cannot
* trigger retries defined in the pipeline retry policy.)
*
* The readableStreamBody stream retries in the underlying layer, so you can just use it as a normal Node.js
* Readable stream.
*
* @export
* @class BlobDownloadResponse
* @implements {Models.BlobDownloadResponse}
*/
export class BlobDownloadResponse implements Models.BlobDownloadResponse {
/**
* Indicates that the service supports
* requests for partial blob content.
*
* @readonly
* @type {(string | undefined)}
* @memberof BlobDownloadResponse
*/
public get acceptRanges(): string | undefined {
return this.originalResponse.acceptRanges;
}
/**
* Returns the cache-control value if it was previously
* specified for the blob.
*
* @readonly
* @type {(string | undefined)}
* @memberof BlobDownloadResponse
*/
public get cacheControl(): string | undefined {
return this.originalResponse.cacheControl;
}
/**
* Returns the value that was specified
* for the 'x-ms-content-disposition' header and specifies how to process the
* response.
*
* @readonly
* @type {(string | undefined)}
* @memberof BlobDownloadResponse
*/
public get contentDisposition(): string | undefined {
return this.originalResponse.contentDisposition;
}
/**
* Returns the value that was specified
* for the Content-Encoding request header.
*
* @readonly
* @type {(string | undefined)}
* @memberof BlobDownloadResponse
*/
public get contentEncoding(): string | undefined {
return this.originalResponse.contentEncoding;
}
/**
* Returns the value that was specified
* for the Content-Language request header.
*
* @readonly
* @type {(string | undefined)}
* @memberof BlobDownloadResponse
*/
public get contentLanguage(): string | undefined {
return this.originalResponse.contentLanguage;
}
/**
* The current sequence number for a
* page blob. This header is not returned for block blobs or append blobs.
*
* @readonly
* @type {(number | undefined)}
* @memberof BlobDownloadResponse
*/
public get blobSequenceNumber(): number | undefined {
return this.originalResponse.blobSequenceNumber;
}
/**
* The blob's type. Possible values include:
* 'BlockBlob', 'PageBlob', 'AppendBlob'.
*
* @readonly
* @type {(Models.BlobType | undefined)}
* @memberof BlobDownloadResponse
*/
public get blobType(): Models.BlobType | undefined {
return this.originalResponse.blobType;
}
/**
* The number of bytes present in the
* response body.
*
* @readonly
* @type {(number | undefined)}
* @memberof BlobDownloadResponse
*/
public get contentLength(): number | undefined {
return this.originalResponse.contentLength;
}
/**
* If the blob has an MD5 hash and the
* request is to read the full blob, this response header is returned so that
* the client can check for message content integrity. If the request is to
* read a specified range and the 'x-ms-range-get-content-md5' is set to
* true, then the request returns an MD5 hash for the range, as long as the
* range size is less than or equal to 4 MB. If neither of these sets of
* conditions is true, then no value is returned for the 'Content-MD5'
* header.
*
* @readonly
* @type {(Uint8Array | undefined)}
* @memberof BlobDownloadResponse
*/
public get contentMD5(): Uint8Array | undefined {
return this.originalResponse.contentMD5;
}
/**
* Indicates the range of bytes returned if
* the client requested a subset of the blob by setting the Range request
* header.
*
* @readonly
* @type {(string | undefined)}
* @memberof BlobDownloadResponse
*/
public get contentRange(): string | undefined {
return this.originalResponse.contentRange;
}
/**
* The content type specified for the blob.
* The default content type is 'application/octet-stream'.
*
* @readonly
* @type {(string | undefined)}
* @memberof BlobDownloadResponse
*/
public get contentType(): string | undefined {
return this.originalResponse.contentType;
}
/**
* Conclusion time of the last attempted
* Copy Blob operation where this blob was the destination blob. This value
* can specify the time of a completed, aborted, or failed copy attempt.
*
* @readonly
* @type {(Date | undefined)}
* @memberof BlobDownloadResponse
*/
public get copyCompletionTime(): Date | undefined {
return this.originalResponse.copyCompletionTime;
}
/**
* String identifier for the last attempted Copy
* Blob operation where this blob was the destination blob.
*
* @readonly
* @type {(string | undefined)}
* @memberof BlobDownloadResponse
*/
public get copyId(): string | undefined {
return this.originalResponse.copyId;
}
/**
* Contains the number of bytes copied and
* the total bytes in the source in the last attempted Copy Blob operation
* where this blob was the destination blob. Can show between 0 and
* Content-Length bytes copied.
*
* @readonly
* @type {(string | undefined)}
* @memberof BlobDownloadResponse
*/
public get copyProgress(): string | undefined {
return this.originalResponse.copyProgress;
}
/**
* URL up to 2KB in length that specifies the
* source blob used in the last attempted Copy Blob operation where this blob
* was the destination blob.
*
* @readonly
* @type {(string | undefined)}
* @memberof BlobDownloadResponse
*/
public get copySource(): string | undefined {
return this.originalResponse.copySource;
}
/**
* State of the copy operation
* identified by 'x-ms-copy-id'. Possible values include: 'pending',
* 'success', 'aborted', 'failed'
*
* @readonly
* @type {(Models.CopyStatusType | undefined)}
* @memberof BlobDownloadResponse
*/
public get copyStatus(): Models.CopyStatusType | undefined {
return this.originalResponse.copyStatus;
}
/**
* Only appears when
* x-ms-copy-status is failed or pending. Describes cause of fatal or
* non-fatal copy operation failure.
*
* @readonly
* @type {(string | undefined)}
* @memberof BlobDownloadResponse
*/
public get copyStatusDescription(): string | undefined {
return this.originalResponse.copyStatusDescription;
}
/**
* When a blob is leased,
* specifies whether the lease is of infinite or fixed duration. Possible
* values include: 'infinite', 'fixed'.
*
* @readonly
* @type {(Models.LeaseDurationType | undefined)}
* @memberof BlobDownloadResponse
*/
public get leaseDuration(): Models.LeaseDurationType | undefined {
return this.originalResponse.leaseDuration;
}
/**
* Lease state of the blob. Possible
* values include: 'available', 'leased', 'expired', 'breaking', 'broken'.
*
* @readonly
* @type {(Models.LeaseStateType | undefined)}
* @memberof BlobDownloadResponse
*/
public get leaseState(): Models.LeaseStateType | undefined {
return this.originalResponse.leaseState;
}
/**
* The current lease status of the
* blob. Possible values include: 'locked', 'unlocked'.
*
* @readonly
* @type {(Models.LeaseStatusType | undefined)}
* @memberof BlobDownloadResponse
*/
public get leaseStatus(): Models.LeaseStatusType | undefined {
return this.originalResponse.leaseStatus;
}
/**
* A UTC date/time value generated by the service that
* indicates the time at which the response was initiated.
*
* @readonly
* @type {(Date | undefined)}
* @memberof BlobDownloadResponse
*/
public get date(): Date | undefined {
return this.originalResponse.date;
}
/**
* The number of committed blocks
* present in the blob. This header is returned only for append blobs.
*
* @readonly
* @type {(number | undefined)}
* @memberof BlobDownloadResponse
*/
public get blobCommittedBlockCount(): number | undefined {
return this.originalResponse.blobCommittedBlockCount;
}
/**
* The ETag contains a value that you can use to
* perform operations conditionally, in quotes.
*
* @readonly
* @type {(string | undefined)}
* @memberof BlobDownloadResponse
*/
public get eTag(): string | undefined {
return this.originalResponse.eTag;
}
public get errorCode(): string | undefined {
return this.originalResponse.errorCode;
}
/**
* The value of this header is set to
* true if the blob data and application metadata are completely encrypted
* using the specified algorithm. Otherwise, the value is set to false (when
* the blob is unencrypted, or if only parts of the blob/application metadata
* are encrypted).
*
* @readonly
* @type {(boolean | undefined)}
* @memberof BlobDownloadResponse
*/
public get isServerEncrypted(): boolean | undefined {
return this.originalResponse.isServerEncrypted;
}
/**
* If the blob has an MD5 hash, and if the
* request contains a range header (Range or x-ms-range), this response header
* is returned with the value of the whole blob's MD5 value. This value may
* or may not be equal to the value returned in Content-MD5 header, with the
* latter calculated from the requested range.
*
* @readonly
* @type {(Uint8Array | undefined)}
* @memberof BlobDownloadResponse
*/
public get blobContentMD5(): Uint8Array | undefined {
return this.originalResponse.blobContentMD5;
}
/**
* Returns the date and time the blob was last
* modified. Any operation that modifies the blob or its properties updates
* the last modified time.
*
* @readonly
* @type {(Date | undefined)}
* @memberof BlobDownloadResponse
*/
public get lastModified(): Date | undefined {
return this.originalResponse.lastModified;
}
/**
* A name-value pair
* to associate with a blob storage object.
*
* @readonly
* @type {(IMetadata | undefined)}
* @memberof BlobDownloadResponse
*/
public get metadata(): IMetadata | undefined {
return this.originalResponse.metadata;
}
/**
* This header uniquely identifies the request
* that was made and can be used for troubleshooting the request.
*
* @readonly
* @type {(string | undefined)}
* @memberof BlobDownloadResponse
*/
public get requestId(): string | undefined {
return this.originalResponse.requestId;
}
/**
* Indicates the version of the Blob service used
* to execute the request.
*
* @readonly
* @type {(string | undefined)}
* @memberof BlobDownloadResponse
*/
public get version(): string | undefined {
return this.originalResponse.version;
}
/**
* The response body as a browser Blob.
* Always undefined in Node.js.
*
* @readonly
* @type {(Promise<Blob> | undefined)}
* @memberof BlobDownloadResponse
*/
public get blobBody(): Promise<Blob> | undefined {
return this.originalResponse.blobBody;
}
/**
* The response body as a node.js Readable stream.
* Always undefined in the browser.
*
* It will automatically retry when the internal read stream unexpectedly ends.
*
* @readonly
* @type {(NodeJS.ReadableStream | undefined)}
* @memberof BlobDownloadResponse
*/
public get readableStreamBody(): NodeJS.ReadableStream | undefined {
return isNode ? this.blobDownloadStream : undefined;
}
public get _response(): HttpResponse & {
parsedHeaders: Models.BlobDownloadHeaders;
} {
return this.originalResponse._response;
}
private originalResponse: Models.BlobDownloadResponse;
private blobDownloadStream?: RetriableReadableStream;
/**
* Creates an instance of BlobDownloadResponse.
*
* @param {Aborter} aborter
* @param {Models.BlobDownloadResponse} originalResponse
* @param {ReadableStreamGetter} getter
* @param {number} offset
* @param {number} count
* @param {IRetriableReadableStreamOptions} [options={}]
* @memberof BlobDownloadResponse
*/
public constructor(
aborter: Aborter,
originalResponse: Models.BlobDownloadResponse,
getter: ReadableStreamGetter,
offset: number,
count: number,
options: IRetriableReadableStreamOptions = {}
) {
this.originalResponse = originalResponse;
this.blobDownloadStream = new RetriableReadableStream(
aborter,
this.originalResponse.readableStreamBody!,
getter,
offset,
count,
options
);
}
}

View File

@ -1,14 +1,18 @@
import { TransferProgressEvent } from "ms-rest-js";
import { isNode, TransferProgressEvent } from "ms-rest-js";
import * as Models from "../lib/generated/models";
import { Aborter } from "./Aborter";
import { BlobDownloadResponse } from "./BlobDownloadResponse";
import { ContainerURL } from "./ContainerURL";
import { Blob } from "./generated/operations";
import { rangeToString } from "./IRange";
import { IBlobAccessConditions, IMetadata } from "./models";
import { Pipeline } from "./Pipeline";
import { StorageURL } from "./StorageURL";
import { URLConstants } from "./utils/constants";
import {
DEFAULT_MAX_DOWNLOAD_RETRY_REQUESTS,
URLConstants
} from "./utils/constants";
import { appendToURLPath, setURLParameter } from "./utils/utils.common";
export interface IBlobDownloadOptions {
@ -16,6 +20,23 @@ export interface IBlobDownloadOptions {
rangeGetContentMD5?: boolean;
blobAccessConditions?: IBlobAccessConditions;
progress?: (progress: TransferProgressEvent) => void;
/**
* Optional. ONLY AVAILABLE IN NODE.JS.
*
* How many retries will be performed when the original body download stream unexpectedly ends.
* This kind of end will not trigger the retry policy defined in a pipeline,
* because it doesn't emit network errors.
*
* With this option, every additional retry means an additional BlobURL.download() request will be made
* from the broken point, until the requested range has been successfully downloaded or maxRetryRequests is reached.
*
* Default value is 5; please set a larger value when loading large files over a poor network.
* (A usage sketch follows this interface.)
*
* @type {number}
* @memberof IBlobDownloadOptions
*/
maxRetryRequests?: number;
}
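A hedged usage sketch of the new option (`blobURL` is a hypothetical BlobURL pointing at an existing blob):

```typescript
// Sketch: download with extra stream-level retries in Node.js.
const response = await blobURL.download(Aborter.none, 0, undefined, {
  maxRetryRequests: 5 // re-issue the ranged download up to 5 times on unexpected stream ends
});
response.readableStreamBody!.pipe(process.stdout);
```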
export interface IBlobGetPropertiesOptions {
@ -29,11 +50,9 @@ export interface IBlobDeleteOptions {
export interface IBlobSetHTTPHeadersOptions {
blobAccessConditions?: IBlobAccessConditions;
blobHTTPHeaders?: Models.BlobHTTPHeaders;
}
export interface IBlobSetMetadataOptions {
metadata?: IMetadata;
blobAccessConditions?: IBlobAccessConditions;
}
@ -184,31 +203,95 @@ export class BlobURL extends StorageURL {
options: IBlobDownloadOptions = {}
): Promise<Models.BlobDownloadResponse> {
options.blobAccessConditions = options.blobAccessConditions || {};
options.blobAccessConditions.modifiedAccessConditions =
options.blobAccessConditions.modifiedAccessConditions || {};
const res = await this.blobContext.download({
abortSignal: aborter,
leaseAccessConditions: options.blobAccessConditions.leaseAccessConditions,
modifiedAccessConditions:
options.blobAccessConditions.modifiedAccessConditions,
onDownloadProgress: options.progress,
onDownloadProgress: isNode ? undefined : options.progress,
range:
offset === 0 && !count ? undefined : rangeToString({ offset, count }),
rangeGetContentMD5: options.rangeGetContentMD5,
snapshot: options.snapshot
});
// Default axios based HTTP client cannot abort download stream, manually pause/abort it
// Currently, no error will be triggered when network error or abort during reading from response stream
// TODO: Now need to manually validate the date length when stream ends, add download retry in the future
if (res.readableStreamBody) {
aborter.addEventListener("abort", () => {
if (res.readableStreamBody) {
res.readableStreamBody.pause();
}
});
// Return browser response immediately
if (!isNode) {
return res;
}
return res;
// We support retrying when the download stream unexpectedly ends in the Node.js runtime
// The following code shouldn't be bundled into the browser build; however, some
// bundlers may try to bundle the following code and "BlobDownloadResponse.ts".
// In this case, "BlobDownloadResponse.browser.ts" will be used as a shim of "BlobDownloadResponse.ts"
// The config is in the package.json "browser" field
if (
options.maxRetryRequests === undefined ||
options.maxRetryRequests < 0
) {
// TODO: Default value or make it a required parameter?
options.maxRetryRequests = DEFAULT_MAX_DOWNLOAD_RETRY_REQUESTS;
}
if (res.contentLength === undefined) {
throw new RangeError(
`Blob download response doesn't contain a valid content length header`
);
}
if (!res.eTag) {
throw new RangeError(
`Blob download response doesn't contain a valid etag header`
);
}
return new BlobDownloadResponse(
aborter,
res,
async (start: number): Promise<NodeJS.ReadableStream> => {
const updatedOptions: Models.BlobDownloadOptionalParams = {
leaseAccessConditions: options.blobAccessConditions!
.leaseAccessConditions,
modifiedAccessConditions: {
ifMatch:
options.blobAccessConditions!.modifiedAccessConditions!.ifMatch ||
res.eTag,
ifModifiedSince: options.blobAccessConditions!
.modifiedAccessConditions!.ifModifiedSince,
ifNoneMatch: options.blobAccessConditions!.modifiedAccessConditions!
.ifNoneMatch,
ifUnmodifiedSince: options.blobAccessConditions!
.modifiedAccessConditions!.ifUnmodifiedSince
},
range: rangeToString({
count: offset + res.contentLength! - start,
offset: start
}),
snapshot: options.snapshot
};
// Debug purpose only
// console.log(
// `Read from internal stream, range: ${
// updatedOptions.range
// }, options: ${JSON.stringify(updatedOptions)}`
// );
return (await this.blobContext.download({
abortSignal: aborter,
...updatedOptions
})).readableStreamBody!;
},
offset,
res.contentLength!,
{
maxRetryRequests: options.maxRetryRequests,
progress: options.progress
}
);
}
/**
@ -284,24 +367,28 @@ export class BlobURL extends StorageURL {
/**
* Sets system properties on the blob.
*
* If no option provided, or no value provided for the blob HTTP headers in the options,
* If no value is provided, or no value is provided for the specified blob HTTP headers,
* these blob HTTP headers without a value will be cleared.
* @see https://docs.microsoft.com/en-us/rest/api/storageservices/set-blob-properties
*
* @param {Aborter} aborter Create a new Aborter instance with Aborter.none or Aborter.timeout(),
* see the documentation of Aborter for more examples of request cancellation
* @param {Models.BlobHTTPHeaders} [blobHTTPHeaders] If no value provided, or no value provided for
* the specified blob HTTP headers, these blob HTTP
* headers without a value will be cleared.
* @param {IBlobSetHTTPHeadersOptions} [options]
* @returns {Promise<Models.BlobSetHTTPHeadersResponse>}
* @memberof BlobURL
*/
public async setHTTPHeaders(
aborter: Aborter,
blobHTTPHeaders?: Models.BlobHTTPHeaders,
options: IBlobSetHTTPHeadersOptions = {}
): Promise<Models.BlobSetHTTPHeadersResponse> {
options.blobAccessConditions = options.blobAccessConditions || {};
return this.blobContext.setHTTPHeaders({
abortSignal: aborter,
blobHTTPHeaders: options.blobHTTPHeaders,
blobHTTPHeaders,
leaseAccessConditions: options.blobAccessConditions.leaseAccessConditions,
modifiedAccessConditions:
options.blobAccessConditions.modifiedAccessConditions
@ -311,25 +398,28 @@ export class BlobURL extends StorageURL {
/**
* Sets user-defined metadata for the specified blob as one or more name-value pairs.
*
* If no option provided, or no metadata defined in the option parameter, the blob
* If no value is provided, or no metadata is defined in the parameter, the blob
* metadata will be removed.
* @see https://docs.microsoft.com/en-us/rest/api/storageservices/set-blob-metadata
*
* @param {Aborter} aborter Create a new Aborter instance with Aborter.none or Aborter.timeout(),
* see the documentation of Aborter for more examples of request cancellation
* @param {IMetadata} [metadata] Replace existing metadata with this value.
* If no value is provided, the existing metadata will be removed.
* @param {IBlobSetMetadataOptions} [options]
* @returns {Promise<Models.BlobSetMetadataResponse>}
* @memberof BlobURL
*/
public async setMetadata(
aborter: Aborter,
metadata?: IMetadata,
options: IBlobSetMetadataOptions = {}
): Promise<Models.BlobSetMetadataResponse> {
options.blobAccessConditions = options.blobAccessConditions || {};
return this.blobContext.setMetadata({
abortSignal: aborter,
leaseAccessConditions: options.blobAccessConditions.leaseAccessConditions,
metadata: options.metadata,
metadata,
modifiedAccessConditions:
options.blobAccessConditions.modifiedAccessConditions
});

View File

@ -23,7 +23,6 @@ export interface IContainerDeleteMethodOptions {
}
export interface IContainerSetMetadataOptions {
metadata?: IMetadata;
containerAccessConditions?: IContainerAccessConditions;
}
@ -149,7 +148,7 @@ export class ContainerURL extends StorageURL {
}
/**
* containersContext provided by protocol layer.
* containerContext provided by protocol layer.
*
* @private
* @type {Containers}
@ -285,18 +284,22 @@ export class ContainerURL extends StorageURL {
/**
* Sets one or more user-defined name-value pairs for the specified container.
*
* If no option provided, or no metadata defined in the option parameter, the container
* If no value is provided, or no metadata is defined in the parameter, the container
* metadata will be removed.
*
* @see https://docs.microsoft.com/en-us/rest/api/storageservices/set-container-metadata
*
* @param {Aborter} aborter Create a new Aborter instance with Aborter.none or Aborter.timeout(),
* see the documentation of Aborter for more examples of request cancellation
* @param {IMetadata} [metadata] Replace existing metadata with this value.
* If no value is provided, the existing metadata will be removed.
* @param {IContainerSetMetadataOptions} [options]
* @returns {Promise<Models.ContainerSetMetadataResponse>}
* @memberof ContainerURL
*/
public async setMetadata(
aborter: Aborter,
metadata?: IMetadata,
options: IContainerSetMetadataOptions = {}
): Promise<Models.ContainerSetMetadataResponse> {
if (!options.containerAccessConditions) {
@ -331,7 +334,7 @@ export class ContainerURL extends StorageURL {
abortSignal: aborter,
leaseAccessConditions:
options.containerAccessConditions.leaseAccessConditions,
metadata: options.metadata,
metadata,
modifiedAccessConditions:
options.containerAccessConditions.modifiedAccessConditions
});

View File

@ -130,9 +130,9 @@ export function generateAccountSASQueryParameters(
parsedServices,
parsedResourceTypes,
accountSASSignatureValues.startTime
? truncatedISO8061Date(accountSASSignatureValues.startTime)
? truncatedISO8061Date(accountSASSignatureValues.startTime, false)
: "",
truncatedISO8061Date(accountSASSignatureValues.expiryTime),
truncatedISO8061Date(accountSASSignatureValues.expiryTime, false),
accountSASSignatureValues.ipRange
? ipRangeToString(accountSASSignatureValues.ipRange)
: "",
@ -147,8 +147,8 @@ export function generateAccountSASQueryParameters(
return new SASQueryParameters(
version,
parsedPermissions,
signature,
parsedPermissions,
parsedServices,
parsedResourceTypes,
accountSASSignatureValues.protocol,

View File

@ -188,10 +188,10 @@ export function generateBlobSASQueryParameters(
const stringToSign = [
verifiedPermissions ? verifiedPermissions : "",
blobSASSignatureValues.startTime
? truncatedISO8061Date(blobSASSignatureValues.startTime)
? truncatedISO8061Date(blobSASSignatureValues.startTime, false)
: "",
blobSASSignatureValues.expiryTime
? truncatedISO8061Date(blobSASSignatureValues.expiryTime)
? truncatedISO8061Date(blobSASSignatureValues.expiryTime, false)
: "",
getCanonicalName(
sharedKeyCredential.accountName,
@ -232,7 +232,12 @@ export function generateBlobSASQueryParameters(
blobSASSignatureValues.expiryTime,
blobSASSignatureValues.ipRange,
blobSASSignatureValues.identifier,
resource
resource,
blobSASSignatureValues.cacheControl,
blobSASSignatureValues.contentDisposition,
blobSASSignatureValues.contentEncoding,
blobSASSignatureValues.contentLanguage,
blobSASSignatureValues.contentType
);
}
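A hedged sketch of generating a blob SAS that carries the newly supported response headers (`sharedKeyCredential` and the names are placeholders; field names assume the 10.2.0-preview exports):

```typescript
const sas = generateBlobSASQueryParameters(
  {
    blobName: "myblob",           // hypothetical
    cacheControl: "no-cache",     // emitted as the rscc query parameter
    containerName: "mycontainer", // hypothetical
    contentDisposition: "attachment; filename=myblob.txt",
    contentType: "text/plain",
    expiryTime: new Date(Date.now() + 60 * 60 * 1000),
    permissions: BlobSASPermissions.parse("r").toString()
  },
  sharedKeyCredential
).toString();
// The resulting query string now includes the rscc/rscd/rsct parameters.
```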

View File

@ -11,7 +11,7 @@ export interface IRange {
* StartByte, larger than or equal to 0.
*
* @type {number}
* @memberof IPRange
* @memberof IRange
*/
offset: number;
/**
@ -19,7 +19,7 @@ export interface IRange {
* If not provided, will return bytes from offset to the end.
*
* @type {number}
* @memberof IPRange
* @memberof IRange
*/
count?: number;
}
@ -30,19 +30,19 @@ export interface IRange {
* "bytes=255-" or "bytes=0-511"
*
* @export
* @param {IRange} ipRange
* @param {IRange} iRange
* @returns {string}
*/
export function rangeToString(ipRange: IRange): string {
if (ipRange.offset < 0) {
throw new RangeError(`IPRange.offset cannot be smaller than 0.`);
export function rangeToString(iRange: IRange): string {
if (iRange.offset < 0) {
throw new RangeError(`IRange.offset cannot be smaller than 0.`);
}
if (ipRange.count && ipRange.count <= 0) {
if (iRange.count && iRange.count <= 0) {
throw new RangeError(
`IPRange.count must be larger than 0. Leave it undefined if you want a range from offset to the end.`
`IRange.count must be larger than 0. Leave it undefined if you want a range from offset to the end.`
);
}
return ipRange.count
? `bytes=${ipRange.offset}-${ipRange.offset + ipRange.count - 1}`
: `bytes=${ipRange.offset}-`;
return iRange.count
? `bytes=${iRange.offset}-${iRange.offset + iRange.count - 1}`
: `bytes=${iRange.offset}-`;
}
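A quick usage sketch of the renamed helper:

```typescript
rangeToString({ offset: 0, count: 512 }); // "bytes=0-511"
rangeToString({ offset: 255 });           // "bytes=255-"
```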

View File

@ -1,11 +1,13 @@
import {
BaseRequestPolicy,
HttpClient as IHTTPClient,
HttpClient as IHttpClient,
HttpHeaders,
HttpOperationResponse,
HttpPipelineLogger as IHTTPPipelineLogger,
HttpPipelineLogger as IHttpPipelineLogger,
HttpPipelineLogLevel,
RequestPolicy,
RequestPolicyFactory,
RequestPolicyOptions,
ServiceClientOptions,
WebResource
} from "ms-rest-js";
@ -13,13 +15,16 @@ import {
// Export the following interfaces and types for customers who want to implement their
// own RequestPolicy or HttpClient (a sketch follows the export block below)
export {
IHTTPClient,
IHTTPPipelineLogger,
IHttpClient,
IHttpPipelineLogger,
HttpHeaders,
HttpPipelineLogLevel,
HttpOperationResponse,
WebResource,
BaseRequestPolicy
BaseRequestPolicy,
RequestPolicyFactory,
RequestPolicy,
RequestPolicyOptions
};
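With `RequestPolicyFactory`, `RequestPolicy` and `RequestPolicyOptions` exported, a custom policy can be assembled from these types. A minimal sketch (the header name and value are hypothetical):

```typescript
// Sketch: a policy that stamps every outgoing request with a custom header.
class CustomHeaderPolicy extends BaseRequestPolicy {
  constructor(nextPolicy: RequestPolicy, options: RequestPolicyOptions) {
    super(nextPolicy, options);
  }
  public async sendRequest(request: WebResource): Promise<HttpOperationResponse> {
    request.headers.set("x-ms-custom-id", "debug-42"); // hypothetical header
    return this._nextPolicy.sendRequest(request);
  }
}

// The factory can then be included in the factories array passed to new Pipeline(...).
const customHeaderFactory: RequestPolicyFactory = {
  create: (nextPolicy, options) => new CustomHeaderPolicy(nextPolicy, options)
};
```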
/**
@ -29,8 +34,8 @@ export {
* @interface IPipelineOptions
*/
export interface IPipelineOptions {
logger?: IHTTPPipelineLogger;
HTTPClient?: IHTTPClient;
logger?: IHttpPipelineLogger;
HTTPClient?: IHttpClient;
}
/**
@ -48,7 +53,7 @@ export class Pipeline {
public readonly options: IPipelineOptions;
/**
* Creates an instance of Pipeline. Customize HTTPClient by implementing IHTTPClient interface.
* Creates an instance of Pipeline. Customize the HTTP client by implementing the IHttpClient interface.
*
* @param {RequestPolicyFactory[]} factories
* @param {IPipelineOptions} [options={}]

View File

@ -118,6 +118,46 @@ export class SASQueryParameters {
*/
public readonly signature: string;
/**
* Value for cache-control header in Blob/File Service SAS.
*
* @type {string}
* @memberof SASQueryParameters
*/
public readonly cacheControl?: string;
/**
* Value for content-disposition header in Blob/File Service SAS.
*
* @type {string}
* @memberof SASQueryParameters
*/
public readonly contentDisposition?: string;
/**
* Value for content-encoding header in Blob/File Service SAS.
*
* @type {string}
* @memberof SASQueryParameters
*/
public readonly contentEncoding?: string;
/**
* Value for content-language header in Blob/File Service SAS.
*
* @type {string}
* @memberof SASQueryParameters
*/
public readonly contentLanguage?: string;
/**
* Value for content-type header in Blob/File Service SAS.
*
* @type {string}
* @memberof SASQueryParameters
*/
public readonly contentType?: string;
/**
* Inner value of getter ipRange.
*
@ -158,6 +198,11 @@ export class SASQueryParameters {
* @param {IIPRange} [ipRange] Representing the range of valid IP addresses for this SAS token
* @param {string} [identifier] Representing the signed identifier (only for Service SAS)
* @param {string} [resource] Representing the storage container or blob (only for Service SAS)
* @param {string} [cacheControl] Representing the cache-control header (only for Blob/File Service SAS)
* @param {string} [contentDisposition] Representing the content-disposition header (only for Blob/File Service SAS)
* @param {string} [contentEncoding] Representing the content-encoding header (only for Blob/File Service SAS)
* @param {string} [contentLanguage] Representing the content-language header (only for Blob/File Service SAS)
* @param {string} [contentType] Representing the content-type header (only for Blob/File Service SAS)
* @memberof SASQueryParameters
*/
constructor(
@ -171,7 +216,12 @@ export class SASQueryParameters {
expiryTime?: Date,
ipRange?: IIPRange,
identifier?: string,
resource?: string
resource?: string,
cacheControl?: string,
contentDisposition?: string,
contentEncoding?: string,
contentLanguage?: string,
contentType?: string
) {
this.version = version;
this.services = services;
@ -184,6 +234,11 @@ export class SASQueryParameters {
this.identifier = identifier;
this.resource = resource;
this.signature = signature;
this.cacheControl = cacheControl;
this.contentDisposition = contentDisposition;
this.contentEncoding = contentEncoding;
this.contentLanguage = contentLanguage;
this.contentType = contentType;
}
/**
@ -204,7 +259,12 @@ export class SASQueryParameters {
"si",
"sr",
"sp",
"sig"
"sig",
"rscc",
"rscd",
"rsce",
"rscl",
"rsct"
];
const queries: string[] = [];
@ -226,14 +286,18 @@ export class SASQueryParameters {
this.tryAppendQueryParameter(
queries,
param,
this.startTime ? truncatedISO8061Date(this.startTime) : undefined
this.startTime
? truncatedISO8061Date(this.startTime, false)
: undefined
);
break;
case "se":
this.tryAppendQueryParameter(
queries,
param,
this.expiryTime ? truncatedISO8061Date(this.expiryTime) : undefined
this.expiryTime
? truncatedISO8061Date(this.expiryTime, false)
: undefined
);
break;
case "sip":
@ -255,6 +319,21 @@ export class SASQueryParameters {
case "sig":
this.tryAppendQueryParameter(queries, param, this.signature);
break;
case "rscc":
this.tryAppendQueryParameter(queries, param, this.cacheControl);
break;
case "rscd":
this.tryAppendQueryParameter(queries, param, this.contentDisposition);
break;
case "rsce":
this.tryAppendQueryParameter(queries, param, this.contentEncoding);
break;
case "rscl":
this.tryAppendQueryParameter(queries, param, this.contentLanguage);
break;
case "rsct":
this.tryAppendQueryParameter(queries, param, this.contentType);
break;
}
}
return queries.join("&");

View File

@ -11,16 +11,6 @@ export interface IServiceListContainersSegmentOptions {
* whose name begins with the specified prefix.
*/
prefix?: string;
/**
* @member {string} [marker] A string value that identifies the portion of
* the list of containers to be returned with the next listing operation. The
* operation returns the NextMarker value within the response body if the
* listing operation did not return all containers remaining to be listed
* with the current page. The NextMarker value can be used as the value for
* the marker parameter in a subsequent call to request the next page of list
* items. The marker value is opaque to the client.
*/
marker?: string;
/**
* @member {number} [maxresults] Specifies the maximum number of containers
* to return. If the request does not specify maxresults, or specifies a
@ -166,7 +156,13 @@ export class ServiceURL extends StorageURL {
*
* @param {Aborter} aborter Create a new Aborter instance with Aborter.none or Aborter.timeout(),
* see the documentation of Aborter for more examples of request cancellation
* @param {string} [marker]
* @param {string} [marker] A string value that identifies the portion of
* the list of containers to be returned with the next listing operation. The
* operation returns the NextMarker value within the response body if the
* listing operation did not return all containers remaining to be listed
* with the current page. The NextMarker value can be used as the value for
* the marker parameter in a subsequent call to request the next page of list
* items. The marker value is opaque to the client.
* @param {IServiceListContainersSegmentOptions} [options]
* @returns {Promise<Models.ServiceListContainersSegmentResponse>}
* @memberof ServiceURL

View File

@ -4,7 +4,7 @@ import { BrowserPolicyFactory } from "./BrowserPolicyFactory";
import { Credential } from "./credentials/Credential";
import { StorageClientContext } from "./generated/storageClientContext";
import { LoggingPolicyFactory } from "./LoggingPolicyFactory";
import { IHTTPClient, IHTTPPipelineLogger, Pipeline } from "./Pipeline";
import { IHttpClient, IHttpPipelineLogger, Pipeline } from "./Pipeline";
import { IRetryOptions, RetryPolicyFactory } from "./RetryPolicyFactory";
import {
ITelemetryOptions,
@ -31,8 +31,8 @@ export interface INewPipelineOptions {
telemetry?: ITelemetryOptions;
retryOptions?: IRetryOptions;
logger?: IHTTPPipelineLogger;
httpClient?: IHTTPClient;
logger?: IHttpPipelineLogger;
httpClient?: IHttpClient;
}
/**

View File

@ -8,10 +8,10 @@ import {
} from "./highlevel.common";
import { Batch } from "./utils/Batch";
import {
BLOB_DEFAULT_DOWNLOAD_BLOCK_BYTES,
BLOCK_BLOB_MAX_BLOCKS,
BLOCK_BLOB_MAX_STAGE_BLOCK_BYTES,
BLOCK_BLOB_MAX_UPLOAD_BLOB_BYTES
BLOCK_BLOB_MAX_UPLOAD_BLOB_BYTES,
DEFAULT_BLOB_DOWNLOAD_BLOCK_BYTES
} from "./utils/constants";
import { generateBlockID } from "./utils/utils.common";
@ -27,14 +27,14 @@ import { generateBlockID } from "./utils/utils.common";
* @export
* @param {Aborter} aborter Create a new Aborter instance with Aborter.none or Aborter.timeout(),
* see the documentation of Aborter for more examples of request cancellation
* @param {Blob | File | ArrayBuffer | ArrayBufferView} browserData Blob, File, ArrayBuffer or ArrayBufferView
* @param {Blob | ArrayBuffer | ArrayBufferView} browserData Blob, File, ArrayBuffer or ArrayBufferView
* @param {BlockBlobURL} blockBlobURL
* @param {IUploadToBlockBlobOptions} [options]
* @returns {Promise<BlobUploadCommonResponse>}
*/
export async function uploadBrowserDataToBlockBlob(
aborter: Aborter,
browserData: Blob | File | ArrayBuffer | ArrayBufferView,
browserData: Blob | ArrayBuffer | ArrayBufferView,
blockBlobURL: BlockBlobURL,
options?: IUploadToBlockBlobOptions
): Promise<BlobUploadCommonResponse> {
@ -92,8 +92,8 @@ async function UploadSeekableBlobToBlockBlob(
}
if (size > BLOCK_BLOB_MAX_UPLOAD_BLOB_BYTES) {
options.blockSize = Math.ceil(size / BLOCK_BLOB_MAX_BLOCKS);
if (options.blockSize < BLOB_DEFAULT_DOWNLOAD_BLOCK_BYTES) {
options.blockSize = BLOB_DEFAULT_DOWNLOAD_BLOCK_BYTES;
if (options.blockSize < DEFAULT_BLOB_DOWNLOAD_BLOCK_BYTES) {
options.blockSize = DEFAULT_BLOB_DOWNLOAD_BLOCK_BYTES;
}
}
}
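Because `File` extends `Blob`, existing call sites that pass a `File` keep compiling after the parameter-type change; a hedged browser-side sketch (`blockBlobURL` is a placeholder):

```typescript
const file = new File(["hello world"], "hello.txt", { type: "text/plain" });
await uploadBrowserDataToBlockBlob(Aborter.none, file, blockBlobURL);
```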

View File

@ -90,6 +90,24 @@ export interface IDownloadFromBlobOptions {
*/
blockSize?: number;
/**
* Optional. ONLY AVAILABLE IN NODE.JS.
*
* How many retries will be performed when the original block download stream unexpectedly ends.
* This kind of end will not trigger the retry policy defined in a pipeline,
* because it doesn't emit network errors.
*
* With this option, every additional retry means an additional BlobURL.download() request will be made
* from the broken point, until the requested block has been successfully downloaded or
* maxRetryRequestsPerBlock is reached.
*
* Default value is 5; please set a larger value for poor networks.
*
* @type {number}
* @memberof IDownloadFromBlobOptions
*/
maxRetryRequestsPerBlock?: number;
/**
* Progress updater.
*

View File

@ -15,10 +15,10 @@ import { IBlobAccessConditions } from "./models";
import { Batch } from "./utils/Batch";
import { BufferScheduler } from "./utils/BufferScheduler";
import {
BLOB_DEFAULT_DOWNLOAD_BLOCK_BYTES,
BLOCK_BLOB_MAX_BLOCKS,
BLOCK_BLOB_MAX_STAGE_BLOCK_BYTES,
BLOCK_BLOB_MAX_UPLOAD_BLOB_BYTES
BLOCK_BLOB_MAX_UPLOAD_BLOB_BYTES,
DEFAULT_BLOB_DOWNLOAD_BLOCK_BYTES
} from "./utils/constants";
import { generateBlockID } from "./utils/utils.common";
import { streamToBuffer } from "./utils/utils.node";
@ -101,13 +101,13 @@ async function uploadResetableStreamToBlockBlob(
);
}
if (options.blockSize === 0) {
if (size > BLOCK_BLOB_MAX_STAGE_BLOCK_BYTES * BLOCK_BLOB_MAX_BLOCKS) {
if (size > BLOCK_BLOB_MAX_BLOCKS * BLOCK_BLOB_MAX_STAGE_BLOCK_BYTES) {
throw new RangeError(`${size} is too large to upload to a block blob.`);
}
if (size > BLOCK_BLOB_MAX_UPLOAD_BLOB_BYTES) {
options.blockSize = Math.ceil(size / BLOCK_BLOB_MAX_BLOCKS);
if (options.blockSize < BLOB_DEFAULT_DOWNLOAD_BLOCK_BYTES) {
options.blockSize = BLOB_DEFAULT_DOWNLOAD_BLOCK_BYTES;
if (options.blockSize < DEFAULT_BLOB_DOWNLOAD_BLOCK_BYTES) {
options.blockSize = DEFAULT_BLOB_DOWNLOAD_BLOCK_BYTES;
}
}
}
@ -197,7 +197,7 @@ export async function downloadBlobToBuffer(
throw new RangeError("blockSize option must be >= 0");
}
if (options.blockSize === 0) {
options.blockSize = BLOB_DEFAULT_DOWNLOAD_BLOCK_BYTES;
options.blockSize = DEFAULT_BLOB_DOWNLOAD_BLOCK_BYTES;
}
if (offset < 0) {
@ -240,7 +240,8 @@ export async function downloadBlobToBuffer(
off,
chunkEnd - off + 1,
{
blobAccessConditions: options.blobAccessConditions
blobAccessConditions: options.blobAccessConditions,
maxRetryRequests: options.maxRetryRequestsPerBlock
}
);
const stream = response.readableStreamBody!;
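A hedged sketch of the high-level download path with the new per-block retry option (sizes and names are placeholders):

```typescript
// Sketch: download a 10 MB blob into a buffer, retrying each 4 MB block
// up to 10 times on unexpected stream ends.
const buffer = Buffer.alloc(10 * 1024 * 1024);
await downloadBlobToBuffer(Aborter.none, buffer, blobURL, 0, undefined, {
  blockSize: 4 * 1024 * 1024,
  maxRetryRequestsPerBlock: 10
});
```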

View File

@ -6,6 +6,7 @@ export * from "./Aborter";
export * from "./AppendBlobURL";
export * from "./BlobURL";
export * from "./BlockBlobURL";
export * from "./BrowserPolicyFactory";
export * from "./ContainerURL";
export * from "./credentials/AnonymousCredential";
export * from "./credentials/Credential";

View File

@ -12,6 +12,7 @@ export * from "./BlobSASPermissions";
export * from "./IBlobSASSignatureValues";
export * from "./BlobURL";
export * from "./BlockBlobURL";
export * from "./BrowserPolicyFactory";
export * from "./ContainerSASPermissions";
export * from "./ContainerURL";
export * from "./credentials/AnonymousCredential";

View File

@ -7,7 +7,7 @@ import {
RequestPolicyFactory,
RequestPolicyOptions,
RestError,
WebResource
WebResource,
} from "ms-rest-js";
import { IRetryOptions } from "../RetryPolicyFactory";
@ -184,7 +184,7 @@ export class RetryPolicy extends BaseRequestPolicy {
newRequest.url = setURLParameter(
newRequest.url,
URLConstants.Parameters.TIMEOUT,
this.retryOptions.tryTimeoutInMs!.toString()
Math.floor(this.retryOptions.tryTimeoutInMs! / 1000).toString()
);
let response: HttpOperationResponse | undefined;
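The service interprets the `timeout` URL parameter in seconds, so the per-try timeout configured in milliseconds is now floored into seconds; a small sketch with illustrative values:

```typescript
const tryTimeoutInMs = 30 * 1000;                       // per-try timeout in milliseconds
const timeoutParam = Math.floor(tryTimeoutInMs / 1000); // 30, sent as ?timeout=30
```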

View File

@ -182,18 +182,21 @@ export class SharedKeyCredentialPolicy extends CredentialPolicy {
canonicalizedResourceString += `/${this.factory.accountName}${path}`;
const queries = getURLQueries(request.url);
if (getURLQueries(request.url)) {
const lowercaseQueries: { [key: string]: string } = {};
if (queries) {
const queryKeys: string[] = [];
for (const key in queries) {
if (queries.hasOwnProperty(key)) {
queryKeys.push(key);
const lowercaseKey = key.toLowerCase();
lowercaseQueries[lowercaseKey] = queries[key];
queryKeys.push(lowercaseKey);
}
}
queryKeys.sort();
for (const key of queryKeys) {
canonicalizedResourceString += `\n${key}:${decodeURIComponent(
queries[key]
lowercaseQueries[key]
)}`;
}
}
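A worked sketch of the fix (the URL is hypothetical): query parameter names are now lowercased before sorting and signing, while their values are left untouched:

```typescript
// For https://myaccount.blob.core.windows.net/mycontainer?Comp=list&restype=container
// the canonicalized resource string becomes:
//
//   /myaccount/mycontainer
//   comp:list
//   restype:container
//
// Previously a mixed-case key such as "Comp" was signed as-is, which could
// disagree with the service's own canonicalization and fail authentication.
```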

View File

@ -0,0 +1,196 @@
import { RestError, TransferProgressEvent } from "ms-rest-js";
import { Readable } from "stream";
import { Aborter } from "../Aborter";
export type ReadableStreamGetter = (
offset: number
) => Promise<NodeJS.ReadableStream>;
export interface IRetriableReadableStreamOptions {
/**
* Max retry count (>=0), undefined or invalid value means no retry
*
* @type {number}
* @memberof IRetriableReadableStreamOptions
*/
maxRetryRequests?: number;
/**
* Read progress event handler
*
* @memberof IRetriableReadableStreamOptions
*/
progress?: (progress: TransferProgressEvent) => void;
/**
* Debug purpose only. Used to inject an unexpected end into the existing internal stream,
* to test whether stream retry works well or not.
*
* When set to true, for the next incoming "data" event of the internal stream,
* RetriableReadableStream will try to emit an "end" event to the existing internal
* stream to force it to end and start retrying from the breaking point.
* The value will then be updated to "undefined" once the injection works.
*
* @type {boolean}
* @memberof IRetriableReadableStreamOptions
*/
doInjectErrorOnce?: boolean;
}
/**
* ONLY AVAILABLE IN NODE.JS RUNTIME.
*
* A Node.js Readable stream which will internally retry when the internal ReadableStream unexpectedly ends.
*
* @class RetriableReadableStream
* @extends {Readable}
*/
export class RetriableReadableStream extends Readable {
private aborter: Aborter;
private start: number;
private offset: number;
private end: number;
private getter: ReadableStreamGetter;
private source: NodeJS.ReadableStream;
private retries: number = 0;
private maxRetryRequests: number;
private progress?: (progress: TransferProgressEvent) => void;
private options: IRetriableReadableStreamOptions;
/**
* Creates an instance of RetriableReadableStream.
*
* @param {Aborter} aborter Create a new Aborter instance with Aborter.none or Aborter.timeout(),
* see the documentation of Aborter for more examples of request cancellation
* @param {NodeJS.ReadableStream} source The current ReadableStream returned from getter
* @param {ReadableStreamGetter} getter A method that issues a new download request returning
* a new ReadableStream from the specified offset
* @param {number} offset Offset position in original data source to read
* @param {number} count How much data in original data source to read
* @param {IRetriableReadableStreamOptions} [options={}]
* @memberof RetriableReadableStream
*/
public constructor(
aborter: Aborter,
source: NodeJS.ReadableStream,
getter: ReadableStreamGetter,
offset: number,
count: number,
options: IRetriableReadableStreamOptions = {}
) {
super();
this.aborter = aborter;
this.getter = getter;
this.source = source;
this.start = offset;
this.offset = offset;
this.end = offset + count - 1;
this.maxRetryRequests =
options.maxRetryRequests && options.maxRetryRequests >= 0
? options.maxRetryRequests
: 0;
this.progress = options.progress;
this.options = options;
aborter.addEventListener("abort", () => {
this.source.pause();
this.emit(
"error",
new RestError(
"The request was aborted",
RestError.REQUEST_ABORTED_ERROR
)
);
});
this.setSourceDataHandler();
this.setSourceEndHandler();
this.setSourceErrorHandler();
}
public _read() {
if (!this.aborter.aborted) {
this.source.resume();
}
}
private setSourceDataHandler() {
this.source.on("data", (data: Buffer) => {
if (this.options.doInjectErrorOnce) {
this.options.doInjectErrorOnce = undefined;
this.source.pause();
this.source.removeAllListeners("data");
this.source.emit("end");
return;
}
// console.log(
// `Offset: ${this.offset}, Received ${data.length} from internal stream`
// );
this.offset += data.length;
if (this.progress) {
this.progress({ loadedBytes: this.offset - this.start });
}
if (!this.push(data)) {
this.source.pause();
}
});
}
private setSourceEndHandler() {
this.source.on("end", () => {
// console.log(
// `Source stream emits end, offset: ${
// this.offset
// }, dest end : ${this.end}`
// );
if (this.offset - 1 === this.end) {
this.push(null);
} else if (this.offset <= this.end) {
// console.log(
// `retries: ${this.retries}, max retries: ${this.maxRetries}`
// );
if (this.retries < this.maxRetryRequests) {
this.retries += 1;
this.getter(this.offset)
.then(newSource => {
this.source = newSource;
this.setSourceDataHandler();
this.setSourceEndHandler();
this.setSourceErrorHandler();
})
.catch(error => {
this.emit("error", error);
});
} else {
this.emit(
"error",
new Error(
// tslint:disable-next-line:max-line-length
`Data corruption failure: received less data than required and reached maxRetries limitation. Received data offset: ${this
.offset - 1}, data needed offset: ${this.end}, retries: ${
this.retries
}, max retries: ${this.maxRetryRequests}`
)
);
}
} else {
this.emit(
"error",
new Error(
`Data corruption failure: Received more data than original request, data needed offset is ${
this.end
}, received offset: ${this.offset - 1}`
)
);
}
});
}
private setSourceErrorHandler() {
this.source.on("error", error => {
this.emit("error", error);
});
}
}
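A hedged sketch of how this stream is wired, mirroring what `BlobDownloadResponse` does (names are placeholders):

```typescript
// Sketch: wrap an initial download stream so unexpected ends trigger ranged re-downloads.
const retriable = new RetriableReadableStream(
  Aborter.none,
  initialResponse.readableStreamBody!, // stream from the first download call
  async offset =>
    (await blobURL.download(Aborter.none, offset)).readableStreamBody!,
  0,                                   // starting offset within the blob
  initialResponse.contentLength!,      // total bytes expected
  { maxRetryRequests: 5 }
);
retriable.pipe(destinationStream);     // behaves like a normal Node.js Readable
```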

View File

@@ -1,10 +1,11 @@
export const SDK_VERSION: string = "10.1.0-preview";
export const SDK_VERSION: string = "10.2.0-preview";
export const SERVICE_VERSION: string = "2018-03-28";
export const BLOCK_BLOB_MAX_UPLOAD_BLOB_BYTES: number = 256 * 1024 * 1024; // 256MB
export const BLOCK_BLOB_MAX_STAGE_BLOCK_BYTES: number = 100 * 1024 * 1024; // 100MB
export const BLOCK_BLOB_MAX_BLOCKS: number = 50000;
export const BLOB_DEFAULT_DOWNLOAD_BLOCK_BYTES: number = 4 * 1024 * 1024; // 4MB
export const DEFAULT_BLOB_DOWNLOAD_BLOCK_BYTES: number = 4 * 1024 * 1024; // 4MB
export const DEFAULT_MAX_DOWNLOAD_RETRY_REQUESTS: number = 5;
export const URLConstants = {
Parameters: {


@@ -129,12 +129,21 @@ export function getURLQueries(url: string): { [key: string]: string } {
* Rounds a date off to seconds.
*
* @export
* @param {Date} date Input date
* @returns {string} Date string in ISO8061 format, with no milliseconds component
* @param {Date} date
* @param {boolean} [withMilliseconds=true] If true, YYYY-MM-DDThh:mm:ss.fffffffZ will be returned;
* If false, YYYY-MM-DDThh:mm:ssZ will be returned.
* @returns {string} Date string in ISO 8601 format, with or without the 7-digit fractional-seconds component
*/
export function truncatedISO8061Date(date: Date): string {
export function truncatedISO8061Date(
date: Date,
withMilliseconds: boolean = true
): string {
// Date.toISOString() will return like "2018-10-29T06:34:36.139Z"
const dateString = date.toISOString();
return dateString.substring(0, dateString.length - 1) + "0000" + "Z";
return withMilliseconds
? dateString.substring(0, dateString.length - 1) + "0000" + "Z"
: dateString.substring(0, dateString.length - 5) + "Z";
}
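A quick illustration of the two output shapes, derived directly from the implementation above (the import path is an assumption):

```typescript
import { truncatedISO8061Date } from "../../lib/utils/utils.common"; // assumed path

const d = new Date("2018-10-29T06:34:36.139Z");
truncatedISO8061Date(d);        // "2018-10-29T06:34:36.1390000Z" (7-digit fractional seconds)
truncatedISO8061Date(d, false); // "2018-10-29T06:34:36Z"        (truncated to whole seconds)
```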
/**

blob/package-lock.json (generated, 1810 lines changed): diff not shown because of its large size.


@@ -1,6 +1,6 @@
{
"name": "@azure/storage-blob",
"version": "10.1.0-preview",
"version": "10.2.0-preview",
"description": "Microsoft Azure Storage SDK for JavaScript - Blob",
"main": "./dist/index.js",
"module": "./dist-esm/lib/index.js",
@@ -8,6 +8,7 @@
"./dist/index.js": "./browser/azure-storage.blob.min.js",
"./dist-esm/lib/index.js": "./dist-esm/lib/index.browser.js",
"./dist-esm/test/utils/index.js": "./dist-esm/test/utils/index.browser.js",
"./dist-esm/lib/BlobDownloadResponse.js": "./dist-esm/lib/BlobDownloadResponse.browser.js",
"os": false,
"process": false
},
@@ -22,9 +23,10 @@
},
"devDependencies": {
"@types/mocha": "^5.2.5",
"@types/node": "^10.12.3",
"assert": "^1.4.1",
"es6-promise": "^4.2.4",
"gulp": "^3.9.1",
"gulp": "^4.0.0",
"gulp-zip": "^4.2.0",
"karma": "^3.0.0",
"karma-chrome-launcher": "^2.2.0",
@@ -44,19 +46,19 @@
"rollup-plugin-shim": "^1.0.0",
"rollup-plugin-uglify": "^5.0.2",
"rollup-plugin-visualizer": "^0.9.0",
"typescript": "^3.0.1"
"ts-node": "^7.0.1",
"typescript": "^3.1.6"
},
"scripts": {
"test": "npm run test:node && npm run test:browser",
"test:node": "npm run build:test && mocha --no-timeouts dist-test/index.js",
"test:browser": "npm run build:test && karma start --single-run",
"build": "npm run build:es6 && npm run build:nodebrowser && npm run build:browserzip",
"test:node": "mocha --no-timeouts dist-test/index.js",
"test:browser": "karma start --single-run",
"build": "npm run build:es6 && npm run build:nodebrowser && npm run build:browserzip && npm run build:test",
"build:test": "rollup -c rollup.test.config.js",
"build:nodebrowser": "rollup -c",
"build:es6": "tsc -p tsconfig.json",
"build:autorest": "autorest ./swagger/README.md --typescript --use=@microsoft.azure/autorest.typescript@2.0.476",
"build:browserzip": "gulp zip",
"prepare": "npm run build",
"clean": "rimraf dist dist-esm dist-test typings temp browser/*.js* browser/*.zip statistics.html"
},
"repository": {
@@ -79,4 +81,4 @@
},
"homepage": "https://github.com/Azure/azure-storage-js#readme",
"sideEffects": false
}
}


@@ -24,12 +24,12 @@ async function main() {
// Use TokenCredential with OAuth token
const tokenCredential = new TokenCredential("token");
tokenCredential.token = "renewedToken";
tokenCredential.token = "renewedToken"; // Renew the token by updating token field of token credential
// Use AnonymousCredential when url already includes a SAS signature
const tokenCredential = new AnonymousCredential();
const anonymousCredential = new AnonymousCredential();
// Use sharedKeyCredential, tokenCredential or tokenCredential to create a pipeline
// Use sharedKeyCredential, tokenCredential or anonymousCredential to create a pipeline
const pipeline = StorageURL.newPipeline(sharedKeyCredential);
// List containers
@@ -46,7 +46,7 @@ async function main() {
marker
);
marker = listContainersResponse.marker;
marker = listContainersResponse.nextMarker;
for (const container of listContainersResponse.containerItems) {
console.log(`Container: ${container.name}`);
}
@@ -78,13 +78,14 @@ async function main() {
);
// List blobs
marker = undefined;
do {
const listBlobsResponse = await containerURL.listBlobFlatSegment(
Aborter.none,
marker
);
marker = listBlobsResponse.marker;
marker = listBlobsResponse.nextMarker;
for (const blob of listBlobsResponse.segment.blobItems) {
console.log(`Blob: ${blob.name}`);
}
@@ -96,7 +97,7 @@ async function main() {
const downloadBlockBlobResponse = await blobURL.download(Aborter.none, 0);
console.log(
"Downloaded blob content",
downloadBlockBlobResponse.readableStreamBody.read(content.length).toString()
await streamToString(downloadBlockBlobResponse.readableStreamBody)
);
// Delete container
@@ -105,6 +106,20 @@ async function main() {
console.log("deleted container");
}
// A helper method used to read a Node.js readable stream into a string
async function streamToString(readableStream) {
return new Promise((resolve, reject) => {
const chunks = [];
readableStream.on("data", data => {
chunks.push(data.toString());
});
readableStream.on("end", () => {
resolve(chunks.join(""));
});
readableStream.on("error", reject);
});
}
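On Node.js 10 and later, readable streams are also async-iterable, so an equivalent hedged sketch of the same helper (an alternative style, not part of this sample) is:

```typescript
import { Readable } from "stream";

// Alternative sketch: stream.Readable instances are async-iterable in Node.js 10+.
async function streamToString2(readableStream: Readable): Promise<string> {
  const chunks: string[] = [];
  for await (const chunk of readableStream) {
    chunks.push(chunk.toString());
  }
  return chunks.join("");
}
```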
// An async method returns a Promise object, which is compatible with the then().catch() coding style.
main()
.then(() => {


@@ -24,8 +24,8 @@ async function main() {
const localFilePath = "localFilePath";
const pipeline = StorageURL.newPipeline(new AnonymousCredential(), {
// httpClient: MyHTTPClient, // A customized HTTP client implementing IHTTPClient interface
// logger: MyLogger, // A customized logger implementing IHTTPPipelineLogger interface
// httpClient: MyHTTPClient, // A customized HTTP client implementing IHttpClient interface
// logger: MyLogger, // A customized logger implementing IHttpPipelineLogger interface
retryOptions: { maxTries: 4 }, // Retry options
telemetry: { value: "HighLevelSample V1.0.0" } // Customized telemetry string
});


@@ -55,7 +55,7 @@ describe("BlobURL", () => {
a: "a",
b: "b"
};
await blobURL.setMetadata(Aborter.none, { metadata });
await blobURL.setMetadata(Aborter.none, metadata);
const result = await blobURL.getProperties(Aborter.none);
assert.deepStrictEqual(result.metadata, metadata);
});
@@ -65,7 +65,7 @@ describe("BlobURL", () => {
a: "a",
b: "b"
};
await blobURL.setMetadata(Aborter.none, { metadata });
await blobURL.setMetadata(Aborter.none, metadata);
const result = await blobURL.getProperties(Aborter.none);
assert.deepStrictEqual(result.metadata, metadata);
@@ -100,10 +100,9 @@ describe("BlobURL", () => {
: new Uint8Array([1, 2, 3, 4]),
blobContentType: "blobContentType"
};
await blobURL.setHTTPHeaders(Aborter.none, {
blobHTTPHeaders: headers
});
await blobURL.setHTTPHeaders(Aborter.none, headers);
const result = await blobURL.getProperties(Aborter.none);
assert.ok(result.date);
assert.deepStrictEqual(result.blobType, BlobType.BlockBlob);
assert.ok(result.lastModified);
assert.deepStrictEqual(result.metadata, {});


@@ -34,9 +34,7 @@ describe("ContainerURL", () => {
keya: "vala",
keyb: "valb"
};
await containerURL.setMetadata(Aborter.none, {
metadata
});
await containerURL.setMetadata(Aborter.none, metadata);
const result = await containerURL.getProperties(Aborter.none);
assert.deepEqual(result.metadata, metadata);


@@ -33,9 +33,18 @@ describe("BlockBlobURL Node.js only", () => {
await blockBlobURL.upload(Aborter.none, bodyBuffer, body.length);
const result = await blobURL.download(Aborter.none, 0);
assert.deepStrictEqual(
result.readableStreamBody!.read(body.length)!.toString(),
body
);
const downloadedBody = await new Promise((resolve, reject) => {
const buffer: string[] = [];
result.readableStreamBody!.on("data", (data: Buffer) => {
buffer.push(data.toString());
});
result.readableStreamBody!.on("end", () => {
resolve(buffer.join(""));
});
result.readableStreamBody!.on("error", reject);
});
assert.deepStrictEqual(downloadedBody, body);
});
});


@@ -1,20 +1,22 @@
import * as assert from "assert";
import * as fs from "fs";
import * as path from "path";
import { PassThrough } from "stream";
import { BlobURL, BlockBlobURL, ContainerURL } from "../../lib";
import { Aborter } from "../../lib/Aborter";
import {
downloadBlobToBuffer,
uploadFileToBlockBlob,
uploadStreamToBlockBlob
} from "../../lib/highlevel.node";
import { IRetriableReadableStreamOptions } from "../../lib/utils/RetriableReadableStream";
import {
createRandomLocalFile,
getBSU,
getUniqueName,
readStreamToLocalFile
} from "../utils/index";
import { BlobURL, BlockBlobURL, ContainerURL } from "../../lib";
} from "../utils";
// tslint:disable:no-empty
describe("Highlevel", () => {
@@ -188,7 +190,10 @@ describe("Highlevel", () => {
const downloadResponse = await blockBlobURL.download(Aborter.none, 0);
const downloadFilePath = path.join("./", getUniqueName("downloadFile"));
const downloadFilePath = path.join(
tempFolderPath,
getUniqueName("downloadFile")
);
await readStreamToLocalFile(
downloadResponse.readableStreamBody!,
downloadFilePath
@@ -201,6 +206,36 @@ describe("Highlevel", () => {
fs.unlinkSync(downloadFilePath);
});
it("uploadStreamToBlockBlob should success for tiny buffers", async () => {
const buf = Buffer.from([0x62, 0x75, 0x66, 0x66, 0x65, 0x72]);
const bufferStream = new PassThrough();
bufferStream.end(buf);
await uploadStreamToBlockBlob(
Aborter.none,
bufferStream,
blockBlobURL,
4 * 1024 * 1024,
20
);
const downloadResponse = await blockBlobURL.download(Aborter.none, 0);
const downloadFilePath = path.join(
tempFolderPath,
getUniqueName("downloadFile")
);
await readStreamToLocalFile(
downloadResponse.readableStreamBody!,
downloadFilePath
);
const downloadedBuffer = fs.readFileSync(downloadFilePath);
assert.ok(buf.equals(downloadedBuffer));
fs.unlinkSync(downloadFilePath);
});
it("uploadStreamToBlockBlob should abort", async () => {
const rs = fs.createReadStream(tempFileLarge);
const aborter = Aborter.timeout(1);
@@ -252,6 +287,7 @@ describe("Highlevel", () => {
const buf = Buffer.alloc(tempFileLargeLength);
await downloadBlobToBuffer(Aborter.none, buf, blockBlobURL, 0, undefined, {
blockSize: 4 * 1024 * 1024,
maxRetryRequestsPerBlock: 5,
parallelism: 20
});
@@ -279,6 +315,7 @@ describe("Highlevel", () => {
undefined,
{
blockSize: 4 * 1024 * 1024,
maxRetryRequestsPerBlock: 5,
parallelism: 20
}
);
@@ -304,6 +341,7 @@ describe("Highlevel", () => {
try {
await downloadBlobToBuffer(aborter, buf, blockBlobURL, 0, undefined, {
blockSize: 1 * 1024,
maxRetryRequestsPerBlock: 5,
parallelism: 1,
progress: () => {
eventTriggered = true;
@@ -313,4 +351,273 @@ describe("Highlevel", () => {
} catch (err) {}
assert.ok(eventTriggered);
});
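The retry tests below drive the `doInjectErrorOnce` hook seen earlier in this diff's `RetriableReadableStream` data handler: when the flag is set, the stream pauses its source, drops the data listeners, and emits a premature "end" once, simulating a broken connection so the ranged-retry path is exercised against real downloads.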
it("bloburl.download should success when internal stream unexcepted ends at the stream end", async () => {
const uploadResponse = await uploadFileToBlockBlob(
Aborter.none,
tempFileSmall,
blockBlobURL,
{
blockSize: 4 * 1024 * 1024,
parallelism: 20
}
);
let retriableReadableStreamOptions: IRetriableReadableStreamOptions;
const downloadResponse = await blockBlobURL.download(
Aborter.none,
0,
undefined,
{
blobAccessConditions: {
modifiedAccessConditions: {
ifMatch: uploadResponse.eTag
}
},
maxRetryRequests: 1,
progress: ev => {
if (ev.loadedBytes >= tempFileSmallLength) {
retriableReadableStreamOptions.doInjectErrorOnce = true;
}
}
}
);
retriableReadableStreamOptions = (downloadResponse.readableStreamBody! as any)
.options;
const downloadedFile = path.join(
tempFolderPath,
getUniqueName("downloadfile.")
);
await readStreamToLocalFile(
downloadResponse.readableStreamBody!,
downloadedFile
);
const downloadedData = await fs.readFileSync(downloadedFile);
const uploadedData = await fs.readFileSync(tempFileSmall);
fs.unlinkSync(downloadedFile);
assert.ok(downloadedData.equals(uploadedData));
});
it("bloburl.download should download full data successfully when internal stream unexcepted ends", async () => {
const uploadResponse = await uploadFileToBlockBlob(
Aborter.none,
tempFileSmall,
blockBlobURL,
{
blockSize: 4 * 1024 * 1024,
parallelism: 20
}
);
let retriableReadableStreamOptions: IRetriableReadableStreamOptions;
let injectedErrors = 0;
const downloadResponse = await blockBlobURL.download(
Aborter.none,
0,
undefined,
{
blobAccessConditions: {
modifiedAccessConditions: {
ifMatch: uploadResponse.eTag
}
},
maxRetryRequests: 3,
progress: () => {
if (injectedErrors++ < 3) {
retriableReadableStreamOptions.doInjectErrorOnce = true;
}
}
}
);
retriableReadableStreamOptions = (downloadResponse.readableStreamBody! as any)
.options;
const downloadedFile = path.join(
tempFolderPath,
getUniqueName("downloadfile.")
);
await readStreamToLocalFile(
downloadResponse.readableStreamBody!,
downloadedFile
);
const downloadedData = await fs.readFileSync(downloadedFile);
const uploadedData = await fs.readFileSync(tempFileSmall);
fs.unlinkSync(downloadedFile);
assert.ok(downloadedData.equals(uploadedData));
});
it("bloburl.download should download partial data when internal stream unexcepted ends", async () => {
const uploadResponse = await uploadFileToBlockBlob(
Aborter.none,
tempFileSmall,
blockBlobURL,
{
blockSize: 4 * 1024 * 1024,
parallelism: 20
}
);
const partialSize = 500 * 1024;
let retriableReadableStreamOptions: IRetriableReadableStreamOptions;
let injectedErrors = 0;
const downloadResponse = await blockBlobURL.download(
Aborter.none,
0,
partialSize,
{
blobAccessConditions: {
modifiedAccessConditions: {
ifMatch: uploadResponse.eTag
}
},
maxRetryRequests: 3,
progress: () => {
if (injectedErrors++ < 3) {
retriableReadableStreamOptions.doInjectErrorOnce = true;
}
}
}
);
retriableReadableStreamOptions = (downloadResponse.readableStreamBody! as any)
.options;
const downloadedFile = path.join(
tempFolderPath,
getUniqueName("downloadfile.")
);
await readStreamToLocalFile(
downloadResponse.readableStreamBody!,
downloadedFile
);
const downloadedData = await fs.readFileSync(downloadedFile);
const uploadedData = await fs.readFileSync(tempFileSmall);
fs.unlinkSync(downloadedFile);
assert.ok(
downloadedData
.slice(0, partialSize)
.equals(uploadedData.slice(0, partialSize))
);
});
it("bloburl.download should download data failed when exceeding max stream retry requests", async () => {
const uploadResponse = await uploadFileToBlockBlob(
Aborter.none,
tempFileSmall,
blockBlobURL,
{
blockSize: 4 * 1024 * 1024,
parallelism: 20
}
);
const downloadedFile = path.join(
tempFolderPath,
getUniqueName("downloadfile.")
);
let retriableReadableStreamOptions: IRetriableReadableStreamOptions;
let injectedErrors = 0;
let expectedError = false;
try {
const downloadResponse = await blockBlobURL.download(
Aborter.none,
0,
undefined,
{
blobAccessConditions: {
modifiedAccessConditions: {
ifMatch: uploadResponse.eTag
}
},
maxRetryRequests: 0,
progress: () => {
if (injectedErrors++ < 1) {
retriableReadableStreamOptions.doInjectErrorOnce = true;
}
}
}
);
retriableReadableStreamOptions = (downloadResponse.readableStreamBody! as any)
.options;
await readStreamToLocalFile(
downloadResponse.readableStreamBody!,
downloadedFile
);
} catch (error) {
expectedError = true;
}
assert.ok(expectedError);
fs.unlinkSync(downloadedFile);
});
it("bloburl.download should abort after retrys", async () => {
const uploadResponse = await uploadFileToBlockBlob(
Aborter.none,
tempFileSmall,
blockBlobURL,
{
blockSize: 4 * 1024 * 1024,
parallelism: 20
}
);
const downloadedFile = path.join(
tempFolderPath,
getUniqueName("downloadfile.")
);
let retriableReadableStreamOptions: IRetriableReadableStreamOptions;
let injectedErrors = 0;
let expectedError = false;
try {
const aborter = Aborter.none;
const downloadResponse = await blockBlobURL.download(
aborter,
0,
undefined,
{
blobAccessConditions: {
modifiedAccessConditions: {
ifMatch: uploadResponse.eTag
}
},
maxRetryRequests: 3,
progress: () => {
if (injectedErrors++ < 2) {
// Trigger 2 retries
retriableReadableStreamOptions.doInjectErrorOnce = true;
} else {
// Trigger aborter
aborter.abort();
}
}
}
);
retriableReadableStreamOptions = (downloadResponse.readableStreamBody! as any)
.options;
await readStreamToLocalFile(
downloadResponse.readableStreamBody!,
downloadedFile
);
} catch (error) {
expectedError = true;
}
assert.ok(expectedError);
fs.unlinkSync(downloadedFile);
});
});

blob/test/node/sas.test.ts (new file, 306 lines)

@@ -0,0 +1,306 @@
import * as assert from "assert";
import {
AccountSASPermissions,
AccountSASResourceTypes,
AccountSASServices,
AnonymousCredential,
BlobSASPermissions,
ContainerSASPermissions,
generateAccountSASQueryParameters,
generateBlobSASQueryParameters,
ServiceURL,
SharedKeyCredential,
StorageURL,
} from "../../lib";
import { Aborter } from "../../lib/Aborter";
import { ContainerURL, PageBlobURL, SASProtocol } from "../../lib/index.browser";
import { getBSU, getUniqueName } from "../utils";
describe("Shared Access Signature (SAS) generation Node.js only", () => {
const serviceURL = getBSU();
it("generateAccountSASQueryParameters should work", async () => {
const now = new Date();
now.setMinutes(now.getMinutes() - 5); // Allow for clock skew with the server
const tmr = new Date();
tmr.setDate(tmr.getDate() + 1);
// By default, credential is always the last element of pipeline factories
const factories = serviceURL.pipeline.factories;
const sharedKeyCredential = factories[factories.length - 1];
const sas = generateAccountSASQueryParameters(
{
expiryTime: tmr,
ipRange: { start: "0.0.0.0", end: "255.255.255.255" },
permissions: AccountSASPermissions.parse("rwdlacup").toString(),
protocol: SASProtocol.HTTPSandHTTP,
resourceTypes: AccountSASResourceTypes.parse("sco").toString(),
services: AccountSASServices.parse("btqf").toString(),
startTime: now,
version: "2016-05-31"
},
sharedKeyCredential as SharedKeyCredential
).toString();
const sasURL = `${serviceURL.url}?${sas}`;
const serviceURLWithSAS = new ServiceURL(
sasURL,
StorageURL.newPipeline(new AnonymousCredential())
);
await serviceURLWithSAS.getAccountInfo(Aborter.none);
});
it("generateAccountSASQueryParameters should not work with invalid permission", async () => {
const tmr = new Date();
tmr.setDate(tmr.getDate() + 1);
// By default, credential is always the last element of pipeline factories
const factories = serviceURL.pipeline.factories;
const sharedKeyCredential = factories[factories.length - 1];
const sas = generateAccountSASQueryParameters(
{
expiryTime: tmr,
permissions: AccountSASPermissions.parse("wdlcup").toString(),
resourceTypes: AccountSASResourceTypes.parse("sco").toString(),
services: AccountSASServices.parse("btqf").toString()
},
sharedKeyCredential as SharedKeyCredential
).toString();
const sasURL = `${serviceURL.url}?${sas}`;
const serviceURLWithSAS = new ServiceURL(
sasURL,
StorageURL.newPipeline(new AnonymousCredential())
);
let error;
try {
await serviceURLWithSAS.getProperties(Aborter.none);
} catch (err) {
error = err;
}
assert.ok(error);
});
it("generateAccountSASQueryParameters should not work with invalid service", async () => {
const tmr = new Date();
tmr.setDate(tmr.getDate() + 1);
// By default, credential is always the last element of pipeline factories
const factories = serviceURL.pipeline.factories;
const sharedKeyCredential = factories[factories.length - 1];
const sas = generateAccountSASQueryParameters(
{
expiryTime: tmr,
permissions: AccountSASPermissions.parse("rwdlacup").toString(),
resourceTypes: AccountSASResourceTypes.parse("sco").toString(),
services: AccountSASServices.parse("tqf").toString()
},
sharedKeyCredential as SharedKeyCredential
).toString();
const sasURL = `${serviceURL.url}?${sas}`;
const serviceURLWithSAS = new ServiceURL(
sasURL,
StorageURL.newPipeline(new AnonymousCredential())
);
let error;
try {
await serviceURLWithSAS.getProperties(Aborter.none);
} catch (err) {
error = err;
}
assert.ok(error);
});
it("generateAccountSASQueryParameters should not work with invalid resource type", async () => {
const tmr = new Date();
tmr.setDate(tmr.getDate() + 1);
// By default, credential is always the last element of pipeline factories
const factories = serviceURL.pipeline.factories;
const sharedKeyCredential = factories[factories.length - 1];
const sas = generateAccountSASQueryParameters(
{
expiryTime: tmr,
ipRange: { start: "0.0.0.0", end: "255.255.255.255" },
permissions: AccountSASPermissions.parse("rwdlacup").toString(),
protocol: SASProtocol.HTTPSandHTTP,
resourceTypes: AccountSASResourceTypes.parse("co").toString(),
services: AccountSASServices.parse("btqf").toString(),
version: "2016-05-31"
},
sharedKeyCredential as SharedKeyCredential
).toString();
const sasURL = `${serviceURL.url}?${sas}`;
const serviceURLWithSAS = new ServiceURL(
sasURL,
StorageURL.newPipeline(new AnonymousCredential())
);
let error;
try {
await serviceURLWithSAS.getProperties(Aborter.none);
} catch (err) {
error = err;
}
assert.ok(error);
});
it("generateBlobSASQueryParameters should work for container", async () => {
const now = new Date();
now.setMinutes(now.getMinutes() - 5); // Allow for clock skew with the server
const tmr = new Date();
tmr.setDate(tmr.getDate() + 1);
// By default, credential is always the last element of pipeline factories
const factories = serviceURL.pipeline.factories;
const sharedKeyCredential = factories[factories.length - 1];
const containerName = getUniqueName("container");
const containerURL = ContainerURL.fromServiceURL(serviceURL, containerName);
await containerURL.create(Aborter.none);
const containerSAS = generateBlobSASQueryParameters(
{
containerName,
expiryTime: tmr,
ipRange: { start: "0.0.0.0", end: "255.255.255.255" },
permissions: ContainerSASPermissions.parse("racwdl").toString(),
protocol: SASProtocol.HTTPSandHTTP,
startTime: now,
version: "2016-05-31"
},
sharedKeyCredential as SharedKeyCredential
);
const sasURL = `${containerURL.url}?${containerSAS}`;
const containerURLwithSAS = new ContainerURL(
sasURL,
StorageURL.newPipeline(new AnonymousCredential())
);
await containerURLwithSAS.listBlobFlatSegment(Aborter.none);
await containerURL.delete(Aborter.none);
});
it("generateBlobSASQueryParameters should work for blob", async () => {
const now = new Date();
now.setMinutes(now.getMinutes() - 5); // Allow for clock skew with the server
const tmr = new Date();
tmr.setDate(tmr.getDate() + 1);
// By default, credential is always the last element of pipeline factories
const factories = serviceURL.pipeline.factories;
const sharedKeyCredential = factories[factories.length - 1];
const containerName = getUniqueName("container");
const containerURL = ContainerURL.fromServiceURL(serviceURL, containerName);
await containerURL.create(Aborter.none);
const blobName = getUniqueName("blob");
const blobURL = PageBlobURL.fromContainerURL(containerURL, blobName);
await blobURL.create(Aborter.none, 1024, {
blobHTTPHeaders: {
blobContentType: "content-type-original"
}
});
const blobSAS = generateBlobSASQueryParameters(
{
blobName,
cacheControl: "cache-control-override",
containerName,
contentDisposition: "content-disposition-override",
contentEncoding: "content-encoding-override",
contentLanguage: "content-language-override",
contentType: "content-type-override",
expiryTime: tmr,
ipRange: { start: "0.0.0.0", end: "255.255.255.255" },
permissions: BlobSASPermissions.parse("racwd").toString(),
protocol: SASProtocol.HTTPSandHTTP,
startTime: now,
version: "2016-05-31"
},
sharedKeyCredential as SharedKeyCredential
);
const sasURL = `${blobURL.url}?${blobSAS}`;
const blobURLwithSAS = new PageBlobURL(
sasURL,
StorageURL.newPipeline(new AnonymousCredential())
);
const properties = await blobURLwithSAS.getProperties(Aborter.none);
assert.equal(properties.cacheControl, "cache-control-override");
assert.equal(properties.contentDisposition, "content-disposition-override");
assert.equal(properties.contentEncoding, "content-encoding-override");
assert.equal(properties.contentLanguage, "content-language-override");
assert.equal(properties.contentType, "content-type-override");
await containerURL.delete(Aborter.none);
});
it("generateBlobSASQueryParameters should work for blob with access policy", async () => {
const now = new Date();
now.setMinutes(now.getMinutes() - 5); // Allow for clock skew with the server
const tmr = new Date();
tmr.setDate(tmr.getDate() + 1);
// By default, credential is always the last element of pipeline factories
const factories = serviceURL.pipeline.factories;
const sharedKeyCredential = factories[factories.length - 1];
const containerName = getUniqueName("container");
const containerURL = ContainerURL.fromServiceURL(serviceURL, containerName);
await containerURL.create(Aborter.none);
const blobName = getUniqueName("blob");
const blobURL = PageBlobURL.fromContainerURL(containerURL, blobName);
await blobURL.create(Aborter.none, 1024);
const id = "unique-id";
await containerURL.setAccessPolicy(Aborter.none, undefined, [
{
accessPolicy: {
expiry: tmr,
permission: ContainerSASPermissions.parse("racwdl").toString(),
start: now
},
id
}
]);
const blobSAS = generateBlobSASQueryParameters(
{
containerName,
identifier: id
},
sharedKeyCredential as SharedKeyCredential
);
const sasURL = `${blobURL.url}?${blobSAS}`;
const blobURLwithSAS = new PageBlobURL(
sasURL,
StorageURL.newPipeline(new AnonymousCredential())
);
await blobURLwithSAS.getProperties(Aborter.none);
await containerURL.delete(Aborter.none);
});
});
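Distilled from the tests above, a minimal sketch of generating a container-level SAS and using it anonymously (the account name, key, and container name are placeholder assumptions; the import names are assumed to be re-exported from the package entry point as in the samples):

```typescript
import {
  Aborter,
  AnonymousCredential,
  ContainerSASPermissions,
  ContainerURL,
  generateBlobSASQueryParameters,
  SharedKeyCredential,
  StorageURL
} from "@azure/storage-blob";

async function listWithContainerSAS(): Promise<void> {
  const credential = new SharedKeyCredential("myaccount", "myAccountKey"); // placeholders
  const expiryTime = new Date();
  expiryTime.setDate(expiryTime.getDate() + 1);

  const containerSAS = generateBlobSASQueryParameters(
    {
      containerName: "mycontainer",
      expiryTime,
      permissions: ContainerSASPermissions.parse("racwdl").toString()
    },
    credential
  ).toString();

  // The SAS token itself carries the authorization, so AnonymousCredential suffices.
  const containerURLWithSAS = new ContainerURL(
    `https://myaccount.blob.core.windows.net/mycontainer?${containerSAS}`,
    StorageURL.newPipeline(new AnonymousCredential())
  );
  await containerURLWithSAS.listBlobFlatSegment(Aborter.none);
}
```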


@@ -1,5 +1,4 @@
import { Aborter } from "../../lib/Aborter";
import { BlobURL } from "../../lib/BlobURL";
import { BlockBlobURL } from "../../lib/BlockBlobURL";
import { ContainerURL } from "../../lib/ContainerURL";
import { getBSU, getUniqueName } from "../utils/index";
@@ -8,13 +7,35 @@ describe("SharedKeyCredentialPolicy Node.js only", () => {
const serviceURL = getBSU();
const containerName: string = getUniqueName("1container-with-dash");
const containerURL = ContainerURL.fromServiceURL(serviceURL, containerName);
const blobName: string = getUniqueName("blob empty");
const blobURL = BlobURL.fromContainerURL(containerURL, blobName);
const blockBlobURL = BlockBlobURL.fromBlobURL(blobURL);
it("SharedKeyCredentialPolicy should work with special container and blob names", async () => {
before(async () => {
await containerURL.create(Aborter.none);
await blockBlobURL.upload(Aborter.none, "A", 1);
});
after(async () => {
await containerURL.delete(Aborter.none);
});
it("SharedKeyCredentialPolicy should work with special container and blob names with spaces", async () => {
const blobName: string = getUniqueName("blob empty");
const blockBlobURL = BlockBlobURL.fromContainerURL(containerURL, blobName);
await blockBlobURL.upload(Aborter.none, "A", 1);
});
it("SharedKeyCredentialPolicy should work with special container and blob names with /", async () => {
const blobName: string = getUniqueName("////blob/empty /another");
const blockBlobURL = BlockBlobURL.fromContainerURL(containerURL, blobName);
await blockBlobURL.upload(Aborter.none, "A", 1);
await blockBlobURL.getProperties(Aborter.none);
});
it("SharedKeyCredentialPolicy should work with special container and blob names uppercase", async () => {
const blobName: string = getUniqueName("////Upper/blob/empty /another");
const blockBlobURL = BlockBlobURL.fromContainerURL(containerURL, blobName);
await blockBlobURL.upload(Aborter.none, "A", 1);
await blockBlobURL.getProperties(Aborter.none);
});
});


@@ -119,6 +119,5 @@ export async function readStreamToLocalFile(
rs.on("error", reject);
ws.on("error", reject);
ws.on("finish", resolve);
rs.on("end", resolve);
});
}
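For reference, a hedged reconstruction of the whole helper after this change (the hunk shows only the tail, so the parameter list and write-stream setup are assumptions): resolving only on the write stream's "finish" event guarantees the file is fully flushed before the promise resolves, whereas the removed "end" listener could resolve while data was still buffered in the write stream.

```typescript
import * as fs from "fs";

// Assumed signature; only the event wiring below is confirmed by the diff.
export async function readStreamToLocalFile(
  rs: NodeJS.ReadableStream,
  file: string
): Promise<void> {
  return new Promise<void>((resolve, reject) => {
    const ws = fs.createWriteStream(file);
    rs.pipe(ws);
    rs.on("error", reject);
    ws.on("error", reject);
    ws.on("finish", resolve); // resolve only after all data is flushed to disk
  });
}
```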


@@ -1,4 +1,4 @@
import { HttpPipelineLogLevel, IHTTPPipelineLogger } from "../../lib/Pipeline";
import { HttpPipelineLogLevel, IHttpPipelineLogger } from "../../lib/Pipeline";
import { padStart } from "../../lib/utils/utils.common";
export function isBrowser(): boolean {
@@ -29,7 +29,7 @@ export function base64decode(encodedString: string): string {
: Buffer.from(encodedString, "base64").toString();
}
export class ConsoleHttpPipelineLogger implements IHTTPPipelineLogger {
export class ConsoleHttpPipelineLogger implements IHttpPipelineLogger {
constructor(public minimumLogLevel: HttpPipelineLogLevel) {}
public log(logLevel: HttpPipelineLogLevel, message: string): void {
const logMessage = `${new Date().toISOString()} ${