File finished top level configs and StorageURL, ServiceURL and ShareURL
This commit is contained in:
Родитель
09f6a7ffe0
Коммит
ab737b7b47
|
@ -0,0 +1,50 @@
|
|||
# browser #
|
||||
browser/azure-storage.file.js
|
||||
|
||||
# dist-esm #
|
||||
!dist-esm/lib/**/*.js
|
||||
dist-esm/test
|
||||
dist-esm/samples
|
||||
|
||||
# dist-test #
|
||||
dist-test/
|
||||
|
||||
# Node #
|
||||
node_modules/
|
||||
|
||||
# Samples #
|
||||
samples/
|
||||
|
||||
# Swagger #
|
||||
swagger/
|
||||
|
||||
# typings #
|
||||
!typings/lib/**/*.d.ts
|
||||
typings/test
|
||||
typings/samples
|
||||
|
||||
# git #
|
||||
.git*
|
||||
|
||||
# Test #
|
||||
test/
|
||||
|
||||
# Others #
|
||||
.vscode/
|
||||
.idea/
|
||||
.travis.yml
|
||||
.gitignore
|
||||
gulpfile.js
|
||||
.git
|
||||
.DS_Store
|
||||
tsconfig.json
|
||||
tslint.json
|
||||
*.js.map
|
||||
*.zip
|
||||
package-lock.json
|
||||
karma.conf.js
|
||||
temp
|
||||
gulpfile.js
|
||||
rollup.config.js
|
||||
rollup.test.config.js
|
||||
*.html
|
|
@ -0,0 +1 @@
|
|||
# Breaking Changes
|
|
@ -0,0 +1,5 @@
|
|||
# Changelog
|
||||
|
||||
2018.11 Version 10.0.0-preview
|
||||
|
||||
* Initial Release. API version 2018-03-28 supported. Please see the README for information on the new design.
|
|
@ -0,0 +1,21 @@
|
|||
MIT License
|
||||
|
||||
Copyright (c) Microsoft Corporation. All rights reserved.
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
|
@ -0,0 +1,148 @@
|
|||
# Azure Storage SDK V10 for JavaScript - File
|
||||
|
||||
- [![npm version](https://badge.fury.io/js/%40azure%2Fstorage-file.svg)](https://badge.fury.io/js/%40azure%2Fstorage-file)
|
||||
- [API Reference documentation](https://docs.microsoft.com/en-us/javascript/api/%40azure/storage-file/index?view=azure-node-preview)
|
||||
|
||||
## Introduction
|
||||
|
||||
This project provides a SDK in JavaScript that makes it easy to consume Microsoft Azure Storage services.
|
||||
|
||||
Please note that this version of the SDK is a complete overhaul of the current [Azure Storage SDK for Node.js and JavaScript in Browsers](https://github.com/azure/azure-storage-node), and is based on the new Storage SDK architecture.
|
||||
|
||||
### Features
|
||||
|
||||
- File Storage
|
||||
- Get/Set File Service Properties
|
||||
- Create/List/Delete File Shares
|
||||
- Create/List/Delete File Directories
|
||||
- Create/Read/List/Update/Delete Files
|
||||
- Features new
|
||||
- Asynchronous I/O for all operations using the async methods
|
||||
- HttpPipeline which enables a high degree of per-request configurability
|
||||
- 1-to-1 correlation with the Storage REST API for clarity and simplicity
|
||||
|
||||
### Compatibility
|
||||
|
||||
This SDK is compatible with Node.js and browsers, and validated against LTS Node.js versions (>=6.5) and latest versions of Chrome, Firefox and Edge.
|
||||
|
||||
#### Compatible with IE11
|
||||
|
||||
You need polyfills to make this library work with IE11. The easiest way is to use [@babel/polyfill](https://babeljs.io/docs/en/babel-polyfill), or [polyfill service](https://polyfill.io/v2/docs/).
|
||||
Or you can load separate polyfills for missing ES feature(s).
|
||||
This library depends on following ES6 features which need external polyfills loaded.
|
||||
|
||||
- `Promise`
|
||||
- `String.prototype.startsWith`
|
||||
- `String.prototype.endsWith`
|
||||
- `String.prototype.repeat`
|
||||
- `String.prototype.includes`
|
||||
|
||||
#### Differences between Node.js and browsers
|
||||
|
||||
There are differences between Node.js and browsers runtime. When getting started with this SDK, pay attention to APIs or classes marked with _"ONLY AVAILABLE IN NODE.JS RUNTIME"_ or _"ONLY AVAILABLE IN BROWSERS"_.
|
||||
|
||||
##### Following features, interfaces, classes or functions are only available in Node.js
|
||||
|
||||
- Shared Key Authorization based on account name and account key
|
||||
- `SharedKeyCredential`
|
||||
- Shared Access Signature(SAS) generation
|
||||
- `generateAccountSASQueryParameters()`
|
||||
- `generateFileSASQueryParameters()`
|
||||
- Parallel uploading and downloading
|
||||
- `uploadFileToAzureFile()`
|
||||
- `uploadStreamToAzureFile()`
|
||||
- `downloadAzureFileToBuffer()`
|
||||
|
||||
##### Following features, interfaces, classes or functions are only available in browsers
|
||||
|
||||
- Parallel uploading and downloading
|
||||
- `uploadBrowserDataToAzureFile()`
|
||||
|
||||
## Getting Started
|
||||
|
||||
### NPM
|
||||
|
||||
The preferred way to install the Azure Storage SDK for JavaScript is to use the npm package manager. Simply type the following into a terminal window:
|
||||
|
||||
```bash
|
||||
npm install @azure/storage-file
|
||||
```
|
||||
|
||||
In your TypeScript or JavaScript file, import via following:
|
||||
|
||||
```JavaScript
|
||||
import * as Azure from "@azure/storage-file";
|
||||
```
|
||||
|
||||
Or
|
||||
|
||||
```JavaScript
|
||||
const Azure = require("@azure/storage-file");
|
||||
```
|
||||
|
||||
### JavaScript Bundle
|
||||
|
||||
To use the SDK with JS bundle in the browsers, simply add a script tag to your HTML pages pointing to the downloaded JS bundle file(s):
|
||||
|
||||
```html
|
||||
<script src="https://mydomain/azure-storage.file.min.js"></script>
|
||||
```
|
||||
|
||||
The JS bundled file is compatible with [UMD](https://github.com/umdjs/umd) standard, if no module system found, following global variable(s) will be exported:
|
||||
|
||||
- `azfile`
|
||||
|
||||
#### Download
|
||||
|
||||
Download latest released JS bundles from links in the [GitHub release page](https://github.com/Azure/azure-storage-js/releases). Or from following links directly:
|
||||
|
||||
- File [https://aka.ms/downloadazurestoragejsfile](https://aka.ms/downloadazurestoragejsfile)
|
||||
|
||||
### CORS
|
||||
|
||||
You need to set up [Cross-Origin Resource Sharing (CORS)](https://docs.microsoft.com/zh-cn/rest/api/storageservices/cross-origin-resource-sharing--cors--support-for-the-azure-storage-services) rules for your storage account if you need to develop for browsers. Go to Azure portal and Azure Storage Explorer, find your storage account, create new CORS rules for blob/queue/file/table service(s).
|
||||
|
||||
For example, you can create following CORS settings for debugging. But please customize the settings carefully according to your requirements in production environment.
|
||||
|
||||
- Allowed origins: \*
|
||||
- Allowed verbs: DELETE,GET,HEAD,MERGE,POST,OPTIONS,PUT
|
||||
- Allowed headers: \*
|
||||
- Exposed headers: \*
|
||||
- Maximum age (seconds): 86400
|
||||
|
||||
## SDK Architecture
|
||||
|
||||
The Azure Storage SDK for JavaScript provides low-level and high-level APIs.
|
||||
|
||||
- ServiceURL, ShareURL, DirectoryURL and FileURL objects provide the low-level API functionality and map one-to-one to the [Azure Storage File REST APIs](https://docs.microsoft.com/en-us/rest/api/storageservices/file-service-rest-api).
|
||||
|
||||
- The high-level APIs provide convenience abstractions such as uploading a large stream to a file (using multiple Put Range requests).
|
||||
|
||||
## Code Samples
|
||||
|
||||
```javascript
|
||||
// TODO:
|
||||
```
|
||||
|
||||
## More Code Samples
|
||||
|
||||
- [File Storage Examples](https://github.com/azure/azure-storage-js/tree/master/file/samples)
|
||||
- [File Storage Examples - Test Cases](https://github.com/azure/azure-storage-js/tree/master/file/test/)
|
||||
|
||||
## License
|
||||
|
||||
This project is licensed under MIT.
|
||||
|
||||
## Contributing
|
||||
|
||||
This project welcomes contributions and suggestions. Most contributions require you to agree to a
|
||||
Contributor License Agreement (CLA) declaring that you have the right to, and actually do, grant us
|
||||
the rights to use your contribution. For details, visit <https://cla.microsoft.com>.
|
||||
|
||||
When you submit a pull request, a CLA-bot will automatically determine whether you need to provide
|
||||
a CLA and decorate the PR appropriately (e.g., label, comment). Simply follow the instructions
|
||||
provided by the bot. You will only need to do this once across all repos using our CLA.
|
||||
|
||||
This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/).
|
||||
For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or
|
||||
contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments.
|
|
@ -0,0 +1,16 @@
|
|||
const gulp = require("gulp");
const zip = require("gulp-zip");

// Bundle the browser build artifacts (plus any txt notices) into a
// versioned zip archive placed back into the browser/ folder.
const pkgVersion = require("./package.json").version;
const archiveName = `azurestoragejs.file-${pkgVersion}.zip`;

gulp.task("zip", () =>
  gulp
    .src([
      "browser/azure-storage.file.js",
      "browser/azure-storage.file.min.js",
      "browser/*.txt"
    ])
    .pipe(zip(archiveName))
    .pipe(gulp.dest("browser"))
);
|
|
@ -0,0 +1,82 @@
|
|||
// Karma configuration for running the browser test bundle (dist-test/index.browser.js)
// against real browsers. See http://karma-runner.github.io/ for option reference.
module.exports = function(config) {
  config.set({
    // base path that will be used to resolve all patterns (eg. files, exclude)
    basePath: "./",

    // frameworks to use
    // available frameworks: https://npmjs.org/browse/keyword/karma-adapter
    frameworks: ["mocha"],

    plugins: [
      "karma-mocha",
      "karma-mocha-reporter",
      "karma-chrome-launcher",
      "karma-edge-launcher",
      "karma-firefox-launcher",
      "karma-ie-launcher",
      "karma-env-preprocessor"
    ],

    // list of files / patterns to load in the browser
    files: [
      // polyfill service supporting IE11 missing features
      "https://cdn.polyfill.io/v2/polyfill.min.js?features=Promise,String.prototype.startsWith,String.prototype.endsWith,String.prototype.repeat,String.prototype.includes",
      "dist-test/index.browser.js"
    ],

    // list of files / patterns to exclude
    exclude: [],

    // preprocess matching files before serving them to the browser
    // available preprocessors: https://npmjs.org/browse/keyword/karma-preprocessor
    preprocessors: {
      "**/*.js": ["env"]
    },

    // inject following environment values into browser testing with window.__env__
    // environment values MUST be exported or set with same console running "karma start"
    // https://www.npmjs.com/package/karma-env-preprocessor
    envPreprocessor: ["ACCOUNT_NAME", "ACCOUNT_SAS"],

    // test results reporter to use
    // possible values: 'dots', 'progress'
    // available reporters: https://npmjs.org/browse/keyword/karma-reporter
    reporters: ["mocha"],

    // web server port
    port: 9876,

    // enable / disable colors in the output (reporters and logs)
    colors: true,

    // level of logging
    // possible values: config.LOG_DISABLE || config.LOG_ERROR || config.LOG_WARN || config.LOG_INFO || config.LOG_DEBUG
    logLevel: config.LOG_INFO,

    // enable / disable watching file and executing tests whenever any file changes
    autoWatch: false,

    // start these browsers
    // available browser launchers: https://npmjs.org/browse/keyword/karma-launcher
    // 'Chrome', 'Firefox', 'Edge', 'IE'
    browsers: ["Chrome"],

    // Continuous Integration mode
    // if true, Karma captures browsers, runs the tests and exits
    singleRun: false,

    // Concurrency level
    // how many browser should be started simultaneous
    concurrency: 1,

    // 10 minutes — storage integration tests can be slow over the network
    browserNoActivityTimeout: 600000,

    client: {
      mocha: {
        // change Karma's debug.html to the mocha web reporter
        reporter: "html",
        timeout: "600000"
      }
    }
  });
};
|
|
@ -0,0 +1,290 @@
|
|||
import { AbortSignalLike, isNode } from "ms-rest-js";
|
||||
|
||||
/**
|
||||
* An aborter instance implements AbortSignal interface, can abort HTTP requests.
|
||||
*
|
||||
* - Call Aborter.none to create a new Aborter instance without timeout.
|
||||
* - Call Aborter.timeout() to create a new Aborter instance with timeout.
|
||||
*
|
||||
* For an existing instance aborter:
|
||||
* - Call aborter.withTimeout() to create and return a child Aborter instance with timeout.
|
||||
* - Call aborter.withValue(key, value) to create and return a child Aborter instance with key/value pair.
|
||||
* - Call aborter.abort() to abort current instance and all children instances.
|
||||
* - Call aborter.getValue(key) to search and get value with corresponding key from current aborter to all parents.
|
||||
*
|
||||
* @example
|
||||
* // Abort without timeout
|
||||
* await blockBlobURL.upload(Aborter.none, buf, buf.length);
|
||||
*
|
||||
* @example
|
||||
* // Abort container create in 1000ms
|
||||
* await blockBlobURL.upload(Aborter.timeout(1000), buf, buf.length);
|
||||
*
|
||||
* @example
|
||||
* // Share aborter cross multiple operations in 30s
|
||||
* // Upload the same data to 2 different data centers at the same time, abort another when any of them is finished
|
||||
* const aborter = Aborter.timeout(30 * 1000);
|
||||
* blockBlobURL1.upload(aborter, buf, buf.length).then(aborter.abort);
|
||||
* blockBlobURL2.upload(aborter, buf, buf.length).then(aborter.abort);
|
||||
*
|
||||
* @example
|
||||
* // Cascaded aborting
|
||||
* // All operations can't take more than 30 seconds
|
||||
* const aborter = Aborter.timeout(30 * 1000);
|
||||
*
|
||||
* // Following 2 operations can't take more than 25 seconds
|
||||
* await blockBlobURL.upload(aborter.withTimeout(25 * 1000), buf, buf.length);
|
||||
* await blockBlobURL.upload(aborter.withTimeout(25 * 1000), buf, buf.length);
|
||||
*
|
||||
* @export
|
||||
* @class Aborter
|
||||
* @implements {AbortSignalLike}
|
||||
*/
|
||||
|
||||
export class Aborter implements AbortSignalLike {
|
||||
/**
|
||||
* Status of whether aborted or not.
|
||||
*
|
||||
* @readonly
|
||||
* @type {boolean}
|
||||
* @memberof Aborter
|
||||
*/
|
||||
public get aborted(): boolean {
|
||||
return this._aborted;
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates a new Aborter instance without timeout.
|
||||
*
|
||||
* @readonly
|
||||
* @static
|
||||
* @type {Aborter}
|
||||
* @memberof Aborter
|
||||
*/
|
||||
public static get none(): Aborter {
|
||||
return new Aborter(undefined, 0);
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates a new Aborter instance with timeout in million-seconds.
|
||||
* Set parameter timeout to 0 will not create a timer.
|
||||
*
|
||||
* @static
|
||||
* @param {number} {timeout} in million-seconds
|
||||
* @returns {Aborter}
|
||||
* @memberof Aborter
|
||||
*/
|
||||
public static timeout(timeout: number): Aborter {
|
||||
return new Aborter(undefined, timeout);
|
||||
}
|
||||
|
||||
/**
|
||||
* onabort event listener.
|
||||
*
|
||||
* @memberof Aborter
|
||||
*/
|
||||
public onabort?: ((ev: Event) => any);
|
||||
|
||||
// tslint:disable-next-line:variable-name
|
||||
private _aborted: boolean = false;
|
||||
private timer?: any;
|
||||
private readonly parent?: Aborter;
|
||||
private readonly children: Aborter[] = []; // When child object calls dispose(), remove child from here
|
||||
private readonly abortEventListeners: Array<
|
||||
(this: AbortSignalLike, ev: any) => any
|
||||
> = [];
|
||||
// Pipeline proxies need to use "abortSignal as Aborter" in order to access non AbortSignalLike methods
|
||||
// immutable primitive types
|
||||
private readonly key?: string;
|
||||
private readonly value?: string | number | boolean | null;
|
||||
// private disposed: boolean = false;
|
||||
|
||||
/**
|
||||
* Private constructor for internal usage, creates an instance of Aborter.
|
||||
*
|
||||
* @param {Aborter} [parent] Optional. Parent aborter.
|
||||
* @param {number} [timeout=0] Optional. Timeout before abort in millisecond, 0 means no timeout.
|
||||
* @param {string} [key] Optional. Immutable key in string.
|
||||
* @param {(string | number | boolean | null)} [value] Optional. Immutable value.
|
||||
* @memberof Aborter
|
||||
*/
|
||||
private constructor(
|
||||
parent?: Aborter,
|
||||
timeout: number = 0,
|
||||
key?: string,
|
||||
value?: string | number | boolean | null
|
||||
) {
|
||||
this.parent = parent;
|
||||
this.key = key;
|
||||
this.value = value;
|
||||
|
||||
if (timeout > 0) {
|
||||
this.timer = setTimeout(() => {
|
||||
this.abort.call(this);
|
||||
}, timeout);
|
||||
|
||||
// When called, the active Timeout object will not require the Node.js event loop
|
||||
// to remain active. If there is no other activity keeping the event loop running,
|
||||
// the process may exit before the Timeout object's callback is invoked.
|
||||
if (this.timer && isNode) {
|
||||
this.timer!.unref();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Create and return a new Aborter instance, which will be appended as a child node of current Aborter.
|
||||
* Current Aborter instance becomes father node of the new instance. When current or father Aborter node
|
||||
* triggers timeout event, all children nodes abort event will be triggered too.
|
||||
*
|
||||
* When timeout parameter (in millisecond) is larger than 0, the abort event will be triggered when timeout.
|
||||
* Otherwise, call abort() method to manually abort.
|
||||
*
|
||||
* @param {number} {timeout} Timeout in millisecond.
|
||||
* @returns {Aborter} The new Aborter instance created.
|
||||
* @memberof Aborter
|
||||
*/
|
||||
public withTimeout(timeout: number): Aborter {
|
||||
const childCancelContext = new Aborter(this, timeout);
|
||||
this.children.push(childCancelContext);
|
||||
return childCancelContext;
|
||||
}
|
||||
|
||||
/**
|
||||
* Create and return a new Aborter instance, which will be appended as a child node of current Aborter.
|
||||
* Current Aborter instance becomes father node of the new instance. When current or father Aborter node
|
||||
* triggers timeout event, all children nodes abort event will be triggered too.
|
||||
*
|
||||
* Immutable key value pair will be set into the new created Aborter instance.
|
||||
* Call getValue() to find out latest value with corresponding key in the chain of
|
||||
* [current node] -> [parent node] and [grand parent node]....
|
||||
*
|
||||
* @param {string} key
|
||||
* @param {(string | number | boolean | null)} [value]
|
||||
* @returns {Aborter}
|
||||
* @memberof Aborter
|
||||
*/
|
||||
public withValue(
|
||||
key: string,
|
||||
value?: string | number | boolean | null
|
||||
): Aborter {
|
||||
const childCancelContext = new Aborter(this, 0, key, value);
|
||||
this.children.push(childCancelContext);
|
||||
return childCancelContext;
|
||||
}
|
||||
|
||||
/**
|
||||
* Find out latest value with corresponding key in the chain of
|
||||
* [current node] -> [parent node] -> [grand parent node] -> ... -> [root node].
|
||||
*
|
||||
* If key is not found, undefined will be returned.
|
||||
*
|
||||
* @param {string} key
|
||||
* @returns {(string | number | boolean | null | undefined)}
|
||||
* @memberof Aborter
|
||||
*/
|
||||
public getValue(key: string): string | number | boolean | null | undefined {
|
||||
for (
|
||||
let parent: Aborter | undefined = this;
|
||||
parent;
|
||||
parent = parent.parent
|
||||
) {
|
||||
if (parent.key === key) {
|
||||
return parent.value;
|
||||
}
|
||||
}
|
||||
return undefined;
|
||||
}
|
||||
|
||||
/**
|
||||
* Trigger abort event immediately, the onabort and all abort event listeners will be triggered.
|
||||
* Will try to trigger abort event for all children Aborter nodes.
|
||||
*
|
||||
* - If there is a timeout, the timer will be cancelled.
|
||||
* - If aborted is true, nothing will happen.
|
||||
*
|
||||
* @returns
|
||||
* @memberof Aborter
|
||||
*/
|
||||
public abort() {
|
||||
if (this.aborted) {
|
||||
return;
|
||||
}
|
||||
this.cancelTimer();
|
||||
|
||||
if (this.onabort) {
|
||||
this.onabort.call(this);
|
||||
}
|
||||
|
||||
this.abortEventListeners.forEach(listener => {
|
||||
listener.call(this);
|
||||
});
|
||||
|
||||
this.children.forEach(child => child.cancelByParent());
|
||||
|
||||
this._aborted = true;
|
||||
}
|
||||
|
||||
// public dispose() {
|
||||
// if (this.disposed || this.aborted) {
|
||||
// return;
|
||||
// }
|
||||
|
||||
// this.cancelTimer();
|
||||
|
||||
// // (parent)A <- B <- C(child), if B disposes, when A abort, C will not abort
|
||||
// if (this.parent) {
|
||||
// const index = this.parent.children.indexOf(this);
|
||||
// if (index > -1) {
|
||||
// this.parent.children.splice(index, 1);
|
||||
// }
|
||||
// }
|
||||
|
||||
// this.disposed = true;
|
||||
// }
|
||||
|
||||
/**
|
||||
* Added new "abort" event listener, only support "abort" event.
|
||||
*
|
||||
* @param {"abort"} _type Only support "abort" event
|
||||
* @param {(this: AbortSignalLike, ev: any) => any} listener
|
||||
* @memberof Aborter
|
||||
*/
|
||||
public addEventListener(
|
||||
// tslint:disable-next-line:variable-name
|
||||
_type: "abort",
|
||||
listener: (this: AbortSignalLike, ev: any) => any
|
||||
): void {
|
||||
this.abortEventListeners.push(listener);
|
||||
}
|
||||
|
||||
/**
|
||||
* Remove "abort" event listener, only support "abort" event.
|
||||
*
|
||||
* @param {"abort"} _type Only support "abort" event
|
||||
* @param {(this: AbortSignalLike, ev: any) => any} listener
|
||||
* @memberof Aborter
|
||||
*/
|
||||
public removeEventListener(
|
||||
// tslint:disable-next-line:variable-name
|
||||
_type: "abort",
|
||||
listener: (this: AbortSignalLike, ev: any) => any
|
||||
): void {
|
||||
const index = this.abortEventListeners.indexOf(listener);
|
||||
if (index > -1) {
|
||||
this.abortEventListeners.splice(index, 1);
|
||||
}
|
||||
}
|
||||
|
||||
private cancelByParent() {
|
||||
// if (!this.disposed) {
|
||||
this.abort();
|
||||
// }
|
||||
}
|
||||
|
||||
private cancelTimer() {
|
||||
if (this.timer) {
|
||||
clearTimeout(this.timer);
|
||||
}
|
||||
}
|
||||
}
|
|
@ -0,0 +1,166 @@
|
|||
/**
|
||||
* ONLY AVAILABLE IN NODE.JS RUNTIME.
|
||||
*
|
||||
* This is a helper class to construct a string representing the permissions granted by an AccountSAS. Setting a value
|
||||
* to true means that any SAS which uses these permissions will grant permissions for that operation. Once all the
|
||||
* values are set, this should be serialized with toString and set as the permissions field on an
|
||||
* {@link AccountSASSignatureValues} object. It is possible to construct the permissions string without this class, but
|
||||
* the order of the permissions is particular and this class guarantees correctness.
|
||||
*
|
||||
* @export
|
||||
* @class AccountSASPermissions
|
||||
*/
|
||||
export class AccountSASPermissions {
|
||||
/**
|
||||
* Parse initializes the AccountSASPermissions fields from a string.
|
||||
*
|
||||
* @static
|
||||
* @param {string} permissions
|
||||
* @returns {AccountSASPermissions}
|
||||
* @memberof AccountSASPermissions
|
||||
*/
|
||||
public static parse(permissions: string): AccountSASPermissions {
|
||||
const accountSASPermissions = new AccountSASPermissions();
|
||||
|
||||
for (const c of permissions) {
|
||||
switch (c) {
|
||||
case "r":
|
||||
accountSASPermissions.read = true;
|
||||
break;
|
||||
case "w":
|
||||
accountSASPermissions.write = true;
|
||||
break;
|
||||
case "d":
|
||||
accountSASPermissions.delete = true;
|
||||
break;
|
||||
case "l":
|
||||
accountSASPermissions.list = true;
|
||||
break;
|
||||
case "a":
|
||||
accountSASPermissions.add = true;
|
||||
break;
|
||||
case "c":
|
||||
accountSASPermissions.create = true;
|
||||
break;
|
||||
case "u":
|
||||
accountSASPermissions.update = true;
|
||||
break;
|
||||
case "p":
|
||||
accountSASPermissions.process = true;
|
||||
break;
|
||||
default:
|
||||
throw new RangeError(`Invalid permission character: ${c}`);
|
||||
}
|
||||
}
|
||||
|
||||
return accountSASPermissions;
|
||||
}
|
||||
|
||||
/**
|
||||
* Permission to read resources and list queues and tables granted.
|
||||
*
|
||||
* @type {boolean}
|
||||
* @memberof AccountSASPermissions
|
||||
*/
|
||||
public read: boolean = false;
|
||||
|
||||
/**
|
||||
* Permission to write resources granted.
|
||||
*
|
||||
* @type {boolean}
|
||||
* @memberof AccountSASPermissions
|
||||
*/
|
||||
public write: boolean = false;
|
||||
|
||||
/**
|
||||
* Permission to create blobs and files granted.
|
||||
*
|
||||
* @type {boolean}
|
||||
* @memberof AccountSASPermissions
|
||||
*/
|
||||
public delete: boolean = false;
|
||||
|
||||
/**
|
||||
* Permission to list blob containers, blobs, shares, directories, and files granted.
|
||||
*
|
||||
* @type {boolean}
|
||||
* @memberof AccountSASPermissions
|
||||
*/
|
||||
public list: boolean = false;
|
||||
|
||||
/**
|
||||
* Permission to add messages, table entities, and append to blobs granted.
|
||||
*
|
||||
* @type {boolean}
|
||||
* @memberof AccountSASPermissions
|
||||
*/
|
||||
public add: boolean = false;
|
||||
|
||||
/**
|
||||
* Permission to create blobs and files granted.
|
||||
*
|
||||
* @type {boolean}
|
||||
* @memberof AccountSASPermissions
|
||||
*/
|
||||
public create: boolean = false;
|
||||
|
||||
/**
|
||||
* Permissions to update messages and table entities granted.
|
||||
*
|
||||
* @type {boolean}
|
||||
* @memberof AccountSASPermissions
|
||||
*/
|
||||
public update: boolean = false;
|
||||
|
||||
/**
|
||||
* Permission to get and delete messages granted.
|
||||
*
|
||||
* @type {boolean}
|
||||
* @memberof AccountSASPermissions
|
||||
*/
|
||||
public process: boolean = false;
|
||||
|
||||
/**
|
||||
* Produces the SAS permissions string for an Azure Storage account.
|
||||
* Call this method to set AccountSASSignatureValues Permissions field.
|
||||
*
|
||||
* Using this method will guarantee the resource types are in
|
||||
* an order accepted by the service.
|
||||
*
|
||||
* @see https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-an-account-sas
|
||||
*
|
||||
* @returns {string}
|
||||
* @memberof AccountSASPermissions
|
||||
*/
|
||||
public toString(): string {
|
||||
// The order of the characters should be as specified here to ensure correctness:
|
||||
// https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-an-account-sas
|
||||
// Use a string array instead of string concatenating += operator for performance
|
||||
const permissions: string[] = [];
|
||||
if (this.read) {
|
||||
permissions.push("r");
|
||||
}
|
||||
if (this.write) {
|
||||
permissions.push("w");
|
||||
}
|
||||
if (this.delete) {
|
||||
permissions.push("d");
|
||||
}
|
||||
if (this.list) {
|
||||
permissions.push("l");
|
||||
}
|
||||
if (this.add) {
|
||||
permissions.push("a");
|
||||
}
|
||||
if (this.create) {
|
||||
permissions.push("c");
|
||||
}
|
||||
if (this.update) {
|
||||
permissions.push("u");
|
||||
}
|
||||
if (this.process) {
|
||||
permissions.push("p");
|
||||
}
|
||||
return permissions.join("");
|
||||
}
|
||||
}
|
|
@ -0,0 +1,90 @@
|
|||
/**
|
||||
* ONLY AVAILABLE IN NODE.JS RUNTIME.
|
||||
*
|
||||
* This is a helper class to construct a string representing the resources accessible by an AccountSAS. Setting a value
|
||||
* to true means that any SAS which uses these permissions will grant access to that resource type. Once all the
|
||||
* values are set, this should be serialized with toString and set as the resources field on an
|
||||
* {@link AccountSASSignatureValues} object. It is possible to construct the resources string without this class, but
|
||||
* the order of the resources is particular and this class guarantees correctness.
|
||||
*
|
||||
* @export
|
||||
* @class AccountSASResourceTypes
|
||||
*/
|
||||
export class AccountSASResourceTypes {
|
||||
/**
|
||||
* Creates an {@link AccountSASResourceType} from the specified resource types string. This method will throw an
|
||||
* Error if it encounters a character that does not correspond to a valid resource type.
|
||||
*
|
||||
* @static
|
||||
* @param {string} resourceTypes
|
||||
* @returns {AccountSASResourceTypes}
|
||||
* @memberof AccountSASResourceTypes
|
||||
*/
|
||||
public static parse(resourceTypes: string): AccountSASResourceTypes {
|
||||
const accountSASResourceTypes = new AccountSASResourceTypes();
|
||||
|
||||
for (const c of resourceTypes) {
|
||||
switch (c) {
|
||||
case "s":
|
||||
accountSASResourceTypes.service = true;
|
||||
break;
|
||||
case "c":
|
||||
accountSASResourceTypes.container = true;
|
||||
break;
|
||||
case "o":
|
||||
accountSASResourceTypes.object = true;
|
||||
break;
|
||||
default:
|
||||
throw new RangeError(`Invalid resource type: ${c}`);
|
||||
}
|
||||
}
|
||||
|
||||
return accountSASResourceTypes;
|
||||
}
|
||||
|
||||
/**
|
||||
* Permission to access service level APIs granted.
|
||||
*
|
||||
* @type {boolean}
|
||||
* @memberof AccountSASResourceTypes
|
||||
*/
|
||||
public service: boolean = false;
|
||||
|
||||
/**
|
||||
* Permission to access container level APIs (Blob Containers, Tables, Queues, File Shares) granted.
|
||||
*
|
||||
* @type {boolean}
|
||||
* @memberof AccountSASResourceTypes
|
||||
*/
|
||||
public container: boolean = false;
|
||||
|
||||
/**
|
||||
* Permission to access object level APIs (Blobs, Table Entities, Queue Messages, Files) granted.
|
||||
*
|
||||
* @type {boolean}
|
||||
* @memberof AccountSASResourceTypes
|
||||
*/
|
||||
public object: boolean = false;
|
||||
|
||||
/**
|
||||
* Converts the given resource types to a string.
|
||||
*
|
||||
* @see https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-an-account-sas
|
||||
*
|
||||
* @returns {string}
|
||||
* @memberof AccountSASResourceTypes
|
||||
*/
|
||||
public toString(): string {
|
||||
const resourceTypes: string[] = [];
|
||||
if (this.service) {
|
||||
resourceTypes.push("s");
|
||||
}
|
||||
if (this.container) {
|
||||
resourceTypes.push("c");
|
||||
}
|
||||
if (this.object) {
|
||||
resourceTypes.push("o");
|
||||
}
|
||||
return resourceTypes.join("");
|
||||
}
|
||||
}
|
|
@ -0,0 +1,102 @@
|
|||
/**
|
||||
* ONLY AVAILABLE IN NODE.JS RUNTIME.
|
||||
*
|
||||
* This is a helper class to construct a string representing the services accessible by an AccountSAS. Setting a value
|
||||
* to true means that any SAS which uses these permissions will grant access to that service. Once all the
|
||||
* values are set, this should be serialized with toString and set as the services field on an
|
||||
* {@link AccountSASSignatureValues} object. It is possible to construct the services string without this class, but
|
||||
* the order of the services is particular and this class guarantees correctness.
|
||||
*
|
||||
* @export
|
||||
* @class AccountSASServices
|
||||
*/
|
||||
export class AccountSASServices {
|
||||
/**
|
||||
* Creates an {@link AccountSASServices} from the specified services string. This method will throw an
|
||||
* Error if it encounters a character that does not correspond to a valid service.
|
||||
*
|
||||
* @static
|
||||
* @param {string} services
|
||||
* @returns {AccountSASServices}
|
||||
* @memberof AccountSASServices
|
||||
*/
|
||||
public static parse(services: string): AccountSASServices {
|
||||
const accountSASServices = new AccountSASServices();
|
||||
|
||||
for (const c of services) {
|
||||
switch (c) {
|
||||
case "b":
|
||||
accountSASServices.blob = true;
|
||||
break;
|
||||
case "f":
|
||||
accountSASServices.file = true;
|
||||
break;
|
||||
case "q":
|
||||
accountSASServices.queue = true;
|
||||
break;
|
||||
case "t":
|
||||
accountSASServices.table = true;
|
||||
break;
|
||||
default:
|
||||
throw new RangeError(`Invalid service character: ${c}`);
|
||||
}
|
||||
}
|
||||
|
||||
return accountSASServices;
|
||||
}
|
||||
|
||||
/**
|
||||
* Permission to access blob resources granted.
|
||||
*
|
||||
* @type {boolean}
|
||||
* @memberof AccountSASServices
|
||||
*/
|
||||
public blob: boolean = false;
|
||||
|
||||
/**
|
||||
* Permission to access file resources granted.
|
||||
*
|
||||
* @type {boolean}
|
||||
* @memberof AccountSASServices
|
||||
*/
|
||||
public file: boolean = false;
|
||||
|
||||
/**
|
||||
* Permission to access queue resources granted.
|
||||
*
|
||||
* @type {boolean}
|
||||
* @memberof AccountSASServices
|
||||
*/
|
||||
public queue: boolean = false;
|
||||
|
||||
/**
|
||||
* Permission to access table resources granted.
|
||||
*
|
||||
* @type {boolean}
|
||||
* @memberof AccountSASServices
|
||||
*/
|
||||
public table: boolean = false;
|
||||
|
||||
/**
|
||||
* Converts the given services to a string.
|
||||
*
|
||||
* @returns {string}
|
||||
* @memberof AccountSASServices
|
||||
*/
|
||||
public toString(): string {
|
||||
const services: string[] = [];
|
||||
if (this.blob) {
|
||||
services.push("b");
|
||||
}
|
||||
if (this.table) {
|
||||
services.push("t");
|
||||
}
|
||||
if (this.queue) {
|
||||
services.push("q");
|
||||
}
|
||||
if (this.file) {
|
||||
services.push("f");
|
||||
}
|
||||
return services.join("");
|
||||
}
|
||||
}
|
|
@ -0,0 +1,179 @@
|
|||
import { HttpRequestBody, TransferProgressEvent } from "ms-rest-js";
|
||||
|
||||
import * as Models from "../lib/generated/models";
|
||||
import { Aborter } from "./Aborter";
|
||||
import { BlobURL } from "./BlobURL";
|
||||
import { ContainerURL } from "./ContainerURL";
|
||||
import { AppendBlob } from "./generated/operations";
|
||||
import {
|
||||
IAppendBlobAccessConditions,
|
||||
IBlobAccessConditions,
|
||||
IMetadata
|
||||
} from "./models";
|
||||
import { Pipeline } from "./Pipeline";
|
||||
import { URLConstants } from "./utils/constants";
|
||||
import { appendToURLPath, setURLParameter } from "./utils/utils.common";
|
||||
|
||||
/**
 * Options for {@link AppendBlobURL.create}.
 */
export interface IAppendBlobCreateOptions {
  /**
   * Access conditions applied to the create call; its leaseAccessConditions
   * and modifiedAccessConditions members are forwarded to the service.
   */
  accessConditions?: IBlobAccessConditions;
  /**
   * Standard HTTP headers (e.g. content type/encoding) to set on the new blob.
   */
  blobHTTPHeaders?: Models.BlobHTTPHeaders;
  /**
   * User-defined name/value pairs to associate with the blob.
   */
  metadata?: IMetadata;
}

/**
 * Options for {@link AppendBlobURL.appendBlock}.
 */
export interface IAppendBlobAppendBlockOptions {
  /**
   * Access conditions for the append; its appendPositionAccessConditions,
   * leaseAccessConditions and modifiedAccessConditions members are forwarded
   * to the service.
   */
  accessConditions?: IAppendBlobAccessConditions;
  /**
   * Callback invoked with upload progress events.
   */
  progress?: (progress: TransferProgressEvent) => void;
  /**
   * MD5 of the block content, forwarded as transactionalContentMD5 so the
   * service can validate the uploaded bytes.
   */
  transactionalContentMD5?: Uint8Array;
}
|
||||
|
||||
/**
 * AppendBlobURL defines a set of operations applicable to append blobs.
 *
 * @export
 * @class AppendBlobURL
 * @extends {BlobURL}
 */
export class AppendBlobURL extends BlobURL {
  /**
   * Creates an AppendBlobURL object from a ContainerURL instance by appending
   * the blob name to the container URL's path.
   *
   * @static
   * @param {ContainerURL} containerURL
   * @param {string} blobName
   * @returns {AppendBlobURL}
   * @memberof AppendBlobURL
   */
  public static fromContainerURL(
    containerURL: ContainerURL,
    blobName: string
  ): AppendBlobURL {
    return new AppendBlobURL(
      appendToURLPath(containerURL.url, blobName),
      containerURL.pipeline
    );
  }

  /**
   * Creates an AppendBlobURL object from a BlobURL instance, reusing its URL
   * and pipeline.
   *
   * @static
   * @param {BlobURL} blobURL
   * @returns {AppendBlobURL}
   * @memberof AppendBlobURL
   */
  public static fromBlobURL(blobURL: BlobURL): AppendBlobURL {
    return new AppendBlobURL(blobURL.url, blobURL.pipeline);
  }

  /**
   * appendBlobContext provided by the protocol (generated) layer; performs
   * the actual service calls.
   *
   * @private
   * @type {AppendBlob}
   * @memberof AppendBlobURL
   */
  private appendBlobContext: AppendBlob;

  /**
   * Creates an instance of AppendBlobURL.
   * @param {string} url A URL string pointing to Azure Storage append blob, such as
   *                     "https://myaccount.blob.core.windows.net/mycontainer/appendblob". You can
   *                     append a SAS if using AnonymousCredential, such as
   *                     "https://myaccount.blob.core.windows.net/mycontainer/appendblob?sasString".
   * @param {Pipeline} pipeline Call StorageURL.newPipeline() to create a default
   *                            pipeline, or provide a customized pipeline.
   * @memberof AppendBlobURL
   */
  constructor(url: string, pipeline: Pipeline) {
    super(url, pipeline);
    this.appendBlobContext = new AppendBlob(this.storageClientContext);
  }

  /**
   * Creates a new AppendBlobURL object identical to the source but with the
   * specified request policy pipeline.
   *
   * @param {Pipeline} pipeline
   * @returns {AppendBlobURL}
   * @memberof AppendBlobURL
   */
  public withPipeline(pipeline: Pipeline): AppendBlobURL {
    return new AppendBlobURL(this.url, pipeline);
  }

  /**
   * Creates a new AppendBlobURL object identical to the source but with the
   * specified snapshot timestamp.
   * Provide "" will remove the snapshot and return a URL to the base blob.
   *
   * @param {string} snapshot
   * @returns {AppendBlobURL}
   * @memberof AppendBlobURL
   */
  public withSnapshot(snapshot: string): AppendBlobURL {
    return new AppendBlobURL(
      setURLParameter(
        this.url,
        URLConstants.Parameters.SNAPSHOT,
        // "" removes the snapshot query parameter entirely.
        snapshot.length === 0 ? undefined : snapshot
      ),
      this.pipeline
    );
  }

  /**
   * Creates a 0-length append blob. Call AppendBlock to append data to an append blob.
   * @see https://docs.microsoft.com/rest/api/storageservices/put-blob
   *
   * @param {Aborter} aborter Create a new Aborter instance with Aborter.none or Aborter.timeout(),
   *                          goto documents of Aborter for more examples about request cancellation
   * @param {IAppendBlobCreateOptions} [options]
   * @returns {Promise<Models.AppendBlobCreateResponse>}
   * @memberof AppendBlobURL
   */
  public async create(
    aborter: Aborter,
    options: IAppendBlobCreateOptions = {}
  ): Promise<Models.AppendBlobCreateResponse> {
    options.accessConditions = options.accessConditions || {};
    // Content length is always 0 for an append blob create; data is added via appendBlock.
    return this.appendBlobContext.create(0, {
      abortSignal: aborter,
      blobHTTPHeaders: options.blobHTTPHeaders,
      leaseAccessConditions: options.accessConditions.leaseAccessConditions,
      metadata: options.metadata,
      modifiedAccessConditions:
        options.accessConditions.modifiedAccessConditions
    });
  }

  /**
   * Commits a new block of data to the end of the existing append blob.
   * @see https://docs.microsoft.com/rest/api/storageservices/append-block
   *
   * @param {Aborter} aborter Create a new Aborter instance with Aborter.none or Aborter.timeout(),
   *                          goto documents of Aborter for more examples about request cancellation
   * @param {HttpRequestBody} body
   * @param {number} contentLength
   * @param {IAppendBlobAppendBlockOptions} [options]
   * @returns {Promise<Models.AppendBlobAppendBlockResponse>}
   * @memberof AppendBlobURL
   */
  public async appendBlock(
    aborter: Aborter,
    body: HttpRequestBody,
    contentLength: number,
    options: IAppendBlobAppendBlockOptions = {}
  ): Promise<Models.AppendBlobAppendBlockResponse> {
    options.accessConditions = options.accessConditions || {};
    return this.appendBlobContext.appendBlock(body, contentLength, {
      abortSignal: aborter,
      appendPositionAccessConditions:
        options.accessConditions.appendPositionAccessConditions,
      leaseAccessConditions: options.accessConditions.leaseAccessConditions,
      modifiedAccessConditions:
        options.accessConditions.modifiedAccessConditions,
      onUploadProgress: options.progress,
      transactionalContentMD5: options.transactionalContentMD5
    });
  }
}
|
|
@ -0,0 +1,117 @@
|
|||
/**
|
||||
* ONLY AVAILABLE IN NODE.JS RUNTIME.
|
||||
*
|
||||
* This is a helper class to construct a string representing the permissions granted by a ServiceSAS to a blob. Setting
|
||||
* a value to true means that any SAS which uses these permissions will grant permissions for that operation. Once all
|
||||
* the values are set, this should be serialized with toString and set as the permissions field on a
|
||||
* {@link ServiceSASSignatureValues} object. It is possible to construct the permissions string without this class, but
|
||||
* the order of the permissions is particular and this class guarantees correctness.
|
||||
*
|
||||
* @export
|
||||
* @class BlobSASPermissions
|
||||
*/
|
||||
export class BlobSASPermissions {
|
||||
/**
|
||||
* Creates a {@link BlobSASPermission} from the specified permissions string. This method will throw an
|
||||
* Error if it encounters a character that does not correspond to a valid permission.
|
||||
*
|
||||
* @static
|
||||
* @param {string} permissions
|
||||
* @returns {BlobSASPermissions}
|
||||
* @memberof BlobSASPermissions
|
||||
*/
|
||||
public static parse(permissions: string): BlobSASPermissions {
|
||||
const blobSASPermissions = new BlobSASPermissions();
|
||||
|
||||
for (const char of permissions) {
|
||||
switch (char) {
|
||||
case "r":
|
||||
blobSASPermissions.read = true;
|
||||
break;
|
||||
case "a":
|
||||
blobSASPermissions.add = true;
|
||||
break;
|
||||
case "c":
|
||||
blobSASPermissions.create = true;
|
||||
break;
|
||||
case "w":
|
||||
blobSASPermissions.write = true;
|
||||
break;
|
||||
case "d":
|
||||
blobSASPermissions.delete = true;
|
||||
break;
|
||||
default:
|
||||
throw new RangeError(`Invalid permission: ${char}`);
|
||||
}
|
||||
}
|
||||
|
||||
return blobSASPermissions;
|
||||
}
|
||||
|
||||
/**
|
||||
* Specifies Read access granted.
|
||||
*
|
||||
* @type {boolean}
|
||||
* @memberof BlobSASPermissions
|
||||
*/
|
||||
public read: boolean = false;
|
||||
|
||||
/**
|
||||
* Specifies Add access granted.
|
||||
*
|
||||
* @type {boolean}
|
||||
* @memberof BlobSASPermissions
|
||||
*/
|
||||
public add: boolean = false;
|
||||
|
||||
/**
|
||||
* Specifies Create access granted.
|
||||
*
|
||||
* @type {boolean}
|
||||
* @memberof BlobSASPermissions
|
||||
*/
|
||||
public create: boolean = false;
|
||||
|
||||
/**
|
||||
* Specifies Write access granted.
|
||||
*
|
||||
* @type {boolean}
|
||||
* @memberof BlobSASPermissions
|
||||
*/
|
||||
public write: boolean = false;
|
||||
|
||||
/**
|
||||
* Specifies Delete access granted.
|
||||
*
|
||||
* @type {boolean}
|
||||
* @memberof BlobSASPermissions
|
||||
*/
|
||||
public delete: boolean = false;
|
||||
|
||||
/**
|
||||
* Converts the given permissions to a string. Using this method will guarantee the permissions are in an
|
||||
* order accepted by the service.
|
||||
*
|
||||
* @returns {string} A string which represents the BlobSASPermissions
|
||||
* @memberof BlobSASPermissions
|
||||
*/
|
||||
public toString(): string {
|
||||
const permissions: string[] = [];
|
||||
if (this.read) {
|
||||
permissions.push("r");
|
||||
}
|
||||
if (this.add) {
|
||||
permissions.push("a");
|
||||
}
|
||||
if (this.create) {
|
||||
permissions.push("c");
|
||||
}
|
||||
if (this.write) {
|
||||
permissions.push("w");
|
||||
}
|
||||
if (this.delete) {
|
||||
permissions.push("d");
|
||||
}
|
||||
return permissions.join("");
|
||||
}
|
||||
}
|
|
@ -0,0 +1,574 @@
|
|||
import { TransferProgressEvent } from "ms-rest-js";
|
||||
|
||||
import * as Models from "../lib/generated/models";
|
||||
import { Aborter } from "./Aborter";
|
||||
import { ContainerURL } from "./ContainerURL";
|
||||
import { Blob } from "./generated/operations";
|
||||
import { rangeToString } from "./IRange";
|
||||
import { IBlobAccessConditions, IMetadata } from "./models";
|
||||
import { Pipeline } from "./Pipeline";
|
||||
import { StorageURL } from "./StorageURL";
|
||||
import { URLConstants } from "./utils/constants";
|
||||
import { appendToURLPath, setURLParameter } from "./utils/utils.common";
|
||||
|
||||
/**
 * Options for {@link BlobURL.download}.
 */
export interface IBlobDownloadOptions {
  /**
   * Opaque snapshot timestamp; when set, the download targets that snapshot
   * instead of the base blob.
   */
  snapshot?: string;
  /**
   * When true, asks the service to return the MD5 of the requested range.
   */
  rangeGetContentMD5?: boolean;
  /**
   * Lease and modified-time/ETag conditions the blob must satisfy.
   */
  blobAccessConditions?: IBlobAccessConditions;
  /**
   * Callback invoked with download progress events.
   */
  progress?: (progress: TransferProgressEvent) => void;
}

/**
 * Options for {@link BlobURL.getProperties}.
 */
export interface IBlobGetPropertiesOptions {
  /**
   * Lease and modified-time/ETag conditions the blob must satisfy.
   */
  blobAccessConditions?: IBlobAccessConditions;
}

/**
 * Options for {@link BlobURL.delete}.
 */
export interface IBlobDeleteOptions {
  /**
   * Lease and modified-time/ETag conditions the blob must satisfy.
   */
  blobAccessConditions?: IBlobAccessConditions;
  /**
   * Whether snapshots are deleted with the blob, only snapshots are deleted,
   * or neither (service-defined enum).
   */
  deleteSnapshots?: Models.DeleteSnapshotsOptionType;
}

/**
 * Options for {@link BlobURL.setHTTPHeaders}.
 */
export interface IBlobSetHTTPHeadersOptions {
  /**
   * Lease and modified-time/ETag conditions the blob must satisfy.
   */
  blobAccessConditions?: IBlobAccessConditions;
  /**
   * HTTP headers to set; headers without a value are cleared on the blob.
   */
  blobHTTPHeaders?: Models.BlobHTTPHeaders;
}

/**
 * Options for {@link BlobURL.setMetadata}.
 */
export interface IBlobSetMetadataOptions {
  /**
   * User-defined name/value pairs; omitting metadata clears existing metadata.
   */
  metadata?: IMetadata;
  /**
   * Lease and modified-time/ETag conditions the blob must satisfy.
   */
  blobAccessConditions?: IBlobAccessConditions;
}
|
||||
|
||||
/**
 * Options for {@link BlobURL.acquireLease}.
 */
export interface IBlobAcquireLeaseOptions {
  /**
   * Modified-time/ETag conditions the blob must satisfy.
   */
  modifiedAccessConditions?: Models.ModifiedAccessConditions;
}

/**
 * Options for {@link BlobURL.releaseLease}.
 */
export interface IBlobReleaseLeaseOptions {
  /**
   * Modified-time/ETag conditions the blob must satisfy.
   */
  modifiedAccessConditions?: Models.ModifiedAccessConditions;
}

/**
 * Options for {@link BlobURL.renewLease}.
 */
export interface IBlobRenewLeaseOptions {
  /**
   * Modified-time/ETag conditions the blob must satisfy.
   */
  modifiedAccessConditions?: Models.ModifiedAccessConditions;
}

/**
 * Options for {@link BlobURL.changeLease}.
 */
export interface IBlobChangeLeaseOptions {
  /**
   * Modified-time/ETag conditions the blob must satisfy.
   */
  modifiedAccessConditions?: Models.ModifiedAccessConditions;
}

/**
 * Options for {@link BlobURL.breakLease}.
 */
export interface IBlobBreakLeaseOptions {
  /**
   * Modified-time/ETag conditions the blob must satisfy.
   */
  modifiedAccessConditions?: Models.ModifiedAccessConditions;
}
|
||||
|
||||
/**
 * Options for {@link BlobURL.createSnapshot}.
 */
export interface IBlobCreateSnapshotOptions {
  /**
   * User-defined name/value pairs to associate with the snapshot.
   */
  metadata?: IMetadata;
  /**
   * Lease and modified-time/ETag conditions the blob must satisfy.
   */
  blobAccessConditions?: IBlobAccessConditions;
}

/**
 * Options for {@link BlobURL.startCopyFromURL}.
 */
export interface IBlobStartCopyFromURLOptions {
  /**
   * User-defined name/value pairs to associate with the destination blob.
   */
  metadata?: IMetadata;
  /**
   * Conditions the destination blob must satisfy.
   */
  blobAccessConditions?: IBlobAccessConditions;
  /**
   * Modified-time/ETag conditions the source blob must satisfy.
   */
  sourceModifiedAccessConditions?: Models.ModifiedAccessConditions;
}

/**
 * Options for aborting a pending copy operation.
 */
export interface IBlobAbortCopyFromURLOptions {
  /**
   * Lease condition the blob must satisfy.
   */
  leaseAccessConditions?: Models.LeaseAccessConditions;
}

/**
 * Options for setting the access tier on a blob.
 */
export interface IBlobSetTierOptions {
  /**
   * Lease condition the blob must satisfy.
   */
  leaseAccessConditions?: Models.LeaseAccessConditions;
}
|
||||
|
||||
/**
|
||||
* A BlobURL represents a URL to an Azure Storage blob; the blob may be a block blob,
|
||||
* append blob, or page blob.
|
||||
*
|
||||
* @export
|
||||
* @class BlobURL
|
||||
* @extends {StorageURL}
|
||||
*/
|
||||
export class BlobURL extends StorageURL {
|
||||
  /**
   * Creates a BlobURL object from a ContainerURL object by appending the
   * blob name to the container URL's path.
   *
   * @static
   * @param {ContainerURL} containerURL
   * @param {string} blobName
   * @returns {BlobURL}
   * @memberof BlobURL
   */
  public static fromContainerURL(containerURL: ContainerURL, blobName: string) {
    return new BlobURL(
      appendToURLPath(containerURL.url, blobName),
      containerURL.pipeline
    );
  }

  /**
   * blobContext provided by the protocol (generated) layer; performs the
   * actual service calls.
   *
   * @private
   * @type {Blob}
   * @memberof BlobURL
   */
  private blobContext: Blob;

  /**
   * Creates an instance of BlobURL.
   * @param {string} url A URL string pointing to Azure Storage blob, such as
   *                     "https://myaccount.blob.core.windows.net/mycontainer/blob". You can
   *                     append a SAS if using AnonymousCredential, such as
   *                     "https://myaccount.blob.core.windows.net/mycontainer/blob?sasString".
   * @param {Pipeline} pipeline Call StorageURL.newPipeline() to create a default
   *                            pipeline, or provide a customized pipeline.
   * @memberof BlobURL
   */
  constructor(url: string, pipeline: Pipeline) {
    super(url, pipeline);
    this.blobContext = new Blob(this.storageClientContext);
  }

  /**
   * Creates a new BlobURL object identical to the source but with the
   * specified request policy pipeline.
   *
   * @param {Pipeline} pipeline
   * @returns {BlobURL}
   * @memberof BlobURL
   */
  public withPipeline(pipeline: Pipeline): BlobURL {
    return new BlobURL(this.url, pipeline);
  }

  /**
   * Creates a new BlobURL object identical to the source but with the specified snapshot timestamp.
   * Provide "" will remove the snapshot and return a URL to the base blob.
   *
   * @param {string} snapshot
   * @returns {BlobURL} A new BlobURL object identical to the source but with the specified snapshot timestamp
   * @memberof BlobURL
   */
  public withSnapshot(snapshot: string): BlobURL {
    return new BlobURL(
      setURLParameter(
        this.url,
        URLConstants.Parameters.SNAPSHOT,
        // "" removes the snapshot query parameter entirely.
        snapshot.length === 0 ? undefined : snapshot
      ),
      this.pipeline
    );
  }
|
||||
|
||||
  /**
   * Reads or downloads a blob from the system, including its metadata and properties.
   * You can also call Get Blob to read a snapshot.
   *
   * * In Node.js, data returns in a Readable stream readableStreamBody
   * * In browsers, data returns in a promise blobBody
   *
   * WARNING: In Node.js, abort or network error during reading from response stream will NOT
   * trigger any error, readable stream will end immediately. You need to check downloaded data
   * length when stream ends.
   *
   * @see https://docs.microsoft.com/en-us/rest/api/storageservices/get-blob
   *
   * @param {Aborter} aborter Create a new Aborter instance with Aborter.none or Aborter.timeout(),
   *                          goto documents of Aborter for more examples about request cancellation
   * @param {number} offset From which position of the blob to download, >= 0
   * @param {number} [count] How much data to be downloaded, > 0. Will download to the end when undefined
   * @param {IBlobDownloadOptions} [options]
   * @returns {Promise<Models.BlobDownloadResponse>}
   * @memberof BlobURL
   */
  public async download(
    aborter: Aborter,
    offset: number,
    count?: number,
    options: IBlobDownloadOptions = {}
  ): Promise<Models.BlobDownloadResponse> {
    options.blobAccessConditions = options.blobAccessConditions || {};

    const res = await this.blobContext.download({
      abortSignal: aborter,
      leaseAccessConditions: options.blobAccessConditions.leaseAccessConditions,
      modifiedAccessConditions:
        options.blobAccessConditions.modifiedAccessConditions,
      onDownloadProgress: options.progress,
      // No Range header when downloading the whole blob from position 0.
      range:
        offset === 0 && !count ? undefined : rangeToString({ offset, count }),
      rangeGetContentMD5: options.rangeGetContentMD5,
      snapshot: options.snapshot
    });

    // Default axios based HTTP client cannot abort download stream, manually pause/abort it
    // Currently, no error will be triggered when network error or abort during reading from response stream
    // TODO: Now need to manually validate the data length when stream ends, add download retry in the future
    if (res.readableStreamBody) {
      aborter.addEventListener("abort", () => {
        if (res.readableStreamBody) {
          res.readableStreamBody.pause();
        }
      });
    }

    return res;
  }
|
||||
|
||||
  /**
   * Returns all user-defined metadata, standard HTTP properties, and system properties
   * for the blob. It does not return the content of the blob.
   * @see https://docs.microsoft.com/en-us/rest/api/storageservices/get-blob-properties
   *
   * @param {Aborter} aborter Create a new Aborter instance with Aborter.none or Aborter.timeout(),
   *                          goto documents of Aborter for more examples about request cancellation
   * @param {IBlobGetPropertiesOptions} [options]
   * @returns {Promise<Models.BlobGetPropertiesResponse>}
   * @memberof BlobURL
   */
  public async getProperties(
    aborter: Aborter,
    options: IBlobGetPropertiesOptions = {}
  ): Promise<Models.BlobGetPropertiesResponse> {
    options.blobAccessConditions = options.blobAccessConditions || {};
    return this.blobContext.getProperties({
      abortSignal: aborter,
      leaseAccessConditions: options.blobAccessConditions.leaseAccessConditions,
      modifiedAccessConditions:
        options.blobAccessConditions.modifiedAccessConditions
    });
  }

  /**
   * Marks the specified blob or snapshot for deletion. The blob is later deleted
   * during garbage collection. Note that in order to delete a blob, you must delete
   * all of its snapshots. You can delete both at the same time with the Delete
   * Blob operation.
   * @see https://docs.microsoft.com/en-us/rest/api/storageservices/delete-blob
   *
   * @param {Aborter} aborter Create a new Aborter instance with Aborter.none or Aborter.timeout(),
   *                          goto documents of Aborter for more examples about request cancellation
   * @param {IBlobDeleteOptions} [options]
   * @returns {Promise<Models.BlobDeleteResponse>}
   * @memberof BlobURL
   */
  public async delete(
    aborter: Aborter,
    options: IBlobDeleteOptions = {}
  ): Promise<Models.BlobDeleteResponse> {
    options.blobAccessConditions = options.blobAccessConditions || {};
    // Generated layer names the operation deleteMethod to avoid the reserved word.
    return this.blobContext.deleteMethod({
      abortSignal: aborter,
      deleteSnapshots: options.deleteSnapshots,
      leaseAccessConditions: options.blobAccessConditions.leaseAccessConditions,
      modifiedAccessConditions:
        options.blobAccessConditions.modifiedAccessConditions
    });
  }

  /**
   * Restores the contents and metadata of soft deleted blob and any associated
   * soft deleted snapshots. Undelete Blob is supported only on version 2017-07-29
   * or later.
   * @see https://docs.microsoft.com/en-us/rest/api/storageservices/undelete-blob
   *
   * @param {Aborter} aborter Create a new Aborter instance with Aborter.none or Aborter.timeout(),
   *                          goto documents of Aborter for more examples about request cancellation
   * @returns {Promise<Models.BlobUndeleteResponse>}
   * @memberof BlobURL
   */
  public async undelete(
    aborter: Aborter
  ): Promise<Models.BlobUndeleteResponse> {
    return this.blobContext.undelete({
      abortSignal: aborter
    });
  }
|
||||
|
||||
  /**
   * Sets system properties on the blob.
   *
   * If no option provided, or no value provided for the blob HTTP headers in the options,
   * these blob HTTP headers without a value will be cleared.
   * @see https://docs.microsoft.com/en-us/rest/api/storageservices/set-blob-properties
   *
   * @param {Aborter} aborter Create a new Aborter instance with Aborter.none or Aborter.timeout(),
   *                          goto documents of Aborter for more examples about request cancellation
   * @param {IBlobSetHTTPHeadersOptions} [options]
   * @returns {Promise<Models.BlobSetHTTPHeadersResponse>}
   * @memberof BlobURL
   */
  public async setHTTPHeaders(
    aborter: Aborter,
    options: IBlobSetHTTPHeadersOptions = {}
  ): Promise<Models.BlobSetHTTPHeadersResponse> {
    options.blobAccessConditions = options.blobAccessConditions || {};
    return this.blobContext.setHTTPHeaders({
      abortSignal: aborter,
      blobHTTPHeaders: options.blobHTTPHeaders,
      leaseAccessConditions: options.blobAccessConditions.leaseAccessConditions,
      modifiedAccessConditions:
        options.blobAccessConditions.modifiedAccessConditions
    });
  }

  /**
   * Sets user-defined metadata for the specified blob as one or more name-value pairs.
   *
   * If no option provided, or no metadata defined in the option parameter, the blob
   * metadata will be removed.
   * @see https://docs.microsoft.com/en-us/rest/api/storageservices/set-blob-metadata
   *
   * @param {Aborter} aborter Create a new Aborter instance with Aborter.none or Aborter.timeout(),
   *                          goto documents of Aborter for more examples about request cancellation
   * @param {IBlobSetMetadataOptions} [options]
   * @returns {Promise<Models.BlobSetMetadataResponse>}
   * @memberof BlobURL
   */
  public async setMetadata(
    aborter: Aborter,
    options: IBlobSetMetadataOptions = {}
  ): Promise<Models.BlobSetMetadataResponse> {
    options.blobAccessConditions = options.blobAccessConditions || {};
    return this.blobContext.setMetadata({
      abortSignal: aborter,
      leaseAccessConditions: options.blobAccessConditions.leaseAccessConditions,
      metadata: options.metadata,
      modifiedAccessConditions:
        options.blobAccessConditions.modifiedAccessConditions
    });
  }
|
||||
|
||||
  /**
   * Establishes and manages a lock on a blob for write and delete operations.
   * The lock duration can be 15 to 60 seconds, or can be infinite.
   * In versions prior to 2012-02-12, the lock duration is 60 seconds.
   * @see https://docs.microsoft.com/en-us/rest/api/storageservices/lease-blob
   *
   * @param {Aborter} aborter Create a new Aborter instance with Aborter.none or Aborter.timeout(),
   *                          goto documents of Aborter for more examples about request cancellation
   * @param {string} proposedLeaseId Can be specified in any valid GUID string format
   * @param {number} duration The lock duration can be 15 to 60 seconds, or can be infinite
   * @param {IBlobAcquireLeaseOptions} [options]
   * @returns {Promise<Models.BlobAcquireLeaseResponse>}
   * @memberof BlobURL
   */
  public async acquireLease(
    aborter: Aborter,
    proposedLeaseId: string,
    duration: number,
    options: IBlobAcquireLeaseOptions = {}
  ): Promise<Models.BlobAcquireLeaseResponse> {
    return this.blobContext.acquireLease({
      abortSignal: aborter,
      duration,
      modifiedAccessConditions: options.modifiedAccessConditions,
      proposedLeaseId
    });
  }

  /**
   * To free the lease if it is no longer needed so that another client may immediately
   * acquire a lease against the blob.
   * @see https://docs.microsoft.com/en-us/rest/api/storageservices/lease-blob
   *
   * @param {Aborter} aborter Create a new Aborter instance with Aborter.none or Aborter.timeout(),
   *                          goto documents of Aborter for more examples about request cancellation
   * @param {string} leaseId
   * @param {IBlobReleaseLeaseOptions} [options]
   * @returns {Promise<Models.BlobReleaseLeaseResponse>}
   * @memberof BlobURL
   */
  public async releaseLease(
    aborter: Aborter,
    leaseId: string,
    options: IBlobReleaseLeaseOptions = {}
  ): Promise<Models.BlobReleaseLeaseResponse> {
    return this.blobContext.releaseLease(leaseId, {
      abortSignal: aborter,
      modifiedAccessConditions: options.modifiedAccessConditions
    });
  }

  /**
   * To renew an existing lease.
   * @see https://docs.microsoft.com/en-us/rest/api/storageservices/lease-blob
   *
   * @param {Aborter} aborter Create a new Aborter instance with Aborter.none or Aborter.timeout(),
   *                          goto documents of Aborter for more examples about request cancellation
   * @param {string} leaseId
   * @param {IBlobRenewLeaseOptions} [options]
   * @returns {Promise<Models.BlobRenewLeaseResponse>}
   * @memberof BlobURL
   */
  public async renewLease(
    aborter: Aborter,
    leaseId: string,
    options: IBlobRenewLeaseOptions = {}
  ): Promise<Models.BlobRenewLeaseResponse> {
    return this.blobContext.renewLease(leaseId, {
      abortSignal: aborter,
      modifiedAccessConditions: options.modifiedAccessConditions
    });
  }

  /**
   * To change the ID of an existing lease.
   * @see https://docs.microsoft.com/en-us/rest/api/storageservices/lease-blob
   *
   * @param {Aborter} aborter Create a new Aborter instance with Aborter.none or Aborter.timeout(),
   *                          goto documents of Aborter for more examples about request cancellation
   * @param {string} leaseId
   * @param {string} proposedLeaseId
   * @param {IBlobChangeLeaseOptions} [options]
   * @returns {Promise<Models.BlobChangeLeaseResponse>}
   * @memberof BlobURL
   */
  public async changeLease(
    aborter: Aborter,
    leaseId: string,
    proposedLeaseId: string,
    options: IBlobChangeLeaseOptions = {}
  ): Promise<Models.BlobChangeLeaseResponse> {
    return this.blobContext.changeLease(leaseId, proposedLeaseId, {
      abortSignal: aborter,
      modifiedAccessConditions: options.modifiedAccessConditions
    });
  }

  /**
   * To end the lease but ensure that another client cannot acquire a new lease
   * until the current lease period has expired.
   * @see https://docs.microsoft.com/en-us/rest/api/storageservices/lease-blob
   *
   * @param {Aborter} aborter Create a new Aborter instance with Aborter.none or Aborter.timeout(),
   *                          goto documents of Aborter for more examples about request cancellation
   * @param {number} [breakPeriod]
   * @param {IBlobBreakLeaseOptions} [options]
   * @returns {Promise<Models.BlobBreakLeaseResponse>}
   * @memberof BlobURL
   */
  public async breakLease(
    aborter: Aborter,
    breakPeriod?: number,
    options: IBlobBreakLeaseOptions = {}
  ): Promise<Models.BlobBreakLeaseResponse> {
    return this.blobContext.breakLease({
      abortSignal: aborter,
      breakPeriod,
      modifiedAccessConditions: options.modifiedAccessConditions
    });
  }
|
||||
|
||||
  /**
   * Creates a read-only snapshot of a blob.
   * @see https://docs.microsoft.com/en-us/rest/api/storageservices/snapshot-blob
   *
   * @param {Aborter} aborter Create a new Aborter instance with Aborter.none or Aborter.timeout(),
   *                          goto documents of Aborter for more examples about request cancellation
   * @param {IBlobCreateSnapshotOptions} [options]
   * @returns {Promise<Models.BlobCreateSnapshotResponse>}
   * @memberof BlobURL
   */
  public async createSnapshot(
    aborter: Aborter,
    options: IBlobCreateSnapshotOptions = {}
  ): Promise<Models.BlobCreateSnapshotResponse> {
    options.blobAccessConditions = options.blobAccessConditions || {};
    return this.blobContext.createSnapshot({
      abortSignal: aborter,
      leaseAccessConditions: options.blobAccessConditions.leaseAccessConditions,
      metadata: options.metadata,
      modifiedAccessConditions:
        options.blobAccessConditions.modifiedAccessConditions
    });
  }
|
||||
|
||||
/**
|
||||
* Copies a blob to a destination within the storage account.
|
||||
* In version 2012-02-12 and later, the source for a Copy Blob operation can be
|
||||
* a committed blob in any Azure storage account.
|
||||
* Beginning with version 2015-02-21, the source for a Copy Blob operation can be
|
||||
* an Azure file in any Azure storage account.
|
||||
* Only storage accounts created on or after June 7th, 2012 allow the Copy Blob
|
||||
* operation to copy from another storage account.
|
||||
* @see https://docs.microsoft.com/en-us/rest/api/storageservices/copy-blob
|
||||
*
|
||||
* @param {Aborter} aborter Create a new Aborter instance with Aborter.none or Aborter.timeout(),
|
||||
* goto documents of Aborter for more examples about request cancellation
|
||||
* @param {string} copySource
|
||||
* @param {IBlobStartCopyFromURLOptions} [options]
|
||||
* @returns {Promise<Models.BlobStartCopyFromURLResponse>}
|
||||
* @memberof BlobURL
|
||||
*/
|
||||
public async startCopyFromURL(
|
||||
aborter: Aborter,
|
||||
copySource: string,
|
||||
options: IBlobStartCopyFromURLOptions = {}
|
||||
): Promise<Models.BlobStartCopyFromURLResponse> {
|
||||
options.blobAccessConditions = options.blobAccessConditions || {};
|
||||
options.sourceModifiedAccessConditions =
|
||||
options.sourceModifiedAccessConditions || {};
|
||||
|
||||
return this.blobContext.startCopyFromURL(copySource, {
|
||||
abortSignal: aborter,
|
||||
leaseAccessConditions: options.blobAccessConditions.leaseAccessConditions,
|
||||
metadata: options.metadata,
|
||||
modifiedAccessConditions:
|
||||
options.blobAccessConditions.modifiedAccessConditions,
|
||||
sourceModifiedAccessConditions: {
|
||||
sourceIfMatch: options.sourceModifiedAccessConditions.ifMatch,
|
||||
sourceIfModifiedSince:
|
||||
options.sourceModifiedAccessConditions.ifModifiedSince,
|
||||
sourceIfNoneMatch: options.sourceModifiedAccessConditions.ifNoneMatch,
|
||||
sourceIfUnmodifiedSince:
|
||||
options.sourceModifiedAccessConditions.ifUnmodifiedSince
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Aborts a pending Copy Blob operation, and leaves a destination blob with zero
|
||||
* length and full metadata. Version 2012-02-12 and newer.
|
||||
* @see https://docs.microsoft.com/en-us/rest/api/storageservices/abort-copy-blob
|
||||
*
|
||||
* @param {Aborter} aborter Create a new Aborter instance with Aborter.none or Aborter.timeout(),
|
||||
* goto documents of Aborter for more examples about request cancellation
|
||||
* @param {string} copyId
|
||||
* @param {IBlobAbortCopyFromURLOptions} [options]
|
||||
* @returns {Promise<Models.BlobAbortCopyFromURLResponse>}
|
||||
* @memberof BlobURL
|
||||
*/
|
||||
public async abortCopyFromURL(
|
||||
aborter: Aborter,
|
||||
copyId: string,
|
||||
options: IBlobAbortCopyFromURLOptions = {}
|
||||
): Promise<Models.BlobAbortCopyFromURLResponse> {
|
||||
return this.blobContext.abortCopyFromURL(copyId, {
|
||||
abortSignal: aborter,
|
||||
leaseAccessConditions: options.leaseAccessConditions
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Sets the tier on a blob. The operation is allowed on a page blob in a premium
|
||||
* storage account and on a block blob in a blob storage account (locally redundant
|
||||
* storage only). A premium page blob's tier determines the allowed size, IOPS,
|
||||
* and bandwidth of the blob. A block blob's tier determines Hot/Cool/Archive
|
||||
* storage type. This operation does not update the blob's ETag.
|
||||
* @see https://docs.microsoft.com/en-us/rest/api/storageservices/set-blob-tier
|
||||
*
|
||||
* @param {Aborter} aborter Create a new Aborter instance with Aborter.none or Aborter.timeout(),
|
||||
* goto documents of Aborter for more examples about request cancellation
|
||||
* @param {Models.AccessTier} tier
|
||||
* @param {IBlobSetTierOptions} [options]
|
||||
* @returns {Promise<Models.BlobsSetTierResponse>}
|
||||
* @memberof BlobURL
|
||||
*/
|
||||
public async setTier(
|
||||
aborter: Aborter,
|
||||
tier: Models.AccessTier,
|
||||
options: IBlobSetTierOptions = {}
|
||||
): Promise<Models.BlobSetTierResponse> {
|
||||
return await this.blobContext.setTier(tier, {
|
||||
abortSignal: aborter,
|
||||
leaseAccessConditions: options.leaseAccessConditions
|
||||
});
|
||||
}
|
||||
}
|
|
@ -0,0 +1,313 @@
|
|||
import { HttpRequestBody, TransferProgressEvent } from "ms-rest-js";
|
||||
|
||||
import * as Models from "../lib/generated/models";
|
||||
import { Aborter } from "./Aborter";
|
||||
import { BlobURL } from "./BlobURL";
|
||||
import { ContainerURL } from "./ContainerURL";
|
||||
import { BlockBlob } from "./generated/operations";
|
||||
import { IRange, rangeToString } from "./IRange";
|
||||
import { IBlobAccessConditions, IMetadata } from "./models";
|
||||
import { Pipeline } from "./Pipeline";
|
||||
import { URLConstants } from "./utils/constants";
|
||||
import { appendToURLPath, setURLParameter } from "./utils/utils.common";
|
||||
|
||||
/**
 * Options for BlockBlobURL.upload.
 */
export interface IBlockBlobUploadOptions {
  // Lease and modified-access conditions applied to the target blob.
  accessConditions?: IBlobAccessConditions;
  // HTTP headers to set on the uploaded blob.
  blobHTTPHeaders?: Models.BlobHTTPHeaders;
  // User-defined name/value metadata pairs to store with the blob.
  metadata?: IMetadata;
  // Callback invoked with upload progress events.
  progress?: (progress: TransferProgressEvent) => void;
}

/**
 * Options for BlockBlobURL.stageBlock.
 */
export interface IBlockBlobStageBlockOptions {
  // Lease condition for the target blob.
  leaseAccessConditions?: Models.LeaseAccessConditions;
  // Callback invoked with upload progress events.
  progress?: (progress: TransferProgressEvent) => void;
  // MD5 of the block content for transactional integrity checking.
  transactionalContentMD5?: Uint8Array;
}

/**
 * Options for BlockBlobURL.stageBlockFromURL.
 */
export interface IBlockBlobStageBlockFromURLOptions {
  // Byte range of the source blob to stage.
  range?: IRange;
  // Lease condition for the target blob.
  leaseAccessConditions?: Models.LeaseAccessConditions;
  // Expected MD5 of the source content.
  sourceContentMD5?: Uint8Array;
}

/**
 * Options for BlockBlobURL.commitBlockList.
 */
export interface IBlockBlobCommitBlockListOptions {
  // Lease and modified-access conditions applied to the target blob.
  accessConditions?: IBlobAccessConditions;
  // HTTP headers to set on the committed blob.
  blobHTTPHeaders?: Models.BlobHTTPHeaders;
  // User-defined name/value metadata pairs to store with the blob.
  metadata?: IMetadata;
}

/**
 * Options for BlockBlobURL.getBlockList.
 */
export interface IBlockBlobGetBlockListOptions {
  // Lease condition for the target blob.
  leaseAccessConditions?: Models.LeaseAccessConditions;
}
|
||||
|
||||
/**
|
||||
* BlockBlobURL defines a set of operations applicable to block blobs.
|
||||
*
|
||||
* @export
|
||||
* @class BlockBlobURL
|
||||
* @extends {StorageURL}
|
||||
*/
|
||||
export class BlockBlobURL extends BlobURL {
|
||||
/**
|
||||
* Creates a BlockBlobURL object from ContainerURL instance.
|
||||
*
|
||||
* @static
|
||||
* @param {ContainerURL} containerURL
|
||||
* @param {string} blobName
|
||||
* @returns {BlockBlobURL}
|
||||
* @memberof BlockBlobURL
|
||||
*/
|
||||
public static fromContainerURL(
|
||||
containerURL: ContainerURL,
|
||||
blobName: string
|
||||
): BlockBlobURL {
|
||||
return new BlockBlobURL(
|
||||
appendToURLPath(containerURL.url, blobName),
|
||||
containerURL.pipeline
|
||||
);
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates a BlockBlobURL object from BlobURL instance.
|
||||
*
|
||||
* @static
|
||||
* @param {BlobURL} blobURL
|
||||
* @returns {BlockBlobURL}
|
||||
* @memberof BlockBlobURL
|
||||
*/
|
||||
public static fromBlobURL(blobURL: BlobURL): BlockBlobURL {
|
||||
return new BlockBlobURL(blobURL.url, blobURL.pipeline);
|
||||
}
|
||||
|
||||
/**
|
||||
* blockBlobContext provided by protocol layer.
|
||||
*
|
||||
* @private
|
||||
* @type {BlockBlobs}
|
||||
* @memberof BlockBlobURL
|
||||
*/
|
||||
private blockBlobContext: BlockBlob;
|
||||
|
||||
/**
|
||||
* Creates an instance of BlockBlobURL.
|
||||
* @param {string} url A URL string pointing to Azure Storage block blob, such as
|
||||
* "https://myaccount.blob.core.windows.net/mycontainer/blockblob". You can
|
||||
* append a SAS if using AnonymousCredential, such as
|
||||
* "https://myaccount.blob.core.windows.net/mycontainer/blockblob?sasString".
|
||||
* @param {Pipeline} pipeline Call StorageURL.newPipeline() to create a default
|
||||
* pipeline, or provide a customized pipeline.
|
||||
* @memberof BlockBlobURL
|
||||
*/
|
||||
constructor(url: string, pipeline: Pipeline) {
|
||||
super(url, pipeline);
|
||||
this.blockBlobContext = new BlockBlob(this.storageClientContext);
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates a new BlockBlobURL object identical to the source but with the
|
||||
* specified request policy pipeline.
|
||||
*
|
||||
* @param {Pipeline} pipeline
|
||||
* @returns {BlockBlobURL}
|
||||
* @memberof BlockBlobURL
|
||||
*/
|
||||
public withPipeline(pipeline: Pipeline): BlockBlobURL {
|
||||
return new BlockBlobURL(this.url, pipeline);
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates a new BlockBlobURL object identical to the source but with the
|
||||
* specified snapshot timestamp.
|
||||
* Provide "" will remove the snapshot and return a URL to the base blob.
|
||||
*
|
||||
* @param {string} snapshot
|
||||
* @returns {BlockBlobURL}
|
||||
* @memberof BlockBlobURL
|
||||
*/
|
||||
public withSnapshot(snapshot: string): BlockBlobURL {
|
||||
return new BlockBlobURL(
|
||||
setURLParameter(
|
||||
this.url,
|
||||
URLConstants.Parameters.SNAPSHOT,
|
||||
snapshot.length === 0 ? undefined : snapshot
|
||||
),
|
||||
this.pipeline
|
||||
);
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates a new block blob, or updates the content of an existing block blob.
|
||||
* Updating an existing block blob overwrites any existing metadata on the blob.
|
||||
* Partial updates are not supported; the content of the existing blob is
|
||||
* overwritten with the new content. To perform a partial update of a block blob's,
|
||||
* use stageBlock and commitBlockList.
|
||||
*
|
||||
* This is a non-parallel uploading method, please use uploadFileToBlockBlob(),
|
||||
* uploadStreamToBlockBlob() or uploadBrowserDataToBlockBlob() for better performance
|
||||
* with concurrency uploading.
|
||||
*
|
||||
* @see https://docs.microsoft.com/rest/api/storageservices/put-blob
|
||||
*
|
||||
* @param {Aborter} aborter Create a new Aborter instance with Aborter.none or Aborter.timeout(),
|
||||
* goto documents of Aborter for more examples about request cancellation
|
||||
* @param {HttpRequestBody} body
|
||||
* @param {number} contentLength
|
||||
* @param {IBlockBlobUploadOptions} [options]
|
||||
* @returns {Promise<Models.BlockBlobUploadResponse>}
|
||||
* @memberof BlockBlobURL
|
||||
*/
|
||||
public async upload(
|
||||
aborter: Aborter,
|
||||
body: HttpRequestBody,
|
||||
contentLength: number,
|
||||
options: IBlockBlobUploadOptions = {}
|
||||
): Promise<Models.BlockBlobUploadResponse> {
|
||||
options.accessConditions = options.accessConditions || {};
|
||||
return this.blockBlobContext.upload(body, contentLength, {
|
||||
abortSignal: aborter,
|
||||
blobHTTPHeaders: options.blobHTTPHeaders,
|
||||
leaseAccessConditions: options.accessConditions.leaseAccessConditions,
|
||||
metadata: options.metadata,
|
||||
modifiedAccessConditions:
|
||||
options.accessConditions.modifiedAccessConditions,
|
||||
onUploadProgress: options.progress
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Uploads the specified block to the block blob's "staging area" to be later
|
||||
* committed by a call to commitBlockList.
|
||||
* @see https://docs.microsoft.com/rest/api/storageservices/put-block
|
||||
*
|
||||
* @param {Aborter} aborter Create a new Aborter instance with Aborter.none or Aborter.timeout(),
|
||||
* goto documents of Aborter for more examples about request cancellation
|
||||
* @param {string} blockId A 64-byte value that is base64-encoded
|
||||
* @param {HttpRequestBody} body
|
||||
* @param {number} contentLength
|
||||
* @param {IBlockBlobStageBlockOptions} [options]
|
||||
* @returns {Promise<Models.BlockBlobStageBlockResponse>}
|
||||
* @memberof BlockBlobURL
|
||||
*/
|
||||
public async stageBlock(
|
||||
aborter: Aborter,
|
||||
blockId: string,
|
||||
body: HttpRequestBody,
|
||||
contentLength: number,
|
||||
options: IBlockBlobStageBlockOptions = {}
|
||||
): Promise<Models.BlockBlobStageBlockResponse> {
|
||||
return this.blockBlobContext.stageBlock(blockId, contentLength, body, {
|
||||
abortSignal: aborter,
|
||||
leaseAccessConditions: options.leaseAccessConditions,
|
||||
onUploadProgress: options.progress,
|
||||
transactionalContentMD5: options.transactionalContentMD5
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* The Stage Block From URL operation creates a new block to be committed as part
|
||||
* of a blob where the contents are read from a URL.
|
||||
* This API is available starting in version 2018-03-28.
|
||||
* @see https://docs.microsoft.com/en-us/rest/api/storageservices/put-block-from-url
|
||||
*
|
||||
* @param {Aborter} aborter Create a new Aborter instance with Aborter.none or Aborter.timeout(),
|
||||
* goto documents of Aborter for more examples about request cancellation
|
||||
* @param {string} blockId A 64-byte value that is base64-encoded
|
||||
* @param {string} sourceURL Specifies the URL of the blob. The value
|
||||
* may be a URL of up to 2 KB in length that specifies a blob.
|
||||
* The value should be URL-encoded as it would appear
|
||||
* in a request URI. The source blob must either be public
|
||||
* or must be authenticated via a shared access signature.
|
||||
* If the source blob is public, no authentication is required
|
||||
* to perform the operation. Here are some examples of source object URLs:
|
||||
* - https://myaccount.blob.core.windows.net/mycontainer/myblob
|
||||
* - https://myaccount.blob.core.windows.net/mycontainer/myblob?snapshot=<DateTime>
|
||||
* @param {number} offset From which position of the blob to download, >= 0
|
||||
* @param {number} [count] How much data to be downloaded, > 0. Will download to the end when undefined
|
||||
* @param {IBlockBlobStageBlockFromURLOptions} [options={}]
|
||||
* @returns {Promise<Models.BlockBlobStageBlockFromURLResponse>}
|
||||
* @memberof BlockBlobURL
|
||||
*/
|
||||
public async stageBlockFromURL(
|
||||
aborter: Aborter,
|
||||
blockId: string,
|
||||
sourceURL: string,
|
||||
offset: number,
|
||||
count?: number,
|
||||
options: IBlockBlobStageBlockFromURLOptions = {}
|
||||
): Promise<Models.BlockBlobStageBlockFromURLResponse> {
|
||||
return this.blockBlobContext.stageBlockFromURL(blockId, 0, sourceURL, {
|
||||
abortSignal: aborter,
|
||||
leaseAccessConditions: options.leaseAccessConditions,
|
||||
sourceContentMD5: options.sourceContentMD5,
|
||||
sourceRange:
|
||||
offset === 0 && !count ? undefined : rangeToString({ offset, count })
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Writes a blob by specifying the list of block IDs that make up the blob.
|
||||
* In order to be written as part of a blob, a block must have been successfully written
|
||||
* to the server in a prior stageBlock operation. You can call commitBlockList to update a blob
|
||||
* by uploading only those blocks that have changed, then committing the new and existing
|
||||
* blocks together. Any blocks not specified in the block list and permanently deleted.
|
||||
* @see https://docs.microsoft.com/rest/api/storageservices/put-block-list
|
||||
*
|
||||
* @param {Aborter} aborter Create a new Aborter instance with Aborter.none or Aborter.timeout(),
|
||||
* goto documents of Aborter for more examples about request cancellation
|
||||
* @param {string[]} blocks Array of 64-byte value that is base64-encoded
|
||||
* @param {IBlockBlobCommitBlockListOptions} [options]
|
||||
* @returns {Promise<Models.BlockBlobCommitBlockListResponse>}
|
||||
* @memberof BlockBlobURL
|
||||
*/
|
||||
public async commitBlockList(
|
||||
aborter: Aborter,
|
||||
blocks: string[],
|
||||
options: IBlockBlobCommitBlockListOptions = {}
|
||||
): Promise<Models.BlockBlobCommitBlockListResponse> {
|
||||
options.accessConditions = options.accessConditions || {};
|
||||
return this.blockBlobContext.commitBlockList(
|
||||
{ latest: blocks },
|
||||
{
|
||||
abortSignal: aborter,
|
||||
blobHTTPHeaders: options.blobHTTPHeaders,
|
||||
leaseAccessConditions: options.accessConditions.leaseAccessConditions,
|
||||
metadata: options.metadata,
|
||||
modifiedAccessConditions:
|
||||
options.accessConditions.modifiedAccessConditions
|
||||
}
|
||||
);
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the list of blocks that have been uploaded as part of a block blob
|
||||
* using the specified block list filter.
|
||||
* @see https://docs.microsoft.com/rest/api/storageservices/get-block-list
|
||||
*
|
||||
* @param {Aborter} aborter Create a new Aborter instance with Aborter.none or Aborter.timeout(),
|
||||
* goto documents of Aborter for more examples about request cancellation
|
||||
* @param {Models.BlockListType} listType
|
||||
* @param {IBlockBlobGetBlockListOptions} [options]
|
||||
* @returns {Promise<Models.BlockBlobGetBlockListResponse>}
|
||||
* @memberof BlockBlobURL
|
||||
*/
|
||||
public async getBlockList(
|
||||
aborter: Aborter,
|
||||
listType: Models.BlockListType,
|
||||
options: IBlockBlobGetBlockListOptions = {}
|
||||
): Promise<Models.BlockBlobGetBlockListResponse> {
|
||||
const res = await this.blockBlobContext.getBlockList(listType, {
|
||||
abortSignal: aborter,
|
||||
leaseAccessConditions: options.leaseAccessConditions
|
||||
});
|
||||
|
||||
if (!res.committedBlocks) {
|
||||
res.committedBlocks = [];
|
||||
}
|
||||
|
||||
if (!res.uncommittedBlocks) {
|
||||
res.uncommittedBlocks = [];
|
||||
}
|
||||
|
||||
return res;
|
||||
}
|
||||
}
|
|
@ -0,0 +1,23 @@
|
|||
import {
|
||||
RequestPolicy,
|
||||
RequestPolicyFactory,
|
||||
RequestPolicyOptions
|
||||
} from "ms-rest-js";
|
||||
|
||||
import { BrowserPolicy } from "./policies/BrowserPolicy";
|
||||
|
||||
/**
|
||||
* BrowserPolicyFactory is a factory class helping generating BrowserPolicy objects.
|
||||
*
|
||||
* @export
|
||||
* @class BrowserPolicyFactory
|
||||
* @implements {RequestPolicyFactory}
|
||||
*/
|
||||
export class BrowserPolicyFactory implements RequestPolicyFactory {
|
||||
public create(
|
||||
nextPolicy: RequestPolicy,
|
||||
options: RequestPolicyOptions
|
||||
): BrowserPolicy {
|
||||
return new BrowserPolicy(nextPolicy, options);
|
||||
}
|
||||
}
|
|
@ -0,0 +1,132 @@
|
|||
/**
|
||||
* This is a helper class to construct a string representing the permissions granted by a ServiceSAS to a container.
|
||||
* Setting a value to true means that any SAS which uses these permissions will grant permissions for that operation.
|
||||
* Once all the values are set, this should be serialized with toString and set as the permissions field on a
|
||||
* {@link ServiceSASSignatureValues} object. It is possible to construct the permissions string without this class, but
|
||||
* the order of the permissions is particular and this class guarantees correctness.
|
||||
*
|
||||
* @export
|
||||
* @class ContainerSASPermissions
|
||||
*/
|
||||
export class ContainerSASPermissions {
|
||||
/**
|
||||
* Creates an {@link ContainerSASPermissions} from the specified permissions string. This method will throw an
|
||||
* Error if it encounters a character that does not correspond to a valid permission.
|
||||
*
|
||||
* @static
|
||||
* @param {string} permissions
|
||||
* @returns
|
||||
* @memberof ContainerSASPermissions
|
||||
*/
|
||||
public static parse(permissions: string) {
|
||||
const containerSASPermissions = new ContainerSASPermissions();
|
||||
|
||||
for (const char of permissions) {
|
||||
switch (char) {
|
||||
case "r":
|
||||
containerSASPermissions.read = true;
|
||||
break;
|
||||
case "a":
|
||||
containerSASPermissions.add = true;
|
||||
break;
|
||||
case "c":
|
||||
containerSASPermissions.create = true;
|
||||
break;
|
||||
case "w":
|
||||
containerSASPermissions.write = true;
|
||||
break;
|
||||
case "d":
|
||||
containerSASPermissions.delete = true;
|
||||
break;
|
||||
case "l":
|
||||
containerSASPermissions.list = true;
|
||||
break;
|
||||
default:
|
||||
throw new RangeError(`Invalid permission ${char}`);
|
||||
}
|
||||
}
|
||||
|
||||
return containerSASPermissions;
|
||||
}
|
||||
|
||||
/**
|
||||
* Specifies Read access granted.
|
||||
*
|
||||
* @type {boolean}
|
||||
* @memberof ContainerSASPermissions
|
||||
*/
|
||||
public read: boolean = false;
|
||||
|
||||
/**
|
||||
* Specifies Add access granted.
|
||||
*
|
||||
* @type {boolean}
|
||||
* @memberof ContainerSASPermissions
|
||||
*/
|
||||
public add: boolean = false;
|
||||
|
||||
/**
|
||||
* Specifies Create access granted.
|
||||
*
|
||||
* @type {boolean}
|
||||
* @memberof ContainerSASPermissions
|
||||
*/
|
||||
public create: boolean = false;
|
||||
|
||||
/**
|
||||
* Specifies Write access granted.
|
||||
*
|
||||
* @type {boolean}
|
||||
* @memberof ContainerSASPermissions
|
||||
*/
|
||||
public write: boolean = false;
|
||||
|
||||
/**
|
||||
* Specifies Delete access granted.
|
||||
*
|
||||
* @type {boolean}
|
||||
* @memberof ContainerSASPermissions
|
||||
*/
|
||||
public delete: boolean = false;
|
||||
|
||||
/**
|
||||
* Specifies List access granted.
|
||||
*
|
||||
* @type {boolean}
|
||||
* @memberof ContainerSASPermissions
|
||||
*/
|
||||
public list: boolean = false;
|
||||
|
||||
/**
|
||||
* Converts the given permissions to a string. Using this method will guarantee the permissions are in an
|
||||
* order accepted by the service.
|
||||
*
|
||||
* The order of the characters should be as specified here to ensure correctness.
|
||||
* @see https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas
|
||||
*
|
||||
* @returns {string}
|
||||
* @memberof ContainerSASPermissions
|
||||
*/
|
||||
public toString(): string {
|
||||
const permissions: string[] = [];
|
||||
if (this.read) {
|
||||
permissions.push("r");
|
||||
}
|
||||
if (this.add) {
|
||||
permissions.push("a");
|
||||
}
|
||||
if (this.create) {
|
||||
permissions.push("c");
|
||||
}
|
||||
if (this.write) {
|
||||
permissions.push("w");
|
||||
}
|
||||
if (this.delete) {
|
||||
permissions.push("d");
|
||||
}
|
||||
if (this.list) {
|
||||
permissions.push("l");
|
||||
}
|
||||
return permissions.join("");
|
||||
}
|
||||
}
|
|
@ -0,0 +1,613 @@
|
|||
import { HttpResponse } from "ms-rest-js";
|
||||
import * as Models from "../lib/generated/models";
|
||||
import { Aborter } from "./Aborter";
|
||||
import { Container } from "./generated/operations";
|
||||
import { IContainerAccessConditions, IMetadata } from "./models";
|
||||
import { Pipeline } from "./Pipeline";
|
||||
import { ServiceURL } from "./ServiceURL";
|
||||
import { StorageURL } from "./StorageURL";
|
||||
import { ETagNone } from "./utils/constants";
|
||||
import { appendToURLPath, truncatedISO8061Date } from "./utils/utils.common";
|
||||
|
||||
/**
 * Options for ContainerURL.create.
 */
export interface IContainerCreateOptions {
  // User-defined name/value metadata pairs to store with the container.
  metadata?: IMetadata;
  // Level of public access to the container's data.
  access?: Models.PublicAccessType;
}

/**
 * Options for ContainerURL.getProperties.
 */
export interface IContainerGetPropertiesOptions {
  leaseAccessConditions?: Models.LeaseAccessConditions;
}

/**
 * Options for ContainerURL.delete.
 */
export interface IContainerDeleteMethodOptions {
  containerAccessConditions?: IContainerAccessConditions;
}

/**
 * Options for ContainerURL.setMetadata.
 */
export interface IContainerSetMetadataOptions {
  // User-defined name/value metadata pairs; omitting removes existing metadata.
  metadata?: IMetadata;
  containerAccessConditions?: IContainerAccessConditions;
}

/**
 * Options for ContainerURL.getAccessPolicy.
 */
export interface IContainerGetAccessPolicyOptions {
  leaseAccessConditions?: Models.LeaseAccessConditions;
}

export interface ISignedIdentifier {
  /**
   * @member {string} id a unique id
   */
  id: string;
  /**
   * @member {AccessPolicy} accessPolicy
   */
  accessPolicy: {
    /**
     * @member {Date} start the date-time from which the policy is active
     */
    start: Date;
    /**
     * @member {Date} expiry the date-time at which the policy expires
     */
    expiry: Date;
    /**
     * @member {string} permission the permissions for the acl policy
     * @see https://docs.microsoft.com/en-us/rest/api/storageservices/set-container-acl
     */
    permission: string;
  };
}

export declare type ContainerGetAccessPolicyResponse = {
  signedIdentifiers: ISignedIdentifier[];
} & Models.ContainerGetAccessPolicyHeaders & {
    /**
     * The underlying HTTP response.
     */
    _response: HttpResponse & {
      /**
       * The parsed HTTP response headers.
       */
      parsedHeaders: Models.ContainerGetAccessPolicyHeaders;
      /**
       * The response body as text (string format)
       */
      bodyAsText: string;
      /**
       * The response body as parsed JSON or XML
       */
      parsedBody: Models.SignedIdentifier[];
    };
  };

/**
 * Options for ContainerURL.setAccessPolicy.
 */
export interface IContainerSetAccessPolicyOptions {
  containerAccessConditions?: IContainerAccessConditions;
}

/**
 * Options for ContainerURL.acquireLease.
 */
export interface IContainerAcquireLeaseOptions {
  modifiedAccessConditions?: Models.ModifiedAccessConditions;
}

/**
 * Options for ContainerURL.releaseLease.
 */
export interface IContainerReleaseLeaseOptions {
  modifiedAccessConditions?: Models.ModifiedAccessConditions;
}

/**
 * Options for ContainerURL.renewLease.
 */
export interface IContainerRenewLeaseOptions {
  modifiedAccessConditions?: Models.ModifiedAccessConditions;
}

/**
 * Options for ContainerURL.breakLease.
 */
export interface IContainerBreakLeaseOptions {
  modifiedAccessConditions?: Models.ModifiedAccessConditions;
}

/**
 * Options for ContainerURL.changeLease.
 */
export interface IContainerChangeLeaseOptions {
  modifiedAccessConditions?: Models.ModifiedAccessConditions;
}

/**
 * Options for listing a segment of blobs within a container.
 */
export interface IContainerListBlobsSegmentOptions {
  /**
   * @member {string} [prefix] Filters the results to return only containers
   * whose name begins with the specified prefix.
   */
  prefix?: string;
  /**
   * @member {number} [maxresults] Specifies the maximum number of containers
   * to return. If the request does not specify maxresults, or specifies a
   * value greater than 5000, the server will return up to 5000 items. Note
   * that if the listing operation crosses a partition boundary, then the
   * service will return a continuation token for retrieving the remainder of
   * the results. For this reason, it is possible that the service will return
   * fewer results than specified by maxresults, or than the default of 5000.
   */
  maxresults?: number;
  /**
   * @member {ListBlobsIncludeItem[]} [include] Include this parameter to
   * specify one or more datasets to include in the response.
   */
  include?: Models.ListBlobsIncludeItem[];
}
|
||||
|
||||
/**
|
||||
* A ContainerURL represents a URL to the Azure Storage container allowing you to manipulate its blobs.
|
||||
*
|
||||
* @export
|
||||
* @class ContainerURL
|
||||
* @extends {StorageURL}
|
||||
*/
|
||||
export class ContainerURL extends StorageURL {
|
||||
/**
|
||||
* Creates a ContainerURL object from ServiceURL
|
||||
* @param serviceURL
|
||||
* @param containerName
|
||||
*/
|
||||
public static fromServiceURL(
|
||||
serviceURL: ServiceURL,
|
||||
containerName: string
|
||||
): ContainerURL {
|
||||
return new ContainerURL(
|
||||
appendToURLPath(serviceURL.url, containerName),
|
||||
serviceURL.pipeline
|
||||
);
|
||||
}
|
||||
|
||||
  /**
   * containerContext provided by protocol layer.
   *
   * @private
   * @type {Container}
   * @memberof ContainerURL
   */
  private containerContext: Container;
|
||||
|
||||
  /**
   * Creates an instance of ContainerURL.
   * @param {string} url A URL string pointing to Azure Storage blob container, such as
   *                     "https://myaccount.blob.core.windows.net/mycontainer". You can
   *                     append a SAS if using AnonymousCredential, such as
   *                     "https://myaccount.blob.core.windows.net/mycontainer?sasString".
   * @param {Pipeline} pipeline Call StorageURL.newPipeline() to create a default
   *                            pipeline, or provide a customized pipeline.
   * @memberof ContainerURL
   */
  constructor(url: string, pipeline: Pipeline) {
    super(url, pipeline);
    // Protocol-layer client bound to the shared storage client context.
    this.containerContext = new Container(this.storageClientContext);
  }
|
||||
|
||||
/**
|
||||
* Creates a new ContainerURL object identical to the source but with the
|
||||
* specified request policy pipeline.
|
||||
*
|
||||
* @param {Pipeline} pipeline
|
||||
* @returns {ContainerURL}
|
||||
* @memberof ContainerURL
|
||||
*/
|
||||
public withPipeline(pipeline: Pipeline): ContainerURL {
|
||||
return new ContainerURL(this.url, pipeline);
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates a new container under the specified account. If the container with
|
||||
* the same name already exists, the operation fails.
|
||||
* @see https://docs.microsoft.com/en-us/rest/api/storageservices/create-container
|
||||
*
|
||||
* @param {Aborter} aborter Create a new Aborter instance with Aborter.none or Aborter.timeout(),
|
||||
* goto documents of Aborter for more examples about request cancellation
|
||||
* @param {IContainerCreateOptions} [options]
|
||||
* @returns {Promise<Models.ContainerCreateResponse>}
|
||||
* @memberof ContainerURL
|
||||
*/
|
||||
public async create(
|
||||
aborter: Aborter,
|
||||
options: IContainerCreateOptions = {}
|
||||
): Promise<Models.ContainerCreateResponse> {
|
||||
// Spread operator in destructuring assignments,
|
||||
// this will filter out unwanted properties from the response object into result object
|
||||
return this.containerContext.create({
|
||||
...options,
|
||||
abortSignal: aborter
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns all user-defined metadata and system properties for the specified
|
||||
* container. The data returned does not include the container's list of blobs.
|
||||
* @see https://docs.microsoft.com/en-us/rest/api/storageservices/get-container-properties
|
||||
*
|
||||
* @param {Aborter} aborter Create a new Aborter instance with Aborter.none or Aborter.timeout(),
|
||||
* goto documents of Aborter for more examples about request cancellation
|
||||
* @param {IContainersGetPropertiesOptions} [options]
|
||||
* @returns {Promise<Models.ContainerGetPropertiesResponse>}
|
||||
* @memberof ContainerURL
|
||||
*/
|
||||
public async getProperties(
|
||||
aborter: Aborter,
|
||||
options: IContainerGetPropertiesOptions = {}
|
||||
): Promise<Models.ContainerGetPropertiesResponse> {
|
||||
if (!options.leaseAccessConditions) {
|
||||
options.leaseAccessConditions = {};
|
||||
}
|
||||
|
||||
return this.containerContext.getProperties({
|
||||
abortSignal: aborter,
|
||||
...options.leaseAccessConditions
|
||||
});
|
||||
}
|
||||
|
||||
  /**
   * Marks the specified container for deletion. The container and any blobs
   * contained within it are later deleted during garbage collection.
   * @see https://docs.microsoft.com/en-us/rest/api/storageservices/delete-container
   *
   * @param {Aborter} aborter Create a new Aborter instance with Aborter.none or Aborter.timeout(),
   *                          goto documents of Aborter for more examples about request cancellation
   * @param {IContainerDeleteMethodOptions} [options]
   * @returns {Promise<Models.ContainerDeleteResponse>}
   * @memberof ContainerURL
   */
  public async delete(
    aborter: Aborter,
    options: IContainerDeleteMethodOptions = {}
  ): Promise<Models.ContainerDeleteResponse> {
    // Default every nested access-condition container so the reads below
    // never dereference undefined.
    if (!options.containerAccessConditions) {
      options.containerAccessConditions = {};
    }

    if (!options.containerAccessConditions.modifiedAccessConditions) {
      options.containerAccessConditions.modifiedAccessConditions = {};
    }

    if (!options.containerAccessConditions.leaseAccessConditions) {
      options.containerAccessConditions.leaseAccessConditions = {};
    }

    // ETag conditions (IfMatch/IfNoneMatch) are ignored by the Delete
    // Container service operation, so a non-default value would silently
    // have no effect — fail fast instead of misleading the caller.
    if (
      (options.containerAccessConditions.modifiedAccessConditions.ifMatch &&
        options.containerAccessConditions.modifiedAccessConditions.ifMatch !==
          ETagNone) ||
      (options.containerAccessConditions.modifiedAccessConditions.ifNoneMatch &&
        options.containerAccessConditions.modifiedAccessConditions
          .ifNoneMatch !== ETagNone)
    ) {
      throw new RangeError(
        "the IfMatch and IfNoneMatch access conditions must have their default\
    values because they are ignored by the service"
      );
    }

    return this.containerContext.deleteMethod({
      abortSignal: aborter,
      leaseAccessConditions:
        options.containerAccessConditions.leaseAccessConditions,
      modifiedAccessConditions:
        options.containerAccessConditions.modifiedAccessConditions
    });
  }
|
||||
|
||||
/**
|
||||
* Sets one or more user-defined name-value pairs for the specified container.
|
||||
*
|
||||
* If no option provided, or no metadata defined in the option parameter, the container
|
||||
* metadata will be removed.
|
||||
* @see https://docs.microsoft.com/en-us/rest/api/storageservices/set-container-metadata
|
||||
*
|
||||
* @param {Aborter} aborter Create a new Aborter instance with Aborter.none or Aborter.timeout(),
|
||||
* goto documents of Aborter for more examples about request cancellation
|
||||
* @param {IContainerSetMetadataOptions} [options]
|
||||
* @returns {Promise<Models.ContainerSetMetadataResponse>}
|
||||
* @memberof ContainerURL
|
||||
*/
|
||||
public async setMetadata(
|
||||
aborter: Aborter,
|
||||
options: IContainerSetMetadataOptions = {}
|
||||
): Promise<Models.ContainerSetMetadataResponse> {
|
||||
if (!options.containerAccessConditions) {
|
||||
options.containerAccessConditions = {};
|
||||
}
|
||||
|
||||
if (!options.containerAccessConditions.modifiedAccessConditions) {
|
||||
options.containerAccessConditions.modifiedAccessConditions = {};
|
||||
}
|
||||
|
||||
if (!options.containerAccessConditions.leaseAccessConditions) {
|
||||
options.containerAccessConditions.leaseAccessConditions = {};
|
||||
}
|
||||
|
||||
if (
|
||||
options.containerAccessConditions.modifiedAccessConditions
|
||||
.ifUnmodifiedSince ||
|
||||
(options.containerAccessConditions.modifiedAccessConditions.ifMatch &&
|
||||
options.containerAccessConditions.modifiedAccessConditions.ifMatch !==
|
||||
ETagNone) ||
|
||||
(options.containerAccessConditions.modifiedAccessConditions.ifNoneMatch &&
|
||||
options.containerAccessConditions.modifiedAccessConditions
|
||||
.ifNoneMatch !== ETagNone)
|
||||
) {
|
||||
throw new RangeError(
|
||||
"the IfUnmodifiedSince, IfMatch, and IfNoneMatch must have their default values\
|
||||
because they are ignored by the blob service"
|
||||
);
|
||||
}
|
||||
|
||||
return this.containerContext.setMetadata({
|
||||
abortSignal: aborter,
|
||||
leaseAccessConditions:
|
||||
options.containerAccessConditions.leaseAccessConditions,
|
||||
metadata: options.metadata,
|
||||
modifiedAccessConditions:
|
||||
options.containerAccessConditions.modifiedAccessConditions
|
||||
});
|
||||
}
|
||||
|
||||
  /**
   * Gets the permissions for the specified container. The permissions indicate
   * whether container data may be accessed publicly.
   *
   * WARNING: JavaScript Date will potentially lose precision when parsing start and expiry strings.
   * For example, new Date("2018-12-31T03:44:23.8827891Z").toISOString() will get "2018-12-31T03:44:23.882Z".
   *
   * @see https://docs.microsoft.com/en-us/rest/api/storageservices/get-container-acl
   *
   * @param {Aborter} aborter Create a new Aborter instance with Aborter.none or Aborter.timeout(),
   *                          goto documents of Aborter for more examples about request cancellation
   * @param {IContainerGetAccessPolicyOptions} [options]
   * @returns {Promise<ContainerGetAccessPolicyResponse>}
   * @memberof ContainerURL
   */
  public async getAccessPolicy(
    aborter: Aborter,
    options: IContainerGetAccessPolicyOptions = {}
  ): Promise<ContainerGetAccessPolicyResponse> {
    // Ensure lease conditions exist so the request options below are well-formed.
    if (!options.leaseAccessConditions) {
      options.leaseAccessConditions = {};
    }

    const response = await this.containerContext.getAccessPolicy({
      abortSignal: aborter,
      leaseAccessConditions: options.leaseAccessConditions
    });

    // Copy the header fields of the generated response into the
    // ContainerGetAccessPolicyResponse shape; signed identifiers are
    // converted and appended below.
    const res: ContainerGetAccessPolicyResponse = {
      _response: response._response,
      blobPublicAccess: response.blobPublicAccess,
      date: response.date,
      eTag: response.eTag,
      errorCode: response.errorCode,
      lastModified: response.lastModified,
      requestId: response.requestId,
      signedIdentifiers: [],
      version: response.version
    };

    // NOTE(review): `response` is iterated directly — presumably the generated
    // model merges the signed-identifier array with the response headers;
    // confirm against the generated client before changing this loop.
    for (const identifier of response) {
      res.signedIdentifiers.push({
        accessPolicy: {
          // Convert the service's ISO8601 date strings into Date objects
          // (precision beyond milliseconds is lost — see WARNING above).
          expiry: new Date(identifier.accessPolicy.expiry),
          permission: identifier.accessPolicy.permission,
          start: new Date(identifier.accessPolicy.start)
        },
        id: identifier.id
      });
    }

    return res;
  }
|
||||
|
||||
/**
|
||||
* Sets the permissions for the specified container. The permissions indicate
|
||||
* whether blobs in a container may be accessed publicly.
|
||||
*
|
||||
* When you set permissions for a container, the existing permissions are replaced.
|
||||
* If no access or containerAcl provided, the existing container ACL will be
|
||||
* removed.
|
||||
* @see https://docs.microsoft.com/en-us/rest/api/storageservices/set-container-acl
|
||||
*
|
||||
* @param {Aborter} aborter Create a new Aborter instance with Aborter.none or Aborter.timeout(),
|
||||
* goto documents of Aborter for more examples about request cancellation
|
||||
* @param {PublicAccessType} [access]
|
||||
* @param {ISignedIdentifier[]} [containerAcl]
|
||||
* @param {IContainerSetAccessPolicyOptions} [options]
|
||||
* @returns {Promise<Models.ContainerSetAccessPolicyResponse>}
|
||||
* @memberof ContainerURL
|
||||
*/
|
||||
public async setAccessPolicy(
|
||||
aborter: Aborter,
|
||||
access?: Models.PublicAccessType,
|
||||
containerAcl?: ISignedIdentifier[],
|
||||
options: IContainerSetAccessPolicyOptions = {}
|
||||
): Promise<Models.ContainerSetAccessPolicyResponse> {
|
||||
options.containerAccessConditions = options.containerAccessConditions || {};
|
||||
const acl: Models.SignedIdentifier[] = [];
|
||||
for (const identifier of containerAcl || []) {
|
||||
acl.push({
|
||||
accessPolicy: {
|
||||
expiry: truncatedISO8061Date(identifier.accessPolicy.expiry),
|
||||
permission: identifier.accessPolicy.permission,
|
||||
start: truncatedISO8061Date(identifier.accessPolicy.start)
|
||||
},
|
||||
id: identifier.id
|
||||
});
|
||||
}
|
||||
|
||||
return this.containerContext.setAccessPolicy({
|
||||
abortSignal: aborter,
|
||||
access,
|
||||
containerAcl: acl,
|
||||
leaseAccessConditions:
|
||||
options.containerAccessConditions.leaseAccessConditions,
|
||||
modifiedAccessConditions:
|
||||
options.containerAccessConditions.modifiedAccessConditions
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Establishes and manages a lock on a container for delete operations.
|
||||
* The lock duration can be 15 to 60 seconds, or can be infinite.
|
||||
* @see https://docs.microsoft.com/en-us/rest/api/storageservices/lease-container
|
||||
*
|
||||
* @param {Aborter} aborter Create a new Aborter instance with Aborter.none or Aborter.timeout(),
|
||||
* goto documents of Aborter for more examples about request cancellation
|
||||
* @param {string} proposedLeaseId Can be specified in any valid GUID string format
|
||||
* @param {number} duration Must be between 15 to 60 seconds, or infinite (-1)
|
||||
* @param {IContainerAcquireLeaseOptions} [options]
|
||||
* @returns {Promise<Models.ContainerAcquireLeaseResponse>}
|
||||
* @memberof ContainerURL
|
||||
*/
|
||||
public async acquireLease(
|
||||
aborter: Aborter,
|
||||
proposedLeaseId: string,
|
||||
duration: number,
|
||||
options: IContainerAcquireLeaseOptions = {}
|
||||
): Promise<Models.ContainerAcquireLeaseResponse> {
|
||||
return this.containerContext.acquireLease({
|
||||
abortSignal: aborter,
|
||||
duration,
|
||||
modifiedAccessConditions: options.modifiedAccessConditions,
|
||||
proposedLeaseId
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* To free the lease if it is no longer needed so that another client may
|
||||
* immediately acquire a lease against the container.
|
||||
* @see https://docs.microsoft.com/en-us/rest/api/storageservices/lease-container
|
||||
*
|
||||
* @param {Aborter} aborter Create a new Aborter instance with Aborter.none or Aborter.timeout(),
|
||||
* goto documents of Aborter for more examples about request cancellation
|
||||
* @param {string} leaseId
|
||||
* @param {IContainerReleaseLeaseOptions} [options]
|
||||
* @returns {Promise<Models.ContainerReleaseLeaseResponse>}
|
||||
* @memberof ContainerURL
|
||||
*/
|
||||
public async releaseLease(
|
||||
aborter: Aborter,
|
||||
leaseId: string,
|
||||
options: IContainerReleaseLeaseOptions = {}
|
||||
): Promise<Models.ContainerReleaseLeaseResponse> {
|
||||
return this.containerContext.releaseLease(leaseId, {
|
||||
abortSignal: aborter,
|
||||
modifiedAccessConditions: options.modifiedAccessConditions
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* To renew an existing lease.
|
||||
* @see https://docs.microsoft.com/en-us/rest/api/storageservices/lease-container
|
||||
*
|
||||
* @param {Aborter} aborter Create a new Aborter instance with Aborter.none or Aborter.timeout(),
|
||||
* goto documents of Aborter for more examples about request cancellation
|
||||
* @param {string} leaseId
|
||||
* @param {IContainerRenewLeaseOptions} [options]
|
||||
* @returns {Promise<Models.ContainerRenewLeaseResponse>}
|
||||
* @memberof ContainerURL
|
||||
*/
|
||||
public async renewLease(
|
||||
aborter: Aborter,
|
||||
leaseId: string,
|
||||
options: IContainerRenewLeaseOptions = {}
|
||||
): Promise<Models.ContainerRenewLeaseResponse> {
|
||||
return this.containerContext.renewLease(leaseId, {
|
||||
abortSignal: aborter,
|
||||
modifiedAccessConditions: options.modifiedAccessConditions
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* To end the lease but ensure that another client cannot acquire a new lease
|
||||
* until the current lease period has expired.
|
||||
* @see https://docs.microsoft.com/en-us/rest/api/storageservices/lease-container
|
||||
*
|
||||
* @param {Aborter} aborter Create a new Aborter instance with Aborter.none or Aborter.timeout(),
|
||||
* goto documents of Aborter for more examples about request cancellation
|
||||
* @param {number} period break period
|
||||
* @param {IContainerBreakLeaseOptions} [options]
|
||||
* @returns {Promise<Models.ContainerBreakLeaseResponse>}
|
||||
* @memberof ContainerURL
|
||||
*/
|
||||
public async breakLease(
|
||||
aborter: Aborter,
|
||||
period: number,
|
||||
options: IContainerBreakLeaseOptions = {}
|
||||
): Promise<Models.ContainerBreakLeaseResponse> {
|
||||
return this.containerContext.breakLease({
|
||||
abortSignal: aborter,
|
||||
breakPeriod: period,
|
||||
modifiedAccessConditions: options.modifiedAccessConditions
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* To change the ID of an existing lease.
|
||||
* @see https://docs.microsoft.com/en-us/rest/api/storageservices/lease-container
|
||||
*
|
||||
* @param {Aborter} aborter Create a new Aborter instance with Aborter.none or Aborter.timeout(),
|
||||
* goto documents of Aborter for more examples about request cancellation
|
||||
* @param {string} leaseId
|
||||
* @param {string} proposedLeaseId
|
||||
* @param {IContainerChangeLeaseOptions} [options]
|
||||
* @returns {Promise<Models.ContainerChangeLeaseResponse>}
|
||||
* @memberof ContainerURL
|
||||
*/
|
||||
public async changeLease(
|
||||
aborter: Aborter,
|
||||
leaseId: string,
|
||||
proposedLeaseId: string,
|
||||
options: IContainerChangeLeaseOptions = {}
|
||||
): Promise<Models.ContainerChangeLeaseResponse> {
|
||||
return this.containerContext.changeLease(leaseId, proposedLeaseId, {
|
||||
abortSignal: aborter,
|
||||
modifiedAccessConditions: options.modifiedAccessConditions
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* listBlobFlatSegment returns a single segment of blobs starting from the
|
||||
* specified Marker. Use an empty Marker to start enumeration from the beginning.
|
||||
* After getting a segment, process it, and then call ListBlobsFlatSegment again
|
||||
* (passing the the previously-returned Marker) to get the next segment.
|
||||
* @see https://docs.microsoft.com/rest/api/storageservices/list-blobs
|
||||
*
|
||||
* @param {Aborter} aborter Create a new Aborter instance with Aborter.none or Aborter.timeout(),
|
||||
* goto documents of Aborter for more examples about request cancellation
|
||||
* @param {string} [marker]
|
||||
* @param {IContainerListBlobsSegmentOptions} [options]
|
||||
* @returns {Promise<Models.ContainerListBlobFlatSegmentResponse>}
|
||||
* @memberof ContainerURL
|
||||
*/
|
||||
public async listBlobFlatSegment(
|
||||
aborter: Aborter,
|
||||
marker?: string,
|
||||
options: IContainerListBlobsSegmentOptions = {}
|
||||
): Promise<Models.ContainerListBlobFlatSegmentResponse> {
|
||||
return this.containerContext.listBlobFlatSegment({
|
||||
abortSignal: aborter,
|
||||
marker,
|
||||
...options
|
||||
});
|
||||
}
|
||||
|
||||
  /**
   * listBlobHierarchySegment returns a single segment of blobs starting from
   * the specified Marker. Use an empty Marker to start enumeration from the
   * beginning. After getting a segment, process it, and then call ListBlobsHierarchicalSegment
   * again (passing the previously-returned Marker) to get the next segment.
   * @see https://docs.microsoft.com/rest/api/storageservices/list-blobs
   *
   * @param {Aborter} aborter Create a new Aborter instance with Aborter.none or Aborter.timeout(),
   *                          goto documents of Aborter for more examples about request cancellation
   * @param {string} delimiter The charactor or string used to define the virtual hierarchy
   * @param {string} [marker] Continuation marker returned by a previous call
   * @param {IContainerListBlobsSegmentOptions} [options]
   * @returns {Promise<Models.ContainerListBlobHierarchySegmentResponse>}
   * @memberof ContainerURL
   */
  public async listBlobHierarchySegment(
    aborter: Aborter,
    delimiter: string,
    marker?: string,
    options: IContainerListBlobsSegmentOptions = {}
  ): Promise<Models.ContainerListBlobHierarchySegmentResponse> {
    return this.containerContext.listBlobHierarchySegment(delimiter, {
      abortSignal: aborter,
      marker,
      ...options
    });
  }
|
||||
}
|
|
@ -0,0 +1,159 @@
|
|||
import { AccountSASPermissions } from "./AccountSASPermissions";
|
||||
import { AccountSASResourceTypes } from "./AccountSASResourceTypes";
|
||||
import { AccountSASServices } from "./AccountSASServices";
|
||||
import { SharedKeyCredential } from "./credentials/SharedKeyCredential";
|
||||
import { IIPRange, ipRangeToString } from "./IIPRange";
|
||||
import { SASProtocol, SASQueryParameters } from "./SASQueryParameters";
|
||||
import { SERVICE_VERSION } from "./utils/constants";
|
||||
import { truncatedISO8061Date } from "./utils/utils.common";
|
||||
|
||||
/**
 * ONLY AVAILABLE IN NODE.JS RUNTIME.
 *
 * IAccountSASSignatureValues is used to generate a Shared Access Signature (SAS) for an Azure Storage account. Once
 * all the values here are set appropriately, call generateSASQueryParameters() to obtain a representation of the SAS
 * which can actually be applied to blob urls. Note: that both this class and {@link SASQueryParameters} exist because
 * the former is mutable and a logical representation while the latter is immutable and used to generate actual REST
 * requests.
 *
 * @see https://docs.microsoft.com/en-us/azure/storage/common/storage-dotnet-shared-access-signature-part-1
 * for more conceptual information on SAS
 *
 * @see https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-an-account-sas
 * for descriptions of the parameters, including which are required
 *
 * @export
 * @class IAccountSASSignatureValues
 */
export interface IAccountSASSignatureValues {
  /**
   * If not provided, this defaults to the service version targeted by this version of the library.
   *
   * @type {string}
   * @memberof IAccountSASSignatureValues
   */
  version?: string;

  /**
   * Optional. SAS protocols allowed.
   *
   * @type {SASProtocol}
   * @memberof IAccountSASSignatureValues
   */
  protocol?: SASProtocol;

  /**
   * Optional. When the SAS will take effect.
   *
   * @type {Date}
   * @memberof IAccountSASSignatureValues
   */
  startTime?: Date;

  /**
   * Required. The time after which the SAS will no longer work.
   *
   * @type {Date}
   * @memberof IAccountSASSignatureValues
   */
  expiryTime: Date;

  /**
   * Specifies which operations the SAS user may perform. Please refer to {@link AccountSASPermissions} for help
   * constructing the permissions string.
   *
   * @type {string}
   * @memberof IAccountSASSignatureValues
   */
  permissions: string;

  /**
   * Optional. IP range allowed.
   *
   * @type {IIPRange}
   * @memberof IAccountSASSignatureValues
   */
  ipRange?: IIPRange;

  /**
   * The values that indicate the services accessible with this SAS. Please refer to {@link AccountSASServices} to
   * construct this value.
   *
   * @type {string}
   * @memberof IAccountSASSignatureValues
   */
  services: string;

  /**
   * The values that indicate the resource types accessible with this SAS. Please refer
   * to {@link AccountSASResourceTypes} to construct this value.
   *
   * @type {string}
   * @memberof IAccountSASSignatureValues
   */
  resourceTypes: string;
}
|
||||
|
||||
/**
|
||||
* ONLY AVAILABLE IN NODE.JS RUNTIME.
|
||||
*
|
||||
* Generates a {@link SASQueryParameters} object which contains all SAS query parameters needed to make an actual
|
||||
* REST request.
|
||||
*
|
||||
* @see https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-an-account-sas
|
||||
*
|
||||
* @param {SharedKeyCredential} sharedKeyCredential
|
||||
* @returns {SASQueryParameters}
|
||||
* @memberof IAccountSASSignatureValues
|
||||
*/
|
||||
export function generateAccountSASQueryParameters(
|
||||
accountSASSignatureValues: IAccountSASSignatureValues,
|
||||
sharedKeyCredential: SharedKeyCredential
|
||||
): SASQueryParameters {
|
||||
const version = accountSASSignatureValues.version
|
||||
? accountSASSignatureValues.version
|
||||
: SERVICE_VERSION;
|
||||
|
||||
const parsedPermissions = AccountSASPermissions.parse(
|
||||
accountSASSignatureValues.permissions
|
||||
).toString();
|
||||
const parsedServices = AccountSASServices.parse(
|
||||
accountSASSignatureValues.services
|
||||
).toString();
|
||||
const parsedResourceTypes = AccountSASResourceTypes.parse(
|
||||
accountSASSignatureValues.resourceTypes
|
||||
).toString();
|
||||
|
||||
const stringToSign = [
|
||||
sharedKeyCredential.accountName,
|
||||
parsedPermissions,
|
||||
parsedServices,
|
||||
parsedResourceTypes,
|
||||
accountSASSignatureValues.startTime
|
||||
? truncatedISO8061Date(accountSASSignatureValues.startTime)
|
||||
: "",
|
||||
truncatedISO8061Date(accountSASSignatureValues.expiryTime),
|
||||
accountSASSignatureValues.ipRange
|
||||
? ipRangeToString(accountSASSignatureValues.ipRange)
|
||||
: "",
|
||||
accountSASSignatureValues.protocol
|
||||
? accountSASSignatureValues.protocol
|
||||
: "",
|
||||
version,
|
||||
"" // Account SAS requires an additional newline character
|
||||
].join("\n");
|
||||
|
||||
const signature: string = sharedKeyCredential.computeHMACSHA256(stringToSign);
|
||||
|
||||
return new SASQueryParameters(
|
||||
version,
|
||||
parsedPermissions,
|
||||
signature,
|
||||
parsedServices,
|
||||
parsedResourceTypes,
|
||||
accountSASSignatureValues.protocol,
|
||||
accountSASSignatureValues.startTime,
|
||||
accountSASSignatureValues.expiryTime,
|
||||
accountSASSignatureValues.ipRange
|
||||
);
|
||||
}
|
|
@ -0,0 +1,251 @@
|
|||
import { BlobSASPermissions } from "./BlobSASPermissions";
|
||||
import { ContainerSASPermissions } from "./ContainerSASPermissions";
|
||||
import { SharedKeyCredential } from "./credentials/SharedKeyCredential";
|
||||
import { IIPRange, ipRangeToString } from "./IIPRange";
|
||||
import { SASProtocol } from "./SASQueryParameters";
|
||||
import { SASQueryParameters } from "./SASQueryParameters";
|
||||
import { SERVICE_VERSION } from "./utils/constants";
|
||||
import { truncatedISO8061Date } from "./utils/utils.common";
|
||||
|
||||
/**
 * ONLY AVAILABLE IN NODE.JS RUNTIME.
 *
 * IBlobSASSignatureValues is used to help generating Blob service SAS tokens for containers or blobs.
 *
 * @export
 * @class IBlobSASSignatureValues
 */
export interface IBlobSASSignatureValues {
  /**
   * The version of the service this SAS will target. If not specified, it will default to the version targeted by the
   * library.
   *
   * @type {string}
   * @memberof IBlobSASSignatureValues
   */
  version?: string;

  /**
   * Optional. SAS protocols, HTTPS only or HTTPSandHTTP
   *
   * @type {SASProtocol}
   * @memberof IBlobSASSignatureValues
   */
  protocol?: SASProtocol;

  /**
   * Optional. When the SAS will take effect.
   *
   * @type {Date}
   * @memberof IBlobSASSignatureValues
   */
  startTime?: Date;

  /**
   * Optional only when identifier is provided. The time after which the SAS will no longer work.
   *
   * @type {Date}
   * @memberof IBlobSASSignatureValues
   */
  expiryTime?: Date;

  /**
   * Optional only when identifier is provided.
   * Please refer to either {@link ContainerSASPermissions} or {@link BlobSASPermissions} depending on the resource
   * being accessed for help constructing the permissions string.
   *
   * @type {string}
   * @memberof IBlobSASSignatureValues
   */
  permissions?: string;

  /**
   * Optional. IP ranges allowed in this SAS.
   *
   * @type {IIPRange}
   * @memberof IBlobSASSignatureValues
   */
  ipRange?: IIPRange;

  /**
   * The name of the container the SAS user may access.
   *
   * @type {string}
   * @memberof IBlobSASSignatureValues
   */
  containerName: string;

  /**
   * Optional. The name of the blob the SAS user may access. When omitted,
   * the SAS is scoped to the container.
   *
   * @type {string}
   * @memberof IBlobSASSignatureValues
   */
  blobName?: string;

  /**
   * Optional. The name of the access policy on the container this SAS references if any.
   *
   * @see https://docs.microsoft.com/en-us/rest/api/storageservices/establishing-a-stored-access-policy
   *
   * @type {string}
   * @memberof IBlobSASSignatureValues
   */
  identifier?: string;

  /**
   * Optional. The cache-control header for the SAS.
   *
   * @type {string}
   * @memberof IBlobSASSignatureValues
   */
  cacheControl?: string;

  /**
   * Optional. The content-disposition header for the SAS.
   *
   * @type {string}
   * @memberof IBlobSASSignatureValues
   */
  contentDisposition?: string;

  /**
   * Optional. The content-encoding header for the SAS.
   *
   * @type {string}
   * @memberof IBlobSASSignatureValues
   */
  contentEncoding?: string;

  /**
   * Optional. The content-language header for the SAS.
   *
   * @type {string}
   * @memberof IBlobSASSignatureValues
   */
  contentLanguage?: string;

  /**
   * Optional. The content-type header for the SAS.
   *
   * @type {string}
   * @memberof IBlobSASSignatureValues
   */
  contentType?: string;
}
|
||||
|
||||
/**
 * ONLY AVAILABLE IN NODE.JS RUNTIME.
 *
 * Creates an instance of SASQueryParameters.
 *
 * Only accepts required settings needed to create a SAS. For optional settings please
 * set corresponding properties directly, such as permissions, startTime and identifier.
 *
 * WARNING: When identifier is not provided, permissions and expiryTime are required.
 * You MUST assign value to identifier or expiryTime & permissions manually if you initial with
 * this constructor.
 *
 * @export
 * @param {IBlobSASSignatureValues} blobSASSignatureValues
 * @param {SharedKeyCredential} sharedKeyCredential
 * @returns {SASQueryParameters}
 */
export function generateBlobSASQueryParameters(
  blobSASSignatureValues: IBlobSASSignatureValues,
  sharedKeyCredential: SharedKeyCredential
): SASQueryParameters {
  // Without a stored access policy (identifier), the SAS itself must carry
  // both the permissions and an expiry time.
  if (
    !blobSASSignatureValues.identifier &&
    (!blobSASSignatureValues.permissions && !blobSASSignatureValues.expiryTime)
  ) {
    throw new RangeError(
      "Must provide 'permissions' and 'expiryTime' for Blob SAS generation when 'identifier' is not provided."
    );
  }

  const version = blobSASSignatureValues.version
    ? blobSASSignatureValues.version
    : SERVICE_VERSION;
  // Resource is "c" (container) unless a blob name is given, then "b" (blob).
  let resource: string = "c";
  let verifiedPermissions: string | undefined;

  // Calling parse and toString guarantees the proper ordering and throws on invalid characters.
  if (blobSASSignatureValues.permissions) {
    if (blobSASSignatureValues.blobName) {
      verifiedPermissions = BlobSASPermissions.parse(
        blobSASSignatureValues.permissions
      ).toString();
      resource = "b";
    } else {
      verifiedPermissions = ContainerSASPermissions.parse(
        blobSASSignatureValues.permissions
      ).toString();
    }
  }

  // Signature is generated on the un-url-encoded values.
  // The field order below is mandated by the service; do not reorder.
  const stringToSign = [
    verifiedPermissions ? verifiedPermissions : "",
    blobSASSignatureValues.startTime
      ? truncatedISO8061Date(blobSASSignatureValues.startTime)
      : "",
    blobSASSignatureValues.expiryTime
      ? truncatedISO8061Date(blobSASSignatureValues.expiryTime)
      : "",
    getCanonicalName(
      sharedKeyCredential.accountName,
      blobSASSignatureValues.containerName,
      blobSASSignatureValues.blobName
    ),
    blobSASSignatureValues.identifier,
    blobSASSignatureValues.ipRange
      ? ipRangeToString(blobSASSignatureValues.ipRange)
      : "",
    blobSASSignatureValues.protocol ? blobSASSignatureValues.protocol : "",
    version,
    blobSASSignatureValues.cacheControl
      ? blobSASSignatureValues.cacheControl
      : "",
    blobSASSignatureValues.contentDisposition
      ? blobSASSignatureValues.contentDisposition
      : "",
    blobSASSignatureValues.contentEncoding
      ? blobSASSignatureValues.contentEncoding
      : "",
    blobSASSignatureValues.contentLanguage
      ? blobSASSignatureValues.contentLanguage
      : "",
    blobSASSignatureValues.contentType ? blobSASSignatureValues.contentType : ""
  ].join("\n");

  const signature = sharedKeyCredential.computeHMACSHA256(stringToSign);

  return new SASQueryParameters(
    version,
    signature,
    verifiedPermissions,
    undefined,
    undefined,
    blobSASSignatureValues.protocol,
    blobSASSignatureValues.startTime,
    blobSASSignatureValues.expiryTime,
    blobSASSignatureValues.ipRange,
    blobSASSignatureValues.identifier,
    resource
  );
}
|
||||
|
||||
function getCanonicalName(
|
||||
accountName: string,
|
||||
containerName: string,
|
||||
blobName?: string
|
||||
): string {
|
||||
// Container: "/blob/account/containerName"
|
||||
// Blob: "/blob/account/containerName/blobName"
|
||||
const elements: string[] = [`/blob/${accountName}/${containerName}`];
|
||||
if (blobName) {
|
||||
elements.push(`/${blobName}`);
|
||||
}
|
||||
return elements.join("");
|
||||
}
|
|
@ -0,0 +1,37 @@
|
|||
/**
 * Allowed IP range for a SAS.
 *
 * @export
 * @interface IIPRange
 */
export interface IIPRange {
  /**
   * Starting IP address in the IP range.
   * If the end IP is not provided, the start IP will be the only IP allowed.
   *
   * @type {string}
   * @memberof IIPRange
   */
  start: string;
  /**
   * Optional. IP address that ends the IP range.
   * If not provided, the start IP will be the only IP allowed.
   *
   * @type {string}
   * @memberof IIPRange
   */
  end?: string;
}
|
||||
|
||||
/**
|
||||
* Generate IPRange format string. For example:
|
||||
*
|
||||
* "8.8.8.8" or "1.1.1.1-255.255.255.255"
|
||||
*
|
||||
* @export
|
||||
* @param {IIPRange} ipRange
|
||||
* @returns {string}
|
||||
*/
|
||||
export function ipRangeToString(ipRange: IIPRange): string {
|
||||
return ipRange.end ? `${ipRange.start}-${ipRange.end}` : ipRange.start;
|
||||
}
|
|
@ -0,0 +1,48 @@
|
|||
// tslint:disable:max-line-length
|
||||
/**
 * Range for Blob Service Operations.
 * @see https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-the-range-header-for-blob-service-operations
 *
 * @export
 * @interface IRange
 */
export interface IRange {
  /**
   * StartByte, larger than or equal 0.
   *
   * @type {number}
   * @memberof IRange
   */
  offset: number;
  /**
   * Optional. Count of bytes, larger than 0.
   * If not provided, will return bytes from offset to the end.
   *
   * @type {number}
   * @memberof IRange
   */
  count?: number;
}
|
||||
|
||||
/**
|
||||
* Generate a range string. For example:
|
||||
*
|
||||
* "bytes=255-" or "bytes=0-511"
|
||||
*
|
||||
* @export
|
||||
* @param {IRange} iRange
|
||||
* @returns {string}
|
||||
*/
|
||||
export function rangeToString(iRange: IRange): string {
|
||||
if (iRange.offset < 0) {
|
||||
throw new RangeError(`IRange.offset cannot be smaller than 0.`);
|
||||
}
|
||||
if (iRange.count && iRange.count <= 0) {
|
||||
throw new RangeError(
|
||||
`IRange.count must be larger than 0. Leave it undefined if you want a range from offset to the end.`
|
||||
);
|
||||
}
|
||||
return iRange.count
|
||||
? `bytes=${iRange.offset}-${iRange.offset + iRange.count - 1}`
|
||||
: `bytes=${iRange.offset}-`;
|
||||
}
|
|
@ -0,0 +1,45 @@
|
|||
import {
|
||||
RequestPolicy,
|
||||
RequestPolicyFactory,
|
||||
RequestPolicyOptions
|
||||
} from "ms-rest-js";
|
||||
|
||||
import { LoggingPolicy } from "./policies/LoggingPolicy";
|
||||
|
||||
/**
 * IRequestLogOptions configures the logging policy's behavior.
 *
 * @export
 * @interface IRequestLogOptions
 */
export interface IRequestLogOptions {
  /**
   * logWarningIfTryOverThreshold logs a warning if a tried operation takes longer than the specified
   * duration in ms. Default is 3000ms.
   * @type {number}
   * @memberof IRequestLogOptions
   */
  logWarningIfTryOverThreshold: number;
}
|
||||
|
||||
/**
|
||||
* LoggingPolicyFactory is a factory class helping generating LoggingPolicy objects.
|
||||
*
|
||||
* @export
|
||||
* @class LoggingPolicyFactory
|
||||
* @implements {RequestPolicyFactory}
|
||||
*/
|
||||
export class LoggingPolicyFactory implements RequestPolicyFactory {
|
||||
private readonly loggingOptions?: IRequestLogOptions;
|
||||
|
||||
constructor(loggingOptions?: IRequestLogOptions) {
|
||||
this.loggingOptions = loggingOptions;
|
||||
}
|
||||
|
||||
public create(
|
||||
nextPolicy: RequestPolicy,
|
||||
options: RequestPolicyOptions
|
||||
): LoggingPolicy {
|
||||
return new LoggingPolicy(nextPolicy, options, this.loggingOptions);
|
||||
}
|
||||
}
|
|
@ -0,0 +1,382 @@
|
|||
import { HttpRequestBody, TransferProgressEvent } from "ms-rest-js";
|
||||
|
||||
import * as Models from "../lib/generated/models";
|
||||
import { Aborter } from "./Aborter";
|
||||
import { BlobURL } from "./BlobURL";
|
||||
import { ContainerURL } from "./ContainerURL";
|
||||
import { PageBlob } from "./generated/operations";
|
||||
import { rangeToString } from "./IRange";
|
||||
import {
|
||||
IBlobAccessConditions,
|
||||
IMetadata,
|
||||
IPageBlobAccessConditions
|
||||
} from "./models";
|
||||
import { Pipeline } from "./Pipeline";
|
||||
import { URLConstants } from "./utils/constants";
|
||||
import { appendToURLPath, setURLParameter } from "./utils/utils.common";
|
||||
|
||||
/**
 * Options for PageBlobURL.create.
 *
 * @export
 * @interface IPageBlobCreateOptions
 */
export interface IPageBlobCreateOptions {
  // Conditions (lease / modified-access) that must be met for the request to succeed.
  accessConditions?: IBlobAccessConditions;
  // Optional initial sequence number for the new page blob.
  blobSequenceNumber?: number;
  // Standard blob HTTP headers (Content-Type, Content-Encoding, ...) to set on the blob.
  blobHTTPHeaders?: Models.BlobHTTPHeaders;
  // Name/value pairs stored with the blob as user metadata.
  metadata?: IMetadata;
}

/**
 * Options for PageBlobURL.uploadPages.
 *
 * @export
 * @interface IPageBlobUploadPagesOptions
 */
export interface IPageBlobUploadPagesOptions {
  // Conditions, including sequence-number conditions, required for the upload to succeed.
  accessConditions?: IPageBlobAccessConditions;
  // Callback invoked with upload progress events.
  progress?: (progress: TransferProgressEvent) => void;
  // MD5 of the request body, used by the service to validate transactional integrity.
  transactionalContentMD5?: Uint8Array;
}

/**
 * Options for PageBlobURL.clearPages.
 *
 * @export
 * @interface IPageBlobClearPagesOptions
 */
export interface IPageBlobClearPagesOptions {
  accessConditions?: IPageBlobAccessConditions;
}

/**
 * Options for PageBlobURL.getPageRanges.
 *
 * @export
 * @interface IPageBlobGetPageRangesOptions
 */
export interface IPageBlobGetPageRangesOptions {
  accessConditions?: IBlobAccessConditions;
}

/**
 * Options for PageBlobURL.getPageRangesDiff.
 *
 * @export
 * @interface IPageBlobGetPageRangesDiffOptions
 */
export interface IPageBlobGetPageRangesDiffOptions {
  accessConditions?: IBlobAccessConditions;
  // NOTE(review): unused by getPageRangesDiff, which builds its range from
  // offset/count parameters — confirm whether this field is intentional.
  range?: string;
}

/**
 * Options for PageBlobURL.resize.
 *
 * @export
 * @interface IPageBlobResizeOptions
 */
export interface IPageBlobResizeOptions {
  accessConditions?: IBlobAccessConditions;
}

/**
 * Options for PageBlobURL.updateSequenceNumber.
 *
 * @export
 * @interface IPageBlobUpdateSequenceNumberOptions
 */
export interface IPageBlobUpdateSequenceNumberOptions {
  accessConditions?: IBlobAccessConditions;
}

/**
 * Options for PageBlobURL.startCopyIncremental.
 *
 * @export
 * @interface IPageBlobStartCopyIncrementalOptions
 */
export interface IPageBlobStartCopyIncrementalOptions {
  modifiedAccessConditions?: Models.ModifiedAccessConditions;
}
|
||||
|
||||
/**
 * PageBlobURL defines a set of operations applicable to page blobs.
 *
 * @export
 * @class PageBlobURL
 * @extends {BlobURL}
 */
export class PageBlobURL extends BlobURL {
  /**
   * Creates a PageBlobURL object from ContainerURL instance.
   *
   * @static
   * @param {ContainerURL} containerURL
   * @param {string} blobName
   * @returns {PageBlobURL}
   * @memberof PageBlobURL
   */
  public static fromContainerURL(
    containerURL: ContainerURL,
    blobName: string
  ): PageBlobURL {
    return new PageBlobURL(
      appendToURLPath(containerURL.url, blobName),
      containerURL.pipeline
    );
  }

  /**
   * Creates a PageBlobURL object from BlobURL instance.
   *
   * @static
   * @param {BlobURL} blobURL
   * @returns {PageBlobURL}
   * @memberof PageBlobURL
   */
  public static fromBlobURL(blobURL: BlobURL): PageBlobURL {
    return new PageBlobURL(blobURL.url, blobURL.pipeline);
  }

  /**
   * pageBlobContext provided by protocol layer.
   *
   * @private
   * @type {PageBlob}
   * @memberof PageBlobURL
   */
  private pageBlobContext: PageBlob;

  /**
   * Creates an instance of PageBlobURL.
   * @param {string} url A URL string pointing to Azure Storage page blob, such as
   *                     "https://myaccount.blob.core.windows.net/mycontainer/pageblob". You can
   *                     append a SAS if using AnonymousCredential, such as
   *                     "https://myaccount.blob.core.windows.net/mycontainer/pageblob?sasString".
   * @param {Pipeline} pipeline Call StorageURL.newPipeline() to create a default
   *                            pipeline, or provide a customized pipeline.
   * @memberof PageBlobURL
   */
  constructor(url: string, pipeline: Pipeline) {
    super(url, pipeline);
    this.pageBlobContext = new PageBlob(this.storageClientContext);
  }

  /**
   * Creates a new PageBlobURL object identical to the source but with the
   * specified request policy pipeline.
   *
   * @param {Pipeline} pipeline
   * @returns {PageBlobURL}
   * @memberof PageBlobURL
   */
  public withPipeline(pipeline: Pipeline): PageBlobURL {
    return new PageBlobURL(this.url, pipeline);
  }

  /**
   * Creates a new PageBlobURL object identical to the source but with the
   * specified snapshot timestamp.
   * Provide "" will remove the snapshot and return a URL to the base blob.
   *
   * @param {string} snapshot
   * @returns {PageBlobURL}
   * @memberof PageBlobURL
   */
  public withSnapshot(snapshot: string): PageBlobURL {
    // An empty snapshot value clears the query parameter instead of setting it.
    return new PageBlobURL(
      setURLParameter(
        this.url,
        URLConstants.Parameters.SNAPSHOT,
        snapshot.length === 0 ? undefined : snapshot
      ),
      this.pipeline
    );
  }

  /**
   * Creates a page blob of the specified length. Call uploadPages to upload
   * data to a page blob.
   * @see https://docs.microsoft.com/rest/api/storageservices/put-blob
   *
   * @param {Aborter} aborter Create a new Aborter instance with Aborter.none or Aborter.timeout(),
   *                          goto documents of Aborter for more examples about request cancellation
   * @param {number} size Maximum size in bytes of the page blob.
   * @param {IPageBlobCreateOptions} [options]
   * @returns {Promise<Models.PageBlobCreateResponse>}
   * @memberof PageBlobURL
   */
  public async create(
    aborter: Aborter,
    size: number,
    options: IPageBlobCreateOptions = {}
  ): Promise<Models.PageBlobCreateResponse> {
    options.accessConditions = options.accessConditions || {};
    // The first argument (contentLength) is 0: Put Blob for a page blob sends no body.
    return this.pageBlobContext.create(0, size, {
      abortSignal: aborter,
      blobHTTPHeaders: options.blobHTTPHeaders,
      blobSequenceNumber: options.blobSequenceNumber,
      leaseAccessConditions: options.accessConditions.leaseAccessConditions,
      metadata: options.metadata,
      modifiedAccessConditions:
        options.accessConditions.modifiedAccessConditions
    });
  }

  /**
   * Writes 1 or more pages to the page blob. The start and end offsets must be a multiple of 512.
   * @see https://docs.microsoft.com/rest/api/storageservices/put-page
   *
   * @param {Aborter} aborter Create a new Aborter instance with Aborter.none or Aborter.timeout(),
   *                          goto documents of Aborter for more examples about request cancellation
   * @param {HttpRequestBody} body
   * @param {number} offset Offset of destination page blob
   * @param {number} count Content length of body, also how many bytes to be uploaded
   * @param {IPageBlobUploadPagesOptions} [options]
   * @returns {Promise<Models.PageBlobUploadPagesResponse>}
   * @memberof PageBlobURL
   */
  public async uploadPages(
    aborter: Aborter,
    body: HttpRequestBody,
    offset: number,
    count: number,
    options: IPageBlobUploadPagesOptions = {}
  ): Promise<Models.PageBlobUploadPagesResponse> {
    options.accessConditions = options.accessConditions || {};
    return this.pageBlobContext.uploadPages(body, count, {
      abortSignal: aborter,
      leaseAccessConditions: options.accessConditions.leaseAccessConditions,
      modifiedAccessConditions:
        options.accessConditions.modifiedAccessConditions,
      onUploadProgress: options.progress,
      range: rangeToString({ offset, count }),
      sequenceNumberAccessConditions:
        options.accessConditions.sequenceNumberAccessConditions,
      transactionalContentMD5: options.transactionalContentMD5
    });
  }

  /**
   * Frees the specified pages from the page blob.
   * @see https://docs.microsoft.com/rest/api/storageservices/put-page
   *
   * @param {Aborter} aborter Create a new Aborter instance with Aborter.none or Aborter.timeout(),
   *                          goto documents of Aborter for more examples about request cancellation
   * @param {number} offset
   * @param {number} count
   * @param {IPageBlobClearPagesOptions} [options]
   * @returns {Promise<Models.PageBlobClearPagesResponse>}
   * @memberof PageBlobURL
   */
  public async clearPages(
    aborter: Aborter,
    offset: number,
    count: number,
    options: IPageBlobClearPagesOptions = {}
  ): Promise<Models.PageBlobClearPagesResponse> {
    options.accessConditions = options.accessConditions || {};
    // contentLength is 0: a clear-pages request carries no body; the cleared
    // span is conveyed through the range header instead.
    return this.pageBlobContext.clearPages(0, {
      abortSignal: aborter,
      leaseAccessConditions: options.accessConditions.leaseAccessConditions,
      modifiedAccessConditions:
        options.accessConditions.modifiedAccessConditions,
      range: rangeToString({ offset, count }),
      sequenceNumberAccessConditions:
        options.accessConditions.sequenceNumberAccessConditions
    });
  }

  /**
   * Returns the list of valid page ranges for a page blob or snapshot of a page blob.
   * @see https://docs.microsoft.com/rest/api/storageservices/get-page-ranges
   *
   * @param {Aborter} aborter Create a new Aborter instance with Aborter.none or Aborter.timeout(),
   *                          goto documents of Aborter for more examples about request cancellation
   * @param {number} offset
   * @param {number} count
   * @param {IPageBlobGetPageRangesOptions} [options]
   * @returns {Promise<Models.PageBlobGetPageRangesResponse>}
   * @memberof PageBlobURL
   */
  public async getPageRanges(
    aborter: Aborter,
    offset: number,
    count: number,
    options: IPageBlobGetPageRangesOptions = {}
  ): Promise<Models.PageBlobGetPageRangesResponse> {
    options.accessConditions = options.accessConditions || {};
    return this.pageBlobContext.getPageRanges({
      abortSignal: aborter,
      leaseAccessConditions: options.accessConditions.leaseAccessConditions,
      modifiedAccessConditions:
        options.accessConditions.modifiedAccessConditions,
      range: rangeToString({ offset, count })
    });
  }

  /**
   * Gets the collection of page ranges that differ between a specified snapshot and this page blob.
   * @see https://docs.microsoft.com/rest/api/storageservices/get-page-ranges
   *
   * @param {Aborter} aborter Create a new Aborter instance with Aborter.none or Aborter.timeout(),
   *                          goto documents of Aborter for more examples about request cancellation
   * @param {number} offset
   * @param {number} count
   * @param {string} prevSnapshot Timestamp of the snapshot to diff against.
   * @param {IPageBlobGetPageRangesDiffOptions} [options]
   * @returns {Promise<Models.PageBlobGetPageRangesDiffResponse>}
   * @memberof PageBlobURL
   */
  public async getPageRangesDiff(
    aborter: Aborter,
    offset: number,
    count: number,
    prevSnapshot: string,
    options: IPageBlobGetPageRangesDiffOptions = {}
  ): Promise<Models.PageBlobGetPageRangesDiffResponse> {
    options.accessConditions = options.accessConditions || {};
    return this.pageBlobContext.getPageRangesDiff({
      abortSignal: aborter,
      leaseAccessConditions: options.accessConditions.leaseAccessConditions,
      modifiedAccessConditions:
        options.accessConditions.modifiedAccessConditions,
      prevsnapshot: prevSnapshot,
      range: rangeToString({ offset, count })
    });
  }

  /**
   * Resizes the page blob to the specified size (which must be a multiple of 512).
   * @see https://docs.microsoft.com/rest/api/storageservices/set-blob-properties
   *
   * @param {Aborter} aborter Create a new Aborter instance with Aborter.none or Aborter.timeout(),
   *                          goto documents of Aborter for more examples about request cancellation
   * @param {number} size
   * @param {IPageBlobResizeOptions} [options]
   * @returns {Promise<Models.PageBlobResizeResponse>}
   * @memberof PageBlobURL
   */
  public async resize(
    aborter: Aborter,
    size: number,
    options: IPageBlobResizeOptions = {}
  ): Promise<Models.PageBlobResizeResponse> {
    options.accessConditions = options.accessConditions || {};
    return this.pageBlobContext.resize(size, {
      abortSignal: aborter,
      leaseAccessConditions: options.accessConditions.leaseAccessConditions,
      modifiedAccessConditions:
        options.accessConditions.modifiedAccessConditions
    });
  }

  /**
   * Sets a page blob's sequence number.
   * @see https://docs.microsoft.com/en-us/rest/api/storageservices/set-blob-properties
   *
   * @param {Aborter} aborter Create a new Aborter instance with Aborter.none or Aborter.timeout(),
   *                          goto documents of Aborter for more examples about request cancellation
   * @param {Models.SequenceNumberActionType} sequenceNumberAction
   * @param {number} [sequenceNumber] Required if sequenceNumberAction is max or update
   * @param {IPageBlobUpdateSequenceNumberOptions} [options]
   * @returns {Promise<Models.PageBlobUpdateSequenceNumberResponse>}
   * @memberof PageBlobURL
   */
  public async updateSequenceNumber(
    aborter: Aborter,
    sequenceNumberAction: Models.SequenceNumberActionType,
    sequenceNumber?: number,
    options: IPageBlobUpdateSequenceNumberOptions = {}
  ): Promise<Models.PageBlobUpdateSequenceNumberResponse> {
    options.accessConditions = options.accessConditions || {};
    return this.pageBlobContext.updateSequenceNumber(sequenceNumberAction, {
      abortSignal: aborter,
      blobSequenceNumber: sequenceNumber,
      leaseAccessConditions: options.accessConditions.leaseAccessConditions,
      modifiedAccessConditions:
        options.accessConditions.modifiedAccessConditions
    });
  }

  /**
   * Begins an operation to start an incremental copy from one page blob's snapshot to this page blob.
   * The snapshot is copied such that only the differential changes between the previously
   * copied snapshot are transferred to the destination.
   * The copied snapshots are complete copies of the original snapshot and can be read or copied from as usual.
   * @see https://docs.microsoft.com/rest/api/storageservices/incremental-copy-blob
   * @see https://docs.microsoft.com/en-us/azure/virtual-machines/windows/incremental-snapshots
   *
   * @param {Aborter} aborter Create a new Aborter instance with Aborter.none or Aborter.timeout(),
   *                          goto documents of Aborter for more examples about request cancellation
   * @param {string} copySource Specifies the name of the source page blob snapshot. For example,
   *                            https://myaccount.blob.core.windows.net/mycontainer/myblob?snapshot=<DateTime>
   * @param {IPageBlobStartCopyIncrementalOptions} [options]
   * @returns {Promise<Models.PageBlobCopyIncrementalResponse>}
   * @memberof PageBlobURL
   */
  public async startCopyIncremental(
    aborter: Aborter,
    copySource: string,
    options: IPageBlobStartCopyIncrementalOptions = {}
  ): Promise<Models.PageBlobCopyIncrementalResponse> {
    return this.pageBlobContext.copyIncremental(copySource, {
      abortSignal: aborter,
      modifiedAccessConditions: options.modifiedAccessConditions
    });
  }
}
|
|
@ -0,0 +1,84 @@
|
|||
import {
|
||||
BaseRequestPolicy,
|
||||
HttpClient as IHttpClient,
|
||||
HttpHeaders,
|
||||
HttpOperationResponse,
|
||||
HttpPipelineLogger as IHttpPipelineLogger,
|
||||
HttpPipelineLogLevel,
|
||||
RequestPolicy,
|
||||
RequestPolicyFactory,
|
||||
RequestPolicyOptions,
|
||||
ServiceClientOptions,
|
||||
WebResource
|
||||
} from "ms-rest-js";
|
||||
|
||||
// Export following interfaces and types for customers who want to implement their
|
||||
// own RequestPolicy or HTTPClient
|
||||
export {
|
||||
IHttpClient,
|
||||
IHttpPipelineLogger,
|
||||
HttpHeaders,
|
||||
HttpPipelineLogLevel,
|
||||
HttpOperationResponse,
|
||||
WebResource,
|
||||
BaseRequestPolicy,
|
||||
RequestPolicyFactory,
|
||||
RequestPolicy,
|
||||
RequestPolicyOptions
|
||||
};
|
||||
|
||||
/**
 * Option interface for Pipeline constructor.
 *
 * @export
 * @interface IPipelineOptions
 */
export interface IPipelineOptions {
  // Optional pipeline logger, forwarded to the service client as httpPipelineLogger.
  logger?: IHttpPipelineLogger;
  // Optional custom HTTP client implementation, forwarded to the service client
  // as httpClient; presumably ms-rest-js supplies a default when omitted — confirm.
  HTTPClient?: IHttpClient;
}
|
||||
|
||||
/**
|
||||
* A Pipeline class containing HTTP request policies.
|
||||
* You can create a default Pipeline by calling StorageURL.newPipeline().
|
||||
* Or you can create a Pipeline with your own policies by the constructor of Pipeline.
|
||||
* Refer to StorageURL.newPipeline() and provided policies as reference before
|
||||
* implementing your customized Pipeline.
|
||||
*
|
||||
* @export
|
||||
* @class Pipeline
|
||||
*/
|
||||
export class Pipeline {
|
||||
public readonly factories: RequestPolicyFactory[];
|
||||
public readonly options: IPipelineOptions;
|
||||
|
||||
/**
|
||||
* Creates an instance of Pipeline. Customize HTTPClient by implementing IHttpClient interface.
|
||||
*
|
||||
* @param {RequestPolicyFactory[]} factories
|
||||
* @param {IPipelineOptions} [options={}]
|
||||
* @memberof Pipeline
|
||||
*/
|
||||
constructor(
|
||||
factories: RequestPolicyFactory[],
|
||||
options: IPipelineOptions = {}
|
||||
) {
|
||||
this.factories = factories;
|
||||
this.options = options;
|
||||
}
|
||||
|
||||
/**
|
||||
* Transfer Pipeline object to ServiceClientOptions object which required by
|
||||
* ServiceClient constructor.
|
||||
*
|
||||
* @returns {ServiceClientOptions}
|
||||
* @memberof Pipeline
|
||||
*/
|
||||
public toServiceClientOptions(): ServiceClientOptions {
|
||||
return {
|
||||
httpClient: this.options.HTTPClient,
|
||||
httpPipelineLogger: this.options.logger,
|
||||
requestPolicyFactories: this.factories
|
||||
};
|
||||
}
|
||||
}
|
|
@ -0,0 +1,106 @@
|
|||
import {
|
||||
RequestPolicy,
|
||||
RequestPolicyFactory,
|
||||
RequestPolicyOptions
|
||||
} from "ms-rest-js";
|
||||
|
||||
import { RetryPolicy, RetryPolicyType } from "./policies/RetryPolicy";
|
||||
|
||||
/**
 * Retry options interface.
 *
 * @export
 * @interface IRetryOptions
 */
export interface IRetryOptions {
  /**
   * Optional. RetryPolicyType, default is exponential retry policy.
   *
   * @type {RetryPolicyType}
   * @memberof IRetryOptions
   */
  readonly retryPolicyType?: RetryPolicyType;

  /**
   * Optional. Max try number of attempts, default is 4.
   * A value of 1 means 1 try and no retries.
   * A value smaller than 1 means default retry number of attempts.
   *
   * @type {number}
   * @memberof IRetryOptions
   */
  readonly maxTries?: number;

  /**
   * Optional. Indicates the maximum time in seconds allowed for any single try of an HTTP request.
   * A value of zero or undefined means that you accept our default timeout, 60s or 60 * 1000ms.
   *
   * NOTE: When transferring large amounts of data, the default TryTimeout will probably
   * not be sufficient. You should override this value based on the bandwidth available to
   * the host machine and proximity to the Storage service. A good starting point may be something
   * like (60 seconds per MB of anticipated-payload-size)
   *
   * @type {number}
   * @memberof IRetryOptions
   */
  readonly tryTimeout?: number;

  /**
   * Optional. Specifies the amount of delay to use before retrying an operation (default is 4s or 4 * 1000ms).
   * The delay increases (exponentially or linearly) with each retry up to a maximum specified by
   * maxRetryDelayInMs. If you specify 0, then you must also specify 0 for maxRetryDelayInMs.
   *
   * @type {number}
   * @memberof IRetryOptions
   */
  readonly retryDelayInMs?: number;

  /**
   * Optional. Specifies the maximum delay allowed before retrying an operation (default is 120s or 120 * 1000ms).
   * If you specify 0, then you must also specify 0 for retryDelayInMs.
   *
   * @type {number}
   * @memberof IRetryOptions
   */
  readonly maxRetryDelayInMs?: number;

  /**
   * If a secondaryHost is specified, retries will be tried against this host. If secondaryHost is undefined
   * (the default) then operations are not retried against another host.
   *
   * NOTE: Before setting this field, make sure you understand the issues around
   * reading stale and potentially-inconsistent data at
   * {@link https://docs.microsoft.com/en-us/azure/storage/common/storage-designing-ha-apps-with-ragrs}
   *
   * @type {string}
   * @memberof IRetryOptions
   */
  readonly secondaryHost?: string;
}
|
||||
|
||||
/**
|
||||
* RetryPolicyFactory is a factory class helping generating RetryPolicy objects.
|
||||
*
|
||||
* @export
|
||||
* @class RetryPolicyFactory
|
||||
* @implements {RequestPolicyFactory}
|
||||
*/
|
||||
export class RetryPolicyFactory implements RequestPolicyFactory {
|
||||
private retryOptions?: IRetryOptions;
|
||||
|
||||
/**
|
||||
* Creates an instance of RetryPolicyFactory.
|
||||
* @param {IRetryOptions} [retryOptions]
|
||||
* @memberof RetryPolicyFactory
|
||||
*/
|
||||
constructor(retryOptions?: IRetryOptions) {
|
||||
this.retryOptions = retryOptions;
|
||||
}
|
||||
|
||||
public create(
|
||||
nextPolicy: RequestPolicy,
|
||||
options: RequestPolicyOptions
|
||||
): RetryPolicy {
|
||||
return new RetryPolicy(nextPolicy, options, this.retryOptions);
|
||||
}
|
||||
}
|
|
@ -0,0 +1,288 @@
|
|||
import { IIPRange, ipRangeToString } from "./IIPRange";
|
||||
import { truncatedISO8061Date } from "./utils/utils.common";
|
||||
|
||||
/**
 * Protocols for generated SAS.
 *
 * @export
 * @enum {string}
 */
export enum SASProtocol {
  /**
   * Protocol that allows HTTPS only
   */
  HTTPS = "https",

  /**
   * Protocol that allows both HTTPS and HTTP
   */
  HTTPSandHTTP = "https,http"
}
|
||||
|
||||
/**
|
||||
* Represents the components that make up an Azure Storage SAS' query parameters. This type is not constructed directly
|
||||
* by the user; it is only generated by the {@link AccountSASSignatureValues} and {@link ServiceSASSignatureValues}
|
||||
* types. Once generated, it can be encoded into a {@code String} and appended to a URL directly (though caution should
|
||||
* be taken here in case there are existing query parameters, which might affect the appropriate means of appending
|
||||
* these query parameters).
|
||||
*
|
||||
* NOTE: Instances of this class are immutable.
|
||||
*
|
||||
* @export
|
||||
* @class SASQueryParameters
|
||||
*/
|
||||
export class SASQueryParameters {
|
||||
/**
|
||||
* The storage API version.
|
||||
*
|
||||
* @type {string}
|
||||
* @memberof SASQueryParameters
|
||||
*/
|
||||
public readonly version: string;
|
||||
|
||||
/**
|
||||
* Optional. The allowed HTTP protocol(s).
|
||||
*
|
||||
* @type {SASProtocol}
|
||||
* @memberof SASQueryParameters
|
||||
*/
|
||||
public readonly protocol?: SASProtocol;
|
||||
|
||||
/**
|
||||
* Optional. The start time for this SAS token.
|
||||
*
|
||||
* @type {Date}
|
||||
* @memberof SASQueryParameters
|
||||
*/
|
||||
public readonly startTime?: Date;
|
||||
|
||||
/**
|
||||
* Optional only when identifier is provided. The expiry time for this SAS token.
|
||||
*
|
||||
* @type {Date}
|
||||
* @memberof SASQueryParameters
|
||||
*/
|
||||
public readonly expiryTime?: Date;
|
||||
|
||||
/**
|
||||
* Optional only when identifier is provided.
|
||||
* Please refer to {@link AccountSASPermission}, {@link BlobSASPermission}, or {@link ContainerSASPermission} for
|
||||
* more details.
|
||||
*
|
||||
* @type {string}
|
||||
* @memberof SASQueryParameters
|
||||
*/
|
||||
public readonly permissions?: string;
|
||||
|
||||
/**
|
||||
* Optional. The storage services being accessed (only for Account SAS). Please refer to {@link AccountSASServices}
|
||||
* for more details.
|
||||
*
|
||||
* @type {string}
|
||||
* @memberof SASQueryParameters
|
||||
*/
|
||||
public readonly services?: string;
|
||||
|
||||
/**
|
||||
* Optional. The storage resource types being accessed (only for Account SAS). Please refer to
|
||||
* {@link AccountSASResourceTypes} for more details.
|
||||
*
|
||||
* @type {string}
|
||||
* @memberof SASQueryParameters
|
||||
*/
|
||||
public readonly resourceTypes?: string;
|
||||
|
||||
/**
|
||||
* Optional. The signed identifier (only for {@link ServiceSASSignatureValues}).
|
||||
*
|
||||
* @see https://docs.microsoft.com/en-us/rest/api/storageservices/establishing-a-stored-access-policy
|
||||
*
|
||||
* @type {string}
|
||||
* @memberof SASQueryParameters
|
||||
*/
|
||||
public readonly identifier?: string;
|
||||
|
||||
/**
|
||||
* Optional. The storage container or blob (only for {@link ServiceSASSignatureValues}).
|
||||
*
|
||||
* @type {string}
|
||||
* @memberof SASQueryParameters
|
||||
*/
|
||||
public readonly resource?: string;
|
||||
|
||||
/**
|
||||
* The signature for the SAS token.
|
||||
*
|
||||
* @type {string}
|
||||
* @memberof SASQueryParameters
|
||||
*/
|
||||
public readonly signature: string;
|
||||
|
||||
/**
|
||||
* Inner value of getter ipRange.
|
||||
*
|
||||
* @private
|
||||
* @type {IIPRange}
|
||||
* @memberof SASQueryParameters
|
||||
*/
|
||||
private readonly ipRangeInner?: IIPRange;
|
||||
|
||||
/**
|
||||
* Optional. IP range allowed for this SAS.
|
||||
*
|
||||
* @readonly
|
||||
* @type {(IIPRange | undefined)}
|
||||
* @memberof SASQueryParameters
|
||||
*/
|
||||
public get ipRange(): IIPRange | undefined {
|
||||
if (this.ipRangeInner) {
|
||||
return {
|
||||
end: this.ipRangeInner.end,
|
||||
start: this.ipRangeInner.start
|
||||
};
|
||||
}
|
||||
return undefined;
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates an instance of SASQueryParameters.
|
||||
*
|
||||
* @param {string} version Representing the storage version
|
||||
* @param {string} signature Representing the signature for the SAS token
|
||||
* @param {string} [permissions] Representing the storage permissions
|
||||
* @param {string} [services] Representing the storage services being accessed (only for Account SAS)
|
||||
* @param {string} [resourceTypes] Representing the storage resource types being accessed (only for Account SAS)
|
||||
* @param {SASProtocol} [protocol] Representing the allowed HTTP protocol(s)
|
||||
* @param {Date} [startTime] Representing the start time for this SAS token
|
||||
* @param {Date} [expiryTime] Representing the expiry time for this SAS token
|
||||
* @param {IIPRange} [ipRange] Representing the range of valid IP addresses for this SAS token
|
||||
* @param {string} [identifier] Representing the signed identifier (only for Service SAS)
|
||||
* @param {string} [resource] Representing the storage container or blob (only for Service SAS)
|
||||
* @memberof SASQueryParameters
|
||||
*/
|
||||
constructor(
|
||||
version: string,
|
||||
signature: string,
|
||||
permissions?: string,
|
||||
services?: string,
|
||||
resourceTypes?: string,
|
||||
protocol?: SASProtocol,
|
||||
startTime?: Date,
|
||||
expiryTime?: Date,
|
||||
ipRange?: IIPRange,
|
||||
identifier?: string,
|
||||
resource?: string
|
||||
) {
|
||||
this.version = version;
|
||||
this.services = services;
|
||||
this.resourceTypes = resourceTypes;
|
||||
this.expiryTime = expiryTime;
|
||||
this.permissions = permissions;
|
||||
this.protocol = protocol;
|
||||
this.startTime = startTime;
|
||||
this.ipRangeInner = ipRange;
|
||||
this.identifier = identifier;
|
||||
this.resource = resource;
|
||||
this.signature = signature;
|
||||
}
|
||||
|
||||
/**
|
||||
* Encodes all SAS query parameters into a string that can be appended to a URL.
|
||||
*
|
||||
* @returns {string}
|
||||
* @memberof SASQueryParameters
|
||||
*/
|
||||
public toString(): string {
|
||||
const params: string[] = [
|
||||
"sv",
|
||||
"ss",
|
||||
"srt",
|
||||
"spr",
|
||||
"st",
|
||||
"se",
|
||||
"sip",
|
||||
"si",
|
||||
"sr",
|
||||
"sp",
|
||||
"sig"
|
||||
];
|
||||
const queries: string[] = [];
|
||||
|
||||
for (const param of params) {
|
||||
switch (param) {
|
||||
case "sv":
|
||||
this.tryAppendQueryParameter(queries, param, this.version);
|
||||
break;
|
||||
case "ss":
|
||||
this.tryAppendQueryParameter(queries, param, this.services);
|
||||
break;
|
||||
case "srt":
|
||||
this.tryAppendQueryParameter(queries, param, this.resourceTypes);
|
||||
break;
|
||||
case "spr":
|
||||
this.tryAppendQueryParameter(queries, param, this.protocol);
|
||||
break;
|
||||
case "st":
|
||||
this.tryAppendQueryParameter(
|
||||
queries,
|
||||
param,
|
||||
this.startTime ? truncatedISO8061Date(this.startTime) : undefined
|
||||
);
|
||||
break;
|
||||
case "se":
|
||||
this.tryAppendQueryParameter(
|
||||
queries,
|
||||
param,
|
||||
this.expiryTime ? truncatedISO8061Date(this.expiryTime) : undefined
|
||||
);
|
||||
break;
|
||||
case "sip":
|
||||
this.tryAppendQueryParameter(
|
||||
queries,
|
||||
param,
|
||||
this.ipRange ? ipRangeToString(this.ipRange) : undefined
|
||||
);
|
||||
break;
|
||||
case "si":
|
||||
this.tryAppendQueryParameter(queries, param, this.identifier);
|
||||
break;
|
||||
case "sr":
|
||||
this.tryAppendQueryParameter(queries, param, this.resource);
|
||||
break;
|
||||
case "sp":
|
||||
this.tryAppendQueryParameter(queries, param, this.permissions);
|
||||
break;
|
||||
case "sig":
|
||||
this.tryAppendQueryParameter(queries, param, this.signature);
|
||||
break;
|
||||
}
|
||||
}
|
||||
return queries.join("&");
|
||||
}
|
||||
|
||||
/**
|
||||
* A private helper method used to filter and append query key/value pairs into an array.
|
||||
*
|
||||
* @private
|
||||
* @param {string[]} queries
|
||||
* @param {string} key
|
||||
* @param {string} [value]
|
||||
* @returns {void}
|
||||
* @memberof SASQueryParameters
|
||||
*/
|
||||
private tryAppendQueryParameter(
|
||||
queries: string[],
|
||||
key: string,
|
||||
value?: string
|
||||
): void {
|
||||
if (!value) {
|
||||
return;
|
||||
}
|
||||
|
||||
key = encodeURIComponent(key);
|
||||
value = encodeURIComponent(value);
|
||||
if (key.length > 0 && value.length > 0) {
|
||||
queries.push(`${key}=${value}`);
|
||||
}
|
||||
}
|
||||
}
|
|
@ -0,0 +1,147 @@
|
|||
import * as Models from "../lib/generated/models";
|
||||
import { Aborter } from "./Aborter";
|
||||
import { Service } from "./generated/operations";
|
||||
import { Pipeline } from "./Pipeline";
|
||||
import { StorageURL } from "./StorageURL";
|
||||
|
||||
export interface IServiceListSharesSegmentOptions {
|
||||
/**
|
||||
* Filters the results to return only entries whose
|
||||
* name begins with the specified prefix.
|
||||
*
|
||||
* @type {string}
|
||||
* @memberof IServiceListSharesSegmentOptions
|
||||
*/
|
||||
prefix?: string;
|
||||
|
||||
/**
|
||||
* Specifies the maximum number of entries to
|
||||
* return. If the request does not specify maxresults, or specifies a value
|
||||
* greater than 5,000, the server will return up to 5,000 items.
|
||||
*
|
||||
* @type {number}
|
||||
* @memberof IServiceListSharesSegmentOptions
|
||||
*/
|
||||
maxresults?: number;
|
||||
|
||||
/**
|
||||
* Include this parameter to
|
||||
* specify one or more datasets to include in the response.
|
||||
*
|
||||
* @type {Models.ListSharesIncludeType[]}
|
||||
* @memberof IServiceListSharesSegmentOptions
|
||||
*/
|
||||
include?: Models.ListSharesIncludeType[];
|
||||
}
|
||||
|
||||
/**
|
||||
* A ServiceURL represents a URL to the Azure Storage File service allowing you
|
||||
* to manipulate file shares.
|
||||
*
|
||||
* @export
|
||||
* @class ServiceURL
|
||||
* @extends {StorageURL}
|
||||
*/
|
||||
export class ServiceURL extends StorageURL {
|
||||
/**
|
||||
* serviceContext provided by protocol layer.
|
||||
*
|
||||
* @private
|
||||
* @type {Service}
|
||||
* @memberof ServiceURL
|
||||
*/
|
||||
private serviceContext: Service;
|
||||
|
||||
/**
|
||||
* Creates an instance of ServiceURL.
|
||||
*
|
||||
* @param {string} url A URL string pointing to Azure Storage file service, such as
|
||||
* "https://myaccount.file.core.windows.net". You can Append a SAS
|
||||
* if using AnonymousCredential, such as "https://myaccount.file.core.windows.net?sasString".
|
||||
* @param {Pipeline} pipeline Call StorageURL.newPipeline() to create a default
|
||||
* pipeline, or provide a customized pipeline.
|
||||
* @memberof ServiceURL
|
||||
*/
|
||||
constructor(url: string, pipeline: Pipeline) {
|
||||
super(url, pipeline);
|
||||
this.serviceContext = new Service(this.storageClientContext);
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates a new ServiceURL object identical to the source but with the
|
||||
* specified request policy pipeline.
|
||||
*
|
||||
* @param {Pipeline} pipeline
|
||||
* @returns {ServiceURL}
|
||||
* @memberof ServiceURL
|
||||
*/
|
||||
public withPipeline(pipeline: Pipeline): ServiceURL {
|
||||
return new ServiceURL(this.url, pipeline);
|
||||
}
|
||||
|
||||
/**
|
||||
* Gets the properties of a storage account’s file service, including properties
|
||||
* for Storage Analytics and CORS (Cross-Origin Resource Sharing) rules.
|
||||
* @see https://docs.microsoft.com/en-us/rest/api/storageservices/get-file-service-properties}
|
||||
*
|
||||
* @param {Aborter} aborter Create a new Aborter instance with Aborter.none or Aborter.timeout(),
|
||||
* goto documents of Aborter for more examples about request cancellation
|
||||
* @returns {Promise<Models.ServiceGetPropertiesResponse>}
|
||||
* @memberof ServiceURL
|
||||
*/
|
||||
public async getProperties(
|
||||
aborter: Aborter
|
||||
): Promise<Models.ServiceGetPropertiesResponse> {
|
||||
return this.serviceContext.getProperties({
|
||||
abortSignal: aborter
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Sets properties for a storage account’s file service endpoint, including properties
|
||||
* for Storage Analytics, CORS (Cross-Origin Resource Sharing) rules and soft delete settings.
|
||||
* @see https://docs.microsoft.com/en-us/rest/api/storageservices/set-file-service-properties}
|
||||
*
|
||||
* @param {Aborter} aborter Create a new Aborter instance with Aborter.none or Aborter.timeout(),
|
||||
* goto documents of Aborter for more examples about request cancellation
|
||||
* @param {Models.StorageServiceProperties} properties
|
||||
* @returns {Promise<Models.ServiceSetPropertiesResponse>}
|
||||
* @memberof ServiceURL
|
||||
*/
|
||||
public async setProperties(
|
||||
aborter: Aborter,
|
||||
properties: Models.StorageServiceProperties
|
||||
): Promise<Models.ServiceSetPropertiesResponse> {
|
||||
return this.serviceContext.setProperties(properties, {
|
||||
abortSignal: aborter
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Gets the properties of a storage account's File service, including properties for Storage
|
||||
* Analytics metrics and CORS (Cross-Origin Resource Sharing) rules.
|
||||
*
|
||||
* @param {Aborter} aborter Create a new Aborter instance with Aborter.none or Aborter.timeout(),
|
||||
* goto documents of Aborter for more examples about request cancellation
|
||||
* @param {string} [marker] A string value that identifies the portion of
|
||||
* the list to be returned with the next list operation. The operation
|
||||
* returns a marker value within the response body if the list returned was
|
||||
* not complete. The marker value may then be used in a subsequent call to
|
||||
* request the next set of list items. The marker value is opaque to the
|
||||
* client.
|
||||
* @param {IServiceListSharesSegmentOptions} [options={}]
|
||||
* @returns {Promise<Models.ServiceListSharesSegmentResponse>}
|
||||
* @memberof ServiceURL
|
||||
*/
|
||||
public async listSharesSegment(
|
||||
aborter: Aborter,
|
||||
marker?: string,
|
||||
options: IServiceListSharesSegmentOptions = {}
|
||||
): Promise<Models.ServiceListSharesSegmentResponse> {
|
||||
return this.serviceContext.listSharesSegment({
|
||||
abortSignal: aborter,
|
||||
marker,
|
||||
...options
|
||||
});
|
||||
}
|
||||
}
|
|
@ -0,0 +1,440 @@
|
|||
import { HttpResponse } from "ms-rest-js";
|
||||
import { Aborter } from "./Aborter";
|
||||
import * as Models from "./generated/models";
|
||||
import { Share } from "./generated/operations";
|
||||
import { Pipeline } from "./Pipeline";
|
||||
import { ServiceURL } from "./ServiceURL";
|
||||
import { StorageURL } from "./StorageURL";
|
||||
import { ETagNone, URLConstants } from "./utils/constants";
|
||||
import {
|
||||
appendToURLPath,
|
||||
setURLParameter,
|
||||
truncatedISO8061Date
|
||||
} from "./utils/utils.common";
|
||||
|
||||
export interface IShareCreateOptions {
|
||||
/**
|
||||
* A name-value pair to associate with a file storage object.
|
||||
*
|
||||
* @type {{ [propertyName: string]: string }}
|
||||
* @memberof IShareCreateOptions
|
||||
*/
|
||||
metadata?: { [propertyName: string]: string };
|
||||
|
||||
/**
|
||||
* Specifies the maximum size of the share, in
|
||||
* gigabytes.
|
||||
*
|
||||
* @type {number}
|
||||
* @memberof IShareCreateOptions
|
||||
*/
|
||||
quota?: number;
|
||||
}
|
||||
|
||||
export interface IShareGetPropertiesOptions {
|
||||
/**
|
||||
* The snapshot parameter is an opaque
|
||||
* DateTime value that, when present, specifies the share snapshot to query.
|
||||
*
|
||||
* @type {string}
|
||||
* @memberof IShareGetPropertiesOptions
|
||||
*/
|
||||
sharesnapshot?: string;
|
||||
}
|
||||
|
||||
export interface IShareDeleteMethodOptions {
|
||||
/**
|
||||
* The snapshot parameter is an opaque
|
||||
* DateTime value that, when present, specifies the share snapshot to query.
|
||||
*
|
||||
* @type {string}
|
||||
* @memberof IShareDeleteMethodOptions
|
||||
*/
|
||||
sharesnapshot?: string;
|
||||
|
||||
/**
|
||||
* Specifies the option
|
||||
* include to delete the base share and all of its snapshots. Possible values
|
||||
* include: 'include'
|
||||
*
|
||||
* @type {Models.DeleteSnapshotsOptionType}
|
||||
* @memberof IShareDeleteMethodOptions
|
||||
*/
|
||||
deleteSnapshots?: Models.DeleteSnapshotsOptionType;
|
||||
}
|
||||
|
||||
export interface IShareSetMetadataOptions {
|
||||
/**
|
||||
* A name-value pair to associate with a file storage object.
|
||||
*
|
||||
* @type {{ [propertyName: string]: string }}
|
||||
* @memberof IShareCreateOptions
|
||||
*/
|
||||
metadata?: { [propertyName: string]: string };
|
||||
}
|
||||
|
||||
export interface ISignedIdentifier {
|
||||
/**
|
||||
* @member {string} id a unique id
|
||||
*/
|
||||
id: string;
|
||||
/**
|
||||
* @member {AccessPolicy} accessPolicy
|
||||
*/
|
||||
accessPolicy: {
|
||||
/**
|
||||
* @member {Date} start the date-time the policy is active.
|
||||
*/
|
||||
start: Date;
|
||||
/**
|
||||
* @member {string} expiry the date-time the policy expires.
|
||||
*/
|
||||
expiry: Date;
|
||||
/**
|
||||
* @member {string} permission the permissions for the acl policy
|
||||
* @see https://docs.microsoft.com/en-us/rest/api/storageservices/set-share-acl
|
||||
*/
|
||||
permission: string;
|
||||
};
|
||||
}
|
||||
|
||||
export declare type ShareGetAccessPolicyResponse = {
|
||||
signedIdentifiers: ISignedIdentifier[];
|
||||
} & Models.ShareGetAccessPolicyHeaders & {
|
||||
/**
|
||||
* The underlying HTTP response.
|
||||
*/
|
||||
_response: HttpResponse & {
|
||||
/**
|
||||
* The parsed HTTP response headers.
|
||||
*/
|
||||
parsedHeaders: Models.ShareGetAccessPolicyHeaders;
|
||||
/**
|
||||
* The response body as text (string format)
|
||||
*/
|
||||
bodyAsText: string;
|
||||
/**
|
||||
* The response body as parsed JSON or XML
|
||||
*/
|
||||
parsedBody: Models.SignedIdentifier[];
|
||||
};
|
||||
};
|
||||
|
||||
export interface IShareCreateSnapshotOptions {
|
||||
/**
|
||||
* A name-value pair to associate with a file storage object.
|
||||
*
|
||||
* @type {{ [propertyName: string]: string }}
|
||||
* @memberof IShareCreateOptions
|
||||
*/
|
||||
metadata?: { [propertyName: string]: string };
|
||||
}
|
||||
|
||||
/**
|
||||
* A ShareURL represents a URL to the Azure Storage share allowing you to manipulate its directories and files.
|
||||
*
|
||||
* @export
|
||||
* @class ShareURL
|
||||
* @extends {StorageURL}
|
||||
*/
|
||||
export class ShareURL extends StorageURL {
|
||||
/**
|
||||
* Creates a ShareURL object from ServiceURL
|
||||
*
|
||||
* @param serviceURL
|
||||
* @param shareName
|
||||
*/
|
||||
public static fromServiceURL(
|
||||
serviceURL: ServiceURL,
|
||||
shareName: string
|
||||
): ShareURL {
|
||||
return new ShareURL(
|
||||
appendToURLPath(serviceURL.url, shareName),
|
||||
serviceURL.pipeline
|
||||
);
|
||||
}
|
||||
|
||||
/**
|
||||
* Share operation context provided by protocol layer.
|
||||
*
|
||||
* @private
|
||||
* @type {Share}
|
||||
* @memberof ShareURL
|
||||
*/
|
||||
private context: Share;
|
||||
|
||||
/**
|
||||
* Creates an instance of ShareURL.
|
||||
*
|
||||
* @param {string} url A URL string pointing to Azure Storage file share, such as
|
||||
* "https://myaccount.file.core.windows.net/share". You can
|
||||
* append a SAS if using AnonymousCredential, such as
|
||||
* "https://myaccount.file.core.windows.net/share?sasString".
|
||||
* @param {Pipeline} pipeline Call StorageURL.newPipeline() to create a default
|
||||
* pipeline, or provide a customized pipeline.
|
||||
* @memberof ShareURL
|
||||
*/
|
||||
constructor(url: string, pipeline: Pipeline) {
|
||||
super(url, pipeline);
|
||||
this.context = new Share(this.storageClientContext);
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates a new ShareURL object identical to the source but with the
|
||||
* specified request policy pipeline.
|
||||
*
|
||||
* @param {Pipeline} pipeline
|
||||
* @returns {ShareURL}
|
||||
* @memberof ShareURL
|
||||
*/
|
||||
public withPipeline(pipeline: Pipeline): ShareURL {
|
||||
return new ShareURL(this.url, pipeline);
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates a new ShareURL object identical to the source but with the specified snapshot timestamp.
|
||||
* Provide "" will remove the snapshot and return a URL to the base share.
|
||||
*
|
||||
* @param {string} snapshot
|
||||
* @returns {ShareURL} A new ShareURL object identical to the source but with the specified snapshot timestamp
|
||||
* @memberof ShareURL
|
||||
*/
|
||||
public withSnapshot(snapshot: string): ShareURL {
|
||||
return new ShareURL(
|
||||
setURLParameter(
|
||||
this.url,
|
||||
URLConstants.Parameters.SNAPSHOT,
|
||||
snapshot.length === 0 ? undefined : snapshot
|
||||
),
|
||||
this.pipeline
|
||||
);
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates a new share under the specified account. If the share with
|
||||
* the same name already exists, the operation fails.
|
||||
* @see https://docs.microsoft.com/en-us/rest/api/storageservices/create-share
|
||||
*
|
||||
* @param {Aborter} aborter Create a new Aborter instance with Aborter.none or Aborter.timeout(),
|
||||
* goto documents of Aborter for more examples about request cancellation
|
||||
* @param {IShareCreateOptions} [options]
|
||||
* @returns {Promise<Models.ShareCreateResponse>}
|
||||
* @memberof ShareURL
|
||||
*/
|
||||
public async create(
|
||||
aborter: Aborter,
|
||||
options: IShareCreateOptions = {}
|
||||
): Promise<Models.ShareCreateResponse> {
|
||||
return this.context.create({
|
||||
...options,
|
||||
abortSignal: aborter
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns all user-defined metadata and system properties for the specified
|
||||
* share.
|
||||
* @see https://docs.microsoft.com/en-us/rest/api/storageservices/get-share-properties
|
||||
*
|
||||
* @param {Aborter} aborter Create a new Aborter instance with Aborter.none or Aborter.timeout(),
|
||||
* goto documents of Aborter for more examples about request cancellation
|
||||
* @param {IShareGetPropertiesOptions} [options]
|
||||
* @returns {Promise<Models.ShareGetPropertiesResponse>}
|
||||
* @memberof ShareURL
|
||||
*/
|
||||
public async getProperties(
|
||||
aborter: Aborter,
|
||||
options: IShareGetPropertiesOptions = {}
|
||||
): Promise<Models.ShareGetPropertiesResponse> {
|
||||
return this.context.getProperties({
|
||||
abortSignal: aborter,
|
||||
...options
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Marks the specified share for deletion. The share and any directories or files
|
||||
* contained within it are later deleted during garbage collection.
|
||||
* @see https://docs.microsoft.com/en-us/rest/api/storageservices/delete-share
|
||||
*
|
||||
* @param {Aborter} aborter Create a new Aborter instance with Aborter.none or Aborter.timeout(),
|
||||
* goto documents of Aborter for more examples about request cancellation
|
||||
* @param {Models.IShareDeleteMethodOptions} [options]
|
||||
* @returns {Promise<Models.ShareDeleteResponse>}
|
||||
* @memberof ShareURL
|
||||
*/
|
||||
public async delete(
|
||||
aborter: Aborter,
|
||||
options: IShareDeleteMethodOptions = {}
|
||||
): Promise<Models.ShareDeleteResponse> {
|
||||
return this.context.deleteMethod({
|
||||
abortSignal: aborter,
|
||||
...options
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Sets one or more user-defined name-value pairs for the specified share.
|
||||
*
|
||||
* If no option provided, or no metadata defined in the option parameter, the share
|
||||
* metadata will be removed.
|
||||
* @see https://docs.microsoft.com/en-us/rest/api/storageservices/set-share-metadata
|
||||
*
|
||||
* @param {Aborter} aborter Create a new Aborter instance with Aborter.none or Aborter.timeout(),
|
||||
* goto documents of Aborter for more examples about request cancellation
|
||||
* @param {IShareSetMetadataOptions} [options]
|
||||
* @returns {Promise<Models.ShareSetMetadataResponse>}
|
||||
* @memberof ShareURL
|
||||
*/
|
||||
public async setMetadata(
|
||||
aborter: Aborter,
|
||||
options: IShareSetMetadataOptions = {}
|
||||
): Promise<Models.ShareSetMetadataResponse> {
|
||||
return this.context.setMetadata({
|
||||
abortSignal: aborter,
|
||||
...options
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Gets the permissions for the specified share. The permissions indicate
|
||||
* whether share data may be accessed publicly.
|
||||
*
|
||||
* WARNING: JavaScript Date will potential lost precision when parsing start and expiry string.
|
||||
* For example, new Date("2018-12-31T03:44:23.8827891Z").toISOString() will get "2018-12-31T03:44:23.882Z".
|
||||
*
|
||||
* @see https://docs.microsoft.com/en-us/rest/api/storageservices/get-share-acl
|
||||
*
|
||||
* @param {Aborter} aborter Create a new Aborter instance with Aborter.none or Aborter.timeout(),
|
||||
* goto documents of Aborter for more examples about request cancellation
|
||||
* @returns {Promise<ShareGetAccessPolicyResponse>}
|
||||
* @memberof ShareURL
|
||||
*/
|
||||
public async getAccessPolicy(
|
||||
aborter: Aborter
|
||||
): Promise<ShareGetAccessPolicyResponse> {
|
||||
const response = await this.context.getAccessPolicy({
|
||||
abortSignal: aborter
|
||||
});
|
||||
|
||||
const res: ShareGetAccessPolicyResponse = {
|
||||
_response: response._response,
|
||||
date: response.date,
|
||||
eTag: response.eTag,
|
||||
errorCode: response.errorCode,
|
||||
lastModified: response.lastModified,
|
||||
requestId: response.requestId,
|
||||
signedIdentifiers: [],
|
||||
version: response.version
|
||||
};
|
||||
|
||||
for (const identifier of response) {
|
||||
res.signedIdentifiers.push({
|
||||
accessPolicy: {
|
||||
expiry: new Date(identifier.accessPolicy!.expiry!),
|
||||
permission: identifier.accessPolicy!.permission!,
|
||||
start: new Date(identifier.accessPolicy!.start!)
|
||||
},
|
||||
id: identifier.id
|
||||
});
|
||||
}
|
||||
|
||||
return res;
|
||||
}
|
||||
|
||||
/**
|
||||
* Sets the permissions for the specified share. The permissions indicate
|
||||
* whether directories or files in a share may be accessed publicly.
|
||||
*
|
||||
* When you set permissions for a share, the existing permissions are replaced.
|
||||
* If no shareAcl provided, the existing share ACL will be
|
||||
* removed.
|
||||
* @see https://docs.microsoft.com/en-us/rest/api/storageservices/set-share-acl
|
||||
*
|
||||
* @param {Aborter} aborter Create a new Aborter instance with Aborter.none or Aborter.timeout(),
|
||||
* goto documents of Aborter for more examples about request cancellation
|
||||
* @param {ISignedIdentifier[]} [shareAcl]
|
||||
* @returns {Promise<Models.ShareSetAccessPolicyResponse>}
|
||||
* @memberof ShareURL
|
||||
*/
|
||||
public async setAccessPolicy(
|
||||
aborter: Aborter,
|
||||
shareAcl?: ISignedIdentifier[]
|
||||
): Promise<Models.ShareSetAccessPolicyResponse> {
|
||||
const acl: Models.SignedIdentifier[] = [];
|
||||
for (const identifier of shareAcl || []) {
|
||||
acl.push({
|
||||
accessPolicy: {
|
||||
expiry: truncatedISO8061Date(identifier.accessPolicy.expiry),
|
||||
permission: identifier.accessPolicy.permission,
|
||||
start: truncatedISO8061Date(identifier.accessPolicy.start)
|
||||
},
|
||||
id: identifier.id
|
||||
});
|
||||
}
|
||||
|
||||
return this.context.setAccessPolicy({
|
||||
abortSignal: aborter,
|
||||
shareAcl: acl
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates a read-only snapshot of a share.
|
||||
*
|
||||
* @param {Aborter} aborter Create a new Aborter instance with Aborter.none or Aborter.timeout(),
|
||||
* goto documents of Aborter for more examples about request cancellation
|
||||
* @param {IShareCreateSnapshotOptions} [options={}]
|
||||
* @returns {Promise<Models.ShareCreateSnapshotResponse>}
|
||||
* @memberof ShareURL
|
||||
*/
|
||||
public async createSnapshot(
|
||||
aborter: Aborter,
|
||||
options: IShareCreateSnapshotOptions = {}
|
||||
): Promise<Models.ShareCreateSnapshotResponse> {
|
||||
return this.context.createSnapshot({
|
||||
abortSignal: aborter,
|
||||
...options
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Sets quota for the specified share.
|
||||
*
|
||||
* @param {Aborter} aborter Create a new Aborter instance with Aborter.none or Aborter.timeout(),
|
||||
* goto documents of Aborter for more examples about request cancellation
|
||||
* @param {number} quotaInGB Specifies the maximum size of the share in gigabytes
|
||||
* @returns {Promise<Models.ShareSetQuotaResponse>}
|
||||
* @memberof ShareURL
|
||||
*/
|
||||
public async setQuota(
|
||||
aborter: Aborter,
|
||||
quotaInGB: number
|
||||
): Promise<Models.ShareSetQuotaResponse> {
|
||||
if (quotaInGB <= 0 || quotaInGB > 5120) {
|
||||
throw new RangeError(
|
||||
`Share quota must be greater than 0, and less than or equal to 5Tib (5120GB)`
|
||||
);
|
||||
}
|
||||
return this.context.setQuota({
|
||||
abortSignal: aborter,
|
||||
quota: quotaInGB
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Retrieves statistics related to the share.
|
||||
*
|
||||
* @param {Aborter} aborter Create a new Aborter instance with Aborter.none or Aborter.timeout(),
|
||||
* goto documents of Aborter for more examples about request cancellation
|
||||
* @returns {Promise<Models.ShareGetStatisticsResponse>}
|
||||
* @memberof ShareURL
|
||||
*/
|
||||
public async getStatistics(
|
||||
aborter: Aborter
|
||||
): Promise<Models.ShareGetStatisticsResponse> {
|
||||
return this.context.getStatistics({
|
||||
abortSignal: aborter
|
||||
});
|
||||
}
|
||||
}
|
|
@ -0,0 +1,118 @@
|
|||
import { deserializationPolicy, RequestPolicyFactory } from "ms-rest-js";
|
||||
|
||||
import { BrowserPolicyFactory } from "./BrowserPolicyFactory";
|
||||
import { Credential } from "./credentials/Credential";
|
||||
import { StorageClientContext } from "./generated/storageClientContext";
|
||||
import { LoggingPolicyFactory } from "./LoggingPolicyFactory";
|
||||
import { IHttpClient, IHttpPipelineLogger, Pipeline } from "./Pipeline";
|
||||
import { IRetryOptions, RetryPolicyFactory } from "./RetryPolicyFactory";
|
||||
import {
|
||||
ITelemetryOptions,
|
||||
TelemetryPolicyFactory
|
||||
} from "./TelemetryPolicyFactory";
|
||||
import { UniqueRequestIDPolicyFactory } from "./UniqueRequestIDPolicyFactory";
|
||||
import { SERVICE_VERSION } from "./utils/constants";
|
||||
|
||||
export { deserializationPolicy };
|
||||
|
||||
/**
|
||||
* Option interface for Pipeline.newPipeline method.
|
||||
*
|
||||
* @export
|
||||
* @interface INewPipelineOptions
|
||||
*/
|
||||
export interface INewPipelineOptions {
|
||||
/**
|
||||
* Telemetry configures the built-in telemetry policy behavior.
|
||||
*
|
||||
* @type {ITelemetryOptions}
|
||||
* @memberof INewPipelineOptions
|
||||
*/
|
||||
telemetry?: ITelemetryOptions;
|
||||
retryOptions?: IRetryOptions;
|
||||
|
||||
logger?: IHttpPipelineLogger;
|
||||
httpClient?: IHttpClient;
|
||||
}
|
||||
|
||||
/**
|
||||
* A ServiceURL represents a based URL class for ServiceURL, ContainerURL and etc.
|
||||
*
|
||||
* @export
|
||||
* @class StorageURL
|
||||
*/
|
||||
export abstract class StorageURL {
|
||||
/**
|
||||
* A static method used to create a new Pipeline object with Credential provided.
|
||||
*
|
||||
* @static
|
||||
* @param {Credential} credential Such as AnonymousCredential, SharedKeyCredential or TokenCredential.
|
||||
* @param {INewPipelineOptions} [pipelineOptions] Optional. Options.
|
||||
* @returns {Pipeline} A new Pipeline object.
|
||||
* @memberof Pipeline
|
||||
*/
|
||||
public static newPipeline(
|
||||
credential: Credential,
|
||||
pipelineOptions: INewPipelineOptions = {}
|
||||
): Pipeline {
|
||||
// Order is important. Closer to the API at the top & closer to the network at the bottom.
|
||||
// The credential's policy factory must appear close to the wire so it can sign any
|
||||
// changes made by other factories (like UniqueRequestIDPolicyFactory)
|
||||
const factories: RequestPolicyFactory[] = [];
|
||||
factories.push(new TelemetryPolicyFactory(pipelineOptions.telemetry));
|
||||
factories.push(new UniqueRequestIDPolicyFactory());
|
||||
factories.push(new BrowserPolicyFactory());
|
||||
factories.push(deserializationPolicy()); // Default deserializationPolicy is provided by protocol layer
|
||||
factories.push(new RetryPolicyFactory(pipelineOptions.retryOptions));
|
||||
factories.push(new LoggingPolicyFactory());
|
||||
factories.push(credential);
|
||||
|
||||
return new Pipeline(factories, {
|
||||
HTTPClient: pipelineOptions.httpClient,
|
||||
logger: pipelineOptions.logger
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Request policy pipeline.
|
||||
*
|
||||
* @internal
|
||||
* @type {Pipeline}
|
||||
* @memberof StorageURL
|
||||
*/
|
||||
public readonly pipeline: Pipeline;
|
||||
|
||||
/**
|
||||
* URL string value.
|
||||
*
|
||||
* @type {string}
|
||||
* @memberof StorageURL
|
||||
*/
|
||||
public readonly url: string;
|
||||
|
||||
/**
|
||||
* StorageClient is a reference to protocol layer operations entry, which is
|
||||
* generated by AutoRest generator.
|
||||
*
|
||||
* @protected
|
||||
* @type {StorageClient}
|
||||
* @memberof StorageURL
|
||||
*/
|
||||
protected readonly storageClientContext: StorageClientContext;
|
||||
|
||||
/**
|
||||
* Creates an instance of StorageURL.
|
||||
* @param {string} url
|
||||
* @param {Pipeline} pipeline
|
||||
* @memberof StorageURL
|
||||
*/
|
||||
protected constructor(url: string, pipeline: Pipeline) {
|
||||
this.url = url;
|
||||
this.pipeline = pipeline;
|
||||
this.storageClientContext = new StorageClientContext(
|
||||
url,
|
||||
SERVICE_VERSION,
|
||||
pipeline.toServiceClientOptions()
|
||||
);
|
||||
}
|
||||
}
|
|
@ -0,0 +1,75 @@
|
|||
import {
|
||||
isNode,
|
||||
RequestPolicy,
|
||||
RequestPolicyFactory,
|
||||
RequestPolicyOptions
|
||||
} from "ms-rest-js";
|
||||
import * as os from "os";
|
||||
|
||||
import { TelemetryPolicy } from "./policies/TelemetryPolicy";
|
||||
import { SDK_VERSION } from "./utils/constants";
|
||||
|
||||
/**
|
||||
* Interface of TelemetryPolicy options.
|
||||
*
|
||||
* @export
|
||||
* @interface ITelemetryOptions
|
||||
*/
|
||||
export interface ITelemetryOptions {
|
||||
value: string;
|
||||
}
|
||||
|
||||
/**
|
||||
* TelemetryPolicyFactory is a factory class helping generating TelemetryPolicy objects.
|
||||
*
|
||||
* @export
|
||||
* @class TelemetryPolicyFactory
|
||||
* @implements {RequestPolicyFactory}
|
||||
*/
|
||||
export class TelemetryPolicyFactory implements RequestPolicyFactory {
|
||||
private telemetryString: string;
|
||||
|
||||
/**
|
||||
* Creates an instance of TelemetryPolicyFactory.
|
||||
* @param {ITelemetryOptions} [telemetry]
|
||||
* @memberof TelemetryPolicyFactory
|
||||
*/
|
||||
constructor(telemetry?: ITelemetryOptions) {
|
||||
const userAgentInfo: string[] = [];
|
||||
|
||||
if (isNode) {
|
||||
if (telemetry) {
|
||||
const telemetryString = telemetry.value;
|
||||
if (
|
||||
telemetryString.length > 0 &&
|
||||
userAgentInfo.indexOf(telemetryString) === -1
|
||||
) {
|
||||
userAgentInfo.push(telemetryString);
|
||||
}
|
||||
}
|
||||
|
||||
// e.g. Azure-Storage/10.0.0
|
||||
const libInfo = `Azure-Storage/${SDK_VERSION}`;
|
||||
if (userAgentInfo.indexOf(libInfo) === -1) {
|
||||
userAgentInfo.push(libInfo);
|
||||
}
|
||||
|
||||
// e.g. (NODE-VERSION 4.9.1; Windows_NT 10.0.16299)
|
||||
const runtimeInfo = `(NODE-VERSION ${
|
||||
process.version
|
||||
}; ${os.type()} ${os.release()})`;
|
||||
if (userAgentInfo.indexOf(runtimeInfo) === -1) {
|
||||
userAgentInfo.push(runtimeInfo);
|
||||
}
|
||||
}
|
||||
|
||||
this.telemetryString = userAgentInfo.join(" ");
|
||||
}
|
||||
|
||||
public create(
|
||||
nextPolicy: RequestPolicy,
|
||||
options: RequestPolicyOptions
|
||||
): TelemetryPolicy {
|
||||
return new TelemetryPolicy(nextPolicy, options, this.telemetryString);
|
||||
}
|
||||
}
|
|
@ -0,0 +1,19 @@
|
|||
import { RequestPolicy, RequestPolicyFactory, RequestPolicyOptions } from "ms-rest-js";
|
||||
|
||||
import { UniqueRequestIDPolicy } from "./policies/UniqueRequestIDPolicy";
|
||||
|
||||
/**
|
||||
* UniqueRequestIDPolicyFactory is a factory class helping generating UniqueRequestIDPolicy objects.
|
||||
*
|
||||
* @export
|
||||
* @class UniqueRequestIDPolicyFactory
|
||||
* @implements {RequestPolicyFactory}
|
||||
*/
|
||||
export class UniqueRequestIDPolicyFactory implements RequestPolicyFactory {
|
||||
public create(
|
||||
nextPolicy: RequestPolicy,
|
||||
options: RequestPolicyOptions
|
||||
): UniqueRequestIDPolicy {
|
||||
return new UniqueRequestIDPolicy(nextPolicy, options);
|
||||
}
|
||||
}
|
|
@ -0,0 +1,31 @@
|
|||
import { RequestPolicy, RequestPolicyOptions } from "ms-rest-js";
|
||||
|
||||
import { AnonymousCredentialPolicy } from "../policies/AnonymousCredentialPolicy";
|
||||
import { Credential } from "./Credential";
|
||||
|
||||
/**
|
||||
* AnonymousCredential provides a credentialPolicyCreator member used to create
|
||||
* AnonymousCredentialPolicy objects. AnonymousCredentialPolicy is used with
|
||||
* HTTP(S) requests that read public resources or for use with Shared Access
|
||||
* Signatures (SAS).
|
||||
*
|
||||
* @export
|
||||
* @class AnonymousCredential
|
||||
* @extends {Credential}
|
||||
*/
|
||||
export class AnonymousCredential extends Credential {
|
||||
/**
|
||||
* Creates an AnonymousCredentialPolicy object.
|
||||
*
|
||||
* @param {RequestPolicy} nextPolicy
|
||||
* @param {RequestPolicyOptions} options
|
||||
* @returns {AnonymousCredentialPolicy}
|
||||
* @memberof AnonymousCredential
|
||||
*/
|
||||
public create(
|
||||
nextPolicy: RequestPolicy,
|
||||
options: RequestPolicyOptions
|
||||
): AnonymousCredentialPolicy {
|
||||
return new AnonymousCredentialPolicy(nextPolicy, options);
|
||||
}
|
||||
}
|
|
@ -0,0 +1,42 @@
|
|||
import {
|
||||
RequestPolicy,
|
||||
RequestPolicyFactory,
|
||||
RequestPolicyOptions
|
||||
} from "ms-rest-js";
|
||||
|
||||
import { CredentialPolicy } from "../policies/CredentialPolicy";
|
||||
|
||||
/**
|
||||
* Credential is an abstract class for Azure Storage HTTP requests signing. This
|
||||
* class will host an credentialPolicyCreator factory which generates CredentialPolicy.
|
||||
*
|
||||
* @export
|
||||
* @abstract
|
||||
* @class Credential
|
||||
*/
|
||||
export abstract class Credential implements RequestPolicyFactory {
|
||||
/**
|
||||
* Creates a RequestPolicy object.
|
||||
*
|
||||
* @param {RequestPolicy} _nextPolicy
|
||||
* @param {RequestPolicyOptions} _options
|
||||
* @returns {RequestPolicy}
|
||||
* @memberof Credential
|
||||
*/
|
||||
public create(
|
||||
// tslint:disable-next-line:variable-name
|
||||
_nextPolicy: RequestPolicy,
|
||||
// tslint:disable-next-line:variable-name
|
||||
_options: RequestPolicyOptions
|
||||
): RequestPolicy {
|
||||
throw new Error("Method should be implemented in children classes.");
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* A factory function that creates a new CredentialPolicy that uses the provided nextPolicy.
|
||||
*/
|
||||
export type CredentialPolicyCreator = (
|
||||
nextPolicy: RequestPolicy,
|
||||
options: RequestPolicyOptions
|
||||
) => CredentialPolicy;
|
|
@ -0,0 +1,72 @@
|
|||
import * as Crypto from "crypto";
|
||||
import { RequestPolicy, RequestPolicyOptions } from "ms-rest-js";
|
||||
|
||||
import { SharedKeyCredentialPolicy } from "../policies/SharedKeyCredentialPolicy";
|
||||
import { Credential } from "./Credential";
|
||||
|
||||
/**
|
||||
* ONLY AVAILABLE IN NODE.JS RUNTIME.
|
||||
*
|
||||
* SharedKeyCredential for account key authorization of Azure Storage service.
|
||||
*
|
||||
* @export
|
||||
* @class SharedKeyCredential
|
||||
* @extends {Credential}
|
||||
*/
|
||||
export class SharedKeyCredential extends Credential {
|
||||
/**
|
||||
* Azure Storage account name; readonly.
|
||||
*
|
||||
* @type {string}
|
||||
* @memberof SharedKeyCredential
|
||||
*/
|
||||
public readonly accountName: string;
|
||||
|
||||
/**
|
||||
* Azure Storage account key; readonly.
|
||||
*
|
||||
* @type {Buffer}
|
||||
* @memberof SharedKeyCredential
|
||||
*/
|
||||
private readonly accountKey: Buffer;
|
||||
|
||||
/**
|
||||
* Creates an instance of SharedKeyCredential.
|
||||
* @param {string} accountName
|
||||
* @param {string} accountKey
|
||||
* @memberof SharedKeyCredential
|
||||
*/
|
||||
constructor(accountName: string, accountKey: string) {
|
||||
super();
|
||||
this.accountName = accountName;
|
||||
this.accountKey = Buffer.from(accountKey, "base64");
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates a SharedKeyCredentialPolicy object.
|
||||
*
|
||||
* @param {RequestPolicy} nextPolicy
|
||||
* @param {RequestPolicyOptions} options
|
||||
* @returns {SharedKeyCredentialPolicy}
|
||||
* @memberof SharedKeyCredential
|
||||
*/
|
||||
public create(
|
||||
nextPolicy: RequestPolicy,
|
||||
options: RequestPolicyOptions
|
||||
): SharedKeyCredentialPolicy {
|
||||
return new SharedKeyCredentialPolicy(nextPolicy, options, this);
|
||||
}
|
||||
|
||||
/**
|
||||
* Generates a hash signature for an HTTP request or for a SAS.
|
||||
*
|
||||
* @param {string} stringToSign
|
||||
* @returns {string}
|
||||
* @memberof SharedKeyCredential
|
||||
*/
|
||||
public computeHMACSHA256(stringToSign: string): string {
|
||||
return Crypto.createHmac("sha256", this.accountKey)
|
||||
.update(stringToSign, "utf8")
|
||||
.digest("base64");
|
||||
}
|
||||
}
|
|
@ -0,0 +1,65 @@
|
|||
import { RequestPolicy, RequestPolicyOptions } from "ms-rest-js";
|
||||
|
||||
import { Credential } from "../credentials/Credential";
|
||||
import { TokenCredentialPolicy } from "../policies/TokenCredentialPolicy";
|
||||
|
||||
/**
|
||||
* TokenCredential is a Credential used to generate a TokenCredentialPolicy.
|
||||
* Renew token by setting a new token string value to token property.
|
||||
*
|
||||
* @example
|
||||
* const tokenCredential = new TokenCredential("token");
|
||||
* const pipeline = StorageURL.newPipeline(tokenCredential);
|
||||
*
|
||||
* // List containers
|
||||
* const serviceURL = new ServiceURL("https://mystorageaccount.blob.core.windows.net", pipeline);
|
||||
*
|
||||
* // Set up a timer to refresh the token
|
||||
* const timerID = setInterval(() => {
|
||||
* // Update token by accessing to public tokenCredential.token
|
||||
* tokenCredential.token = "updatedToken";
|
||||
* // WARNING: Timer must be manually stopped! It will forbid GC of tokenCredential
|
||||
* if (shouldStop()) {
|
||||
* clearInterval(timerID);
|
||||
* }
|
||||
* }, 60 * 60 * 1000); // Set an interval time before your token expired
|
||||
* @export
|
||||
* @class TokenCredential
|
||||
* @extends {Credential}
|
||||
*
|
||||
*/
|
||||
export class TokenCredential extends Credential {
|
||||
/**
|
||||
* Mutable token value. You can set a renewed token value to this property,
|
||||
* for example, when an OAuth token is expired.
|
||||
*
|
||||
* @type {string}
|
||||
* @memberof TokenCredential
|
||||
*/
|
||||
public token: string;
|
||||
|
||||
/**
|
||||
* Creates an instance of TokenCredential.
|
||||
* @param {string} token
|
||||
* @memberof TokenCredential
|
||||
*/
|
||||
constructor(token: string) {
|
||||
super();
|
||||
this.token = token;
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates a TokenCredentialPolicy object.
|
||||
*
|
||||
* @param {RequestPolicy} nextPolicy
|
||||
* @param {RequestPolicyOptions} options
|
||||
* @returns {TokenCredentialPolicy}
|
||||
* @memberof TokenCredential
|
||||
*/
|
||||
public create(
|
||||
nextPolicy: RequestPolicy,
|
||||
options: RequestPolicyOptions
|
||||
): TokenCredentialPolicy {
|
||||
return new TokenCredentialPolicy(nextPolicy, options, this);
|
||||
}
|
||||
}
|
|
@ -0,0 +1,25 @@
|
|||
/*
|
||||
* Copyright (c) Microsoft Corporation. All rights reserved.
|
||||
* Licensed under the MIT License. See License.txt in the project root for
|
||||
* license information.
|
||||
*
|
||||
* Code generated by Microsoft (R) AutoRest Code Generator.
|
||||
* Changes may cause incorrect behavior and will be lost if the code is
|
||||
* regenerated.
|
||||
*/
|
||||
|
||||
// Narrow re-export of the generated mapper set: only the mappers the
// Directory operation group needs for (de)serialization. Keep in sync with
// ../models/mappers when regenerating.
export {
  discriminators,
  DirectoryCreateHeaders,
  StorageError,
  DirectoryGetPropertiesHeaders,
  DirectoryDeleteHeaders,
  DirectorySetMetadataHeaders,
  ListFilesAndDirectoriesSegmentResponse,
  Entry,
  DirectoryListFilesAndDirectoriesSegmentHeaders,
  DirectoryItem,
  FileItem,
  FileProperty
} from "../models/mappers";
|
||||
|
|
@ -0,0 +1,26 @@
|
|||
/*
|
||||
* Copyright (c) Microsoft Corporation. All rights reserved.
|
||||
* Licensed under the MIT License. See License.txt in the project root for
|
||||
* license information.
|
||||
*
|
||||
* Code generated by Microsoft (R) AutoRest Code Generator.
|
||||
* Changes may cause incorrect behavior and will be lost if the code is
|
||||
* regenerated.
|
||||
*/
|
||||
|
||||
// Narrow re-export of the generated mapper set: only the mappers the
// File operation group needs for (de)serialization. Keep in sync with
// ../models/mappers when regenerating.
export {
  discriminators,
  FileCreateHeaders,
  StorageError,
  FileDownloadHeaders,
  FileGetPropertiesHeaders,
  FileDeleteHeaders,
  FileSetHTTPHeadersHeaders,
  FileSetMetadataHeaders,
  FileUploadRangeHeaders,
  Range,
  FileGetRangeListHeaders,
  FileStartCopyHeaders,
  FileAbortCopyHeaders
} from "../models/mappers";
|
||||
|
The diff for this file is not shown because it is too large.
Load diff
The diff for this file is not shown because it is too large.
Load diff
|
@ -0,0 +1,523 @@
|
|||
/*
|
||||
* Copyright (c) Microsoft Corporation. All rights reserved.
|
||||
* Licensed under the MIT License. See License.txt in the project root for
|
||||
* license information.
|
||||
*
|
||||
* Code generated by Microsoft (R) AutoRest Code Generator.
|
||||
* Changes may cause incorrect behavior and will be lost if the code is
|
||||
* regenerated.
|
||||
*/
|
||||
|
||||
import * as msRest from "ms-rest-js";
|
||||
|
||||
export const comp0: msRest.OperationQueryParameter = {
|
||||
parameterPath: "comp",
|
||||
mapper: {
|
||||
required: true,
|
||||
isConstant: true,
|
||||
serializedName: "comp",
|
||||
defaultValue: 'properties',
|
||||
type: {
|
||||
name: "String"
|
||||
}
|
||||
}
|
||||
};
|
||||
export const comp1: msRest.OperationQueryParameter = {
|
||||
parameterPath: "comp",
|
||||
mapper: {
|
||||
required: true,
|
||||
isConstant: true,
|
||||
serializedName: "comp",
|
||||
defaultValue: 'list',
|
||||
type: {
|
||||
name: "String"
|
||||
}
|
||||
}
|
||||
};
|
||||
export const comp2: msRest.OperationQueryParameter = {
|
||||
parameterPath: "comp",
|
||||
mapper: {
|
||||
required: true,
|
||||
isConstant: true,
|
||||
serializedName: "comp",
|
||||
defaultValue: 'snapshot',
|
||||
type: {
|
||||
name: "String"
|
||||
}
|
||||
}
|
||||
};
|
||||
export const comp3: msRest.OperationQueryParameter = {
|
||||
parameterPath: "comp",
|
||||
mapper: {
|
||||
required: true,
|
||||
isConstant: true,
|
||||
serializedName: "comp",
|
||||
defaultValue: 'metadata',
|
||||
type: {
|
||||
name: "String"
|
||||
}
|
||||
}
|
||||
};
|
||||
export const comp4: msRest.OperationQueryParameter = {
|
||||
parameterPath: "comp",
|
||||
mapper: {
|
||||
required: true,
|
||||
isConstant: true,
|
||||
serializedName: "comp",
|
||||
defaultValue: 'acl',
|
||||
type: {
|
||||
name: "String"
|
||||
}
|
||||
}
|
||||
};
|
||||
export const comp5: msRest.OperationQueryParameter = {
|
||||
parameterPath: "comp",
|
||||
mapper: {
|
||||
required: true,
|
||||
isConstant: true,
|
||||
serializedName: "comp",
|
||||
defaultValue: 'stats',
|
||||
type: {
|
||||
name: "String"
|
||||
}
|
||||
}
|
||||
};
|
||||
export const comp6: msRest.OperationQueryParameter = {
|
||||
parameterPath: "comp",
|
||||
mapper: {
|
||||
required: true,
|
||||
isConstant: true,
|
||||
serializedName: "comp",
|
||||
defaultValue: 'range',
|
||||
type: {
|
||||
name: "String"
|
||||
}
|
||||
}
|
||||
};
|
||||
export const comp7: msRest.OperationQueryParameter = {
|
||||
parameterPath: "comp",
|
||||
mapper: {
|
||||
required: true,
|
||||
isConstant: true,
|
||||
serializedName: "comp",
|
||||
defaultValue: 'rangelist',
|
||||
type: {
|
||||
name: "String"
|
||||
}
|
||||
}
|
||||
};
|
||||
export const comp8: msRest.OperationQueryParameter = {
|
||||
parameterPath: "comp",
|
||||
mapper: {
|
||||
required: true,
|
||||
isConstant: true,
|
||||
serializedName: "comp",
|
||||
defaultValue: 'copy',
|
||||
type: {
|
||||
name: "String"
|
||||
}
|
||||
}
|
||||
};
|
||||
export const contentLength: msRest.OperationParameter = {
|
||||
parameterPath: "contentLength",
|
||||
mapper: {
|
||||
required: true,
|
||||
serializedName: "Content-Length",
|
||||
type: {
|
||||
name: "Number"
|
||||
}
|
||||
}
|
||||
};
|
||||
export const contentMD5: msRest.OperationParameter = {
|
||||
parameterPath: [
|
||||
"options",
|
||||
"contentMD5"
|
||||
],
|
||||
mapper: {
|
||||
serializedName: "Content-MD5",
|
||||
type: {
|
||||
name: "ByteArray"
|
||||
}
|
||||
}
|
||||
};
|
||||
export const copyActionAbortConstant: msRest.OperationParameter = {
|
||||
parameterPath: "copyActionAbortConstant",
|
||||
mapper: {
|
||||
required: true,
|
||||
isConstant: true,
|
||||
serializedName: "x-ms-copy-action",
|
||||
defaultValue: 'abort',
|
||||
type: {
|
||||
name: "String"
|
||||
}
|
||||
}
|
||||
};
|
||||
export const copyId: msRest.OperationQueryParameter = {
|
||||
parameterPath: "copyId",
|
||||
mapper: {
|
||||
required: true,
|
||||
serializedName: "copyid",
|
||||
type: {
|
||||
name: "String"
|
||||
}
|
||||
}
|
||||
};
|
||||
export const copySource: msRest.OperationParameter = {
|
||||
parameterPath: "copySource",
|
||||
mapper: {
|
||||
required: true,
|
||||
serializedName: "x-ms-copy-source",
|
||||
type: {
|
||||
name: "String"
|
||||
}
|
||||
}
|
||||
};
|
||||
export const deleteSnapshots: msRest.OperationParameter = {
|
||||
parameterPath: [
|
||||
"options",
|
||||
"deleteSnapshots"
|
||||
],
|
||||
mapper: {
|
||||
serializedName: "x-ms-delete-snapshots",
|
||||
type: {
|
||||
name: "Enum",
|
||||
allowedValues: [
|
||||
"include"
|
||||
]
|
||||
}
|
||||
}
|
||||
};
|
||||
export const fileCacheControl: msRest.OperationParameter = {
|
||||
parameterPath: [
|
||||
"options",
|
||||
"fileCacheControl"
|
||||
],
|
||||
mapper: {
|
||||
serializedName: "x-ms-cache-control",
|
||||
type: {
|
||||
name: "String"
|
||||
}
|
||||
}
|
||||
};
|
||||
export const fileContentDisposition: msRest.OperationParameter = {
|
||||
parameterPath: [
|
||||
"options",
|
||||
"fileContentDisposition"
|
||||
],
|
||||
mapper: {
|
||||
serializedName: "x-ms-content-disposition",
|
||||
type: {
|
||||
name: "String"
|
||||
}
|
||||
}
|
||||
};
|
||||
export const fileContentEncoding: msRest.OperationParameter = {
|
||||
parameterPath: [
|
||||
"options",
|
||||
"fileContentEncoding"
|
||||
],
|
||||
mapper: {
|
||||
serializedName: "x-ms-content-encoding",
|
||||
type: {
|
||||
name: "String"
|
||||
}
|
||||
}
|
||||
};
|
||||
export const fileContentLanguage: msRest.OperationParameter = {
|
||||
parameterPath: [
|
||||
"options",
|
||||
"fileContentLanguage"
|
||||
],
|
||||
mapper: {
|
||||
serializedName: "x-ms-content-language",
|
||||
type: {
|
||||
name: "String"
|
||||
}
|
||||
}
|
||||
};
|
||||
export const fileContentLength0: msRest.OperationParameter = {
|
||||
parameterPath: "fileContentLength",
|
||||
mapper: {
|
||||
required: true,
|
||||
serializedName: "x-ms-content-length",
|
||||
type: {
|
||||
name: "Number"
|
||||
}
|
||||
}
|
||||
};
|
||||
export const fileContentLength1: msRest.OperationParameter = {
|
||||
parameterPath: [
|
||||
"options",
|
||||
"fileContentLength"
|
||||
],
|
||||
mapper: {
|
||||
serializedName: "x-ms-content-length",
|
||||
type: {
|
||||
name: "Number"
|
||||
}
|
||||
}
|
||||
};
|
||||
export const fileContentMD5: msRest.OperationParameter = {
|
||||
parameterPath: [
|
||||
"options",
|
||||
"fileContentMD5"
|
||||
],
|
||||
mapper: {
|
||||
serializedName: "x-ms-content-md5",
|
||||
type: {
|
||||
name: "ByteArray"
|
||||
}
|
||||
}
|
||||
};
|
||||
export const fileContentType: msRest.OperationParameter = {
|
||||
parameterPath: [
|
||||
"options",
|
||||
"fileContentType"
|
||||
],
|
||||
mapper: {
|
||||
serializedName: "x-ms-content-type",
|
||||
type: {
|
||||
name: "String"
|
||||
}
|
||||
}
|
||||
};
|
||||
export const fileRangeWrite: msRest.OperationParameter = {
|
||||
parameterPath: "fileRangeWrite",
|
||||
mapper: {
|
||||
required: true,
|
||||
serializedName: "x-ms-write",
|
||||
defaultValue: 'update',
|
||||
type: {
|
||||
name: "Enum",
|
||||
allowedValues: [
|
||||
"update",
|
||||
"clear"
|
||||
]
|
||||
}
|
||||
}
|
||||
};
|
||||
export const fileTypeConstant: msRest.OperationParameter = {
|
||||
parameterPath: "fileTypeConstant",
|
||||
mapper: {
|
||||
required: true,
|
||||
isConstant: true,
|
||||
serializedName: "x-ms-type",
|
||||
defaultValue: 'file',
|
||||
type: {
|
||||
name: "String"
|
||||
}
|
||||
}
|
||||
};
|
||||
export const include: msRest.OperationQueryParameter = {
|
||||
parameterPath: [
|
||||
"options",
|
||||
"include"
|
||||
],
|
||||
mapper: {
|
||||
serializedName: "include",
|
||||
type: {
|
||||
name: "Sequence",
|
||||
element: {
|
||||
type: {
|
||||
name: "Enum",
|
||||
allowedValues: [
|
||||
"snapshots",
|
||||
"metadata"
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
collectionFormat: msRest.QueryCollectionFormat.Csv
|
||||
};
|
||||
export const marker: msRest.OperationQueryParameter = {
|
||||
parameterPath: [
|
||||
"options",
|
||||
"marker"
|
||||
],
|
||||
mapper: {
|
||||
serializedName: "marker",
|
||||
type: {
|
||||
name: "String"
|
||||
}
|
||||
}
|
||||
};
|
||||
export const maxresults: msRest.OperationQueryParameter = {
|
||||
parameterPath: [
|
||||
"options",
|
||||
"maxresults"
|
||||
],
|
||||
mapper: {
|
||||
serializedName: "maxresults",
|
||||
constraints: {
|
||||
InclusiveMinimum: 1
|
||||
},
|
||||
type: {
|
||||
name: "Number"
|
||||
}
|
||||
}
|
||||
};
|
||||
export const metadata: msRest.OperationParameter = {
|
||||
parameterPath: [
|
||||
"options",
|
||||
"metadata"
|
||||
],
|
||||
mapper: {
|
||||
serializedName: "x-ms-meta",
|
||||
type: {
|
||||
name: "Dictionary",
|
||||
value: {
|
||||
type: {
|
||||
name: "String"
|
||||
}
|
||||
}
|
||||
},
|
||||
headerCollectionPrefix: "x-ms-meta-"
|
||||
}
|
||||
};
|
||||
export const prefix: msRest.OperationQueryParameter = {
|
||||
parameterPath: [
|
||||
"options",
|
||||
"prefix"
|
||||
],
|
||||
mapper: {
|
||||
serializedName: "prefix",
|
||||
type: {
|
||||
name: "String"
|
||||
}
|
||||
}
|
||||
};
|
||||
export const quota: msRest.OperationParameter = {
|
||||
parameterPath: [
|
||||
"options",
|
||||
"quota"
|
||||
],
|
||||
mapper: {
|
||||
serializedName: "x-ms-share-quota",
|
||||
constraints: {
|
||||
InclusiveMinimum: 1
|
||||
},
|
||||
type: {
|
||||
name: "Number"
|
||||
}
|
||||
}
|
||||
};
|
||||
export const range0: msRest.OperationParameter = {
|
||||
parameterPath: [
|
||||
"options",
|
||||
"range"
|
||||
],
|
||||
mapper: {
|
||||
serializedName: "x-ms-range",
|
||||
type: {
|
||||
name: "String"
|
||||
}
|
||||
}
|
||||
};
|
||||
export const range1: msRest.OperationParameter = {
|
||||
parameterPath: "range",
|
||||
mapper: {
|
||||
required: true,
|
||||
serializedName: "x-ms-range",
|
||||
type: {
|
||||
name: "String"
|
||||
}
|
||||
}
|
||||
};
|
||||
export const rangeGetContentMD5: msRest.OperationParameter = {
|
||||
parameterPath: [
|
||||
"options",
|
||||
"rangeGetContentMD5"
|
||||
],
|
||||
mapper: {
|
||||
serializedName: "x-ms-range-get-content-md5",
|
||||
type: {
|
||||
name: "Boolean"
|
||||
}
|
||||
}
|
||||
};
|
||||
export const restype0: msRest.OperationQueryParameter = {
|
||||
parameterPath: "restype",
|
||||
mapper: {
|
||||
required: true,
|
||||
isConstant: true,
|
||||
serializedName: "restype",
|
||||
defaultValue: 'service',
|
||||
type: {
|
||||
name: "String"
|
||||
}
|
||||
}
|
||||
};
|
||||
export const restype1: msRest.OperationQueryParameter = {
|
||||
parameterPath: "restype",
|
||||
mapper: {
|
||||
required: true,
|
||||
isConstant: true,
|
||||
serializedName: "restype",
|
||||
defaultValue: 'share',
|
||||
type: {
|
||||
name: "String"
|
||||
}
|
||||
}
|
||||
};
|
||||
export const restype2: msRest.OperationQueryParameter = {
|
||||
parameterPath: "restype",
|
||||
mapper: {
|
||||
required: true,
|
||||
isConstant: true,
|
||||
serializedName: "restype",
|
||||
defaultValue: 'directory',
|
||||
type: {
|
||||
name: "String"
|
||||
}
|
||||
}
|
||||
};
|
||||
export const sharesnapshot: msRest.OperationQueryParameter = {
|
||||
parameterPath: [
|
||||
"options",
|
||||
"sharesnapshot"
|
||||
],
|
||||
mapper: {
|
||||
serializedName: "sharesnapshot",
|
||||
type: {
|
||||
name: "String"
|
||||
}
|
||||
}
|
||||
};
|
||||
export const timeout: msRest.OperationQueryParameter = {
|
||||
parameterPath: [
|
||||
"options",
|
||||
"timeout"
|
||||
],
|
||||
mapper: {
|
||||
serializedName: "timeout",
|
||||
constraints: {
|
||||
InclusiveMinimum: 0
|
||||
},
|
||||
type: {
|
||||
name: "Number"
|
||||
}
|
||||
}
|
||||
};
|
||||
export const url: msRest.OperationURLParameter = {
|
||||
parameterPath: "url",
|
||||
mapper: {
|
||||
required: true,
|
||||
serializedName: "url",
|
||||
defaultValue: '',
|
||||
type: {
|
||||
name: "String"
|
||||
}
|
||||
},
|
||||
skipEncoding: true
|
||||
};
|
||||
export const version: msRest.OperationParameter = {
|
||||
parameterPath: "version",
|
||||
mapper: {
|
||||
required: true,
|
||||
serializedName: "x-ms-version",
|
||||
type: {
|
||||
name: "String"
|
||||
}
|
||||
}
|
||||
};
|
|
@ -0,0 +1,25 @@
|
|||
/*
|
||||
* Copyright (c) Microsoft Corporation. All rights reserved.
|
||||
* Licensed under the MIT License. See License.txt in the project root for
|
||||
* license information.
|
||||
*
|
||||
* Code generated by Microsoft (R) AutoRest Code Generator.
|
||||
* Changes may cause incorrect behavior and will be lost if the code is
|
||||
* regenerated.
|
||||
*/
|
||||
|
||||
// Narrow re-export of the generated mapper set: only the mappers the
// Service operation group needs for (de)serialization. Keep in sync with
// ../models/mappers when regenerating.
export {
  discriminators,
  StorageServiceProperties,
  Metrics,
  RetentionPolicy,
  CorsRule,
  ServiceSetPropertiesHeaders,
  StorageError,
  ServiceGetPropertiesHeaders,
  ListSharesResponse,
  ShareItem,
  ShareProperties,
  ServiceListSharesSegmentHeaders
} from "../models/mappers";
|
||||
|
|
@ -0,0 +1,27 @@
|
|||
/*
|
||||
* Copyright (c) Microsoft Corporation. All rights reserved.
|
||||
* Licensed under the MIT License. See License.txt in the project root for
|
||||
* license information.
|
||||
*
|
||||
* Code generated by Microsoft (R) AutoRest Code Generator.
|
||||
* Changes may cause incorrect behavior and will be lost if the code is
|
||||
* regenerated.
|
||||
*/
|
||||
|
||||
// Narrow re-export of the generated mapper set: only the mappers the
// Share operation group needs for (de)serialization. Keep in sync with
// ../models/mappers when regenerating.
export {
  discriminators,
  ShareCreateHeaders,
  StorageError,
  ShareGetPropertiesHeaders,
  ShareDeleteHeaders,
  ShareCreateSnapshotHeaders,
  ShareSetQuotaHeaders,
  ShareSetMetadataHeaders,
  SignedIdentifier,
  AccessPolicy,
  ShareGetAccessPolicyHeaders,
  ShareSetAccessPolicyHeaders,
  ShareStats,
  ShareGetStatisticsHeaders
} from "../models/mappers";
|
||||
|
|
@ -0,0 +1,290 @@
|
|||
/*
|
||||
* Copyright (c) Microsoft Corporation. All rights reserved.
|
||||
* Licensed under the MIT License. See License.txt in the project root for
|
||||
* license information.
|
||||
*
|
||||
* Code generated by Microsoft (R) AutoRest Code Generator.
|
||||
* Changes may cause incorrect behavior and will be lost if the code is
|
||||
* regenerated.
|
||||
*/
|
||||
|
||||
import * as msRest from "ms-rest-js";
|
||||
import * as Models from "../models";
|
||||
import * as Mappers from "../models/directoryMappers";
|
||||
import * as Parameters from "../models/parameters";
|
||||
import { StorageClientContext } from "../storageClientContext";
|
||||
|
||||
/** Class representing a Directory. */
export class Directory {
  // Shared AutoRest client context; each method below resolves one of the
  // OperationSpec constants defined at the bottom of this file against it.
  private readonly client: StorageClientContext;

  /**
   * Create a Directory.
   * @param {StorageClientContext} client Reference to the service client.
   */
  constructor(client: StorageClientContext) {
    this.client = client;
  }

  /**
   * Creates a new directory under the specified share or parent directory.
   *
   * @param {DirectoryCreateOptionalParams} [options] Optional Parameters.
   *
   * @returns {Promise} A promise is returned
   *
   * @resolve {HttpOperationResponse} The deserialized result object.
   *
   * @reject {Error|ServiceError} The error object.
   */
  // AutoRest overload set: promise-style with/without options, plus
  // callback-style with/without options, all sharing one implementation.
  create(): Promise<Models.DirectoryCreateResponse>;
  create(options: Models.DirectoryCreateOptionalParams): Promise<Models.DirectoryCreateResponse>;
  create(callback: msRest.ServiceCallback<void>): void;
  create(options: Models.DirectoryCreateOptionalParams, callback: msRest.ServiceCallback<void>): void;
  create(options?: Models.DirectoryCreateOptionalParams, callback?: msRest.ServiceCallback<void>): Promise<Models.DirectoryCreateResponse> {
    return this.client.sendOperationRequest(
      {
        options
      },
      createOperationSpec,
      callback) as Promise<Models.DirectoryCreateResponse>;
  }

  /**
   * Returns all system properties for the specified directory, and can also be used to check the
   * existence of a directory. The data returned does not include the files in the directory or any
   * subdirectories.
   *
   * @param {DirectoryGetPropertiesOptionalParams} [options] Optional Parameters.
   *
   * @returns {Promise} A promise is returned
   *
   * @resolve {HttpOperationResponse} The deserialized result object.
   *
   * @reject {Error|ServiceError} The error object.
   */
  getProperties(): Promise<Models.DirectoryGetPropertiesResponse>;
  getProperties(options: Models.DirectoryGetPropertiesOptionalParams): Promise<Models.DirectoryGetPropertiesResponse>;
  getProperties(callback: msRest.ServiceCallback<void>): void;
  getProperties(options: Models.DirectoryGetPropertiesOptionalParams, callback: msRest.ServiceCallback<void>): void;
  getProperties(options?: Models.DirectoryGetPropertiesOptionalParams, callback?: msRest.ServiceCallback<void>): Promise<Models.DirectoryGetPropertiesResponse> {
    return this.client.sendOperationRequest(
      {
        options
      },
      getPropertiesOperationSpec,
      callback) as Promise<Models.DirectoryGetPropertiesResponse>;
  }

  /**
   * Removes the specified empty directory. Note that the directory must be empty before it can be
   * deleted.
   *
   * @param {DirectoryDeleteMethodOptionalParams} [options] Optional Parameters.
   *
   * @returns {Promise} A promise is returned
   *
   * @resolve {HttpOperationResponse} The deserialized result object.
   *
   * @reject {Error|ServiceError} The error object.
   */
  // Named deleteMethod (not delete) because `delete` is a reserved word.
  deleteMethod(): Promise<Models.DirectoryDeleteResponse>;
  deleteMethod(options: Models.DirectoryDeleteMethodOptionalParams): Promise<Models.DirectoryDeleteResponse>;
  deleteMethod(callback: msRest.ServiceCallback<void>): void;
  deleteMethod(options: Models.DirectoryDeleteMethodOptionalParams, callback: msRest.ServiceCallback<void>): void;
  deleteMethod(options?: Models.DirectoryDeleteMethodOptionalParams, callback?: msRest.ServiceCallback<void>): Promise<Models.DirectoryDeleteResponse> {
    return this.client.sendOperationRequest(
      {
        options
      },
      deleteMethodOperationSpec,
      callback) as Promise<Models.DirectoryDeleteResponse>;
  }

  /**
   * Updates user defined metadata for the specified directory.
   *
   * @param {DirectorySetMetadataOptionalParams} [options] Optional Parameters.
   *
   * @returns {Promise} A promise is returned
   *
   * @resolve {HttpOperationResponse} The deserialized result object.
   *
   * @reject {Error|ServiceError} The error object.
   */
  setMetadata(): Promise<Models.DirectorySetMetadataResponse>;
  setMetadata(options: Models.DirectorySetMetadataOptionalParams): Promise<Models.DirectorySetMetadataResponse>;
  setMetadata(callback: msRest.ServiceCallback<void>): void;
  setMetadata(options: Models.DirectorySetMetadataOptionalParams, callback: msRest.ServiceCallback<void>): void;
  setMetadata(options?: Models.DirectorySetMetadataOptionalParams, callback?: msRest.ServiceCallback<void>): Promise<Models.DirectorySetMetadataResponse> {
    return this.client.sendOperationRequest(
      {
        options
      },
      setMetadataOperationSpec,
      callback) as Promise<Models.DirectorySetMetadataResponse>;
  }

  /**
   * Returns a list of files or directories under the specified share or directory. It lists the
   * contents only for a single level of the directory hierarchy.
   *
   * @param {DirectoryListFilesAndDirectoriesSegmentOptionalParams} [options] Optional Parameters.
   *
   * @returns {Promise} A promise is returned
   *
   * @resolve {HttpOperationResponse} The deserialized result object.
   *
   * @reject {Error|ServiceError} The error object.
   */
  listFilesAndDirectoriesSegment(): Promise<Models.DirectoryListFilesAndDirectoriesSegmentResponse>;
  listFilesAndDirectoriesSegment(options: Models.DirectoryListFilesAndDirectoriesSegmentOptionalParams): Promise<Models.DirectoryListFilesAndDirectoriesSegmentResponse>;
  listFilesAndDirectoriesSegment(callback: msRest.ServiceCallback<Models.ListFilesAndDirectoriesSegmentResponse>): void;
  listFilesAndDirectoriesSegment(options: Models.DirectoryListFilesAndDirectoriesSegmentOptionalParams, callback: msRest.ServiceCallback<Models.ListFilesAndDirectoriesSegmentResponse>): void;
  listFilesAndDirectoriesSegment(options?: Models.DirectoryListFilesAndDirectoriesSegmentOptionalParams, callback?: msRest.ServiceCallback<Models.ListFilesAndDirectoriesSegmentResponse>): Promise<Models.DirectoryListFilesAndDirectoriesSegmentResponse> {
    return this.client.sendOperationRequest(
      {
        options
      },
      listFilesAndDirectoriesSegmentOperationSpec,
      callback) as Promise<Models.DirectoryListFilesAndDirectoriesSegmentResponse>;
  }

}
|
||||
|
||||
// Operation Specifications
|
||||
const serializer = new msRest.Serializer(Mappers, true);
|
||||
const createOperationSpec: msRest.OperationSpec = {
|
||||
httpMethod: "PUT",
|
||||
path: "{shareName}/{directory}",
|
||||
urlParameters: [
|
||||
Parameters.url
|
||||
],
|
||||
queryParameters: [
|
||||
Parameters.timeout,
|
||||
Parameters.restype2
|
||||
],
|
||||
headerParameters: [
|
||||
Parameters.metadata,
|
||||
Parameters.version
|
||||
],
|
||||
responses: {
|
||||
201: {
|
||||
headersMapper: Mappers.DirectoryCreateHeaders
|
||||
},
|
||||
default: {
|
||||
bodyMapper: Mappers.StorageError
|
||||
}
|
||||
},
|
||||
isXML: true,
|
||||
serializer
|
||||
};
|
||||
|
||||
const getPropertiesOperationSpec: msRest.OperationSpec = {
|
||||
httpMethod: "GET",
|
||||
path: "{shareName}/{directory}",
|
||||
urlParameters: [
|
||||
Parameters.url
|
||||
],
|
||||
queryParameters: [
|
||||
Parameters.sharesnapshot,
|
||||
Parameters.timeout,
|
||||
Parameters.restype2
|
||||
],
|
||||
headerParameters: [
|
||||
Parameters.version
|
||||
],
|
||||
responses: {
|
||||
200: {
|
||||
headersMapper: Mappers.DirectoryGetPropertiesHeaders
|
||||
},
|
||||
default: {
|
||||
bodyMapper: Mappers.StorageError
|
||||
}
|
||||
},
|
||||
isXML: true,
|
||||
serializer
|
||||
};
|
||||
|
||||
const deleteMethodOperationSpec: msRest.OperationSpec = {
|
||||
httpMethod: "DELETE",
|
||||
path: "{shareName}/{directory}",
|
||||
urlParameters: [
|
||||
Parameters.url
|
||||
],
|
||||
queryParameters: [
|
||||
Parameters.timeout,
|
||||
Parameters.restype2
|
||||
],
|
||||
headerParameters: [
|
||||
Parameters.version
|
||||
],
|
||||
responses: {
|
||||
202: {
|
||||
headersMapper: Mappers.DirectoryDeleteHeaders
|
||||
},
|
||||
default: {
|
||||
bodyMapper: Mappers.StorageError
|
||||
}
|
||||
},
|
||||
isXML: true,
|
||||
serializer
|
||||
};
|
||||
|
||||
const setMetadataOperationSpec: msRest.OperationSpec = {
|
||||
httpMethod: "PUT",
|
||||
path: "{shareName}/{directory}",
|
||||
urlParameters: [
|
||||
Parameters.url
|
||||
],
|
||||
queryParameters: [
|
||||
Parameters.timeout,
|
||||
Parameters.restype2,
|
||||
Parameters.comp3
|
||||
],
|
||||
headerParameters: [
|
||||
Parameters.metadata,
|
||||
Parameters.version
|
||||
],
|
||||
responses: {
|
||||
202: {
|
||||
headersMapper: Mappers.DirectorySetMetadataHeaders
|
||||
},
|
||||
default: {
|
||||
bodyMapper: Mappers.StorageError
|
||||
}
|
||||
},
|
||||
isXML: true,
|
||||
serializer
|
||||
};
|
||||
|
||||
const listFilesAndDirectoriesSegmentOperationSpec: msRest.OperationSpec = {
|
||||
httpMethod: "GET",
|
||||
path: "{shareName}/{directory}",
|
||||
urlParameters: [
|
||||
Parameters.url
|
||||
],
|
||||
queryParameters: [
|
||||
Parameters.prefix,
|
||||
Parameters.sharesnapshot,
|
||||
Parameters.marker,
|
||||
Parameters.maxresults,
|
||||
Parameters.timeout,
|
||||
Parameters.restype2,
|
||||
Parameters.comp1
|
||||
],
|
||||
headerParameters: [
|
||||
Parameters.version
|
||||
],
|
||||
responses: {
|
||||
200: {
|
||||
bodyMapper: Mappers.ListFilesAndDirectoriesSegmentResponse,
|
||||
headersMapper: Mappers.DirectoryListFilesAndDirectoriesSegmentHeaders
|
||||
},
|
||||
default: {
|
||||
bodyMapper: Mappers.StorageError
|
||||
}
|
||||
},
|
||||
isXML: true,
|
||||
serializer
|
||||
};
|
|
@ -0,0 +1,624 @@
|
|||
/*
|
||||
* Copyright (c) Microsoft Corporation. All rights reserved.
|
||||
* Licensed under the MIT License. See License.txt in the project root for
|
||||
* license information.
|
||||
*
|
||||
* Code generated by Microsoft (R) AutoRest Code Generator.
|
||||
* Changes may cause incorrect behavior and will be lost if the code is
|
||||
* regenerated.
|
||||
*/
|
||||
|
||||
import * as msRest from "ms-rest-js";
|
||||
import * as Models from "../models";
|
||||
import * as Mappers from "../models/fileMappers";
|
||||
import * as Parameters from "../models/parameters";
|
||||
import { StorageClientContext } from "../storageClientContext";
|
||||
|
||||
/** Class representing a File. */
// NOTE(review): generated by AutoRest (see file header) — manual edits here will be
// lost on regeneration. Each operation follows the same pattern: JSDoc'd overloads
// (promise-only, promise+options, callback, options+callback) funneling into one
// implementation that forwards to the matching *OperationSpec below.
export class File {
  // Shared service client used to issue every request.
  private readonly client: StorageClientContext;

  /**
   * Create a File.
   * @param {StorageClientContext} client Reference to the service client.
   */
  constructor(client: StorageClientContext) {
    this.client = client;
  }

  /**
   * Creates a new file or replaces a file. Note it only initializes the file with no content.
   *
   * @param {number} fileContentLength Specifies the maximum size for the file, up to 1 TB.
   *
   * @param {FileCreateOptionalParams} [options] Optional Parameters.
   *
   * @returns {Promise} A promise is returned
   *
   * @resolve {HttpOperationResponse} The deserialized result object.
   *
   * @reject {Error|ServiceError} The error object.
   */
  create(fileContentLength: number): Promise<Models.FileCreateResponse>;
  create(fileContentLength: number, options: Models.FileCreateOptionalParams): Promise<Models.FileCreateResponse>;
  create(fileContentLength: number, callback: msRest.ServiceCallback<void>): void;
  create(fileContentLength: number, options: Models.FileCreateOptionalParams, callback: msRest.ServiceCallback<void>): void;
  create(fileContentLength: number, options?: Models.FileCreateOptionalParams, callback?: msRest.ServiceCallback<void>): Promise<Models.FileCreateResponse> {
    return this.client.sendOperationRequest(
      {
        fileContentLength,
        options
      },
      createOperationSpec,
      callback) as Promise<Models.FileCreateResponse>;
  }

  /**
   * Reads or downloads a file from the system, including its metadata and properties.
   *
   * @param {FileDownloadOptionalParams} [options] Optional Parameters.
   *
   * @returns {Promise} A promise is returned
   *
   * @resolve {HttpOperationResponse} The deserialized result object.
   *
   * @reject {Error|ServiceError} The error object.
   */
  download(): Promise<Models.FileDownloadResponse>;
  download(options: Models.FileDownloadOptionalParams): Promise<Models.FileDownloadResponse>;
  download(callback: msRest.ServiceCallback<void>): void;
  download(options: Models.FileDownloadOptionalParams, callback: msRest.ServiceCallback<void>): void;
  download(options?: Models.FileDownloadOptionalParams, callback?: msRest.ServiceCallback<void>): Promise<Models.FileDownloadResponse> {
    return this.client.sendOperationRequest(
      {
        options
      },
      downloadOperationSpec,
      callback) as Promise<Models.FileDownloadResponse>;
  }

  /**
   * Returns all user-defined metadata, standard HTTP properties, and system properties for the file.
   * It does not return the content of the file.
   *
   * @param {FileGetPropertiesOptionalParams} [options] Optional Parameters.
   *
   * @returns {Promise} A promise is returned
   *
   * @resolve {HttpOperationResponse} The deserialized result object.
   *
   * @reject {Error|ServiceError} The error object.
   */
  getProperties(): Promise<Models.FileGetPropertiesResponse>;
  getProperties(options: Models.FileGetPropertiesOptionalParams): Promise<Models.FileGetPropertiesResponse>;
  getProperties(callback: msRest.ServiceCallback<void>): void;
  getProperties(options: Models.FileGetPropertiesOptionalParams, callback: msRest.ServiceCallback<void>): void;
  getProperties(options?: Models.FileGetPropertiesOptionalParams, callback?: msRest.ServiceCallback<void>): Promise<Models.FileGetPropertiesResponse> {
    return this.client.sendOperationRequest(
      {
        options
      },
      getPropertiesOperationSpec,
      callback) as Promise<Models.FileGetPropertiesResponse>;
  }

  /**
   * removes the file from the storage account.
   *
   * @param {FileDeleteMethodOptionalParams} [options] Optional Parameters.
   *
   * @returns {Promise} A promise is returned
   *
   * @resolve {HttpOperationResponse} The deserialized result object.
   *
   * @reject {Error|ServiceError} The error object.
   */
  // "deleteMethod" rather than "delete": `delete` is a reserved word in JS/TS.
  deleteMethod(): Promise<Models.FileDeleteResponse>;
  deleteMethod(options: Models.FileDeleteMethodOptionalParams): Promise<Models.FileDeleteResponse>;
  deleteMethod(callback: msRest.ServiceCallback<void>): void;
  deleteMethod(options: Models.FileDeleteMethodOptionalParams, callback: msRest.ServiceCallback<void>): void;
  deleteMethod(options?: Models.FileDeleteMethodOptionalParams, callback?: msRest.ServiceCallback<void>): Promise<Models.FileDeleteResponse> {
    return this.client.sendOperationRequest(
      {
        options
      },
      deleteMethodOperationSpec,
      callback) as Promise<Models.FileDeleteResponse>;
  }

  /**
   * Sets HTTP headers on the file.
   *
   * @param {FileSetHTTPHeadersOptionalParams} [options] Optional Parameters.
   *
   * @returns {Promise} A promise is returned
   *
   * @resolve {HttpOperationResponse} The deserialized result object.
   *
   * @reject {Error|ServiceError} The error object.
   */
  setHTTPHeaders(): Promise<Models.FileSetHTTPHeadersResponse>;
  setHTTPHeaders(options: Models.FileSetHTTPHeadersOptionalParams): Promise<Models.FileSetHTTPHeadersResponse>;
  setHTTPHeaders(callback: msRest.ServiceCallback<void>): void;
  setHTTPHeaders(options: Models.FileSetHTTPHeadersOptionalParams, callback: msRest.ServiceCallback<void>): void;
  setHTTPHeaders(options?: Models.FileSetHTTPHeadersOptionalParams, callback?: msRest.ServiceCallback<void>): Promise<Models.FileSetHTTPHeadersResponse> {
    return this.client.sendOperationRequest(
      {
        options
      },
      setHTTPHeadersOperationSpec,
      callback) as Promise<Models.FileSetHTTPHeadersResponse>;
  }

  /**
   * Updates user-defined metadata for the specified file.
   *
   * @param {FileSetMetadataOptionalParams} [options] Optional Parameters.
   *
   * @returns {Promise} A promise is returned
   *
   * @resolve {HttpOperationResponse} The deserialized result object.
   *
   * @reject {Error|ServiceError} The error object.
   */
  setMetadata(): Promise<Models.FileSetMetadataResponse>;
  setMetadata(options: Models.FileSetMetadataOptionalParams): Promise<Models.FileSetMetadataResponse>;
  setMetadata(callback: msRest.ServiceCallback<void>): void;
  setMetadata(options: Models.FileSetMetadataOptionalParams, callback: msRest.ServiceCallback<void>): void;
  setMetadata(options?: Models.FileSetMetadataOptionalParams, callback?: msRest.ServiceCallback<void>): Promise<Models.FileSetMetadataResponse> {
    return this.client.sendOperationRequest(
      {
        options
      },
      setMetadataOperationSpec,
      callback) as Promise<Models.FileSetMetadataResponse>;
  }

  /**
   * Upload a range of bytes to a file.
   *
   * @param {string} range Specifies the range of bytes to be written. Both the start and end of the
   * range must be specified. For an update operation, the range can be up to 4 MB in size. For a
   * clear operation, the range can be up to the value of the file's full size. The File service
   * accepts only a single byte range for the Range and 'x-ms-range' headers, and the byte range must
   * be specified in the following format: bytes=startByte-endByte.
   *
   * @param {FileRangeWriteType} fileRangeWrite Specify one of the following options: - Update:
   * Writes the bytes specified by the request body into the specified range. The Range and
   * Content-Length headers must match to perform the update. - Clear: Clears the specified range and
   * releases the space used in storage for that range. To clear a range, set the Content-Length
   * header to zero, and set the Range header to a value that indicates the range to clear, up to
   * maximum file size. Possible values include: 'update', 'clear'
   *
   * @param {number} contentLength Specifies the number of bytes being transmitted in the request
   * body. When the x-ms-write header is set to clear, the value of this header must be set to zero.
   *
   * @param {FileUploadRangeOptionalParams} [options] Optional Parameters.
   *
   * @returns {Promise} A promise is returned
   *
   * @resolve {HttpOperationResponse} The deserialized result object.
   *
   * @reject {Error|ServiceError} The error object.
   */
  uploadRange(range: string, fileRangeWrite: Models.FileRangeWriteType, contentLength: number): Promise<Models.FileUploadRangeResponse>;
  uploadRange(range: string, fileRangeWrite: Models.FileRangeWriteType, contentLength: number, options: Models.FileUploadRangeOptionalParams): Promise<Models.FileUploadRangeResponse>;
  uploadRange(range: string, fileRangeWrite: Models.FileRangeWriteType, contentLength: number, callback: msRest.ServiceCallback<void>): void;
  uploadRange(range: string, fileRangeWrite: Models.FileRangeWriteType, contentLength: number, options: Models.FileUploadRangeOptionalParams, callback: msRest.ServiceCallback<void>): void;
  uploadRange(range: string, fileRangeWrite: Models.FileRangeWriteType, contentLength: number, options?: Models.FileUploadRangeOptionalParams, callback?: msRest.ServiceCallback<void>): Promise<Models.FileUploadRangeResponse> {
    return this.client.sendOperationRequest(
      {
        range,
        fileRangeWrite,
        contentLength,
        options
      },
      uploadRangeOperationSpec,
      callback) as Promise<Models.FileUploadRangeResponse>;
  }

  /**
   * Returns the list of valid ranges for a file.
   *
   * @param {FileGetRangeListOptionalParams} [options] Optional Parameters.
   *
   * @returns {Promise} A promise is returned
   *
   * @resolve {HttpOperationResponse} The deserialized result object.
   *
   * @reject {Error|ServiceError} The error object.
   */
  getRangeList(): Promise<Models.FileGetRangeListResponse>;
  getRangeList(options: Models.FileGetRangeListOptionalParams): Promise<Models.FileGetRangeListResponse>;
  getRangeList(callback: msRest.ServiceCallback<Models.Range[]>): void;
  getRangeList(options: Models.FileGetRangeListOptionalParams, callback: msRest.ServiceCallback<Models.Range[]>): void;
  getRangeList(options?: Models.FileGetRangeListOptionalParams, callback?: msRest.ServiceCallback<Models.Range[]>): Promise<Models.FileGetRangeListResponse> {
    return this.client.sendOperationRequest(
      {
        options
      },
      getRangeListOperationSpec,
      callback) as Promise<Models.FileGetRangeListResponse>;
  }

  /**
   * Copies a blob or file to a destination file within the storage account.
   *
   * @param {string} copySource Specifies the URL of the source file or blob, up to 2 KB in length.
   * To copy a file to another file within the same storage account, you may use Shared Key to
   * authenticate the source file. If you are copying a file from another storage account, or if you
   * are copying a blob from the same storage account or another storage account, then you must
   * authenticate the source file or blob using a shared access signature. If the source is a public
   * blob, no authentication is required to perform the copy operation. A file in a share snapshot
   * can also be specified as a copy source.
   *
   * @param {FileStartCopyOptionalParams} [options] Optional Parameters.
   *
   * @returns {Promise} A promise is returned
   *
   * @resolve {HttpOperationResponse} The deserialized result object.
   *
   * @reject {Error|ServiceError} The error object.
   */
  startCopy(copySource: string): Promise<Models.FileStartCopyResponse>;
  startCopy(copySource: string, options: Models.FileStartCopyOptionalParams): Promise<Models.FileStartCopyResponse>;
  startCopy(copySource: string, callback: msRest.ServiceCallback<void>): void;
  startCopy(copySource: string, options: Models.FileStartCopyOptionalParams, callback: msRest.ServiceCallback<void>): void;
  startCopy(copySource: string, options?: Models.FileStartCopyOptionalParams, callback?: msRest.ServiceCallback<void>): Promise<Models.FileStartCopyResponse> {
    return this.client.sendOperationRequest(
      {
        copySource,
        options
      },
      startCopyOperationSpec,
      callback) as Promise<Models.FileStartCopyResponse>;
  }

  /**
   * Aborts a pending Copy File operation, and leaves a destination file with zero length and full
   * metadata.
   *
   * @param {string} copyId The copy identifier provided in the x-ms-copy-id header of the original
   * Copy File operation.
   *
   * @param {FileAbortCopyOptionalParams} [options] Optional Parameters.
   *
   * @returns {Promise} A promise is returned
   *
   * @resolve {HttpOperationResponse} The deserialized result object.
   *
   * @reject {Error|ServiceError} The error object.
   */
  abortCopy(copyId: string): Promise<Models.FileAbortCopyResponse>;
  abortCopy(copyId: string, options: Models.FileAbortCopyOptionalParams): Promise<Models.FileAbortCopyResponse>;
  abortCopy(copyId: string, callback: msRest.ServiceCallback<void>): void;
  abortCopy(copyId: string, options: Models.FileAbortCopyOptionalParams, callback: msRest.ServiceCallback<void>): void;
  abortCopy(copyId: string, options?: Models.FileAbortCopyOptionalParams, callback?: msRest.ServiceCallback<void>): Promise<Models.FileAbortCopyResponse> {
    return this.client.sendOperationRequest(
      {
        copyId,
        options
      },
      abortCopyOperationSpec,
      callback) as Promise<Models.FileAbortCopyResponse>;
  }

}
|
||||
|
||||
// Operation Specifications
// Serializer shared by every spec below; second argument `true` presumably selects
// XML handling (matches `isXML: true` on each spec) — TODO confirm against ms-rest-js.
const serializer = new msRest.Serializer(Mappers, true);
|
||||
// Create File: PUT {shareName}/{directory}/{fileName}; 201 returns headers only
// (file is created with no content — see File.create docs). Generated spec.
const createOperationSpec: msRest.OperationSpec = {
  httpMethod: "PUT",
  path: "{shareName}/{directory}/{fileName}",
  urlParameters: [
    Parameters.url
  ],
  queryParameters: [
    Parameters.timeout
  ],
  headerParameters: [
    Parameters.version,
    Parameters.fileContentLength0,
    Parameters.fileTypeConstant,
    Parameters.fileContentType,
    Parameters.fileContentEncoding,
    Parameters.fileContentLanguage,
    Parameters.fileCacheControl,
    Parameters.fileContentMD5,
    Parameters.fileContentDisposition,
    Parameters.metadata
  ],
  responses: {
    201: {
      headersMapper: Mappers.FileCreateHeaders
    },
    default: {
      bodyMapper: Mappers.StorageError
    }
  },
  isXML: true,
  serializer
};
|
||||
|
||||
// Download File: GET {shareName}/{directory}/{fileName}; 200 (full) and 206 (partial,
// when a Range header is sent) both deliver the body as a raw Stream. Generated spec.
const downloadOperationSpec: msRest.OperationSpec = {
  httpMethod: "GET",
  path: "{shareName}/{directory}/{fileName}",
  urlParameters: [
    Parameters.url
  ],
  queryParameters: [
    Parameters.timeout
  ],
  headerParameters: [
    Parameters.version,
    Parameters.range0,
    Parameters.rangeGetContentMD5
  ],
  responses: {
    200: {
      bodyMapper: {
        serializedName: "parsedResponse",
        type: {
          name: "Stream"
        }
      },
      headersMapper: Mappers.FileDownloadHeaders
    },
    206: {
      bodyMapper: {
        serializedName: "parsedResponse",
        type: {
          name: "Stream"
        }
      },
      headersMapper: Mappers.FileDownloadHeaders
    },
    default: {
      bodyMapper: Mappers.StorageError
    }
  },
  isXML: true,
  serializer
};
|
||||
|
||||
// Get File Properties: HEAD request, so all results come back as response headers
// (no body on success). Supports reading from a share snapshot. Generated spec.
const getPropertiesOperationSpec: msRest.OperationSpec = {
  httpMethod: "HEAD",
  path: "{shareName}/{directory}/{fileName}",
  urlParameters: [
    Parameters.url
  ],
  queryParameters: [
    Parameters.sharesnapshot,
    Parameters.timeout
  ],
  headerParameters: [
    Parameters.version
  ],
  responses: {
    200: {
      headersMapper: Mappers.FileGetPropertiesHeaders
    },
    default: {
      bodyMapper: Mappers.StorageError
    }
  },
  isXML: true,
  serializer
};
|
||||
|
||||
// Delete File: DELETE {shareName}/{directory}/{fileName}; 202 Accepted, headers only.
// Generated spec backing File.deleteMethod.
const deleteMethodOperationSpec: msRest.OperationSpec = {
  httpMethod: "DELETE",
  path: "{shareName}/{directory}/{fileName}",
  urlParameters: [
    Parameters.url
  ],
  queryParameters: [
    Parameters.timeout
  ],
  headerParameters: [
    Parameters.version
  ],
  responses: {
    202: {
      headersMapper: Mappers.FileDeleteHeaders
    },
    default: {
      bodyMapper: Mappers.StorageError
    }
  },
  isXML: true,
  serializer
};
|
||||
|
||||
// Set File HTTP Headers: PUT with comp query (Parameters.comp0) selecting the
// properties sub-resource; updates content-type/encoding/language/etc. Generated spec.
const setHTTPHeadersOperationSpec: msRest.OperationSpec = {
  httpMethod: "PUT",
  path: "{shareName}/{directory}/{fileName}",
  urlParameters: [
    Parameters.url
  ],
  queryParameters: [
    Parameters.timeout,
    Parameters.comp0
  ],
  headerParameters: [
    Parameters.version,
    Parameters.fileContentLength1,
    Parameters.fileContentType,
    Parameters.fileContentEncoding,
    Parameters.fileContentLanguage,
    Parameters.fileCacheControl,
    Parameters.fileContentMD5,
    Parameters.fileContentDisposition
  ],
  responses: {
    200: {
      headersMapper: Mappers.FileSetHTTPHeadersHeaders
    },
    default: {
      bodyMapper: Mappers.StorageError
    }
  },
  isXML: true,
  serializer
};
|
||||
|
||||
// Set File Metadata: PUT with comp query (Parameters.comp3); user metadata is sent
// via the x-ms-meta-* headers mapped by Parameters.metadata. 202 on success. Generated spec.
const setMetadataOperationSpec: msRest.OperationSpec = {
  httpMethod: "PUT",
  path: "{shareName}/{directory}/{fileName}",
  urlParameters: [
    Parameters.url
  ],
  queryParameters: [
    Parameters.timeout,
    Parameters.comp3
  ],
  headerParameters: [
    Parameters.metadata,
    Parameters.version
  ],
  responses: {
    202: {
      headersMapper: Mappers.FileSetMetadataHeaders
    },
    default: {
      bodyMapper: Mappers.StorageError
    }
  },
  isXML: true,
  serializer
};
|
||||
|
||||
// Upload Range: PUT with comp query (Parameters.comp6). The request body is an
// optional raw octet-stream ("optionalbody" — absent for a clear operation, where
// Content-Length must be 0 per File.uploadRange docs). 201 returns headers only.
const uploadRangeOperationSpec: msRest.OperationSpec = {
  httpMethod: "PUT",
  path: "{shareName}/{directory}/{fileName}",
  urlParameters: [
    Parameters.url
  ],
  queryParameters: [
    Parameters.timeout,
    Parameters.comp6
  ],
  headerParameters: [
    Parameters.range1,
    Parameters.fileRangeWrite,
    Parameters.contentLength,
    Parameters.contentMD5,
    Parameters.version
  ],
  requestBody: {
    parameterPath: [
      "options",
      "optionalbody"
    ],
    mapper: {
      serializedName: "optionalbody",
      type: {
        name: "Stream"
      }
    }
  },
  contentType: "application/octet-stream",
  responses: {
    201: {
      headersMapper: Mappers.FileUploadRangeHeaders
    },
    default: {
      bodyMapper: Mappers.StorageError
    }
  },
  isXML: true,
  serializer
};
|
||||
|
||||
// Get Range List: GET with comp query (Parameters.comp7); 200 returns an XML
// <Ranges> sequence deserialized into Range[] composites. Supports share snapshots.
const getRangeListOperationSpec: msRest.OperationSpec = {
  httpMethod: "GET",
  path: "{shareName}/{directory}/{fileName}",
  urlParameters: [
    Parameters.url
  ],
  queryParameters: [
    Parameters.sharesnapshot,
    Parameters.timeout,
    Parameters.comp7
  ],
  headerParameters: [
    Parameters.version,
    Parameters.range0
  ],
  responses: {
    200: {
      bodyMapper: {
        xmlElementName: "Ranges",
        serializedName: "parsedResponse",
        type: {
          name: "Sequence",
          element: {
            type: {
              name: "Composite",
              className: "Range"
            }
          }
        }
      },
      headersMapper: Mappers.FileGetRangeListHeaders
    },
    default: {
      bodyMapper: Mappers.StorageError
    }
  },
  isXML: true,
  serializer
};
|
||||
|
||||
// Start Copy File: PUT carrying the source URL (Parameters.copySource) and optional
// metadata in headers; 202 Accepted — copy proceeds asynchronously (see File.abortCopy).
const startCopyOperationSpec: msRest.OperationSpec = {
  httpMethod: "PUT",
  path: "{shareName}/{directory}/{fileName}",
  urlParameters: [
    Parameters.url
  ],
  queryParameters: [
    Parameters.timeout
  ],
  headerParameters: [
    Parameters.version,
    Parameters.metadata,
    Parameters.copySource
  ],
  responses: {
    202: {
      headersMapper: Mappers.FileStartCopyHeaders
    },
    default: {
      bodyMapper: Mappers.StorageError
    }
  },
  isXML: true,
  serializer
};
|
||||
|
||||
// Abort Copy File: PUT with copyid + comp query (Parameters.comp8) and the constant
// x-ms-copy-action:abort header (Parameters.copyActionAbortConstant); 204 No Content.
const abortCopyOperationSpec: msRest.OperationSpec = {
  httpMethod: "PUT",
  path: "{shareName}/{directory}/{fileName}",
  urlParameters: [
    Parameters.url
  ],
  queryParameters: [
    Parameters.copyId,
    Parameters.timeout,
    Parameters.comp8
  ],
  headerParameters: [
    Parameters.copyActionAbortConstant,
    Parameters.version
  ],
  responses: {
    204: {
      headersMapper: Mappers.FileAbortCopyHeaders
    },
    default: {
      bodyMapper: Mappers.StorageError
    }
  },
  isXML: true,
  serializer
};
|
|
@ -0,0 +1,14 @@
|
|||
/*
|
||||
* Copyright (c) Microsoft Corporation. All rights reserved.
|
||||
* Licensed under the MIT License. See License.txt in the project root for
|
||||
* license information.
|
||||
*
|
||||
* Code generated by Microsoft (R) AutoRest Code Generator.
|
||||
* Changes may cause incorrect behavior and will be lost if the code is
|
||||
* regenerated.
|
||||
*/
|
||||
|
||||
export * from "./service";
|
||||
export * from "./share";
|
||||
export * from "./directory";
|
||||
export * from "./file";
|
|
@ -0,0 +1,197 @@
|
|||
/*
|
||||
* Copyright (c) Microsoft Corporation. All rights reserved.
|
||||
* Licensed under the MIT License. See License.txt in the project root for
|
||||
* license information.
|
||||
*
|
||||
* Code generated by Microsoft (R) AutoRest Code Generator.
|
||||
* Changes may cause incorrect behavior and will be lost if the code is
|
||||
* regenerated.
|
||||
*/
|
||||
|
||||
import * as msRest from "ms-rest-js";
|
||||
import * as Models from "../models";
|
||||
import * as Mappers from "../models/serviceMappers";
|
||||
import * as Parameters from "../models/parameters";
|
||||
import { StorageClientContext } from "../storageClientContext";
|
||||
|
||||
/** Class representing a Service. */
// NOTE(review): generated by AutoRest (see file header) — manual edits will be lost
// on regeneration. Same overload pattern as the other operation-group classes:
// promise/options/callback overloads funneling into one sendOperationRequest call.
export class Service {
  // Shared service client used to issue every request.
  private readonly client: StorageClientContext;

  /**
   * Create a Service.
   * @param {StorageClientContext} client Reference to the service client.
   */
  constructor(client: StorageClientContext) {
    this.client = client;
  }

  /**
   * Sets properties for a storage account's File service endpoint, including properties for Storage
   * Analytics metrics and CORS (Cross-Origin Resource Sharing) rules.
   *
   * @param {StorageServiceProperties} storageServiceProperties The StorageService properties.
   *
   * @param {ServiceSetPropertiesOptionalParams} [options] Optional Parameters.
   *
   * @returns {Promise} A promise is returned
   *
   * @resolve {HttpOperationResponse} The deserialized result object.
   *
   * @reject {Error|ServiceError} The error object.
   */
  setProperties(storageServiceProperties: Models.StorageServiceProperties): Promise<Models.ServiceSetPropertiesResponse>;
  setProperties(storageServiceProperties: Models.StorageServiceProperties, options: Models.ServiceSetPropertiesOptionalParams): Promise<Models.ServiceSetPropertiesResponse>;
  setProperties(storageServiceProperties: Models.StorageServiceProperties, callback: msRest.ServiceCallback<void>): void;
  setProperties(storageServiceProperties: Models.StorageServiceProperties, options: Models.ServiceSetPropertiesOptionalParams, callback: msRest.ServiceCallback<void>): void;
  setProperties(storageServiceProperties: Models.StorageServiceProperties, options?: Models.ServiceSetPropertiesOptionalParams, callback?: msRest.ServiceCallback<void>): Promise<Models.ServiceSetPropertiesResponse> {
    return this.client.sendOperationRequest(
      {
        storageServiceProperties,
        options
      },
      setPropertiesOperationSpec,
      callback) as Promise<Models.ServiceSetPropertiesResponse>;
  }

  /**
   * Gets the properties of a storage account's File service, including properties for Storage
   * Analytics metrics and CORS (Cross-Origin Resource Sharing) rules.
   *
   * @param {ServiceGetPropertiesOptionalParams} [options] Optional Parameters.
   *
   * @returns {Promise} A promise is returned
   *
   * @resolve {HttpOperationResponse} The deserialized result object.
   *
   * @reject {Error|ServiceError} The error object.
   */
  getProperties(): Promise<Models.ServiceGetPropertiesResponse>;
  getProperties(options: Models.ServiceGetPropertiesOptionalParams): Promise<Models.ServiceGetPropertiesResponse>;
  getProperties(callback: msRest.ServiceCallback<Models.StorageServiceProperties>): void;
  getProperties(options: Models.ServiceGetPropertiesOptionalParams, callback: msRest.ServiceCallback<Models.StorageServiceProperties>): void;
  getProperties(options?: Models.ServiceGetPropertiesOptionalParams, callback?: msRest.ServiceCallback<Models.StorageServiceProperties>): Promise<Models.ServiceGetPropertiesResponse> {
    return this.client.sendOperationRequest(
      {
        options
      },
      getPropertiesOperationSpec,
      callback) as Promise<Models.ServiceGetPropertiesResponse>;
  }

  /**
   * The List Shares Segment operation returns a list of the shares and share snapshots under the
   * specified account.
   *
   * @param {ServiceListSharesSegmentOptionalParams} [options] Optional Parameters.
   *
   * @returns {Promise} A promise is returned
   *
   * @resolve {HttpOperationResponse} The deserialized result object.
   *
   * @reject {Error|ServiceError} The error object.
   */
  listSharesSegment(): Promise<Models.ServiceListSharesSegmentResponse>;
  listSharesSegment(options: Models.ServiceListSharesSegmentOptionalParams): Promise<Models.ServiceListSharesSegmentResponse>;
  listSharesSegment(callback: msRest.ServiceCallback<Models.ListSharesResponse>): void;
  listSharesSegment(options: Models.ServiceListSharesSegmentOptionalParams, callback: msRest.ServiceCallback<Models.ListSharesResponse>): void;
  listSharesSegment(options?: Models.ServiceListSharesSegmentOptionalParams, callback?: msRest.ServiceCallback<Models.ListSharesResponse>): Promise<Models.ServiceListSharesSegmentResponse> {
    return this.client.sendOperationRequest(
      {
        options
      },
      listSharesSegmentOperationSpec,
      callback) as Promise<Models.ServiceListSharesSegmentResponse>;
  }

}
|
||||
|
||||
// Operation Specifications
// Serializer shared by every spec below; second argument `true` presumably selects
// XML handling (matches `isXML: true` on each spec) — TODO confirm against ms-rest-js.
const serializer = new msRest.Serializer(Mappers, true);
|
||||
// Set Service Properties: account-level PUT (note: no `path` — addresses the account
// root via restype/comp query params) with an XML StorageServiceProperties body.
// 202 Accepted, headers only. Generated spec.
const setPropertiesOperationSpec: msRest.OperationSpec = {
  httpMethod: "PUT",
  urlParameters: [
    Parameters.url
  ],
  queryParameters: [
    Parameters.timeout,
    Parameters.restype0,
    Parameters.comp0
  ],
  headerParameters: [
    Parameters.version
  ],
  requestBody: {
    parameterPath: "storageServiceProperties",
    mapper: {
      ...Mappers.StorageServiceProperties,
      required: true
    }
  },
  contentType: "application/xml; charset=utf-8",
  responses: {
    202: {
      headersMapper: Mappers.ServiceSetPropertiesHeaders
    },
    default: {
      bodyMapper: Mappers.StorageError
    }
  },
  isXML: true,
  serializer
};
|
||||
|
||||
// Get Service Properties: account-level GET (no `path`; same restype/comp query
// params as set). 200 returns an XML StorageServiceProperties body. Generated spec.
const getPropertiesOperationSpec: msRest.OperationSpec = {
  httpMethod: "GET",
  urlParameters: [
    Parameters.url
  ],
  queryParameters: [
    Parameters.timeout,
    Parameters.restype0,
    Parameters.comp0
  ],
  headerParameters: [
    Parameters.version
  ],
  responses: {
    200: {
      bodyMapper: Mappers.StorageServiceProperties,
      headersMapper: Mappers.ServiceGetPropertiesHeaders
    },
    default: {
      bodyMapper: Mappers.StorageError
    }
  },
  isXML: true,
  serializer
};
|
||||
|
||||
// List Shares Segment: account-level GET with paging query params
// (prefix/marker/maxresults/include); 200 returns an XML ListSharesResponse body.
const listSharesSegmentOperationSpec: msRest.OperationSpec = {
  httpMethod: "GET",
  urlParameters: [
    Parameters.url
  ],
  queryParameters: [
    Parameters.prefix,
    Parameters.marker,
    Parameters.maxresults,
    Parameters.include,
    Parameters.timeout,
    Parameters.comp1
  ],
  headerParameters: [
    Parameters.version
  ],
  responses: {
    200: {
      bodyMapper: Mappers.ListSharesResponse,
      headersMapper: Mappers.ServiceListSharesSegmentHeaders
    },
    default: {
      bodyMapper: Mappers.StorageError
    }
  },
  isXML: true,
  serializer
};
|
|
@ -0,0 +1,524 @@
|
|||
/*
|
||||
* Copyright (c) Microsoft Corporation. All rights reserved.
|
||||
* Licensed under the MIT License. See License.txt in the project root for
|
||||
* license information.
|
||||
*
|
||||
* Code generated by Microsoft (R) AutoRest Code Generator.
|
||||
* Changes may cause incorrect behavior and will be lost if the code is
|
||||
* regenerated.
|
||||
*/
|
||||
|
||||
import * as msRest from "ms-rest-js";
|
||||
import * as Models from "../models";
|
||||
import * as Mappers from "../models/shareMappers";
|
||||
import * as Parameters from "../models/parameters";
|
||||
import { StorageClientContext } from "../storageClientContext";
|
||||
|
||||
/**
 * Class representing a Share.
 *
 * Thin operation-group wrapper: every method forwards to
 * StorageClientContext.sendOperationRequest with the matching OperationSpec
 * declared at the bottom of this file. Each method offers promise and
 * callback overloads. Generated by AutoRest; edits will be lost on
 * regeneration.
 */
export class Share {
  // Shared client/pipeline used to issue the HTTP requests.
  private readonly client: StorageClientContext;

  /**
   * Create a Share.
   * @param {StorageClientContext} client Reference to the service client.
   */
  constructor(client: StorageClientContext) {
    this.client = client;
  }

  /**
   * Creates a new share under the specified account. If the share with the same name already exists,
   * the operation fails.
   *
   * @param {ShareCreateOptionalParams} [options] Optional Parameters.
   *
   * @returns {Promise} A promise is returned
   *
   * @resolve {HttpOperationResponse} The deserialized result object.
   *
   * @reject {Error|ServiceError} The error object.
   */
  create(): Promise<Models.ShareCreateResponse>;
  create(options: Models.ShareCreateOptionalParams): Promise<Models.ShareCreateResponse>;
  create(callback: msRest.ServiceCallback<void>): void;
  create(options: Models.ShareCreateOptionalParams, callback: msRest.ServiceCallback<void>): void;
  create(options?: Models.ShareCreateOptionalParams, callback?: msRest.ServiceCallback<void>): Promise<Models.ShareCreateResponse> {
    return this.client.sendOperationRequest(
      {
        options
      },
      createOperationSpec,
      callback) as Promise<Models.ShareCreateResponse>;
  }

  /**
   * Returns all user-defined metadata and system properties for the specified share or share
   * snapshot. The data returned does not include the share's list of files.
   *
   * @param {ShareGetPropertiesOptionalParams} [options] Optional Parameters.
   *
   * @returns {Promise} A promise is returned
   *
   * @resolve {HttpOperationResponse} The deserialized result object.
   *
   * @reject {Error|ServiceError} The error object.
   */
  getProperties(): Promise<Models.ShareGetPropertiesResponse>;
  getProperties(options: Models.ShareGetPropertiesOptionalParams): Promise<Models.ShareGetPropertiesResponse>;
  getProperties(callback: msRest.ServiceCallback<void>): void;
  getProperties(options: Models.ShareGetPropertiesOptionalParams, callback: msRest.ServiceCallback<void>): void;
  getProperties(options?: Models.ShareGetPropertiesOptionalParams, callback?: msRest.ServiceCallback<void>): Promise<Models.ShareGetPropertiesResponse> {
    return this.client.sendOperationRequest(
      {
        options
      },
      getPropertiesOperationSpec,
      callback) as Promise<Models.ShareGetPropertiesResponse>;
  }

  /**
   * Operation marks the specified share or share snapshot for deletion. The share or share snapshot
   * and any files contained within it are later deleted during garbage collection.
   *
   * @param {ShareDeleteMethodOptionalParams} [options] Optional Parameters.
   *
   * @returns {Promise} A promise is returned
   *
   * @resolve {HttpOperationResponse} The deserialized result object.
   *
   * @reject {Error|ServiceError} The error object.
   */
  deleteMethod(): Promise<Models.ShareDeleteResponse>;
  deleteMethod(options: Models.ShareDeleteMethodOptionalParams): Promise<Models.ShareDeleteResponse>;
  deleteMethod(callback: msRest.ServiceCallback<void>): void;
  deleteMethod(options: Models.ShareDeleteMethodOptionalParams, callback: msRest.ServiceCallback<void>): void;
  deleteMethod(options?: Models.ShareDeleteMethodOptionalParams, callback?: msRest.ServiceCallback<void>): Promise<Models.ShareDeleteResponse> {
    return this.client.sendOperationRequest(
      {
        options
      },
      deleteMethodOperationSpec,
      callback) as Promise<Models.ShareDeleteResponse>;
  }

  /**
   * Creates a read-only snapshot of a share.
   *
   * @param {ShareCreateSnapshotOptionalParams} [options] Optional Parameters.
   *
   * @returns {Promise} A promise is returned
   *
   * @resolve {HttpOperationResponse} The deserialized result object.
   *
   * @reject {Error|ServiceError} The error object.
   */
  createSnapshot(): Promise<Models.ShareCreateSnapshotResponse>;
  createSnapshot(options: Models.ShareCreateSnapshotOptionalParams): Promise<Models.ShareCreateSnapshotResponse>;
  createSnapshot(callback: msRest.ServiceCallback<void>): void;
  createSnapshot(options: Models.ShareCreateSnapshotOptionalParams, callback: msRest.ServiceCallback<void>): void;
  createSnapshot(options?: Models.ShareCreateSnapshotOptionalParams, callback?: msRest.ServiceCallback<void>): Promise<Models.ShareCreateSnapshotResponse> {
    return this.client.sendOperationRequest(
      {
        options
      },
      createSnapshotOperationSpec,
      callback) as Promise<Models.ShareCreateSnapshotResponse>;
  }

  /**
   * Sets quota for the specified share.
   *
   * @param {ShareSetQuotaOptionalParams} [options] Optional Parameters.
   *
   * @returns {Promise} A promise is returned
   *
   * @resolve {HttpOperationResponse} The deserialized result object.
   *
   * @reject {Error|ServiceError} The error object.
   */
  setQuota(): Promise<Models.ShareSetQuotaResponse>;
  setQuota(options: Models.ShareSetQuotaOptionalParams): Promise<Models.ShareSetQuotaResponse>;
  setQuota(callback: msRest.ServiceCallback<void>): void;
  setQuota(options: Models.ShareSetQuotaOptionalParams, callback: msRest.ServiceCallback<void>): void;
  setQuota(options?: Models.ShareSetQuotaOptionalParams, callback?: msRest.ServiceCallback<void>): Promise<Models.ShareSetQuotaResponse> {
    return this.client.sendOperationRequest(
      {
        options
      },
      setQuotaOperationSpec,
      callback) as Promise<Models.ShareSetQuotaResponse>;
  }

  /**
   * Sets one or more user-defined name-value pairs for the specified share.
   *
   * @param {ShareSetMetadataOptionalParams} [options] Optional Parameters.
   *
   * @returns {Promise} A promise is returned
   *
   * @resolve {HttpOperationResponse} The deserialized result object.
   *
   * @reject {Error|ServiceError} The error object.
   */
  setMetadata(): Promise<Models.ShareSetMetadataResponse>;
  setMetadata(options: Models.ShareSetMetadataOptionalParams): Promise<Models.ShareSetMetadataResponse>;
  setMetadata(callback: msRest.ServiceCallback<void>): void;
  setMetadata(options: Models.ShareSetMetadataOptionalParams, callback: msRest.ServiceCallback<void>): void;
  setMetadata(options?: Models.ShareSetMetadataOptionalParams, callback?: msRest.ServiceCallback<void>): Promise<Models.ShareSetMetadataResponse> {
    return this.client.sendOperationRequest(
      {
        options
      },
      setMetadataOperationSpec,
      callback) as Promise<Models.ShareSetMetadataResponse>;
  }

  /**
   * Returns information about stored access policies specified on the share.
   *
   * @param {ShareGetAccessPolicyOptionalParams} [options] Optional Parameters.
   *
   * @returns {Promise} A promise is returned
   *
   * @resolve {HttpOperationResponse} The deserialized result object.
   *
   * @reject {Error|ServiceError} The error object.
   */
  getAccessPolicy(): Promise<Models.ShareGetAccessPolicyResponse>;
  getAccessPolicy(options: Models.ShareGetAccessPolicyOptionalParams): Promise<Models.ShareGetAccessPolicyResponse>;
  getAccessPolicy(callback: msRest.ServiceCallback<Models.SignedIdentifier[]>): void;
  getAccessPolicy(options: Models.ShareGetAccessPolicyOptionalParams, callback: msRest.ServiceCallback<Models.SignedIdentifier[]>): void;
  getAccessPolicy(options?: Models.ShareGetAccessPolicyOptionalParams, callback?: msRest.ServiceCallback<Models.SignedIdentifier[]>): Promise<Models.ShareGetAccessPolicyResponse> {
    return this.client.sendOperationRequest(
      {
        options
      },
      getAccessPolicyOperationSpec,
      callback) as Promise<Models.ShareGetAccessPolicyResponse>;
  }

  /**
   * Sets a stored access policy for use with shared access signatures.
   *
   * @param {ShareSetAccessPolicyOptionalParams} [options] Optional Parameters.
   *
   * @returns {Promise} A promise is returned
   *
   * @resolve {HttpOperationResponse} The deserialized result object.
   *
   * @reject {Error|ServiceError} The error object.
   */
  setAccessPolicy(): Promise<Models.ShareSetAccessPolicyResponse>;
  setAccessPolicy(options: Models.ShareSetAccessPolicyOptionalParams): Promise<Models.ShareSetAccessPolicyResponse>;
  setAccessPolicy(callback: msRest.ServiceCallback<void>): void;
  setAccessPolicy(options: Models.ShareSetAccessPolicyOptionalParams, callback: msRest.ServiceCallback<void>): void;
  setAccessPolicy(options?: Models.ShareSetAccessPolicyOptionalParams, callback?: msRest.ServiceCallback<void>): Promise<Models.ShareSetAccessPolicyResponse> {
    return this.client.sendOperationRequest(
      {
        options
      },
      setAccessPolicyOperationSpec,
      callback) as Promise<Models.ShareSetAccessPolicyResponse>;
  }

  /**
   * Retrieves statistics related to the share.
   *
   * @param {ShareGetStatisticsOptionalParams} [options] Optional Parameters.
   *
   * @returns {Promise} A promise is returned
   *
   * @resolve {HttpOperationResponse} The deserialized result object.
   *
   * @reject {Error|ServiceError} The error object.
   */
  getStatistics(): Promise<Models.ShareGetStatisticsResponse>;
  getStatistics(options: Models.ShareGetStatisticsOptionalParams): Promise<Models.ShareGetStatisticsResponse>;
  getStatistics(callback: msRest.ServiceCallback<Models.ShareStats>): void;
  getStatistics(options: Models.ShareGetStatisticsOptionalParams, callback: msRest.ServiceCallback<Models.ShareStats>): void;
  getStatistics(options?: Models.ShareGetStatisticsOptionalParams, callback?: msRest.ServiceCallback<Models.ShareStats>): Promise<Models.ShareGetStatisticsResponse> {
    return this.client.sendOperationRequest(
      {
        options
      },
      getStatisticsOperationSpec,
      callback) as Promise<Models.ShareGetStatisticsResponse>;
  }

}
|
||||
|
||||
// Operation Specifications
//
// Generated by AutoRest; edits will be lost on regeneration. All share
// operations serialize/deserialize XML; the `true` flag below enables the
// serializer's XML mode.
// NOTE(review): the concrete query values behind restype1/comp0..comp5 live in
// ../models/parameters — descriptions below assume the usual File-service
// values; confirm there.
const serializer = new msRest.Serializer(Mappers, true);

// PUT {shareName}?restype=share — create a new share (metadata/quota headers).
const createOperationSpec: msRest.OperationSpec = {
  httpMethod: "PUT",
  path: "{shareName}",
  urlParameters: [
    Parameters.url
  ],
  queryParameters: [
    Parameters.timeout,
    Parameters.restype1
  ],
  headerParameters: [
    Parameters.metadata,
    Parameters.quota,
    Parameters.version
  ],
  responses: {
    201: {
      headersMapper: Mappers.ShareCreateHeaders
    },
    default: {
      bodyMapper: Mappers.StorageError
    }
  },
  isXML: true,
  serializer
};

// GET {shareName}?restype=share — share properties; supports sharesnapshot.
const getPropertiesOperationSpec: msRest.OperationSpec = {
  httpMethod: "GET",
  path: "{shareName}",
  urlParameters: [
    Parameters.url
  ],
  queryParameters: [
    Parameters.sharesnapshot,
    Parameters.timeout,
    Parameters.restype1
  ],
  headerParameters: [
    Parameters.version
  ],
  responses: {
    200: {
      headersMapper: Mappers.ShareGetPropertiesHeaders
    },
    default: {
      bodyMapper: Mappers.StorageError
    }
  },
  isXML: true,
  serializer
};

// DELETE {shareName}?restype=share — mark share/snapshot for deletion.
const deleteMethodOperationSpec: msRest.OperationSpec = {
  httpMethod: "DELETE",
  path: "{shareName}",
  urlParameters: [
    Parameters.url
  ],
  queryParameters: [
    Parameters.sharesnapshot,
    Parameters.timeout,
    Parameters.restype1
  ],
  headerParameters: [
    Parameters.version,
    Parameters.deleteSnapshots
  ],
  responses: {
    202: {
      headersMapper: Mappers.ShareDeleteHeaders
    },
    default: {
      bodyMapper: Mappers.StorageError
    }
  },
  isXML: true,
  serializer
};

// PUT {shareName}?restype=share&comp=snapshot — create a read-only snapshot.
const createSnapshotOperationSpec: msRest.OperationSpec = {
  httpMethod: "PUT",
  path: "{shareName}",
  urlParameters: [
    Parameters.url
  ],
  queryParameters: [
    Parameters.timeout,
    Parameters.restype1,
    Parameters.comp2
  ],
  headerParameters: [
    Parameters.metadata,
    Parameters.version
  ],
  responses: {
    201: {
      headersMapper: Mappers.ShareCreateSnapshotHeaders
    },
    default: {
      bodyMapper: Mappers.StorageError
    }
  },
  isXML: true,
  serializer
};

// PUT {shareName}?restype=share — set the share quota header.
const setQuotaOperationSpec: msRest.OperationSpec = {
  httpMethod: "PUT",
  path: "{shareName}",
  urlParameters: [
    Parameters.url
  ],
  queryParameters: [
    Parameters.timeout,
    Parameters.restype1,
    Parameters.comp0
  ],
  headerParameters: [
    Parameters.version,
    Parameters.quota
  ],
  responses: {
    200: {
      headersMapper: Mappers.ShareSetQuotaHeaders
    },
    default: {
      bodyMapper: Mappers.StorageError
    }
  },
  isXML: true,
  serializer
};

// PUT {shareName}?restype=share&comp=metadata — replace user metadata.
const setMetadataOperationSpec: msRest.OperationSpec = {
  httpMethod: "PUT",
  path: "{shareName}",
  urlParameters: [
    Parameters.url
  ],
  queryParameters: [
    Parameters.timeout,
    Parameters.restype1,
    Parameters.comp3
  ],
  headerParameters: [
    Parameters.metadata,
    Parameters.version
  ],
  responses: {
    200: {
      headersMapper: Mappers.ShareSetMetadataHeaders
    },
    default: {
      bodyMapper: Mappers.StorageError
    }
  },
  isXML: true,
  serializer
};

// GET {shareName}?restype=share&comp=acl — read stored access policies.
const getAccessPolicyOperationSpec: msRest.OperationSpec = {
  httpMethod: "GET",
  path: "{shareName}",
  urlParameters: [
    Parameters.url
  ],
  queryParameters: [
    Parameters.timeout,
    Parameters.restype1,
    Parameters.comp4
  ],
  headerParameters: [
    Parameters.version
  ],
  responses: {
    200: {
      bodyMapper: {
        // NOTE(review): the service wraps items as
        // <SignedIdentifiers><SignedIdentifier>… — xmlElementName here is the
        // plural wrapper while each element is a SignedIdentifier; confirm the
        // serializer expects the wrapper name (other generated SDKs use the
        // singular element name here).
        xmlElementName: "SignedIdentifiers",
        serializedName: "parsedResponse",
        type: {
          name: "Sequence",
          element: {
            type: {
              name: "Composite",
              className: "SignedIdentifier"
            }
          }
        }
      },
      headersMapper: Mappers.ShareGetAccessPolicyHeaders
    },
    default: {
      bodyMapper: Mappers.StorageError
    }
  },
  isXML: true,
  serializer
};

// PUT {shareName}?restype=share&comp=acl — write stored access policies from
// options.shareAcl, serialized as a SignedIdentifiers XML document.
const setAccessPolicyOperationSpec: msRest.OperationSpec = {
  httpMethod: "PUT",
  path: "{shareName}",
  urlParameters: [
    Parameters.url
  ],
  queryParameters: [
    Parameters.timeout,
    Parameters.restype1,
    Parameters.comp4
  ],
  headerParameters: [
    Parameters.version
  ],
  requestBody: {
    parameterPath: [
      "options",
      "shareAcl"
    ],
    mapper: {
      xmlName: "SignedIdentifiers",
      // NOTE(review): xmlElementName is plural here too — see the matching
      // note on getAccessPolicyOperationSpec; confirm each sequence entry
      // should not be emitted as <SignedIdentifier>.
      xmlElementName: "SignedIdentifiers",
      serializedName: "shareAcl",
      type: {
        name: "Sequence",
        element: {
          type: {
            name: "Composite",
            className: "SignedIdentifier"
          }
        }
      }
    }
  },
  contentType: "application/xml; charset=utf-8",
  responses: {
    200: {
      headersMapper: Mappers.ShareSetAccessPolicyHeaders
    },
    default: {
      bodyMapper: Mappers.StorageError
    }
  },
  isXML: true,
  serializer
};

// GET {shareName}?restype=share&comp=stats — share usage statistics.
const getStatisticsOperationSpec: msRest.OperationSpec = {
  httpMethod: "GET",
  path: "{shareName}",
  urlParameters: [
    Parameters.url
  ],
  queryParameters: [
    Parameters.timeout,
    Parameters.restype1,
    Parameters.comp5
  ],
  headerParameters: [
    Parameters.version
  ],
  responses: {
    200: {
      bodyMapper: Mappers.ShareStats,
      headersMapper: Mappers.ShareGetStatisticsHeaders
    },
    default: {
      bodyMapper: Mappers.StorageError
    }
  },
  isXML: true,
  serializer
};
|
|
@ -0,0 +1,56 @@
|
|||
/*
|
||||
* Copyright (c) Microsoft Corporation. All rights reserved.
|
||||
* Licensed under the MIT License. See License.txt in the project root for
|
||||
* license information.
|
||||
*
|
||||
* Code generated by Microsoft (R) AutoRest Code Generator.
|
||||
* Changes may cause incorrect behavior and will be lost if the code is
|
||||
* regenerated.
|
||||
*/
|
||||
|
||||
import * as msRest from "ms-rest-js";
|
||||
import * as Models from "./models";
|
||||
import * as Mappers from "./models/mappers";
|
||||
import * as operations from "./operations";
|
||||
import { StorageClientContext } from "./storageClientContext";
|
||||
|
||||
/**
 * Root generated client for the File service: wires the four operation groups
 * (service/share/directory/file) to a shared StorageClientContext pipeline.
 * Generated by AutoRest; edits will be lost on regeneration.
 */
class StorageClient extends StorageClientContext {
  // Operation groups
  service: operations.Service;
  share: operations.Share;
  directory: operations.Directory;
  file: operations.File;

  /**
   * @class
   * Initializes a new instance of the StorageClient class.
   * @constructor
   *
   * @param {object} [options] - The parameter options
   *
   * @param {Array} [options.filters] - Filters to be added to the request pipeline
   *
   * @param {object} [options.requestOptions] - The request options. Detailed info can be found at
   * {@link https://github.github.io/fetch/#Request Options doc}
   *
   * @param {boolean} [options.noRetryPolicy] - If set to true, turn off default retry policy
   *
   */
  constructor(version: string, url: string, options?: msRest.ServiceClientOptions) {
    super(version, url, options);
    // Each operation group shares this client's context/pipeline.
    this.service = new operations.Service(this);
    this.share = new operations.Share(this);
    this.directory = new operations.Directory(this);
    this.file = new operations.File(this);
  }
}

// Operation Specifications

export {
  StorageClient,
  StorageClientContext,
  Models as StorageModels,
  Mappers as StorageMappers
};
export * from "./operations";
|
|
@ -0,0 +1,60 @@
|
|||
/*
|
||||
* Copyright (c) Microsoft Corporation. All rights reserved.
|
||||
* Licensed under the MIT License. See License.txt in the project root for
|
||||
* license information.
|
||||
*
|
||||
* Code generated by Microsoft (R) AutoRest Code Generator.
|
||||
* Changes may cause incorrect behavior and will be lost if the code is
|
||||
* regenerated.
|
||||
*/
|
||||
|
||||
import * as msRest from "ms-rest-js";
|
||||
|
||||
// NOTE(review): both values are empty, so addUserAgentInfo below emits a bare
// "/" token — presumably these are filled by the generator/build; confirm.
const packageName = "";
const packageVersion = "";

/**
 * Low-level service-client context: validates constructor arguments, fixes the
 * base URI template to "{url}" (substituted per request via Parameters.url),
 * and records the service API version. Generated by AutoRest; edits will be
 * lost on regeneration.
 */
export class StorageClientContext extends msRest.ServiceClient {
  // Service API version sent with every request.
  version: string;
  // Target endpoint (account, share, directory or file URL).
  url: string;

  /**
   * @class
   * Initializes a new instance of the StorageClientContext class.
   * @constructor
   *
   * @param {string} version - Specifies the version of the operation to use for this request.
   *
   * @param {string} url - The URL of the service account, share, directory or file that is the target of the desired operation.
   *
   * @param {object} [options] - The parameter options
   *
   * @param {Array} [options.filters] - Filters to be added to the request pipeline
   *
   * @param {object} [options.requestOptions] - The request options. Detailed info can be found at
   * {@link https://github.github.io/fetch/#Request Options doc}
   *
   * @param {boolean} [options.noRetryPolicy] - If set to true, turn off default retry policy
   *
   */
  constructor(version: string, url: string, options?: msRest.ServiceClientOptions) {
    if (version === null || version === undefined) {
      throw new Error('\'version\' cannot be null.');
    }
    if (url === null || url === undefined) {
      throw new Error('\'url\' cannot be null.');
    }

    if (!options) {
      options = {};
    }

    // No credentials object: auth is handled by pipeline factories/SAS in the URL.
    super(undefined, options);

    this.baseUri = "{url}";
    this.requestContentType = "application/json; charset=utf-8";
    this.version = version;
    this.url = url;

    this.addUserAgentInfo(`${packageName}/${packageVersion}`);
  }
}
|
|
@ -0,0 +1,156 @@
|
|||
import { generateUuid } from "ms-rest-js";
|
||||
|
||||
import { Aborter } from "./Aborter";
|
||||
import { BlockBlobURL } from "./BlockBlobURL";
|
||||
import {
|
||||
BlobUploadCommonResponse,
|
||||
IUploadToBlockBlobOptions
|
||||
} from "./highlevel.common";
|
||||
import { Batch } from "./utils/Batch";
|
||||
import {
|
||||
BLOB_DEFAULT_DOWNLOAD_BLOCK_BYTES,
|
||||
BLOCK_BLOB_MAX_BLOCKS,
|
||||
BLOCK_BLOB_MAX_STAGE_BLOCK_BYTES,
|
||||
BLOCK_BLOB_MAX_UPLOAD_BLOB_BYTES
|
||||
} from "./utils/constants";
|
||||
import { generateBlockID } from "./utils/utils.common";
|
||||
|
||||
/**
|
||||
* ONLY AVAILABLE IN BROWSERS.
|
||||
*
|
||||
* Uploads a browser Blob/File/ArrayBuffer/ArrayBufferView object to block blob.
|
||||
*
|
||||
* When buffer length <= 256MB, this method will use 1 upload call to finish the upload.
|
||||
* Otherwise, this method will call stageBlock to upload blocks, and finally call commitBlockList
|
||||
* to commit the block list.
|
||||
*
|
||||
* @export
|
||||
* @param {Aborter} aborter Create a new Aborter instance with Aborter.none or Aborter.timeout(),
|
||||
* goto documents of Aborter for more examples about request cancellation
|
||||
* @param {Blob | ArrayBuffer | ArrayBufferView} browserData Blob, File, ArrayBuffer or ArrayBufferView
|
||||
* @param {BlockBlobURL} blockBlobURL
|
||||
* @param {IUploadToBlockBlobOptions} [options]
|
||||
* @returns {Promise<BlobUploadCommonResponse>}
|
||||
*/
|
||||
export async function uploadBrowserDataToBlockBlob(
|
||||
aborter: Aborter,
|
||||
browserData: Blob | ArrayBuffer | ArrayBufferView,
|
||||
blockBlobURL: BlockBlobURL,
|
||||
options?: IUploadToBlockBlobOptions
|
||||
): Promise<BlobUploadCommonResponse> {
|
||||
const browserBlob = new Blob([browserData]);
|
||||
return UploadSeekableBlobToBlockBlob(
|
||||
aborter,
|
||||
(offset: number, size: number): Blob => {
|
||||
return browserBlob.slice(offset, offset + size);
|
||||
},
|
||||
browserBlob.size,
|
||||
blockBlobURL,
|
||||
options
|
||||
);
|
||||
}
|
||||
|
||||
/**
|
||||
* ONLY AVAILABLE IN BROWSERS.
|
||||
*
|
||||
* Uploads a browser Blob object to block blob. Requires a blobFactory as the data source,
|
||||
* which need to return a Blob object with the offset and size provided.
|
||||
*
|
||||
* When buffer length <= 256MB, this method will use 1 upload call to finish the upload.
|
||||
* Otherwise, this method will call stageBlock to upload blocks, and finally call commitBlockList
|
||||
* to commit the block list.
|
||||
*
|
||||
* @param {Aborter} aborter Create a new Aborter instance with Aborter.none or Aborter.timeout(),
|
||||
* goto documents of Aborter for more examples about request cancellation
|
||||
* @param {(offset: number, size: number) => Blob} blobFactory
|
||||
* @param {number} size
|
||||
* @param {BlockBlobURL} blockBlobURL
|
||||
* @param {IUploadToBlockBlobOptions} [options]
|
||||
* @returns {Promise<BlobUploadCommonResponse>}
|
||||
*/
|
||||
async function UploadSeekableBlobToBlockBlob(
|
||||
aborter: Aborter,
|
||||
blobFactory: (offset: number, size: number) => Blob,
|
||||
size: number,
|
||||
blockBlobURL: BlockBlobURL,
|
||||
options: IUploadToBlockBlobOptions = {}
|
||||
): Promise<BlobUploadCommonResponse> {
|
||||
if (!options.blockSize) {
|
||||
options.blockSize = 0;
|
||||
}
|
||||
if (
|
||||
options.blockSize < 0 ||
|
||||
options.blockSize > BLOCK_BLOB_MAX_UPLOAD_BLOB_BYTES
|
||||
) {
|
||||
throw new RangeError(
|
||||
`blockSize option must be >= 0 and <= ${BLOCK_BLOB_MAX_UPLOAD_BLOB_BYTES}`
|
||||
);
|
||||
}
|
||||
if (options.blockSize === 0) {
|
||||
if (size > BLOCK_BLOB_MAX_STAGE_BLOCK_BYTES * BLOCK_BLOB_MAX_BLOCKS) {
|
||||
throw new RangeError(`${size} is too larger to upload to a block blob.`);
|
||||
}
|
||||
if (size > BLOCK_BLOB_MAX_UPLOAD_BLOB_BYTES) {
|
||||
options.blockSize = Math.ceil(size / BLOCK_BLOB_MAX_BLOCKS);
|
||||
if (options.blockSize < BLOB_DEFAULT_DOWNLOAD_BLOCK_BYTES) {
|
||||
options.blockSize = BLOB_DEFAULT_DOWNLOAD_BLOCK_BYTES;
|
||||
}
|
||||
}
|
||||
}
|
||||
if (!options.blobHTTPHeaders) {
|
||||
options.blobHTTPHeaders = {};
|
||||
}
|
||||
if (!options.blobAccessConditions) {
|
||||
options.blobAccessConditions = {};
|
||||
}
|
||||
|
||||
if (size <= BLOCK_BLOB_MAX_UPLOAD_BLOB_BYTES) {
|
||||
return blockBlobURL.upload(aborter, blobFactory(0, size), size, options);
|
||||
}
|
||||
|
||||
const numBlocks: number = Math.floor((size - 1) / options.blockSize) + 1;
|
||||
if (numBlocks > BLOCK_BLOB_MAX_BLOCKS) {
|
||||
throw new RangeError(
|
||||
`The buffer's size is too big or the BlockSize is too small;` +
|
||||
`the number of blocks must be <= ${BLOCK_BLOB_MAX_BLOCKS}`
|
||||
);
|
||||
}
|
||||
|
||||
const blockList: string[] = [];
|
||||
const blockIDPrefix = generateUuid();
|
||||
let transferProgress: number = 0;
|
||||
|
||||
const batch = new Batch(options.parallelism);
|
||||
for (let i = 0; i < numBlocks; i++) {
|
||||
batch.addOperation(
|
||||
async (): Promise<any> => {
|
||||
const blockID = generateBlockID(blockIDPrefix, i);
|
||||
const start = options.blockSize! * i;
|
||||
const end = i === numBlocks - 1 ? size : start + options.blockSize!;
|
||||
const contentLength = end - start;
|
||||
blockList.push(blockID);
|
||||
await blockBlobURL.stageBlock(
|
||||
aborter,
|
||||
blockID,
|
||||
blobFactory(start, contentLength),
|
||||
contentLength,
|
||||
{
|
||||
leaseAccessConditions: options.blobAccessConditions!
|
||||
.leaseAccessConditions
|
||||
}
|
||||
);
|
||||
// Update progress after block is successfully uploaded to server, in case of block trying
|
||||
// TODO: Hook with convenience layer progress event in finer level
|
||||
transferProgress += contentLength;
|
||||
if (options.progress) {
|
||||
options.progress!({
|
||||
loadedBytes: transferProgress
|
||||
});
|
||||
}
|
||||
}
|
||||
);
|
||||
}
|
||||
await batch.do();
|
||||
|
||||
return blockBlobURL.commitBlockList(aborter, blockList, options);
|
||||
}
|
|
@ -0,0 +1,115 @@
|
|||
import { HttpResponse, TransferProgressEvent } from "ms-rest-js";
|
||||
|
||||
import * as Models from "./generated/models";
|
||||
import { IBlobAccessConditions } from "./models";
|
||||
|
||||
/**
 * Option interface for uploadFileToBlockBlob and uploadSeekableStreamToBlockBlob.
 *
 * @export
 * @interface IUploadToBlockBlobOptions
 */
export interface IUploadToBlockBlobOptions {
  /**
   * Size in bytes of each block staged with stageBlock — NOT the size of the
   * destination blob. When 0 or undefined, the high-level upload helpers
   * compute a block size from the total upload size.
   *
   * @type {number}
   * @memberof IUploadToBlockBlobOptions
   */
  blockSize?: number;

  /**
   * Progress updater, invoked with the cumulative number of bytes transferred.
   *
   * @memberof IUploadToBlockBlobOptions
   */
  progress?: (progress: TransferProgressEvent) => void;

  /**
   * Blob HTTP Headers.
   *
   * @type {Models.BlobHTTPHeaders}
   * @memberof IUploadToBlockBlobOptions
   */
  blobHTTPHeaders?: Models.BlobHTTPHeaders;

  /**
   * Metadata of block blob.
   *
   * @type {{ [propertyName: string]: string }}
   * @memberof IUploadToBlockBlobOptions
   */
  metadata?: { [propertyName: string]: string };

  /**
   * Access conditions headers.
   *
   * @type {IBlobAccessConditions}
   * @memberof IUploadToBlockBlobOptions
   */
  blobAccessConditions?: IBlobAccessConditions;

  /**
   * Concurrency of parallel uploading. Must be >= 0.
   *
   * @type {number}
   * @memberof IUploadToBlockBlobOptions
   */
  parallelism?: number;
}
|
||||
|
||||
/**
 * Return type for uploadFileToBlockBlob, uploadStreamToBlockBlob and
 * uploadBrowserDataToBlockBlob: the upload response headers plus the raw
 * HTTP response.
 *
 * @export
 */
export type BlobUploadCommonResponse = Models.BlockBlobUploadHeaders & {
  /**
   * The underlying HTTP response.
   *
   * @type {HttpResponse}
   * @memberof BlobUploadCommonResponse
   */
  _response: HttpResponse;
};
|
||||
|
||||
/**
 * Option interface for DownloadBlockBlobToBuffer.
 *
 * @export
 * @interface IDownloadFromBlobOptions
 */
export interface IDownloadFromBlobOptions {
  /**
   * Size in bytes each download request fetches per block.
   * Must be >= 0; if set to 0 or undefined, blockSize will be automatically
   * calculated according to the blob size.
   *
   * @type {number}
   * @memberof IDownloadFromBlobOptions
   */
  blockSize?: number;

  /**
   * Progress updater, invoked with the cumulative number of bytes transferred.
   *
   * @memberof IDownloadFromBlobOptions
   */
  progress?: (progress: TransferProgressEvent) => void;

  /**
   * Access conditions headers.
   *
   * @type {IBlobAccessConditions}
   * @memberof IDownloadFromBlobOptions
   */
  blobAccessConditions?: IBlobAccessConditions;

  /**
   * Concurrency of parallel download.
   *
   * @type {number}
   * @memberof IDownloadFromBlobOptions
   */
  parallelism?: number;
}
|
|
@ -0,0 +1,367 @@
|
|||
import * as fs from "fs";
|
||||
import { generateUuid, TransferProgressEvent } from "ms-rest-js";
|
||||
import { Readable } from "stream";
|
||||
|
||||
import { Aborter } from "./Aborter";
|
||||
import { BlobURL } from "./BlobURL";
|
||||
import { BlockBlobURL } from "./BlockBlobURL";
|
||||
import { BlobHTTPHeaders } from "./generated/models";
|
||||
import {
|
||||
BlobUploadCommonResponse,
|
||||
IDownloadFromBlobOptions,
|
||||
IUploadToBlockBlobOptions
|
||||
} from "./highlevel.common";
|
||||
import { IBlobAccessConditions } from "./models";
|
||||
import { Batch } from "./utils/Batch";
|
||||
import { BufferScheduler } from "./utils/BufferScheduler";
|
||||
import {
|
||||
BLOB_DEFAULT_DOWNLOAD_BLOCK_BYTES,
|
||||
BLOCK_BLOB_MAX_BLOCKS,
|
||||
BLOCK_BLOB_MAX_STAGE_BLOCK_BYTES,
|
||||
BLOCK_BLOB_MAX_UPLOAD_BLOB_BYTES
|
||||
} from "./utils/constants";
|
||||
import { generateBlockID } from "./utils/utils.common";
|
||||
import { streamToBuffer } from "./utils/utils.node";
|
||||
|
||||
/**
|
||||
* ONLY AVAILABLE IN NODE.JS RUNTIME.
|
||||
*
|
||||
* Uploads a local file in blocks to a block blob.
|
||||
*
|
||||
* When file size <= 256MB, this method will use 1 upload call to finish the upload.
|
||||
* Otherwise, this method will call stageBlock to upload blocks, and finally call commitBlockList
|
||||
* to commit the block list.
|
||||
*
|
||||
* @export
|
||||
* @param {Aborter} aborter Create a new Aborter instance with Aborter.none or Aborter.timeout(),
|
||||
* goto documents of Aborter for more examples about request cancellation
|
||||
* @param {string} filePath Full path of local file
|
||||
* @param {BlockBlobURL} blockBlobURL BlockBlobURL
|
||||
* @param {IUploadToBlockBlobOptions} [options] IUploadToBlockBlobOptions
|
||||
* @returns {(Promise<BlobUploadCommonResponse>)} ICommonResponse
|
||||
*/
|
||||
export async function uploadFileToBlockBlob(
|
||||
aborter: Aborter,
|
||||
filePath: string,
|
||||
blockBlobURL: BlockBlobURL,
|
||||
options?: IUploadToBlockBlobOptions
|
||||
): Promise<BlobUploadCommonResponse> {
|
||||
const size = fs.statSync(filePath).size;
|
||||
return uploadResetableStreamToBlockBlob(
|
||||
aborter,
|
||||
(offset, count) =>
|
||||
fs.createReadStream(filePath, {
|
||||
autoClose: true,
|
||||
end: count ? offset + count - 1 : Infinity,
|
||||
start: offset
|
||||
}),
|
||||
size,
|
||||
blockBlobURL,
|
||||
options
|
||||
);
|
||||
}
|
||||
|
||||
/**
|
||||
* ONLY AVAILABLE IN NODE.JS RUNTIME.
|
||||
*
|
||||
* Accepts a Node.js Readable stream factory, and uploads in blocks to a block blob.
|
||||
* The Readable stream factory must returns a Node.js Readable stream starting from the offset defined. The offset
|
||||
* is the offset in the block blob to be uploaded.
|
||||
*
|
||||
* When buffer length <= 256MB, this method will use 1 upload call to finish the upload.
|
||||
* Otherwise, this method will call stageBlock to upload blocks, and finally call commitBlockList
|
||||
* to commit the block list.
|
||||
*
|
||||
* @export
|
||||
* @param {Aborter} aborter Create a new Aborter instance with Aborter.none or Aborter.timeout(),
|
||||
* goto documents of Aborter for more examples about request cancellation
|
||||
* @param {(offset: number) => NodeJS.ReadableStream} streamFactory Returns a Node.js Readable stream starting
|
||||
* from the offset defined
|
||||
* @param {number} size Size of the block blob
|
||||
* @param {BlockBlobURL} blockBlobURL BlockBlobURL
|
||||
* @param {IUploadToBlockBlobOptions} [options] IUploadToBlockBlobOptions
|
||||
* @returns {(Promise<BlobUploadCommonResponse>)} ICommonResponse
|
||||
*/
|
||||
async function uploadResetableStreamToBlockBlob(
|
||||
aborter: Aborter,
|
||||
streamFactory: (offset: number, count?: number) => NodeJS.ReadableStream,
|
||||
size: number,
|
||||
blockBlobURL: BlockBlobURL,
|
||||
options: IUploadToBlockBlobOptions = {}
|
||||
): Promise<BlobUploadCommonResponse> {
|
||||
if (!options.blockSize) {
|
||||
options.blockSize = 0;
|
||||
}
|
||||
if (
|
||||
options.blockSize < 0 ||
|
||||
options.blockSize > BLOCK_BLOB_MAX_UPLOAD_BLOB_BYTES
|
||||
) {
|
||||
throw new RangeError(
|
||||
`blockSize option must be >= 0 and <= ${BLOCK_BLOB_MAX_UPLOAD_BLOB_BYTES}`
|
||||
);
|
||||
}
|
||||
if (options.blockSize === 0) {
|
||||
if (size > BLOCK_BLOB_MAX_STAGE_BLOCK_BYTES * BLOCK_BLOB_MAX_BLOCKS) {
|
||||
throw new RangeError(`${size} is too larger to upload to a block blob.`);
|
||||
}
|
||||
if (size > BLOCK_BLOB_MAX_UPLOAD_BLOB_BYTES) {
|
||||
options.blockSize = Math.ceil(size / BLOCK_BLOB_MAX_BLOCKS);
|
||||
if (options.blockSize < BLOB_DEFAULT_DOWNLOAD_BLOCK_BYTES) {
|
||||
options.blockSize = BLOB_DEFAULT_DOWNLOAD_BLOCK_BYTES;
|
||||
}
|
||||
}
|
||||
}
|
||||
if (!options.blobHTTPHeaders) {
|
||||
options.blobHTTPHeaders = {};
|
||||
}
|
||||
if (!options.blobAccessConditions) {
|
||||
options.blobAccessConditions = {};
|
||||
}
|
||||
|
||||
if (size <= BLOCK_BLOB_MAX_UPLOAD_BLOB_BYTES) {
|
||||
return blockBlobURL.upload(aborter, () => streamFactory(0), size, options);
|
||||
}
|
||||
|
||||
const numBlocks: number = Math.floor((size - 1) / options.blockSize) + 1;
|
||||
if (numBlocks > BLOCK_BLOB_MAX_BLOCKS) {
|
||||
throw new RangeError(
|
||||
`The buffer's size is too big or the BlockSize is too small;` +
|
||||
`the number of blocks must be <= ${BLOCK_BLOB_MAX_BLOCKS}`
|
||||
);
|
||||
}
|
||||
|
||||
const blockList: string[] = [];
|
||||
const blockIDPrefix = generateUuid();
|
||||
let transferProgress: number = 0;
|
||||
|
||||
const batch = new Batch(options.parallelism);
|
||||
for (let i = 0; i < numBlocks; i++) {
|
||||
batch.addOperation(
|
||||
async (): Promise<any> => {
|
||||
const blockID = generateBlockID(blockIDPrefix, i);
|
||||
const start = options.blockSize! * i;
|
||||
const end = i === numBlocks - 1 ? size : start + options.blockSize!;
|
||||
const contentLength = end - start;
|
||||
blockList.push(blockID);
|
||||
await blockBlobURL.stageBlock(
|
||||
aborter,
|
||||
blockID,
|
||||
() => streamFactory(start, contentLength),
|
||||
contentLength,
|
||||
{
|
||||
leaseAccessConditions: options.blobAccessConditions!
|
||||
.leaseAccessConditions
|
||||
}
|
||||
);
|
||||
// Update progress after block is successfully uploaded to server, in case of block trying
|
||||
transferProgress += contentLength;
|
||||
if (options.progress) {
|
||||
options.progress({ loadedBytes: transferProgress });
|
||||
}
|
||||
}
|
||||
);
|
||||
}
|
||||
await batch.do();
|
||||
|
||||
return blockBlobURL.commitBlockList(aborter, blockList, options);
|
||||
}
|
||||
|
||||
/**
|
||||
* ONLY AVAILABLE IN NODE.JS RUNTIME.
|
||||
*
|
||||
* Downloads an Azure Blob in parallel to a buffer.
|
||||
* Offset and count are optional, pass 0 for both to download the entire blob.
|
||||
*
|
||||
* @export
|
||||
* @param {Aborter} aborter Create a new Aborter instance with Aborter.none or Aborter.timeout(),
|
||||
* goto documents of Aborter for more examples about request cancellation
|
||||
* @param {Buffer} buffer Buffer to be fill, must have length larger than count
|
||||
* @param {BlobURL} blobURL A BlobURL object
|
||||
* @param {number} offset From which position of the block blob to download
|
||||
* @param {number} [count] How much data to be downloaded. Will download to the end when passing undefined
|
||||
* @param {IDownloadFromBlobOptions} [options] IDownloadFromBlobOptions
|
||||
* @returns {Promise<void>}
|
||||
*/
|
||||
export async function downloadBlobToBuffer(
|
||||
aborter: Aborter,
|
||||
buffer: Buffer,
|
||||
blobURL: BlobURL,
|
||||
offset: number,
|
||||
count?: number,
|
||||
options: IDownloadFromBlobOptions = {}
|
||||
): Promise<void> {
|
||||
if (!options.blockSize) {
|
||||
options.blockSize = 0;
|
||||
}
|
||||
if (options.blockSize < 0) {
|
||||
throw new RangeError("blockSize option must be >= 0");
|
||||
}
|
||||
if (options.blockSize === 0) {
|
||||
options.blockSize = BLOB_DEFAULT_DOWNLOAD_BLOCK_BYTES;
|
||||
}
|
||||
|
||||
if (offset < 0) {
|
||||
throw new RangeError("offset option must be >= 0");
|
||||
}
|
||||
|
||||
if (count && count <= 0) {
|
||||
throw new RangeError("count option must be > 0");
|
||||
}
|
||||
|
||||
if (!options.blobAccessConditions) {
|
||||
options.blobAccessConditions = {};
|
||||
}
|
||||
|
||||
// Customer doesn't specify length, get it
|
||||
if (!count) {
|
||||
const response = await blobURL.getProperties(aborter, options);
|
||||
count = response.contentLength! - offset;
|
||||
if (count < 0) {
|
||||
throw new RangeError(
|
||||
`offset ${offset} shouldn't be larger than blob size ${response.contentLength!}`
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
if (buffer.length < count) {
|
||||
throw new RangeError(
|
||||
`The buffer's size should be equal to or larger than the request count of bytes: ${count}`
|
||||
);
|
||||
}
|
||||
|
||||
let transferProgress: number = 0;
|
||||
const batch = new Batch(options.parallelism);
|
||||
for (let off = offset; off < offset + count; off = off + options.blockSize) {
|
||||
batch.addOperation(async () => {
|
||||
const chunkEnd =
|
||||
off + options.blockSize! < count! ? off + options.blockSize! : count!;
|
||||
const response = await blobURL.download(
|
||||
aborter,
|
||||
off,
|
||||
chunkEnd - off + 1,
|
||||
{
|
||||
blobAccessConditions: options.blobAccessConditions
|
||||
}
|
||||
);
|
||||
const stream = response.readableStreamBody!;
|
||||
await streamToBuffer(stream, buffer, off - offset, chunkEnd - offset);
|
||||
// Update progress after block is downloaded, in case of block trying
|
||||
// Could provide finer grained progress updating inside HTTP requests,
|
||||
// only if convenience layer download try is enabled
|
||||
transferProgress += chunkEnd - off;
|
||||
if (options.progress) {
|
||||
options.progress({ loadedBytes: transferProgress });
|
||||
}
|
||||
});
|
||||
}
|
||||
await batch.do();
|
||||
}
|
||||
|
||||
/**
 * Option interface for uploadStreamToBlockBlob.
 *
 * @export
 * @interface IUploadStreamToBlockBlobOptions
 */
export interface IUploadStreamToBlockBlobOptions {
  /**
   * Blob HTTP Headers, applied when the block list is committed.
   *
   * @type {BlobHTTPHeaders}
   * @memberof IUploadStreamToBlockBlobOptions
   */
  blobHTTPHeaders?: BlobHTTPHeaders;

  /**
   * Metadata of block blob.
   *
   * @type {{ [propertyName: string]: string }}
   * @memberof IUploadStreamToBlockBlobOptions
   */
  metadata?: { [propertyName: string]: string };

  /**
   * Access conditions headers. Lease access conditions are also applied to
   * each stageBlock call made during the upload.
   *
   * @type {IBlobAccessConditions}
   * @memberof IUploadStreamToBlockBlobOptions
   */
  accessConditions?: IBlobAccessConditions;

  /**
   * Progress updater. Invoked after each block is uploaded with the
   * cumulative number of bytes transferred so far.
   *
   * @memberof IUploadStreamToBlockBlobOptions
   */
  progress?: (progress: TransferProgressEvent) => void;
}
|
||||
|
||||
/**
|
||||
* ONLY AVAILABLE IN NODE.JS RUNTIME.
|
||||
*
|
||||
* Uploads a Node.js Readable stream into block blob.
|
||||
*
|
||||
* PERFORMANCE IMPROVEMENT TIPS:
|
||||
* * Input stream highWaterMark is better to set a same value with bufferSize
|
||||
* parameter, which will avoid Buffer.concat() operations.
|
||||
*
|
||||
* @export
|
||||
* @param {Aborter} aborter Create a new Aborter instance with Aborter.none or Aborter.timeout(),
|
||||
* goto documents of Aborter for more examples about request cancellation
|
||||
* @param {Readable} stream Node.js Readable stream
|
||||
* @param {BlockBlobURL} blockBlobURL A BlockBlobURL instance
|
||||
* @param {number} bufferSize Size of every buffer allocated, also the block size in the uploaded block blob
|
||||
* @param {number} maxBuffers Max buffers will allocate during uploading, positive correlation
|
||||
* with max uploading concurrency
|
||||
* @param {IUploadStreamToBlockBlobOptions} [options]
|
||||
* @returns {Promise<BlobUploadCommonResponse>}
|
||||
*/
|
||||
export async function uploadStreamToBlockBlob(
|
||||
aborter: Aborter,
|
||||
stream: Readable,
|
||||
blockBlobURL: BlockBlobURL,
|
||||
bufferSize: number,
|
||||
maxBuffers: number,
|
||||
options: IUploadStreamToBlockBlobOptions = {}
|
||||
): Promise<BlobUploadCommonResponse> {
|
||||
if (!options.blobHTTPHeaders) {
|
||||
options.blobHTTPHeaders = {};
|
||||
}
|
||||
if (!options.accessConditions) {
|
||||
options.accessConditions = {};
|
||||
}
|
||||
|
||||
let blockNum = 0;
|
||||
const blockIDPrefix = generateUuid();
|
||||
let transferProgress: number = 0;
|
||||
const blockList: string[] = [];
|
||||
|
||||
const scheduler = new BufferScheduler(
|
||||
stream,
|
||||
bufferSize,
|
||||
maxBuffers,
|
||||
async (buffer: Buffer) => {
|
||||
const blockID = generateBlockID(blockIDPrefix, blockNum);
|
||||
blockList.push(blockID);
|
||||
blockNum++;
|
||||
|
||||
await blockBlobURL.stageBlock(aborter, blockID, buffer, buffer.length, {
|
||||
leaseAccessConditions: options.accessConditions!.leaseAccessConditions
|
||||
});
|
||||
|
||||
// Update progress after block is successfully uploaded to server, in case of block trying
|
||||
transferProgress += buffer.length;
|
||||
if (options.progress) {
|
||||
options.progress({ loadedBytes: transferProgress });
|
||||
}
|
||||
},
|
||||
// Parallelism should set a smaller value than maxBuffers, which is helpful to
|
||||
// reduce the possibility when a outgoing handler waits for stream data, in
|
||||
// this situation, outgoing handlers are blocked.
|
||||
// Outgoing queue shouldn't be empty.
|
||||
Math.ceil((maxBuffers / 4) * 3)
|
||||
);
|
||||
await scheduler.do();
|
||||
|
||||
return blockBlobURL.commitBlockList(aborter, blockList, options);
|
||||
}
|
|
@ -0,0 +1,29 @@
|
|||
import { RestError } from "ms-rest-js";
|
||||
|
||||
import * as Models from "../lib/generated/models";
|
||||
|
||||
export * from "./Aborter";
|
||||
export * from "./AppendBlobURL";
|
||||
export * from "./BlobURL";
|
||||
export * from "./BlockBlobURL";
|
||||
export * from "./ContainerURL";
|
||||
export * from "./credentials/AnonymousCredential";
|
||||
export * from "./credentials/Credential";
|
||||
export * from "./credentials/TokenCredential";
|
||||
export * from "./highlevel.browser";
|
||||
export * from "./highlevel.common";
|
||||
export { IIPRange } from "./IIPRange";
|
||||
export { IRange } from "./IRange";
|
||||
export * from "./PageBlobURL";
|
||||
export * from "./Pipeline";
|
||||
export * from "./policies/AnonymousCredentialPolicy";
|
||||
export * from "./policies/CredentialPolicy";
|
||||
export * from "./RetryPolicyFactory";
|
||||
export * from "./LoggingPolicyFactory";
|
||||
export * from "./TelemetryPolicyFactory";
|
||||
export * from "./policies/TokenCredentialPolicy";
|
||||
export * from "./UniqueRequestIDPolicyFactory";
|
||||
export * from "./SASQueryParameters";
|
||||
export * from "./ServiceURL";
|
||||
export * from "./StorageURL";
|
||||
export { Models, RestError };
|
|
@ -0,0 +1,38 @@
|
|||
import { RestError } from "ms-rest-js";
|
||||
|
||||
import * as Models from "../lib/generated/models";
|
||||
|
||||
export * from "./Aborter";
|
||||
export * from "./AccountSASPermissions";
|
||||
export * from "./AccountSASResourceTypes";
|
||||
export * from "./AccountSASServices";
|
||||
export * from "./IAccountSASSignatureValues";
|
||||
export * from "./AppendBlobURL";
|
||||
export * from "./BlobSASPermissions";
|
||||
export * from "./IBlobSASSignatureValues";
|
||||
export * from "./BlobURL";
|
||||
export * from "./BlockBlobURL";
|
||||
export * from "./ContainerSASPermissions";
|
||||
export * from "./ContainerURL";
|
||||
export * from "./credentials/AnonymousCredential";
|
||||
export * from "./credentials/Credential";
|
||||
export * from "./credentials/SharedKeyCredential";
|
||||
export * from "./credentials/TokenCredential";
|
||||
export * from "./highlevel.browser";
|
||||
export * from "./highlevel.common";
|
||||
export * from "./highlevel.node";
|
||||
export { IIPRange } from "./IIPRange";
|
||||
export { IRange } from "./IRange";
|
||||
export * from "./PageBlobURL";
|
||||
export * from "./Pipeline";
|
||||
export * from "./policies/AnonymousCredentialPolicy";
|
||||
export * from "./policies/CredentialPolicy";
|
||||
export * from "./RetryPolicyFactory";
|
||||
export * from "./LoggingPolicyFactory";
|
||||
export * from "./policies/SharedKeyCredentialPolicy";
|
||||
export * from "./TelemetryPolicyFactory";
|
||||
export * from "./policies/TokenCredentialPolicy";
|
||||
export * from "./UniqueRequestIDPolicyFactory";
|
||||
export * from "./ServiceURL";
|
||||
export * from "./StorageURL";
|
||||
export { Models, RestError };
|
|
@ -0,0 +1,23 @@
|
|||
import * as Models from "./generated/models";
|
||||
|
||||
/**
 * A collection of key/value string pairs attached to a resource as metadata.
 */
export interface IMetadata {
  [propertyName: string]: string;
}

/**
 * Conditions (lease and HTTP conditional headers) that must hold for a
 * container operation to proceed.
 */
export interface IContainerAccessConditions {
  modifiedAccessConditions?: Models.ModifiedAccessConditions;
  leaseAccessConditions?: Models.LeaseAccessConditions;
}

/**
 * Conditions (lease and HTTP conditional headers) that must hold for a
 * blob operation to proceed.
 */
export interface IBlobAccessConditions {
  modifiedAccessConditions?: Models.ModifiedAccessConditions;
  leaseAccessConditions?: Models.LeaseAccessConditions;
}

/**
 * Blob access conditions extended with page-blob sequence number conditions.
 */
export interface IPageBlobAccessConditions extends IBlobAccessConditions {
  sequenceNumberAccessConditions?: Models.SequenceNumberAccessConditions;
}

/**
 * Blob access conditions extended with append-blob position/size conditions.
 */
export interface IAppendBlobAccessConditions extends IBlobAccessConditions {
  appendPositionAccessConditions?: Models.AppendPositionAccessConditions;
}
|
|
@ -0,0 +1,23 @@
|
|||
import { RequestPolicy, RequestPolicyOptions } from "ms-rest-js";
|
||||
|
||||
import { CredentialPolicy } from "./CredentialPolicy";
|
||||
|
||||
/**
 * AnonymousCredentialPolicy is used with HTTP(S) requests that read public resources
 * or for use with Shared Access Signatures (SAS).
 *
 * It adds no authentication header: the inherited signRequest() is the
 * base-class pass-through, so requests are forwarded unsigned.
 *
 * @export
 * @class AnonymousCredentialPolicy
 * @extends {CredentialPolicy}
 */
export class AnonymousCredentialPolicy extends CredentialPolicy {
  /**
   * Creates an instance of AnonymousCredentialPolicy.
   * Simply forwards to the CredentialPolicy constructor.
   *
   * @param {RequestPolicy} nextPolicy
   * @param {RequestPolicyOptions} options
   * @memberof AnonymousCredentialPolicy
   */
  constructor(nextPolicy: RequestPolicy, options: RequestPolicyOptions) {
    super(nextPolicy, options);
  }
}
|
|
@ -0,0 +1,70 @@
|
|||
import {
|
||||
BaseRequestPolicy,
|
||||
HttpOperationResponse,
|
||||
isNode,
|
||||
RequestPolicy,
|
||||
RequestPolicyOptions,
|
||||
WebResource
|
||||
} from "ms-rest-js";
|
||||
|
||||
import { HeaderConstants, URLConstants } from "../utils/constants";
|
||||
import { setURLParameter } from "../utils/utils.common";
|
||||
|
||||
/**
|
||||
* BrowserPolicy will handle differences between Node.js and browser runtime, including:
|
||||
*
|
||||
* 1. Browsers cache GET/HEAD requests by adding conditional headers such as 'IF_MODIFIED_SINCE'.
|
||||
* BrowserPolicy is a policy used to add a timestamp query to GET/HEAD request URL
|
||||
* thus avoid the browser cache.
|
||||
*
|
||||
* 2. Remove cookie header for security
|
||||
*
|
||||
* 3. Remove content-length header to avoid browsers warning
|
||||
*
|
||||
* @class BrowserPolicy
|
||||
* @extends {BaseRequestPolicy}
|
||||
*/
|
||||
export class BrowserPolicy extends BaseRequestPolicy {
|
||||
/**
|
||||
* Creates an instance of BrowserPolicy.
|
||||
* @param {RequestPolicy} nextPolicy
|
||||
* @param {RequestPolicyOptions} options
|
||||
* @memberof BrowserPolicy
|
||||
*/
|
||||
constructor(nextPolicy: RequestPolicy, options: RequestPolicyOptions) {
|
||||
super(nextPolicy, options);
|
||||
}
|
||||
|
||||
/**
|
||||
* Sends out request.
|
||||
*
|
||||
* @param {WebResource} request
|
||||
* @returns {Promise<HttpOperationResponse>}
|
||||
* @memberof BrowserPolicy
|
||||
*/
|
||||
public async sendRequest(
|
||||
request: WebResource
|
||||
): Promise<HttpOperationResponse> {
|
||||
if (isNode) {
|
||||
return this._nextPolicy.sendRequest(request);
|
||||
}
|
||||
|
||||
if (
|
||||
request.method.toUpperCase() === "GET" ||
|
||||
request.method.toUpperCase() === "HEAD"
|
||||
) {
|
||||
request.url = setURLParameter(
|
||||
request.url,
|
||||
URLConstants.Parameters.FORCE_BROWSER_NO_CACHE,
|
||||
new Date().getTime().toString()
|
||||
);
|
||||
}
|
||||
|
||||
request.headers.remove(HeaderConstants.COOKIE);
|
||||
|
||||
// According to XHR standards, content-length should be fully controlled by browsers
|
||||
request.headers.remove(HeaderConstants.CONTENT_LENGTH);
|
||||
|
||||
return this._nextPolicy.sendRequest(request);
|
||||
}
|
||||
}
|
|
@ -0,0 +1,43 @@
|
|||
import {
|
||||
BaseRequestPolicy,
|
||||
HttpOperationResponse,
|
||||
WebResource
|
||||
} from "ms-rest-js";
|
||||
|
||||
/**
 * Credential policy used to sign HTTP(S) requests before sending. This is an
 * abstract class.
 *
 * Template-method pattern: sendRequest() always signs via signRequest() and
 * then forwards to the next policy; concrete credentials only override
 * signRequest().
 *
 * @export
 * @abstract
 * @class CredentialPolicy
 * @extends {BaseRequestPolicy}
 */
export abstract class CredentialPolicy extends BaseRequestPolicy {
  /**
   * Sends out request. The request is signed synchronously before being
   * handed to the next policy in the pipeline.
   *
   * @param {WebResource} request
   * @returns {Promise<HttpOperationResponse>}
   * @memberof CredentialPolicy
   */
  public sendRequest(request: WebResource): Promise<HttpOperationResponse> {
    return this._nextPolicy.sendRequest(this.signRequest(request));
  }

  /**
   * Child classes must implement this method with request signing. This method
   * will be executed in sendRequest().
   *
   * The base implementation is a pass-through (no signing), which is what
   * anonymous-credential policies rely on.
   *
   * @protected
   * @abstract
   * @param {WebResource} request
   * @returns {WebResource}
   * @memberof CredentialPolicy
   */
  protected signRequest(request: WebResource): WebResource {
    // Child classes must override this method with request signing. This method
    // will be executed in sendRequest().
    return request;
  }
}
|
|
@ -0,0 +1,141 @@
|
|||
import {
|
||||
BaseRequestPolicy,
|
||||
HttpOperationResponse,
|
||||
HttpPipelineLogLevel,
|
||||
RequestPolicy,
|
||||
RequestPolicyOptions,
|
||||
WebResource
|
||||
} from "ms-rest-js";
|
||||
|
||||
import { IRequestLogOptions } from "../LoggingPolicyFactory";
|
||||
import { HTTPURLConnection, URLConstants } from "../utils/constants";
|
||||
import { getURLParameter, setURLParameter } from "../utils/utils.common";
|
||||
|
||||
// Default values of IRequestLogOptions
// (threshold is in milliseconds; a try slower than this is logged as a warning)
const DEFAULT_REQUEST_LOG_OPTIONS: IRequestLogOptions = {
  logWarningIfTryOverThreshold: 3000
};
|
||||
|
||||
/**
 * LoggingPolicy is a policy used to log requests.
 *
 * For every try it logs the outgoing URL (with any SAS signature redacted),
 * then logs the outcome at INFO, upgrading to WARNING for slow tries and to
 * ERROR for unexpected HTTP failure status codes.
 *
 * @class LoggingPolicy
 * @extends {BaseRequestPolicy}
 */
export class LoggingPolicy extends BaseRequestPolicy {
  // Number of tries this policy instance has seen (one instance per operation).
  private tryCount: number = 0;
  // Start time of the whole operation (set on the first try).
  private operationStartTime: Date = new Date();
  // Start time of the current try.
  private requestStartTime: Date = new Date();

  private readonly loggingOptions: IRequestLogOptions;

  /**
   * Creates an instance of LoggingPolicy.
   * @param {RequestPolicy} nextPolicy
   * @param {RequestPolicyOptions} options
   * @param {IRequestLogOptions} [loggingOptions=DEFAULT_REQUEST_LOG_OPTIONS]
   * @memberof LoggingPolicy
   */
  constructor(
    nextPolicy: RequestPolicy,
    options: RequestPolicyOptions,
    loggingOptions: IRequestLogOptions = DEFAULT_REQUEST_LOG_OPTIONS
  ) {
    super(nextPolicy, options);
    this.loggingOptions = loggingOptions;
  }

  /**
   * Sends out request, logging before and after it completes.
   *
   * @param {WebResource} request
   * @returns {Promise<HttpOperationResponse>}
   * @memberof LoggingPolicy
   */
  public async sendRequest(
    request: WebResource
  ): Promise<HttpOperationResponse> {
    this.tryCount++;
    this.requestStartTime = new Date();
    if (this.tryCount === 1) {
      this.operationStartTime = this.requestStartTime;
    }

    // Redact the SAS signature query value so it never appears in logs.
    let safeURL: string = request.url;
    if (getURLParameter(safeURL, URLConstants.Parameters.SIGNATURE)) {
      safeURL = setURLParameter(
        safeURL,
        URLConstants.Parameters.SIGNATURE,
        "*****"
      );
    }
    this.log(
      HttpPipelineLogLevel.INFO,
      `'${safeURL}'==> OUTGOING REQUEST (Try number=${this.tryCount}).`
    );

    try {
      const response = await this._nextPolicy.sendRequest(request);

      const requestEndTime = new Date();
      const requestCompletionTime =
        requestEndTime.getTime() - this.requestStartTime.getTime();
      const operationDuration =
        requestEndTime.getTime() - this.operationStartTime.getTime();

      let currentLevel: HttpPipelineLogLevel = HttpPipelineLogLevel.INFO;
      let logMessage: string = "";
      if (this.shouldLog(HttpPipelineLogLevel.INFO)) {
        // Assume success and default to informational logging.
        logMessage = "Successfully Received Response. ";
      }

      // If the response took too long, we'll upgrade to warning.
      if (
        requestCompletionTime >=
        this.loggingOptions.logWarningIfTryOverThreshold
      ) {
        // Log a warning if the try duration exceeded the specified threshold.
        if (this.shouldLog(HttpPipelineLogLevel.WARNING)) {
          currentLevel = HttpPipelineLogLevel.WARNING;
          logMessage = `SLOW OPERATION. Duration > ${
            this.loggingOptions.logWarningIfTryOverThreshold
          } ms. `;
        }
      }

      // Treat 4xx (except the expected-by-callers 404/409/412/416) and
      // 500-509 as errors worth logging at ERROR level.
      if (
        (response.status >= 400 &&
          response.status <= 499 &&
          (response.status !== HTTPURLConnection.HTTP_NOT_FOUND &&
            response.status !== HTTPURLConnection.HTTP_CONFLICT &&
            response.status !== HTTPURLConnection.HTTP_PRECON_FAILED &&
            response.status !==
              HTTPURLConnection.HTTP_RANGE_NOT_SATISFIABLE)) ||
        (response.status >= 500 && response.status <= 509)
      ) {
        const errorString = `REQUEST ERROR: HTTP request failed with status code: ${
          response.status
        }. `;
        logMessage = errorString;

        currentLevel = HttpPipelineLogLevel.ERROR;
      }

      const messageInfo = `Request try:${this.tryCount}, status:${
        response.status
      } request duration:${requestCompletionTime} ms, operation duration:${operationDuration} ms\n`;
      this.log(currentLevel, logMessage + messageInfo);

      return response;
    } catch (err) {
      // Network-level (non-HTTP) failure: log and rethrow unchanged.
      this.log(
        HttpPipelineLogLevel.ERROR,
        `Unexpected failure attempting to make request. Error message: ${
          err.message
        }`
      );
      throw err;
    }
  }
}
|
|
@ -0,0 +1,347 @@
|
|||
import {
|
||||
BaseRequestPolicy,
|
||||
delay,
|
||||
HttpOperationResponse,
|
||||
HttpPipelineLogLevel,
|
||||
RequestPolicy,
|
||||
RequestPolicyFactory,
|
||||
RequestPolicyOptions,
|
||||
RestError,
|
||||
WebResource
|
||||
} from "ms-rest-js";
|
||||
|
||||
import { IRetryOptions } from "../RetryPolicyFactory";
|
||||
import { URLConstants } from "../utils/constants";
|
||||
import { setURLHost, setURLParameter } from "../utils/utils.common";
|
||||
|
||||
/**
|
||||
* A factory method used to generated a RetryPolicy factory.
|
||||
*
|
||||
* @export
|
||||
* @param {IRetryOptions} retryOptions
|
||||
* @returns
|
||||
*/
|
||||
export function NewRetryPolicyFactory(
|
||||
retryOptions?: IRetryOptions
|
||||
): RequestPolicyFactory {
|
||||
return {
|
||||
create: (
|
||||
nextPolicy: RequestPolicy,
|
||||
options: RequestPolicyOptions
|
||||
): RetryPolicy => {
|
||||
return new RetryPolicy(nextPolicy, options, retryOptions);
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
/**
 * RetryPolicy types.
 *
 * @export
 * @enum {number}
 */
export enum RetryPolicyType {
  /**
   * Exponential retry. Retry time delay grows exponentially.
   */
  EXPONENTIAL,
  /**
   * Linear retry. Retry time delay grows linearly.
   * NOTE(review): named FIXED but documented as linear growth — confirm
   * against the delay computation in RetryPolicy.
   */
  FIXED
}
|
||||
|
||||
// Default values of IRetryOptions
const DEFAULT_RETRY_OPTIONS: IRetryOptions = {
  maxRetryDelayInMs: 120 * 1000,
  maxTries: 4,
  retryDelayInMs: 4 * 1000,
  retryPolicyType: RetryPolicyType.EXPONENTIAL,
  secondaryHost: "", // empty = never fail over to a secondary endpoint
  // Unlike the *InMs fields above, this value is passed as the service-side
  // "timeout" query parameter — presumably seconds; confirm against IRetryOptions.
  tryTimeout: 60
};
|
||||
|
||||
/**
|
||||
* Retry policy with exponential retry and linear retry implemented.
|
||||
*
|
||||
* @class RetryPolicy
|
||||
* @extends {BaseRequestPolicy}
|
||||
*/
|
||||
export class RetryPolicy extends BaseRequestPolicy {
|
||||
/**
|
||||
* RetryOptions.
|
||||
*
|
||||
* @private
|
||||
* @type {IRetryOptions}
|
||||
* @memberof RetryPolicy
|
||||
*/
|
||||
private readonly retryOptions: IRetryOptions;
|
||||
|
||||
/**
|
||||
* Creates an instance of RetryPolicy.
|
||||
*
|
||||
* @param {RequestPolicy} nextPolicy
|
||||
* @param {RequestPolicyOptions} options
|
||||
* @param {IRetryOptions} [retryOptions=DEFAULT_RETRY_OPTIONS]
|
||||
* @memberof RetryPolicy
|
||||
*/
|
||||
constructor(
|
||||
nextPolicy: RequestPolicy,
|
||||
options: RequestPolicyOptions,
|
||||
retryOptions: IRetryOptions = DEFAULT_RETRY_OPTIONS
|
||||
) {
|
||||
super(nextPolicy, options);
|
||||
|
||||
// Initialize retry options
|
||||
this.retryOptions = {
|
||||
retryPolicyType: retryOptions.retryPolicyType
|
||||
? retryOptions.retryPolicyType
|
||||
: DEFAULT_RETRY_OPTIONS.retryPolicyType,
|
||||
|
||||
maxTries:
|
||||
retryOptions.maxTries && retryOptions.maxTries >= 1
|
||||
? Math.floor(retryOptions.maxTries)
|
||||
: DEFAULT_RETRY_OPTIONS.maxTries,
|
||||
|
||||
tryTimeout:
|
||||
retryOptions.tryTimeout && retryOptions.tryTimeout >= 0
|
||||
? retryOptions.tryTimeout
|
||||
: DEFAULT_RETRY_OPTIONS.tryTimeout,
|
||||
|
||||
retryDelayInMs:
|
||||
retryOptions.retryDelayInMs && retryOptions.retryDelayInMs >= 0
|
||||
? Math.min(
|
||||
retryOptions.retryDelayInMs,
|
||||
retryOptions.maxRetryDelayInMs
|
||||
? retryOptions.maxRetryDelayInMs
|
||||
: DEFAULT_RETRY_OPTIONS.maxRetryDelayInMs!
|
||||
)
|
||||
: DEFAULT_RETRY_OPTIONS.retryDelayInMs,
|
||||
|
||||
maxRetryDelayInMs:
|
||||
retryOptions.maxRetryDelayInMs && retryOptions.maxRetryDelayInMs >= 0
|
||||
? retryOptions.maxRetryDelayInMs
|
||||
: DEFAULT_RETRY_OPTIONS.maxRetryDelayInMs,
|
||||
|
||||
secondaryHost: retryOptions.secondaryHost
|
||||
? retryOptions.secondaryHost
|
||||
: DEFAULT_RETRY_OPTIONS.secondaryHost
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Sends request.
|
||||
*
|
||||
* @param {WebResource} request
|
||||
* @returns {Promise<HttpOperationResponse>}
|
||||
* @memberof RetryPolicy
|
||||
*/
|
||||
public async sendRequest(
|
||||
request: WebResource
|
||||
): Promise<HttpOperationResponse> {
|
||||
return this.attemptSendRequest(request, false, 1);
|
||||
}
|
||||
|
||||
/**
|
||||
* Decide and perform next retry. Won't mutate request parameter.
|
||||
*
|
||||
* @protected
|
||||
* @param {WebResource} request
|
||||
* @param {HttpOperationResponse} response
|
||||
* @param {boolean} secondaryHas404 If attempt was against the secondary & it returned a StatusNotFound (404), then
|
||||
* the resource was not found. This may be due to replication delay. So, in this
|
||||
* case, we'll never try the secondary again for this operation.
|
||||
* @param {number} attempt How many retries has been attempted to performed, starting from 1, which includes
|
||||
* the attempt will be performed by this method call.
|
||||
* @returns {Promise<HttpOperationResponse>}
|
||||
* @memberof RetryPolicy
|
||||
*/
|
||||
protected async attemptSendRequest(
|
||||
request: WebResource,
|
||||
secondaryHas404: boolean,
|
||||
attempt: number
|
||||
): Promise<HttpOperationResponse> {
|
||||
const newRequest: WebResource = request.clone();
|
||||
|
||||
const isPrimaryRetry =
|
||||
secondaryHas404 ||
|
||||
!this.retryOptions.secondaryHost ||
|
||||
!(
|
||||
request.method === "GET" ||
|
||||
request.method === "HEAD" ||
|
||||
request.method === "OPTIONS"
|
||||
) ||
|
||||
attempt % 2 === 1;
|
||||
|
||||
if (!isPrimaryRetry) {
|
||||
newRequest.url = setURLHost(
|
||||
newRequest.url,
|
||||
this.retryOptions.secondaryHost!
|
||||
);
|
||||
}
|
||||
|
||||
// Set the server-side timeout query parameter "timeout=[seconds]"
|
||||
newRequest.url = setURLParameter(
|
||||
newRequest.url,
|
||||
URLConstants.Parameters.TIMEOUT,
|
||||
this.retryOptions.tryTimeout!.toString()
|
||||
);
|
||||
|
||||
let response: HttpOperationResponse | undefined;
|
||||
try {
|
||||
this.logf(
|
||||
HttpPipelineLogLevel.INFO,
|
||||
`RetryPolicy: =====> Try=${attempt} ${
|
||||
isPrimaryRetry ? "Primary" : "Secondary"
|
||||
}`
|
||||
);
|
||||
response = await this._nextPolicy.sendRequest(newRequest);
|
||||
if (!this.shouldRetry(isPrimaryRetry, attempt, response)) {
|
||||
return response;
|
||||
}
|
||||
|
||||
secondaryHas404 =
|
||||
secondaryHas404 || (!isPrimaryRetry && response.status === 404);
|
||||
} catch (err) {
|
||||
this.logf(
|
||||
HttpPipelineLogLevel.ERROR,
|
||||
`RetryPolicy: Caught error, message: ${err.message}, code: ${err.code}`
|
||||
);
|
||||
if (!this.shouldRetry(isPrimaryRetry, attempt, response, err)) {
|
||||
throw err;
|
||||
}
|
||||
}
|
||||
|
||||
await this.delay(isPrimaryRetry, attempt);
|
||||
return await this.attemptSendRequest(request, secondaryHas404, ++attempt);
|
||||
}
|
||||
|
||||
/**
|
||||
* Decide whether to retry according to last HTTP response and retry counters.
|
||||
*
|
||||
* @protected
|
||||
* @param {boolean} isPrimaryRetry
|
||||
* @param {number} attempt
|
||||
* @param {HttpOperationResponse} [response]
|
||||
* @param {RestError} [err]
|
||||
* @returns {boolean}
|
||||
* @memberof RetryPolicy
|
||||
*/
|
||||
protected shouldRetry(
|
||||
isPrimaryRetry: boolean,
|
||||
attempt: number,
|
||||
response?: HttpOperationResponse,
|
||||
err?: RestError
|
||||
): boolean {
|
||||
if (attempt >= this.retryOptions.maxTries!) {
|
||||
this.logf(
|
||||
HttpPipelineLogLevel.INFO,
|
||||
`RetryPolicy: Attempt(s) ${attempt} >= maxTries ${this.retryOptions
|
||||
.maxTries!}, no further try.`
|
||||
);
|
||||
return false;
|
||||
}
|
||||
|
||||
// Handle network failures, you may need to customize the list when you implement
|
||||
// your own http client
|
||||
const retriableErrors = [
|
||||
"ETIMEDOUT",
|
||||
"ESOCKETTIMEDOUT",
|
||||
"ECONNREFUSED",
|
||||
"ECONNRESET",
|
||||
"ENOENT",
|
||||
"ENOTFOUND",
|
||||
"TIMEOUT",
|
||||
"REQUEST_SEND_ERROR" // For default xhr based http client provided in ms-rest-js
|
||||
];
|
||||
if (err) {
|
||||
for (const retriableError of retriableErrors) {
|
||||
if (
|
||||
err.name.toUpperCase().includes(retriableError) ||
|
||||
err.message.toUpperCase().includes(retriableError) ||
|
||||
(err.code && err.code.toUpperCase().includes(retriableError))
|
||||
) {
|
||||
this.logf(
|
||||
HttpPipelineLogLevel.INFO,
|
||||
`RetryPolicy: Network error ${retriableError} found, will retry.`
|
||||
);
|
||||
return true;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// If attempt was against the secondary & it returned a StatusNotFound (404), then
|
||||
// the resource was not found. This may be due to replication delay. So, in this
|
||||
// case, we'll never try the secondary again for this operation.
|
||||
if (response || err) {
|
||||
const statusCode = response ? response.status : err ? err.statusCode : 0;
|
||||
if (!isPrimaryRetry && statusCode === 404) {
|
||||
this.logf(
|
||||
HttpPipelineLogLevel.INFO,
|
||||
`RetryPolicy: Secondary access with 404, will retry.`
|
||||
);
|
||||
return true;
|
||||
}
|
||||
|
||||
// Server internal error or server timeout
|
||||
if (statusCode === 503 || statusCode === 500) {
|
||||
this.logf(
|
||||
HttpPipelineLogLevel.INFO,
|
||||
`RetryPolicy: Will retry for status code ${statusCode}.`
|
||||
);
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
  /**
   * Logging helper for debugging purposes only; the call below is
   * intentionally commented out for release builds. Uncomment it as
   * necessary when debugging. The underscore-prefixed parameter names keep
   * the compiler/linter quiet while the parameters are unused.
   *
   * @private
   * @param {HttpPipelineLogLevel} level
   * @param {string} message
   * @memberof RetryPolicy
   */
  // tslint:disable-next-line:variable-name
  private logf(_level: HttpPipelineLogLevel, _message: string) {
    // this.log(_level, _message);
  }
|
||||
|
||||
/**
|
||||
* Delay a calculated time between retries.
|
||||
*
|
||||
* @private
|
||||
* @param {boolean} isPrimaryRetry
|
||||
* @param {number} attempt
|
||||
* @returns
|
||||
* @memberof RetryPolicy
|
||||
*/
|
||||
private async delay(isPrimaryRetry: boolean, attempt: number) {
|
||||
let delayTimeInMs: number = 0;
|
||||
|
||||
if (isPrimaryRetry) {
|
||||
switch (this.retryOptions.retryPolicyType) {
|
||||
case RetryPolicyType.EXPONENTIAL:
|
||||
delayTimeInMs = Math.min(
|
||||
(Math.pow(2, attempt - 1) - 1) * this.retryOptions.retryDelayInMs!,
|
||||
this.retryOptions.maxRetryDelayInMs!
|
||||
);
|
||||
break;
|
||||
case RetryPolicyType.FIXED:
|
||||
delayTimeInMs = this.retryOptions.retryDelayInMs!;
|
||||
break;
|
||||
}
|
||||
} else {
|
||||
delayTimeInMs = Math.random() * 1000;
|
||||
}
|
||||
|
||||
this.logf(
|
||||
HttpPipelineLogLevel.INFO,
|
||||
`RetryPolicy: Delay for ${delayTimeInMs}ms`
|
||||
);
|
||||
return delay(delayTimeInMs);
|
||||
}
|
||||
}
|
|
@ -0,0 +1,206 @@
|
|||
import { RequestPolicy, RequestPolicyOptions, WebResource } from "ms-rest-js";
|
||||
import { SharedKeyCredential } from "../credentials/SharedKeyCredential";
|
||||
import { HeaderConstants } from "../utils/constants";
|
||||
import { getURLPath, getURLQueries } from "../utils/utils.common";
|
||||
import { CredentialPolicy } from "./CredentialPolicy";
|
||||
|
||||
/**
 * SharedKeyCredentialPolicy is a policy used to sign HTTP request with a shared key.
 *
 * @export
 * @class SharedKeyCredentialPolicy
 * @extends {CredentialPolicy}
 */
export class SharedKeyCredentialPolicy extends CredentialPolicy {
  /**
   * Reference to SharedKeyCredential which generates SharedKeyCredentialPolicy.
   * Supplies the account name and HMAC-SHA256 signing used below.
   *
   * @type {SharedKeyCredential}
   * @memberof SharedKeyCredentialPolicy
   */
  private readonly factory: SharedKeyCredential;

  /**
   * Creates an instance of SharedKeyCredentialPolicy.
   * @param {RequestPolicy} nextPolicy
   * @param {RequestPolicyOptions} options
   * @param {SharedKeyCredential} factory
   * @memberof SharedKeyCredentialPolicy
   */
  constructor(
    nextPolicy: RequestPolicy,
    options: RequestPolicyOptions,
    factory: SharedKeyCredential
  ) {
    super(nextPolicy, options);
    this.factory = factory;
  }

  /**
   * Signs request: stamps x-ms-date, sets Content-Length for non-empty string
   * bodies, then computes and attaches the SharedKey Authorization header over
   * the canonicalized string-to-sign.
   *
   * @protected
   * @param {WebResource} request
   * @returns {WebResource}
   * @memberof SharedKeyCredentialPolicy
   */
  protected signRequest(request: WebResource): WebResource {
    request.headers.set(HeaderConstants.X_MS_DATE, new Date().toUTCString());

    if (
      request.body &&
      typeof request.body === "string" &&
      request.body.length > 0
    ) {
      // NOTE(review): string .length counts UTF-16 code units, not bytes, so
      // for non-ASCII bodies this may disagree with the actual wire
      // Content-Length — confirm callers only send ASCII/pre-encoded bodies.
      request.headers.set(HeaderConstants.CONTENT_LENGTH, request.body.length);
    }

    // Standard headers in the fixed order mandated by the SharedKey scheme,
    // followed by canonicalized x-ms-* headers and the canonicalized resource.
    const stringToSign: string =
      [
        request.method.toUpperCase(),
        this.getHeaderValueToSign(request, HeaderConstants.CONTENT_LANGUAGE),
        this.getHeaderValueToSign(request, HeaderConstants.CONTENT_ENCODING),
        this.getHeaderValueToSign(request, HeaderConstants.CONTENT_LENGTH),
        this.getHeaderValueToSign(request, HeaderConstants.CONTENT_MD5),
        this.getHeaderValueToSign(request, HeaderConstants.CONTENT_TYPE),
        this.getHeaderValueToSign(request, HeaderConstants.DATE),
        this.getHeaderValueToSign(request, HeaderConstants.IF_MODIFIED_SINCE),
        this.getHeaderValueToSign(request, HeaderConstants.IF_MATCH),
        this.getHeaderValueToSign(request, HeaderConstants.IF_NONE_MATCH),
        this.getHeaderValueToSign(request, HeaderConstants.IF_UNMODIFIED_SINCE),
        this.getHeaderValueToSign(request, HeaderConstants.RANGE)
      ].join("\n") +
      "\n" +
      this.getCanonicalizedHeadersString(request) +
      this.getCanonicalizedResourceString(request);

    const signature: string = this.factory.computeHMACSHA256(stringToSign);
    request.headers.set(
      HeaderConstants.AUTHORIZATION,
      `SharedKey ${this.factory.accountName}:${signature}`
    );

    // Debug aids; uncomment when diagnosing signature mismatches.
    // console.log(`[URL]:${request.url}`);
    // console.log(`[HEADERS]:${request.headers.toString()}`);
    // console.log(`[STRING TO SIGN]:${JSON.stringify(stringToSign)}`);
    // console.log(`[KEY]: ${request.headers.get(HeaderConstants.AUTHORIZATION)}`);
    return request;
  }

  /**
   * Retrieve header value according to shared key sign rules.
   * @see https://docs.microsoft.com/en-us/rest/api/storageservices/authenticate-with-shared-key
   *
   * @private
   * @param {WebResource} request
   * @param {string} headerName
   * @returns {string} the header value, or "" when absent (or when it is a
   *                   zero Content-Length, per the 2015-02-21+ rule below)
   * @memberof SharedKeyCredentialPolicy
   */
  private getHeaderValueToSign(
    request: WebResource,
    headerName: string
  ): string {
    const value = request.headers.get(headerName);

    if (!value) {
      return "";
    }

    // When using version 2015-02-21 or later, if Content-Length is zero, then
    // set the Content-Length part of the StringToSign to an empty string.
    // https://docs.microsoft.com/en-us/rest/api/storageservices/authenticate-with-shared-key
    if (headerName === HeaderConstants.CONTENT_LENGTH && value === "0") {
      return "";
    }

    return value;
  }

  /**
   * To construct the CanonicalizedHeaders portion of the signature string, follow these steps:
   * 1. Retrieve all headers for the resource that begin with x-ms-, including the x-ms-date header.
   * 2. Convert each HTTP header name to lowercase.
   * 3. Sort the headers lexicographically by header name, in ascending order.
   *    Each header may appear only once in the string.
   * 4. Replace any linear whitespace in the header value with a single space.
   * 5. Trim any whitespace around the colon in the header.
   * 6. Finally, append a new-line character to each canonicalized header in the resulting list.
   *    Construct the CanonicalizedHeaders string by concatenating all headers in this list into a single string.
   *
   * @private
   * @param {WebResource} request
   * @returns {string}
   * @memberof SharedKeyCredentialPolicy
   */
  private getCanonicalizedHeadersString(request: WebResource): string {
    // Keep only x-ms-* headers (case-insensitively).
    let headersArray = request.headers.headersArray().filter(value => {
      return value.name
        .toLowerCase()
        .startsWith(HeaderConstants.PREFIX_FOR_STORAGE);
    });

    headersArray.sort(
      (a, b): number => {
        return a.name.toLowerCase().localeCompare(b.name.toLowerCase());
      }
    );

    // Remove duplicate headers — sorting above guarantees duplicates are
    // adjacent, so only the first occurrence of each name survives.
    headersArray = headersArray.filter((value, index, array) => {
      if (
        index > 0 &&
        value.name.toLowerCase() === array[index - 1].name.toLowerCase()
      ) {
        return false;
      }
      return true;
    });

    let canonicalizedHeadersStringToSign: string = "";
    headersArray.forEach(header => {
      canonicalizedHeadersStringToSign += `${header.name
        .toLowerCase()
        .trimRight()}:${header.value.trimLeft()}\n`;
    });

    return canonicalizedHeadersStringToSign;
  }

  /**
   * Retrieves the webResource canonicalized resource string:
   * "/<account>/<path>" followed by each lowercase query key/value pair on its
   * own "\n<key>:<value>" line, keys sorted ascending, values URL-decoded.
   *
   * @private
   * @param {WebResource} request
   * @returns {string}
   * @memberof SharedKeyCredentialPolicy
   */
  private getCanonicalizedResourceString(request: WebResource): string {
    const path = encodeURI(getURLPath(request.url) || "/");

    let canonicalizedResourceString: string = "";
    canonicalizedResourceString += `/${this.factory.accountName}${path}`;

    const queries = getURLQueries(request.url);
    const lowercaseQueries: { [key: string]: string } = {};
    if (queries) {
      const queryKeys: string[] = [];
      for (const key in queries) {
        if (queries.hasOwnProperty(key)) {
          const lowercaseKey = key.toLowerCase();
          lowercaseQueries[lowercaseKey] = queries[key];
          queryKeys.push(lowercaseKey);
        }
      }

      queryKeys.sort();
      for (const key of queryKeys) {
        canonicalizedResourceString += `\n${key}:${decodeURIComponent(
          lowercaseQueries[key]
        )}`;
      }
    }

    return canonicalizedResourceString;
  }
}
|
|
@ -0,0 +1,65 @@
|
|||
import {
|
||||
BaseRequestPolicy,
|
||||
HttpHeaders,
|
||||
HttpOperationResponse,
|
||||
isNode,
|
||||
RequestPolicy,
|
||||
RequestPolicyOptions,
|
||||
WebResource
|
||||
} from "ms-rest-js";
|
||||
|
||||
import { HeaderConstants } from "../utils/constants";
|
||||
|
||||
/**
|
||||
* TelemetryPolicy is a policy used to tag user-agent header for every requests.
|
||||
*
|
||||
* @class TelemetryPolicy
|
||||
* @extends {BaseRequestPolicy}
|
||||
*/
|
||||
export class TelemetryPolicy extends BaseRequestPolicy {
|
||||
/**
|
||||
* Telemetry string.
|
||||
*
|
||||
* @type {string}
|
||||
* @memberof TelemetryPolicy
|
||||
*/
|
||||
public readonly telemetry: string;
|
||||
|
||||
/**
|
||||
* Creates an instance of TelemetryPolicy.
|
||||
* @param {RequestPolicy} nextPolicy
|
||||
* @param {RequestPolicyOptions} options
|
||||
* @param {ITelemetryOptions} [telemetry]
|
||||
* @memberof TelemetryPolicy
|
||||
*/
|
||||
constructor(
|
||||
nextPolicy: RequestPolicy,
|
||||
options: RequestPolicyOptions,
|
||||
telemetry: string
|
||||
) {
|
||||
super(nextPolicy, options);
|
||||
this.telemetry = telemetry;
|
||||
}
|
||||
|
||||
/**
|
||||
* Sends out request.
|
||||
*
|
||||
* @param {WebResource} request
|
||||
* @returns {Promise<HttpOperationResponse>}
|
||||
* @memberof TelemetryPolicy
|
||||
*/
|
||||
public async sendRequest(
|
||||
request: WebResource
|
||||
): Promise<HttpOperationResponse> {
|
||||
if (isNode) {
|
||||
if (!request.headers) {
|
||||
request.headers = new HttpHeaders();
|
||||
}
|
||||
if (!request.headers.get(HeaderConstants.USER_AGENT)) {
|
||||
request.headers.set(HeaderConstants.USER_AGENT, this.telemetry);
|
||||
}
|
||||
}
|
||||
|
||||
return this._nextPolicy.sendRequest(request);
|
||||
}
|
||||
}
|
|
@ -0,0 +1,72 @@
|
|||
import {
|
||||
HttpHeaders,
|
||||
RequestPolicy,
|
||||
RequestPolicyOptions,
|
||||
WebResource
|
||||
} from "ms-rest-js";
|
||||
|
||||
import { TokenCredential } from "../credentials/TokenCredential";
|
||||
import { HeaderConstants } from "../utils/constants";
|
||||
import { CredentialPolicy } from "./CredentialPolicy";
|
||||
|
||||
/**
|
||||
* TokenCredentialPolicy is a policy used to sign HTTP request with a token.
|
||||
* Such as an OAuth bearer token.
|
||||
*
|
||||
* @export
|
||||
* @class TokenCredentialPolicy
|
||||
* @extends {CredentialPolicy}
|
||||
*/
|
||||
export class TokenCredentialPolicy extends CredentialPolicy {
|
||||
/**
|
||||
* The value of token.
|
||||
*
|
||||
* @type {TokenCredential}
|
||||
* @memberof TokenCredentialPolicy
|
||||
*/
|
||||
public readonly tokenCredential: TokenCredential;
|
||||
|
||||
/**
|
||||
* Token authorization scheme, default header is "Bearer".
|
||||
*
|
||||
* @type {string}
|
||||
* @memberof TokenCredentialPolicy
|
||||
*/
|
||||
public readonly authorizationScheme: string;
|
||||
|
||||
/**
|
||||
* Creates an instance of TokenCredentialPolicy.
|
||||
* @param {RequestPolicy} nextPolicy
|
||||
* @param {RequestPolicyOptions} options
|
||||
* @param {TokenCredential} tokenCredential
|
||||
* @memberof TokenCredentialPolicy
|
||||
*/
|
||||
constructor(
|
||||
nextPolicy: RequestPolicy,
|
||||
options: RequestPolicyOptions,
|
||||
tokenCredential: TokenCredential
|
||||
) {
|
||||
super(nextPolicy, options);
|
||||
this.tokenCredential = tokenCredential;
|
||||
this.authorizationScheme = HeaderConstants.AUTHORIZATION_SCHEME;
|
||||
}
|
||||
|
||||
/**
|
||||
* Sign request with token.
|
||||
*
|
||||
* @protected
|
||||
* @param {WebResource} request
|
||||
* @returns {WebResource}
|
||||
* @memberof TokenCredentialPolicy
|
||||
*/
|
||||
protected signRequest(request: WebResource): WebResource {
|
||||
if (!request.headers) {
|
||||
request.headers = new HttpHeaders();
|
||||
}
|
||||
request.headers.set(
|
||||
HeaderConstants.AUTHORIZATION,
|
||||
`${this.authorizationScheme} ${this.tokenCredential.token}`
|
||||
);
|
||||
return request;
|
||||
}
|
||||
}
|
|
@ -0,0 +1,48 @@
|
|||
import {
|
||||
BaseRequestPolicy,
|
||||
generateUuid,
|
||||
HttpOperationResponse,
|
||||
RequestPolicy,
|
||||
RequestPolicyOptions,
|
||||
WebResource
|
||||
} from "ms-rest-js";
|
||||
|
||||
import { HeaderConstants } from "../utils/constants";
|
||||
|
||||
/**
|
||||
* UniqueRequestIDPolicy generates an UUID as x-ms-request-id header value.
|
||||
*
|
||||
* @class UniqueRequestIDPolicy
|
||||
* @extends {BaseRequestPolicy}
|
||||
*/
|
||||
export class UniqueRequestIDPolicy extends BaseRequestPolicy {
|
||||
/**
|
||||
* Creates an instance of UniqueRequestIDPolicy.
|
||||
* @param {RequestPolicy} nextPolicy
|
||||
* @param {RequestPolicyOptions} options
|
||||
* @memberof UniqueRequestIDPolicy
|
||||
*/
|
||||
constructor(nextPolicy: RequestPolicy, options: RequestPolicyOptions) {
|
||||
super(nextPolicy, options);
|
||||
}
|
||||
|
||||
/**
|
||||
* Sends request.
|
||||
*
|
||||
* @param {WebResource} request
|
||||
* @returns {Promise<HttpOperationResponse>}
|
||||
* @memberof UniqueRequestIDPolicy
|
||||
*/
|
||||
public async sendRequest(
|
||||
request: WebResource
|
||||
): Promise<HttpOperationResponse> {
|
||||
if (!request.headers.contains(HeaderConstants.X_MS_CLIENT_REQUEST_ID)) {
|
||||
request.headers.set(
|
||||
HeaderConstants.X_MS_CLIENT_REQUEST_ID,
|
||||
generateUuid()
|
||||
);
|
||||
}
|
||||
|
||||
return this._nextPolicy.sendRequest(request);
|
||||
}
|
||||
}
|
|
@ -0,0 +1,185 @@
|
|||
// In browser, during webpack or browserify bundling, this module will be replaced by 'events'
|
||||
// https://github.com/Gozala/events
|
||||
import { EventEmitter } from "events";
|
||||
|
||||
/**
|
||||
* Operation is an async function to be executed and managed by Batch.
|
||||
*/
|
||||
export declare type Operation = () => Promise<any>;
|
||||
|
||||
/**
|
||||
* States for Batch.
|
||||
*
|
||||
* @enum {number}
|
||||
*/
|
||||
enum BatchStates {
|
||||
Good,
|
||||
Error
|
||||
}
|
||||
|
||||
/**
|
||||
* Batch provides basic parallel execution with concurrency limits.
|
||||
* Will stop execute left operations when one of the executed operation throws an error.
|
||||
* But Batch cannot cancel ongoing operations, you need to cancel them by yourself.
|
||||
*
|
||||
* @export
|
||||
* @class Batch
|
||||
*/
|
||||
export class Batch {
|
||||
/**
|
||||
* Concurrency. Must be lager than 0.
|
||||
*
|
||||
* @type {number}
|
||||
* @memberof Batch
|
||||
*/
|
||||
private concurrency: number;
|
||||
|
||||
/**
|
||||
* Number of active operations under execution.
|
||||
*
|
||||
* @private
|
||||
* @type {number}
|
||||
* @memberof Batch
|
||||
*/
|
||||
private actives: number = 0;
|
||||
|
||||
/**
|
||||
* Number of completed operations under execution.
|
||||
*
|
||||
* @private
|
||||
* @type {number}
|
||||
* @memberof Batch
|
||||
*/
|
||||
private completed: number = 0;
|
||||
|
||||
/**
|
||||
* Offset of next operation to be executed.
|
||||
*
|
||||
* @private
|
||||
* @type {number}
|
||||
* @memberof Batch
|
||||
*/
|
||||
private offset: number = 0;
|
||||
|
||||
/**
|
||||
* Operation array to be executed.
|
||||
*
|
||||
* @private
|
||||
* @type {Operation[]}
|
||||
* @memberof Batch
|
||||
*/
|
||||
private operations: Operation[] = [];
|
||||
|
||||
/**
|
||||
* States of Batch. When an error happens, state will turn into error.
|
||||
* Batch will stop execute left operations.
|
||||
*
|
||||
* @private
|
||||
* @type {BatchStates}
|
||||
* @memberof Batch
|
||||
*/
|
||||
private state: BatchStates = BatchStates.Good;
|
||||
|
||||
/**
|
||||
* A private emitter used to pass events inside this class.
|
||||
*
|
||||
* @private
|
||||
* @type {EventEmitter}
|
||||
* @memberof Batch
|
||||
*/
|
||||
private emitter: EventEmitter;
|
||||
|
||||
/**
|
||||
* Creates an instance of Batch.
|
||||
* @param {number} [concurrency=5]
|
||||
* @memberof Batch
|
||||
*/
|
||||
public constructor(concurrency: number = 5) {
|
||||
if (concurrency < 1) {
|
||||
throw new RangeError("concurrency must be larger than 0");
|
||||
}
|
||||
this.concurrency = concurrency;
|
||||
this.emitter = new EventEmitter();
|
||||
}
|
||||
|
||||
/**
|
||||
* Add a operation into queue.
|
||||
*
|
||||
* @param {Operation} operation
|
||||
* @memberof Batch
|
||||
*/
|
||||
public addOperation(operation: Operation): void {
|
||||
this.operations.push(async () => {
|
||||
try {
|
||||
this.actives++;
|
||||
await operation();
|
||||
this.actives--;
|
||||
this.completed++;
|
||||
this.parallelExecute();
|
||||
} catch (error) {
|
||||
this.emitter.emit("error", error);
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Start execute operations in the queue.
|
||||
*
|
||||
* @returns {Promise<void>}
|
||||
* @memberof Batch
|
||||
*/
|
||||
public async do(): Promise<void> {
|
||||
this.parallelExecute();
|
||||
|
||||
return new Promise<void>((resolve, reject) => {
|
||||
this.emitter.on("finish", resolve);
|
||||
|
||||
this.emitter.on("error", error => {
|
||||
this.state = BatchStates.Error;
|
||||
reject(error);
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Get next operation to be executed. Return null when reaching ends.
|
||||
*
|
||||
* @private
|
||||
* @returns {(Operation | null)}
|
||||
* @memberof Batch
|
||||
*/
|
||||
private nextOperation(): Operation | null {
|
||||
if (this.offset < this.operations.length) {
|
||||
return this.operations[this.offset++];
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
/**
|
||||
* Start execute operations. One one the most important difference between
|
||||
* this method with do() is that do() wraps as an sync method.
|
||||
*
|
||||
* @private
|
||||
* @returns {void}
|
||||
* @memberof Batch
|
||||
*/
|
||||
private parallelExecute(): void {
|
||||
if (this.state === BatchStates.Error) {
|
||||
return;
|
||||
}
|
||||
|
||||
if (this.completed >= this.operations.length) {
|
||||
this.emitter.emit("finish");
|
||||
return;
|
||||
}
|
||||
|
||||
while (this.actives < this.concurrency) {
|
||||
const operation = this.nextOperation();
|
||||
if (operation) {
|
||||
operation();
|
||||
} else {
|
||||
return;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
|
@ -0,0 +1,439 @@
|
|||
import { EventEmitter } from "events";
|
||||
import { Readable } from "stream";
|
||||
|
||||
/**
|
||||
* OutgoingHandler is an async function triggered by BufferScheduler.
|
||||
*/
|
||||
export declare type OutgoingHandler = (
|
||||
buffer: Buffer,
|
||||
offset?: number
|
||||
) => Promise<any>;
|
||||
|
||||
/**
|
||||
* This class accepts a Node.js Readable stream as input, and keeps reading data
|
||||
* from the stream into the internal buffer structure, until it reaches maxBuffers.
|
||||
* Every available buffer will try to trigger outgoingHandler.
|
||||
*
|
||||
* The internal buffer structure includes an incoming buffer array, and a outgoing
|
||||
* buffer array. The incoming buffer array includes the "empty" buffers can be filled
|
||||
* with new incoming data. The outgoing array includes the filled buffers to be
|
||||
* handled by outgoingHandler. Every above buffer size is defined by parameter bufferSize.
|
||||
*
|
||||
* NUM_OF_ALL_BUFFERS = BUFFERS_IN_INCOMING + BUFFERS_IN_OUTGOING + BUFFERS_UNDER_HANDLING
|
||||
*
|
||||
* NUM_OF_ALL_BUFFERS <= maxBuffers
|
||||
*
|
||||
* PERFORMANCE IMPROVEMENT TIPS:
|
||||
* 1. Input stream highWaterMark is better to set a same value with bufferSize
|
||||
* parameter, which will avoid Buffer.concat() operations.
|
||||
* 2. Parallelism should set a smaller value than maxBuffers, which is helpful to
|
||||
* reduce the possibility when a outgoing handler waits for the stream data.
|
||||
* in this situation, outgoing handlers are blocked.
|
||||
* Outgoing queue shouldn't be empty.
|
||||
* @export
|
||||
* @class BufferScheduler
|
||||
*/
|
||||
export class BufferScheduler {
|
||||
/**
|
||||
* Size of buffers in incoming and outgoing queues. This class will try to align
|
||||
* data read from Readable stream into buffer chunks with bufferSize defined.
|
||||
*
|
||||
* @private
|
||||
* @type {number}
|
||||
* @memberof BufferScheduler
|
||||
*/
|
||||
private readonly bufferSize: number;
|
||||
|
||||
/**
|
||||
* How many buffers can be created or maintained.
|
||||
*
|
||||
* @private
|
||||
* @type {number}
|
||||
* @memberof BufferScheduler
|
||||
*/
|
||||
private readonly maxBuffers: number;
|
||||
|
||||
/**
|
||||
* A Node.js Readable stream.
|
||||
*
|
||||
* @private
|
||||
* @type {Readable}
|
||||
* @memberof BufferScheduler
|
||||
*/
|
||||
private readonly readable: Readable;
|
||||
|
||||
/**
|
||||
* OutgoingHandler is an async function triggered by BufferScheduler when there
|
||||
* are available buffers in outgoing array.
|
||||
*
|
||||
* @private
|
||||
* @type {OutgoingHandler}
|
||||
* @memberof BufferScheduler
|
||||
*/
|
||||
private readonly outgoingHandler: OutgoingHandler;
|
||||
|
||||
/**
|
||||
* An internal event emitter.
|
||||
*
|
||||
* @private
|
||||
* @type {EventEmitter}
|
||||
* @memberof BufferScheduler
|
||||
*/
|
||||
private readonly emitter: EventEmitter = new EventEmitter();
|
||||
|
||||
/**
|
||||
* Concurrency of executing outgoingHandlers. (0 < parallelism <= maxBuffers)
|
||||
*
|
||||
* @private
|
||||
* @type {number}
|
||||
* @memberof BufferScheduler
|
||||
*/
|
||||
private readonly parallelism: number;
|
||||
|
||||
/**
|
||||
* An internal offset marker to track data offset in bytes of next outgoingHandler.
|
||||
*
|
||||
* @private
|
||||
* @type {number}
|
||||
* @memberof BufferScheduler
|
||||
*/
|
||||
private offset: number = 0;
|
||||
|
||||
/**
|
||||
* An internal marker to track whether stream is end.
|
||||
*
|
||||
* @private
|
||||
* @type {boolean}
|
||||
* @memberof BufferScheduler
|
||||
*/
|
||||
private isStreamEnd: boolean = false;
|
||||
|
||||
/**
|
||||
* An internal marker to track whether stream or outgoingHandler returns error.
|
||||
*
|
||||
* @private
|
||||
* @type {boolean}
|
||||
* @memberof BufferScheduler
|
||||
*/
|
||||
private isError: boolean = false;
|
||||
|
||||
/**
|
||||
* How many handlers are executing.
|
||||
*
|
||||
* @private
|
||||
* @type {number}
|
||||
* @memberof BufferScheduler
|
||||
*/
|
||||
private executingOutgoingHandlers: number = 0;
|
||||
|
||||
/**
|
||||
* Encoding of the input Readable stream which has string data type instead of Buffer.
|
||||
*
|
||||
* @private
|
||||
* @type {string}
|
||||
* @memberof BufferScheduler
|
||||
*/
|
||||
private encoding?: string;
|
||||
|
||||
/**
|
||||
* How many buffers have been allocated.
|
||||
*
|
||||
* @private
|
||||
* @type {number}
|
||||
* @memberof BufferScheduler
|
||||
*/
|
||||
private numBuffers: number = 0;
|
||||
|
||||
/**
|
||||
* Because this class doesn't know how much data every time stream pops, which
|
||||
* is defined by highWaterMarker of the stream. So BufferScheduler will cache
|
||||
* data received from the stream, when data in unresolvedDataArray exceeds the
|
||||
* blockSize defined, it will try to concat a blockSize of buffer, fill into available
|
||||
* buffers from incoming and push to outgoing array.
|
||||
*
|
||||
* @private
|
||||
* @type {Buffer[]}
|
||||
* @memberof BufferScheduler
|
||||
*/
|
||||
private unresolvedDataArray: Buffer[] = [];
|
||||
|
||||
/**
|
||||
* How much data consisted in unresolvedDataArray.
|
||||
*
|
||||
* @private
|
||||
* @type {number}
|
||||
* @memberof BufferScheduler
|
||||
*/
|
||||
private unresolvedLength: number = 0;
|
||||
|
||||
/**
|
||||
* The array includes all the available buffers can be used to fill data from stream.
|
||||
*
|
||||
* @private
|
||||
* @type {Buffer[]}
|
||||
* @memberof BufferScheduler
|
||||
*/
|
||||
private incoming: Buffer[] = [];
|
||||
|
||||
/**
|
||||
* The array (queue) includes all the buffers filled from stream data.
|
||||
*
|
||||
* @private
|
||||
* @type {Buffer[]}
|
||||
* @memberof BufferScheduler
|
||||
*/
|
||||
private outgoing: Buffer[] = [];
|
||||
|
||||
/**
|
||||
* Creates an instance of BufferScheduler.
|
||||
*
|
||||
* @param {Readable} readable A Node.js Readable stream
|
||||
* @param {number} bufferSize Buffer size of every maintained buffer
|
||||
* @param {number} maxBuffers How many buffers can be allocated
|
||||
* @param {OutgoingHandler} outgoingHandler An async function scheduled to be
|
||||
* triggered when a buffer fully filled
|
||||
* with stream data
|
||||
* @param {number} parallelism Concurrency of executing outgoingHandlers (>0)
|
||||
* @param {string} [encoding] [Optional] Encoding of Readable stream when it's a string stream
|
||||
* @memberof BufferScheduler
|
||||
*/
|
||||
constructor(
|
||||
readable: Readable,
|
||||
bufferSize: number,
|
||||
maxBuffers: number,
|
||||
outgoingHandler: OutgoingHandler,
|
||||
parallelism: number,
|
||||
encoding?: string
|
||||
) {
|
||||
if (bufferSize <= 0) {
|
||||
throw new RangeError(
|
||||
`bufferSize must be larger than 0, current is ${bufferSize}`
|
||||
);
|
||||
}
|
||||
|
||||
if (maxBuffers <= 0) {
|
||||
throw new RangeError(
|
||||
`maxBuffers must be larger than 0, current is ${maxBuffers}`
|
||||
);
|
||||
}
|
||||
|
||||
if (parallelism <= 0) {
|
||||
throw new RangeError(
|
||||
`parallelism must be larger than 0, current is ${parallelism}`
|
||||
);
|
||||
}
|
||||
|
||||
this.bufferSize = bufferSize;
|
||||
this.maxBuffers = maxBuffers;
|
||||
this.readable = readable;
|
||||
this.outgoingHandler = outgoingHandler;
|
||||
this.parallelism = parallelism;
|
||||
this.encoding = encoding;
|
||||
}
|
||||
|
||||
  /**
   * Start the scheduler. The returned promise resolves when the input stream
   * has ended AND every filled buffer (plus any final partial buffer) has been
   * handed to outgoingHandler; it rejects on the first stream or handler error.
   *
   * @returns {Promise<void>}
   * @memberof BufferScheduler
   */
  public async do(): Promise<void> {
    return new Promise<void>((resolve, reject) => {
      this.readable.on("data", data => {
        // String streams are converted to Buffer with the configured encoding.
        data =
          typeof data === "string" ? Buffer.from(data, this.encoding) : data;
        this.appendUnresolvedData(data);

        // resolveData() returns false when no incoming buffer is free;
        // pause the stream until a handler returns one (it is resumed
        // elsewhere in this class once a buffer is recycled).
        if (!this.resolveData()) {
          this.readable.pause();
        }
      });

      this.readable.on("error", err => {
        this.emitter.emit("error", err);
      });

      this.readable.on("end", () => {
        this.isStreamEnd = true;
        this.emitter.emit("checkEnd");
      });

      this.emitter.on("error", err => {
        this.isError = true;
        this.readable.pause();
        reject(err);
      });

      // "checkEnd" decides whether all work is done; it is emitted on stream
      // end and (presumably) after each outgoing handler finishes — confirm
      // against the handler-completion code outside this view.
      this.emitter.on("checkEnd", () => {
        // Still-filled buffers take priority; keep the handlers busy.
        if (this.outgoing.length > 0) {
          this.triggerOutgoingHandlers();
          return;
        }

        if (this.isStreamEnd && this.executingOutgoingHandlers === 0) {
          if (
            this.unresolvedLength > 0 &&
            this.unresolvedLength < this.bufferSize
          ) {
            // Flush the final partial buffer, then settle with its outcome.
            this.outgoingHandler(
              this.shiftBufferFromUnresolvedDataArray(),
              this.offset
            )
              .then(resolve)
              .catch(reject);
          } else if (this.unresolvedLength >= this.bufferSize) {
            // A full block is still pending resolution; not done yet.
            return;
          } else {
            // Nothing left anywhere: all data has been handled.
            resolve();
          }
        }
      });
    });
  }
|
||||
|
||||
/**
|
||||
* Insert a new data into unresolved array.
|
||||
*
|
||||
* @private
|
||||
* @param {Buffer} data
|
||||
* @memberof BufferScheduler
|
||||
*/
|
||||
private appendUnresolvedData(data: Buffer) {
|
||||
this.unresolvedDataArray.push(data);
|
||||
this.unresolvedLength += data.length;
|
||||
}
|
||||
|
||||
/**
 * Try to shift a buffer with size in blockSize. The buffer returned may be less
 * than blockSize when data in unresolvedDataArray is less than bufferSize.
 *
 * @private
 * @returns {Buffer}
 * @memberof BufferScheduler
 */
private shiftBufferFromUnresolvedDataArray(): Buffer {
  if (this.unresolvedLength >= this.bufferSize) {
    // Fast path: the first pending chunk is exactly one buffer's worth —
    // hand it back without copying.
    if (this.bufferSize === this.unresolvedDataArray[0].length) {
      this.unresolvedLength -= this.bufferSize;
      return this.unresolvedDataArray.shift()!;
    }

    // Lazy concat because Buffer.concat highly drops performance
    let merged = Buffer.concat(
      this.unresolvedDataArray,
      this.unresolvedLength
    );
    const buffer = merged.slice(0, this.bufferSize);
    // Keep the remainder as a single merged chunk so the fast path above
    // can apply on subsequent calls.
    merged = merged.slice(this.bufferSize);
    this.unresolvedDataArray = [merged];
    this.unresolvedLength -= buffer.length;
    return buffer;
  } else if (this.unresolvedLength > 0) {
    // Less than one full buffer pending: return everything that is left.
    const merged = Buffer.concat(
      this.unresolvedDataArray,
      this.unresolvedLength
    );
    this.unresolvedDataArray = [];
    this.unresolvedLength = 0;
    return merged;
  } else {
    // Nothing pending at all.
    return Buffer.allocUnsafe(0);
  }
}
|
||||
|
||||
/**
 * Resolve data in unresolvedDataArray. For every buffer with size in blockSize
 * shifted, it will try to get (or allocate a buffer) from incoming, and fill it,
 * then push it into outgoing to be handled by outgoing handler.
 *
 * Return false when available buffers in incoming are not enough, else true.
 *
 * @private
 * @returns {boolean} Return false when buffers in incoming are not enough, else true.
 * @memberof BufferScheduler
 */
private resolveData(): boolean {
  while (this.unresolvedLength >= this.bufferSize) {
    let buffer: Buffer;

    // Prefer recycling a returned buffer over allocating a new one.
    if (this.incoming.length > 0) {
      buffer = this.incoming.shift()!;
    } else {
      if (this.numBuffers < this.maxBuffers) {
        buffer = Buffer.allocUnsafe(this.bufferSize);
        this.numBuffers++;
      } else {
        // No available buffer, wait for buffer returned
        return false;
      }
    }

    // The loop condition guarantees the shifted chunk is exactly
    // bufferSize long, so fill() performs a straight copy here rather
    // than repeating a shorter pattern.
    buffer.fill(this.shiftBufferFromUnresolvedDataArray());
    this.outgoing.push(buffer);
    // Fire-and-forget: handlers throttle themselves on parallelism.
    this.triggerOutgoingHandlers();
  }
  return true;
}
|
||||
|
||||
/**
|
||||
* Try to trigger a outgoing handler for every buffer in outgoing. Stop when
|
||||
* parallelism reaches.
|
||||
*
|
||||
* @private
|
||||
* @memberof BufferScheduler
|
||||
*/
|
||||
private async triggerOutgoingHandlers() {
|
||||
let buffer: Buffer | undefined;
|
||||
do {
|
||||
if (this.executingOutgoingHandlers >= this.parallelism) {
|
||||
return;
|
||||
}
|
||||
|
||||
buffer = this.outgoing.shift();
|
||||
if (buffer) {
|
||||
this.triggerOutgoingHandler(buffer);
|
||||
}
|
||||
} while (buffer);
|
||||
}
|
||||
|
||||
/**
 * Trigger a outgoing handler for a buffer shifted from outgoing.
 *
 * @private
 * @param {Buffer} buffer
 * @returns {Promise<any>}
 * @memberof BufferScheduler
 */
private async triggerOutgoingHandler(buffer: Buffer): Promise<any> {
  const bufferLength = buffer.length;

  this.executingOutgoingHandlers++;
  // Reserve this buffer's destination range before awaiting, so concurrent
  // handlers each observe a distinct, stable offset.
  this.offset += bufferLength;

  try {
    await this.outgoingHandler(buffer, this.offset - bufferLength);
  } catch (err) {
    // NOTE(review): executingOutgoingHandlers is not decremented on this
    // path and the buffer is not returned to incoming — presumably fine
    // because the scheduler stops on "error", but confirm.
    this.emitter.emit("error", err);
    return;
  }

  this.executingOutgoingHandlers--;
  this.reuseBuffer(buffer);
  // Wake the "checkEnd" listener registered in the scheduler so it can
  // re-evaluate whether all work has completed.
  this.emitter.emit("checkEnd");
}
|
||||
|
||||
/**
|
||||
* Return buffer used by outgoing handler into incoming.
|
||||
*
|
||||
* @private
|
||||
* @param {Buffer} buffer
|
||||
* @memberof BufferScheduler
|
||||
*/
|
||||
private reuseBuffer(buffer: Buffer) {
|
||||
this.incoming.push(buffer);
|
||||
if (!this.isError && this.resolveData() && !this.isStreamEnd) {
|
||||
this.readable.resume();
|
||||
}
|
||||
}
|
||||
}
|
|
@ -0,0 +1,47 @@
|
|||
// SDK and service protocol versions.
// NOTE(review): package.json declares "10.0.0-preview" — confirm SDK_VERSION
// should not match it.
export const SDK_VERSION: string = "10.1.0-preview";
export const SERVICE_VERSION: string = "2018-03-28";

// Service-imposed size limits (REST version 2018-03-28).
export const BLOCK_BLOB_MAX_UPLOAD_BLOB_BYTES: number = 256 * 1024 * 1024; // 256MB
export const BLOCK_BLOB_MAX_STAGE_BLOCK_BYTES: number = 100 * 1024 * 1024; // 100MB
export const BLOCK_BLOB_MAX_BLOCKS: number = 50000;
export const BLOB_DEFAULT_DOWNLOAD_BLOCK_BYTES: number = 4 * 1024 * 1024; // 4MB

// Well-known URL query parameter names used by the service.
export const URLConstants = {
  Parameters: {
    FORCE_BROWSER_NO_CACHE: "_", // cache-busting parameter appended in browsers
    SIGNATURE: "sig",
    SNAPSHOT: "snapshot",
    TIMEOUT: "timeout"
  }
};

// HTTP status codes the SDK special-cases.
export const HTTPURLConnection = {
  HTTP_CONFLICT: 409,
  HTTP_NOT_FOUND: 404,
  HTTP_PRECON_FAILED: 412,
  HTTP_RANGE_NOT_SATISFIABLE: 416
};

// HTTP header names (and the storage-specific "x-ms-" prefix) referenced
// when building and signing requests.
export const HeaderConstants = {
  AUTHORIZATION: "authorization",
  AUTHORIZATION_SCHEME: "Bearer",
  CONTENT_ENCODING: "content-encoding",
  CONTENT_LANGUAGE: "content-language",
  CONTENT_LENGTH: "content-length",
  CONTENT_MD5: "content-md5",
  CONTENT_TYPE: "content-type",
  COOKIE: "Cookie",
  DATE: "date",
  IF_MATCH: "if-match",
  IF_MODIFIED_SINCE: "if-modified-since",
  IF_NONE_MATCH: "if-none-match",
  IF_UNMODIFIED_SINCE: "if-unmodified-since",
  PREFIX_FOR_STORAGE: "x-ms-",
  RANGE: "Range",
  USER_AGENT: "User-Agent",
  X_MS_CLIENT_REQUEST_ID: "x-ms-client-request-id",
  X_MS_DATE: "x-ms-date"
};

// ETag sentinel values for access conditions: ETagNone means "no condition",
// ETagAny ("*") matches any ETag.
export const ETagNone = "";
export const ETagAny = "*";
|
|
@ -0,0 +1,17 @@
|
|||
/**
|
||||
* Convert a Browser Blob object into ArrayBuffer.
|
||||
*
|
||||
* @export
|
||||
* @param {Blob} blob
|
||||
* @returns {Promise<ArrayBuffer>}
|
||||
*/
|
||||
export async function blobToArrayBuffer(blob: Blob): Promise<ArrayBuffer> {
|
||||
const fileReader = new FileReader();
|
||||
return new Promise<ArrayBuffer>((resolve, reject) => {
|
||||
fileReader.onloadend = (ev: any) => {
|
||||
resolve(ev.target!.result);
|
||||
};
|
||||
fileReader.onerror = reject;
|
||||
fileReader.readAsArrayBuffer(blob);
|
||||
});
|
||||
}
|
|
@ -0,0 +1,225 @@
|
|||
import { isNode, URLBuilder } from "ms-rest-js";
|
||||
|
||||
/**
|
||||
* Append a string to URL path. Will remove duplicated "/" in front of the string
|
||||
* when URL path ends with a "/".
|
||||
*
|
||||
* @export
|
||||
* @param {string} url Source URL string
|
||||
* @param {string} name String to be appended to URL
|
||||
* @returns {string} An updated URL string
|
||||
*/
|
||||
export function appendToURLPath(url: string, name: string): string {
|
||||
const urlParsed = URLBuilder.parse(url);
|
||||
|
||||
let path = urlParsed.getPath();
|
||||
path = path
|
||||
? path.endsWith("/")
|
||||
? `${path}${name}`
|
||||
: `${path}/${name}`
|
||||
: name;
|
||||
urlParsed.setPath(path);
|
||||
|
||||
return urlParsed.toString();
|
||||
}
|
||||
|
||||
/**
|
||||
* Set URL parameter name and value. If name exists in URL parameters, old value
|
||||
* will be replaced by name key. If not provide value, the parameter will be deleted.
|
||||
*
|
||||
* @export
|
||||
* @param {string} url Source URL string
|
||||
* @param {string} name Parameter name
|
||||
* @param {string} [value] Parameter value
|
||||
* @returns {string} An updated URL string
|
||||
*/
|
||||
export function setURLParameter(
|
||||
url: string,
|
||||
name: string,
|
||||
value?: string
|
||||
): string {
|
||||
const urlParsed = URLBuilder.parse(url);
|
||||
urlParsed.setQueryParameter(name, value);
|
||||
return urlParsed.toString();
|
||||
}
|
||||
|
||||
/**
|
||||
* Get URL parameter by name.
|
||||
*
|
||||
* @export
|
||||
* @param {string} url
|
||||
* @param {string} name
|
||||
* @returns {(string | string[] | undefined)}
|
||||
*/
|
||||
export function getURLParameter(
|
||||
url: string,
|
||||
name: string
|
||||
): string | string[] | undefined {
|
||||
const urlParsed = URLBuilder.parse(url);
|
||||
return urlParsed.getQueryParameterValue(name);
|
||||
}
|
||||
|
||||
/**
|
||||
* Set URL host.
|
||||
*
|
||||
* @export
|
||||
* @param {string} url Source URL string
|
||||
* @param {string} host New host string
|
||||
* @returns An updated URL string
|
||||
*/
|
||||
export function setURLHost(url: string, host: string): string {
|
||||
const urlParsed = URLBuilder.parse(url);
|
||||
urlParsed.setHost(host);
|
||||
return urlParsed.toString();
|
||||
}
|
||||
|
||||
/**
|
||||
* Get URL path from an URL string.
|
||||
*
|
||||
* @export
|
||||
* @param {string} url Source URL string
|
||||
* @returns {(string | undefined)}
|
||||
*/
|
||||
export function getURLPath(url: string): string | undefined {
|
||||
const urlParsed = URLBuilder.parse(url);
|
||||
return urlParsed.getPath();
|
||||
}
|
||||
|
||||
/**
|
||||
* Get URL query key value pairs from an URL string.
|
||||
*
|
||||
* @export
|
||||
* @param {string} url
|
||||
* @returns {{[key: string]: string}}
|
||||
*/
|
||||
export function getURLQueries(url: string): { [key: string]: string } {
|
||||
let queryString = URLBuilder.parse(url).getQuery();
|
||||
if (!queryString) {
|
||||
return {};
|
||||
}
|
||||
|
||||
queryString = queryString.trim();
|
||||
queryString = queryString.startsWith("?")
|
||||
? queryString.substr(1)
|
||||
: queryString;
|
||||
|
||||
let querySubStrings: string[] = queryString.split("&");
|
||||
querySubStrings = querySubStrings.filter((value: string) => {
|
||||
const indexOfEqual = value.indexOf("=");
|
||||
const lastIndexOfEqual = value.lastIndexOf("=");
|
||||
return (
|
||||
indexOfEqual > 0 &&
|
||||
indexOfEqual === lastIndexOfEqual &&
|
||||
lastIndexOfEqual < value.length - 1
|
||||
);
|
||||
});
|
||||
|
||||
const queries: { [key: string]: string } = {};
|
||||
for (const querySubString of querySubStrings) {
|
||||
const splitResults = querySubString.split("=");
|
||||
const key: string = splitResults[0];
|
||||
const value: string = splitResults[1];
|
||||
queries[key] = value;
|
||||
}
|
||||
|
||||
return queries;
|
||||
}
|
||||
|
||||
/**
|
||||
* Rounds a date off to seconds.
|
||||
*
|
||||
* @export
|
||||
* @param {Date} date Input date
|
||||
* @returns {string} Date string in ISO8061 format, with no milliseconds component
|
||||
*/
|
||||
export function truncatedISO8061Date(date: Date): string {
|
||||
const dateString = date.toISOString();
|
||||
return dateString.substring(0, dateString.length - 1) + "0000" + "Z";
|
||||
}
|
||||
|
||||
/**
|
||||
* Base64 encode.
|
||||
*
|
||||
* @export
|
||||
* @param {string} content
|
||||
* @returns {string}
|
||||
*/
|
||||
export function base64encode(content: string): string {
|
||||
return !isNode ? btoa(content) : Buffer.from(content).toString("base64");
|
||||
}
|
||||
|
||||
/**
|
||||
* Base64 decode.
|
||||
*
|
||||
* @export
|
||||
* @param {string} encodedString
|
||||
* @returns {string}
|
||||
*/
|
||||
export function base64decode(encodedString: string): string {
|
||||
return !isNode
|
||||
? atob(encodedString)
|
||||
: Buffer.from(encodedString, "base64").toString();
|
||||
}
|
||||
|
||||
/**
 * Generate a 64 bytes base64 block ID string.
 *
 * @export
 * @param {string} blockIDPrefix Prefix embedded at the start of the block ID;
 *                               truncated to 42 characters when longer
 * @param {number} blockIndex Block index, zero-padded to fill the remaining width
 * @returns {string}
 */
export function generateBlockID(
  blockIDPrefix: string,
  blockIndex: number
): string {
  // To generate a 64 bytes base64 string, source string should be 48
  const maxSourceStringLength = 48;

  // A blob can have a maximum of 100,000 uncommitted blocks at any given time
  const maxBlockIndexLength = 6;

  // Leave at least maxBlockIndexLength characters for the zero-padded index.
  const maxAllowedBlockIDPrefixLength =
    maxSourceStringLength - maxBlockIndexLength;

  if (blockIDPrefix.length > maxAllowedBlockIDPrefixLength) {
    blockIDPrefix = blockIDPrefix.slice(0, maxAllowedBlockIDPrefixLength);
  }
  const res =
    blockIDPrefix +
    padStart(
      blockIndex.toString(),
      maxSourceStringLength - blockIDPrefix.length,
      "0"
    );
  return base64encode(res);
}
|
||||
|
||||
/**
|
||||
* String.prototype.padStart()
|
||||
*
|
||||
* @export
|
||||
* @param {string} currentString
|
||||
* @param {number} targetLength
|
||||
* @param {string} [padString=" "]
|
||||
* @returns {string}
|
||||
*/
|
||||
export function padStart(
|
||||
currentString: string,
|
||||
targetLength: number,
|
||||
padString: string = " "
|
||||
): string {
|
||||
if (String.prototype.padStart) {
|
||||
return currentString.padStart(targetLength, padString);
|
||||
}
|
||||
|
||||
padString = padString || " ";
|
||||
if (currentString.length > targetLength) {
|
||||
return currentString;
|
||||
} else {
|
||||
targetLength = targetLength - currentString.length;
|
||||
if (targetLength > padString.length) {
|
||||
padString += padString.repeat(targetLength / padString.length);
|
||||
}
|
||||
return padString.slice(0, targetLength) + currentString;
|
||||
}
|
||||
}
|
|
@ -0,0 +1,62 @@
|
|||
/**
|
||||
* Reads a readable stream into buffer. Fill the buffer from offset to end.
|
||||
*
|
||||
* @export
|
||||
* @param {NodeJS.ReadableStream} stream A Node.js Readable stream
|
||||
* @param {Buffer} buffer Buffer to be filled, length must >= offset
|
||||
* @param {number} offset From which position in the buffer to be filled, inclusive
|
||||
* @param {number} end To which position in the buffer to be filled, exclusive
|
||||
* @param {string} [encoding] Encoding of the Readable stream
|
||||
* @returns {Promise<void>}
|
||||
*/
|
||||
export async function streamToBuffer(
|
||||
stream: NodeJS.ReadableStream,
|
||||
buffer: Buffer,
|
||||
offset: number,
|
||||
end: number,
|
||||
encoding?: string
|
||||
): Promise<void> {
|
||||
let pos = 0; // Position in stream
|
||||
const count = end - offset; // Total amount of data needed in stream
|
||||
|
||||
return new Promise<void>((resolve, reject) => {
|
||||
stream.on("readable", () => {
|
||||
if (pos >= count) {
|
||||
resolve();
|
||||
return;
|
||||
}
|
||||
|
||||
let chunk = stream.read();
|
||||
if (!chunk) {
|
||||
return;
|
||||
}
|
||||
if (typeof chunk === "string") {
|
||||
chunk = Buffer.from(chunk, encoding);
|
||||
}
|
||||
|
||||
// How much data needed in this chunk
|
||||
const chunkLength =
|
||||
pos + chunk.length > count ? count - pos : chunk.length;
|
||||
|
||||
buffer.fill(
|
||||
chunk.slice(0, chunkLength),
|
||||
offset + pos,
|
||||
offset + pos + chunkLength
|
||||
);
|
||||
pos += chunkLength;
|
||||
});
|
||||
|
||||
stream.on("end", () => {
|
||||
if (pos < count) {
|
||||
reject(
|
||||
new Error(
|
||||
`Stream drains before getting enough data needed. Data read: ${pos}, data need: ${count}`
|
||||
)
|
||||
);
|
||||
}
|
||||
resolve();
|
||||
});
|
||||
|
||||
stream.on("error", reject);
|
||||
});
|
||||
}
|
Разница между файлами не показана из-за своего большого размера
Загрузить разницу
|
@ -0,0 +1,83 @@
|
|||
{
|
||||
"name": "@azure/storage-file",
|
||||
"version": "10.0.0-preview",
|
||||
"description": "Microsoft Azure Storage SDK for JavaScript - File",
|
||||
"main": "./dist/index.js",
|
||||
"module": "./dist-esm/lib/index.js",
|
||||
"browser": {
|
||||
"./dist/index.js": "./browser/azure-storage.file.min.js",
|
||||
"./dist-esm/lib/index.js": "./dist-esm/lib/index.browser.js",
|
||||
"./dist-esm/test/utils/index.js": "./dist-esm/test/utils/index.browser.js",
|
||||
"os": false,
|
||||
"process": false
|
||||
},
|
||||
"types": "./typings/lib/index.d.ts",
|
||||
"engines": {
|
||||
"node": ">=6.5.0"
|
||||
},
|
||||
"dependencies": {
|
||||
"events": "3.0.0",
|
||||
"ms-rest-js": "0.22.425",
|
||||
"tslib": "^1.9.3"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@types/mocha": "^5.2.5",
|
||||
"@types/node": "^10.12.3",
|
||||
"assert": "^1.4.1",
|
||||
"es6-promise": "^4.2.4",
|
||||
"gulp": "^4.0.0",
|
||||
"gulp-zip": "^4.2.0",
|
||||
"karma": "^3.0.0",
|
||||
"karma-chrome-launcher": "^2.2.0",
|
||||
"karma-edge-launcher": "^0.4.2",
|
||||
"karma-env-preprocessor": "^0.1.1",
|
||||
"karma-firefox-launcher": "^1.1.0",
|
||||
"karma-ie-launcher": "^1.0.0",
|
||||
"karma-mocha": "^1.3.0",
|
||||
"karma-mocha-reporter": "^2.2.5",
|
||||
"mocha": "^5.2.0",
|
||||
"rimraf": "^2.6.2",
|
||||
"rollup": "^0.65.2",
|
||||
"rollup-plugin-commonjs": "^9.1.8",
|
||||
"rollup-plugin-multi-entry": "^2.0.2",
|
||||
"rollup-plugin-node-resolve": "^3.4.0",
|
||||
"rollup-plugin-replace": "^2.0.0",
|
||||
"rollup-plugin-shim": "^1.0.0",
|
||||
"rollup-plugin-uglify": "^5.0.2",
|
||||
"rollup-plugin-visualizer": "^0.9.0",
|
||||
"ts-node": "^7.0.1",
|
||||
"typescript": "^3.1.6"
|
||||
},
|
||||
"scripts": {
|
||||
"test": "npm run test:node && npm run test:browser",
|
||||
"test:node": "npm run build:test && mocha --no-timeouts dist-test/index.js",
|
||||
"test:browser": "npm run build:test && karma start --single-run",
|
||||
"build": "npm run build:es6 && npm run build:nodebrowser && npm run build:browserzip",
|
||||
"build:test": "rollup -c rollup.test.config.js",
|
||||
"build:nodebrowser": "rollup -c",
|
||||
"build:es6": "tsc -p tsconfig.json",
|
||||
"build:autorest": "autorest ./swagger/README.md --typescript --use=@microsoft.azure/autorest.typescript@2.0.476",
|
||||
"build:browserzip": "gulp zip",
|
||||
"clean": "rimraf dist dist-esm dist-test typings temp browser/*.js* browser/*.zip statistics.html"
|
||||
},
|
||||
"repository": {
|
||||
"type": "git",
|
||||
"url": "git+https://github.com/Azure/azure-storage-js.git"
|
||||
},
|
||||
"keywords": [
|
||||
"Azure",
|
||||
"Storage",
|
||||
"File",
|
||||
"Node.js",
|
||||
"TypeScript",
|
||||
"JavaScript",
|
||||
"Browser"
|
||||
],
|
||||
"author": "Microsoft Corporation",
|
||||
"license": "MIT",
|
||||
"bugs": {
|
||||
"url": "https://github.com/Azure/azure-storage-js/issues"
|
||||
},
|
||||
"homepage": "https://github.com/Azure/azure-storage-js#readme",
|
||||
"sideEffects": false
|
||||
}
|
|
@ -0,0 +1,92 @@
|
|||
import nodeResolve from "rollup-plugin-node-resolve";
|
||||
import { uglify } from "rollup-plugin-uglify";
|
||||
import replace from "rollup-plugin-replace";
|
||||
import commonjs from "rollup-plugin-commonjs";
|
||||
import shim from "rollup-plugin-shim";
|
||||
import visualizer from "rollup-plugin-visualizer";
|
||||
|
||||
// Banner prepended to every bundle; the version is read from package.json so
// the two can never drift apart.
const version = require("./package.json").version;
const banner = [
  "/*!",
  ` * Azure Storage SDK for JavaScript - File, ${version}`,
  " * Copyright (c) Microsoft and contributors. All rights reserved.",
  " */"
].join("\n");
|
||||
|
||||
/**
 * Build the Rollup config for the Node.js (CommonJS) bundle. Node built-ins
 * and ms-rest-js stay external; the output is always minified.
 */
const nodeRollupConfigFactory = () => ({
  external: ["ms-rest-js", "crypto", "fs", "events", "os"],
  input: "dist-esm/lib/index.js",
  output: {
    file: "dist/index.js",
    format: "cjs",
    sourcemap: true
  },
  plugins: [nodeResolve(), uglify()]
});
||||
|
||||
/**
 * Build the Rollup config for the browser (UMD) bundle. When isProduction is
 * true the output is minified to the .min.js name and a bundle-size report is
 * generated.
 */
const browserRollupConfigFactory = isProduction => {
  const browserRollupConfig = {
    input: "dist-esm/lib/index.browser.js",
    output: {
      file: "browser/azure-storage.file.js",
      banner: banner,
      format: "umd",
      name: "azfile", // global name exposed by the UMD bundle
      sourcemap: true
    },
    plugins: [
      replace({
        delimiters: ["", ""],
        values: {
          // replace dynamic checks with if (false) since this is for
          // browser only. Rollup's dead code elimination will remove
          // any code guarded by if (isNode) { ... }
          "if (isNode)": "if (false)"
        }
      }),
      // os is not used by the browser bundle, so just shim it
      shim({
        os: `
export const type = 1;
export const release = 1;
`
      }),
      nodeResolve({
        module: true,
        browser: true,
        preferBuiltins: false
      }),
      commonjs({
        // CommonJS deps whose named exports Rollup cannot detect statically.
        namedExports: {
          events: ["EventEmitter"],
          assert: ["ok", "deepEqual", "equal", "fail", "deepStrictEqual"]
        }
      })
    ]
  };

  if (isProduction) {
    browserRollupConfig.output.file = "browser/azure-storage.file.min.js";
    browserRollupConfig.plugins.push(
      uglify({
        output: {
          // Keep the license banner even after minification.
          preamble: banner
        }
      }),
      visualizer({
        filename: "./statistics.html",
        sourcemap: true
      })
    );
  }

  return browserRollupConfig;
};
||||
|
||||
// Emit three bundles: CommonJS for Node, plus minified and unminified UMD
// builds for browsers.
export default [
  nodeRollupConfigFactory(),
  browserRollupConfigFactory(true),
  browserRollupConfigFactory(false)
];
|
|
@ -0,0 +1,16 @@
|
|||
import multi from "rollup-plugin-multi-entry";
import baseConfig from "./rollup.config";

// Reuse the node and browser bundle configs, retargeting them at the compiled
// tests and combining every test file into one bundle via multi-entry.
const [node, browser] = baseConfig;

node.input = ["dist-esm/test/*.js", "dist-esm/test/node/*.js"];
node.output.file = "dist-test/index.js";
node.plugins.unshift(multi());
node.external.push("assert", "path");
// NOTE(review): context "null" controls top-level `this` in bundled code —
// confirm the string (rather than the value null) is intended.
node.context = "null";

browser.input = ["dist-esm/test/*.js", "dist-esm/test/browser/*.js"];
browser.output.file = "dist-test/index.browser.js";
browser.plugins.unshift(multi());
browser.context = "null";

export default [node, browser];
|
|
@ -0,0 +1,115 @@
|
|||
// Steps to run this sample
// 1. npm install
// 2. Enter your storage account name and shared key in main()

// NOTE(review): this sample exercises the Blob service (ServiceURL,
// ContainerURL, BlockBlobURL) — confirm it belongs in the file package.
const {
  Aborter,
  BlobURL,
  BlockBlobURL,
  ContainerURL,
  ServiceURL,
  StorageURL,
  SharedKeyCredential,
  AnonymousCredential,
  TokenCredential
} = require(".."); // Change to "@azure/storage-blob" in your package

async function main() {
  // Enter your storage account name and shared key
  const account = "account";
  const accountKey = "accountkey";

  // Use SharedKeyCredential with storage account and account key
  const sharedKeyCredential = new SharedKeyCredential(account, accountKey);

  // Use TokenCredential with OAuth token
  const tokenCredential = new TokenCredential("token");
  tokenCredential.token = "renewedToken"; // Renew the token by updating the token field of the credential

  // Use AnonymousCredential when url already includes a SAS signature
  const anonymousCredential = new AnonymousCredential();

  // Use sharedKeyCredential, tokenCredential or AnonymousCredential to create a pipeline
  const pipeline = StorageURL.newPipeline(sharedKeyCredential);

  // List containers
  const serviceURL = new ServiceURL(
    // When using AnonymousCredential, following url should include a valid SAS or support public access
    `https://${account}.blob.core.windows.net`,
    pipeline
  );

  // Page through all containers; marker is undefined on the first call and
  // becomes falsy again once the last page has been returned.
  let marker;
  do {
    const listContainersResponse = await serviceURL.listContainersSegment(
      Aborter.none,
      marker
    );

    marker = listContainersResponse.marker;
    for (const container of listContainersResponse.containerItems) {
      console.log(`Container: ${container.name}`);
    }
  } while (marker);

  // Create a container
  const containerName = `newcontainer${new Date().getTime()}`;
  const containerURL = ContainerURL.fromServiceURL(serviceURL, containerName);

  const createContainerResponse = await containerURL.create(Aborter.none);
  console.log(
    `Create container ${containerName} successfully`,
    createContainerResponse.requestId
  );

  // Create a blob
  const content = "hello";
  const blobName = "newblob" + new Date().getTime();
  const blobURL = BlobURL.fromContainerURL(containerURL, blobName);
  const blockBlobURL = BlockBlobURL.fromBlobURL(blobURL);
  const uploadBlobResponse = await blockBlobURL.upload(
    Aborter.none,
    content,
    content.length
  );
  console.log(
    `Upload block blob ${blobName} successfully`,
    uploadBlobResponse.requestId
  );

  // List blobs (marker is falsy again after the container loop above)
  do {
    const listBlobsResponse = await containerURL.listBlobFlatSegment(
      Aborter.none,
      marker
    );

    marker = listBlobsResponse.marker;
    for (const blob of listBlobsResponse.segment.blobItems) {
      console.log(`Blob: ${blob.name}`);
    }
  } while (marker);

  // Get blob content from position 0 to the end
  // In Node.js, get downloaded data by accessing downloadBlockBlobResponse.readableStreamBody
  // In browsers, get downloaded data by accessing downloadBlockBlobResponse.blobBody
  const downloadBlockBlobResponse = await blobURL.download(Aborter.none, 0);
  console.log(
    "Downloaded blob content",
    downloadBlockBlobResponse.readableStreamBody.read(content.length).toString()
  );

  // Delete container
  await containerURL.delete(Aborter.none);

  console.log("deleted container");
}

// An async method returns a Promise object, which is compatible with then().catch() coding style.
main()
  .then(() => {
    console.log("Successfully executed sample.");
  })
  .catch(err => {
    console.log(err.message);
  });
|
|
@ -0,0 +1,112 @@
|
|||
// Steps to run this sample
// 1. npm install
// 2. Enter your storage account name, SAS and a path pointing to local file in main()

// NOTE(review): this sample exercises the Blob service high-level helpers —
// confirm it belongs in the file package.
const fs = require("fs");
const {
  AnonymousCredential,
  uploadBrowserDataToBlockBlob,
  downloadBlobToBuffer,
  uploadFileToBlockBlob,
  uploadStreamToBlockBlob,
  Aborter,
  BlobURL,
  BlockBlobURL,
  ContainerURL,
  ServiceURL,
  StorageURL
} = require(".."); // Change to "@azure/storage-blob" in your package

async function main() {
  // Fill in following settings before running this sample
  const account = "account";
  const accountSas = "accountSas";
  const localFilePath = "localFilePath";

  const pipeline = StorageURL.newPipeline(new AnonymousCredential(), {
    // httpClient: MyHTTPClient, // A customized HTTP client implementing IHTTPClient interface
    // logger: MyLogger, // A customized logger implementing IHTTPPipelineLogger interface
    retryOptions: { maxTries: 4 }, // Retry options
    telemetry: { value: "HighLevelSample V1.0.0" } // Customized telemetry string
  });

  const serviceURL = new ServiceURL(
    `https://${account}.blob.core.windows.net${accountSas}`,
    pipeline
  );

  // Create a container
  const containerName = `newcontainer${new Date().getTime()}`;
  const containerURL = ContainerURL.fromServiceURL(serviceURL, containerName);
  await containerURL.create(Aborter.none);

  // Create a blob
  const blobName = "newblob" + new Date().getTime();
  const blobURL = BlobURL.fromContainerURL(containerURL, blobName);
  const blockBlobURL = BlockBlobURL.fromBlobURL(blobURL);

  // Parallel uploading with uploadFileToBlockBlob in Node.js runtime
  // uploadFileToBlockBlob is only available in Node.js
  await uploadFileToBlockBlob(Aborter.none, localFilePath, blockBlobURL, {
    blockSize: 4 * 1024 * 1024, // 4MB block size
    parallelism: 20, // 20 concurrency
    progress: ev => console.log(ev)
  });
  console.log("uploadFileToBlockBlob success");

  // Parallel uploading a Readable stream with uploadStreamToBlockBlob in Node.js runtime
  // uploadStreamToBlockBlob is only available in Node.js
  await uploadStreamToBlockBlob(
    // NOTE(review): original comment said "timeout in 30mins" but
    // 30 * 60 * 60 * 1000 ms is 30 hours — confirm the intended timeout.
    Aborter.timeout(30 * 60 * 60 * 1000),
    fs.createReadStream(localFilePath),
    blockBlobURL,
    4 * 1024 * 1024,
    20,
    {
      progress: ev => console.log(ev)
    }
  );
  console.log("uploadStreamToBlockBlob success");

  // Parallel uploading a browser File/Blob/ArrayBuffer in browsers with uploadBrowserDataToBlockBlob
  // Uncomment following code in browsers because uploadBrowserDataToBlockBlob is only available in browsers
  /*
  const browserFile = document.getElementById("fileinput").files[0];
  await uploadBrowserDataToBlockBlob(Aborter.none, browserFile, blockBlobURL, {
    blockSize: 4 * 1024 * 1024, // 4MB block size
    parallelism: 20, // 20 concurrency
    progress: ev => console.log(ev)
  });
  */

  // Parallel downloading a block blob into Node.js buffer
  // downloadBlobToBuffer is only available in Node.js
  const fileSize = fs.statSync(localFilePath).size;
  const buffer = Buffer.alloc(fileSize);
  await downloadBlobToBuffer(
    // NOTE(review): same 30-hour timeout as above — confirm intended.
    Aborter.timeout(30 * 60 * 60 * 1000),
    buffer,
    blockBlobURL,
    0,
    undefined,
    {
      blockSize: 4 * 1024 * 1024, // 4MB block size
      parallelism: 20, // 20 concurrency
      progress: ev => console.log(ev)
    }
  );
  console.log("downloadBlobToBuffer success");

  // Delete container
  await containerURL.delete(Aborter.none);
  console.log("deleted container");
}

// An async method returns a Promise object, which is compatible with then().catch() coding style.
main()
  .then(() => {
    console.log("Successfully executed sample.");
  })
  .catch(err => {
    console.log(err.message);
  });
|
|
@ -0,0 +1,16 @@
|
|||
# Azure Storage TypeScript Protocol Layer
|
||||
|
||||
> see https://aka.ms/autorest
|
||||
|
||||
```yaml
|
||||
package-name: azure-storage-file
|
||||
title: StorageClient
|
||||
description: Storage Client
|
||||
enable-xml: true
|
||||
generate-metadata: false
|
||||
license-header: MICROSOFT_MIT_NO_VERSION
|
||||
output-folder: ../lib/generated
|
||||
input-file: ./file-storage-2018-03-28.json
|
||||
model-date-time-as-string: true
|
||||
optional-response-headers: true
|
||||
```
|
Разница между файлами не показана из-за своего большого размера
Загрузить разницу
|
@ -0,0 +1,63 @@
|
|||
import * as assert from "assert";

import { Aborter } from "../lib/Aborter";
import { ContainerURL } from "../lib/ContainerURL";
import { getBSU, getUniqueName } from "./utils";

// tslint:disable:no-empty
describe("Aborter", () => {
  const serviceURL = getBSU();
  let containerName: string = getUniqueName("container");
  let containerURL = ContainerURL.fromServiceURL(serviceURL, containerName);

  // Fresh container name/URL per test so runs do not collide.
  beforeEach(async () => {
    containerName = getUniqueName("container");
    containerURL = ContainerURL.fromServiceURL(serviceURL, containerName);
  });

  // NOTE(review): the title says "after calling abort()" but the body never
  // calls abort() — confirm the intended title (e.g. "without calling abort()").
  it("Should not abort after calling abort()", async () => {
    await containerURL.create(Aborter.none);
  });

  it("Should abort when calling abort() before request finishes", async () => {
    const aborter = Aborter.none;
    const response = containerURL.create(aborter);
    aborter.abort();
    try {
      await response;
      assert.fail();
    } catch (err) {}
  });

  it("Should not abort when calling abort() after request finishes", async () => {
    const aborter = Aborter.none;
    await containerURL.create(aborter);
    aborter.abort();
  });

  it("Should abort after aborter timeout", async () => {
    try {
      await containerURL.create(Aborter.timeout(1));
      assert.fail();
    } catch (err) {}
  });

  // "Father" aborter = parent aborter; child created via withTimeout().
  it("Should abort after father aborter calls abort()", async () => {
    try {
      const aborter = Aborter.none;
      const response = containerURL.create(aborter.withTimeout(10 * 60 * 1000));
      aborter.abort();
      await response;
      assert.fail();
    } catch (err) {}
  });

  it("Should abort after father aborter timeout", async () => {
    try {
      const aborter = Aborter.timeout(1);
      const response = containerURL.create(aborter.withTimeout(10 * 60 * 1000));
      await response;
      assert.fail();
    } catch (err) {}
  });
});
|
|
@ -0,0 +1,82 @@
|
|||
import * as assert from "assert";
|
||||
|
||||
import { Aborter } from "../lib/Aborter";
|
||||
import { AppendBlobURL } from "../lib/AppendBlobURL";
|
||||
import { ContainerURL } from "../lib/ContainerURL";
|
||||
import { bodyToString, getBSU, getUniqueName } from "./utils";
|
||||
|
||||
describe("AppendBlobURL", () => {
|
||||
const serviceURL = getBSU();
|
||||
let containerName: string = getUniqueName("container");
|
||||
let containerURL = ContainerURL.fromServiceURL(serviceURL, containerName);
|
||||
let blobName: string = getUniqueName("blob");
|
||||
let appendBlobURL = AppendBlobURL.fromContainerURL(containerURL, blobName);
|
||||
|
||||
beforeEach(async () => {
|
||||
containerName = getUniqueName("container");
|
||||
containerURL = ContainerURL.fromServiceURL(serviceURL, containerName);
|
||||
await containerURL.create(Aborter.none);
|
||||
blobName = getUniqueName("blob");
|
||||
appendBlobURL = AppendBlobURL.fromContainerURL(containerURL, blobName);
|
||||
});
|
||||
|
||||
afterEach(async () => {
|
||||
await containerURL.delete(Aborter.none);
|
||||
});
|
||||
|
||||
it("create with default parameters", async () => {
|
||||
await appendBlobURL.create(Aborter.none);
|
||||
await appendBlobURL.download(Aborter.none, 0);
|
||||
});
|
||||
|
||||
it("create with parameters configured", async () => {
|
||||
const options = {
|
||||
blobHTTPHeaders: {
|
||||
blobCacheControl: "blobCacheControl",
|
||||
blobContentDisposition: "blobContentDisposition",
|
||||
blobContentEncoding: "blobContentEncoding",
|
||||
blobContentLanguage: "blobContentLanguage",
|
||||
blobContentType: "blobContentType"
|
||||
},
|
||||
metadata: {
|
||||
key1: "vala",
|
||||
key2: "valb"
|
||||
}
|
||||
};
|
||||
await appendBlobURL.create(Aborter.none, options);
|
||||
const properties = await appendBlobURL.getProperties(Aborter.none);
|
||||
assert.equal(
|
||||
properties.cacheControl,
|
||||
options.blobHTTPHeaders.blobCacheControl
|
||||
);
|
||||
assert.equal(
|
||||
properties.contentDisposition,
|
||||
options.blobHTTPHeaders.blobContentDisposition
|
||||
);
|
||||
assert.equal(
|
||||
properties.contentEncoding,
|
||||
options.blobHTTPHeaders.blobContentEncoding
|
||||
);
|
||||
assert.equal(
|
||||
properties.contentLanguage,
|
||||
options.blobHTTPHeaders.blobContentLanguage
|
||||
);
|
||||
assert.equal(
|
||||
properties.contentType,
|
||||
options.blobHTTPHeaders.blobContentType
|
||||
);
|
||||
assert.equal(properties.metadata!.key1, options.metadata.key1);
|
||||
assert.equal(properties.metadata!.key2, options.metadata.key2);
|
||||
});
|
||||
|
||||
it("appendBlock", async () => {
|
||||
await appendBlobURL.create(Aborter.none);
|
||||
|
||||
const content = "Hello World!";
|
||||
await appendBlobURL.appendBlock(Aborter.none, content, content.length);
|
||||
|
||||
const downloadResponse = await appendBlobURL.download(Aborter.none, 0);
|
||||
assert.equal(await bodyToString(downloadResponse, content.length), content);
|
||||
assert.equal(downloadResponse.contentLength!, content.length);
|
||||
});
|
||||
});
|
|
@ -0,0 +1,365 @@
|
|||
import * as assert from "assert";
|
||||
|
||||
import { isNode } from "ms-rest-js";
|
||||
import { Aborter } from "../lib/Aborter";
|
||||
import { BlobURL } from "../lib/BlobURL";
|
||||
import { BlockBlobURL } from "../lib/BlockBlobURL";
|
||||
import { ContainerURL } from "../lib/ContainerURL";
|
||||
import {
|
||||
AccessTier,
|
||||
BlobType,
|
||||
LeaseDurationType,
|
||||
LeaseStateType,
|
||||
LeaseStatusType,
|
||||
ListBlobsIncludeItem
|
||||
} from "../lib/generated/models";
|
||||
import { bodyToString, getBSU, getUniqueName, sleep } from "./utils";
|
||||
|
||||
describe("BlobURL", () => {
|
||||
const serviceURL = getBSU();
|
||||
let containerName: string = getUniqueName("container");
|
||||
let containerURL = ContainerURL.fromServiceURL(serviceURL, containerName);
|
||||
let blobName: string = getUniqueName("blob");
|
||||
let blobURL = BlobURL.fromContainerURL(containerURL, blobName);
|
||||
let blockBlobURL = BlockBlobURL.fromBlobURL(blobURL);
|
||||
const content = "Hello World";
|
||||
|
||||
beforeEach(async () => {
|
||||
containerName = getUniqueName("container");
|
||||
containerURL = ContainerURL.fromServiceURL(serviceURL, containerName);
|
||||
await containerURL.create(Aborter.none);
|
||||
blobName = getUniqueName("blob");
|
||||
blobURL = BlobURL.fromContainerURL(containerURL, blobName);
|
||||
blockBlobURL = BlockBlobURL.fromBlobURL(blobURL);
|
||||
await blockBlobURL.upload(Aborter.none, content, content.length);
|
||||
});
|
||||
|
||||
afterEach(async () => {
|
||||
await containerURL.delete(Aborter.none);
|
||||
});
|
||||
|
||||
it("download with with default parameters", async () => {
|
||||
const result = await blobURL.download(Aborter.none, 0);
|
||||
assert.deepStrictEqual(await bodyToString(result, content.length), content);
|
||||
});
|
||||
|
||||
it("download all parameters set", async () => {
|
||||
const result = await blobURL.download(Aborter.none, 0, 1, {
|
||||
rangeGetContentMD5: true
|
||||
});
|
||||
assert.deepStrictEqual(await bodyToString(result, 1), content[0]);
|
||||
});
|
||||
|
||||
it("setMetadata with new metadata set", async () => {
|
||||
const metadata = {
|
||||
a: "a",
|
||||
b: "b"
|
||||
};
|
||||
await blobURL.setMetadata(Aborter.none, { metadata });
|
||||
const result = await blobURL.getProperties(Aborter.none);
|
||||
assert.deepStrictEqual(result.metadata, metadata);
|
||||
});
|
||||
|
||||
it("setMetadata with cleaning up metadata", async () => {
|
||||
const metadata = {
|
||||
a: "a",
|
||||
b: "b"
|
||||
};
|
||||
await blobURL.setMetadata(Aborter.none, { metadata });
|
||||
const result = await blobURL.getProperties(Aborter.none);
|
||||
assert.deepStrictEqual(result.metadata, metadata);
|
||||
|
||||
await blobURL.setMetadata(Aborter.none);
|
||||
const result2 = await blobURL.getProperties(Aborter.none);
|
||||
assert.deepStrictEqual(result2.metadata, {});
|
||||
});
|
||||
|
||||
it("setHTTPHeaders with default parameters", async () => {
|
||||
await blobURL.setHTTPHeaders(Aborter.none, {});
|
||||
const result = await blobURL.getProperties(Aborter.none);
|
||||
|
||||
assert.deepStrictEqual(result.blobType, BlobType.BlockBlob);
|
||||
assert.ok(result.lastModified);
|
||||
assert.deepStrictEqual(result.metadata, {});
|
||||
assert.ok(!result.cacheControl);
|
||||
assert.ok(!result.contentType);
|
||||
assert.ok(!result.contentMD5);
|
||||
assert.ok(!result.contentEncoding);
|
||||
assert.ok(!result.contentLanguage);
|
||||
assert.ok(!result.contentDisposition);
|
||||
});
|
||||
|
||||
it("setHTTPHeaders with all parameters set", async () => {
|
||||
const headers = {
|
||||
blobCacheControl: "blobCacheControl",
|
||||
blobContentDisposition: "blobContentDisposition",
|
||||
blobContentEncoding: "blobContentEncoding",
|
||||
blobContentLanguage: "blobContentLanguage",
|
||||
blobContentMD5: isNode
|
||||
? Buffer.from([1, 2, 3, 4])
|
||||
: new Uint8Array([1, 2, 3, 4]),
|
||||
blobContentType: "blobContentType"
|
||||
};
|
||||
await blobURL.setHTTPHeaders(Aborter.none, {
|
||||
blobHTTPHeaders: headers
|
||||
});
|
||||
const result = await blobURL.getProperties(Aborter.none);
|
||||
assert.deepStrictEqual(result.blobType, BlobType.BlockBlob);
|
||||
assert.ok(result.lastModified);
|
||||
assert.deepStrictEqual(result.metadata, {});
|
||||
assert.deepStrictEqual(result.cacheControl, headers.blobCacheControl);
|
||||
assert.deepStrictEqual(result.contentType, headers.blobContentType);
|
||||
assert.deepStrictEqual(result.contentMD5, headers.blobContentMD5);
|
||||
assert.deepStrictEqual(result.contentEncoding, headers.blobContentEncoding);
|
||||
assert.deepStrictEqual(result.contentLanguage, headers.blobContentLanguage);
|
||||
assert.deepStrictEqual(
|
||||
result.contentDisposition,
|
||||
headers.blobContentDisposition
|
||||
);
|
||||
});
|
||||
|
||||
it("acquireLease", async () => {
|
||||
const guid = "ca761232ed4211cebacd00aa0057b223";
|
||||
const duration = 30;
|
||||
await blobURL.acquireLease(Aborter.none, guid, duration);
|
||||
|
||||
const result = await blobURL.getProperties(Aborter.none);
|
||||
assert.equal(result.leaseDuration, LeaseDurationType.Fixed);
|
||||
assert.equal(result.leaseState, LeaseStateType.Leased);
|
||||
assert.equal(result.leaseStatus, LeaseStatusType.Locked);
|
||||
|
||||
await blobURL.releaseLease(Aborter.none, guid);
|
||||
});
|
||||
|
||||
it("releaseLease", async () => {
|
||||
const guid = "ca761232ed4211cebacd00aa0057b223";
|
||||
const duration = -1;
|
||||
await blobURL.acquireLease(Aborter.none, guid, duration);
|
||||
|
||||
const result = await blobURL.getProperties(Aborter.none);
|
||||
assert.equal(result.leaseDuration, LeaseDurationType.Infinite);
|
||||
assert.equal(result.leaseState, LeaseStateType.Leased);
|
||||
assert.equal(result.leaseStatus, LeaseStatusType.Locked);
|
||||
|
||||
await blobURL.releaseLease(Aborter.none, guid);
|
||||
});
|
||||
|
||||
it("renewLease", async () => {
|
||||
const guid = "ca761232ed4211cebacd00aa0057b223";
|
||||
const duration = 15;
|
||||
await blobURL.acquireLease(Aborter.none, guid, duration);
|
||||
|
||||
const result = await blobURL.getProperties(Aborter.none);
|
||||
assert.equal(result.leaseDuration, LeaseDurationType.Fixed);
|
||||
assert.equal(result.leaseState, LeaseStateType.Leased);
|
||||
assert.equal(result.leaseStatus, LeaseStatusType.Locked);
|
||||
|
||||
await sleep(16 * 1000);
|
||||
const result2 = await blobURL.getProperties(Aborter.none);
|
||||
assert.ok(!result2.leaseDuration);
|
||||
assert.equal(result2.leaseState, LeaseStateType.Expired);
|
||||
assert.equal(result2.leaseStatus, LeaseStatusType.Unlocked);
|
||||
|
||||
await blobURL.renewLease(Aborter.none, guid);
|
||||
const result3 = await blobURL.getProperties(Aborter.none);
|
||||
assert.equal(result3.leaseDuration, LeaseDurationType.Fixed);
|
||||
assert.equal(result3.leaseState, LeaseStateType.Leased);
|
||||
assert.equal(result3.leaseStatus, LeaseStatusType.Locked);
|
||||
|
||||
await blobURL.releaseLease(Aborter.none, guid);
|
||||
});
|
||||
|
||||
it("changeLease", async () => {
|
||||
const guid = "ca761232ed4211cebacd00aa0057b223";
|
||||
const duration = 15;
|
||||
await blobURL.acquireLease(Aborter.none, guid, duration);
|
||||
|
||||
const result = await blobURL.getProperties(Aborter.none);
|
||||
assert.equal(result.leaseDuration, LeaseDurationType.Fixed);
|
||||
assert.equal(result.leaseState, LeaseStateType.Leased);
|
||||
assert.equal(result.leaseStatus, LeaseStatusType.Locked);
|
||||
|
||||
const newGuid = "3c7e72ebb4304526bc53d8ecef03798f";
|
||||
await blobURL.changeLease(Aborter.none, guid, newGuid);
|
||||
|
||||
await blobURL.getProperties(Aborter.none);
|
||||
await blobURL.releaseLease(Aborter.none, newGuid);
|
||||
});
|
||||
|
||||
it("breakLease", async () => {
|
||||
const guid = "ca761232ed4211cebacd00aa0057b223";
|
||||
const duration = 15;
|
||||
await blobURL.acquireLease(Aborter.none, guid, duration);
|
||||
|
||||
const result = await blobURL.getProperties(Aborter.none);
|
||||
assert.equal(result.leaseDuration, LeaseDurationType.Fixed);
|
||||
assert.equal(result.leaseState, LeaseStateType.Leased);
|
||||
assert.equal(result.leaseStatus, LeaseStatusType.Locked);
|
||||
|
||||
await blobURL.breakLease(Aborter.none, 3);
|
||||
|
||||
const result2 = await blobURL.getProperties(Aborter.none);
|
||||
assert.ok(!result2.leaseDuration);
|
||||
assert.equal(result2.leaseState, LeaseStateType.Breaking);
|
||||
assert.equal(result2.leaseStatus, LeaseStatusType.Locked);
|
||||
|
||||
await sleep(3 * 1000);
|
||||
|
||||
const result3 = await blobURL.getProperties(Aborter.none);
|
||||
assert.ok(!result3.leaseDuration);
|
||||
assert.equal(result3.leaseState, LeaseStateType.Broken);
|
||||
assert.equal(result3.leaseStatus, LeaseStatusType.Unlocked);
|
||||
});
|
||||
|
||||
it("delete", async () => {
|
||||
await blobURL.delete(Aborter.none);
|
||||
});
|
||||
|
||||
// The following code illustrates deleting a snapshot after creating one
|
||||
it("delete snapshot", async () => {
|
||||
const result = await blobURL.createSnapshot(Aborter.none);
|
||||
assert.ok(result.snapshot);
|
||||
|
||||
const blobSnapshotURL = blobURL.withSnapshot(result.snapshot!);
|
||||
await blobSnapshotURL.getProperties(Aborter.none);
|
||||
|
||||
await blobSnapshotURL.delete(Aborter.none);
|
||||
await blobURL.delete(Aborter.none);
|
||||
|
||||
const result2 = await containerURL.listBlobFlatSegment(
|
||||
Aborter.none,
|
||||
undefined,
|
||||
{
|
||||
include: [ListBlobsIncludeItem.Snapshots]
|
||||
}
|
||||
);
|
||||
|
||||
// Verify that the snapshot is deleted
|
||||
assert.equal(result2.segment.blobItems!.length, 0);
|
||||
});
|
||||
|
||||
it("createSnapshot", async () => {
|
||||
const result = await blobURL.createSnapshot(Aborter.none);
|
||||
assert.ok(result.snapshot);
|
||||
|
||||
const blobSnapshotURL = blobURL.withSnapshot(result.snapshot!);
|
||||
await blobSnapshotURL.getProperties(Aborter.none);
|
||||
|
||||
const result3 = await containerURL.listBlobFlatSegment(
|
||||
Aborter.none,
|
||||
undefined,
|
||||
{
|
||||
include: [ListBlobsIncludeItem.Snapshots]
|
||||
}
|
||||
);
|
||||
|
||||
// As a snapshot doesn't have leaseStatus and leaseState properties but origin blob has,
|
||||
// let assign them to undefined both for other properties' easy comparison
|
||||
// tslint:disable-next-line:max-line-length
|
||||
result3.segment.blobItems![0].properties.leaseState = result3.segment.blobItems![1].properties.leaseState = undefined;
|
||||
// tslint:disable-next-line:max-line-length
|
||||
result3.segment.blobItems![0].properties.leaseStatus = result3.segment.blobItems![1].properties.leaseStatus = undefined;
|
||||
// tslint:disable-next-line:max-line-length
|
||||
result3.segment.blobItems![0].properties.accessTier = result3.segment.blobItems![1].properties.accessTier = undefined;
|
||||
// tslint:disable-next-line:max-line-length
|
||||
result3.segment.blobItems![0].properties.accessTierInferred = result3.segment.blobItems![1].properties.accessTierInferred = undefined;
|
||||
|
||||
assert.deepStrictEqual(
|
||||
result3.segment.blobItems![0].properties,
|
||||
result3.segment.blobItems![1].properties
|
||||
);
|
||||
assert.ok(
|
||||
result3.segment.blobItems![0].snapshot ||
|
||||
result3.segment.blobItems![1].snapshot
|
||||
);
|
||||
});
|
||||
|
||||
it("undelete", async () => {
|
||||
const properties = await serviceURL.getProperties(Aborter.none);
|
||||
if (!properties.deleteRetentionPolicy!.enabled) {
|
||||
await serviceURL.setProperties(Aborter.none, {
|
||||
deleteRetentionPolicy: {
|
||||
days: 7,
|
||||
enabled: true
|
||||
}
|
||||
});
|
||||
await sleep(15 * 1000);
|
||||
}
|
||||
|
||||
await blobURL.delete(Aborter.none);
|
||||
|
||||
const result = await containerURL.listBlobFlatSegment(
|
||||
Aborter.none,
|
||||
undefined,
|
||||
{
|
||||
include: [ListBlobsIncludeItem.Deleted]
|
||||
}
|
||||
);
|
||||
assert.ok(result.segment.blobItems![0].deleted);
|
||||
|
||||
await blobURL.undelete(Aborter.none);
|
||||
const result2 = await containerURL.listBlobFlatSegment(
|
||||
Aborter.none,
|
||||
undefined,
|
||||
{
|
||||
include: [ListBlobsIncludeItem.Deleted]
|
||||
}
|
||||
);
|
||||
assert.ok(!result2.segment.blobItems![0].deleted);
|
||||
});
|
||||
|
||||
it("startCopyFromURL", async () => {
|
||||
const newBlobURL = BlobURL.fromContainerURL(
|
||||
containerURL,
|
||||
getUniqueName("copiedblob")
|
||||
);
|
||||
const result = await newBlobURL.startCopyFromURL(Aborter.none, blobURL.url);
|
||||
assert.ok(result.copyId);
|
||||
|
||||
const properties1 = await blobURL.getProperties(Aborter.none);
|
||||
const properties2 = await newBlobURL.getProperties(Aborter.none);
|
||||
assert.deepStrictEqual(properties1.contentMD5, properties2.contentMD5);
|
||||
assert.deepStrictEqual(properties2.copyId, result.copyId);
|
||||
assert.deepStrictEqual(properties2.copySource, blobURL.url);
|
||||
});
|
||||
|
||||
it("abortCopyFromURL should failed for a completed copy operation", async () => {
|
||||
const newBlobURL = BlobURL.fromContainerURL(
|
||||
containerURL,
|
||||
getUniqueName("copiedblob")
|
||||
);
|
||||
const result = await newBlobURL.startCopyFromURL(Aborter.none, blobURL.url);
|
||||
assert.ok(result.copyId);
|
||||
sleep(1 * 1000);
|
||||
|
||||
try {
|
||||
await newBlobURL.abortCopyFromURL(Aborter.none, result.copyId!);
|
||||
assert.fail(
|
||||
"AbortCopyFromURL should be failed and throw exception for an completed copy operation."
|
||||
);
|
||||
} catch (err) {
|
||||
assert.ok(true);
|
||||
}
|
||||
});
|
||||
|
||||
it("setTier set default to cool", async () => {
|
||||
await blockBlobURL.setTier(Aborter.none, AccessTier.Cool);
|
||||
const properties = await blockBlobURL.getProperties(Aborter.none);
|
||||
assert.equal(properties.accessTier!.toLowerCase(), "cool");
|
||||
});
|
||||
|
||||
it("setTier set archive to hot", async () => {
|
||||
await blockBlobURL.setTier(Aborter.none, AccessTier.Archive);
|
||||
let properties = await blockBlobURL.getProperties(Aborter.none);
|
||||
assert.equal(properties.accessTier!.toLowerCase(), "archive");
|
||||
|
||||
await blockBlobURL.setTier(Aborter.none, AccessTier.Hot);
|
||||
properties = await blockBlobURL.getProperties(Aborter.none);
|
||||
if (properties.archiveStatus) {
|
||||
assert.equal(
|
||||
properties.archiveStatus.toLowerCase(),
|
||||
"rehydrate-pending-to-hot"
|
||||
);
|
||||
}
|
||||
});
|
||||
});
|
|
@ -0,0 +1,303 @@
|
|||
import * as assert from "assert";
|
||||
|
||||
import { Aborter } from "../lib/Aborter";
|
||||
import { BlobURL } from "../lib/BlobURL";
|
||||
import { BlockBlobURL } from "../lib/BlockBlobURL";
|
||||
import { ContainerURL } from "../lib/ContainerURL";
|
||||
import { BlockListType, PublicAccessType } from "../lib/generated/models";
|
||||
import { base64encode, bodyToString, getBSU, getUniqueName } from "./utils";
|
||||
|
||||
describe("BlockBlobURL", () => {
|
||||
const serviceURL = getBSU();
|
||||
let containerName: string = getUniqueName("container");
|
||||
let containerURL = ContainerURL.fromServiceURL(serviceURL, containerName);
|
||||
let blobName: string = getUniqueName("blob");
|
||||
let blobURL = BlobURL.fromContainerURL(containerURL, blobName);
|
||||
let blockBlobURL = BlockBlobURL.fromBlobURL(blobURL);
|
||||
|
||||
beforeEach(async () => {
|
||||
containerName = getUniqueName("container");
|
||||
containerURL = ContainerURL.fromServiceURL(serviceURL, containerName);
|
||||
await containerURL.create(Aborter.none);
|
||||
blobName = getUniqueName("blob");
|
||||
blobURL = BlobURL.fromContainerURL(containerURL, blobName);
|
||||
blockBlobURL = BlockBlobURL.fromBlobURL(blobURL);
|
||||
});
|
||||
|
||||
afterEach(async () => {
|
||||
await containerURL.delete(Aborter.none);
|
||||
});
|
||||
|
||||
it("upload with string body and default parameters", async () => {
|
||||
const body: string = getUniqueName("randomstring");
|
||||
await blockBlobURL.upload(Aborter.none, body, body.length);
|
||||
const result = await blobURL.download(Aborter.none, 0);
|
||||
assert.deepStrictEqual(await bodyToString(result, body.length), body);
|
||||
});
|
||||
|
||||
it("upload with string body and all parameters set", async () => {
|
||||
const body: string = getUniqueName("randomstring");
|
||||
const options = {
|
||||
blobCacheControl: "blobCacheControl",
|
||||
blobContentDisposition: "blobContentDisposition",
|
||||
blobContentEncoding: "blobContentEncoding",
|
||||
blobContentLanguage: "blobContentLanguage",
|
||||
blobContentType: "blobContentType",
|
||||
metadata: {
|
||||
keya: "vala",
|
||||
keyb: "valb"
|
||||
}
|
||||
};
|
||||
await blockBlobURL.upload(Aborter.none, body, body.length, {
|
||||
blobHTTPHeaders: options,
|
||||
metadata: options.metadata
|
||||
});
|
||||
const result = await blobURL.download(Aborter.none, 0);
|
||||
assert.deepStrictEqual(await bodyToString(result, body.length), body);
|
||||
assert.deepStrictEqual(result.cacheControl, options.blobCacheControl);
|
||||
assert.deepStrictEqual(
|
||||
result.contentDisposition,
|
||||
options.blobContentDisposition
|
||||
);
|
||||
assert.deepStrictEqual(result.contentEncoding, options.blobContentEncoding);
|
||||
assert.deepStrictEqual(result.contentLanguage, options.blobContentLanguage);
|
||||
assert.deepStrictEqual(result.contentType, options.blobContentType);
|
||||
assert.deepStrictEqual(result.metadata, options.metadata);
|
||||
});
|
||||
|
||||
it("stageBlock", async () => {
|
||||
const body = "HelloWorld";
|
||||
await blockBlobURL.stageBlock(
|
||||
Aborter.none,
|
||||
base64encode("1"),
|
||||
body,
|
||||
body.length
|
||||
);
|
||||
await blockBlobURL.stageBlock(
|
||||
Aborter.none,
|
||||
base64encode("2"),
|
||||
body,
|
||||
body.length
|
||||
);
|
||||
const listResponse = await blockBlobURL.getBlockList(
|
||||
Aborter.none,
|
||||
BlockListType.Uncommitted
|
||||
);
|
||||
assert.equal(listResponse.uncommittedBlocks!.length, 2);
|
||||
assert.equal(listResponse.uncommittedBlocks![0].name, base64encode("1"));
|
||||
assert.equal(listResponse.uncommittedBlocks![0].size, body.length);
|
||||
assert.equal(listResponse.uncommittedBlocks![1].name, base64encode("2"));
|
||||
assert.equal(listResponse.uncommittedBlocks![1].size, body.length);
|
||||
});
|
||||
|
||||
it("stageBlockFromURL copy source blob as single block", async () => {
|
||||
const body = "HelloWorld";
|
||||
await blockBlobURL.upload(Aborter.none, body, body.length);
|
||||
|
||||
// When testing is in Node.js environment with shared key, setAccessPolicy will work
|
||||
// But in browsers testing with SAS tokens, below will throw an exception, ignore it
|
||||
try {
|
||||
await containerURL.setAccessPolicy(
|
||||
Aborter.none,
|
||||
PublicAccessType.Container
|
||||
);
|
||||
// tslint:disable-next-line:no-empty
|
||||
} catch (err) {}
|
||||
|
||||
const newBlockBlobURL = BlockBlobURL.fromContainerURL(
|
||||
containerURL,
|
||||
getUniqueName("newblockblob")
|
||||
);
|
||||
await newBlockBlobURL.stageBlockFromURL(
|
||||
Aborter.none,
|
||||
base64encode("1"),
|
||||
blockBlobURL.url,
|
||||
0
|
||||
);
|
||||
|
||||
const listResponse = await newBlockBlobURL.getBlockList(
|
||||
Aborter.none,
|
||||
BlockListType.Uncommitted
|
||||
);
|
||||
assert.equal(listResponse.uncommittedBlocks!.length, 1);
|
||||
assert.equal(listResponse.uncommittedBlocks![0].name, base64encode("1"));
|
||||
assert.equal(listResponse.uncommittedBlocks![0].size, body.length);
|
||||
});
|
||||
|
||||
it("stageBlockFromURL copy source blob as separate blocks", async () => {
|
||||
const body = "HelloWorld";
|
||||
await blockBlobURL.upload(Aborter.none, body, body.length);
|
||||
|
||||
// When testing is in Node.js environment with shared key, setAccessPolicy will work
|
||||
// But in browsers testing with SAS tokens, below will throw an exception, ignore it
|
||||
try {
|
||||
await containerURL.setAccessPolicy(
|
||||
Aborter.none,
|
||||
PublicAccessType.Container
|
||||
);
|
||||
// tslint:disable-next-line:no-empty
|
||||
} catch (err) {}
|
||||
|
||||
const newBlockBlobURL = BlockBlobURL.fromContainerURL(
|
||||
containerURL,
|
||||
getUniqueName("newblockblob")
|
||||
);
|
||||
await newBlockBlobURL.stageBlockFromURL(
|
||||
Aborter.none,
|
||||
base64encode("1"),
|
||||
blockBlobURL.url,
|
||||
0,
|
||||
4
|
||||
);
|
||||
await newBlockBlobURL.stageBlockFromURL(
|
||||
Aborter.none,
|
||||
base64encode("2"),
|
||||
blockBlobURL.url,
|
||||
4,
|
||||
4
|
||||
);
|
||||
await newBlockBlobURL.stageBlockFromURL(
|
||||
Aborter.none,
|
||||
base64encode("3"),
|
||||
blockBlobURL.url,
|
||||
8,
|
||||
2
|
||||
);
|
||||
|
||||
const listResponse = await newBlockBlobURL.getBlockList(
|
||||
Aborter.none,
|
||||
BlockListType.Uncommitted
|
||||
);
|
||||
assert.equal(listResponse.uncommittedBlocks!.length, 3);
|
||||
assert.equal(listResponse.uncommittedBlocks![0].name, base64encode("1"));
|
||||
assert.equal(listResponse.uncommittedBlocks![0].size, 4);
|
||||
assert.equal(listResponse.uncommittedBlocks![1].name, base64encode("2"));
|
||||
assert.equal(listResponse.uncommittedBlocks![1].size, 4);
|
||||
assert.equal(listResponse.uncommittedBlocks![2].name, base64encode("3"));
|
||||
assert.equal(listResponse.uncommittedBlocks![2].size, 2);
|
||||
|
||||
await newBlockBlobURL.commitBlockList(Aborter.none, [
|
||||
base64encode("1"),
|
||||
base64encode("2"),
|
||||
base64encode("3")
|
||||
]);
|
||||
|
||||
const downloadResponse = await newBlockBlobURL.download(Aborter.none, 0);
|
||||
assert.equal(await bodyToString(downloadResponse, 10), body);
|
||||
});
|
||||
|
||||
it("commitBlockList", async () => {
|
||||
const body = "HelloWorld";
|
||||
await blockBlobURL.stageBlock(
|
||||
Aborter.none,
|
||||
base64encode("1"),
|
||||
body,
|
||||
body.length
|
||||
);
|
||||
await blockBlobURL.stageBlock(
|
||||
Aborter.none,
|
||||
base64encode("2"),
|
||||
body,
|
||||
body.length
|
||||
);
|
||||
await blockBlobURL.commitBlockList(Aborter.none, [
|
||||
base64encode("1"),
|
||||
base64encode("2")
|
||||
]);
|
||||
const listResponse = await blockBlobURL.getBlockList(
|
||||
Aborter.none,
|
||||
BlockListType.Committed
|
||||
);
|
||||
assert.equal(listResponse.committedBlocks!.length, 2);
|
||||
assert.equal(listResponse.committedBlocks![0].name, base64encode("1"));
|
||||
assert.equal(listResponse.committedBlocks![0].size, body.length);
|
||||
assert.equal(listResponse.committedBlocks![1].name, base64encode("2"));
|
||||
assert.equal(listResponse.committedBlocks![1].size, body.length);
|
||||
});
|
||||
|
||||
it("commitBlockList with all parameters set", async () => {
|
||||
const body = "HelloWorld";
|
||||
await blockBlobURL.stageBlock(
|
||||
Aborter.none,
|
||||
base64encode("1"),
|
||||
body,
|
||||
body.length
|
||||
);
|
||||
await blockBlobURL.stageBlock(
|
||||
Aborter.none,
|
||||
base64encode("2"),
|
||||
body,
|
||||
body.length
|
||||
);
|
||||
|
||||
const options = {
|
||||
blobCacheControl: "blobCacheControl",
|
||||
blobContentDisposition: "blobContentDisposition",
|
||||
blobContentEncoding: "blobContentEncoding",
|
||||
blobContentLanguage: "blobContentLanguage",
|
||||
blobContentType: "blobContentType",
|
||||
metadata: {
|
||||
keya: "vala",
|
||||
keyb: "valb"
|
||||
}
|
||||
};
|
||||
await blockBlobURL.commitBlockList(
|
||||
Aborter.none,
|
||||
[base64encode("1"), base64encode("2")],
|
||||
{
|
||||
blobHTTPHeaders: options,
|
||||
metadata: options.metadata
|
||||
}
|
||||
);
|
||||
|
||||
const listResponse = await blockBlobURL.getBlockList(
|
||||
Aborter.none,
|
||||
BlockListType.Committed
|
||||
);
|
||||
assert.equal(listResponse.committedBlocks!.length, 2);
|
||||
assert.equal(listResponse.committedBlocks![0].name, base64encode("1"));
|
||||
assert.equal(listResponse.committedBlocks![0].size, body.length);
|
||||
assert.equal(listResponse.committedBlocks![1].name, base64encode("2"));
|
||||
assert.equal(listResponse.committedBlocks![1].size, body.length);
|
||||
|
||||
const result = await blobURL.download(Aborter.none, 0);
|
||||
assert.deepStrictEqual(
|
||||
await bodyToString(result, body.repeat(2).length),
|
||||
body.repeat(2)
|
||||
);
|
||||
assert.deepStrictEqual(result.cacheControl, options.blobCacheControl);
|
||||
assert.deepStrictEqual(
|
||||
result.contentDisposition,
|
||||
options.blobContentDisposition
|
||||
);
|
||||
assert.deepStrictEqual(result.contentEncoding, options.blobContentEncoding);
|
||||
assert.deepStrictEqual(result.contentLanguage, options.blobContentLanguage);
|
||||
assert.deepStrictEqual(result.contentType, options.blobContentType);
|
||||
assert.deepStrictEqual(result.metadata, options.metadata);
|
||||
});
|
||||
|
||||
it("getBlockList", async () => {
|
||||
const body = "HelloWorld";
|
||||
await blockBlobURL.stageBlock(
|
||||
Aborter.none,
|
||||
base64encode("1"),
|
||||
body,
|
||||
body.length
|
||||
);
|
||||
await blockBlobURL.stageBlock(
|
||||
Aborter.none,
|
||||
base64encode("2"),
|
||||
body,
|
||||
body.length
|
||||
);
|
||||
await blockBlobURL.commitBlockList(Aborter.none, [base64encode("2")]);
|
||||
const listResponse = await blockBlobURL.getBlockList(
|
||||
Aborter.none,
|
||||
BlockListType.All
|
||||
);
|
||||
assert.equal(listResponse.committedBlocks!.length, 1);
|
||||
assert.equal(listResponse.uncommittedBlocks!.length, 0);
|
||||
assert.equal(listResponse.committedBlocks![0].name, base64encode("2"));
|
||||
assert.equal(listResponse.committedBlocks![0].size, body.length);
|
||||
});
|
||||
});
|
|
@ -0,0 +1,149 @@
|
|||
import * as assert from "assert";
|
||||
|
||||
import { Aborter } from "../../lib/Aborter";
|
||||
import { BlobURL } from "../../lib/BlobURL";
|
||||
import { BlockBlobURL } from "../../lib/BlockBlobURL";
|
||||
import { ContainerURL } from "../../lib/ContainerURL";
|
||||
import { uploadBrowserDataToBlockBlob } from "../../lib/highlevel.browser";
|
||||
import {
|
||||
arrayBufferEqual,
|
||||
blobToArrayBuffer,
|
||||
blobToString,
|
||||
bodyToString,
|
||||
getBrowserFile,
|
||||
getBSU,
|
||||
getUniqueName,
|
||||
isIE
|
||||
} from "../utils/index.browser";
|
||||
|
||||
// tslint:disable:no-empty
|
||||
describe("Highelvel", () => {
|
||||
const serviceURL = getBSU();
|
||||
let containerName = getUniqueName("container");
|
||||
let containerURL = ContainerURL.fromServiceURL(serviceURL, containerName);
|
||||
let blobName = getUniqueName("blob");
|
||||
let blobURL = BlobURL.fromContainerURL(containerURL, blobName);
|
||||
let blockBlobURL = BlockBlobURL.fromBlobURL(blobURL);
|
||||
let tempFile1: File;
|
||||
const tempFile1Length: number = 257 * 1024 * 1024 - 1;
|
||||
let tempFile2: File;
|
||||
const tempFile2Length: number = 1 * 1024 * 1024 - 1;
|
||||
|
||||
beforeEach(async () => {
|
||||
containerName = getUniqueName("container");
|
||||
containerURL = ContainerURL.fromServiceURL(serviceURL, containerName);
|
||||
await containerURL.create(Aborter.none);
|
||||
blobName = getUniqueName("blob");
|
||||
blobURL = BlobURL.fromContainerURL(containerURL, blobName);
|
||||
blockBlobURL = BlockBlobURL.fromBlobURL(blobURL);
|
||||
});
|
||||
|
||||
afterEach(async () => {
|
||||
await containerURL.delete(Aborter.none);
|
||||
});
|
||||
|
||||
before(async () => {
|
||||
tempFile1 = getBrowserFile(getUniqueName("browserfile"), tempFile1Length);
|
||||
tempFile2 = getBrowserFile(getUniqueName("browserfile"), tempFile2Length);
|
||||
});
|
||||
|
||||
after(async () => {});
|
||||
|
||||
it("uploadBrowserDataToBlockBlob should abort when blob >= BLOCK_BLOB_MAX_UPLOAD_BLOB_BYTES", async () => {
|
||||
const aborter = Aborter.timeout(1);
|
||||
|
||||
try {
|
||||
await uploadBrowserDataToBlockBlob(aborter, tempFile1, blockBlobURL, {
|
||||
blockSize: 4 * 1024 * 1024,
|
||||
parallelism: 2
|
||||
});
|
||||
assert.fail();
|
||||
} catch (err) {
|
||||
assert.ok((err.code as string).toLowerCase().includes("abort"));
|
||||
}
|
||||
});
|
||||
|
||||
it("uploadBrowserDataToBlockBlob should abort when blob < BLOCK_BLOB_MAX_UPLOAD_BLOB_BYTES", async () => {
|
||||
const aborter = Aborter.timeout(1);
|
||||
|
||||
try {
|
||||
await uploadBrowserDataToBlockBlob(aborter, tempFile2, blockBlobURL, {
|
||||
blockSize: 4 * 1024 * 1024,
|
||||
parallelism: 2
|
||||
});
|
||||
assert.fail();
|
||||
} catch (err) {
|
||||
assert.ok((err.code as string).toLowerCase().includes("abort"));
|
||||
}
|
||||
});
|
||||
|
||||
it("uploadBrowserDataToBlockBlob should update progress when blob >= BLOCK_BLOB_MAX_UPLOAD_BLOB_BYTES", async () => {
|
||||
let eventTriggered = false;
|
||||
const aborter = Aborter.none;
|
||||
|
||||
try {
|
||||
await uploadBrowserDataToBlockBlob(aborter, tempFile1, blockBlobURL, {
|
||||
blockSize: 4 * 1024 * 1024,
|
||||
parallelism: 2,
|
||||
progress: ev => {
|
||||
assert.ok(ev.loadedBytes);
|
||||
eventTriggered = true;
|
||||
aborter.abort();
|
||||
}
|
||||
});
|
||||
} catch (err) {}
|
||||
assert.ok(eventTriggered);
|
||||
});
|
||||
|
||||
it("uploadBrowserDataToBlockBlob should update progress when blob < BLOCK_BLOB_MAX_UPLOAD_BLOB_BYTES", async () => {
|
||||
let eventTriggered = false;
|
||||
const aborter = Aborter.none;
|
||||
|
||||
try {
|
||||
await uploadBrowserDataToBlockBlob(aborter, tempFile2, blockBlobURL, {
|
||||
blockSize: 4 * 1024 * 1024,
|
||||
parallelism: 2,
|
||||
progress: ev => {
|
||||
assert.ok(ev.loadedBytes);
|
||||
eventTriggered = true;
|
||||
aborter.abort();
|
||||
}
|
||||
});
|
||||
} catch (err) {}
|
||||
assert.ok(eventTriggered);
|
||||
});
|
||||
|
||||
it("uploadBrowserDataToBlockBlob should success when blob < BLOCK_BLOB_MAX_UPLOAD_BLOB_BYTES", async () => {
|
||||
await uploadBrowserDataToBlockBlob(Aborter.none, tempFile2, blockBlobURL, {
|
||||
blockSize: 4 * 1024 * 1024,
|
||||
parallelism: 2
|
||||
});
|
||||
|
||||
const downloadResponse = await blockBlobURL.download(Aborter.none, 0);
|
||||
const downloadedString = await bodyToString(downloadResponse);
|
||||
const uploadedString = await blobToString(tempFile2);
|
||||
|
||||
assert.equal(uploadedString, downloadedString);
|
||||
});
|
||||
|
||||
it("uploadBrowserDataToBlockBlob should success when blob >= BLOCK_BLOB_MAX_UPLOAD_BLOB_BYTES", async () => {
|
||||
if (isIE()) {
|
||||
assert.ok(
|
||||
true,
|
||||
"Skip this case in IE11 which doesn't have enough memory for downloading validation"
|
||||
);
|
||||
return;
|
||||
}
|
||||
|
||||
await uploadBrowserDataToBlockBlob(Aborter.none, tempFile1, blockBlobURL, {
|
||||
blockSize: 4 * 1024 * 1024,
|
||||
parallelism: 2
|
||||
});
|
||||
|
||||
const downloadResponse = await blockBlobURL.download(Aborter.none, 0);
|
||||
const buf1 = await blobToArrayBuffer(await downloadResponse.blobBody!);
|
||||
const buf2 = await blobToArrayBuffer(tempFile1);
|
||||
|
||||
assert.ok(arrayBufferEqual(buf1, buf2));
|
||||
});
|
||||
});
|
|
@ -0,0 +1,390 @@
|
|||
import * as assert from "assert";
|
||||
|
||||
import { Aborter } from "../lib/Aborter";
|
||||
import { BlobURL } from "../lib/BlobURL";
|
||||
import { BlockBlobURL } from "../lib/BlockBlobURL";
|
||||
import { ContainerURL } from "../lib/ContainerURL";
|
||||
import {
|
||||
LeaseDurationType,
|
||||
LeaseStateType,
|
||||
LeaseStatusType,
|
||||
ListBlobsIncludeItem,
|
||||
PublicAccessType
|
||||
} from "../lib/generated/models";
|
||||
import { getBSU, getUniqueName, sleep } from "./utils";
|
||||
|
||||
describe("ContainerURL", () => {
|
||||
const serviceURL = getBSU();
|
||||
let containerName: string = getUniqueName("container");
|
||||
let containerURL = ContainerURL.fromServiceURL(serviceURL, containerName);
|
||||
|
||||
beforeEach(async () => {
|
||||
containerName = getUniqueName("container");
|
||||
containerURL = ContainerURL.fromServiceURL(serviceURL, containerName);
|
||||
await containerURL.create(Aborter.none);
|
||||
});
|
||||
|
||||
afterEach(async () => {
|
||||
await containerURL.delete(Aborter.none);
|
||||
});
|
||||
|
||||
it("setMetadata", async () => {
|
||||
const metadata = {
|
||||
key0: "val0",
|
||||
keya: "vala",
|
||||
keyb: "valb"
|
||||
};
|
||||
await containerURL.setMetadata(Aborter.none, {
|
||||
metadata
|
||||
});
|
||||
|
||||
const result = await containerURL.getProperties(Aborter.none);
|
||||
assert.deepEqual(result.metadata, metadata);
|
||||
});
|
||||
|
||||
it("getProperties", async () => {
|
||||
const result = await containerURL.getProperties(Aborter.none);
|
||||
assert.ok(result.eTag!.length > 0);
|
||||
assert.ok(result.lastModified);
|
||||
assert.ok(!result.leaseDuration);
|
||||
assert.equal(result.leaseState, LeaseStateType.Available);
|
||||
assert.equal(result.leaseStatus, LeaseStatusType.Unlocked);
|
||||
assert.ok(result.requestId);
|
||||
assert.ok(result.version);
|
||||
assert.ok(result.date);
|
||||
assert.ok(!result.blobPublicAccess);
|
||||
});
|
||||
|
||||
it("create with default parameters", done => {
|
||||
// create() with default parameters has been tested in beforeEach
|
||||
done();
|
||||
});
|
||||
|
||||
it("create with all parameters configured", async () => {
|
||||
const cURL = ContainerURL.fromServiceURL(
|
||||
serviceURL,
|
||||
getUniqueName(containerName)
|
||||
);
|
||||
const metadata = { key: "value" };
|
||||
const access = PublicAccessType.Container;
|
||||
await cURL.create(Aborter.none, { metadata, access });
|
||||
const result = await cURL.getProperties(Aborter.none);
|
||||
assert.deepEqual(result.blobPublicAccess, access);
|
||||
assert.deepEqual(result.metadata, metadata);
|
||||
});
|
||||
|
||||
it("delete", done => {
|
||||
// delete() with default parameters has been tested in afterEach
|
||||
done();
|
||||
});
|
||||
|
||||
it("acquireLease", async () => {
|
||||
const guid = "ca761232ed4211cebacd00aa0057b223";
|
||||
const duration = 30;
|
||||
await containerURL.acquireLease(Aborter.none, guid, duration);
|
||||
|
||||
const result = await containerURL.getProperties(Aborter.none);
|
||||
assert.equal(result.leaseDuration, LeaseDurationType.Fixed);
|
||||
assert.equal(result.leaseState, LeaseStateType.Leased);
|
||||
assert.equal(result.leaseStatus, LeaseStatusType.Locked);
|
||||
|
||||
await containerURL.releaseLease(Aborter.none, guid);
|
||||
});
|
||||
|
||||
it("releaseLease", async () => {
|
||||
const guid = "ca761232ed4211cebacd00aa0057b223";
|
||||
const duration = -1;
|
||||
await containerURL.acquireLease(Aborter.none, guid, duration);
|
||||
|
||||
const result = await containerURL.getProperties(Aborter.none);
|
||||
assert.equal(result.leaseDuration, LeaseDurationType.Infinite);
|
||||
assert.equal(result.leaseState, LeaseStateType.Leased);
|
||||
assert.equal(result.leaseStatus, LeaseStatusType.Locked);
|
||||
|
||||
await containerURL.releaseLease(Aborter.none, guid);
|
||||
});
|
||||
|
||||
it("renewLease", async () => {
|
||||
const guid = "ca761232ed4211cebacd00aa0057b223";
|
||||
const duration = 15;
|
||||
await containerURL.acquireLease(Aborter.none, guid, duration);
|
||||
|
||||
const result = await containerURL.getProperties(Aborter.none);
|
||||
assert.equal(result.leaseDuration, LeaseDurationType.Fixed);
|
||||
assert.equal(result.leaseState, LeaseStateType.Leased);
|
||||
assert.equal(result.leaseStatus, LeaseStatusType.Locked);
|
||||
|
||||
await sleep(16 * 1000);
|
||||
const result2 = await containerURL.getProperties(Aborter.none);
|
||||
assert.ok(!result2.leaseDuration);
|
||||
assert.equal(result2.leaseState, LeaseStateType.Expired);
|
||||
assert.equal(result2.leaseStatus, LeaseStatusType.Unlocked);
|
||||
|
||||
await containerURL.renewLease(Aborter.none, guid);
|
||||
const result3 = await containerURL.getProperties(Aborter.none);
|
||||
assert.equal(result3.leaseDuration, LeaseDurationType.Fixed);
|
||||
assert.equal(result3.leaseState, LeaseStateType.Leased);
|
||||
assert.equal(result3.leaseStatus, LeaseStatusType.Locked);
|
||||
|
||||
await containerURL.releaseLease(Aborter.none, guid);
|
||||
});
|
||||
|
||||
it("changeLease", async () => {
|
||||
const guid = "ca761232ed4211cebacd00aa0057b223";
|
||||
const duration = 15;
|
||||
await containerURL.acquireLease(Aborter.none, guid, duration);
|
||||
|
||||
const result = await containerURL.getProperties(Aborter.none);
|
||||
assert.equal(result.leaseDuration, LeaseDurationType.Fixed);
|
||||
assert.equal(result.leaseState, LeaseStateType.Leased);
|
||||
assert.equal(result.leaseStatus, LeaseStatusType.Locked);
|
||||
|
||||
const newGuid = "3c7e72ebb4304526bc53d8ecef03798f";
|
||||
await containerURL.changeLease(Aborter.none, guid, newGuid);
|
||||
|
||||
await containerURL.getProperties(Aborter.none);
|
||||
await containerURL.releaseLease(Aborter.none, newGuid);
|
||||
});
|
||||
|
||||
it("breakLease", async () => {
|
||||
const guid = "ca761232ed4211cebacd00aa0057b223";
|
||||
const duration = 15;
|
||||
await containerURL.acquireLease(Aborter.none, guid, duration);
|
||||
|
||||
const result = await containerURL.getProperties(Aborter.none);
|
||||
assert.equal(result.leaseDuration, LeaseDurationType.Fixed);
|
||||
assert.equal(result.leaseState, LeaseStateType.Leased);
|
||||
assert.equal(result.leaseStatus, LeaseStatusType.Locked);
|
||||
|
||||
await containerURL.breakLease(Aborter.none, 3);
|
||||
|
||||
const result2 = await containerURL.getProperties(Aborter.none);
|
||||
assert.ok(!result2.leaseDuration);
|
||||
assert.equal(result2.leaseState, LeaseStateType.Breaking);
|
||||
assert.equal(result2.leaseStatus, LeaseStatusType.Locked);
|
||||
|
||||
await sleep(3 * 1000);
|
||||
|
||||
const result3 = await containerURL.getProperties(Aborter.none);
|
||||
assert.ok(!result3.leaseDuration);
|
||||
assert.equal(result3.leaseState, LeaseStateType.Broken);
|
||||
assert.equal(result3.leaseStatus, LeaseStatusType.Unlocked);
|
||||
});
|
||||
|
||||
it("listBlobFlatSegment with default parameters", async () => {
|
||||
const blobURLs = [];
|
||||
for (let i = 0; i < 3; i++) {
|
||||
const blobURL = BlobURL.fromContainerURL(
|
||||
containerURL,
|
||||
getUniqueName(`blockblob/${i}`)
|
||||
);
|
||||
const blockBlobURL = BlockBlobURL.fromBlobURL(blobURL);
|
||||
await blockBlobURL.upload(Aborter.none, "", 0);
|
||||
blobURLs.push(blobURL);
|
||||
}
|
||||
|
||||
const result = await containerURL.listBlobFlatSegment(Aborter.none);
|
||||
assert.ok(result.serviceEndpoint.length > 0);
|
||||
assert.ok(containerURL.url.indexOf(result.containerName));
|
||||
assert.deepStrictEqual(result.nextMarker, "");
|
||||
assert.deepStrictEqual(result.segment.blobItems!.length, blobURLs.length);
|
||||
assert.ok(blobURLs[0].url.indexOf(result.segment.blobItems![0].name));
|
||||
|
||||
for (const blob of blobURLs) {
|
||||
await blob.delete(Aborter.none);
|
||||
}
|
||||
});
|
||||
|
||||
it("listBlobFlatSegment with all parameters configured", async () => {
|
||||
const blobURLs = [];
|
||||
const prefix = "blockblob";
|
||||
const metadata = {
|
||||
keya: "a",
|
||||
keyb: "c"
|
||||
};
|
||||
for (let i = 0; i < 2; i++) {
|
||||
const blobURL = BlobURL.fromContainerURL(
|
||||
containerURL,
|
||||
getUniqueName(`${prefix}/${i}`)
|
||||
);
|
||||
const blockBlobURL = BlockBlobURL.fromBlobURL(blobURL);
|
||||
await blockBlobURL.upload(Aborter.none, "", 0, {
|
||||
metadata
|
||||
});
|
||||
blobURLs.push(blobURL);
|
||||
}
|
||||
|
||||
const result = await containerURL.listBlobFlatSegment(
|
||||
Aborter.none,
|
||||
undefined,
|
||||
{
|
||||
include: [
|
||||
ListBlobsIncludeItem.Snapshots,
|
||||
ListBlobsIncludeItem.Metadata,
|
||||
ListBlobsIncludeItem.Uncommittedblobs,
|
||||
ListBlobsIncludeItem.Copy,
|
||||
ListBlobsIncludeItem.Deleted
|
||||
],
|
||||
maxresults: 1,
|
||||
prefix
|
||||
}
|
||||
);
|
||||
assert.ok(result.serviceEndpoint.length > 0);
|
||||
assert.ok(containerURL.url.indexOf(result.containerName));
|
||||
assert.deepStrictEqual(result.segment.blobItems!.length, 1);
|
||||
assert.ok(blobURLs[0].url.indexOf(result.segment.blobItems![0].name));
|
||||
assert.deepStrictEqual(result.segment.blobItems![0].metadata, metadata);
|
||||
|
||||
const result2 = await containerURL.listBlobFlatSegment(
|
||||
Aborter.none,
|
||||
result.nextMarker,
|
||||
{
|
||||
include: [
|
||||
ListBlobsIncludeItem.Snapshots,
|
||||
ListBlobsIncludeItem.Metadata,
|
||||
ListBlobsIncludeItem.Uncommittedblobs,
|
||||
ListBlobsIncludeItem.Copy,
|
||||
ListBlobsIncludeItem.Deleted
|
||||
],
|
||||
maxresults: 2,
|
||||
prefix
|
||||
}
|
||||
);
|
||||
|
||||
assert.ok(result2.serviceEndpoint.length > 0);
|
||||
assert.ok(containerURL.url.indexOf(result2.containerName));
|
||||
assert.deepStrictEqual(result2.segment.blobItems!.length, 1);
|
||||
assert.ok(blobURLs[0].url.indexOf(result2.segment.blobItems![0].name));
|
||||
assert.deepStrictEqual(result2.segment.blobItems![0].metadata, metadata);
|
||||
|
||||
for (const blob of blobURLs) {
|
||||
await blob.delete(Aborter.none);
|
||||
}
|
||||
});
|
||||
|
||||
it("listBlobHierarchySegment with default parameters", async () => {
|
||||
const blobURLs = [];
|
||||
for (let i = 0; i < 3; i++) {
|
||||
const blobURL = BlobURL.fromContainerURL(
|
||||
containerURL,
|
||||
getUniqueName(`blockblob${i}/${i}`)
|
||||
);
|
||||
const blockBlobURL = BlockBlobURL.fromBlobURL(blobURL);
|
||||
await blockBlobURL.upload(Aborter.none, "", 0);
|
||||
blobURLs.push(blobURL);
|
||||
}
|
||||
|
||||
const delimiter = "/";
|
||||
const result = await containerURL.listBlobHierarchySegment(
|
||||
Aborter.none,
|
||||
delimiter
|
||||
);
|
||||
assert.ok(result.serviceEndpoint.length > 0);
|
||||
assert.ok(containerURL.url.indexOf(result.containerName));
|
||||
assert.deepStrictEqual(result.nextMarker, "");
|
||||
assert.deepStrictEqual(result.delimiter, delimiter);
|
||||
assert.deepStrictEqual(
|
||||
result.segment.blobPrefixes!.length,
|
||||
blobURLs.length
|
||||
);
|
||||
|
||||
for (const blob of blobURLs) {
|
||||
let i = 0;
|
||||
assert.ok(blob.url.indexOf(result.segment.blobPrefixes![i++].name));
|
||||
}
|
||||
|
||||
for (const blob of blobURLs) {
|
||||
await blob.delete(Aborter.none);
|
||||
}
|
||||
});
|
||||
|
||||
it("listBlobHierarchySegment with all parameters configured", async () => {
|
||||
const blobURLs = [];
|
||||
const prefix = "blockblob";
|
||||
const metadata = {
|
||||
keya: "a",
|
||||
keyb: "c"
|
||||
};
|
||||
const delimiter = "/";
|
||||
for (let i = 0; i < 2; i++) {
|
||||
const blobURL = BlobURL.fromContainerURL(
|
||||
containerURL,
|
||||
getUniqueName(`${prefix}${i}${delimiter}${i}`)
|
||||
);
|
||||
const blockBlobURL = BlockBlobURL.fromBlobURL(blobURL);
|
||||
await blockBlobURL.upload(Aborter.none, "", 0, {
|
||||
metadata
|
||||
});
|
||||
blobURLs.push(blobURL);
|
||||
}
|
||||
|
||||
const result = await containerURL.listBlobHierarchySegment(
|
||||
Aborter.none,
|
||||
delimiter,
|
||||
undefined,
|
||||
{
|
||||
include: [
|
||||
ListBlobsIncludeItem.Metadata,
|
||||
ListBlobsIncludeItem.Uncommittedblobs,
|
||||
ListBlobsIncludeItem.Copy,
|
||||
ListBlobsIncludeItem.Deleted
|
||||
],
|
||||
maxresults: 1,
|
||||
prefix
|
||||
}
|
||||
);
|
||||
assert.ok(result.serviceEndpoint.length > 0);
|
||||
assert.ok(containerURL.url.indexOf(result.containerName));
|
||||
assert.deepStrictEqual(result.segment.blobPrefixes!.length, 1);
|
||||
assert.deepStrictEqual(result.segment.blobItems!.length, 0);
|
||||
assert.ok(blobURLs[0].url.indexOf(result.segment.blobPrefixes![0].name));
|
||||
|
||||
const result2 = await containerURL.listBlobHierarchySegment(
|
||||
Aborter.none,
|
||||
delimiter,
|
||||
result.nextMarker,
|
||||
{
|
||||
include: [
|
||||
ListBlobsIncludeItem.Metadata,
|
||||
ListBlobsIncludeItem.Uncommittedblobs,
|
||||
ListBlobsIncludeItem.Copy,
|
||||
ListBlobsIncludeItem.Deleted
|
||||
],
|
||||
maxresults: 2,
|
||||
prefix
|
||||
}
|
||||
);
|
||||
assert.ok(result2.serviceEndpoint.length > 0);
|
||||
assert.ok(containerURL.url.indexOf(result2.containerName));
|
||||
assert.deepStrictEqual(result2.segment.blobPrefixes!.length, 1);
|
||||
assert.deepStrictEqual(result2.segment.blobItems!.length, 0);
|
||||
assert.ok(blobURLs[0].url.indexOf(result2.segment.blobPrefixes![0].name));
|
||||
|
||||
const result3 = await containerURL.listBlobHierarchySegment(
|
||||
Aborter.none,
|
||||
delimiter,
|
||||
undefined,
|
||||
{
|
||||
include: [
|
||||
ListBlobsIncludeItem.Metadata,
|
||||
ListBlobsIncludeItem.Uncommittedblobs,
|
||||
ListBlobsIncludeItem.Copy,
|
||||
ListBlobsIncludeItem.Deleted
|
||||
],
|
||||
maxresults: 2,
|
||||
prefix: `${prefix}0${delimiter}`
|
||||
}
|
||||
);
|
||||
assert.ok(result3.serviceEndpoint.length > 0);
|
||||
assert.ok(containerURL.url.indexOf(result3.containerName));
|
||||
assert.deepStrictEqual(result3.nextMarker, "");
|
||||
assert.deepStrictEqual(result3.delimiter, delimiter);
|
||||
assert.deepStrictEqual(result3.segment.blobItems!.length, 1);
|
||||
assert.deepStrictEqual(result3.segment.blobItems![0].metadata, metadata);
|
||||
assert.ok(blobURLs[0].url.indexOf(result3.segment.blobItems![0].name));
|
||||
|
||||
for (const blob of blobURLs) {
|
||||
await blob.delete(Aborter.none);
|
||||
}
|
||||
});
|
||||
});
|
|
@ -0,0 +1,41 @@
|
|||
import * as assert from "assert";
|
||||
|
||||
import { Aborter } from "../../lib/Aborter";
|
||||
import { BlobURL } from "../../lib/BlobURL";
|
||||
import { BlockBlobURL } from "../../lib/BlockBlobURL";
|
||||
import { ContainerURL } from "../../lib/ContainerURL";
|
||||
import { getBSU, getUniqueName } from "../utils";
|
||||
|
||||
describe("BlockBlobURL Node.js only", () => {
|
||||
const serviceURL = getBSU();
|
||||
let containerName: string = getUniqueName("container");
|
||||
let containerURL = ContainerURL.fromServiceURL(serviceURL, containerName);
|
||||
let blobName: string = getUniqueName("blob");
|
||||
let blobURL = BlobURL.fromContainerURL(containerURL, blobName);
|
||||
let blockBlobURL = BlockBlobURL.fromBlobURL(blobURL);
|
||||
|
||||
beforeEach(async () => {
|
||||
containerName = getUniqueName("container");
|
||||
containerURL = ContainerURL.fromServiceURL(serviceURL, containerName);
|
||||
await containerURL.create(Aborter.none);
|
||||
blobName = getUniqueName("blob");
|
||||
blobURL = BlobURL.fromContainerURL(containerURL, blobName);
|
||||
blockBlobURL = BlockBlobURL.fromBlobURL(blobURL);
|
||||
});
|
||||
|
||||
afterEach(async () => {
|
||||
await containerURL.delete(Aborter.none);
|
||||
});
|
||||
|
||||
it("upload with Readable stream body and default parameters", async () => {
|
||||
const body: string = getUniqueName("randomstring");
|
||||
const bodyBuffer = Buffer.from(body);
|
||||
|
||||
await blockBlobURL.upload(Aborter.none, bodyBuffer, body.length);
|
||||
const result = await blobURL.download(Aborter.none, 0);
|
||||
assert.deepStrictEqual(
|
||||
result.readableStreamBody!.read(body.length)!.toString(),
|
||||
body
|
||||
);
|
||||
});
|
||||
});
|
|
@ -0,0 +1,50 @@
|
|||
import * as assert from "assert";
|
||||
|
||||
import { Aborter } from "../../lib/Aborter";
|
||||
import { ContainerURL } from "../../lib/ContainerURL";
|
||||
import { PublicAccessType } from "../../lib/generated/models";
|
||||
import { getBSU, getUniqueName } from "../utils";
|
||||
|
||||
describe("ContainerURL", () => {
|
||||
const serviceURL = getBSU();
|
||||
let containerName: string = getUniqueName("container");
|
||||
let containerURL = ContainerURL.fromServiceURL(serviceURL, containerName);
|
||||
|
||||
beforeEach(async () => {
|
||||
containerName = getUniqueName("container");
|
||||
containerURL = ContainerURL.fromServiceURL(serviceURL, containerName);
|
||||
await containerURL.create(Aborter.none);
|
||||
});
|
||||
|
||||
afterEach(async () => {
|
||||
await containerURL.delete(Aborter.none);
|
||||
});
|
||||
|
||||
it("getAccessPolicy", async () => {
|
||||
const result = await containerURL.getAccessPolicy(Aborter.none);
|
||||
assert.ok(result.eTag!.length > 0);
|
||||
assert.ok(result.lastModified);
|
||||
assert.ok(result.requestId);
|
||||
assert.ok(result.version);
|
||||
assert.ok(result.date);
|
||||
});
|
||||
|
||||
it("setAccessPolicy", async () => {
|
||||
const access = PublicAccessType.Blob;
|
||||
const containerAcl = [
|
||||
{
|
||||
accessPolicy: {
|
||||
expiry: new Date("2018-12-31T11:22:33.4567890Z"),
|
||||
permission: "rwd",
|
||||
start: new Date("2017-12-31T11:22:33.4567890Z")
|
||||
},
|
||||
id: "MTIzNDU2Nzg5MDEyMzQ1Njc4OTAxMjM0NTY3ODkwMTI="
|
||||
}
|
||||
];
|
||||
|
||||
await containerURL.setAccessPolicy(Aborter.none, access, containerAcl);
|
||||
const result = await containerURL.getAccessPolicy(Aborter.none);
|
||||
assert.deepEqual(result.signedIdentifiers, containerAcl);
|
||||
assert.deepEqual(result.blobPublicAccess, access);
|
||||
});
|
||||
});
|
|
@ -0,0 +1,316 @@
|
|||
import * as assert from "assert";
|
||||
import * as fs from "fs";
|
||||
import * as path from "path";
|
||||
import { Aborter } from "../../lib/Aborter";
|
||||
import {
|
||||
downloadBlobToBuffer,
|
||||
uploadFileToBlockBlob,
|
||||
uploadStreamToBlockBlob
|
||||
} from "../../lib/highlevel.node";
|
||||
import {
|
||||
createRandomLocalFile,
|
||||
getBSU,
|
||||
getUniqueName,
|
||||
readStreamToLocalFile
|
||||
} from "../utils/index";
|
||||
|
||||
import { BlobURL, BlockBlobURL, ContainerURL } from "../../lib";
|
||||
|
||||
// tslint:disable:no-empty
|
||||
describe("Highlevel", () => {
|
||||
const serviceURL = getBSU();
|
||||
let containerName = getUniqueName("container");
|
||||
let containerURL = ContainerURL.fromServiceURL(serviceURL, containerName);
|
||||
let blobName = getUniqueName("blob");
|
||||
let blobURL = BlobURL.fromContainerURL(containerURL, blobName);
|
||||
let blockBlobURL = BlockBlobURL.fromBlobURL(blobURL);
|
||||
let tempFileSmall: string;
|
||||
let tempFileSmallLength: number;
|
||||
let tempFileLarge: string;
|
||||
let tempFileLargeLength: number;
|
||||
const tempFolderPath = "temp";
|
||||
|
||||
beforeEach(async () => {
|
||||
containerName = getUniqueName("container");
|
||||
containerURL = ContainerURL.fromServiceURL(serviceURL, containerName);
|
||||
await containerURL.create(Aborter.none);
|
||||
blobName = getUniqueName("blob");
|
||||
blobURL = BlobURL.fromContainerURL(containerURL, blobName);
|
||||
blockBlobURL = BlockBlobURL.fromBlobURL(blobURL);
|
||||
});
|
||||
|
||||
afterEach(async () => {
|
||||
await containerURL.delete(Aborter.none);
|
||||
});
|
||||
|
||||
before(async () => {
|
||||
if (!fs.existsSync(tempFolderPath)) {
|
||||
fs.mkdirSync(tempFolderPath);
|
||||
}
|
||||
tempFileLarge = await createRandomLocalFile(
|
||||
tempFolderPath,
|
||||
257,
|
||||
1024 * 1024
|
||||
);
|
||||
tempFileLargeLength = 257 * 1024 * 1024;
|
||||
tempFileSmall = await createRandomLocalFile(
|
||||
tempFolderPath,
|
||||
15,
|
||||
1024 * 1024
|
||||
);
|
||||
tempFileSmallLength = 15 * 1024 * 1024;
|
||||
});
|
||||
|
||||
after(async () => {
|
||||
fs.unlinkSync(tempFileLarge);
|
||||
fs.unlinkSync(tempFileSmall);
|
||||
});
|
||||
|
||||
it("uploadFileToBlockBlob should success when blob >= BLOCK_BLOB_MAX_UPLOAD_BLOB_BYTES", async () => {
|
||||
await uploadFileToBlockBlob(Aborter.none, tempFileLarge, blockBlobURL, {
|
||||
blockSize: 4 * 1024 * 1024,
|
||||
parallelism: 20
|
||||
});
|
||||
|
||||
const downloadResponse = await blockBlobURL.download(Aborter.none, 0);
|
||||
const downloadedFile = path.join(
|
||||
tempFolderPath,
|
||||
getUniqueName("downloadfile.")
|
||||
);
|
||||
await readStreamToLocalFile(
|
||||
downloadResponse.readableStreamBody!,
|
||||
downloadedFile
|
||||
);
|
||||
|
||||
const downloadedData = await fs.readFileSync(downloadedFile);
|
||||
const uploadedData = await fs.readFileSync(tempFileLarge);
|
||||
|
||||
fs.unlinkSync(downloadedFile);
|
||||
assert.ok(downloadedData.equals(uploadedData));
|
||||
});
|
||||
|
||||
it("uploadFileToBlockBlob should success when blob < BLOCK_BLOB_MAX_UPLOAD_BLOB_BYTES", async () => {
|
||||
await uploadFileToBlockBlob(Aborter.none, tempFileSmall, blockBlobURL, {
|
||||
blockSize: 4 * 1024 * 1024,
|
||||
parallelism: 20
|
||||
});
|
||||
|
||||
const downloadResponse = await blockBlobURL.download(Aborter.none, 0);
|
||||
const downloadedFile = path.join(
|
||||
tempFolderPath,
|
||||
getUniqueName("downloadfile.")
|
||||
);
|
||||
await readStreamToLocalFile(
|
||||
downloadResponse.readableStreamBody!,
|
||||
downloadedFile
|
||||
);
|
||||
|
||||
const downloadedData = await fs.readFileSync(downloadedFile);
|
||||
const uploadedData = await fs.readFileSync(tempFileSmall);
|
||||
|
||||
fs.unlinkSync(downloadedFile);
|
||||
assert.ok(downloadedData.equals(uploadedData));
|
||||
});
|
||||
|
||||
it("uploadFileToBlockBlob should abort when blob >= BLOCK_BLOB_MAX_UPLOAD_BLOB_BYTES", async () => {
|
||||
const aborter = Aborter.timeout(1);
|
||||
|
||||
try {
|
||||
await uploadFileToBlockBlob(aborter, tempFileLarge, blockBlobURL, {
|
||||
blockSize: 4 * 1024 * 1024,
|
||||
parallelism: 20
|
||||
});
|
||||
assert.fail();
|
||||
} catch (err) {
|
||||
assert.ok((err.code as string).toLowerCase().includes("abort"));
|
||||
}
|
||||
});
|
||||
|
||||
it("uploadFileToBlockBlob should abort when blob < BLOCK_BLOB_MAX_UPLOAD_BLOB_BYTES", async () => {
|
||||
const aborter = Aborter.timeout(1);
|
||||
|
||||
try {
|
||||
await uploadFileToBlockBlob(aborter, tempFileSmall, blockBlobURL, {
|
||||
blockSize: 4 * 1024 * 1024,
|
||||
parallelism: 20
|
||||
});
|
||||
assert.fail();
|
||||
} catch (err) {
|
||||
assert.ok((err.code as string).toLowerCase().includes("abort"));
|
||||
}
|
||||
});
|
||||
|
||||
it("uploadFileToBlockBlob should update progress when blob >= BLOCK_BLOB_MAX_UPLOAD_BLOB_BYTES", async () => {
|
||||
let eventTriggered = false;
|
||||
const aborter = Aborter.none;
|
||||
|
||||
try {
|
||||
await uploadFileToBlockBlob(aborter, tempFileLarge, blockBlobURL, {
|
||||
blockSize: 4 * 1024 * 1024,
|
||||
parallelism: 20,
|
||||
progress: ev => {
|
||||
assert.ok(ev.loadedBytes);
|
||||
eventTriggered = true;
|
||||
aborter.abort();
|
||||
}
|
||||
});
|
||||
} catch (err) {}
|
||||
assert.ok(eventTriggered);
|
||||
});
|
||||
|
||||
it("uploadFileToBlockBlob should update progress when blob < BLOCK_BLOB_MAX_UPLOAD_BLOB_BYTES", async () => {
|
||||
let eventTriggered = false;
|
||||
const aborter = Aborter.none;
|
||||
|
||||
try {
|
||||
await uploadFileToBlockBlob(aborter, tempFileSmall, blockBlobURL, {
|
||||
blockSize: 4 * 1024 * 1024,
|
||||
parallelism: 20,
|
||||
progress: ev => {
|
||||
assert.ok(ev.loadedBytes);
|
||||
eventTriggered = true;
|
||||
aborter.abort();
|
||||
}
|
||||
});
|
||||
} catch (err) {}
|
||||
assert.ok(eventTriggered);
|
||||
});
|
||||
|
||||
it("uploadStreamToBlockBlob should success", async () => {
|
||||
const rs = fs.createReadStream(tempFileLarge);
|
||||
await uploadStreamToBlockBlob(
|
||||
Aborter.none,
|
||||
rs,
|
||||
blockBlobURL,
|
||||
4 * 1024 * 1024,
|
||||
20
|
||||
);
|
||||
|
||||
const downloadResponse = await blockBlobURL.download(Aborter.none, 0);
|
||||
|
||||
const downloadFilePath = path.join("./", getUniqueName("downloadFile"));
|
||||
await readStreamToLocalFile(
|
||||
downloadResponse.readableStreamBody!,
|
||||
downloadFilePath
|
||||
);
|
||||
|
||||
const downloadedBuffer = fs.readFileSync(downloadFilePath);
|
||||
const uploadedBuffer = fs.readFileSync(tempFileLarge);
|
||||
assert.ok(uploadedBuffer.equals(downloadedBuffer));
|
||||
|
||||
fs.unlinkSync(downloadFilePath);
|
||||
});
|
||||
|
||||
it("uploadStreamToBlockBlob should abort", async () => {
|
||||
const rs = fs.createReadStream(tempFileLarge);
|
||||
const aborter = Aborter.timeout(1);
|
||||
|
||||
try {
|
||||
await uploadStreamToBlockBlob(
|
||||
aborter,
|
||||
rs,
|
||||
blockBlobURL,
|
||||
4 * 1024 * 1024,
|
||||
20
|
||||
);
|
||||
assert.fail();
|
||||
} catch (err) {
|
||||
assert.ok((err.code as string).toLowerCase().includes("abort"));
|
||||
}
|
||||
});
|
||||
|
||||
it("uploadStreamToBlockBlob should update progress event", async () => {
|
||||
const rs = fs.createReadStream(tempFileLarge);
|
||||
let eventTriggered = false;
|
||||
|
||||
await uploadStreamToBlockBlob(
|
||||
Aborter.none,
|
||||
rs,
|
||||
blockBlobURL,
|
||||
4 * 1024 * 1024,
|
||||
20,
|
||||
{
|
||||
progress: ev => {
|
||||
assert.ok(ev.loadedBytes);
|
||||
eventTriggered = true;
|
||||
}
|
||||
}
|
||||
);
|
||||
assert.ok(eventTriggered);
|
||||
});
|
||||
|
||||
it("downloadBlobToBuffer should success", async () => {
|
||||
const rs = fs.createReadStream(tempFileLarge);
|
||||
await uploadStreamToBlockBlob(
|
||||
Aborter.none,
|
||||
rs,
|
||||
blockBlobURL,
|
||||
4 * 1024 * 1024,
|
||||
20
|
||||
);
|
||||
|
||||
const buf = Buffer.alloc(tempFileLargeLength);
|
||||
await downloadBlobToBuffer(Aborter.none, buf, blockBlobURL, 0, undefined, {
|
||||
blockSize: 4 * 1024 * 1024,
|
||||
parallelism: 20
|
||||
});
|
||||
|
||||
const localFileContent = fs.readFileSync(tempFileLarge);
|
||||
assert.ok(localFileContent.equals(buf));
|
||||
});
|
||||
|
||||
it("downloadBlobToBuffer should abort", async () => {
|
||||
const rs = fs.createReadStream(tempFileLarge);
|
||||
await uploadStreamToBlockBlob(
|
||||
Aborter.none,
|
||||
rs,
|
||||
blockBlobURL,
|
||||
4 * 1024 * 1024,
|
||||
20
|
||||
);
|
||||
|
||||
try {
|
||||
const buf = Buffer.alloc(tempFileLargeLength);
|
||||
await downloadBlobToBuffer(
|
||||
Aborter.timeout(1),
|
||||
buf,
|
||||
blockBlobURL,
|
||||
0,
|
||||
undefined,
|
||||
{
|
||||
blockSize: 4 * 1024 * 1024,
|
||||
parallelism: 20
|
||||
}
|
||||
);
|
||||
assert.fail();
|
||||
} catch (err) {
|
||||
assert.ok((err.code as string).toLowerCase().includes("abort"));
|
||||
}
|
||||
});
|
||||
|
||||
it("downloadBlobToBuffer should update progress event", async () => {
|
||||
const rs = fs.createReadStream(tempFileSmall);
|
||||
await uploadStreamToBlockBlob(
|
||||
Aborter.none,
|
||||
rs,
|
||||
blockBlobURL,
|
||||
4 * 1024 * 1024,
|
||||
10
|
||||
);
|
||||
|
||||
let eventTriggered = false;
|
||||
const buf = Buffer.alloc(tempFileSmallLength);
|
||||
const aborter = Aborter.none;
|
||||
try {
|
||||
await downloadBlobToBuffer(aborter, buf, blockBlobURL, 0, undefined, {
|
||||
blockSize: 1 * 1024,
|
||||
parallelism: 1,
|
||||
progress: () => {
|
||||
eventTriggered = true;
|
||||
aborter.abort();
|
||||
}
|
||||
});
|
||||
} catch (err) {}
|
||||
assert.ok(eventTriggered);
|
||||
});
|
||||
});
|
|
@ -0,0 +1,87 @@
|
|||
import * as assert from "assert";
|
||||
|
||||
import { Aborter } from "../../lib/Aborter";
|
||||
import { BlobURL } from "../../lib/BlobURL";
|
||||
import { ContainerURL } from "../../lib/ContainerURL";
|
||||
import {
|
||||
ListBlobsIncludeItem,
|
||||
PublicAccessType
|
||||
} from "../../lib/generated/models";
|
||||
import { PageBlobURL } from "../../lib/PageBlobURL";
|
||||
import { getBSU, getUniqueName } from "../utils";
|
||||
|
||||
// Node.js-only PageBlobURL tests for incremental snapshot copy.
describe("PageBlobURL", () => {
  const serviceURL = getBSU();
  let containerName: string = getUniqueName("container");
  let containerURL = ContainerURL.fromServiceURL(serviceURL, containerName);
  let blobName: string = getUniqueName("blob");
  let blobURL = BlobURL.fromContainerURL(containerURL, blobName);
  let pageBlobURL = PageBlobURL.fromBlobURL(blobURL);

  beforeEach(async () => {
    // Fresh container and page blob URLs for every test.
    containerName = getUniqueName("container");
    containerURL = ContainerURL.fromServiceURL(serviceURL, containerName);
    await containerURL.create(Aborter.none);
    blobName = getUniqueName("blob");
    blobURL = BlobURL.fromContainerURL(containerURL, blobName);
    pageBlobURL = PageBlobURL.fromBlobURL(blobURL);
  });

  afterEach(async () => {
    await containerURL.delete(Aborter.none);
  });

  it("startCopyIncremental", async () => {
    // Create a 1KB source page blob with metadata and write one page.
    await pageBlobURL.create(Aborter.none, 1024, {
      metadata: {
        sourcemeta: "val"
      }
    });
    await pageBlobURL.uploadPages(Aborter.none, "b".repeat(1024), 0, 1024);

    // Incremental copy requires a snapshot of the source as copy source.
    let snapshotResult = await pageBlobURL.createSnapshot(Aborter.none);
    assert.ok(snapshotResult.snapshot);

    const destPageBlobURL = PageBlobURL.fromContainerURL(
      containerURL,
      getUniqueName("page")
    );

    // NOTE(review): the container is made public here, presumably so the
    // copy source URL is readable by the service — confirm this is required
    // for incremental copy in this test setup.
    await containerURL.setAccessPolicy(
      Aborter.none,
      PublicAccessType.Container
    );
    let copySource = pageBlobURL.withSnapshot(snapshotResult.snapshot!).url;
    await destPageBlobURL.startCopyIncremental(Aborter.none, copySource);
    let listBlobResponse = await containerURL.listBlobFlatSegment(
      Aborter.none,
      undefined,
      {
        include: [ListBlobsIncludeItem.Copy, ListBlobsIncludeItem.Snapshots]
      }
    );

    // 4 entries expected after the first incremental copy — presumably the
    // source blob + its snapshot and the destination blob + its snapshot,
    // since the listing includes snapshots.
    assert.equal(listBlobResponse.segment.blobItems.length, 4);

    // Write new content, snapshot again, and run a second incremental copy.
    await pageBlobURL.uploadPages(Aborter.none, "c".repeat(1024), 0, 1024);
    snapshotResult = await pageBlobURL.createSnapshot(Aborter.none);
    assert.ok(snapshotResult.snapshot);
    copySource = pageBlobURL.withSnapshot(snapshotResult.snapshot!).url;
    await destPageBlobURL.startCopyIncremental(Aborter.none, copySource);

    listBlobResponse = await containerURL.listBlobFlatSegment(
      Aborter.none,
      undefined,
      {
        include: [ListBlobsIncludeItem.Copy, ListBlobsIncludeItem.Snapshots]
      }
    );

    // Each round adds one source snapshot and one destination snapshot: 6.
    assert.equal(listBlobResponse.segment.blobItems.length, 6);

    // The destination must have inherited the source blob's metadata.
    const pageBlobProperties = await destPageBlobURL.getProperties(
      Aborter.none
    );
    assert.equal(pageBlobProperties.metadata!.sourcemeta, "val");
  });
});
|
|
@ -0,0 +1,41 @@
|
|||
import { Aborter } from "../../lib/Aborter";
|
||||
import { BlockBlobURL } from "../../lib/BlockBlobURL";
|
||||
import { ContainerURL } from "../../lib/ContainerURL";
|
||||
import { getBSU, getUniqueName } from "../utils/index";
|
||||
|
||||
// Verifies that shared-key request signing canonicalizes "special" container
// and blob names (dashes, spaces, slashes, uppercase) correctly — a wrong
// canonicalized resource string would make the service reject the signature.
describe("SharedKeyCredentialPolicy Node.js only", () => {
  const serviceURL = getBSU();
  const containerName: string = getUniqueName("1container-with-dash");
  const containerURL = ContainerURL.fromServiceURL(serviceURL, containerName);

  before(async () => {
    await containerURL.create(Aborter.none);
  });

  after(async () => {
    await containerURL.delete(Aborter.none);
  });

  it("SharedKeyCredentialPolicy should work with special container and blob names with spaces", async () => {
    const blobName: string = getUniqueName("blob empty");
    const blockBlobURL = BlockBlobURL.fromContainerURL(containerURL, blobName);

    // A successful upload implies the request signature was accepted.
    await blockBlobURL.upload(Aborter.none, "A", 1);
  });

  it("SharedKeyCredentialPolicy should work with special container and blob names with /", async () => {
    const blobName: string = getUniqueName("////blob/empty /another");
    const blockBlobURL = BlockBlobURL.fromContainerURL(containerURL, blobName);

    await blockBlobURL.upload(Aborter.none, "A", 1);
    await blockBlobURL.getProperties(Aborter.none);
  });

  it("SharedKeyCredentialPolicy should work with special container and blob names uppercase", async () => {
    const blobName: string = getUniqueName("////Upper/blob/empty /another");
    const blockBlobURL = BlockBlobURL.fromContainerURL(containerURL, blobName);

    await blockBlobURL.upload(Aborter.none, "A", 1);
    await blockBlobURL.getProperties(Aborter.none);
  });
});
|
|
@ -0,0 +1,199 @@
|
|||
import * as assert from "assert";
|
||||
import { bodyToString, getBSU, getUniqueName } from "./utils";
|
||||
|
||||
import { Aborter } from "../lib/Aborter";
|
||||
import { BlobURL } from "../lib/BlobURL";
|
||||
import { ContainerURL } from "../lib/ContainerURL";
|
||||
import * as Models from "../lib/generated/models";
|
||||
import { PageBlobURL } from "../lib/PageBlobURL";
|
||||
|
||||
describe("PageBlobURL", () => {
  const serviceURL = getBSU();
  let containerName: string = getUniqueName("container");
  let containerURL = ContainerURL.fromServiceURL(serviceURL, containerName);
  let blobName: string = getUniqueName("blob");
  let blobURL = BlobURL.fromContainerURL(containerURL, blobName);
  let pageBlobURL = PageBlobURL.fromBlobURL(blobURL);

  // A fresh container and page blob URL per test keeps tests isolated.
  beforeEach(async () => {
    containerName = getUniqueName("container");
    containerURL = ContainerURL.fromServiceURL(serviceURL, containerName);
    await containerURL.create(Aborter.none);
    blobName = getUniqueName("blob");
    blobURL = BlobURL.fromContainerURL(containerURL, blobName);
    pageBlobURL = PageBlobURL.fromBlobURL(blobURL);
  });

  afterEach(async () => {
    // Deleting the container also removes the blobs created by the test.
    await containerURL.delete(Aborter.none);
  });

  it("create with default parameters", async () => {
    await pageBlobURL.create(Aborter.none, 512);

    // A newly created page blob is zero-filled.
    const result = await blobURL.download(Aborter.none, 0);
    assert.deepStrictEqual(
      await bodyToString(result, 512),
      "\u0000".repeat(512)
    );
  });

  it("create with all parameters set", async () => {
    const options = {
      blobHTTPHeaders: {
        blobCacheControl: "blobCacheControl",
        blobContentDisposition: "blobContentDisposition",
        blobContentEncoding: "blobContentEncoding",
        blobContentLanguage: "blobContentLanguage",
        blobContentType: "blobContentType"
      },
      metadata: {
        key1: "vala",
        key2: "valb"
      }
    };
    await pageBlobURL.create(Aborter.none, 512, options);

    // Content is still zero-filled regardless of headers/metadata.
    const result = await blobURL.download(Aborter.none, 0);
    assert.deepStrictEqual(
      await bodyToString(result, 512),
      "\u0000".repeat(512)
    );

    // Every HTTP header and metadata entry must round-trip via getProperties.
    const properties = await blobURL.getProperties(Aborter.none);
    assert.equal(
      properties.cacheControl,
      options.blobHTTPHeaders.blobCacheControl
    );
    assert.equal(
      properties.contentDisposition,
      options.blobHTTPHeaders.blobContentDisposition
    );
    assert.equal(
      properties.contentEncoding,
      options.blobHTTPHeaders.blobContentEncoding
    );
    assert.equal(
      properties.contentLanguage,
      options.blobHTTPHeaders.blobContentLanguage
    );
    assert.equal(
      properties.contentType,
      options.blobHTTPHeaders.blobContentType
    );
    assert.equal(properties.metadata!.key1, options.metadata.key1);
    assert.equal(properties.metadata!.key2, options.metadata.key2);
  });

  it("uploadPages", async () => {
    await pageBlobURL.create(Aborter.none, 1024);

    const result = await blobURL.download(Aborter.none, 0);
    assert.equal(await bodyToString(result, 1024), "\u0000".repeat(1024));

    // Write two adjacent 512-byte pages with distinct fill bytes.
    await pageBlobURL.uploadPages(Aborter.none, "a".repeat(512), 0, 512);
    await pageBlobURL.uploadPages(Aborter.none, "b".repeat(512), 512, 512);

    const page1 = await pageBlobURL.download(Aborter.none, 0, 512);
    const page2 = await pageBlobURL.download(Aborter.none, 512, 512);

    assert.equal(await bodyToString(page1, 512), "a".repeat(512));
    assert.equal(await bodyToString(page2, 512), "b".repeat(512));
  });

  it("clearPages", async () => {
    await pageBlobURL.create(Aborter.none, 1024);
    let result = await blobURL.download(Aborter.none, 0);
    assert.deepStrictEqual(
      await bodyToString(result, 1024),
      "\u0000".repeat(1024)
    );

    await pageBlobURL.uploadPages(Aborter.none, "a".repeat(1024), 0, 1024);
    result = await pageBlobURL.download(Aborter.none, 0, 1024);
    assert.deepStrictEqual(await bodyToString(result, 1024), "a".repeat(1024));

    // Clearing the first page resets it to zero bytes.
    await pageBlobURL.clearPages(Aborter.none, 0, 512);
    result = await pageBlobURL.download(Aborter.none, 0, 512);
    assert.deepStrictEqual(
      await bodyToString(result, 512),
      "\u0000".repeat(512)
    );
  });

  it("getPageRanges", async () => {
    await pageBlobURL.create(Aborter.none, 1024);

    const result = await blobURL.download(Aborter.none, 0);
    assert.deepStrictEqual(
      await bodyToString(result, 1024),
      "\u0000".repeat(1024)
    );

    await pageBlobURL.uploadPages(Aborter.none, "a".repeat(512), 0, 512);
    await pageBlobURL.uploadPages(Aborter.none, "b".repeat(512), 512, 512);

    // Each query window should report exactly the page written inside it.
    const page1 = await pageBlobURL.getPageRanges(Aborter.none, 0, 512);
    const page2 = await pageBlobURL.getPageRanges(Aborter.none, 512, 512);

    assert.equal(page1.pageRange![0].end, 511);
    assert.equal(page2.pageRange![0].end, 1023);
  });

  it("getPageRangesDiff", async () => {
    await pageBlobURL.create(Aborter.none, 1024);

    const result = await blobURL.download(Aborter.none, 0);
    assert.deepStrictEqual(
      await bodyToString(result, 1024),
      "\u0000".repeat(1024)
    );

    await pageBlobURL.uploadPages(Aborter.none, "b".repeat(1024), 0, 1024);

    // Baseline snapshot to diff against.
    const snapshotResult = await pageBlobURL.createSnapshot(Aborter.none);
    assert.ok(snapshotResult.snapshot);

    // After the snapshot: overwrite the first page, clear the second.
    await pageBlobURL.uploadPages(Aborter.none, "a".repeat(512), 0, 512);
    await pageBlobURL.clearPages(Aborter.none, 512, 512);

    const rangesDiff = await pageBlobURL.getPageRangesDiff(
      Aborter.none,
      0,
      1024,
      snapshotResult.snapshot!
    );
    // The diff must report the updated range and the cleared range separately.
    assert.equal(rangesDiff.pageRange![0].start, 0);
    assert.equal(rangesDiff.pageRange![0].end, 511);
    assert.equal(rangesDiff.clearRange![0].start, 512);
    assert.equal(rangesDiff.clearRange![0].end, 1023);
  });

  it("updateSequenceNumber", async () => {
    await pageBlobURL.create(Aborter.none, 1024);
    let propertiesResponse = await pageBlobURL.getProperties(Aborter.none);

    // Increment: 0 -> 1.
    await pageBlobURL.updateSequenceNumber(
      Aborter.none,
      Models.SequenceNumberActionType.Increment
    );
    propertiesResponse = await pageBlobURL.getProperties(Aborter.none);
    assert.equal(propertiesResponse.blobSequenceNumber!, 1);

    // Update: set the sequence number to an explicit value.
    await pageBlobURL.updateSequenceNumber(
      Aborter.none,
      Models.SequenceNumberActionType.Update,
      10
    );
    propertiesResponse = await pageBlobURL.getProperties(Aborter.none);
    assert.equal(propertiesResponse.blobSequenceNumber!, 10);

    // Max: keep the larger of the current and supplied values.
    await pageBlobURL.updateSequenceNumber(
      Aborter.none,
      Models.SequenceNumberActionType.Max,
      100
    );
    propertiesResponse = await pageBlobURL.getProperties(Aborter.none);
    assert.equal(propertiesResponse.blobSequenceNumber!, 100);
  });
});
|
|
@ -0,0 +1,218 @@
|
|||
import * as assert from "assert";
|
||||
|
||||
import { Aborter } from "../lib/Aborter";
|
||||
import { ContainerURL } from "../lib/ContainerURL";
|
||||
import {
|
||||
LeaseStateType,
|
||||
LeaseStatusType,
|
||||
ListContainersIncludeType
|
||||
} from "../lib/generated/models";
|
||||
import { ServiceURL } from "../lib/ServiceURL";
|
||||
import { getAlternateBSU, getBSU, getUniqueName, wait } from "./utils";
|
||||
|
||||
describe("ServiceURL", () => {
  it("ListContainers with default parameters", async () => {
    const serviceURL = getBSU();
    const result = await serviceURL.listContainersSegment(Aborter.none);
    assert.ok(typeof result.requestId);
    assert.ok(result.requestId!.length > 0);
    assert.ok(typeof result.version);
    assert.ok(result.version!.length > 0);

    assert.ok(result.serviceEndpoint.length > 0);
    assert.ok(result.containerItems!.length >= 0);

    // Shape-check the first item only if the account has any containers.
    if (result.containerItems!.length > 0) {
      const container = result.containerItems![0];
      assert.ok(container.name.length > 0);
      assert.ok(container.properties.etag.length > 0);
      assert.ok(container.properties.lastModified);
    }
  });

  it("ListContainers with all parameters configured", async () => {
    const serviceURL = getBSU();

    // Two containers sharing a unique prefix so prefix-filtered listing with
    // maxresults=1 produces exactly two one-item pages.
    const containerNamePrefix = getUniqueName("container");
    const containerName1 = `${containerNamePrefix}x1`;
    const containerName2 = `${containerNamePrefix}x2`;
    const containerURL1 = ContainerURL.fromServiceURL(
      serviceURL,
      containerName1
    );
    const containerURL2 = ContainerURL.fromServiceURL(
      serviceURL,
      containerName2
    );
    await containerURL1.create(Aborter.none, { metadata: { key: "val" } });
    await containerURL2.create(Aborter.none, { metadata: { key: "val" } });

    // First page.
    const result1 = await serviceURL.listContainersSegment(
      Aborter.none,
      undefined,
      {
        include: ListContainersIncludeType.Metadata,
        maxresults: 1,
        prefix: containerNamePrefix
      }
    );

    assert.ok(result1.nextMarker);
    assert.equal(result1.containerItems!.length, 1);
    assert.ok(result1.containerItems![0].name.startsWith(containerNamePrefix));
    assert.ok(result1.containerItems![0].properties.etag.length > 0);
    assert.ok(result1.containerItems![0].properties.lastModified);
    assert.ok(!result1.containerItems![0].properties.leaseDuration);
    assert.ok(!result1.containerItems![0].properties.publicAccess);
    assert.deepEqual(
      result1.containerItems![0].properties.leaseState,
      LeaseStateType.Available
    );
    assert.deepEqual(
      result1.containerItems![0].properties.leaseStatus,
      LeaseStatusType.Unlocked
    );
    assert.deepEqual(result1.containerItems![0].metadata!.key, "val");

    // Second page, continued via the marker from the first.
    const result2 = await serviceURL.listContainersSegment(
      Aborter.none,
      result1.nextMarker,
      {
        include: ListContainersIncludeType.Metadata,
        maxresults: 1,
        prefix: containerNamePrefix
      }
    );

    // No marker after the final page.
    assert.ok(!result2.nextMarker);
    assert.equal(result2.containerItems!.length, 1);
    assert.ok(result2.containerItems![0].name.startsWith(containerNamePrefix));
    assert.ok(result2.containerItems![0].properties.etag.length > 0);
    assert.ok(result2.containerItems![0].properties.lastModified);
    assert.ok(!result2.containerItems![0].properties.leaseDuration);
    assert.ok(!result2.containerItems![0].properties.publicAccess);
    assert.deepEqual(
      result2.containerItems![0].properties.leaseState,
      LeaseStateType.Available
    );
    assert.deepEqual(
      result2.containerItems![0].properties.leaseStatus,
      LeaseStatusType.Unlocked
    );
    assert.deepEqual(result2.containerItems![0].metadata!.key, "val");

    await containerURL1.delete(Aborter.none);
    await containerURL2.delete(Aborter.none);
  });

  it("GetProperties", async () => {
    const serviceURL = getBSU();
    const result = await serviceURL.getProperties(Aborter.none);

    assert.ok(typeof result.requestId);
    assert.ok(result.requestId!.length > 0);
    assert.ok(typeof result.version);
    assert.ok(result.version!.length > 0);

    // CORS rules are account-level state, so only shape-check when present.
    if (result.cors && result.cors!.length > 0) {
      assert.ok(result.cors![0].allowedHeaders.length > 0);
      assert.ok(result.cors![0].allowedMethods.length > 0);
      assert.ok(result.cors![0].allowedOrigins.length > 0);
      assert.ok(result.cors![0].exposedHeaders.length > 0);
      assert.ok(result.cors![0].maxAgeInSeconds >= 0);
    }
  });

  it("SetProperties", async () => {
    const serviceURL = getBSU();

    // Read-modify-write the existing account properties.
    const serviceProperties = await serviceURL.getProperties(Aborter.none);

    serviceProperties.logging = {
      deleteProperty: true,
      read: true,
      retentionPolicy: {
        days: 5,
        enabled: true
      },
      version: "1.0",
      write: true
    };

    serviceProperties.minuteMetrics = {
      enabled: true,
      includeAPIs: true,
      retentionPolicy: {
        days: 4,
        enabled: true
      },
      version: "1.0"
    };

    serviceProperties.hourMetrics = {
      enabled: true,
      includeAPIs: true,
      retentionPolicy: {
        days: 3,
        enabled: true
      },
      version: "1.0"
    };

    const newCORS = {
      allowedHeaders: "*",
      allowedMethods: "GET",
      allowedOrigins: "example.com",
      exposedHeaders: "*",
      maxAgeInSeconds: 8888
    };
    // The service limits the number of CORS rules; only append below the cap.
    if (!serviceProperties.cors) {
      serviceProperties.cors = [newCORS];
    } else if (serviceProperties.cors!.length < 5) {
      serviceProperties.cors.push(newCORS);
    }

    if (!serviceProperties.deleteRetentionPolicy) {
      serviceProperties.deleteRetentionPolicy = {
        days: 2,
        enabled: false
      };
    }

    await serviceURL.setProperties(Aborter.none, serviceProperties);
    // NOTE(review): fixed 5s wait — presumably to let the property change
    // propagate before reading it back; confirm required delay.
    await wait(5 * 1000);

    const result = await serviceURL.getProperties(Aborter.none);
    assert.ok(typeof result.requestId);
    assert.ok(result.requestId!.length > 0);
    assert.ok(typeof result.version);
    assert.ok(result.version!.length > 0);
    assert.deepEqual(result.hourMetrics, serviceProperties.hourMetrics);
  });

  it("getStatistics", done => {
    // getAlternateBSU throws when secondary-account env vars are absent;
    // in that case the test is skipped by completing immediately.
    let serviceURL: ServiceURL | undefined;
    try {
      serviceURL = getAlternateBSU();
    } catch (err) {
      done();
      return;
    }

    serviceURL!
      .getStatistics(Aborter.none)
      .then(result => {
        assert.ok(result.geoReplication!.lastSyncTime);
        done();
      })
      .catch(done);
  });

  it("getAccountInfo", async () => {
    const serviceURL = getBSU();

    const accountInfo = await serviceURL.getAccountInfo(Aborter.none);
    assert.ok(accountInfo.accountKind);
    assert.ok(accountInfo.skuName);
  });
});
|
|
@ -0,0 +1,135 @@
|
|||
import { AnonymousCredential } from "../../lib/credentials/AnonymousCredential";
|
||||
import { ServiceURL } from "../../lib/ServiceURL";
|
||||
import { StorageURL } from "../../lib/StorageURL";
|
||||
|
||||
export * from "./testutils.common";
|
||||
|
||||
export function getGenericBSU(
|
||||
accountType: string,
|
||||
accountNameSuffix: string = ""
|
||||
): ServiceURL {
|
||||
const accountNameEnvVar = `${accountType}ACCOUNT_NAME`;
|
||||
const accountSASEnvVar = `${accountType}ACCOUNT_SAS`;
|
||||
|
||||
let accountName: string | undefined;
|
||||
let accountSAS: string | undefined;
|
||||
accountName = (window as any).__env__[accountNameEnvVar];
|
||||
accountSAS = (window as any).__env__[accountSASEnvVar];
|
||||
|
||||
if (!accountName || !accountSAS || accountName === "" || accountSAS === "") {
|
||||
throw new Error(
|
||||
`${accountNameEnvVar} and/or ${accountSASEnvVar} environment variables not specified.`
|
||||
);
|
||||
}
|
||||
|
||||
if (accountSAS) {
|
||||
accountSAS = accountSAS.startsWith("?") ? accountSAS : `?${accountSAS}`;
|
||||
}
|
||||
|
||||
const credentials = new AnonymousCredential();
|
||||
const pipeline = StorageURL.newPipeline(credentials, {
|
||||
// Enable logger when debugging
|
||||
// logger: new ConsoleHttpPipelineLogger(HttpPipelineLogLevel.INFO)
|
||||
});
|
||||
const blobPrimaryURL = `https://${accountName}${accountNameSuffix}.blob.core.windows.net${accountSAS}`;
|
||||
return new ServiceURL(blobPrimaryURL, pipeline);
|
||||
}
|
||||
|
||||
// ServiceURL for the primary test account (env vars ACCOUNT_NAME/ACCOUNT_SAS).
export function getBSU(): ServiceURL {
  return getGenericBSU("");
}
|
||||
|
||||
// ServiceURL for the secondary test account (SECONDARY_* env vars), pointing
// at the "-secondary" read endpoint of that account.
export function getAlternateBSU(): ServiceURL {
  return getGenericBSU("SECONDARY_", "-secondary");
}
|
||||
|
||||
/**
|
||||
* Read body from downloading operation methods to string.
|
||||
* Work on both Node.js and browser environment.
|
||||
*
|
||||
* @param response Convenience layer methods response with downloaded body
|
||||
* @param length Length of Readable stream, needed for Node.js environment
|
||||
*/
|
||||
export async function bodyToString(
|
||||
response: {
|
||||
readableStreamBody?: NodeJS.ReadableStream;
|
||||
blobBody?: Promise<Blob>;
|
||||
},
|
||||
// tslint:disable-next-line:variable-name
|
||||
_length?: number
|
||||
): Promise<string> {
|
||||
const blob = await response.blobBody!;
|
||||
return blobToString(blob);
|
||||
}
|
||||
|
||||
export async function blobToString(blob: Blob): Promise<string> {
|
||||
const fileReader = new FileReader();
|
||||
return new Promise<string>((resolve, reject) => {
|
||||
fileReader.onloadend = (ev: any) => {
|
||||
resolve(ev.target!.result);
|
||||
};
|
||||
fileReader.onerror = reject;
|
||||
fileReader.readAsText(blob);
|
||||
});
|
||||
}
|
||||
|
||||
export async function blobToArrayBuffer(blob: Blob): Promise<ArrayBuffer> {
|
||||
const fileReader = new FileReader();
|
||||
return new Promise<ArrayBuffer>((resolve, reject) => {
|
||||
fileReader.onloadend = (ev: any) => {
|
||||
resolve(ev.target!.result);
|
||||
};
|
||||
fileReader.onerror = reject;
|
||||
fileReader.readAsArrayBuffer(blob);
|
||||
});
|
||||
}
|
||||
|
||||
export function arrayBufferEqual(
|
||||
buf1: ArrayBuffer,
|
||||
buf2: ArrayBuffer
|
||||
): boolean {
|
||||
if (buf1.byteLength !== buf2.byteLength) {
|
||||
return false;
|
||||
}
|
||||
|
||||
const uint8Arr1 = new Uint8Array(buf1);
|
||||
const uint8Arr2 = new Uint8Array(buf2);
|
||||
|
||||
for (let i = 0; i <= uint8Arr1.length; i++) {
|
||||
if (uint8Arr1[i] !== uint8Arr2[i]) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
export function isIE(): boolean {
|
||||
const sAgent = window.navigator.userAgent;
|
||||
const Idx = sAgent.indexOf("MSIE");
|
||||
|
||||
// If IE, return version number.
|
||||
if (Idx > 0) {
|
||||
return true;
|
||||
} else if (!!navigator.userAgent.match(/Trident\/7\./)) {
|
||||
// IE 11
|
||||
return true;
|
||||
} else {
|
||||
return false;
|
||||
} // It is not IE
|
||||
}
|
||||
|
||||
// Mock a Browser file with specified name and size
|
||||
export function getBrowserFile(name: string, size: number): File {
|
||||
const uint8Arr = new Uint8Array(size);
|
||||
for (let j = 0; j < size; j++) {
|
||||
uint8Arr[j] = Math.floor(Math.random() * 256);
|
||||
}
|
||||
|
||||
// IE11 & Edge doesn't support create File using var file = new File([binary], name);
|
||||
// We leverage Blob() to mock a File
|
||||
|
||||
const file = new Blob([uint8Arr]) as any;
|
||||
file.name = name;
|
||||
return file;
|
||||
}
|
|
@ -0,0 +1,124 @@
|
|||
import * as crypto from "crypto";
|
||||
import * as fs from "fs";
|
||||
import * as path from "path";
|
||||
|
||||
import { SharedKeyCredential } from "../../lib/credentials/SharedKeyCredential";
|
||||
import { ServiceURL } from "../../lib/ServiceURL";
|
||||
import { StorageURL } from "../../lib/StorageURL";
|
||||
import { getUniqueName } from "./testutils.common";
|
||||
|
||||
export * from "./testutils.common";
|
||||
|
||||
export function getGenericBSU(
|
||||
accountType: string,
|
||||
accountNameSuffix: string = ""
|
||||
): ServiceURL {
|
||||
const accountNameEnvVar = `${accountType}ACCOUNT_NAME`;
|
||||
const accountKeyEnvVar = `${accountType}ACCOUNT_KEY`;
|
||||
|
||||
let accountName: string | undefined;
|
||||
let accountKey: string | undefined;
|
||||
|
||||
accountName = process.env[accountNameEnvVar];
|
||||
accountKey = process.env[accountKeyEnvVar];
|
||||
|
||||
if (!accountName || !accountKey || accountName === "" || accountKey === "") {
|
||||
throw new Error(
|
||||
`${accountNameEnvVar} and/or ${accountKeyEnvVar} environment variables not specified.`
|
||||
);
|
||||
}
|
||||
|
||||
const credentials = new SharedKeyCredential(accountName, accountKey);
|
||||
const pipeline = StorageURL.newPipeline(credentials, {
|
||||
// Enable logger when debugging
|
||||
// logger: new ConsoleHttpPipelineLogger(HttpPipelineLogLevel.INFO)
|
||||
});
|
||||
const blobPrimaryURL = `https://${accountName}${accountNameSuffix}.blob.core.windows.net/`;
|
||||
return new ServiceURL(blobPrimaryURL, pipeline);
|
||||
}
|
||||
|
||||
// ServiceURL for the primary test account (env vars ACCOUNT_NAME/ACCOUNT_KEY).
export function getBSU(): ServiceURL {
  return getGenericBSU("");
}
|
||||
|
||||
// ServiceURL for the secondary test account (SECONDARY_* env vars), pointing
// at the "-secondary" read endpoint of that account.
export function getAlternateBSU(): ServiceURL {
  return getGenericBSU("SECONDARY_", "-secondary");
}
|
||||
|
||||
/**
 * Read body from downloading operation methods to string.
 * Work on both Node.js and browser environment.
 *
 * @param response Convenience layer methods response with downloaded body
 * @param length Length of Readable stream, needed for Node.js environment
 */
export async function bodyToString(
  response: {
    readableStreamBody?: NodeJS.ReadableStream;
    blobBody?: Promise<Blob>;
  },
  length?: number
): Promise<string> {
  return new Promise<string>((resolve, reject) => {
    response.readableStreamBody!.on("readable", () => {
      let chunk;
      // read(length) returns null until `length` bytes are buffered, so the
      // promise resolves with a single chunk of the requested size.
      // NOTE(review): if the stream ends with fewer than `length` bytes
      // available, no chunk is ever returned and this promise never settles —
      // confirm callers always pass the exact body length.
      chunk = response.readableStreamBody!.read(length);
      if (chunk) {
        resolve(chunk.toString());
      }
    });

    response.readableStreamBody!.on("error", reject);
  });
}
|
||||
|
||||
/**
 * Create a local file filled with random hex characters.
 *
 * @param folder Directory in which the file is created.
 * @param blockNumber Number of blocks to write.
 * @param blockSize Size of each block in characters.
 * @returns Resolves with the full path of the created file.
 */
export async function createRandomLocalFile(
  folder: string,
  blockNumber: number,
  blockSize: number
): Promise<string> {
  return new Promise<string>((resolve, reject) => {
    const destFile = path.join(folder, getUniqueName("tempfile."));
    const ws = fs.createWriteStream(destFile);
    // NOTE(review): despite the name, this counts blocks written, not MB.
    let offsetInMB = 0;

    // Random hex string of `len` characters (two hex chars per random byte).
    function randomValueHex(len = blockSize) {
      return crypto
        .randomBytes(Math.ceil(len / 2))
        .toString("hex") // convert to hexadecimal format
        .slice(0, len); // return required number of characters
    }

    // Write until the block budget is exhausted or backpressure kicks in
    // (ws.write returning false); the "drain" handler resumes writing.
    ws.on("open", () => {
      // tslint:disable-next-line:no-empty
      while (offsetInMB++ < blockNumber && ws.write(randomValueHex())) {}
      if (offsetInMB >= blockNumber) {
        ws.end();
      }
    });

    ws.on("drain", () => {
      // tslint:disable-next-line:no-empty
      while (offsetInMB++ < blockNumber && ws.write(randomValueHex())) {}
      if (offsetInMB >= blockNumber) {
        ws.end();
      }
    });
    ws.on("finish", () => resolve(destFile));
    ws.on("error", reject);
  });
}
|
||||
|
||||
export async function readStreamToLocalFile(
|
||||
rs: NodeJS.ReadableStream,
|
||||
file: string
|
||||
) {
|
||||
return new Promise<void>((resolve, reject) => {
|
||||
const ws = fs.createWriteStream(file);
|
||||
rs.pipe(ws);
|
||||
rs.on("error", reject);
|
||||
ws.on("error", reject);
|
||||
ws.on("finish", resolve);
|
||||
rs.on("end", resolve);
|
||||
});
|
||||
}
|
|
@ -0,0 +1,59 @@
|
|||
import { HttpPipelineLogLevel, IHttpPipelineLogger } from "../../lib/Pipeline";
|
||||
import { padStart } from "../../lib/utils/utils.common";
|
||||
|
||||
export function isBrowser(): boolean {
|
||||
return typeof window !== "undefined";
|
||||
}
|
||||
|
||||
export function getUniqueName(prefix: string): string {
|
||||
return `${prefix}${new Date().getTime()}${padStart(
|
||||
Math.floor(Math.random() * 10000).toString(),
|
||||
5,
|
||||
"00000"
|
||||
)}`;
|
||||
}
|
||||
|
||||
export async function sleep(time: number): Promise<void> {
|
||||
return new Promise<void>(resolve => {
|
||||
setTimeout(resolve, time);
|
||||
});
|
||||
}
|
||||
|
||||
export function base64encode(content: string): string {
|
||||
return isBrowser() ? btoa(content) : Buffer.from(content).toString("base64");
|
||||
}
|
||||
|
||||
export function base64decode(encodedString: string): string {
|
||||
return isBrowser()
|
||||
? atob(encodedString)
|
||||
: Buffer.from(encodedString, "base64").toString();
|
||||
}
|
||||
|
||||
export class ConsoleHttpPipelineLogger implements IHttpPipelineLogger {
|
||||
constructor(public minimumLogLevel: HttpPipelineLogLevel) {}
|
||||
public log(logLevel: HttpPipelineLogLevel, message: string): void {
|
||||
const logMessage = `${new Date().toISOString()} ${
|
||||
HttpPipelineLogLevel[logLevel]
|
||||
}: ${message}`;
|
||||
switch (logLevel) {
|
||||
case HttpPipelineLogLevel.ERROR:
|
||||
// tslint:disable-next-line:no-console
|
||||
console.error(logMessage);
|
||||
break;
|
||||
case HttpPipelineLogLevel.WARNING:
|
||||
// tslint:disable-next-line:no-console
|
||||
console.warn(logMessage);
|
||||
break;
|
||||
case HttpPipelineLogLevel.INFO:
|
||||
// tslint:disable-next-line:no-console
|
||||
console.log(logMessage);
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
export async function wait(time: number): Promise<void> {
|
||||
return new Promise<void>(resolve => {
|
||||
setTimeout(resolve, time);
|
||||
});
|
||||
}
|
|
@ -0,0 +1,25 @@
|
|||
{
|
||||
"compilerOptions": {
|
||||
"alwaysStrict": true,
|
||||
"noImplicitAny": true,
|
||||
"preserveConstEnums": true,
|
||||
"sourceMap": true,
|
||||
"newLine": "LF",
|
||||
"target": "es5",
|
||||
"moduleResolution": "node",
|
||||
"noUnusedLocals": true,
|
||||
"noUnusedParameters": true,
|
||||
"strict": true,
|
||||
"module": "esNext",
|
||||
"outDir": "./dist-esm",
|
||||
"declaration": true,
|
||||
"declarationMap": true,
|
||||
"importHelpers": true,
|
||||
"declarationDir": "./typings",
|
||||
"lib": ["dom", "es5", "es6", "es7", "esnext"],
|
||||
"esModuleInterop": true
|
||||
},
|
||||
"compileOnSave": true,
|
||||
"exclude": ["node_modules"],
|
||||
"include": ["./lib/**/*.ts", "./test/**/*.ts"]
|
||||
}
|
|
@ -0,0 +1,13 @@
|
|||
{
|
||||
"defaultSeverity": "error",
|
||||
"extends": ["tslint:recommended"],
|
||||
"jsRules": {},
|
||||
"rules": {
|
||||
"trailing-comma": false,
|
||||
"arrow-parens": false
|
||||
},
|
||||
"rulesDirectory": [],
|
||||
"linterOptions": {
|
||||
"exclude": ["lib/generated/**"]
|
||||
}
|
||||
}
|
Загрузка…
Ссылка в новой задаче