Remove event-processor-host (#21591)
This package has been deprecated for over two years and is no longer actively maintained.
Parent: c81be393ae
Commit: 9372a73be4
|
@@ -116,10 +116,6 @@
|
|||
"name": "eventhubs-checkpointstore-blob",
|
||||
"path": "sdk/eventhub/eventhubs-checkpointstore-blob"
|
||||
},
|
||||
{
|
||||
"name": "event-processor-host",
|
||||
"path": "sdk/eventhub/event-processor-host"
|
||||
},
|
||||
{
|
||||
"name": "form-recognizer",
|
||||
"path": "sdk/formrecognizer/ai-form-recognizer"
|
||||
|
|
|
@@ -1,17 +0,0 @@
|
|||
{
|
||||
"include": [
|
||||
"dist-esm/src/**/*.js"
|
||||
],
|
||||
"exclude": [
|
||||
"**/*.d.ts"
|
||||
],
|
||||
"reporter": [
|
||||
"text-summary",
|
||||
"html",
|
||||
"cobertura"
|
||||
],
|
||||
"exclude-after-remap":false,
|
||||
"sourceMap": true,
|
||||
"instrument": true,
|
||||
"all": true
|
||||
}
|
|
@@ -1,112 +0,0 @@
|
|||
# Release History
|
||||
|
||||
## 2.1.1 (2019-12-12)
|
||||
|
||||
- Updates minimum version of `@azure/event-hubs` from version 2.1.1 to version 2.1.4.
|
||||
This update brings in improvements when attempting to reconnect after a transient failure.
|
||||
See the [changelog](https://github.com/Azure/azure-sdk-for-js/blob/%40azure/event-hubs_2.1.4/sdk/eventhub/event-hubs/changelog.md#2019-12-11-214)
|
||||
for `@azure/event-hubs` for a full list of the improvements since version 2.1.1.
|
||||
|
||||
## 2.1.0 (2019-08-06)
|
||||
|
||||
- Added support for WebSockets. WebSockets enable Event Processor Host to work over an HTTP proxy and in environments where the standard AMQP port 5671 is blocked.
|
||||
Refer to the [websockets](https://github.com/Azure/azure-sdk-for-js/blob/main/sdk/eventhub/event-processor-host/samples/typescript/src/websockets.ts) sample to see how to use WebSockets.
|
||||
- Fixed [bug 4363](https://github.com/Azure/azure-sdk-for-js/issues/4363) which stopped users from providing their own LeaseManager. If both a lease manager and the options for leaseDuration/leaseRenewInterval are provided, then the latter will be ignored in favor of the leaseDuration/leaseRenewInterval properties on the lease manager.
|
||||
|
||||
## 2.0.0 (2019-07-16)
|
||||
|
||||
- Use the latest version of the dependency on [@azure/event-hubs](https://www.npmjs.com/package/@azure/event-hubs/v/2.1.1) that has the following bug fixes
|
||||
- Added event handlers for `error` and `protocolError` events on the connection object to avoid the case of unhandled exceptions. This is related to the [bug 4136](https://github.com/Azure/azure-sdk-for-js/issues/4136)
|
||||
- A network connection lost error is now treated as a retryable error. A new error with name `ConnectionLostError`
|
||||
is introduced for this scenario which you can see if you enable the [logs](https://github.com/Azure/azure-sdk-for-js/tree/main/sdk/eventhub/event-processor-host#debug-logs).
|
||||
- When recovering from an error that caused the underlying AMQP connection to get disconnected,
|
||||
[rhea](https://github.com/amqp/rhea/issues/205) reconnects all the older AMQP links on the connection
|
||||
resulting in the below 2 errors in the logs. We now clear rhea's internal map to avoid such reconnections.
|
||||
We already have code in place to create new AMQP links to resume send/receive operations.
|
||||
- InvalidOperationError: A link to connection '.....' \$cbs node has already been opened.
|
||||
- UnauthorizedError: Unauthorized access. 'Listen' claim(s) are required to perform this operation.
|
||||
|
||||
### Breaking Changes
|
||||
|
||||
- If you have been using the `createFromAadTokenCredentials` function or the `createFromAadTokenCredentialsWithCustomCheckpointAndLeaseManager` function to create an instance of the
|
||||
`EventProcessorHost`, you will now need to use the [@azure/ms-rest-nodeauth](https://www.npmjs.com/package/@azure/ms-rest-nodeauth)
|
||||
library instead of [ms-rest-azure](https://www.npmjs.com/package/ms-rest-azure) library to create
|
||||
the credentials that are needed by these functions.
- Typescript: Replace `import * from "ms-rest-azure";` with `import * from "@azure/ms-rest-nodeauth";`
- Javascript: Replace `require("ms-rest-azure")` with `require("@azure/ms-rest-nodeauth")`
|
||||
|
||||
## 1.0.6 (2018-10-05)
|
||||
|
||||
- Remove `@azure/amqp-common` and `rhea-promise` as dependencies, since we use very little from
|
||||
those libraries and there is a risk of having two instances of rhea in the dependency chain which
|
||||
can cause problems while encoding types for filters.
|
||||
- `HostContext.connectionConfig` is now of type `EventHubConnectionConfig`.
|
||||
- Minimum dependency on `@azure/event-hubs: "^1.0.6"`.
|
||||
|
||||
## 1.0.5 (2018-10-01)
|
||||
|
||||
- Bumping minimum version of @azure/event-hubs to "1.0.5".
|
||||
- Taking a dependency on "@azure/amqp-common" for reusing the common parts.
|
||||
|
||||
## 1.0.4 (2018-09-25)
|
||||
|
||||
- Bumping minimum version of @azure/event-hubs to "1.0.4".
|
||||
|
||||
## 1.0.3 (2018-09-25)
|
||||
|
||||
- Ensures that amqp:link-stolen errors are not notified to the customer, since they are expected errors that
|
||||
happen during lease stealing or expiration as a part of load balancing.
|
||||
|
||||
## 1.0.2 (2018-09-15)
|
||||
|
||||
- Ensures that messages are checkpointed in order.
|
||||
|
||||
## 1.0.1 (2018-09-14)
|
||||
|
||||
- `eph.getPartitionInformation()` now works as expected when partitionId is of type `number | string`.
|
||||
- Updated documentation for the `eventHubPath` optional property in the `FromConnectionStringOptions` object.
|
||||
|
||||
## 1.0.0 (2018-09-12)
|
||||
|
||||
- Stable version of the library.
|
||||
|
||||
## 0.2.0 (2018-09-12)
|
||||
|
||||
- Added support to automatically balance the load of receiving messages across multiple partitions.
|
||||
- Added static method to create an EPH from an `IotHubConnectionString`
|
||||
- Added user-agent to the underlying amqp-connection. This would help in tracking usage of EPH.
|
||||
- Changed the overall design of EPH.
|
||||
- Instead of attaching handlers on `eph:message` and `eph:error`, now the handlers need to be passed
|
||||
as arguments to the `start()` method on EPH.
|
||||
- Apart from that an additional handler/method can be passed as an optional property `onEphError`
|
||||
to EPH. This handler will receive notifications from EPH regarding any errors that occur during
|
||||
partition management.
|
||||
- Removed optional property `leasecontainerName` and replaced it with a required parameter `storageContainerName` wherever applicable in all the static methods on `EventProcessorHost`.
|
||||
- Removed optional property `autoCheckpoint` and added optional properties
|
||||
- `checkpointManager`
|
||||
- `onEphError`
|
||||
- `leaseRenewInterval`
|
||||
- `leaseDuration`
|
||||
- Please take a look at the [examples](https://github.com/Azure/azure-sdk-for-js/tree/main/sdk/eventhub/event-processor-host/samples) for more details.
|
||||
|
||||
## 0.1.4 (2018-07-16)
|
||||
|
||||
- Added an option `autoCheckpoint: false` to not checkpoint the received messages by default.
|
||||
|
||||
## 0.1.3 (2018-06-13)
|
||||
|
||||
- `_storageBlobPrefix` is set if provided in the options, [#91](https://github.com/Azure/azure-event-hubs-node/pull/91).
|
||||
|
||||
## 0.1.2 (2018-06-13)
|
||||
|
||||
- Fixed an issue reported in [#80](https://github.com/Azure/azure-event-hubs-node/issues/80).
|
||||
|
||||
## 0.1.1 (2018-05-02)
|
||||
|
||||
- Fix dependency version.
|
||||
|
||||
## 0.1.0 (2018-05-02)
|
||||
|
||||
- First version of `azure-event-processor-host` based on the new `azure-event-hubs` sdk.
|
||||
- This client library makes it easier to manage receivers for an EventHub.
|
||||
- You can checkpoint the received data to an Azure Storage Blob. The processor does checkpointing
|
||||
on your behalf at regular intervals. This makes it easy to start receiving events from the point you
|
||||
left off at a later time.
|
|
@@ -1,21 +0,0 @@
|
|||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2020 Microsoft
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
|
@@ -1,335 +0,0 @@
|
|||
# @azure/event-processor-host
|
||||
|
||||
> Please note, a newer package [@azure/event-hubs](https://www.npmjs.com/package/@azure/event-hubs) is available as of January, 2020. While this package will continue to receive critical bug fixes, we strongly encourage you to upgrade. See the [migration guide](https://github.com/Azure/azure-sdk-for-js/blob/main/sdk/eventhub/event-hubs/migrationguide.md) for more details.
|
||||
|
||||
Azure Event Processor Host helps you efficiently receive events from an EventHub. It will create EventHub Receivers
|
||||
across all the partitions in the provided consumer group of an EventHub and provide you messages received across
|
||||
all the partitions. It will checkpoint metadata about the received messages at regular intervals in an
|
||||
Azure Storage Blob. This makes it easy to continue receiving messages from where you left off at a later time.
|
||||
|
||||
#### Conceptual Overview
|
||||
|
||||
![overview](https://raw.githubusercontent.com/Azure/azure-sdk-for-js/main/sdk/eventhub/event-processor-host/eph.png)
|
||||
|
||||
- More information about Azure Event Processor Host can be found over [here](https://docs.microsoft.com/azure/event-hubs/event-hubs-event-processor-host).
|
||||
- General overview of how the Event Processor Host SDK works internally can be found over [here](https://github.com/Azure/azure-sdk-for-js/blob/main/sdk/eventhub/event-processor-host/overview.md).
|
||||
|
||||
## Pre-requisite
|
||||
|
||||
- **Node.js version: 6.x or higher.**
|
||||
- We would **still encourage you** to install the latest available LTS version at any given time from https://nodejs.org. **It is a good practice to always install the latest available LTS version of node.js.**
|
||||
- Installing node.js on **Windows or macOS** is very simple with the installers available on the [node.js website](https://nodejs.org). If you are using a **Linux-based OS**, you can find easy-to-follow, one-step installation instructions over [here](https://nodejs.org/en/download/package-manager/).
|
||||
|
||||
## Installation
|
||||
|
||||
```bash
|
||||
npm install @azure/event-processor-host
|
||||
```
|
||||
|
||||
## IDE
|
||||
|
||||
This SDK has been developed in [TypeScript](https://typescriptlang.org) and has good source code documentation. It is highly recommended to use [vscode](https://code.visualstudio.com)
|
||||
or any other IDE that provides good IntelliSense and exposes the full power of the source code documentation.
|
||||
|
||||
## Debug logs
|
||||
|
||||
You can set the following environment variable to get the debug logs.
|
||||
|
||||
- Getting debug logs **only** from the Event Processor Host SDK
|
||||
|
||||
```bash
|
||||
export DEBUG=azure:eph*
|
||||
```
|
||||
|
||||
- Getting debug logs from the Event Processor Host SDK **and** the protocol level library.
|
||||
|
||||
```bash
|
||||
export DEBUG=azure:eph*,rhea*
|
||||
```
|
||||
|
||||
- Getting debug logs from the **Event Processor Host SDK, the Event Hub SDK and the protocol level library.**
|
||||
|
||||
```bash
|
||||
export DEBUG=azure*,rhea*
|
||||
```
|
||||
|
||||
- If you are **not interested in viewing the message transformation** (which consumes a lot of console/disk space) then you can set the `DEBUG` environment variable as follows:
|
||||
|
||||
```bash
|
||||
export DEBUG=azure*,rhea*,-rhea:raw,-rhea:message,-azure:amqp-common:datatransformer
|
||||
```
|
||||
|
||||
- If you are interested only in **errors**, then you can set the `DEBUG` environment variable as follows:
|
||||
|
||||
```bash
|
||||
export DEBUG=azure:eph:error,azure:event-hubs:error,azure-amqp-common:error,rhea-promise:error,rhea:events,rhea:frames,rhea:io,rhea:flow
|
||||
```
|
||||
|
||||
#### Logging to a file
|
||||
|
||||
- Set the `DEBUG` environment variable as shown above and then run your test script as follows:
|
||||
- Logging statements from your test script go to `out.log` and logging statements from the sdk go to `debug.log`.
|
||||
```bash
|
||||
node your-test-script.js > out.log 2>debug.log
|
||||
```
|
||||
- Logging statements from your test script and the sdk go to the same file `out.log` by redirecting stderr to stdout (&1), and then redirecting stdout to a file:
|
||||
```bash
|
||||
node your-test-script.js >out.log 2>&1
|
||||
```
|
||||
- Logging statements from your test script and the sdk go to the same file `out.log`.
|
||||
```bash
|
||||
node your-test-script.js &> out.log
|
||||
```
|
||||
|
||||
## Recommendation
|
||||
|
||||
- The sample provided below demonstrates multiple EPH instances in the same process. Since node.js is single threaded, one process has to balance managing (renewing, stealing, acquiring, updating) leases with receiving messages across all the partitions. It is better to
|
||||
create each instance in a separate process or on a separate machine; this should provide better results (a minimal per-process sketch follows).
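As a minimal sketch of that recommendation (the `EPH_NAME` environment variable here is just an illustrative choice, not part of this package), the same single-instance script can be launched once per process; sharing the same storage container lets the instances balance the partitions between themselves:

```js
// Hypothetical sketch: run this same script in two terminals, e.g.
//   EPH_NAME=eph-1 node singleEph.js
//   EPH_NAME=eph-2 node singleEph.js
// Each process hosts exactly one EventProcessorHost instance.
const { EventProcessorHost } = require("@azure/event-processor-host");

const eph = EventProcessorHost.createFromConnectionString(
  process.env.EPH_NAME || "eph-1",
  process.env.STORAGE_CONNECTION_STRING,
  "test-container", // must be the same container for every instance
  process.env.EVENTHUB_CONNECTION_STRING,
  { eventHubPath: process.env.EVENTHUB_NAME }
);
```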
|
||||
|
||||
## Examples
|
||||
|
||||
- Examples can be found over
|
||||
[here](https://github.com/Azure/azure-sdk-for-js/tree/main/sdk/eventhub/event-processor-host/samples).
|
||||
|
||||
## Usage
|
||||
|
||||
### NOTE
|
||||
|
||||
The following samples focus on EPH (Event Processor Host) which is responsible for receiving messages.
|
||||
For sending messages to the EventHub, please use the `@azure/event-hubs` package from npm. More
|
||||
information about the event hub client can be found over [here](https://github.com/Azure/azure-sdk-for-js/blob/main/sdk/eventhub/event-hubs).
|
||||
You can also use [this example](https://github.com/Azure/azure-sdk-for-js/blob/main/sdk/eventhub/event-processor-host/samples/typescript/src/sendBatch.ts) that sends
|
||||
multiple messages batched together. You should be able to run the `send` example from one terminal window and see those messages
|
||||
being received in the `singleEph` or `multipleEph` example being run in the second terminal window.
|
||||
|
||||
### Single EPH instance.
|
||||
|
||||
```js
|
||||
const { EventProcessorHost, delay } = require("@azure/event-processor-host");
|
||||
|
||||
const path = process.env.EVENTHUB_NAME;
|
||||
const storageCS = process.env.STORAGE_CONNECTION_STRING;
|
||||
const ehCS = process.env.EVENTHUB_CONNECTION_STRING;
|
||||
const storageContainerName = "test-container";
|
||||
|
||||
async function main() {
|
||||
// Create the Event Processor Host
|
||||
const eph = EventProcessorHost.createFromConnectionString(
|
||||
EventProcessorHost.createHostName("my-host"),
|
||||
storageCS,
|
||||
storageContainerName,
|
||||
ehCS,
|
||||
{
|
||||
eventHubPath: path,
onEphError: (error) => {
|
||||
console.log("This handler will notify you of any internal errors that happen " +
|
||||
"during partition and lease management: %O", error);
|
||||
}
}
|
||||
);
|
||||
let count = 0;
|
||||
// Message event handler
|
||||
const onMessage = async (context/*PartitionContext*/, data /*EventData*/) => {
|
||||
console.log(">>>>> Rx message from '%s': '%s'", context.partitionId, data.body);
|
||||
count++;
|
||||
// let us checkpoint every 100th message that is received across all the partitions.
|
||||
if (count % 100 === 0) {
|
||||
return await context.checkpoint();
|
||||
}
|
||||
};
|
||||
// Error event handler
|
||||
const onError = (error) => {
|
||||
console.log(">>>>> Received Error: %O", error);
|
||||
};
|
||||
// start the EPH
|
||||
await eph.start(onMessage, onError);
|
||||
// After some time, let's say 2 minutes
|
||||
await delay(120000);
|
||||
// This will stop the EPH.
|
||||
await eph.stop();
|
||||
}
|
||||
|
||||
main().catch((err) => {
|
||||
console.log(err);
|
||||
});
|
||||
```
|
||||
|
||||
### Multiple EPH instances in the same process.
|
||||
|
||||
This example creates 2 instances of EPH in the same process. It is also perfectly fine to create
|
||||
multiple EPH instances in different processes on the same or different machine.
|
||||
|
||||
```js
|
||||
const { EventProcessorHost, delay } = require("@azure/event-processor-host");
|
||||
|
||||
// set the values from environment variables.
|
||||
const path = process.env.EVENTHUB_NAME || "";
|
||||
const storageCS = process.env.STORAGE_CONNECTION_STRING;
|
||||
const ehCS = process.env.EVENTHUB_CONNECTION_STRING;
|
||||
|
||||
// set the names of eph and the lease container.
|
||||
const storageContainerName = "test-container";
|
||||
const ephName1 = "eph-1";
|
||||
const ephName2 = "eph-2";
|
||||
|
||||
/**
|
||||
* The main function that executes the sample.
|
||||
*/
|
||||
async function main() {
|
||||
// 1. Start eph-1.
|
||||
const eph1 = await startEph(ephName1);
|
||||
await sleep(20);
|
||||
// 2. After 20 seconds start eph-2.
|
||||
const eph2 = await startEph(ephName2);
|
||||
await sleep(90);
|
||||
// 3. Now, load will be evenly balanced between eph-1 and eph-2. After 90 seconds stop eph-1.
|
||||
await stopEph(eph1);
|
||||
await sleep(40);
|
||||
// 4. Now, eph-2 will regain access to all the partitions and will be stopped after 40 seconds.
|
||||
await stopEph(eph2);
|
||||
}
|
||||
|
||||
// calling the main().
|
||||
main().catch((err) => {
|
||||
console.log("Exiting from main() due to an error: %O.", err);
|
||||
});
|
||||
|
||||
/**
|
||||
* Sleeps for the given number of seconds.
|
||||
* @param timeInSeconds Time to sleep in seconds.
|
||||
*/
|
||||
async function sleep(timeInSeconds /**number**/) {
|
||||
console.log(">>>>>> Sleeping for %d seconds..", timeInSeconds);
|
||||
await delay(timeInSeconds * 1000);
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates an EPH with the given name and starts the EPH.
|
||||
* @param ephName The name of the EPH.
|
||||
* @returns {Promise<EventProcessorHost>} Promise<EventProcessorHost>
|
||||
*/
|
||||
async function startEph(ephName /**string**/) {
|
||||
// Create the Event Processor Host
|
||||
const eph = EventProcessorHost.createFromConnectionString(
|
||||
ephName,
|
||||
storageCS,
|
||||
storageContainerName,
|
||||
ehCS,
|
||||
{
|
||||
eventHubPath: path,
|
||||
// This method will provide errors that occur during lease and partition management. The
|
||||
// errors that occur while receiving messages will be provided in the onError handler
|
||||
// provided in the eph.start() method.
|
||||
onEphError: (error) => {
|
||||
console.log(">>>>>>> [%s] Error: %O", ephName, error);
|
||||
}
|
||||
}
|
||||
);
|
||||
// Message handler
|
||||
let count = 0;
|
||||
const onMessage /**OnReceivedMessage**/ = async (
|
||||
context /**PartitionContext**/,
|
||||
data /**EventData**/
|
||||
) => {
|
||||
count++;
|
||||
console.log(
|
||||
"##### [%s] %d - Rx message from '%s': '%s'",
|
||||
ephName,
|
||||
count,
|
||||
context.partitionId,
|
||||
data.body
|
||||
);
|
||||
// Checkpointing every 200th event that is received across all the partitions.
|
||||
if (count % 200 === 0) {
|
||||
try {
|
||||
console.log(
|
||||
"***** [%s] EPH is currently receiving messages from partitions: %O",
|
||||
ephName,
|
||||
eph.receivingFromPartitions
|
||||
);
|
||||
await context.checkpoint();
|
||||
console.log("$$$$ [%s] Successfully checkpointed message number %d", ephName, count);
|
||||
} catch (err) {
|
||||
console.log(
|
||||
">>>>>>> [%s] An error occurred while checkpointing msg number %d: %O",
|
||||
ephName,
|
||||
count,
|
||||
err
|
||||
);
|
||||
}
|
||||
}
|
||||
};
|
||||
// Error handler
|
||||
const onError /**OnReceivedError**/ = (error) => {
|
||||
console.log(">>>>> [%s] Received Error: %O", ephName, error);
|
||||
};
|
||||
console.log(">>>>>> Starting the EPH - %s", ephName);
|
||||
await eph.start(onMessage, onError);
|
||||
return eph;
|
||||
}
|
||||
|
||||
/**
|
||||
* Stops the given EventProcessorHost.
|
||||
* @param eph The event processor host.
|
||||
* @returns {Promise<void>} Promise<void>
|
||||
*/
|
||||
async function stopEph(eph /**EventProcessorHost**/) {
|
||||
console.log(">>>>>> Stopping the EPH - '%s'.", eph.hostName);
|
||||
await eph.stop();
|
||||
console.log(">>>>>> Successfully stopped the EPH - '%s'.", eph.hostName);
|
||||
}
|
||||
```
|
||||
|
||||
### EPH with IotHub connection string
|
||||
|
||||
```js
|
||||
const { EventProcessorHost, delay } = require("@azure/event-processor-host");
|
||||
|
||||
const path = process.env.EVENTHUB_NAME || "";
|
||||
const storageCS = process.env.STORAGE_CONNECTION_STRING;
|
||||
const iothubCS = process.env.IOTHUB_CONNECTION_STRING;
|
||||
const storageContainerName = "test-container";
|
||||
|
||||
async function main() {
|
||||
// Create the Event Processor Host
|
||||
const eph = await EventProcessorHost.createFromIotHubConnectionString(
|
||||
EventProcessorHost.createHostName("my-host"),
|
||||
storageCS,
|
||||
storageContainerName,
|
||||
iothubCS,
|
||||
{
|
||||
eventHubPath: path
|
||||
}
|
||||
);
|
||||
let count = 0;
|
||||
// Message event handler
|
||||
const onMessage = async (context /*PartitionContext*/, data /*EventData*/) => {
|
||||
console.log(">>>>> Rx message from '%s': '%s'", context.partitionId, data.body);
|
||||
count++;
|
||||
// let us checkpoint every 100th message that is received across all the partitions.
|
||||
if (count % 100 === 0) {
|
||||
return await context.checkpoint();
|
||||
}
|
||||
};
|
||||
// Error event handler
|
||||
const onError = (error) => {
|
||||
console.log(">>>>> Received Error: %O", error);
|
||||
};
|
||||
// start the EPH
|
||||
await eph.start(onMessage, onError);
|
||||
// After some time, let's say 2 minutes
|
||||
await delay(120000);
|
||||
// This will stop the EPH.
|
||||
await eph.stop();
|
||||
}
|
||||
|
||||
main().catch((err) => {
|
||||
console.log(err);
|
||||
});
|
||||
```
|
||||
|
||||
## AMQP Dependencies
|
||||
|
||||
It depends on the [rhea](https://github.com/amqp/rhea) library for managing connections and for sending and receiving events over the [AMQP](https://docs.oasis-open.org/amqp/core/v1.0/os/amqp-core-complete-v1.0-os.pdf) protocol.
|
||||
|
||||
![Impressions](https://azure-sdk-impressions.azurewebsites.net/api/impressions/azure-sdk-for-js%2Fsdk%2Feventhub%2Fevent-processor-host%2FREADME.png)
|
|
@@ -1,31 +0,0 @@
|
|||
{
|
||||
"$schema": "https://developer.microsoft.com/json-schemas/api-extractor/v7/api-extractor.schema.json",
|
||||
"mainEntryPointFilePath": "typings/src/index.d.ts",
|
||||
"docModel": {
|
||||
"enabled": true
|
||||
},
|
||||
"apiReport": {
|
||||
"enabled": true,
|
||||
"reportFolder": "./review"
|
||||
},
|
||||
"dtsRollup": {
|
||||
"enabled": true,
|
||||
"untrimmedFilePath": "",
|
||||
"publicTrimmedFilePath": "./typings/event-processor-host.d.ts"
|
||||
},
|
||||
"messages": {
|
||||
"tsdocMessageReporting": {
|
||||
"default": {
|
||||
"logLevel": "none"
|
||||
}
|
||||
},
|
||||
"extractorMessageReporting": {
|
||||
"ae-missing-release-tag": {
|
||||
"logLevel": "none"
|
||||
},
|
||||
"ae-unresolved-link": {
|
||||
"logLevel": "none"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
Binary data
sdk/eventhub/event-processor-host/eph.png
Binary file not shown.
Before: Width | Height | Size: 1008 KiB
|
@@ -1,136 +0,0 @@
|
|||
# General Overview of Microsoft Azure Event Processor Host (@azure/event-processor-host) for JavaScript
|
||||
|
||||
Event Processor Host is built on top of the Microsoft Azure Event Hubs Client `@azure/event-hubs` for JS and provides a number of features not present in that lower layer:
|
||||
|
||||
1. Event Processor Host removes the need to think about partitions. By default, it creates one instance of the event
|
||||
hub client for each partition. Each instance will only ever handle
|
||||
events from one partition, further simplifying the processing code.
|
||||
2. Event Processor Host allows easy load balancing. Utilizing a shared persistent store for leases on partitions
|
||||
(by default based on Azure Storage), instances of Event Processor Host receiving from the same consumer group
|
||||
of the same Event Hub can be spread across multiple machines and partitions will be distributed across those
|
||||
machines as evenly as possible. These instances can be started and stopped at any time, and partitions will be
|
||||
redistributed as needed. It is even allowed to have more instances than partitions as a form of hot standby. (Note that
|
||||
partition distribution is based solely on the number of partitions per instance, not event flow rate or any other metric.)
|
||||
3. Event Processor Host allows the event processor to create a persistent "checkpoint" that describes a position in
|
||||
the partition's event stream, and if restarted it automatically begins receiving at the next event after the checkpoint.
|
||||
Because checkpointing is usually an expensive operation, it is up to you to create
|
||||
them from within the `onMessage` handler, at whatever interval is suitable for your application. For example, an application with relatively infrequent messages might checkpoint after processing each one, whereas an application that requires high performance in the processing code in order to keep up with event flow might checkpoint once every hundred messages, or once
|
||||
per second (a small time-based sketch follows this list).
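For example, here is a minimal sketch of the time-based variant ("once per second") that uses only the `PartitionContext.checkpoint()` call from this package; the one-second interval and the `processEvent` helper are illustrative placeholders, not part of the library:

```js
// Checkpoint each partition at most once per second, independent of message volume.
const lastCheckpointTime = {}; // partitionId -> timestamp of the last successful checkpoint

const onMessage = async (context /* PartitionContext */, data /* EventData */) => {
  await processEvent(data); // placeholder for your own processing logic

  const now = Date.now();
  if (now - (lastCheckpointTime[context.partitionId] || 0) >= 1000) {
    try {
      await context.checkpoint();
      lastCheckpointTime[context.partitionId] = now;
    } catch (err) {
      console.log("Failed to checkpoint partition '%s': %O", context.partitionId, err);
    }
  }
};
```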
|
||||
|
||||
## Using Event Processor Host
|
||||
|
||||
### Step 1: Instantiate the Event Processor Host and provide a general error notification handler
|
||||
|
||||
Instantiate the EPH using one of the many static methods that is the best fit for you. You can also
|
||||
provide a general error notification handler. It will let you know about internal errors that happen
|
||||
while managing partitions.
|
||||
|
||||
```ts
|
||||
import {
|
||||
EventProcessorHost, OnReceivedError, OnReceivedMessage, EventData, PartitionContext, delay
|
||||
} from "@azure/event-processor-host";
|
||||
|
||||
const path = process.env.EVENTHUB_NAME;
|
||||
const storageCS = process.env.STORAGE_CONNECTION_STRING; // you can get this from https://portal.azure.com
|
||||
const ehCS = process.env.EVENTHUB_CONNECTION_STRING;
|
||||
// Creates a unique storage container name for every run.
|
||||
// If you wish to keep the same name between different runs, a fixed string works as well.
|
||||
const storageContainerName = EventProcessorHost.createHostName("test-container");
|
||||
const ephName = "my-eph";
|
||||
|
||||
// Create the Event Processor Host
|
||||
const eph = EventProcessorHost.createFromConnectionString(
|
||||
EventProcessorHost.createHostName(ephName),
|
||||
storageCS!,
|
||||
storageContainerName,
|
||||
ehCS!,
|
||||
{
|
||||
eventHubPath: path,
|
||||
onEphError: (error) => {
|
||||
console.log(">>>>>>> [%s] Error: %O", ephName, error);
|
||||
}
|
||||
}
|
||||
);
|
||||
```
|
||||
|
||||
### Step 2: Implement the message handler and the error handler and start the EPH
|
||||
The `onMessage` handler processes all the received events from different partitions. It provides
|
||||
the partition context and the EventData. PartitionContext provides the means to create a checkpoint for the partition. Please make sure to checkpoint within a `try/catch` block.
|
||||
|
||||
```ts
|
||||
// Keep a per-partition count of received events so we can checkpoint periodically.
const partionCount: { [partitionId: string]: number } = {};

const onMessage: OnReceivedMessage = async (context: PartitionContext, data: EventData) => {
partionCount[context.partitionId] = (partionCount[context.partitionId] || 0) + 1;
|
||||
console.log("##### [%s] - Rx message from partition: '%s', offset: '%s'", ephName, context.partitionId, data.offset);
|
||||
// Checkpointing every 100th event received for a given partition.
|
||||
if (partionCount[context.partitionId] % 100 === 0) {
|
||||
const num = partionCount[context.partitionId];
|
||||
try {
|
||||
console.log("$$$$ [%s] Attempting to checkpoint message number %d", ephName, num);
|
||||
await context.checkpoint();
|
||||
console.log("$$$$ [%s] Successfully checkpointed message number %d", ephName, num);
|
||||
} catch (err) {
|
||||
console.log(">>>>> [%s] An error occurred while checkpointing msg number %d: %O", ephName, num, err);
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
// Error handler
|
||||
const onError: OnReceivedError = (error) => {
|
||||
console.log(">>>>> [%s] Received Error: %O", ephName, error);
|
||||
};
|
||||
|
||||
try {
|
||||
await eph.start(onMessage, onError);
|
||||
} catch (err) {
|
||||
console.log("An error occurred while starting the EPH: %O", err);
|
||||
}
|
||||
```
|
||||
### Step 3: Graceful Shutdown
|
||||
|
||||
```js
|
||||
try {
|
||||
await eph.stop();
|
||||
console.log(">>>>>> Successfully stopped the EPH - '%s'.", eph.hostName);
|
||||
} catch (err) {
|
||||
console.log("An error occurred while stopping the EPH: %O", err);
|
||||
}
|
||||
```
|
||||
|
||||
## Checkpointing, Partition Ownership, and Reprocessing Messages
|
||||
|
||||
In a system using Event Processor Host, there are one or more hosts processing events from a particular event hub+consumer group combination, and ownership of the partitions of the event hub are split up between the hosts. When a host takes ownership of a partition, it starts a receiver on that partition, and when doing so it must specify the position in the stream of events at which the receiver will begin consuming. If there is a checkpoint for that event hub+consumer group+partition combination available via the checkpoint manager (by default, in Azure Storage), the receiver will begin consuming at the position indicated by the checkpoint.
|
||||
|
||||
Any time a host takes ownership of a partition, reprocessing of events may occur. Exactly how many messages may be reprocessed depends on how often checkpoints are written. Writing a checkpoint with the default checkpoint manager is expensive, since it makes at least one HTTPS call to Azure Storage. The obvious strategy to minimize reprocessing of events is to checkpoint after processing each event, but we advise against this due to the performance hit.
|
||||
In a low-throughput scenario it may be OK, but as the event rate goes up, checkpointing too often could prevent a processor from being able to keep up with the flow. Also, checkpointing after each event cannot completely prevent event reprocessing, since there will always be some time between finishing
|
||||
processing and writing the checkpoint, during which the processor could fail. Customer applications must be able to detect and handle some amount of reprocessing, and the customer needs to study their particular scenario and application to balance the cost of handling the reprocessing against the performance hit of checkpointing more frequently.
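One common way to tolerate such reprocessing is to make the `onMessage` handler idempotent, for example by remembering the highest sequence number already handled per partition. A rough sketch follows; the in-memory map and the `handleEvent` helper are illustrative only, and a real application would persist this state alongside its own output:

```js
// Skip events that were already handled before the last crash or ownership change.
const lastProcessedSequenceNumber = {}; // partitionId -> highest sequenceNumber handled

const onMessage = async (context /* PartitionContext */, data /* EventData */) => {
  const last = lastProcessedSequenceNumber[context.partitionId];
  if (last !== undefined && data.sequenceNumber <= last) {
    return; // duplicate delivery caused by a checkpoint that lagged behind processing
  }
  await handleEvent(data); // placeholder for your own processing logic
  lastProcessedSequenceNumber[context.partitionId] = data.sequenceNumber;
};
```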
|
||||
|
||||
What can cause ownership of a partition to change:
|
||||
1. Bringing a host online: it will steal ownership of partitions from already-running hosts until the distribution of partitions among hosts is as even as possible.
|
||||
2. A host crashing/losing power/losing network connection/going offline for any reason: the leases on the partitions that the downed host owned will expire and the remaining hosts will find the expired leases and take ownership. This may result in unbalanced distribution to start with which will cause additional ownership changes until the distribution is balanced.
|
||||
3. Azure Storage latency or failures which result in a partition lease expiring because it cannot be renewed in time: other hosts (or even the same host) will find the expired lease and take ownership. Again, this can result in unbalanced distribution and additional ownership changes. This scenario can occur even if there is only one host.
|
||||
4. Certain event hub client errors can cause the processor for a partition to shut down, with the same effects as case 3. This scenario can also occur even with only one host.
|
||||
|
||||
## Internal working of Event Processor Host
|
||||
|
||||
EventHubs supports creating receivers with an `epoch value`. Epoch is of type `number`. At any given point in time, the receiver with the `highest epoch` value can receive messages from an EventHub for a given partition. Already connected receivers with a `lower epoch` value or receivers `without an epoch` value will be disconnected. This ensures that, at any given time, there is `only one` receiver receiving messages from a partition in an EventHub. EPH makes use of this key functionality to receive messages from an EventHub.
|
||||
|
||||
### Lease Acquisition:
|
||||
Whenever a lease is acquired for the specified leaseDuration (default 30 seconds), the EPH reads the epoch value and the offset from the blob. It creates the receiver with a higher epoch value (+1) than what was read from the blob, and with the offset read from the blob as the starting point. If there is no offset in the blob, then it starts from the beginning (-1, the default value).
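A simplified sketch of that decision is shown below. It is illustrative only, not the package's internal code; the property names follow the `CompleteLease` and `CheckpointInfo` shapes from this package's API:

```js
// Decide how to open the receiver once a lease has been acquired.
function receiverSettingsFor(lease /* CompleteLease */, checkpoint /* CheckpointInfo | undefined */) {
  return {
    // A receiver with a higher epoch disconnects any existing receiver on the partition.
    epoch: lease.epoch + 1,
    // Resume from the checkpointed offset, or from the beginning ("-1") if there is none.
    startingOffset: checkpoint && checkpoint.offset ? checkpoint.offset : "-1"
  };
}
```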
|
||||
|
||||
### Lease Renewal:
|
||||
While the EPH is receiving messages, it keeps renewing the lease at a regular interval (default 10 seconds). If an error occurs while renewing the lease, it simply logs the error. It does not disconnect the receiver, since the receiver will be automatically disconnected when the lease expires or when someone steals the lease.
|
||||
|
||||
The EPH keeps scanning all the partitions at some interval. If it was able to steal leases in the previous scan, it sleeps for a shorter time before scanning again. If it did not steal any leases in the previous scan, it sleeps for a longer time before scanning again.
|
||||
|
||||
When a new instance of EPH comes online, it starts scanning partitions by reading the contents of the LeaseStore. This helps the EPH understand the state of its world.
|
||||
|
||||
During each scan (a simplified sketch of the lease arithmetic follows this list):
|
||||
- It tries to find the number of unique hosts. This helps the EPH determine the ideal number of leases (1 per partition) that it should aim for.
|
||||
- If the number of leases that it owns is less than the desired count, then it attempts to acquire any expired leases (done concurrently, to make full use of the async nature of node.js)
|
||||
- If it is able to acquire the lease, then it starts the receiver as described above.
|
||||
- If it still needs more leases, then it will try stealing leases
|
||||
- It decides to steal leases only from those owners that own leases more than the desired count.
|
||||
- It randomly picks one of the biggest owners and tries to steal the desired number of leases from that owner.
|
||||
- While stealing leases (done concurrently, to make full use of the async nature of node.js)
|
||||
- If the lease is successfully stolen, then it starts the receiver as described above. This means that some other EPH instance's lease was lost. That EPH instance would have received a disconnect error from that receiver and the receiver would be closed.
|
||||
- Else, it logs an error that it was not able to steal the lease and takes no further action
|
||||
- If it does not steal any leases, it returns false. This drives the amount of time the EPH will sleep before it starts scanning again.
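A simplified sketch of the lease arithmetic behind each scan, illustrative only (it uses the `BaseLease` shape from this package's API report, but it is not the internal implementation):

```js
// Given all leases read from the lease store, work out how many leases this host
// should own, how many it still needs, and which owners are candidates for stealing.
function planScan(allLeases /* BaseLease[] */, myHostName) {
  const owners = new Set(allLeases.map((l) => l.owner).filter(Boolean));
  const hostCount = Math.max(owners.size, 1);

  // Ideal count: one lease per partition, spread as evenly as possible across hosts.
  const desired = Math.ceil(allLeases.length / hostCount);
  const mine = allLeases.filter((l) => l.isOwnedBy(myHostName)).length;
  const needed = Math.max(desired - mine, 0);

  // Steal only from owners that hold more leases than the desired count.
  const countByOwner = {};
  for (const l of allLeases) countByOwner[l.owner] = (countByOwner[l.owner] || 0) + 1;
  const bigOwners = Object.keys(countByOwner).filter(
    (o) => o !== myHostName && countByOwner[o] > desired
  );

  return { desired, needed, bigOwners };
}
```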
|
|
@@ -1,115 +0,0 @@
|
|||
{
|
||||
"name": "@azure/event-processor-host",
|
||||
"version": "2.1.1",
|
||||
"description": "Azure Event Processor Host (Event Hubs) SDK for JS.",
|
||||
"author": "Microsoft Corporation",
|
||||
"license": "MIT",
|
||||
"homepage": "https://github.com/Azure/azure-sdk-for-js/tree/main/sdk/eventhub/event-processor-host/",
|
||||
"repository": "github:Azure/azure-sdk-for-js",
|
||||
"sideEffects": false,
|
||||
"keywords": [
|
||||
"azure",
|
||||
"cloud",
|
||||
"event hubs",
|
||||
"event processor",
|
||||
"events"
|
||||
],
|
||||
"bugs": {
|
||||
"url": "https://github.com/Azure/azure-sdk-for-js/issues"
|
||||
},
|
||||
"main": "./dist/index.js",
|
||||
"module": "dist-esm/src/index.js",
|
||||
"types": "./typings/event-processor-host.d.ts",
|
||||
"engines": {
|
||||
"node": ">=12.0.0"
|
||||
},
|
||||
"files": [
|
||||
"dist/",
|
||||
"dist-esm/src/",
|
||||
"typings/event-processor-host.d.ts",
|
||||
"README.md",
|
||||
"LICENSE"
|
||||
],
|
||||
"scripts": {
|
||||
"audit": "node ../../../common/scripts/rush-audit.js && rimraf node_modules package-lock.json && npm i --package-lock-only 2>&1 && npm audit",
|
||||
"build:browser": "npm run build",
|
||||
"build:node": "npm run build",
|
||||
"build:samples": "dev-tool samples prep && cd dist-samples && tsc",
|
||||
"build:test": "npm run build",
|
||||
"build": "npm run clean && tsc -p . && dev-tool run bundle --browser-test=false && api-extractor run --local",
|
||||
"check-format": "prettier --list-different --config ../../../.prettierrc.json --ignore-path ../../../.prettierignore \"src/**/*.ts\" \"test/**/*.ts\" \"*.{js,json}\"",
|
||||
"clean": "rimraf dist dist-* typings *.tgz *.log",
|
||||
"execute:samples": "echo skipped",
|
||||
"extract-api": "tsc -p . && api-extractor run --local",
|
||||
"format": "prettier --write --config ../../../.prettierrc.json --ignore-path ../../../.prettierignore \"src/**/*.ts\" \"test/**/*.ts\" \"*.{js,json}\"",
|
||||
"integration-test:browser": "echo skipped",
|
||||
"integration-test:node": "nyc mocha -r esm --require source-map-support/register --reporter ../../../common/tools/mocha-multi-reporter.js --timeout 1200000 --full-trace \"dist-esm/test/*.spec.js\"",
|
||||
"integration-test": "npm run integration-test:node && npm run integration-test:browser",
|
||||
"lint:fix": "eslint package.json src test samples --ext .ts --fix --fix-type [problem,suggestion]",
|
||||
"lint": "eslint package.json src test samples --ext .ts -f html -o event-processor-host-lintReport.html || exit 0",
|
||||
"pack": "npm pack 2>&1",
|
||||
"test:browser": "npm run build:test && npm run unit-test:browser && npm run integration-test:browser",
|
||||
"test:node": "npm run build:test && npm run unit-test:node && npm run integration-test:node",
|
||||
"test": "npm run build:test && npm run unit-test && npm run integration-test",
|
||||
"unit-test:browser": "echo skipped",
|
||||
"unit-test:node": "echo skipped",
|
||||
"unit-test": "npm run unit-test:node && npm run unit-test:browser"
|
||||
},
|
||||
"//metadata": {
|
||||
"constantPaths": [
|
||||
{
|
||||
"path": "src/util/constants.ts",
|
||||
"prefix": "version"
|
||||
}
|
||||
]
|
||||
},
|
||||
"//sampleConfiguration": {
|
||||
"skip": [
|
||||
"iothubEph.js",
|
||||
"multiEph.js",
|
||||
"sendBatch.js",
|
||||
"singleEph.js",
|
||||
"websockets.js"
|
||||
]
|
||||
},
|
||||
"dependencies": {
|
||||
"@azure/event-hubs": "^2.1.4",
|
||||
"@azure/ms-rest-nodeauth": "^0.9.2",
|
||||
"async-lock": "^1.1.3",
|
||||
"azure-storage": "^2.10.2",
|
||||
"debug": "^4.1.1",
|
||||
"path-browserify": "^1.0.0",
|
||||
"tslib": "^2.2.0",
|
||||
"uuid": "^8.3.0"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@azure/dev-tool": "^1.0.0",
|
||||
"@azure/eslint-plugin-azure-sdk": "^3.0.0",
|
||||
"@microsoft/api-extractor": "7.18.11",
|
||||
"@types/async-lock": "^1.1.0",
|
||||
"@types/chai": "^4.1.6",
|
||||
"@types/chai-as-promised": "^7.1.0",
|
||||
"@types/chai-string": "^1.4.1",
|
||||
"@types/debug": "^4.1.4",
|
||||
"@types/mocha": "^7.0.2",
|
||||
"@types/node": "^12.0.0",
|
||||
"@types/uuid": "^8.0.0",
|
||||
"@types/ws": "^7.2.4",
|
||||
"chai": "^4.2.0",
|
||||
"chai-as-promised": "^7.1.1",
|
||||
"chai-string": "^1.5.0",
|
||||
"cross-env": "^7.0.2",
|
||||
"dotenv": "^8.2.0",
|
||||
"eslint": "^7.15.0",
|
||||
"esm": "^3.2.18",
|
||||
"https-proxy-agent": "^5.0.0",
|
||||
"mocha": "^7.1.1",
|
||||
"mocha-junit-reporter": "^2.0.0",
|
||||
"nyc": "^15.0.0",
|
||||
"prettier": "^2.5.1",
|
||||
"rimraf": "^3.0.0",
|
||||
"ts-node": "^10.0.0",
|
||||
"typescript": "~4.2.0",
|
||||
"ws": "^8.2.0"
|
||||
}
|
||||
}
|
|
@@ -1,206 +0,0 @@
|
|||
## API Report File for "@azure/event-processor-host"
|
||||
|
||||
> Do not edit this file. It is a report generated by [API Extractor](https://api-extractor.com/).
|
||||
|
||||
```ts
|
||||
|
||||
import { aadEventHubsAudience } from '@azure/event-hubs';
|
||||
import { ApplicationTokenCredentials } from '@azure/ms-rest-nodeauth';
|
||||
import AsyncLock from 'async-lock';
|
||||
import { BlobService as BlobService_2 } from 'azure-storage';
|
||||
import { ClientOptionsBase } from '@azure/event-hubs';
|
||||
import { DataTransformer } from '@azure/event-hubs';
|
||||
import { delay } from '@azure/event-hubs';
|
||||
import { DeviceTokenCredentials } from '@azure/ms-rest-nodeauth';
|
||||
import { Dictionary } from '@azure/event-hubs';
|
||||
import { EventData } from '@azure/event-hubs';
|
||||
import { EventHubClient } from '@azure/event-hubs';
|
||||
import { EventHubConnectionConfig } from '@azure/event-hubs';
|
||||
import { EventHubPartitionRuntimeInformation } from '@azure/event-hubs';
|
||||
import { EventHubRuntimeInformation } from '@azure/event-hubs';
|
||||
import { EventPosition } from '@azure/event-hubs';
|
||||
import { MessagingError } from '@azure/event-hubs';
|
||||
import { MSITokenCredentials } from '@azure/ms-rest-nodeauth';
|
||||
import { OnError } from '@azure/event-hubs';
|
||||
import { ServiceResponse } from 'azure-storage';
|
||||
import { TokenProvider } from '@azure/event-hubs';
|
||||
import { UserTokenCredentials } from '@azure/ms-rest-nodeauth';
|
||||
|
||||
export { aadEventHubsAudience }
|
||||
|
||||
// @public
|
||||
export class BaseLease implements BaseLeaseInfo {
|
||||
constructor(info: BaseLeaseInfo);
|
||||
getInfo(): BaseLeaseInfo;
|
||||
isOwned: boolean;
|
||||
isOwnedBy(possibleOwner: string): boolean;
|
||||
owner: string;
|
||||
readonly partitionId: string;
|
||||
}
|
||||
|
||||
// @public
|
||||
export interface BaseLeaseInfo {
|
||||
owner: string;
|
||||
partitionId: string;
|
||||
}
|
||||
|
||||
// @public
|
||||
export interface CheckpointInfo {
|
||||
offset: string;
|
||||
partitionId: string;
|
||||
sequenceNumber: number;
|
||||
}
|
||||
|
||||
// @public
|
||||
export namespace CheckpointInfo {
|
||||
export function create(partitionId: string, offset?: string, sequenceNumber?: number): CheckpointInfo;
|
||||
export function createFromEventData(partitionId: string, eventData: EventData): CheckpointInfo;
|
||||
// Warning: (ae-forgotten-export) The symbol "LeaseInfo" needs to be exported by the entry point index.d.ts
|
||||
export function createFromLease(lease: LeaseInfo): CheckpointInfo;
|
||||
}
|
||||
|
||||
// @public
|
||||
export interface CheckpointManager {
|
||||
checkpointStoreExists(): Promise<boolean>;
|
||||
createAllCheckpointsIfNotExists(partitionIds: string[]): Promise<void>;
|
||||
createCheckpointStoreIfNotExists(): Promise<void>;
|
||||
deleteCheckpoint(partitionId: string): Promise<void>;
|
||||
deleteCheckpointStore(): Promise<void>;
|
||||
getCheckpoint(partitionId: string): Promise<CheckpointInfo | undefined>;
|
||||
updateCheckpoint(lease: CompleteLease, checkpoint: CheckpointInfo): Promise<void>;
|
||||
}
|
||||
|
||||
// @public
|
||||
export class CompleteLease extends BaseLease {
|
||||
constructor(info: CompleteLeaseInfo);
|
||||
epoch: number;
|
||||
getInfo(): CompleteLeaseInfo;
|
||||
incrementEpoch(): number;
|
||||
}
|
||||
|
||||
// @public
|
||||
export interface CompleteLeaseInfo extends BaseLeaseInfo {
|
||||
epoch: number;
|
||||
}
|
||||
|
||||
export { DataTransformer }
|
||||
|
||||
export { delay }
|
||||
|
||||
// @public
|
||||
export interface EPHDiagnosticInfo {
|
||||
action: string;
|
||||
error: any;
|
||||
hostName: string;
|
||||
partitionId: string;
|
||||
}
|
||||
|
||||
export { EventData }
|
||||
|
||||
export { EventHubPartitionRuntimeInformation }
|
||||
|
||||
export { EventHubRuntimeInformation }
|
||||
|
||||
export { EventPosition }
|
||||
|
||||
// @public
|
||||
export class EventProcessorHost {
|
||||
constructor(hostName: string, options?: EventProcessorHostOptions);
|
||||
get consumerGroup(): string;
|
||||
static createFromAadTokenCredentials(hostName: string, storageConnectionString: string, storageContainerName: string, namespace: string, eventHubPath: string, credentials: ApplicationTokenCredentials | UserTokenCredentials | DeviceTokenCredentials | MSITokenCredentials, options?: FromTokenProviderOptions): EventProcessorHost;
|
||||
static createFromAadTokenCredentialsWithCustomCheckpointAndLeaseManager(hostName: string, namespace: string, eventHubPath: string, credentials: ApplicationTokenCredentials | UserTokenCredentials | DeviceTokenCredentials | MSITokenCredentials, checkpointManager: CheckpointManager, leaseManager: LeaseManager, options?: FromTokenProviderOptions): EventProcessorHost;
|
||||
static createFromConnectionString(hostName: string, storageConnectionString: string, storageContainerName: string, eventHubConnectionString: string, options?: FromConnectionStringOptions): EventProcessorHost;
|
||||
static createFromConnectionStringWithCustomCheckpointAndLeaseManager(hostName: string, eventHubConnectionString: string, checkpointManager: CheckpointManager, leaseManager: LeaseManager, options?: FromConnectionStringOptions): EventProcessorHost;
|
||||
// Warning: (ae-forgotten-export) The symbol "FromIotHubConnectionStringOptions" needs to be exported by the entry point index.d.ts
|
||||
static createFromIotHubConnectionString(hostName: string, storageConnectionString: string, storageContainerName: string, iotHubConnectionString: string, options?: FromIotHubConnectionStringOptions): Promise<EventProcessorHost>;
|
||||
static createFromIotHubConnectionStringWithCustomCheckpointAndLeaseManager(hostName: string, iotHubConnectionString: string, checkpointManager: CheckpointManager, leaseManager: LeaseManager, options?: FromIotHubConnectionStringOptions): Promise<EventProcessorHost>;
|
||||
static createFromTokenProvider(hostName: string, storageConnectionString: string, storageContainerName: string, namespace: string, eventHubPath: string, tokenProvider: TokenProvider, options?: FromTokenProviderOptions): EventProcessorHost;
|
||||
static createFromTokenProviderWithCustomCheckpointAndLeaseManager(hostName: string, namespace: string, eventHubPath: string, tokenProvider: TokenProvider, checkpointManager: CheckpointManager, leaseManager: LeaseManager, options?: FromTokenProviderOptions): EventProcessorHost;
|
||||
static createHostName(prefix?: string): string;
|
||||
getHubRuntimeInformation(): Promise<EventHubRuntimeInformation>;
|
||||
getPartitionIds(): Promise<string[]>;
|
||||
getPartitionInformation(partitionId: string | number): Promise<EventHubPartitionRuntimeInformation>;
|
||||
get hostName(): string;
|
||||
get receivingFromPartitions(): string[];
|
||||
start(onMessage: OnReceivedMessage, onError: OnReceivedError): Promise<void>;
|
||||
stop(): Promise<void>;
|
||||
}
|
||||
|
||||
// @public
|
||||
export interface EventProcessorHostOptions extends FromConnectionStringOptions {
|
||||
checkpointManager?: CheckpointManager;
|
||||
eventHubConnectionString?: string;
|
||||
leaseManager?: LeaseManager;
|
||||
storageConnectionString?: string;
|
||||
storageContainerName?: string;
|
||||
tokenProvider?: TokenProvider;
|
||||
}
|
||||
|
||||
// @public
|
||||
export interface FromConnectionStringOptions extends FromTokenProviderOptions {
|
||||
eventHubPath?: string;
|
||||
}
|
||||
|
||||
// @public
|
||||
export interface FromTokenProviderOptions extends ClientOptionsBase {
|
||||
consumerGroup?: string;
|
||||
fastScanInterval?: number;
|
||||
initialOffset?: EventPosition;
|
||||
leaseDuration?: number;
|
||||
leaseRenewInterval?: number;
|
||||
onEphError?: OnEphError;
|
||||
slowScanInterval?: number;
|
||||
startupScanDelay?: number;
|
||||
storageBlobPrefix?: string;
|
||||
}
|
||||
|
||||
// @public
|
||||
export interface LeaseManager {
|
||||
acquireLease(lease: CompleteLease): Promise<boolean>;
|
||||
createAllLeasesIfNotExists(partitionIds: string[]): Promise<void>;
|
||||
createLeaseStoreIfNotExists(): Promise<void>;
|
||||
deleteLease(lease: CompleteLease): Promise<void>;
|
||||
deleteLeaseStore(): Promise<void>;
|
||||
getAllLeases(): Promise<BaseLease[]>;
|
||||
getLease(partitionId: string): Promise<CompleteLease | undefined>;
|
||||
leaseDuration: number;
|
||||
leaseRenewInterval: number;
|
||||
leaseStoreExists(): Promise<boolean>;
|
||||
releaseLease(lease: CompleteLease): Promise<void>;
|
||||
renewLease(lease: CompleteLease): Promise<boolean>;
|
||||
updateLease(lease: CompleteLease): Promise<boolean>;
|
||||
}
|
||||
|
||||
export { MessagingError }
|
||||
|
||||
// @public
|
||||
export type OnEphError = (error: EPHDiagnosticInfo) => void;
|
||||
|
||||
export { OnError }
|
||||
|
||||
// @public
|
||||
export type OnReceivedError = (error: MessagingError | Error) => void;
|
||||
|
||||
// @public
|
||||
export type OnReceivedMessage = (context: PartitionContext, eventData: EventData) => void;
|
||||
|
||||
// @public
|
||||
export class PartitionContext {
|
||||
// Warning: (ae-forgotten-export) The symbol "HostContextWithCheckpointLeaseManager" needs to be exported by the entry point index.d.ts
|
||||
constructor(context: HostContextWithCheckpointLeaseManager, partitionId: string, lease: CompleteLease);
|
||||
checkpoint(): Promise<void>;
|
||||
checkpointFromEventData(eventData: EventData): Promise<void>;
|
||||
get consumerGroup(): string;
|
||||
get eventhubPath(): string;
|
||||
// (undocumented)
|
||||
getInitialOffset(): Promise<EventPosition>;
|
||||
lease: CompleteLease;
|
||||
get owner(): string;
|
||||
readonly partitionId: string;
|
||||
setOffsetAndSequenceNumber(eventData: EventData): void;
|
||||
}
|
||||
|
||||
|
||||
// (No @packageDocumentation comment for this package)
|
||||
|
||||
```
|
|
@@ -1,33 +0,0 @@
|
|||
## Getting started with samples
|
||||
|
||||
## Install the library
|
||||
|
||||
Run the command below in your samples folder to install the npm package for the Event Processor Host library.
|
||||
|
||||
```bash
|
||||
npm install @azure/event-processor-host
|
||||
```
|
||||
|
||||
## Get connection string & Event Hubs name
|
||||
|
||||
- In the [Azure Portal](https://portal.azure.com), go to **Dashboard > Event Hubs > _your-eventhubs-namespace_**.
|
||||
- If you don't have an Event Hubs resource, here are the docs that will help you create Event Hubs/IoT Hub resources in the portal:
|
||||
- [Azure Event Hubs - NodeJS DOCS](https://docs.microsoft.com/azure/event-hubs/event-hubs-node-get-started-send).
|
||||
- [Azure IoT Hubs - NodeJS DOCS](https://docs.microsoft.com/azure/iot-hub/iot-hub-node-node-module-twin-getstarted).
|
||||
- Note down the "Primary Connection String" of **RootManageSharedAccessKey** at **Shared access policies** under **Settings** tab.
|
||||
- Find the "Event Hubs" tab right under "Entities" at **_your-eventhubs-namespace_**, create a Event Hub and note down its name.
|
||||
> _Note : **RootManageSharedAccessKey** is automatically created for the namespace and has permissions for the entire namespace. If you want to use restricted access, refer to [Shared Access Signatures](https://docs.microsoft.com/rest/api/eventhub/generate-sas-token) and create access keys scoped to the specific Event Hub._
|
||||
- You would also need an Azure Storage Account when working with Event Processor Host. The docs are at
|
||||
[Create a storage account](https://docs.microsoft.com/azure/storage/common/storage-quickstart-create-account?tabs=azure-portal). After the storage account is created, find the connection string on the **Access keys** tab in the **Settings** section.
|
||||
|
||||
Before running a sample, update it with the connection string and the hub name you have noted down above.
|
||||
|
||||
## Running a sample
|
||||
|
||||
Copy the sample to your samples folder and use `node` to run it.
|
||||
|
||||
```bash
|
||||
node sample.js
|
||||
```
|
||||
|
||||
![Impressions](https://azure-sdk-impressions.azurewebsites.net/api/impressions/azure-sdk-for-js%2Fsdk%2Feventhub%2Fevent-processor-host%2Fsamples%2FREADME.png)
|
|
@@ -1,110 +0,0 @@
|
|||
/*
|
||||
Copyright (c) Microsoft Corporation. All rights reserved.
|
||||
Licensed under the MIT Licence.
|
||||
|
||||
This sample demonstrates how to use Event Processor Host to receive events from all partitions
|
||||
of an IoTHub instance. It also shows how to checkpoint metadata for received events at regular
|
||||
intervals in an Azure Storage Blob.
|
||||
*/
|
||||
|
||||
console.warn(
|
||||
"The package @azure/event-processor-host is deprecated in favor of @azure/event-hubs and @azure/eventhubs-checkpointstore-blob"
|
||||
);
|
||||
|
||||
const { EventProcessorHost, delay } = require("@azure/event-processor-host");
|
||||
|
||||
// Define IoT Hub and storage connection strings here
|
||||
const iotConnectionString = "";
|
||||
const storageConnectionString = "";
|
||||
|
||||
// Use `createHostName` to create a unique name based on given prefix to use different storage containers on each run if needed.
|
||||
const storageContainerName = EventProcessorHost.createHostName("iothub-container");
|
||||
const ephName = "my-iothub-eph";
|
||||
|
||||
async function main() {
|
||||
// Start eph.
|
||||
const eph = await startEph(ephName);
|
||||
// Sleeping for 90 seconds. This will give time for eph to receive messages.
|
||||
await delay(90000);
|
||||
// After 90 seconds stop eph.
|
||||
await stopEph(eph);
|
||||
}
|
||||
|
||||
main().catch((err) => {
|
||||
console.log("Exiting from main() due to an error: %O.", err);
|
||||
});
|
||||
|
||||
/**
|
||||
* Creates an EPH with the given name and starts the EPH.
|
||||
* @param ephName The name of the EPH.
|
||||
* @returns {Promise<EventProcessorHost>} Promise<EventProcessorHost>
|
||||
*/
|
||||
async function startEph(ephName) {
|
||||
// Create an Event Processor Host from an IotHub ConnectionString
|
||||
const eph = await EventProcessorHost.createFromIotHubConnectionString(
|
||||
ephName,
|
||||
storageConnectionString,
|
||||
storageContainerName,
|
||||
iotConnectionString,
|
||||
{
|
||||
onEphError: (error) => {
|
||||
console.log("[%s] Error: %O", ephName, error);
|
||||
}
|
||||
}
|
||||
);
|
||||
// Message handler
|
||||
const partionCount = {};
|
||||
const onMessage = async (context, event) => {
|
||||
!partionCount[context.partitionId]
|
||||
? (partionCount[context.partitionId] = 1)
|
||||
: partionCount[context.partitionId]++;
|
||||
console.log(
|
||||
"[%s] %d - Received message from partition: '%s', offset: '%s'",
|
||||
ephName,
|
||||
partionCount[context.partitionId],
|
||||
context.partitionId,
|
||||
event.offset
|
||||
);
|
||||
// Checkpointing every 100th event received for a given partition.
|
||||
if (partionCount[context.partitionId] % 100 === 0) {
|
||||
try {
|
||||
console.log(
|
||||
"[%s] EPH is currently receiving messages from partitions: %O",
|
||||
ephName,
|
||||
eph.receivingFromPartitions
|
||||
);
|
||||
await context.checkpointFromEventData(event);
|
||||
console.log(
|
||||
"[%s] Successfully checkpointed message number %d",
|
||||
ephName,
|
||||
partionCount[context.partitionId]
|
||||
);
|
||||
} catch (err) {
|
||||
console.log(
|
||||
"[%s] An error occurred while checkpointing msg number %d: %O",
|
||||
ephName,
|
||||
partionCount[context.partitionId],
|
||||
err
|
||||
);
|
||||
}
|
||||
}
|
||||
};
|
||||
// Error handler
|
||||
const onError = (error) => {
|
||||
console.log("[%s] Received Error: %O", ephName, error);
|
||||
};
|
||||
console.log("Starting the EPH - %s", ephName);
|
||||
await eph.start(onMessage, onError);
|
||||
return eph;
|
||||
}
|
||||
|
||||
/**
|
||||
* Stops the given EventProcessorHost.
|
||||
* @param eph The event processor host.
|
||||
* @returns {Promise<void>} Promise<void>
|
||||
*/
|
||||
async function stopEph(eph) {
|
||||
console.log("Stopping the EPH - '%s'.", eph.hostName);
|
||||
await eph.stop();
|
||||
console.log("Successfully stopped the EPH - '%s'.", eph.hostName);
|
||||
}
|
|
@@ -1,127 +0,0 @@
|
|||
/*
|
||||
Copyright (c) Microsoft Corporation. All rights reserved.
|
||||
Licensed under the MIT Licence.
|
||||
|
||||
This sample demonstrates how to use multiple instances of Event Processor Host in the same process
|
||||
to receive events from all partitions. It also shows how to checkpoint metadata for received events
|
||||
at regular intervals in an Azure Storage Blob.
|
||||
|
||||
If your Event Hubs instance doesn't have any events, then please run the "sendBatch.ts" sample
|
||||
to populate Event Hubs before running this sample.
|
||||
|
||||
See https://docs.microsoft.com/en-us/azure/event-hubs/event-hubs-event-processor-host
|
||||
to learn about Event Processor Host.
|
||||
*/
|
||||
|
||||
console.warn(
|
||||
"The package @azure/event-processor-host is deprecated in favor of @azure/event-hubs and @azure/eventhubs-checkpointstore-blob"
|
||||
);
|
||||
|
||||
const { EventProcessorHost, delay } = require("@azure/event-processor-host");
|
||||
|
||||
// Define Storage and Event Hubs connection strings and related Event Hubs entity name here
|
||||
const ehConnectionString = "";
|
||||
const eventHubsName = "";
|
||||
const storageConnectionString = "";
|
||||
const ephName1 = "eph-1";
|
||||
const ephName2 = "eph-2";
|
||||
|
||||
// Use `createHostName` to create a unique name based on the given prefix so that a different storage container can be used on each run if needed.
|
||||
const storageContainerName = EventProcessorHost.createHostName("test-container");
|
||||
|
||||
async function main() {
|
||||
// Start eph-1.
|
||||
const eph1 = await startEph(ephName1);
|
||||
await delay(20000);
|
||||
// After 20 seconds start eph-2.
|
||||
const eph2 = await startEph(ephName2);
|
||||
await delay(90000);
|
||||
// Now, load will be evenly balanced between eph-1 and eph-2. After 90 seconds stop eph-1.
|
||||
await stopEph(eph1);
|
||||
await delay(40000);
|
||||
// Now, eph-2 will regain access to all the partitions and will be stopped after 40 seconds.
|
||||
await stopEph(eph2);
|
||||
}
|
||||
|
||||
main().catch((err) => {
|
||||
console.log("Error occurred: ", err);
|
||||
});
|
||||
|
||||
/**
|
||||
* Creates an EPH with the given name and starts the EPH.
|
||||
* @param ephName The name of the EPH.
|
||||
* @returns {Promise<EventProcessorHost>} Promise<EventProcessorHost>
|
||||
*/
|
||||
async function startEph(ephName) {
|
||||
// Create the Event Processor Host
|
||||
const eph = EventProcessorHost.createFromConnectionString(
|
||||
ephName,
|
||||
storageConnectionString,
|
||||
storageContainerName,
|
||||
ehConnectionString,
|
||||
{
|
||||
eventHubPath: eventHubsName,
|
||||
// This method will provide errors that occur during lease and partition management. The
|
||||
// errors that occur while receiving messages will be provided in the onError handler
|
||||
// provided in the eph.start() method.
|
||||
onEphError: (error) => {
|
||||
console.log("[%s] Error: %O", ephName, error);
|
||||
}
|
||||
}
|
||||
);
|
||||
// Message handler
|
||||
const partionCount = {};
|
||||
const onMessage = async (context, event) => {
|
||||
!partionCount[context.partitionId]
|
||||
? (partionCount[context.partitionId] = 1)
|
||||
: partionCount[context.partitionId]++;
|
||||
console.log(
|
||||
"[%s] %d - Received message from partition: '%s', offset: '%s'",
|
||||
ephName,
|
||||
partionCount[context.partitionId],
|
||||
context.partitionId,
|
||||
event.offset
|
||||
);
|
||||
// Checkpointing every 100th event
|
||||
if (partionCount[context.partitionId] % 100 === 0) {
|
||||
try {
|
||||
console.log(
|
||||
"[%s] EPH is currently receiving messages from partitions: %O",
|
||||
ephName,
|
||||
eph.receivingFromPartitions
|
||||
);
|
||||
await context.checkpointFromEventData(event);
|
||||
console.log(
|
||||
"[%s] Successfully checkpointed message number %d",
|
||||
ephName,
|
||||
partionCount[context.partitionId]
|
||||
);
|
||||
} catch (err) {
|
||||
console.log(
|
||||
"[%s] An error occurred while checkpointing msg number %d: %O",
|
||||
ephName,
|
||||
partionCount[context.partitionId],
|
||||
err
|
||||
);
|
||||
}
|
||||
}
|
||||
};
|
||||
// Error handler
|
||||
const onError = (error) => {
|
||||
console.log("[%s] Received Error: %O", ephName, error);
|
||||
};
|
||||
console.log("Starting the EPH - %s", ephName);
|
||||
await eph.start(onMessage, onError);
|
||||
return eph;
|
||||
}
|
||||
|
||||
/**
|
||||
* Stops the given EventProcessorHost.
|
||||
* @param eph The event processor host.
|
||||
* @returns {Promise<void>} Promise<void>
|
||||
*/
|
||||
async function stopEph(eph) {
|
||||
console.log("Stopping the EPH - '%s'.", eph.hostName);
|
||||
await eph.stop();
|
||||
console.log("Successfully stopped the EPH - '%s'.", eph.hostName);
|
||||
}
|
|
@@ -1,35 +0,0 @@
|
|||
{
|
||||
"name": "@azure-samples/event-processor-host-ts",
|
||||
"private": true,
|
||||
"version": "0.1.0",
|
||||
"description": "Azure Event Process Host (Event Hubs) library samples for JavaScript",
|
||||
"engine": {
|
||||
"node": ">=12.0.0"
|
||||
},
|
||||
"repository": {
|
||||
"type": "git",
|
||||
"url": "git+https://github.com/Azure/azure-sdk-for-js.git"
|
||||
},
|
||||
"keywords": [
|
||||
"Azure",
|
||||
"Event Hubs",
|
||||
"Event Processor",
|
||||
"Node.js",
|
||||
"JavaScript"
|
||||
],
|
||||
"author": "Microsoft Corporation",
|
||||
"license": "MIT",
|
||||
"bugs": {
|
||||
"url": "https://github.com/Azure/azure-sdk-for-js/issues"
|
||||
},
|
||||
"homepage": "https://github.com/Azure/azure-sdk-for-js#readme",
|
||||
"sideEffects": false,
|
||||
"dependencies": {
|
||||
"@azure/event-processor-host": "latest",
|
||||
"dotenv": "^8.2.0",
|
||||
"tslib": "^1.9.3"
|
||||
},
|
||||
"devDependencies": {
|
||||
"rimraf": "^3.0.0"
|
||||
}
|
||||
}
|
|
@@ -1,8 +0,0 @@
|
|||
/*
|
||||
Copyright (c) Microsoft Corporation. All rights reserved.
|
||||
Licensed under the MIT Licence.
|
||||
|
||||
The sample to populate your Event Hubs instance with events before you try
|
||||
any of the other samples that show how to receive the events from Event Hubs is moved to
|
||||
https://github.com/Azure/azure-sdk-for-js/blob/main/sdk/eventhub/event-hubs/samples/javascript/sendEvents.js
|
||||
*/
|
|
@@ -1,120 +0,0 @@
|
|||
/*
|
||||
Copyright (c) Microsoft Corporation. All rights reserved.
|
||||
Licensed under the MIT Licence.
|
||||
|
||||
This sample demonstrates how to use Event Processor Host to receive events from all partitions
|
||||
of an Event Hub instance. It also shows how to checkpoint metadata for received events at regular
|
||||
intervals in an Azure Storage Blob.
|
||||
|
||||
If your Event Hubs instance doesn't have any events, then please run the "sendBatch.ts" sample
|
||||
to populate Event Hubs before running this sample.
|
||||
|
||||
See https://docs.microsoft.com/en-us/azure/event-hubs/event-hubs-event-processor-host
|
||||
to learn about Event Processor Host.
|
||||
*/
|
||||
|
||||
console.warn(
|
||||
"The package @azure/event-processor-host is deprecated in favor of @azure/event-hubs and @azure/eventhubs-checkpointstore-blob"
|
||||
);
|
||||
|
||||
const { EventProcessorHost, delay } = require("@azure/event-processor-host");
|
||||
|
||||
// Define storage connection string and Event Hubs connection string and related entity name here
|
||||
const ehConnectionString = "";
|
||||
const eventHubsName = "";
|
||||
const storageConnectionString = "";
|
||||
|
||||
// If you want to create a unique storage container name for every run, use the `createHostName` function; otherwise,
|
||||
// provide the storage container name here.
|
||||
// const storageContainerName = "my-container";
|
||||
const storageContainerName = EventProcessorHost.createHostName("test-container");
|
||||
const ephName = "my-eph";
|
||||
|
||||
async function main() {
|
||||
// Start eph.
|
||||
const eph = await startEph(ephName);
|
||||
// Sleeping for 90 seconds. This will give time for eph to receive messages.
|
||||
await delay(90000);
|
||||
// After 90 seconds stop eph.
|
||||
await stopEph(eph);
|
||||
}
|
||||
|
||||
main().catch((err) => {
|
||||
console.log("Error occurred: ", err);
|
||||
});
|
||||
|
||||
/**
|
||||
* Creates an EPH with the given name and starts the EPH.
|
||||
* @param ephName The name of the EPH.
|
||||
* @returns {Promise<EventProcessorHost>} Promise<EventProcessorHost>
|
||||
*/
|
||||
async function startEph(ephName) {
|
||||
// Create the Event Processor Host
|
||||
const eph = EventProcessorHost.createFromConnectionString(
|
||||
EventProcessorHost.createHostName(ephName),
|
||||
storageConnectionString,
|
||||
storageContainerName,
|
||||
ehConnectionString,
|
||||
{
|
||||
eventHubPath: eventHubsName,
|
||||
onEphError: (error) => {
|
||||
console.log("[%s] Error: %O", ephName, error);
|
||||
}
|
||||
}
|
||||
);
|
||||
// Message handler
|
||||
const partionCount = {};
|
||||
const onMessage = async (context, event) => {
|
||||
!partionCount[context.partitionId]
|
||||
? (partionCount[context.partitionId] = 1)
|
||||
: partionCount[context.partitionId]++;
|
||||
console.log(
|
||||
"[%s] %d - Received message from partition: '%s', offset: '%s'",
|
||||
ephName,
|
||||
partionCount[context.partitionId],
|
||||
context.partitionId,
|
||||
event.offset
|
||||
);
|
||||
// Checkpointing every 100th event received for a given partition.
|
||||
if (partionCount[context.partitionId] % 100 === 0) {
|
||||
try {
|
||||
console.log(
|
||||
"[%s] EPH is currently receiving messages from partitions: %O",
|
||||
ephName,
|
||||
eph.receivingFromPartitions
|
||||
);
|
||||
await context.checkpointFromEventData(event);
|
||||
console.log(
|
||||
"[%s] Successfully checkpointed message number %d",
|
||||
ephName,
|
||||
partionCount[context.partitionId]
|
||||
);
|
||||
} catch (err) {
|
||||
console.log(
|
||||
"[%s] An error occurred while checkpointing msg number %d: %O",
|
||||
ephName,
|
||||
partionCount[context.partitionId],
|
||||
err
|
||||
);
|
||||
}
|
||||
}
|
||||
};
|
||||
// Error handler
|
||||
const onError = (error) => {
|
||||
console.log("[%s] Received Error: %O", ephName, error);
|
||||
};
|
||||
console.log("Starting the EPH - %s", ephName);
|
||||
await eph.start(onMessage, onError);
|
||||
return eph;
|
||||
}
|
||||
|
||||
/**
|
||||
* Stops the given EventProcessorHost.
|
||||
* @param eph The event processor host.
|
||||
* @returns {Promise<void>} Promise<void>
|
||||
*/
|
||||
async function stopEph(eph) {
|
||||
console.log("Stopping the EPH - '%s'.", eph.hostName);
|
||||
await eph.stop();
|
||||
console.log("Successfully stopped the EPH - '%s'.", eph.hostName);
|
||||
}
|
|
@@ -1,63 +0,0 @@
|
|||
/*
|
||||
Copyright (c) Microsoft Corporation. All rights reserved.
|
||||
Licensed under the MIT Licence.
|
||||
|
||||
This sample demonstrates how to use WebSockets to enable Event Processor host to work over
|
||||
an HTTP proxy and in environments where the standard AMQP port 5671 is blocked.
|
||||
|
||||
This sample uses 2 external libraries
|
||||
- The `ws` library to provide a WebSocket implementation to the EPH library.
|
||||
- The `https-proxy-agent` to enable the `ws` library to work with a proxy server.
|
||||
*/
|
||||
|
||||
console.warn(
|
||||
"The package @azure/event-processor-host is deprecated in favor of @azure/event-hubs and @azure/eventhubs-checkpointstore-blob"
|
||||
);
|
||||
|
||||
const { EventProcessorHost } = require("@azure/event-processor-host");
|
||||
const WebSocket = require("ws");
|
||||
const url = require("url");
|
||||
const httpsProxyAgent = require("https-proxy-agent");
|
||||
|
||||
// Define storage connection string and Event Hubs connection string and related entity name here
|
||||
const ehConnectionString = "";
|
||||
const eventHubsName = "";
|
||||
const storageConnectionString = "";
|
||||
|
||||
// If you want to create a unique storage container name for every run, use the `createHostName` function; otherwise,
|
||||
// provide the storage container name here.
|
||||
// const storageContainerName = "my-container";
|
||||
const storageContainerName = EventProcessorHost.createHostName("test-container");
|
||||
const ephName = "my-eph";
|
||||
|
||||
// Create an instance of the `HttpsProxyAgent` class with the proxy server information like
|
||||
// proxy url, username and password
|
||||
// Skip this section if you are not behind a proxy server
|
||||
const urlParts = url.parse("http://localhost:3128");
|
||||
urlParts.auth = "username:password"; // Skip this if proxy server does not need authentication.
|
||||
const proxyAgent = new httpsProxyAgent(urlParts);
|
||||
|
||||
async function main() {
|
||||
const eph = EventProcessorHost.createFromConnectionString(
|
||||
EventProcessorHost.createHostName(ephName),
|
||||
storageConnectionString,
|
||||
storageContainerName,
|
||||
ehConnectionString,
|
||||
{
|
||||
eventHubPath: eventHubsName,
|
||||
onEphError: (error) => {
|
||||
console.log("[%s] Error: %O", ephName, error);
|
||||
},
|
||||
webSocket: WebSocket,
|
||||
webSocketConstructorOptions: { agent: proxyAgent }
|
||||
}
|
||||
);
|
||||
/*
|
||||
Refer to other samples, and place your code here to receive events
|
||||
*/
|
||||
await eph.stop();
|
||||
}
|
||||
|
||||
main().catch((err) => {
|
||||
console.log("Error occurred: ", err);
|
||||
});
|
|
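For comparison, the replacement `@azure/event-hubs` package exposes the same WebSocket-over-proxy capability through its `webSocketOptions` client option. The following is a minimal, hedged sketch rather than part of the removed sample; the proxy URL and credentials are placeholders, and it assumes recent versions of `ws` and `https-proxy-agent` that export `HttpsProxyAgent` by name.

```typescript
import { EventHubConsumerClient } from "@azure/event-hubs";
import WebSocket from "ws";
import { HttpsProxyAgent } from "https-proxy-agent";

// Placeholder values - fill these in before running.
const ehConnectionString = "";
const eventHubName = "";

// Configure the proxy agent; skip this if you are not behind a proxy server.
const proxyAgent = new HttpsProxyAgent("http://username:password@localhost:3128");

// Passing a WebSocket implementation (and its constructor options) makes the client
// tunnel AMQP traffic over port 443 through the proxy instead of using port 5671.
const consumerClient = new EventHubConsumerClient(
  EventHubConsumerClient.defaultConsumerGroupName,
  ehConnectionString,
  eventHubName,
  {
    webSocketOptions: {
      webSocket: WebSocket,
      webSocketConstructorOptions: { agent: proxyAgent }
    }
  }
);

// Refer to the other samples for how to subscribe and receive events, then close the client.
consumerClient.close().catch((err) => {
  console.log("Error occurred: ", err);
});
```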
@@ -1,8 +0,0 @@
|
|||
{
|
||||
"extends": "../tsconfig.json",
|
||||
"compilerOptions": {
|
||||
"module": "commonjs"
|
||||
},
|
||||
"include": ["**/*.ts"],
|
||||
"exclude": ["../node_modules", "../typings/**"]
|
||||
}
|
|
@@ -1,45 +0,0 @@
|
|||
## Getting started with samples
|
||||
|
||||
## Install the library
|
||||
|
||||
Run the command below in your samples folder to install the npm package for the Event Processor Host library.
|
||||
|
||||
```bash
|
||||
npm install @azure/event-processor-host
|
||||
```
|
||||
|
||||
## Get connection string & Event Hubs name
|
||||
|
||||
- In the [Azure Portal](https://portal.azure.com), go to **Dashboard > Event Hubs > _your-eventhubs-namespace_**.
|
||||
- If you don't have an Event Hubs resource, the following docs will help you create Event Hubs/IoT Hub resources in the portal:
|
||||
- [Azure Event Hubs - NodeJS DOCS](https://docs.microsoft.com/azure/event-hubs/event-hubs-node-get-started-send).
|
||||
- [Azure IoT Hubs - NodeJS DOCS](https://docs.microsoft.com/azure/iot-hub/iot-hub-node-node-module-twin-getstarted).
|
||||
- Note down the "Primary Connection String" of **RootManageSharedAccessKey** at **Shared access policies** under **Settings** tab.
|
||||
- Find the "Event Hubs" tab right under "Entities" at **_your-eventhubs-namespace_**, create a Event Hub and note down its name.
|
||||
> _Note: **RootManageSharedAccessKey** is automatically created for the namespace and has permissions for the entire namespace. If you want to use restricted access, refer to [Shared Access Signatures](https://docs.microsoft.com/rest/api/eventhub/generate-sas-token) and create access keys scoped to the specific Event Hub._
|
||||
- You will also need an Azure Storage account when working with Event Processor Host. The docs are at
|
||||
[Create a storage account](https://docs.microsoft.com/azure/storage/common/storage-quickstart-create-account?tabs=azure-portal). After the storage account is created, find the connection string on the **Access keys** tab in the **Settings** section.
|
||||
|
||||
Before running a sample, update it with the connection string and the hub name you have noted down above.
|
||||
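Rather than editing the constants in each sample by hand, you could also load them from the environment with the `dotenv` package that is already listed in the samples' `package.json`. The sketch below is only a hedged illustration; the environment variable names are illustrative, not ones the samples require.

```typescript
// .env file (illustrative variable names):
//   EVENTHUB_CONNECTION_STRING="Endpoint=sb://<namespace>.servicebus.windows.net/;SharedAccessKeyName=..."
//   EVENTHUB_NAME="<your-event-hub>"
//   STORAGE_CONNECTION_STRING="DefaultEndpointsProtocol=https;AccountName=..."

import * as dotenv from "dotenv";
dotenv.config();

// Read the values the samples expect instead of hard-coding them in the source.
const ehConnectionString = process.env["EVENTHUB_CONNECTION_STRING"] || "";
const eventHubsName = process.env["EVENTHUB_NAME"] || "";
const storageConnectionString = process.env["STORAGE_CONNECTION_STRING"] || "";

console.log("Loaded Event Hubs entity: %s", eventHubsName);
```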
|
||||
## Running a sample
|
||||
|
||||
If you don't have TypeScript installed, use `npm` to install it first.
|
||||
|
||||
```bash
|
||||
npm install -g typescript
|
||||
```
|
||||
|
||||
One way to run TypeScript samples is to use `ts-node`. To install `ts-node`, run the command below in your samples folder:
|
||||
|
||||
```bash
|
||||
npm install ts-node
|
||||
```
|
||||
|
||||
Copy the sample to your samples folder and use `ts-node` to run it.
|
||||
|
||||
```bash
|
||||
ts-node sample.ts
|
||||
```
|
||||
|
||||
![Impressions](https://azure-sdk-impressions.azurewebsites.net/api/impressions/azure-sdk-for-js%2Fsdk%2Feventhub%2Fevent-processor-host%2Fsamples%2FREADME.png)
|
|
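The deprecation notice printed by each sample points to `@azure/event-hubs` and `@azure/eventhubs-checkpointstore-blob` as the replacements. Purely for orientation, here is a minimal, hedged sketch of the equivalent receive-and-checkpoint flow with those packages; the connection strings, names, and checkpoint cadence are illustrative placeholders, not part of the removed samples.

```typescript
import { EventHubConsumerClient, earliestEventPosition } from "@azure/event-hubs";
import { ContainerClient } from "@azure/storage-blob";
import { BlobCheckpointStore } from "@azure/eventhubs-checkpointstore-blob";

// Placeholder values - fill these in before running.
const ehConnectionString = "";
const eventHubName = "";
const storageConnectionString = "";
const storageContainerName = "";

async function main(): Promise<void> {
  // The checkpoint store persists checkpoints and partition ownership in Azure Blob Storage,
  // taking over the role of the lease/checkpoint blobs that Event Processor Host managed.
  const containerClient = new ContainerClient(storageConnectionString, storageContainerName);
  await containerClient.createIfNotExists();
  const checkpointStore = new BlobCheckpointStore(containerClient);

  const consumerClient = new EventHubConsumerClient(
    EventHubConsumerClient.defaultConsumerGroupName,
    ehConnectionString,
    eventHubName,
    checkpointStore
  );

  // `subscribe` receives from all partitions and balances them across running clients,
  // much like running multiple EPH instances against the same storage container.
  const subscription = consumerClient.subscribe(
    {
      processEvents: async (events, context) => {
        for (const event of events) {
          console.log("Received event from partition '%s': %O", context.partitionId, event.body);
        }
        // Checkpoint the last event of the batch so a restart resumes from this point.
        if (events.length > 0) {
          await context.updateCheckpoint(events[events.length - 1]);
        }
      },
      processError: async (err, context) => {
        console.log("Error on partition '%s': %O", context.partitionId, err);
      }
    },
    { startPosition: earliestEventPosition }
  );

  // Receive for 90 seconds, then shut down.
  await new Promise((resolve) => setTimeout(resolve, 90000));
  await subscription.close();
  await consumerClient.close();
}

main().catch((err) => {
  console.log("Error occurred: ", err);
});
```

Starting a second copy of this program against the same storage container should split the partitions between the two clients, mirroring the multi-instance EPH sample above.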
@@ -1,40 +0,0 @@
|
|||
{
|
||||
"name": "@azure-samples/event-processor-host-ts",
|
||||
"private": true,
|
||||
"version": "0.1.0",
|
||||
"description": "Azure Event Process Host (Event Hubs) library samples for TypeScript",
|
||||
"engine": {
|
||||
"node": ">=12.0.0"
|
||||
},
|
||||
"repository": {
|
||||
"type": "git",
|
||||
"url": "git+https://github.com/Azure/azure-sdk-for-js.git"
|
||||
},
|
||||
"keywords": [
|
||||
"Azure",
|
||||
"Event Hubs",
|
||||
"Event Processor",
|
||||
"Node.js",
|
||||
"TypeScript"
|
||||
],
|
||||
"author": "Microsoft Corporation",
|
||||
"license": "MIT",
|
||||
"bugs": {
|
||||
"url": "https://github.com/Azure/azure-sdk-for-js/issues"
|
||||
},
|
||||
"homepage": "https://github.com/Azure/azure-sdk-for-js#readme",
|
||||
"sideEffects": false,
|
||||
"scripts": {
|
||||
"build": "tsc -p ."
|
||||
},
|
||||
"dependencies": {
|
||||
"@azure/event-processor-host": "latest",
|
||||
"dotenv": "^8.2.0",
|
||||
"tslib": "^1.9.3"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@types/node": "^12.12.17",
|
||||
"rimraf": "^3.0.0",
|
||||
"typescript": "^3.7.2"
|
||||
}
|
||||
}
|
|
@@ -1,117 +0,0 @@
|
|||
/*
|
||||
Copyright (c) Microsoft Corporation. All rights reserved.
|
||||
Licensed under the MIT Licence.
|
||||
|
||||
This sample demonstrates how to use Event Processor Host to receive events from all partitions
|
||||
of an IoTHub instance. It also shows how to checkpoint metadata for received events at regular
|
||||
intervals in an Azure Storage Blob.
|
||||
*/
|
||||
|
||||
import {
|
||||
EventProcessorHost,
|
||||
OnReceivedError,
|
||||
OnReceivedMessage,
|
||||
EventData,
|
||||
PartitionContext,
|
||||
delay
|
||||
} from "@azure/event-processor-host";
|
||||
|
||||
console.warn(
|
||||
"The package @azure/event-processor-host is deprecated in favor of @azure/event-hubs and @azure/eventhubs-checkpointstore-blob"
|
||||
);
|
||||
|
||||
// Define IoT Hub and storage connection strings here
|
||||
const iotConnectionString = "";
|
||||
const storageConnectionString = "";
|
||||
|
||||
// Use `createHostName` to create a unique name based on the given prefix so that a different storage container can be used on each run if needed.
|
||||
const storageContainerName = EventProcessorHost.createHostName("iothub-container");
|
||||
const ephName = "my-iothub-eph";
|
||||
|
||||
export async function main(): Promise<void> {
|
||||
// Start eph.
|
||||
const eph = await startEph(ephName);
|
||||
// Sleeping for 90 seconds. This will give time for eph to receive messages.
|
||||
await delay(90000);
|
||||
// After 90 seconds stop eph.
|
||||
await stopEph(eph);
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates an EPH with the given name and starts the EPH.
|
||||
* @param ephName The name of the EPH.
|
||||
* @returns {Promise<EventProcessorHost>} Promise<EventProcessorHost>
|
||||
*/
|
||||
async function startEph(ephName: string): Promise<EventProcessorHost> {
|
||||
// Create an Event Processor Host from an IotHub ConnectionString
|
||||
const eph = await EventProcessorHost.createFromIotHubConnectionString(
|
||||
ephName,
|
||||
storageConnectionString,
|
||||
storageContainerName,
|
||||
iotConnectionString,
|
||||
{
|
||||
onEphError: (error: any) => {
|
||||
console.log("[%s] Error: %O", ephName, error);
|
||||
}
|
||||
}
|
||||
);
|
||||
// Message handler
|
||||
const partionCount: { [x: string]: number } = {};
|
||||
const onMessage: OnReceivedMessage = async (context: PartitionContext, event: EventData) => {
|
||||
!partionCount[context.partitionId]
|
||||
? (partionCount[context.partitionId] = 1)
|
||||
: partionCount[context.partitionId]++;
|
||||
console.log(
|
||||
"[%s] %d - Received message from partition: '%s', offset: '%s'",
|
||||
ephName,
|
||||
partionCount[context.partitionId],
|
||||
context.partitionId,
|
||||
event.offset
|
||||
);
|
||||
// Checkpointing every 100th event received for a given partition.
|
||||
if (partionCount[context.partitionId] % 100 === 0) {
|
||||
try {
|
||||
console.log(
|
||||
"[%s] EPH is currently receiving messages from partitions: %O",
|
||||
ephName,
|
||||
eph.receivingFromPartitions
|
||||
);
|
||||
await context.checkpointFromEventData(event);
|
||||
console.log(
|
||||
"[%s] Successfully checkpointed message number %d",
|
||||
ephName,
|
||||
partionCount[context.partitionId]
|
||||
);
|
||||
} catch (err: any) {
|
||||
console.log(
|
||||
"[%s] An error occurred while checkpointing msg number %d: %O",
|
||||
ephName,
|
||||
partionCount[context.partitionId],
|
||||
err
|
||||
);
|
||||
}
|
||||
}
|
||||
};
|
||||
// Error handler
|
||||
const onError: OnReceivedError = (error: any) => {
|
||||
console.log("[%s] Received Error: %O", ephName, error);
|
||||
};
|
||||
console.log("Starting the EPH - %s", ephName);
|
||||
await eph.start(onMessage, onError);
|
||||
return eph;
|
||||
}
|
||||
|
||||
/**
|
||||
* Stops the given EventProcessorHost.
|
||||
* @param eph The event processor host.
|
||||
* @returns {Promise<void>} Promise<void>
|
||||
*/
|
||||
async function stopEph(eph: EventProcessorHost): Promise<void> {
|
||||
console.log("Stopping the EPH - '%s'.", eph.hostName);
|
||||
await eph.stop();
|
||||
console.log("Successfully stopped the EPH - '%s'.", eph.hostName);
|
||||
}
|
||||
|
||||
main().catch((err) => {
|
||||
console.log("Exiting from main() due to an error: %O.", err);
|
||||
});
|
|
@@ -1,134 +0,0 @@
|
|||
/*
|
||||
Copyright (c) Microsoft Corporation. All rights reserved.
|
||||
Licensed under the MIT Licence.
|
||||
|
||||
This sample demonstrates how to use multiple instances of Event Processor Host in the same process
|
||||
to receive events from all partitions. It also shows how to checkpoint metadata for received events
|
||||
at regular intervals in an Azure Storage Blob.
|
||||
|
||||
If your Event Hubs instance doesn't have any events, then please run the "sendBatch.ts" sample
|
||||
to populate Event Hubs before running this sample.
|
||||
|
||||
See https://docs.microsoft.com/en-us/azure/event-hubs/event-hubs-event-processor-host
|
||||
to learn about Event Processor Host.
|
||||
*/
|
||||
|
||||
import {
|
||||
EventProcessorHost,
|
||||
OnReceivedError,
|
||||
OnReceivedMessage,
|
||||
EventData,
|
||||
PartitionContext,
|
||||
delay
|
||||
} from "@azure/event-processor-host";
|
||||
|
||||
console.warn(
|
||||
"The package @azure/event-processor-host is deprecated in favor of @azure/event-hubs and @azure/eventhubs-checkpointstore-blob"
|
||||
);
|
||||
|
||||
// Define Storage and Event Hubs connection strings and related Event Hubs entity name here
|
||||
const ehConnectionString = "";
|
||||
const eventHubsName = "";
|
||||
const storageConnectionString = "";
|
||||
const ephName1 = "eph-1";
|
||||
const ephName2 = "eph-2";
|
||||
|
||||
// Use `createHostName` to create a unique name based on the given prefix so that a different storage container can be used on each run if needed.
|
||||
const storageContainerName = EventProcessorHost.createHostName("test-container");
|
||||
|
||||
export async function main(): Promise<void> {
|
||||
// Start eph-1.
|
||||
const eph1 = await startEph(ephName1);
|
||||
await delay(20000);
|
||||
// After 20 seconds start eph-2.
|
||||
const eph2 = await startEph(ephName2);
|
||||
await delay(90000);
|
||||
// Now, load will be evenly balanced between eph-1 and eph-2. After 90 seconds stop eph-1.
|
||||
await stopEph(eph1);
|
||||
await delay(40000);
|
||||
// Now, eph-2 will regain access to all the partitions and will be stopped after 40 seconds.
|
||||
await stopEph(eph2);
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates an EPH with the given name and starts the EPH.
|
||||
* @param ephName The name of the EPH.
|
||||
* @returns {Promise<EventProcessorHost>} Promise<EventProcessorHost>
|
||||
*/
|
||||
async function startEph(ephName: string): Promise<EventProcessorHost> {
|
||||
// Create the Event Processor Host
|
||||
const eph = EventProcessorHost.createFromConnectionString(
|
||||
ephName,
|
||||
storageConnectionString!,
|
||||
storageContainerName,
|
||||
ehConnectionString!,
|
||||
{
|
||||
eventHubPath: eventHubsName,
|
||||
// This method will provide errors that occur during lease and partition management. The
|
||||
// errors that occur while receiving messages will be provided in the onError handler
|
||||
// provided in the eph.start() method.
|
||||
onEphError: (error: any) => {
|
||||
console.log("[%s] Error: %O", ephName, error);
|
||||
}
|
||||
}
|
||||
);
|
||||
// Message handler
|
||||
const partionCount: { [x: string]: number } = {};
|
||||
const onMessage: OnReceivedMessage = async (context: PartitionContext, event: EventData) => {
|
||||
!partionCount[context.partitionId]
|
||||
? (partionCount[context.partitionId] = 1)
|
||||
: partionCount[context.partitionId]++;
|
||||
console.log(
|
||||
"[%s] %d - Received message from partition: '%s', offset: '%s'",
|
||||
ephName,
|
||||
partionCount[context.partitionId],
|
||||
context.partitionId,
|
||||
event.offset
|
||||
);
|
||||
// Checkpointing every 100th event
|
||||
if (partionCount[context.partitionId] % 100 === 0) {
|
||||
try {
|
||||
console.log(
|
||||
"[%s] EPH is currently receiving messages from partitions: %O",
|
||||
ephName,
|
||||
eph.receivingFromPartitions
|
||||
);
|
||||
await context.checkpointFromEventData(event);
|
||||
console.log(
|
||||
"[%s] Successfully checkpointed message number %d",
|
||||
ephName,
|
||||
partionCount[context.partitionId]
|
||||
);
|
||||
} catch (err: any) {
|
||||
console.log(
|
||||
"[%s] An error occurred while checkpointing msg number %d: %O",
|
||||
ephName,
|
||||
partionCount[context.partitionId],
|
||||
err
|
||||
);
|
||||
}
|
||||
}
|
||||
};
|
||||
// Error handler
|
||||
const onError: OnReceivedError = (error: any) => {
|
||||
console.log("[%s] Received Error: %O", ephName, error);
|
||||
};
|
||||
console.log("Starting the EPH - %s", ephName);
|
||||
await eph.start(onMessage, onError);
|
||||
return eph;
|
||||
}
|
||||
|
||||
/**
|
||||
* Stops the given EventProcessorHost.
|
||||
* @param eph The event processor host.
|
||||
* @returns {Promise<void>} Promise<void>
|
||||
*/
|
||||
async function stopEph(eph: EventProcessorHost): Promise<void> {
|
||||
console.log("Stopping the EPH - '%s'.", eph.hostName);
|
||||
await eph.stop();
|
||||
console.log("Successfully stopped the EPH - '%s'.", eph.hostName);
|
||||
}
|
||||
|
||||
main().catch((err) => {
|
||||
console.log("Error occurred: ", err);
|
||||
});
|
|
@@ -1,8 +0,0 @@
|
|||
/*
|
||||
Copyright (c) Microsoft Corporation. All rights reserved.
|
||||
Licensed under the MIT Licence.
|
||||
|
||||
The sample to populate your Event Hubs instance with events before you try
|
||||
any of the other samples that show how to receive the events from Event Hubs is moved to
|
||||
https://github.com/Azure/azure-sdk-for-js/blob/main/sdk/eventhub/event-hubs/samples/typescript/src/sendEvents.ts
|
||||
*/
|
|
@@ -1,127 +0,0 @@
|
|||
/*
|
||||
Copyright (c) Microsoft Corporation. All rights reserved.
|
||||
Licensed under the MIT Licence.
|
||||
|
||||
This sample demonstrates how to use Event Processor Host to receive events from all partitions
|
||||
of an Event Hub instance. It also shows how to checkpoint metadata for received events at regular
|
||||
intervals in an Azure Storage Blob.
|
||||
|
||||
If your Event Hubs instance doesn't have any events, then please run the "sendBatch.ts" sample
|
||||
to populate Event Hubs before running this sample.
|
||||
|
||||
See https://docs.microsoft.com/en-us/azure/event-hubs/event-hubs-event-processor-host
|
||||
to learn about Event Processor Host.
|
||||
*/
|
||||
|
||||
import {
|
||||
EventProcessorHost,
|
||||
OnReceivedError,
|
||||
OnReceivedMessage,
|
||||
EventData,
|
||||
PartitionContext,
|
||||
delay
|
||||
} from "@azure/event-processor-host";
|
||||
|
||||
console.warn(
|
||||
"The package @azure/event-processor-host is deprecated in favor of @azure/event-hubs and @azure/eventhubs-checkpointstore-blob"
|
||||
);
|
||||
|
||||
// Define storage connection string and Event Hubs connection string and related entity name here
|
||||
const ehConnectionString = "";
|
||||
const eventHubsName = "";
|
||||
const storageConnectionString = "";
|
||||
|
||||
// If you want to create a unique storage container name for every run, use the `createHostName` function; otherwise,
|
||||
// provide the storage container name here.
|
||||
// const storageContainerName = "my-container";
|
||||
const storageContainerName = EventProcessorHost.createHostName("test-container");
|
||||
const ephName = "my-eph";
|
||||
|
||||
export async function main(): Promise<void> {
|
||||
// Start eph.
|
||||
const eph = await startEph(ephName);
|
||||
// Sleeping for 90 seconds. This will give time for eph to receive messages.
|
||||
await delay(90000);
|
||||
// After 90 seconds stop eph.
|
||||
await stopEph(eph);
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates an EPH with the given name and starts the EPH.
|
||||
* @param ephName The name of the EPH.
|
||||
* @returns {Promise<EventProcessorHost>} Promise<EventProcessorHost>
|
||||
*/
|
||||
async function startEph(ephName: string): Promise<EventProcessorHost> {
|
||||
// Create the Event Processor Host
|
||||
const eph = EventProcessorHost.createFromConnectionString(
|
||||
EventProcessorHost.createHostName(ephName),
|
||||
storageConnectionString!,
|
||||
storageContainerName,
|
||||
ehConnectionString!,
|
||||
{
|
||||
eventHubPath: eventHubsName,
|
||||
onEphError: (error: any) => {
|
||||
console.log("[%s] Error: %O", ephName, error);
|
||||
}
|
||||
}
|
||||
);
|
||||
// Message handler
|
||||
const partionCount: { [x: string]: number } = {};
|
||||
const onMessage: OnReceivedMessage = async (context: PartitionContext, event: EventData) => {
|
||||
!partionCount[context.partitionId]
|
||||
? (partionCount[context.partitionId] = 1)
|
||||
: partionCount[context.partitionId]++;
|
||||
console.log(
|
||||
"[%s] %d - Received message from partition: '%s', offset: '%s'",
|
||||
ephName,
|
||||
partionCount[context.partitionId],
|
||||
context.partitionId,
|
||||
event.offset
|
||||
);
|
||||
// Checkpointing every 100th event received for a given partition.
|
||||
if (partionCount[context.partitionId] % 100 === 0) {
|
||||
try {
|
||||
console.log(
|
||||
"[%s] EPH is currently receiving messages from partitions: %O",
|
||||
ephName,
|
||||
eph.receivingFromPartitions
|
||||
);
|
||||
await context.checkpointFromEventData(event);
|
||||
console.log(
|
||||
"[%s] Successfully checkpointed message number %d",
|
||||
ephName,
|
||||
partionCount[context.partitionId]
|
||||
);
|
||||
} catch (err: any) {
|
||||
console.log(
|
||||
"[%s] An error occurred while checkpointing msg number %d: %O",
|
||||
ephName,
|
||||
partionCount[context.partitionId],
|
||||
err
|
||||
);
|
||||
}
|
||||
}
|
||||
};
|
||||
// Error handler
|
||||
const onError: OnReceivedError = (error: any) => {
|
||||
console.log("[%s] Received Error: %O", ephName, error);
|
||||
};
|
||||
console.log("Starting the EPH - %s", ephName);
|
||||
await eph.start(onMessage, onError);
|
||||
return eph;
|
||||
}
|
||||
|
||||
/**
|
||||
* Stops the given EventProcessorHost.
|
||||
* @param eph The event processor host.
|
||||
* @returns {Promise<void>} Promise<void>
|
||||
*/
|
||||
async function stopEph(eph: EventProcessorHost): Promise<void> {
|
||||
console.log("Stopping the EPH - '%s'.", eph.hostName);
|
||||
await eph.stop();
|
||||
console.log("Successfully stopped the EPH - '%s'.", eph.hostName);
|
||||
}
|
||||
|
||||
main().catch((err) => {
|
||||
console.log("Error occurred: ", err);
|
||||
});
|
|
@@ -1,64 +0,0 @@
|
|||
/*
|
||||
Copyright (c) Microsoft Corporation. All rights reserved.
|
||||
Licensed under the MIT Licence.
|
||||
|
||||
This sample demonstrates how to use WebSockets to enable Event Processor host to work over
|
||||
an HTTP proxy and in environments where the standard AMQP port 5671 is blocked.
|
||||
|
||||
This sample uses 2 external libraries
|
||||
- The `ws` library to provide a WebSocket implementation to the EPH library.
|
||||
- The `https-proxy-agent` to enable the `ws` library to work with a proxy server.
|
||||
*/
|
||||
|
||||
import { EventProcessorHost } from "@azure/event-processor-host";
|
||||
import WebSocket from "ws";
|
||||
|
||||
console.warn(
|
||||
"The package @azure/event-processor-host is deprecated in favor of @azure/event-hubs and @azure/eventhubs-checkpointstore-blob"
|
||||
);
|
||||
|
||||
const url = require("url");
|
||||
const httpsProxyAgent = require("https-proxy-agent");
|
||||
|
||||
// Define storage connection string and Event Hubs connection string and related entity name here
|
||||
const ehConnectionString = "";
|
||||
const eventHubsName = "";
|
||||
const storageConnectionString = "";
|
||||
|
||||
// If you want to create a unique storage container name for every run, use the `createHostName` function; otherwise,
|
||||
// provide the storage container name here.
|
||||
// const storageContainerName = "my-container";
|
||||
const storageContainerName = EventProcessorHost.createHostName("test-container");
|
||||
const ephName = "my-eph";
|
||||
|
||||
// Create an instance of the `HttpsProxyAgent` class with the proxy server information like
|
||||
// proxy url, username and password
|
||||
// Skip this section if you are not behind a proxy server
|
||||
const urlParts = url.parse("http://localhost:3128");
|
||||
urlParts.auth = "username:password"; // Skip this if proxy server does not need authentication.
|
||||
const proxyAgent = new httpsProxyAgent(urlParts);
|
||||
|
||||
export async function main(): Promise<void> {
|
||||
const eph = EventProcessorHost.createFromConnectionString(
|
||||
EventProcessorHost.createHostName(ephName),
|
||||
storageConnectionString,
|
||||
storageContainerName,
|
||||
ehConnectionString,
|
||||
{
|
||||
eventHubPath: eventHubsName,
|
||||
onEphError: (error: any) => {
|
||||
console.log("[%s] Error: %O", ephName, error);
|
||||
},
|
||||
webSocket: WebSocket,
|
||||
webSocketConstructorOptions: { agent: proxyAgent }
|
||||
}
|
||||
);
|
||||
/*
|
||||
Refer to other samples, and place your code here to receive events
|
||||
*/
|
||||
await eph.stop();
|
||||
}
|
||||
|
||||
main().catch((err) => {
|
||||
console.log("Error occurred: ", err);
|
||||
});
|
|
@@ -1,14 +0,0 @@
|
|||
{
|
||||
"compilerOptions": {
|
||||
"module": "commonjs",
|
||||
"moduleResolution": "node",
|
||||
|
||||
"allowSyntheticDefaultImports": true,
|
||||
"esModuleInterop": true,
|
||||
|
||||
"outDir": "dist",
|
||||
"rootDir": "src"
|
||||
},
|
||||
"include": ["src/**.ts"],
|
||||
"exclude": ["node_modules"]
|
||||
}
|
|
@@ -1,131 +0,0 @@
|
|||
// Copyright (c) Microsoft Corporation. All rights reserved.
|
||||
// Licensed under the MIT License.
|
||||
|
||||
import { BlobService, CreateContainerResult } from "./blobService";
|
||||
import { BlobService as StorageBlobService } from "azure-storage";
|
||||
import { Dictionary } from "@azure/event-hubs";
|
||||
|
||||
/**
|
||||
* @hidden
|
||||
*/
|
||||
export class AzureBlob {
|
||||
private _blobService: BlobService;
|
||||
private _containerName: string;
|
||||
private _blobPath: string;
|
||||
private _containerAndBlobExist: boolean = false;
|
||||
|
||||
constructor(
|
||||
hostName: string,
|
||||
connectionString: string,
|
||||
containerName: string,
|
||||
blob: string,
|
||||
blobService?: BlobService
|
||||
) {
|
||||
this._blobPath = blob;
|
||||
this._containerName = containerName;
|
||||
this._blobService = blobService || BlobService.create(hostName, connectionString);
|
||||
}
|
||||
|
||||
async ensureContainerAndBlobExist(): Promise<void> {
|
||||
try {
|
||||
if (!this._containerAndBlobExist) {
|
||||
await this._blobService.ensureContainerAndBlobExist(this._containerName, this._blobPath);
|
||||
this._containerAndBlobExist = true;
|
||||
}
|
||||
} catch (err: any) {
|
||||
const msg =
|
||||
`An error occurred while ensuring that the container and blob exist. ` +
|
||||
`It is: \n${err ? err.stack : JSON.stringify(err)}`;
|
||||
throw new Error(msg);
|
||||
}
|
||||
}
|
||||
|
||||
ensureContainerExists(): Promise<CreateContainerResult> {
|
||||
return this._blobService.ensureContainerExists(this._containerName);
|
||||
}
|
||||
|
||||
doesContainerExist(): Promise<boolean> {
|
||||
return this._blobService.doesContainerExist(this._containerName);
|
||||
}
|
||||
|
||||
doesBlobExist(): Promise<boolean> {
|
||||
return this._blobService.doesBlobExist(this._containerName, this._blobPath);
|
||||
}
|
||||
|
||||
ensureBlobExists(text: string): Promise<void> {
|
||||
return this._blobService.ensureBlobExists(this._containerName, this._blobPath, text);
|
||||
}
|
||||
|
||||
renewLease(
|
||||
leaseId: string,
|
||||
options: StorageBlobService.LeaseRequestOptions
|
||||
): Promise<StorageBlobService.LeaseResult> {
|
||||
return this._blobService.renewLease(this._containerName, this._blobPath, leaseId, options);
|
||||
}
|
||||
|
||||
releaseLease(
|
||||
leaseId: string,
|
||||
options?: StorageBlobService.LeaseRequestOptions
|
||||
): Promise<StorageBlobService.LeaseResult> {
|
||||
return this._blobService.releaseLease(this._containerName, this._blobPath, leaseId, options);
|
||||
}
|
||||
|
||||
updateContent(
|
||||
text: string,
|
||||
options?: StorageBlobService.CreateBlobRequestOptions
|
||||
): Promise<StorageBlobService.BlobResult> {
|
||||
return this._blobService.updateContent(this._containerName, this._blobPath, text, options);
|
||||
}
|
||||
|
||||
getContent(options?: StorageBlobService.GetBlobRequestOptions): Promise<string> {
|
||||
return this._blobService.getContent(this._containerName, this._blobPath, options);
|
||||
}
|
||||
|
||||
changeLease(
|
||||
currentLeaseId: string,
|
||||
proposedLeaseId: string
|
||||
): Promise<StorageBlobService.LeaseResult> {
|
||||
return this._blobService.changeLease(
|
||||
this._containerName,
|
||||
this._blobPath,
|
||||
currentLeaseId,
|
||||
proposedLeaseId
|
||||
);
|
||||
}
|
||||
|
||||
getBlobProperties(): Promise<StorageBlobService.BlobResult> {
|
||||
return this._blobService.getBlobProperties(this._containerName, this._blobPath);
|
||||
}
|
||||
|
||||
getBlobMetadata(): Promise<StorageBlobService.BlobResult> {
|
||||
return this._blobService.getBlobMetadata(this._containerName, this._blobPath);
|
||||
}
|
||||
|
||||
setBlobMetadata(
|
||||
metadata: Dictionary<string>,
|
||||
options?: StorageBlobService.BlobRequestOptions
|
||||
): Promise<StorageBlobService.BlobResult> {
|
||||
return this._blobService.setBlobMetadata(
|
||||
this._containerName,
|
||||
this._blobPath,
|
||||
metadata,
|
||||
options
|
||||
);
|
||||
}
|
||||
|
||||
listBlobsSegmented(
|
||||
options?: StorageBlobService.ListBlobsSegmentedRequestOptions
|
||||
): Promise<StorageBlobService.ListBlobsResult> {
|
||||
return this._blobService.listBlobsSegmented(this._containerName, options);
|
||||
}
|
||||
|
||||
acquireLease(
|
||||
options: StorageBlobService.AcquireLeaseRequestOptions
|
||||
): Promise<StorageBlobService.LeaseResult> {
|
||||
return this._blobService.acquireLease(this._containerName, this._blobPath, options);
|
||||
}
|
||||
|
||||
deleteBlobIfExists(): Promise<void> {
|
||||
return this._blobService.deleteBlobIfExists(this._containerName, this._blobPath);
|
||||
}
|
||||
}
|
|
@@ -1,112 +0,0 @@
|
|||
// Copyright (c) Microsoft Corporation. All rights reserved.
|
||||
// Licensed under the MIT License.
|
||||
|
||||
import { CompleteLeaseInfo, CompleteLease } from "./completeLease";
|
||||
import { AzureBlob } from "./azureBlob";
|
||||
import * as log from "./log";
|
||||
|
||||
/**
|
||||
* Describes the properties of a lease.
|
||||
* @interface LeaseInfo
|
||||
*/
|
||||
export interface LeaseInfo extends CompleteLeaseInfo {
|
||||
/**
|
||||
* @property {string} token The lease token that manages concurrency between hosts. You can use
|
||||
* this token to guarantee single access to any resource needed by the EPH.
|
||||
*/
|
||||
token: string;
|
||||
/**
|
||||
* @property {number} sequenceNumber The sequence number of the event to be checked in.
|
||||
*/
|
||||
sequenceNumber: number;
|
||||
/**
|
||||
* @property {string} offset The offset of the event to be checked in.
|
||||
*/
|
||||
offset?: string;
|
||||
}
|
||||
|
||||
/**
|
||||
* Describes the properties of a lease representing an Azure Blob.
|
||||
* @interface AzureBlobLeaseInfo
|
||||
*/
|
||||
export interface AzureBlobLeaseInfo extends LeaseInfo {
|
||||
/**
|
||||
* @property {AzureBlob} blob Reference to the azure blob.
|
||||
*/
|
||||
blob: AzureBlob;
|
||||
}
|
||||
|
||||
/**
|
||||
* Describes the lease used with an Azure Blob for storing the checkpoint information.
|
||||
*/
|
||||
export class AzureBlobLease extends CompleteLease implements AzureBlobLeaseInfo {
|
||||
// It is important to keep the offset optional. While getting the startingCheckpoint in
|
||||
// PartitionContext.getInitialOffset(), we internally call leaseManager.getCheckpoint() which will
|
||||
// return undefined if the offset is undefined. At that time, if the user had provided
|
||||
// initialOffset using the EPHOptions then that will be used. Thus not initializing the offset
|
||||
// with a default value of "-1" is crucial to ensure that the user-provided initial offset is honored
|
||||
// when a new lease container is used.
|
||||
/**
|
||||
* @property {string} offset The offset of the event to be checked in.
|
||||
*/
|
||||
offset?: string;
|
||||
/**
|
||||
* @property {number} sequenceNumber The sequence number of the event to be checked in.
|
||||
*/
|
||||
sequenceNumber: number;
|
||||
/**
|
||||
* @property {string} token The lease token that manages concurrency between hosts. You can use
|
||||
* this token to guarantee single access to any resource needed by the EPH.
|
||||
*/
|
||||
token: string;
|
||||
/**
|
||||
* @property {AzureBlob} blob Reference to the azure blob.
|
||||
*/
|
||||
blob: AzureBlob;
|
||||
|
||||
constructor(info: AzureBlobLeaseInfo) {
|
||||
super(info);
|
||||
this.offset = info.offset;
|
||||
this.sequenceNumber = info.sequenceNumber != undefined ? info.sequenceNumber : 0;
|
||||
this.token = info.token || "";
|
||||
this.blob = info.blob;
|
||||
}
|
||||
|
||||
/**
|
||||
* Gets the lease information.
|
||||
* @returns {LeaseInfo} LeaseInfo.
|
||||
*/
|
||||
getInfo(): LeaseInfo {
|
||||
const info = super.getInfo() as LeaseInfo;
|
||||
info.sequenceNumber = this.sequenceNumber;
|
||||
info.token = this.token;
|
||||
info.offset = this.offset;
|
||||
log.azurebloblease("[%s] [%s] Lease info is: %o", this.owner, this.partitionId, info);
|
||||
return info as LeaseInfo;
|
||||
}
|
||||
|
||||
/**
|
||||
* Serializes the lease information.
|
||||
* @returns {string} string The serialized lease info.
|
||||
*/
|
||||
serialize(): string {
|
||||
return JSON.stringify(this.getInfo());
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates a Lease for the given partitionId.
|
||||
* @param {string} id The partitionId for which the lease needs to be created.
|
||||
* @param {AzureBlob} blob The azure blob reference
|
||||
* @returns {CompleteLease} Lease.
|
||||
*/
|
||||
static createFromPartitionId(id: string, blob: AzureBlob): AzureBlobLease {
|
||||
return new AzureBlobLease({
|
||||
partitionId: id,
|
||||
epoch: -1,
|
||||
sequenceNumber: 0,
|
||||
owner: "",
|
||||
token: "",
|
||||
blob: blob
|
||||
});
|
||||
}
|
||||
}
|
|
@@ -1,572 +0,0 @@
|
|||
// Copyright (c) Microsoft Corporation. All rights reserved.
|
||||
// Licensed under the MIT License.
|
||||
|
||||
import { v4 as uuid } from "uuid";
|
||||
import { CheckpointInfo } from "./checkpointInfo";
|
||||
import { CheckpointManager } from "./checkpointManager";
|
||||
import { LeaseManager } from "./leaseManager";
|
||||
import { BaseHostContext } from "./hostContext";
|
||||
import { AzureBlob } from "./azureBlob";
|
||||
import { validateType, getStorageError, EPHActionStrings } from "./util/utils";
|
||||
import { CompleteLease } from "./completeLease";
|
||||
import { AzureBlobLease, AzureBlobLeaseInfo, LeaseInfo } from "./azureBlobLease";
|
||||
import { BlobService as StorageBlobService, StorageError } from "azure-storage";
|
||||
import { LeaseState } from "./blobService";
|
||||
import { BaseLease, BaseLeaseInfo } from "./baseLease";
|
||||
import { EPHDiagnosticInfo } from "./modelTypes";
|
||||
import {
|
||||
maximumExecutionTimeInMsForLeaseRenewal,
|
||||
metadataOwnerName,
|
||||
leaseLost,
|
||||
leaseIdMismatchWithLeaseOperation,
|
||||
leaseIdMismatchWithBlobOperation
|
||||
} from "./util/constants";
|
||||
import * as log from "./log";
|
||||
import path from "path";
|
||||
|
||||
/**
|
||||
* @hidden
|
||||
*/
|
||||
enum UploadActivity {
|
||||
create = "create",
|
||||
acquire = "acquire",
|
||||
release = "release",
|
||||
update = "update"
|
||||
}
|
||||
|
||||
/**
|
||||
* @hidden
|
||||
*/
|
||||
export class AzureStorageCheckpointLeaseManager implements CheckpointManager, LeaseManager {
|
||||
leaseRenewInterval: number;
|
||||
leaseDuration: number;
|
||||
private _context: BaseHostContext;
|
||||
private _latestCheckpoint: Map<string, CheckpointInfo> = new Map<string, CheckpointInfo>();
|
||||
|
||||
constructor(context: BaseHostContext) {
|
||||
this._context = context;
|
||||
this.leaseDuration = this._context.leaseDuration;
|
||||
this.leaseRenewInterval = this._context.leaseRenewInterval;
|
||||
}
|
||||
|
||||
getAzureBlob(partitionId: string): AzureBlob {
|
||||
validateType("partitionId", partitionId, true, "string");
|
||||
let result = this._context.blobReferenceByPartition[partitionId];
|
||||
if (!result) {
|
||||
const blobPath = `${this._context.composedBlobPrefix}${partitionId}`;
|
||||
result = new AzureBlob(
|
||||
this._context.hostName,
|
||||
this._context.storageConnectionString!,
|
||||
this._context.storageContainerName!,
|
||||
blobPath,
|
||||
this._context.blobService
|
||||
);
|
||||
this._context.blobReferenceByPartition[partitionId] = result;
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
async downloadLease(partitionId: string, blob: AzureBlob): Promise<AzureBlobLease> {
|
||||
try {
|
||||
const text: string = await blob.getContent();
|
||||
const jsonLease: LeaseInfo = JSON.parse(text);
|
||||
const blobLeaseInfo: AzureBlobLeaseInfo = {
|
||||
...jsonLease,
|
||||
blob: blob
|
||||
};
|
||||
return new AzureBlobLease(blobLeaseInfo);
|
||||
} catch (err: any) {
|
||||
const msg =
|
||||
`An error occurred while downloading the lease for blobPath ` +
|
||||
`"${this._context.composedBlobPrefix}${partitionId}". It is: \n` +
|
||||
`${err ? err.stack : JSON.stringify(err)}`;
|
||||
log.error(this._context.withHost(msg));
|
||||
throw new Error(msg);
|
||||
}
|
||||
}
|
||||
|
||||
leaseStoreExists(): Promise<boolean> {
|
||||
return this._context.blobService!.doesContainerExist(this._context.storageContainerName!);
|
||||
}
|
||||
|
||||
async createLeaseStoreIfNotExists(): Promise<void> {
|
||||
await this._context.blobService!.ensureContainerExists(this._context.storageContainerName!);
|
||||
return;
|
||||
}
|
||||
|
||||
async deleteLeaseStore(): Promise<void> {
|
||||
const blobService = this._context.blobService;
|
||||
const storageContainerName = this._context.storageContainerName!;
|
||||
try {
|
||||
if (blobService) {
|
||||
const listResult = await blobService.listBlobsSegmented(storageContainerName);
|
||||
const deleteBlobs: Promise<void>[] = [];
|
||||
for (const blob of listResult.entries) {
|
||||
deleteBlobs.push(blobService.deleteBlobIfExists(storageContainerName, blob.name));
|
||||
}
|
||||
await Promise.all(deleteBlobs);
|
||||
await blobService.deleteContainerIfExists(storageContainerName);
|
||||
} else {
|
||||
throw new Error(
|
||||
"'blobService' is not defined in the 'hostContext', hence cannot " + "list all the blobs."
|
||||
);
|
||||
}
|
||||
} catch (err: any) {
|
||||
const msg =
|
||||
`An error occurred while deleting the lease store '${storageContainerName}': \n` +
|
||||
`${err ? err.stack : JSON.stringify(err)}`;
|
||||
log.error(this._context.withHost(msg));
|
||||
const info: EPHDiagnosticInfo = {
|
||||
error: new Error(msg),
|
||||
hostName: this._context.hostName,
|
||||
partitionId: "N/A",
|
||||
action: EPHActionStrings.deletingLeaseStore
|
||||
};
|
||||
this._context.onEphError(info);
|
||||
}
|
||||
}
|
||||
|
||||
async getLease(partitionId: string): Promise<AzureBlobLease | undefined> {
|
||||
validateType("partitionId", partitionId, true, "string");
|
||||
let result: AzureBlobLease | undefined;
|
||||
const blob = this.getAzureBlob(partitionId);
|
||||
const withHostAndPartition = this._context.withHostAndPartition;
|
||||
log.checkpointLeaseMgr(withHostAndPartition(partitionId, "Getting lease."));
|
||||
try {
|
||||
if (await blob.doesBlobExist()) {
|
||||
result = await this.downloadLease(partitionId, blob);
|
||||
}
|
||||
} catch (err: any) {
|
||||
const msg =
|
||||
`An error occurred while getting lease for partitionId '${partitionId}': \n` +
|
||||
`${err ? err.stack : JSON.stringify(err)}`;
|
||||
log.error(withHostAndPartition(partitionId, msg));
|
||||
throw new Error(msg);
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
async getAllLeases(): Promise<BaseLease[]> {
|
||||
const result: BaseLease[] = [];
|
||||
const withHost = this._context.withHost;
|
||||
const withHostAndPartition = this._context.withHostAndPartition;
|
||||
try {
|
||||
const leaseBlobs: StorageBlobService.BlobResult[] = await this._listBlobs();
|
||||
for (const lbi of leaseBlobs) {
|
||||
const name = lbi.name;
|
||||
const partitionId = path.basename(name);
|
||||
const leaseInfo: BaseLeaseInfo = {
|
||||
partitionId: partitionId,
|
||||
owner: lbi.metadata![metadataOwnerName]
|
||||
};
|
||||
const lease = new BaseLease(leaseInfo);
|
||||
lease.isOwned = (lbi.lease && lbi.lease.state === LeaseState.leased) || false;
|
||||
result.push(lease);
|
||||
log.checkpointLeaseMgr(
|
||||
withHostAndPartition(
|
||||
partitionId,
|
||||
"BlobResult item from the list " + "of blobs is: name: %s, lease: %o, metadata: %o."
|
||||
),
|
||||
lbi.name,
|
||||
lbi.lease,
|
||||
lbi.metadata
|
||||
);
|
||||
}
|
||||
} catch (err: any) {
|
||||
const info: EPHDiagnosticInfo = {
|
||||
error: err,
|
||||
action: EPHActionStrings.gettingAllLeases,
|
||||
hostName: this._context.hostName,
|
||||
partitionId: "N/A"
|
||||
};
|
||||
this._context.onEphError(info);
|
||||
}
|
||||
log.checkpointLeaseMgr(withHost("Number of leases: %d"), result.length);
|
||||
return result;
|
||||
}
|
||||
|
||||
async createAllLeasesIfNotExists(partitionIds: string[]): Promise<void> {
|
||||
try {
|
||||
const withHost = this._context.withHost;
|
||||
const leaseBlobs = await this._listBlobs();
|
||||
if (leaseBlobs.length === partitionIds.length) {
|
||||
log.checkpointLeaseMgr(
|
||||
withHost(
|
||||
"Number of blobs %d === Number of partitionIds %d. " + "Hence no need to create leases."
|
||||
),
|
||||
leaseBlobs.length,
|
||||
partitionIds.length
|
||||
);
|
||||
return;
|
||||
} else {
|
||||
const createPromises: Promise<CompleteLease>[] = [];
|
||||
for (const id of partitionIds) {
|
||||
const createPromise: Promise<CompleteLease> = this.createLeaseIfNotExists(id);
|
||||
createPromises.push(createPromise);
|
||||
}
|
||||
await Promise.all(createPromises);
|
||||
}
|
||||
} catch (err: any) {
|
||||
const info: EPHDiagnosticInfo = {
|
||||
error: err,
|
||||
action: EPHActionStrings.creatingAllLeases,
|
||||
hostName: this._context.hostName,
|
||||
partitionId: "N/A"
|
||||
};
|
||||
this._context.onEphError(info);
|
||||
throw err;
|
||||
}
|
||||
}
|
||||
|
||||
async createLeaseIfNotExists(partitionId: string): Promise<CompleteLease> {
|
||||
validateType("partitionId", partitionId, true, "string");
|
||||
const withHostAndPartition = this._context.withHostAndPartition;
|
||||
log.checkpointLeaseMgr(withHostAndPartition(partitionId, "createLeaseIfNotExists."));
|
||||
let returnLease: AzureBlobLease;
|
||||
try {
|
||||
const blob = this.getAzureBlob(partitionId);
|
||||
returnLease = AzureBlobLease.createFromPartitionId(partitionId, blob);
|
||||
await this._uploadLease(returnLease, UploadActivity.create);
|
||||
} catch (error: any) {
|
||||
const statusCode = (error as StorageError).statusCode;
|
||||
const code = (error as StorageError).code;
|
||||
// https://docs.microsoft.com/en-us/rest/api/storageservices/blob-service-error-codes
|
||||
// LeaseIdMissing || BlobAlreadyExists
|
||||
if (
|
||||
(statusCode === 412 && code && code.toLowerCase() === "leaseidmissing") ||
|
||||
(statusCode === 409 && code && code.toLowerCase() === "blobalreadyexists")
|
||||
) {
|
||||
returnLease = <AzureBlobLease>await this.getLease(partitionId);
|
||||
} else {
|
||||
log.error(
|
||||
withHostAndPartition(
|
||||
partitionId,
|
||||
"An error occurred while creating lease if " + "it does not exist: %O."
|
||||
),
|
||||
error
|
||||
);
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
return returnLease;
|
||||
}
|
||||
|
||||
async deleteLease(lease: AzureBlobLease): Promise<void> {
|
||||
try {
|
||||
return await lease.blob.deleteBlobIfExists();
|
||||
} catch (err: any) {
|
||||
const msg =
|
||||
`An error occurred while deleting the lease for blobPath ` +
|
||||
`"${this._context.composedBlobPrefix}${lease.partitionId}". It is: \n` +
|
||||
`${err ? err.stack : JSON.stringify(err)}`;
|
||||
log.error(this._context.withHostAndPartition(lease, msg));
|
||||
throw new Error(msg);
|
||||
}
|
||||
}
|
||||
|
||||
async acquireLease(lease: AzureBlobLease): Promise<boolean> {
|
||||
let result: boolean = true;
|
||||
const newLeaseId: string = uuid();
|
||||
const withHostAndPartition = this._context.withHostAndPartition;
|
||||
try {
|
||||
// TODO: We are initializing newToken to empty string.
|
||||
let newToken: string = "";
|
||||
const blobResult = await lease.blob.getBlobProperties();
|
||||
if (blobResult.lease && blobResult.lease.state && blobResult.lease.state === "leased") {
|
||||
if (!lease.token) {
|
||||
// We reach here in a race condition: when this instance of EventProcessorHost scanned the
|
||||
// lease blobs, this partition was unowned (token is empty) but between then and now, another
|
||||
// instance of EPH has established a lease (getLeaseState() is LEASED). We normally enforce
|
||||
// that we only steal the lease if it is still owned by the instance which owned it when we
|
||||
// scanned, but we can't do that when we don't know who owns it. The safest thing to do is just
|
||||
// fail the acquisition. If that means that one EPH instance gets more partitions than it should,
|
||||
// rebalancing will take care of that quickly enough.
|
||||
return false;
|
||||
}
|
||||
log.checkpointLeaseMgr(
|
||||
withHostAndPartition(
|
||||
lease,
|
||||
"Need to change lease '%s' -> '%s' " + "for partitionId '%s'."
|
||||
),
|
||||
lease.token,
|
||||
newLeaseId,
|
||||
lease.partitionId
|
||||
);
|
||||
const changeLeaseResult = await lease.blob.changeLease(lease.token, newLeaseId);
|
||||
newToken = changeLeaseResult.id;
|
||||
} else {
|
||||
try {
|
||||
const options: StorageBlobService.AcquireLeaseRequestOptions = {
|
||||
leaseDuration: this.leaseDuration,
|
||||
proposedLeaseId: newLeaseId
|
||||
};
|
||||
const acquireResult = await lease.blob.acquireLease(options);
|
||||
newToken = acquireResult.id;
|
||||
} catch (err: any) {
|
||||
const statusCode = err && (err as StorageError).statusCode;
|
||||
const code = err && (err as StorageError).code;
|
||||
if (statusCode === 409 && code && code.toLowerCase() === "leasealreadypresent") {
|
||||
// Either some other host grabbed the lease or checkpoint call renewed it.
|
||||
return false;
|
||||
}
|
||||
}
|
||||
}
|
||||
lease.token = newToken;
|
||||
lease.owner = this._context.hostName;
|
||||
// Increment epoch each time lease is acquired or stolen by a new host
|
||||
lease.incrementEpoch();
|
||||
await this._uploadLease(lease, UploadActivity.acquire);
|
||||
} catch (err: any) {
|
||||
if (this._wasLeaseLost(lease.partitionId, err)) {
|
||||
result = false;
|
||||
} else {
|
||||
throw err;
|
||||
}
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
async renewLease(lease: AzureBlobLease): Promise<boolean> {
|
||||
let result: boolean = false;
|
||||
try {
|
||||
const options: StorageBlobService.LeaseRequestOptions = {
|
||||
timeoutIntervalInMs: this.leaseRenewInterval * 1000,
|
||||
maximumExecutionTimeInMs: maximumExecutionTimeInMsForLeaseRenewal
|
||||
};
|
||||
await lease.blob.renewLease(lease.token, options);
|
||||
result = true;
|
||||
} catch (err: any) {
|
||||
if (!this._wasLeaseLost(lease.partitionId, err)) {
|
||||
throw err;
|
||||
}
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
async releaseLease(lease: AzureBlobLease): Promise<void> {
|
||||
const withHostAndPartition = this._context.withHostAndPartition;
|
||||
try {
|
||||
const leaseId: string = lease.token;
|
||||
log.checkpointLeaseMgr(withHostAndPartition(lease, "Trying to release the lease."));
|
||||
const releasedCopy = new AzureBlobLease({ ...lease.getInfo(), blob: lease.blob });
|
||||
releasedCopy.owner = "";
|
||||
releasedCopy.token = "";
|
||||
await this._uploadLease(lease, UploadActivity.release);
|
||||
await lease.blob.releaseLease(leaseId);
|
||||
} catch (err: any) {
|
||||
if (!this._wasLeaseLost(lease.partitionId, err)) {
|
||||
throw err;
|
||||
}
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
async updateLease(lease: AzureBlobLease): Promise<boolean> {
|
||||
const withHostAndPartition = this._context.withHostAndPartition;
|
||||
if (lease == undefined) {
|
||||
return false;
|
||||
}
|
||||
|
||||
if (!lease.token) {
|
||||
return false;
|
||||
}
|
||||
|
||||
log.checkpointLeaseMgr(
|
||||
withHostAndPartition(
|
||||
lease,
|
||||
"Let us renew the lease to make sure the " +
|
||||
"update with offset '%s' and sequence number %d will go through."
|
||||
),
|
||||
lease.offset,
|
||||
lease.sequenceNumber
|
||||
);
|
||||
let result = await this.renewLease(lease);
|
||||
if (result) {
|
||||
try {
|
||||
await this._uploadLease(lease, UploadActivity.update);
|
||||
} catch (err: any) {
|
||||
if (this._wasLeaseLost(lease.partitionId, err)) {
|
||||
result = false;
|
||||
} else {
|
||||
throw err;
|
||||
}
|
||||
}
|
||||
}
|
||||
// else could not renew lease due to lease loss. Result is already false, so pass it unchanged
|
||||
return result;
|
||||
}
|
||||
|
||||
checkpointStoreExists(): Promise<boolean> {
|
||||
log.checkpointLeaseMgr(this._context.withHost("Checking whether the checkpoint store exists."));
|
||||
return this.leaseStoreExists();
|
||||
}
|
||||
|
||||
async deleteCheckpointStore(): Promise<void> {
|
||||
return this.deleteLeaseStore();
|
||||
}
|
||||
|
||||
async createCheckpointStoreIfNotExists(): Promise<void> {
|
||||
// This is a no-op since this method will only be called after creating the lease store.
|
||||
// The lease store and the checkpoint store are the same thing.
|
||||
return;
|
||||
}
|
||||
|
||||
async createAllCheckpointsIfNotExists(partitionIds: string[]): Promise<void> {
|
||||
validateType("partitionIds", partitionIds, true, "Array");
|
||||
// Because we control the caller, we know that this method will only be called after
|
||||
// createAllLeasesIfNotExists. In this implementation checkpoints are in the same
|
||||
// blobs as leases, so the blobs will already exist if execution reaches here.
|
||||
return;
|
||||
}
|
||||
|
||||
async getCheckpoint(partitionId: string): Promise<CheckpointInfo | undefined> {
|
||||
validateType("partitionId", partitionId, true, "string");
|
||||
const withHostAndPartition = this._context.withHostAndPartition;
|
||||
let result: CheckpointInfo | undefined;
|
||||
log.checkpointLeaseMgr(withHostAndPartition(partitionId, "Getting checkpoint."));
|
||||
const lease: AzureBlobLease | undefined = await this.getLease(partitionId);
|
||||
if (lease != undefined && lease.offset) {
|
||||
result = CheckpointInfo.createFromLease(lease.getInfo());
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
async updateCheckpoint(lease: AzureBlobLease, checkpoint: CheckpointInfo): Promise<void> {
|
||||
const withHostAndPartition = this._context.withHostAndPartition;
|
||||
log.checkpointLeaseMgr(
|
||||
withHostAndPartition(checkpoint, "Checkpoint at offset '%s' and seqno %d."),
|
||||
checkpoint.offset,
|
||||
checkpoint.sequenceNumber
|
||||
);
|
||||
lease.offset = checkpoint.offset;
|
||||
lease.sequenceNumber = checkpoint.sequenceNumber;
|
||||
try {
|
||||
if (await this.updateLease(lease)) {
|
||||
return;
|
||||
} else {
|
||||
const msg =
|
||||
`Lease lost while updating the checkpoint for partitionId ` +
|
||||
`'${checkpoint.partitionId}'. Hence could not update it.`;
|
||||
log.error(withHostAndPartition(lease, msg));
|
||||
throw new Error(msg);
|
||||
}
|
||||
} catch (err: any) {
|
||||
const info: EPHDiagnosticInfo = {
|
||||
action: EPHActionStrings.updatingCheckpoint,
|
||||
error: err,
|
||||
hostName: this._context.hostName,
|
||||
partitionId: checkpoint.partitionId
|
||||
};
|
||||
this._context.onEphError(info);
|
||||
}
|
||||
}
|
||||
|
||||
async deleteCheckpoint(partitionId: string): Promise<void> {
|
||||
validateType("partitionId", partitionId, true, "string");
|
||||
// This is a no-op to avoid deleting leases accidentally.
|
||||
}
|
||||
|
||||
private async _listBlobs(): Promise<StorageBlobService.BlobResult[]> {
|
||||
const blobService = this._context.blobService;
|
||||
const withHost = this._context.withHost;
|
||||
if (blobService) {
|
||||
const listResult = await blobService.listBlobsSegmented(this._context.storageContainerName!);
|
||||
log.checkpointLeaseMgr(withHost("Number of blobs: %d"), listResult.entries.length);
|
||||
return listResult.entries;
|
||||
} else {
|
||||
throw new Error(
|
||||
"'blobService' is not defined in the 'hostContext', hence cannot " + "list all the blobs."
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
private async _uploadLease(
|
||||
lease: AzureBlobLease,
|
||||
activity: UploadActivity,
|
||||
options?: StorageBlobService.CreateBlobRequestOptions
|
||||
): Promise<void> {
|
||||
const partitionId = lease.partitionId;
|
||||
const blob = lease.blob;
|
||||
const withHostAndPartition = this._context.withHostAndPartition;
|
||||
if (activity !== UploadActivity.create) {
|
||||
// It is possible for AzureBlobLease objects in memory to have stale offset/sequence number
|
||||
// fields if a checkpoint was written but PartitionManager hasn't done its ten-second sweep
|
||||
// which downloads new copies of all the leases. This can happen because we're trying to
|
||||
// maintain the fiction that checkpoints and leases are separate -- which they can be in
|
||||
// other implementations -- even though they are completely intertwined in this
|
||||
// implementation. To prevent writing stale checkpoint data to the store, merge the
|
||||
// checkpoint data from the most recently written checkpoint into this write, if needed.
|
||||
if (this._latestCheckpoint.has(partitionId)) {
|
||||
const cached: CheckpointInfo = this._latestCheckpoint.get(partitionId)!;
|
||||
if (cached.sequenceNumber > lease.sequenceNumber || lease.offset == undefined) {
|
||||
lease.offset = cached.offset;
lease.sequenceNumber = cached.sequenceNumber;
|
||||
log.checkpointLeaseMgr(
|
||||
withHostAndPartition(
|
||||
lease,
|
||||
"Updating stale offset/seqno with " + "new values %s/%d while uploading lease."
|
||||
),
|
||||
lease.offset,
|
||||
lease.sequenceNumber
|
||||
);
|
||||
} else if (lease.offset != undefined) {
|
||||
this._latestCheckpoint.set(partitionId, CheckpointInfo.createFromLease(lease.getInfo()));
|
||||
}
|
||||
}
|
||||
}
|
||||
const jsonToUpload = lease.serialize();
|
||||
if (!options) {
|
||||
options = {
|
||||
leaseId: lease.token
|
||||
};
|
||||
}
|
||||
if (!options.metadata) options.metadata = {};
|
||||
// - For "acquire" and "update" activities, the metadata must be set, since that is the time
|
||||
// when the host actually owns the lease. If metadata is not set for update activity
|
||||
// (i.e. while checkpointing), then the metadata is wiped off (over-written).
|
||||
// This causes problems for the partition scanner while trying to determine the lease owner.
|
||||
// - For "release" activity the metadata needs to be deleted/unset, since the intention is to
|
||||
// not own the lease anymore (due to lease being lost or the receiver shutting down). Hence,
|
||||
// setting the metadata as an empty object.
|
||||
// - For "create" activity, the intention is to create a lease if it does not exist, but not own
|
||||
// it. The lease state will be available and the status will be unlocked. Hence setting the
|
||||
// metadata as an empty object.
|
||||
if (activity === UploadActivity.acquire || activity === UploadActivity.update) {
|
||||
options.metadata[metadataOwnerName] = lease.owner || this._context.hostName;
|
||||
}
|
||||
log.checkpointLeaseMgr(
|
||||
withHostAndPartition(
|
||||
lease,
|
||||
"Trying to upload raw JSON for activity " + "'%s': %s, with options: %o"
|
||||
),
|
||||
activity,
|
||||
jsonToUpload,
|
||||
options
|
||||
);
|
||||
await blob.updateContent(jsonToUpload, options);
|
||||
}
|
||||
|
||||
private _wasLeaseLost(partitionId: string, err: StorageError): boolean {
|
||||
let result: boolean = false;
|
||||
const statusCode = err.statusCode;
|
||||
const code = err.code;
|
||||
const withHostAndPartition = this._context.withHostAndPartition;
|
||||
// conflict OR precondition failed.
|
||||
if ((statusCode && statusCode === 409) || statusCode === 412) {
|
||||
if (
|
||||
!code ||
|
||||
(code &&
|
||||
(code.toLowerCase() === leaseLost ||
|
||||
code.toLowerCase() === leaseIdMismatchWithLeaseOperation ||
|
||||
code.toLowerCase() === leaseIdMismatchWithBlobOperation))
|
||||
) {
|
||||
result = true;
|
||||
}
|
||||
}
|
||||
log.error(
|
||||
withHostAndPartition(partitionId, "Was lease lost -> %s, err: %O."),
|
||||
result,
|
||||
getStorageError(err)
|
||||
);
|
||||
return result;
|
||||
}
|
||||
}
|
|
@@ -1,66 +0,0 @@
|
|||
// Copyright (c) Microsoft Corporation. All rights reserved.
|
||||
// Licensed under the MIT License.
|
||||
|
||||
/**
|
||||
* Describes the basic information required in a lease.
|
||||
*/
|
||||
export interface BaseLeaseInfo {
|
||||
/**
|
||||
* @property {string} partitionId The associated partitionId for which the lease is held.
|
||||
*/
|
||||
partitionId: string;
|
||||
/**
|
||||
* @property {string} owner The host owner for the partition.
|
||||
*/
|
||||
owner: string;
|
||||
}
|
||||
|
||||
/**
|
||||
* Describes the base lease.
|
||||
*/
|
||||
export class BaseLease implements BaseLeaseInfo {
|
||||
/**
|
||||
* @property {string} partitionId The associated partitionId for which the lease is held.
|
||||
* @readonly
|
||||
*/
|
||||
readonly partitionId: string;
|
||||
/**
|
||||
* @property {string} owner The host owner for the partition.
|
||||
*/
|
||||
owner: string;
|
||||
/**
|
||||
* @property {boolean} isOwned Indicates whether the lease is owned. `true` if it is owned by
|
||||
* someone; `false` otherwise.
|
||||
*/
|
||||
isOwned: boolean = false;
|
||||
|
||||
/**
|
||||
* @constructor
|
||||
* @param info The information required to create a base lease.
|
||||
*/
|
||||
constructor(info: BaseLeaseInfo) {
|
||||
this.partitionId = info.partitionId;
|
||||
this.owner = info.owner;
|
||||
}
|
||||
|
||||
/**
|
||||
* Compares possibleOwner against this.owner
|
||||
* @param {string} possibleOwner The owner name to check.
|
||||
* @returns {boolean} boolean - true if possibleOwner is the same as this.owner, false otherwise.
|
||||
*/
|
||||
isOwnedBy(possibleOwner: string): boolean {
|
||||
return this.owner === possibleOwner;
|
||||
}
|
||||
|
||||
/**
|
||||
* Gets the lease information.
|
||||
* @returns {BaseLeaseInfo} BaseLeaseInfo.
|
||||
*/
|
||||
getInfo(): BaseLeaseInfo {
|
||||
const info: BaseLeaseInfo = {
|
||||
partitionId: this.partitionId,
|
||||
owner: this.owner
|
||||
};
|
||||
return info;
|
||||
}
|
||||
}
|
|
@@ -1,773 +0,0 @@
|
|||
// Copyright (c) Microsoft Corporation. All rights reserved.
|
||||
// Licensed under the MIT License.
|
||||
|
||||
import { Dictionary } from "@azure/event-hubs";
|
||||
import {
|
||||
createBlobService,
|
||||
BlobService as StorageBlobService,
|
||||
ServiceResponse
|
||||
} from "azure-storage";
|
||||
import * as log from "./log";
|
||||
import { validateType, getStorageError } from "./util/utils";
|
||||
import { defaultMaximumExecutionTimeInMs } from "./util/constants";
|
||||
import path from "path";
|
||||
/**
|
||||
* @hidden
|
||||
*/
|
||||
export interface CreateContainerResult {
|
||||
created: StorageBlobService.ContainerResult;
|
||||
details: ServiceResponse;
|
||||
}
|
||||
|
||||
/**
|
||||
* @hidden
|
||||
*/
|
||||
export enum LeaseState {
|
||||
/**
|
||||
* The lease state is not specified.
|
||||
*/
|
||||
unspecified = "unspecified",
|
||||
|
||||
/**
|
||||
* The lease is in the "available" state.
|
||||
*/
|
||||
available = "available",
|
||||
|
||||
/**
|
||||
* The lease is in the "leased" state.
|
||||
*/
|
||||
leased = "leased",
|
||||
|
||||
/**
|
||||
* The lease is in the "expired" state.
|
||||
*/
|
||||
expired = "expired",
|
||||
|
||||
/**
|
||||
* The lease is in the "breaking" state.
|
||||
*/
|
||||
breaking = "breaking",
|
||||
|
||||
/**
|
||||
* The lease is in the "broken" state.
|
||||
*/
|
||||
broken = "broken"
|
||||
}
|
||||
|
||||
/**
|
||||
* @hidden
|
||||
*/
|
||||
export class BlobService {
|
||||
private _hostName: string;
|
||||
private _connectionString: string;
|
||||
private _storageBlobService: StorageBlobService;
|
||||
private _beginningOfTime: string = new Date(1990, 0, 1).toUTCString();
|
||||
|
||||
constructor(hostName: string, connectionString: string) {
|
||||
this._hostName = hostName;
|
||||
this._connectionString = connectionString;
|
||||
this._storageBlobService = createBlobService(this._connectionString);
|
||||
this._storageBlobService.defaultMaximumExecutionTimeInMs = defaultMaximumExecutionTimeInMs;
|
||||
}
|
||||
|
||||
/**
|
||||
* Ensures that the container and blob exist.
|
||||
*/
|
||||
async ensureContainerAndBlobExist(containerName: string, blobPath: string): Promise<void> {
|
||||
validateType("containerName", containerName, true, "string");
|
||||
validateType("blobPath", blobPath, true, "string");
|
||||
const partitionId = path.basename(blobPath);
|
||||
try {
|
||||
await this.ensureContainerExists(containerName);
|
||||
await this.ensureBlobExists(containerName, blobPath, "{}");
|
||||
} catch (err: any) {
|
||||
const msg =
|
||||
`An error occurred while ensuring that the container and blob exists. ` +
|
||||
`It is: \n${err ? err.stack : JSON.stringify(err)}`;
|
||||
log.error("[%s] [%s] %s", this._hostName, partitionId, msg);
|
||||
throw new Error(msg);
|
||||
}
|
||||
}
|
||||
|
||||
ensureContainerExists(containerName: string): Promise<CreateContainerResult> {
|
||||
validateType("containerName", containerName, true, "string");
|
||||
|
||||
return new Promise<CreateContainerResult>((resolve, reject) => {
|
||||
log.blobService(
|
||||
"[%s] Ensuring that the container '%s' exists.",
|
||||
this._hostName,
|
||||
containerName
|
||||
);
|
||||
this._storageBlobService.createContainerIfNotExists(
|
||||
containerName,
|
||||
(error, result, response) => {
|
||||
if (error) {
|
||||
log.error(
|
||||
"[%s] An error occurred while ensuring that the container '%s' exists: %O",
|
||||
this._hostName,
|
||||
containerName,
|
||||
getStorageError(error)
|
||||
);
|
||||
reject(error);
|
||||
} else {
|
||||
const containerInfo = { created: result, details: response };
|
||||
log.blobService(
|
||||
"[%s] Result for Container '%s': %O",
|
||||
this._hostName,
|
||||
containerName,
|
||||
containerInfo
|
||||
);
|
||||
resolve(containerInfo);
|
||||
}
|
||||
}
|
||||
);
|
||||
});
|
||||
}
|
||||
|
||||
doesContainerExist(containerName: string): Promise<boolean> {
|
||||
validateType("containerName", containerName, true, "string");
|
||||
|
||||
return new Promise<boolean>((resolve, reject) => {
|
||||
this._storageBlobService.doesContainerExist(containerName, (error, result) => {
|
||||
if (error) {
|
||||
log.error(
|
||||
"[%s] An error occurred while determining whether the container " + "'%s' exists: % O.",
|
||||
this._hostName,
|
||||
containerName,
|
||||
getStorageError(error)
|
||||
);
|
||||
reject(error);
|
||||
} else {
|
||||
log.blobService(
|
||||
"[%s] Does container '%s' exist -> %s.",
|
||||
this._hostName,
|
||||
containerName,
|
||||
result.exists
|
||||
);
|
||||
resolve(Boolean(result.exists));
|
||||
}
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
doesBlobExist(containerName: string, blobPath: string): Promise<boolean> {
|
||||
validateType("containerName", containerName, true, "string");
|
||||
validateType("blobPath", blobPath, true, "string");
|
||||
const partitionId = path.basename(blobPath);
|
||||
|
||||
return new Promise<boolean>((resolve, reject) => {
|
||||
this._storageBlobService.doesBlobExist(containerName, blobPath, (error, result) => {
|
||||
if (error) {
|
||||
log.error(
|
||||
"[%s] [%s] An error occurred while determining whether the blob '%s' exists in " +
|
||||
"container '%s': %O",
|
||||
this._hostName,
|
||||
partitionId,
|
||||
blobPath,
|
||||
containerName,
|
||||
getStorageError(error)
|
||||
);
|
||||
reject(error);
|
||||
} else {
|
||||
log.blobService(
|
||||
"[%s] [%s] Does blob '%s' exist in container '%s' -> %s.",
|
||||
this._hostName,
|
||||
partitionId,
|
||||
blobPath,
|
||||
containerName,
|
||||
result.exists
|
||||
);
|
||||
resolve(Boolean(result.exists));
|
||||
}
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
ensureBlobExists(containerName: string, blobPath: string, text: string): Promise<void> {
|
||||
validateType("containerName", containerName, true, "string");
|
||||
validateType("blobPath", blobPath, true, "string");
|
||||
validateType("text", text, true, "string");
|
||||
const partitionId = path.basename(blobPath);
|
||||
|
||||
return new Promise<void>((resolve, reject) => {
|
||||
const options: StorageBlobService.CreateBlobRequestOptions = {
|
||||
accessConditions: {
|
||||
DateUnModifiedSince: this._beginningOfTime
|
||||
}
|
||||
};
|
||||
log.blobService(
|
||||
"[%s] [%s] Ensuring that blob '%s' exists in container '%s'.",
|
||||
this._hostName,
|
||||
partitionId,
|
||||
blobPath,
|
||||
containerName
|
||||
);
|
||||
this._storageBlobService.createBlockBlobFromText(
|
||||
containerName,
|
||||
blobPath,
|
||||
text,
|
||||
options,
|
||||
(error) => {
|
||||
if (error) {
|
||||
if ((error as any).statusCode === 412) {
|
||||
// Blob already exists.
|
||||
resolve();
|
||||
} else {
|
||||
log.error(
|
||||
"[%s] [%s] An error occurred while ensuring that blob '%s' exists in " +
|
||||
"container '%s': %O",
|
||||
this._hostName,
|
||||
partitionId,
|
||||
blobPath,
|
||||
containerName,
|
||||
getStorageError(error)
|
||||
);
|
||||
reject(error);
|
||||
}
|
||||
} else {
|
||||
resolve();
|
||||
}
|
||||
}
|
||||
);
|
||||
});
|
||||
}
|
||||
|
||||
renewLease(
|
||||
containerName: string,
|
||||
blobPath: string,
|
||||
leaseId: string,
|
||||
options: StorageBlobService.LeaseRequestOptions
|
||||
): Promise<StorageBlobService.LeaseResult> {
|
||||
validateType("containerName", containerName, true, "string");
|
||||
validateType("blobPath", blobPath, true, "string");
|
||||
validateType("leaseId", leaseId, true, "string");
|
||||
validateType("options", options, false, "object");
|
||||
const partitionId = path.basename(blobPath);
|
||||
|
||||
return new Promise<StorageBlobService.LeaseResult>((resolve, reject) => {
|
||||
if (!options) options = {};
|
||||
log.blobService(
|
||||
"[%s] [%s] Attempting to renew lease '%s' for blobPath '%s'.",
|
||||
this._hostName,
|
||||
partitionId,
|
||||
leaseId,
|
||||
blobPath
|
||||
);
|
||||
this._storageBlobService.renewLease(
|
||||
containerName,
|
||||
blobPath,
|
||||
leaseId,
|
||||
options,
|
||||
(error, result) => {
|
||||
if (error) {
|
||||
log.error(
|
||||
"[%s] [%s] An error occurred while renewing lease '%s' for blobPath '%s': %O.",
|
||||
this._hostName,
|
||||
partitionId,
|
||||
leaseId,
|
||||
blobPath,
|
||||
getStorageError(error)
|
||||
);
|
||||
reject(error);
|
||||
} else {
|
||||
log.blobService(
|
||||
"[%s] [%s] Successfully, renewed lease with leaseId: '%s' for blobPath '%s'.",
|
||||
this._hostName,
|
||||
partitionId,
|
||||
leaseId,
|
||||
blobPath
|
||||
);
|
||||
resolve(result);
|
||||
}
|
||||
}
|
||||
);
|
||||
});
|
||||
}
|
||||
|
||||
releaseLease(
|
||||
containerName: string,
|
||||
blobPath: string,
|
||||
leaseId: string,
|
||||
options?: StorageBlobService.LeaseRequestOptions
|
||||
): Promise<StorageBlobService.LeaseResult> {
|
||||
validateType("containerName", containerName, true, "string");
|
||||
validateType("blobPath", blobPath, true, "string");
|
||||
validateType("leaseId", leaseId, true, "string");
|
||||
validateType("options", options, false, "object");
|
||||
const partitionId = path.basename(blobPath);
|
||||
|
||||
return new Promise<StorageBlobService.LeaseResult>((resolve, reject) => {
|
||||
if (!options) options = {};
|
||||
log.blobService(
|
||||
"[%s] [%s] Attempting to release lease '%s' for blobPath '%s'.",
|
||||
this._hostName,
|
||||
partitionId,
|
||||
leaseId,
|
||||
blobPath
|
||||
);
|
||||
this._storageBlobService.releaseLease(
|
||||
containerName,
|
||||
blobPath,
|
||||
leaseId,
|
||||
options,
|
||||
(error, result) => {
|
||||
if (error) {
|
||||
log.error(
|
||||
"[%s] [%s] An error occurred while releasing lease '%s' for blobPath '%s': %O.",
|
||||
this._hostName,
|
||||
partitionId,
|
||||
leaseId,
|
||||
blobPath,
|
||||
getStorageError(error)
|
||||
);
|
||||
reject(error);
|
||||
} else {
|
||||
log.blobService(
|
||||
"[%s] [%s] Successfully, released lease with leaseId: '%s' for blobPath '%s'.",
|
||||
this._hostName,
|
||||
partitionId,
|
||||
leaseId,
|
||||
blobPath
|
||||
);
|
||||
resolve(result);
|
||||
}
|
||||
}
|
||||
);
|
||||
});
|
||||
}
|
||||
|
||||
updateContent(
|
||||
containerName: string,
|
||||
blobPath: string,
|
||||
text: string,
|
||||
options?: StorageBlobService.CreateBlobRequestOptions
|
||||
): Promise<StorageBlobService.BlobResult> {
|
||||
validateType("containerName", containerName, true, "string");
|
||||
validateType("blobPath", blobPath, true, "string");
|
||||
validateType("text", text, true, "string");
|
||||
validateType("options", options, false, "object");
|
||||
const partitionId = path.basename(blobPath);
|
||||
|
||||
return new Promise<StorageBlobService.BlobResult>((resolve, reject) => {
|
||||
if (!options) options = {};
|
||||
log.blobService(
|
||||
"[%s] [%s] Updating content '%s' in the container '%s' of the blob '%s' .",
|
||||
this._hostName,
|
||||
partitionId,
|
||||
text,
|
||||
containerName,
|
||||
blobPath
|
||||
);
|
||||
this._storageBlobService.createBlockBlobFromText(
|
||||
containerName,
|
||||
blobPath,
|
||||
text,
|
||||
options,
|
||||
(error, result) => {
|
||||
if (error) {
|
||||
log.error(
|
||||
"[%s] [%s] An error occurred while updating content '%s' to blobPath '%s': %O.",
|
||||
this._hostName,
|
||||
partitionId,
|
||||
text,
|
||||
blobPath,
|
||||
getStorageError(error)
|
||||
);
|
||||
reject(error);
|
||||
} else {
|
||||
log.blobService(
|
||||
"[%s] [%s] Successfully, updated blob content '%s' for blobPath '%s'.",
|
||||
this._hostName,
|
||||
partitionId,
|
||||
text,
|
||||
blobPath
|
||||
);
|
||||
resolve(result);
|
||||
}
|
||||
}
|
||||
);
|
||||
});
|
||||
}
|
||||
|
||||
getContent(
|
||||
containerName: string,
|
||||
blobPath: string,
|
||||
options?: StorageBlobService.GetBlobRequestOptions
|
||||
): Promise<string> {
|
||||
validateType("containerName", containerName, true, "string");
|
||||
validateType("blobPath", blobPath, true, "string");
|
||||
validateType("options", options, false, "object");
|
||||
const partitionId = path.basename(blobPath);
|
||||
|
||||
return new Promise((resolve, reject) => {
|
||||
if (!options) options = {};
|
||||
log.blobService(
|
||||
"[%s] [%s] Attempting to getcontent from blobPath '%s'.",
|
||||
this._hostName,
|
||||
partitionId,
|
||||
blobPath
|
||||
);
|
||||
this._storageBlobService.getBlobToText(containerName, blobPath, options, (error, text) => {
|
||||
if (error) {
|
||||
log.error(
|
||||
"[%s] [%s] An error occurred while getting content from blobPath '%s': %O.",
|
||||
this._hostName,
|
||||
partitionId,
|
||||
blobPath,
|
||||
getStorageError(error)
|
||||
);
|
||||
reject(error);
|
||||
} else {
|
||||
log.blobService(
|
||||
"[%s] [%s] Successfully, fetched blob content '%s' for blobPath '%s'.",
|
||||
this._hostName,
|
||||
partitionId,
|
||||
text,
|
||||
blobPath
|
||||
);
|
||||
resolve(text);
|
||||
}
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
changeLease(
|
||||
containerName: string,
|
||||
blobPath: string,
|
||||
currentLeaseId: string,
|
||||
proposedLeaseId: string
|
||||
): Promise<StorageBlobService.LeaseResult> {
|
||||
validateType("containerName", containerName, true, "string");
|
||||
validateType("blobPath", blobPath, true, "string");
|
||||
validateType("currentLeaseId", currentLeaseId, true, "string");
|
||||
validateType("proposedLeaseId", proposedLeaseId, true, "string");
|
||||
const partitionId = path.basename(blobPath);
|
||||
|
||||
return new Promise<StorageBlobService.LeaseResult>((resolve, reject) => {
|
||||
log.blobService(
|
||||
"[%s] [%s] Attempting to change lease '%s' for blobPath '%s' with new lease '%s'.",
|
||||
this._hostName,
|
||||
partitionId,
|
||||
currentLeaseId,
|
||||
blobPath,
|
||||
proposedLeaseId
|
||||
);
|
||||
this._storageBlobService.changeLease(
|
||||
containerName,
|
||||
blobPath,
|
||||
currentLeaseId,
|
||||
proposedLeaseId,
|
||||
(error, result) => {
|
||||
if (error) {
|
||||
log.error(
|
||||
"[%s] [%s] An error occurred while changing lease '%s' to '%s' for blobPath " +
|
||||
"'%s': %O.",
|
||||
this._hostName,
|
||||
partitionId,
|
||||
currentLeaseId,
|
||||
proposedLeaseId,
|
||||
blobPath,
|
||||
error
|
||||
);
|
||||
reject(error);
|
||||
} else {
|
||||
log.blobService(
|
||||
"[%s] [%s] Successfully, changed current lease '%s' with proposed lease " +
|
||||
"'%s' for blobPath '%s'.",
|
||||
this._hostName,
|
||||
partitionId,
|
||||
currentLeaseId,
|
||||
proposedLeaseId,
|
||||
blobPath
|
||||
);
|
||||
resolve(result);
|
||||
}
|
||||
}
|
||||
);
|
||||
});
|
||||
}
|
||||
|
||||
getBlobProperties(
|
||||
containerName: string,
|
||||
blobPath: string
|
||||
): Promise<StorageBlobService.BlobResult> {
|
||||
validateType("containerName", containerName, true, "string");
|
||||
validateType("blobPath", blobPath, true, "string");
|
||||
const partitionId = path.basename(blobPath);
|
||||
|
||||
return new Promise<StorageBlobService.BlobResult>((resolve, reject) => {
|
||||
log.blobService(
|
||||
"[%s] [%s] Attempting to get blob props for blobPath '%s'.",
|
||||
this._hostName,
|
||||
partitionId,
|
||||
blobPath
|
||||
);
|
||||
this._storageBlobService.getBlobProperties(containerName, blobPath, (error, result) => {
|
||||
if (error) {
|
||||
log.error(
|
||||
"[%s] [%s] An error occurred while getting blob props for blobPath '%s': %O.",
|
||||
this._hostName,
|
||||
partitionId,
|
||||
blobPath,
|
||||
getStorageError(error)
|
||||
);
|
||||
reject(error);
|
||||
} else {
|
||||
log.blobService(
|
||||
"[%s] [%s] Successfully, got blob props for blobPath '%s'.",
|
||||
this._hostName,
|
||||
partitionId,
|
||||
blobPath
|
||||
);
|
||||
resolve(result);
|
||||
}
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
listBlobsSegmented(
|
||||
containerName: string,
|
||||
options?: StorageBlobService.ListBlobsSegmentedRequestOptions
|
||||
): Promise<StorageBlobService.ListBlobsResult> {
|
||||
validateType("containerName", containerName, true, "string");
|
||||
if (!options) {
|
||||
options = {
|
||||
maxResults: 5000,
|
||||
include: "metadata"
|
||||
};
|
||||
}
|
||||
return new Promise<StorageBlobService.ListBlobsResult>((resolve, reject) => {
|
||||
log.blobService(
|
||||
"[%s] Attempting to list blobs for container '%s'.",
|
||||
this._hostName,
|
||||
containerName
|
||||
);
|
||||
this._storageBlobService.listBlobsSegmented(
|
||||
containerName,
|
||||
undefined as any,
|
||||
options!,
|
||||
(error, result) => {
|
||||
if (error) {
|
||||
log.error(
|
||||
"[%s] An error occurred while listing blobs for container '%s': %O.",
|
||||
this._hostName,
|
||||
containerName,
|
||||
getStorageError(error)
|
||||
);
|
||||
reject(error);
|
||||
} else {
|
||||
log.blobService(
|
||||
"[%s] Successfully, received the list of blobs for container '%s'.",
|
||||
this._hostName,
|
||||
containerName
|
||||
);
|
||||
resolve(result);
|
||||
}
|
||||
}
|
||||
);
|
||||
});
|
||||
}
|
||||
|
||||
getBlobMetadata(containerName: string, blobPath: string): Promise<StorageBlobService.BlobResult> {
|
||||
validateType("containerName", containerName, true, "string");
|
||||
validateType("blobPath", blobPath, true, "string");
|
||||
const partitionId = path.basename(blobPath);
|
||||
|
||||
return new Promise<StorageBlobService.BlobResult>((resolve, reject) => {
|
||||
log.blobService(
|
||||
"[%s] [%s] Attempting to get blob metadata for blobPath '%s'.",
|
||||
this._hostName,
|
||||
partitionId,
|
||||
blobPath
|
||||
);
|
||||
this._storageBlobService.getBlobMetadata(containerName, blobPath, (error, result) => {
|
||||
if (error) {
|
||||
log.error(
|
||||
"[%s] [%s] An error occurred while getting blob metadata for blobPath '%s': %O.",
|
||||
this._hostName,
|
||||
partitionId,
|
||||
blobPath,
|
||||
getStorageError(error)
|
||||
);
|
||||
reject(error);
|
||||
} else {
|
||||
log.blobService(
|
||||
"[%s] [%s] Successfully, got the blob metadata %o for blobPath '%s'.",
|
||||
this._hostName,
|
||||
partitionId,
|
||||
result,
|
||||
blobPath
|
||||
);
|
||||
resolve(result);
|
||||
}
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
setBlobMetadata(
|
||||
containerName: string,
|
||||
blobPath: string,
|
||||
metadata: Dictionary<string>,
|
||||
options?: StorageBlobService.BlobRequestOptions
|
||||
): Promise<StorageBlobService.BlobResult> {
|
||||
if (!options) options = {};
|
||||
validateType("containerName", containerName, true, "string");
|
||||
validateType("blobPath", blobPath, true, "string");
|
||||
validateType("metadata", metadata, true, "object");
|
||||
validateType("options", options, true, "object");
|
||||
const partitionId = path.basename(blobPath);
|
||||
|
||||
return new Promise<StorageBlobService.BlobResult>((resolve, reject) => {
|
||||
log.blobService(
|
||||
"[%s] [%s] Attempting to set blob metadata %o for blobPath '%s'.",
|
||||
this._hostName,
|
||||
partitionId,
|
||||
metadata,
|
||||
blobPath
|
||||
);
|
||||
this._storageBlobService.setBlobMetadata(
|
||||
containerName,
|
||||
blobPath,
|
||||
metadata,
|
||||
options!,
|
||||
(error, result) => {
|
||||
if (error) {
|
||||
log.error(
|
||||
"[%s] [%s] An error occurred while setting blob metadata for blobPath '%s': %O.",
|
||||
this._hostName,
|
||||
partitionId,
|
||||
blobPath,
|
||||
getStorageError(error)
|
||||
);
|
||||
reject(error);
|
||||
} else {
|
||||
log.blobService(
|
||||
"[%s] [%s] Successfully, set the blob metadata for blobPath '%s'. " +
|
||||
"The result is: name: %s, metadata: %o, lease: %o",
|
||||
this._hostName,
|
||||
partitionId,
|
||||
blobPath,
|
||||
result.name,
|
||||
result.metadata,
|
||||
result.lease
|
||||
);
|
||||
resolve(result);
|
||||
}
|
||||
}
|
||||
);
|
||||
});
|
||||
}
|
||||
|
||||
acquireLease(
|
||||
containerName: string,
|
||||
blobPath: string,
|
||||
options: StorageBlobService.AcquireLeaseRequestOptions
|
||||
): Promise<StorageBlobService.LeaseResult> {
|
||||
validateType("containerName", containerName, true, "string");
|
||||
validateType("blobPath", blobPath, true, "string");
|
||||
validateType("options", options, false, "object");
|
||||
const partitionId = path.basename(blobPath);
|
||||
|
||||
return new Promise<StorageBlobService.LeaseResult>((resolve, reject) => {
|
||||
if (!options) options = {};
|
||||
log.blobService(
|
||||
"[%s] [%s] Attempting to acquire lease for blobPath '%s'.",
|
||||
this._hostName,
|
||||
partitionId,
|
||||
blobPath
|
||||
);
|
||||
this._storageBlobService.acquireLease(containerName, blobPath, options, (error, result) => {
|
||||
if (error) {
|
||||
log.error(
|
||||
"[%s] [%s] An error occurred while acquiring lease for blobPath '%s': %O.",
|
||||
this._hostName,
|
||||
partitionId,
|
||||
blobPath,
|
||||
getStorageError(error)
|
||||
);
|
||||
reject(error);
|
||||
} else {
|
||||
log.blobService(
|
||||
"[%s] [%s] Acquired lease '%s' for blobPath '%s.",
|
||||
this._hostName,
|
||||
partitionId,
|
||||
result.id,
|
||||
blobPath
|
||||
);
|
||||
resolve(result);
|
||||
}
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
async deleteBlobIfExists(containerName: string, blobPath: string): Promise<void> {
|
||||
validateType("containerName", containerName, true, "string");
|
||||
validateType("blobPath", blobPath, true, "string");
|
||||
const partitionId = path.basename(blobPath);
|
||||
|
||||
return new Promise<void>((resolve, reject) => {
|
||||
log.blobService(
|
||||
"[%s] Attempting to delete blob for blobPath '%s'.",
|
||||
this._hostName,
|
||||
partitionId,
|
||||
blobPath
|
||||
);
|
||||
this._storageBlobService.deleteBlobIfExists(containerName, blobPath, (error, result) => {
|
||||
if (error) {
|
||||
log.error(
|
||||
"[%s] An error occurred while deleting blob for blobPath '%s': %O.",
|
||||
this._hostName,
|
||||
partitionId,
|
||||
blobPath,
|
||||
getStorageError(error)
|
||||
);
|
||||
reject(error);
|
||||
} else {
|
||||
log.blobService(
|
||||
"[%s] [%s] Deleted blob '%s' -> %s.",
|
||||
this._hostName,
|
||||
partitionId,
|
||||
blobPath,
|
||||
result
|
||||
);
|
||||
resolve();
|
||||
}
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
async deleteContainerIfExists(containerName: string): Promise<void> {
|
||||
validateType("containerName", containerName, true, "string");
|
||||
|
||||
return new Promise<void>((resolve, reject) => {
|
||||
log.blobService("[%s] Attempting to delete container '%s'.", this._hostName, containerName);
|
||||
this._storageBlobService.deleteContainerIfExists(containerName, (error, result) => {
|
||||
if (error) {
|
||||
log.error(
|
||||
"[%s] An error occurred while deleting container '%s': %O.",
|
||||
this._hostName,
|
||||
containerName,
|
||||
getStorageError(error)
|
||||
);
|
||||
reject(error);
|
||||
} else {
|
||||
log.blobService(
|
||||
"[%s] Deleted container '%s' -> %s.",
|
||||
this._hostName,
|
||||
containerName,
|
||||
result
|
||||
);
|
||||
resolve();
|
||||
}
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
static create(hostName: string, connectionString: string): BlobService {
|
||||
validateType("hostName", hostName, true, "string");
|
||||
validateType("connectionString", connectionString, true, "string");
|
||||
|
||||
return new BlobService(hostName, connectionString);
|
||||
}
|
||||
}
|
|
@@ -1,93 +0,0 @@
|
|||
// Copyright (c) Microsoft Corporation. All rights reserved.
|
||||
// Licensed under the MIT License.
|
||||
|
||||
import * as log from "./log";
|
||||
import { EventData } from "@azure/event-hubs";
|
||||
import { LeaseInfo } from "./azureBlobLease";
|
||||
import { validateType } from "./util/utils";
|
||||
|
||||
/**
|
||||
* Describes the checkpoint information.
|
||||
* @interface CheckpointInfo
|
||||
*/
|
||||
export interface CheckpointInfo {
|
||||
/**
|
||||
* @property {string} partitionId The EventHub partition id.
|
||||
*/
|
||||
partitionId: string;
|
||||
/**
|
||||
* @property {string} offset The offset of the event to be checked in.
|
||||
*/
|
||||
offset: string;
|
||||
/**
|
||||
* @property {string} sequenceNumber The sequence number of the event to be checked in.
|
||||
*/
|
||||
sequenceNumber: number;
|
||||
}
|
||||
|
||||
/**
|
||||
* Describes the checkpoint information.
|
||||
* @namespace CheckpointInfo
|
||||
*/
|
||||
export namespace CheckpointInfo {
|
||||
/**
|
||||
* Creates the checkpoint info
|
||||
* @param {string} partitionId The partition id for the checkpoint
|
||||
* @param {string} [offset] The offset of the event to be checked in.
|
||||
* @param {number} [sequenceNumber] The sequence number of the event to be checked in.
|
||||
* @return {CheckpointInfo} CheckpointInfo
|
||||
*/
|
||||
export function create(
|
||||
partitionId: string,
|
||||
offset?: string,
|
||||
sequenceNumber?: number
|
||||
): CheckpointInfo {
|
||||
validateType("partitionId", partitionId, true, "string");
|
||||
validateType("offset", offset, false, "string");
|
||||
validateType("sequenceNumber", sequenceNumber, false, "number");
|
||||
const checkpoint: CheckpointInfo = {
|
||||
partitionId: partitionId,
|
||||
offset: offset || "-1",
|
||||
sequenceNumber: sequenceNumber != undefined ? sequenceNumber : 0
|
||||
};
|
||||
log.checkpoint("The created CheckpointInfo is: %o", checkpoint);
|
||||
return checkpoint;
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates the checkpoint info
|
||||
* @param {LeaseInfo} lease The lease info from which the checkpoint info needs to be created.
|
||||
* @return {CheckpointInfo} CheckpointInfo
|
||||
*/
|
||||
export function createFromLease(lease: LeaseInfo): CheckpointInfo {
|
||||
validateType("lease", lease, true, "object");
|
||||
const checkpoint: CheckpointInfo = {
|
||||
offset: lease.offset || "-1",
|
||||
partitionId: lease.partitionId,
|
||||
sequenceNumber: lease.sequenceNumber
|
||||
};
|
||||
log.checkpoint("The created CheckpointInfo from lease %o is: %o", lease, checkpoint);
|
||||
return checkpoint;
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates the checkpoint info.
|
||||
* @param {string} partitionId The partition id for the checkpoint
|
||||
* @param {EventData} eventData The event data from which the checkpoint info needs to be created.
|
||||
* @return {CheckpointInfo} CheckpointInfo
|
||||
*/
|
||||
export function createFromEventData(partitionId: string, eventData: EventData): CheckpointInfo {
|
||||
validateType("partitionId", partitionId, true, "string");
|
||||
validateType("eventData", eventData, true, "object");
|
||||
validateType("eventData.offset", eventData.offset, true, "string");
|
||||
validateType("eventData.sequenceNumber", eventData.sequenceNumber, true, "number");
|
||||
|
||||
const checkpoint: CheckpointInfo = {
|
||||
partitionId: partitionId,
|
||||
offset: eventData.offset!,
|
||||
sequenceNumber: eventData.sequenceNumber!
|
||||
};
|
||||
log.checkpoint("The created CheckpointInfo from EventData %o is: %o", eventData, checkpoint);
|
||||
return checkpoint;
|
||||
}
|
||||
}
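The three factory functions above are the only ways a CheckpointInfo is produced in this package. The short sketch below shows them side by side; the partition ids, offset, and sequence number are placeholder values used purely for illustration.

import { EventData } from "@azure/event-hubs";
import { CheckpointInfo } from "./checkpointInfo";

// A hypothetical event that was just processed; offset and sequenceNumber are
// the two values a checkpoint actually records.
const event: EventData = { body: "hello", offset: "1024", sequenceNumber: 42 };

// Derive the checkpoint from the processed event ...
const fromEvent = CheckpointInfo.createFromEventData("0", event);

// ... or build it from explicit values, or as an empty holder that defaults
// to offset "-1" and sequence number 0.
const explicit = CheckpointInfo.create("0", "1024", 42);
const emptyHolder = CheckpointInfo.create("1");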
|
|
@@ -1,73 +0,0 @@
|
|||
// Copyright (c) Microsoft Corporation. All rights reserved.
|
||||
// Licensed under the MIT License.
|
||||
|
||||
import { CheckpointInfo } from "./checkpointInfo";
|
||||
import { CompleteLease } from "./completeLease";
|
||||
|
||||
/**
|
||||
* @interface CheckpointManager
|
||||
* If you wish to have EventProcessorHost store checkpoints somewhere other than Azure Storage,
|
||||
* you can write your own checkpoint manager using this interface.
|
||||
*
|
||||
* The Azure Storage managers use the same storage for both lease and checkpoints, so both the
|
||||
* interfaces are implemented by the same class. You are free to do the same thing if you have
|
||||
* a unified store for both types of data.
|
||||
*
|
||||
* This interface does not specify initialization methods because we have no way of knowing what
|
||||
* information your implementation will require.
|
||||
*/
|
||||
export interface CheckpointManager {
|
||||
/**
|
||||
* Does the checkpoint store exist?
|
||||
* @returns {Promise<boolean>} Promise<boolean> `true` if it exists, `false` if it does not exist.
|
||||
*/
|
||||
checkpointStoreExists(): Promise<boolean>;
|
||||
/**
|
||||
* Create the checkpoint store if it doesn't exist. Does nothing if it exists.
|
||||
* @returns {Promise<void>} Promise<void> resolves with `undefined`; rejects with an `Error`.
|
||||
*/
|
||||
createCheckpointStoreIfNotExists(): Promise<void>;
|
||||
/**
|
||||
* Deletes the checkpoint store.
|
||||
* @returns {Promise<void>} Promise<void> resolves with `undefined`; rejects with an `Error`.
|
||||
*/
|
||||
deleteCheckpointStore(): Promise<void>;
|
||||
/**
|
||||
* Creates the checkpoint HOLDERs for the given partitions. Does nothing for any checkpoint
|
||||
* HOLDERs that already exist.
|
||||
*
|
||||
* The semantics of this are complicated because it is possible to use the same store for both
|
||||
* leases and checkpoints (the Azure Storage implementation does so) and it is required to
|
||||
* have a lease for every partition but it is not required to have a checkpoint for a partition.
|
||||
* It is a valid scenario to never use checkpoints at all, so it is important for the store to
|
||||
* distinguish between creating the structure(s) that will hold a checkpoint and actually creating
|
||||
* a checkpoint (storing an offset/sequence number pair in the structure).
|
||||
*
|
||||
* @param {string[]} partitionIds List of partitions to create checkpoint HOLDERs for.
|
||||
* @returns {Promise<void>} Promise<void> resolves with `undefined`; rejects with an `Error`.
|
||||
*/
|
||||
createAllCheckpointsIfNotExists(partitionIds: string[]): Promise<void>;
|
||||
/**
|
||||
* Gets the checkpoint info associated with the given partition. Could return undefined if no
|
||||
* checkpoint has been created for that partition.
|
||||
* @param {string} partitionId The partitionId to get the checkpoint info for.
|
||||
* @returns {Promise<CheckpointInfo | undefined>} Promise<CheckpointInfo | undefined> Checkpoint
|
||||
* info for the given partition, or undefined if none has been previously stored.
|
||||
*/
|
||||
getCheckpoint(partitionId: string): Promise<CheckpointInfo | undefined>;
|
||||
/**
|
||||
* Update the checkpoint in the store with the offset/sequenceNumber in the provided checkpoint.
|
||||
* @param {CompleteLease} lease Partition information against which to perform a checkpoint.
|
||||
* @param {CheckpointInfo} checkpoint offset/sequenceNumber to update the store with
|
||||
* @returns {Promise<void>} Promise<void> resolves with `undefined`; rejects with an `Error`.
|
||||
*/
|
||||
updateCheckpoint(lease: CompleteLease, checkpoint: CheckpointInfo): Promise<void>;
|
||||
/**
|
||||
* Delete the stored checkpoint for the given partition. If there is no stored checkpoint for the
|
||||
* given partition, that is treated as success. Deleting the checkpoint HOLDER is allowed
|
||||
* but not required; your implementation is free to do whichever is more convenient.
|
||||
* @param {string} partitionId The partitionId to delete the checkpoint from the store.
|
||||
* @returns {Promise<void>} Promise<void> resolves with `undefined`; rejects with an `Error`.
|
||||
*/
|
||||
deleteCheckpoint(partitionId: string): Promise<void>;
|
||||
}
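To make the contract above concrete, here is a minimal in-memory implementation sketch. It is illustrative only and assumes the relative import paths used elsewhere in this package; nothing is persisted or shared between hosts, so it is suitable only for single-host experiments.

import { CheckpointInfo } from "./checkpointInfo";
import { CheckpointManager } from "./checkpointManager";
import { CompleteLease } from "./completeLease";

export class InMemoryCheckpointManager implements CheckpointManager {
  private _checkpoints = new Map<string, CheckpointInfo>();
  private _storeExists: boolean = false;

  async checkpointStoreExists(): Promise<boolean> {
    return this._storeExists;
  }

  async createCheckpointStoreIfNotExists(): Promise<void> {
    this._storeExists = true;
  }

  async deleteCheckpointStore(): Promise<void> {
    this._checkpoints.clear();
    this._storeExists = false;
  }

  async createAllCheckpointsIfNotExists(partitionIds: string[]): Promise<void> {
    // Create empty holders only; a real checkpoint is written later via updateCheckpoint.
    for (const partitionId of partitionIds) {
      if (!this._checkpoints.has(partitionId)) {
        this._checkpoints.set(partitionId, CheckpointInfo.create(partitionId));
      }
    }
  }

  async getCheckpoint(partitionId: string): Promise<CheckpointInfo | undefined> {
    const checkpoint = this._checkpoints.get(partitionId);
    // Treat the "-1" default as "no checkpoint written yet".
    return checkpoint && checkpoint.offset !== "-1" ? checkpoint : undefined;
  }

  async updateCheckpoint(lease: CompleteLease, checkpoint: CheckpointInfo): Promise<void> {
    this._checkpoints.set(lease.partitionId, checkpoint);
  }

  async deleteCheckpoint(partitionId: string): Promise<void> {
    this._checkpoints.delete(partitionId);
  }
}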
|
|
@@ -1,60 +0,0 @@
|
|||
// Copyright (c) Microsoft Corporation. All rights reserved.
|
||||
// Licensed under the MIT License.
|
||||
|
||||
import * as log from "./log";
|
||||
import { BaseLease, BaseLeaseInfo } from "./baseLease";
|
||||
|
||||
/**
|
||||
* Describes the properties of a Complete Lease.
|
||||
* @interface CompleteLeaseInfo
|
||||
*/
|
||||
export interface CompleteLeaseInfo extends BaseLeaseInfo {
|
||||
/**
|
||||
* @property {number} epoch The epoch(time) of the lease, which is a value you can use to
|
||||
* determine the most recent owner of a partition between competing nodes.
|
||||
*/
|
||||
epoch: number;
|
||||
}
|
||||
|
||||
/**
|
||||
* Describes a Complete Lease.
|
||||
* @class CompleteLease.
|
||||
*/
|
||||
export class CompleteLease extends BaseLease {
|
||||
/**
|
||||
* @property {number} epoch The epoch(time) of the lease, which is a value you can use to
|
||||
* determine the most recent owner of a partition between competing nodes.
|
||||
*/
|
||||
epoch: number;
|
||||
|
||||
/**
|
||||
* Creates an instance of the Lease.
|
||||
* @constructor
|
||||
* @param {CompleteLeaseInfo} info The Lease info.
|
||||
*/
|
||||
constructor(info: CompleteLeaseInfo) {
|
||||
super(info);
|
||||
this.epoch = info.epoch != undefined ? info.epoch : -1;
|
||||
}
|
||||
|
||||
/**
|
||||
* Increments the value of epoch by 1.
|
||||
* @returns {number} The incremented value of the epoch.
|
||||
*/
|
||||
incrementEpoch(): number {
|
||||
const result = ++this.epoch;
|
||||
log.completeLease("[%s] [%s] New epoch for lease is %s.", this.owner, this.partitionId, result);
|
||||
return result;
|
||||
}
|
||||
|
||||
/**
|
||||
* Gets the lease information.
|
||||
* @returns {CompleteLeaseInfo} CompleteLeaseInfo.
|
||||
*/
|
||||
getInfo(): CompleteLeaseInfo {
|
||||
const info = super.getInfo() as CompleteLeaseInfo;
|
||||
info.epoch = this.epoch;
|
||||
log.completeLease("[%s] [%s] Lease info is: %o", this.owner, this.partitionId, info);
|
||||
return info;
|
||||
}
|
||||
}
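As a rough illustration of how the epoch is used, the snippet below simulates one host stealing a partition from another; the host names and starting epoch are made up for illustration.

import { CompleteLease } from "./completeLease";

// host-B takes over partition "0" from host-A. Bumping the epoch lets the
// service fence out the older receiver, since the higher epoch wins.
const lease = new CompleteLease({ partitionId: "0", owner: "host-A", epoch: 3 });
lease.owner = "host-B";
lease.incrementEpoch(); // epoch is now 4
console.log(lease.getInfo()); // { partitionId: "0", owner: "host-B", epoch: 4 }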
|
|
@@ -1,558 +0,0 @@
|
|||
// Copyright (c) Microsoft Corporation. All rights reserved.
|
||||
// Licensed under the MIT License.
|
||||
|
||||
import { v4 as uuid } from "uuid";
|
||||
import {
|
||||
TokenProvider,
|
||||
EventHubRuntimeInformation,
|
||||
EventHubPartitionRuntimeInformation,
|
||||
AadTokenProvider,
|
||||
EventHubClient
|
||||
} from "@azure/event-hubs";
|
||||
import {
|
||||
ApplicationTokenCredentials,
|
||||
UserTokenCredentials,
|
||||
DeviceTokenCredentials,
|
||||
MSITokenCredentials
|
||||
} from "@azure/ms-rest-nodeauth";
|
||||
import * as log from "./log";
|
||||
import { LeaseManager } from "./leaseManager";
|
||||
import { HostContext } from "./hostContext";
|
||||
import { CheckpointManager } from "./checkpointManager";
|
||||
import { validateType } from "./util/utils";
|
||||
import {
|
||||
FromConnectionStringOptions,
|
||||
EventProcessorHostOptions,
|
||||
FromTokenProviderOptions,
|
||||
OnReceivedMessage,
|
||||
OnReceivedError,
|
||||
FromIotHubConnectionStringOptions
|
||||
} from "./modelTypes";
|
||||
|
||||
/**
|
||||
* Describes the Event Processor Host to process events from an EventHub.
|
||||
* @class EventProcessorHost
|
||||
*/
|
||||
export class EventProcessorHost {
|
||||
/**
|
||||
* @property {HostContext} _context The processor host context.
|
||||
* @private
|
||||
*/
|
||||
private _context: HostContext;
|
||||
/**
|
||||
* Creates a new host to process events from an Event Hub.
|
||||
* @param {string} hostName Name of the processor host. MUST BE UNIQUE.
|
||||
* Strongly recommend including a Guid or a prefix with a guid to ensure uniqueness. You can use
|
||||
* `EventProcessorHost.createHostName("your-prefix")`; Default: `js-host-${uuid()}`.
|
||||
* @param {EventProcessorHostOptions} [options] Optional parameters for creating an
|
||||
* EventProcessorHost.
|
||||
*/
|
||||
constructor(hostName: string, options?: EventProcessorHostOptions) {
|
||||
if (!options) options = {};
|
||||
this._context = HostContext.create(hostName, options);
|
||||
}
|
||||
|
||||
/**
|
||||
* Provides the host name for the Event processor host.
|
||||
*/
|
||||
get hostName(): string {
|
||||
return this._context.hostName;
|
||||
}
|
||||
|
||||
/**
|
||||
* Provides the consumer group name for the Event processor host.
|
||||
*/
|
||||
get consumerGroup(): string {
|
||||
return this._context.consumerGroup;
|
||||
}
|
||||
|
||||
/**
|
||||
* Provides the eventhub runtime information.
|
||||
* @returns {Promise<EventHubRuntimeInformation>}
|
||||
*/
|
||||
getHubRuntimeInformation(): Promise<EventHubRuntimeInformation> {
|
||||
return this._context.getHubRuntimeInformation();
|
||||
}
|
||||
|
||||
/**
|
||||
* Provides information about the specified partition.
|
||||
* @param {(string|number)} partitionId Partition ID for which partition information is required.
|
||||
*
|
||||
* @returns {EventHubPartitionRuntimeInformation} EventHubPartitionRuntimeInformation
|
||||
*/
|
||||
getPartitionInformation(
|
||||
partitionId: string | number
|
||||
): Promise<EventHubPartitionRuntimeInformation> {
|
||||
return this._context.getPartitionInformation(partitionId);
|
||||
}
|
||||
|
||||
/**
|
||||
* Provides an array of partitionIds.
|
||||
* @returns {Promise<string[]>}
|
||||
*/
|
||||
getPartitionIds(): Promise<string[]> {
|
||||
return this._context.getPartitionIds();
|
||||
}
|
||||
|
||||
/**
|
||||
* Provides a list of partitions the EPH is currently receiving messages from.
|
||||
*
|
||||
* The EPH will try to grab leases for more partitions during each scan that happens once every
|
||||
* (configured) lease renew seconds. The number of EPH instances that are being run
|
||||
* simultaneously to receive messages from the same consumer group within an event hub also
|
||||
* influences the number of partitions that this instance of EPH is actively receiving messages
|
||||
* from.
|
||||
*
|
||||
* @returns {Array<string>} Array<string> List of partitions that this EPH instance is currently
|
||||
* receiving messages from.
|
||||
*/
|
||||
get receivingFromPartitions(): string[] {
|
||||
return Array.from(this._context.pumps.keys());
|
||||
}
|
||||
|
||||
/**
|
||||
* Starts the event processor host, fetching the list of partitions, and attempting to grab leases
|
||||
* For each successful lease, it will get the details from the blob and start a receiver at the
|
||||
* point where it left off previously.
|
||||
*
|
||||
* @return {Promise<void>}
|
||||
*/
|
||||
async start(onMessage: OnReceivedMessage, onError: OnReceivedError): Promise<void> {
|
||||
try {
|
||||
await this._context.partitionManager.start(onMessage, onError);
|
||||
} catch (err: any) {
|
||||
log.error(this._context.withHost("An error occurred while starting the EPH: %O"), err);
|
||||
this._context.onEphError(err);
|
||||
throw err;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Stops the EventProcessorHost from processing messages.
|
||||
* @return {Promise<void>}
|
||||
*/
|
||||
async stop(): Promise<void> {
|
||||
try {
|
||||
await this._context.partitionManager.stop();
|
||||
} catch (err: any) {
|
||||
log.error(this._context.withHost("An error occurred while stopping the EPH: %O"), err);
|
||||
this._context.onEphError(err);
|
||||
throw err;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Convenience method for generating unique host name.
|
||||
*
|
||||
* @param {string} [prefix] String to use as the beginning of the name. Default value: "js-host".
|
||||
* @return {string} A unique host name
|
||||
*/
|
||||
static createHostName(prefix?: string): string {
|
||||
if (!prefix) prefix = "js-host";
|
||||
return `${prefix}-${uuid()}`;
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates an EventProcessorHost instance from the EventHub connection string.
|
||||
*
|
||||
* @param {string} hostName Name of the processor host. MUST BE UNIQUE.
|
||||
* Strongly recommend including a Guid or a prefix with a guid to ensure uniqueness. You can use
|
||||
* `EventProcessorHost.createHostName("your-prefix")`; Default: `js-host-${uuid()}`.
|
||||
* @param {string} storageConnectionString Connection string to Azure Storage account used for
|
||||
* leases and checkpointing. Example DefaultEndpointsProtocol=https;AccountName=<account-name>;
|
||||
* AccountKey=<account-key>;EndpointSuffix=core.windows.net
|
||||
* @param {string} storageContainerName Azure Storage container name for use by built-in lease
|
||||
* and checkpoint manager.
|
||||
* @param {string} eventHubConnectionString Connection string for the Event Hub to receive from.
|
||||
* Example: 'Endpoint=sb://my-servicebus-namespace.servicebus.windows.net/;
|
||||
* SharedAccessKeyName=my-SA-name;SharedAccessKey=my-SA-key'
|
||||
* @param {FromConnectionStringOptions} [options] Optional parameters for creating an
|
||||
* EventProcessorHost.
|
||||
*
|
||||
* @returns {EventProcessorHost} EventProcessorHost
|
||||
*/
|
||||
static createFromConnectionString(
|
||||
hostName: string,
|
||||
storageConnectionString: string,
|
||||
storageContainerName: string,
|
||||
eventHubConnectionString: string,
|
||||
options?: FromConnectionStringOptions
|
||||
): EventProcessorHost {
|
||||
if (!options) options = {};
|
||||
|
||||
validateType("hostName", hostName, true, "string");
|
||||
validateType("storageConnectionString", storageConnectionString, true, "string");
|
||||
validateType("storageContainerName", storageContainerName, true, "string");
|
||||
validateType("eventHubConnectionString", eventHubConnectionString, true, "string");
|
||||
validateType("options", options, false, "object");
|
||||
|
||||
const ephOptions: EventProcessorHostOptions = {
|
||||
...options,
|
||||
storageConnectionString: storageConnectionString,
|
||||
storageContainerName: storageContainerName,
|
||||
eventHubConnectionString: eventHubConnectionString
|
||||
};
|
||||
return new EventProcessorHost(hostName, ephOptions);
|
||||
}
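A hypothetical end-to-end sketch of this factory follows. Every connection string, container name, and hub name is a placeholder, the import path is illustrative, and the option names (eventHubPath, consumerGroup) as well as the message/error callback shapes are assumptions based on the types referenced above rather than confirmed signatures.

import { EventProcessorHost } from "./eventProcessorHost";

async function main(): Promise<void> {
  const host = EventProcessorHost.createFromConnectionString(
    EventProcessorHost.createHostName("sample"),
    "<storage-connection-string>",
    "sample-lease-container",
    "<event-hub-connection-string>",
    { eventHubPath: "sample-hub", consumerGroup: "$Default" }
  );

  await host.start(
    (context, eventData) => {
      console.log("Event from partition '%s': %o", context.partitionId, eventData.body);
    },
    (error) => {
      console.error("Error while receiving: %O", error);
    }
  );

  // Let the host pump events for a minute, then shut down cleanly.
  await new Promise((resolve) => setTimeout(resolve, 60 * 1000));
  await host.stop();
}

main().catch((err) => {
  console.error("Sample failed: %O", err);
});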
|
||||
|
||||
/**
|
||||
* Creates an EventProcessorHost instance from the EventHub connection string with the provided
|
||||
* checkpoint manager and lease manager.
|
||||
*
|
||||
* @param {string} hostName Name of the processor host. MUST BE UNIQUE.
|
||||
* Strongly recommend including a Guid or a prefix with a guid to ensure uniqueness. You can use
|
||||
* `EventProcessorHost.createHostName("your-prefix")`; Default: `js-host-${uuid()}`.
|
||||
* @param {string} eventHubConnectionString Connection string for the Event Hub to receive from.
|
||||
* Example: 'Endpoint=sb://my-servicebus-namespace.servicebus.windows.net/;
|
||||
* SharedAccessKeyName=my-SA-name;SharedAccessKey=my-SA-key'
|
||||
* @param {CheckpointManager} checkpointManager A manager to manage checkpoints.
|
||||
* @param {LeaseManager} leaseManager A manager to manage leases.
|
||||
* @param {FromConnectionStringOptions} [options] Optional parameters for creating an
|
||||
* EventProcessorHost.
|
||||
*
|
||||
* @returns {EventProcessorHost} EventProcessorHost
|
||||
*/
|
||||
static createFromConnectionStringWithCustomCheckpointAndLeaseManager(
|
||||
hostName: string,
|
||||
eventHubConnectionString: string,
|
||||
checkpointManager: CheckpointManager,
|
||||
leaseManager: LeaseManager,
|
||||
options?: FromConnectionStringOptions
|
||||
): EventProcessorHost {
|
||||
if (!options) options = {};
|
||||
|
||||
validateType("hostName", hostName, true, "string");
|
||||
validateType("eventHubConnectionString", eventHubConnectionString, true, "string");
|
||||
validateType("checkpointManager", checkpointManager, true, "object");
|
||||
validateType("leaseManager", leaseManager, true, "object");
|
||||
validateType("options", options, false, "object");
|
||||
|
||||
const ephOptions: EventProcessorHostOptions = {
|
||||
...options,
|
||||
eventHubConnectionString: eventHubConnectionString,
|
||||
checkpointManager: checkpointManager,
|
||||
leaseManager: leaseManager
|
||||
};
|
||||
return new EventProcessorHost(hostName, ephOptions);
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates an EventProcessorHost instance from a TokenProvider.
|
||||
*
|
||||
* @param {string} hostName Name of the processor host. MUST BE UNIQUE.
|
||||
* Strongly recommend including a Guid or a prefix with a guid to ensure uniqueness. You can use
|
||||
* `EventProcessorHost.createHostName("your-prefix")`; Default: `js-host-${uuid()}`.
|
||||
* @param {string} storageConnectionString Connection string to Azure Storage account used for
|
||||
* leases and checkpointing. Example DefaultEndpointsProtocol=https;AccountName=<account-name>;
|
||||
* AccountKey=<account-key>;EndpointSuffix=core.windows.net
|
||||
* @param {string} storageContainerName Azure Storage container name for use by built-in lease
|
||||
* and checkpoint manager.
|
||||
* @param {string} namespace Fully qualified domain name for Event Hubs.
|
||||
* Example: "{your-sb-namespace}.servicebus.windows.net"
|
||||
* @param {string} eventHubPath The name of the EventHub.
|
||||
* @param {TokenProvider} tokenProvider - Your token provider that implements the TokenProvider interface.
|
||||
* @param {FromTokenProviderOptions} [options] Optional parameters for creating an
|
||||
* EventProcessorHost.
|
||||
*
|
||||
* @returns {EventProcessorHost} EventProcessorHost
|
||||
*/
|
||||
static createFromTokenProvider(
|
||||
hostName: string,
|
||||
storageConnectionString: string,
|
||||
storageContainerName: string,
|
||||
namespace: string,
|
||||
eventHubPath: string,
|
||||
tokenProvider: TokenProvider,
|
||||
options?: FromTokenProviderOptions
|
||||
): EventProcessorHost {
|
||||
if (!options) options = {};
|
||||
|
||||
validateType("hostName", hostName, true, "string");
|
||||
validateType("storageConnectionString", storageConnectionString, true, "string");
|
||||
validateType("storageContainerName", storageContainerName, true, "string");
|
||||
validateType("namespace", namespace, true, "string");
|
||||
validateType("eventHubPath", eventHubPath, true, "string");
|
||||
validateType("tokenProvider", tokenProvider, true, "object");
|
||||
validateType("options", options, false, "object");
|
||||
|
||||
if (!namespace.endsWith("/")) namespace += "/";
|
||||
const connectionString =
|
||||
`Endpoint=sb://${namespace};SharedAccessKeyName=defaultKeyName;` +
|
||||
`SharedAccessKey=defaultKeyValue;EntityPath=${eventHubPath}`;
|
||||
const ephOptions: EventProcessorHostOptions = {
|
||||
...options,
|
||||
tokenProvider: tokenProvider,
|
||||
storageConnectionString: storageConnectionString,
|
||||
storageContainerName: storageContainerName,
|
||||
eventHubPath: eventHubPath,
|
||||
eventHubConnectionString: connectionString
|
||||
};
|
||||
return new EventProcessorHost(hostName, ephOptions);
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates an EventProcessorHost instance from a TokenProvider with the provided checkpoint manager
|
||||
* and lease manager.
|
||||
*
|
||||
* @param {string} hostName Name of the processor host. MUST BE UNIQUE.
|
||||
* Strongly recommend including a Guid or a prefix with a guid to ensure uniqueness. You can use
|
||||
* `EventProcessorHost.createHostName("your-prefix")`; Default: `js-host-${uuid()}`.
|
||||
* @param {string} namespace Fully qualified domain name for Event Hubs.
|
||||
* Example: "{your-sb-namespace}.servicebus.windows.net"
|
||||
* @param {string} eventHubPath The name of the EventHub.
|
||||
* @param {TokenProvider} tokenProvider - Your token provider that implements the TokenProvider interface.
|
||||
* @param {CheckpointManager} checkpointManager A manager to manage checkpoints.
|
||||
* @param {LeaseManager} leaseManager A manager to manage leases.
|
||||
* @param {FromTokenProviderOptions} [options] Optional parameters for creating an
|
||||
* EventProcessorHost.
|
||||
*
|
||||
* @returns {EventProcessorHost} EventProcessorHost
|
||||
*/
|
||||
static createFromTokenProviderWithCustomCheckpointAndLeaseManager(
|
||||
hostName: string,
|
||||
namespace: string,
|
||||
eventHubPath: string,
|
||||
tokenProvider: TokenProvider,
|
||||
checkpointManager: CheckpointManager,
|
||||
leaseManager: LeaseManager,
|
||||
options?: FromTokenProviderOptions
|
||||
): EventProcessorHost {
|
||||
if (!options) options = {};
|
||||
|
||||
validateType("hostName", hostName, true, "string");
|
||||
validateType("namespace", namespace, true, "string");
|
||||
validateType("eventHubPath", eventHubPath, true, "string");
|
||||
validateType("tokenProvider", tokenProvider, true, "object");
|
||||
validateType("checkpointManager", checkpointManager, true, "object");
|
||||
validateType("leaseManager", leaseManager, true, "object");
|
||||
validateType("options", options, false, "object");
|
||||
|
||||
if (!namespace.endsWith("/")) namespace += "/";
|
||||
const connectionString =
|
||||
`Endpoint=sb://${namespace};SharedAccessKeyName=defaultKeyName;` +
|
||||
`SharedAccessKey=defaultKeyValue;EntityPath=${eventHubPath}`;
|
||||
const ephOptions: EventProcessorHostOptions = {
|
||||
...options,
|
||||
tokenProvider: tokenProvider,
|
||||
eventHubPath: eventHubPath,
|
||||
eventHubConnectionString: connectionString,
|
||||
checkpointManager: checkpointManager,
|
||||
leaseManager: leaseManager
|
||||
};
|
||||
return new EventProcessorHost(hostName, ephOptions);
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates an EventProcessorHost instance from AAD token credentials.
|
||||
*
|
||||
* @param {string} hostName Name of the processor host. MUST BE UNIQUE.
|
||||
* Strongly recommend including a Guid or a prefix with a guid to ensure uniqueness. You can use
|
||||
* `EventProcessorHost.createHostName("your-prefix")`; Default: `js-host-${uuid()}`.
|
||||
* @param {string} storageConnectionString Connection string to Azure Storage account used for
|
||||
* leases and checkpointing. Example DefaultEndpointsProtocol=https;AccountName=<account-name>;
|
||||
* AccountKey=<account-key>;EndpointSuffix=core.windows.net
|
||||
* @param {string} storageContainerName Azure Storage container name for use by built-in lease
|
||||
* and checkpoint manager.
|
||||
* @param {string} namespace Fully qualified domain name for Event Hubs.
|
||||
* Example: "{your-sb-namespace}.servicebus.windows.net"
|
||||
* @param {string} eventHubPath The name of the EventHub.
|
||||
* @param {TokenCredentials} credentials - The AAD Token credentials. It can be one of the
|
||||
* following: ApplicationTokenCredentials | UserTokenCredentials | DeviceTokenCredentials
|
||||
* | MSITokenCredentials.
|
||||
* @param {FromTokenProviderOptions} [options] Optional parameters for creating an
|
||||
* EventProcessorHost.
|
||||
*
|
||||
* @returns {EventProcessorHost} EventProcessorHost
|
||||
*/
|
||||
static createFromAadTokenCredentials(
|
||||
hostName: string,
|
||||
storageConnectionString: string,
|
||||
storageContainerName: string,
|
||||
namespace: string,
|
||||
eventHubPath: string,
|
||||
credentials:
|
||||
| ApplicationTokenCredentials
|
||||
| UserTokenCredentials
|
||||
| DeviceTokenCredentials
|
||||
| MSITokenCredentials,
|
||||
options?: FromTokenProviderOptions
|
||||
): EventProcessorHost {
|
||||
if (!options) options = {};
|
||||
|
||||
validateType("hostName", hostName, true, "string");
|
||||
validateType("storageConnectionString", storageConnectionString, true, "string");
|
||||
validateType("storageContainerName", storageContainerName, true, "string");
|
||||
validateType("namespace", namespace, true, "string");
|
||||
validateType("eventHubPath", eventHubPath, true, "string");
|
||||
validateType("credentials", credentials, true, "object");
|
||||
validateType("options", options, false, "object");
|
||||
|
||||
if (!namespace.endsWith("/")) namespace += "/";
|
||||
const connectionString =
|
||||
`Endpoint=sb://${namespace};SharedAccessKeyName=defaultKeyName;` +
|
||||
`SharedAccessKey=defaultKeyValue;EntityPath=${eventHubPath}`;
|
||||
|
||||
const ephOptions: EventProcessorHostOptions = {
|
||||
...options,
|
||||
tokenProvider: new AadTokenProvider(credentials),
|
||||
storageConnectionString: storageConnectionString,
|
||||
storageContainerName: storageContainerName,
|
||||
eventHubPath: eventHubPath,
|
||||
eventHubConnectionString: connectionString
|
||||
};
|
||||
return new EventProcessorHost(hostName, ephOptions);
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates an EventProcessorHost instance from AAD token credentials with the given checkpoint manager
|
||||
* and lease manager.
|
||||
*
|
||||
* @param {string} hostName Name of the processor host. MUST BE UNIQUE.
|
||||
* Strongly recommend including a Guid or a prefix with a guid to ensure uniqueness. You can use
|
||||
* `EventProcessorHost.createHostName("your-prefix")`; Default: `js-host-${uuid()}`.
|
||||
* @param {string} namespace Fully qualified domain name for Event Hubs.
|
||||
* Example: "{your-sb-namespace}.servicebus.windows.net"
|
||||
* @param {string} eventHubPath The name of the EventHub.
|
||||
* @param {TokenCredentials} credentials - The AAD Token credentials. It can be one of the
|
||||
* following: ApplicationTokenCredentials | UserTokenCredentials | DeviceTokenCredentials
|
||||
* | MSITokenCredentials.
|
||||
* @param {CheckpointManager} checkpointManager A manager to manage checkpoints.
|
||||
* @param {LeaseManager} leaseManager A manager to manage leases.
|
||||
* @param {FromTokenProviderOptions} [options] Optional parameters for creating an
|
||||
* EventProcessorHost.
|
||||
*
|
||||
* @returns {EventProcessorHost} EventProcessorHost
|
||||
*/
|
||||
static createFromAadTokenCredentialsWithCustomCheckpointAndLeaseManager(
|
||||
hostName: string,
|
||||
namespace: string,
|
||||
eventHubPath: string,
|
||||
credentials:
|
||||
| ApplicationTokenCredentials
|
||||
| UserTokenCredentials
|
||||
| DeviceTokenCredentials
|
||||
| MSITokenCredentials,
|
||||
checkpointManager: CheckpointManager,
|
||||
leaseManager: LeaseManager,
|
||||
options?: FromTokenProviderOptions
|
||||
): EventProcessorHost {
|
||||
if (!options) options = {};
|
||||
|
||||
validateType("hostName", hostName, true, "string");
|
||||
validateType("namespace", namespace, true, "string");
|
||||
validateType("eventHubPath", eventHubPath, true, "string");
|
||||
validateType("credentials", credentials, true, "object");
|
||||
validateType("checkpointManager", checkpointManager, true, "object");
|
||||
validateType("leaseManager", leaseManager, true, "object");
|
||||
validateType("options", options, false, "object");
|
||||
|
||||
if (!namespace.endsWith("/")) namespace += "/";
|
||||
const connectionString =
|
||||
`Endpoint=sb://${namespace};SharedAccessKeyName=defaultKeyName;` +
|
||||
`SharedAccessKey=defaultKeyValue;EntityPath=${eventHubPath}`;
|
||||
const ephOptions: EventProcessorHostOptions = {
|
||||
...options,
|
||||
tokenProvider: new AadTokenProvider(credentials),
|
||||
eventHubPath: eventHubPath,
|
||||
eventHubConnectionString: connectionString,
|
||||
checkpointManager: checkpointManager,
|
||||
leaseManager: leaseManager
|
||||
};
|
||||
return new EventProcessorHost(hostName, ephOptions);
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates an EventProcessorHost instance from the IotHub connection string.
|
||||
*
|
||||
* @param {string} hostName Name of the processor host. MUST BE UNIQUE.
|
||||
* Strongly recommend including a Guid or a prefix with a guid to ensure uniqueness. You can use
|
||||
* `EventProcessorHost.createHostName("your-prefix")`; Default: `js-host-${uuid()}`.
|
||||
* @param {string} storageConnectionString Connection string to Azure Storage account used for
|
||||
* leases and checkpointing. Example DefaultEndpointsProtocol=https;AccountName=<account-name>;
|
||||
* AccountKey=<account-key>;EndpointSuffix=core.windows.net
|
||||
* @param {string} storageContainerName Azure Storage container name for use by built-in lease
|
||||
* and checkpoint manager.
|
||||
* @param {string} iotHubConnectionString Connection string for the IotHub.
|
||||
* Example: 'Endpoint=iot-host-name;SharedAccessKeyName=my-SA-name;SharedAccessKey=my-SA-key'
|
||||
* @param {FromIotHubConnectionStringOptions} [options] Optional parameters for creating an
|
||||
* EventProcessorHost.
|
||||
*
|
||||
* @returns {EventProcessorHost} EventProcessorHost
|
||||
*/
|
||||
static async createFromIotHubConnectionString(
|
||||
hostName: string,
|
||||
storageConnectionString: string,
|
||||
storageContainerName: string,
|
||||
iotHubConnectionString: string,
|
||||
options?: FromIotHubConnectionStringOptions
|
||||
): Promise<EventProcessorHost> {
|
||||
if (!options) options = {};
|
||||
|
||||
validateType("hostName", hostName, true, "string");
|
||||
validateType("storageConnectionString", storageConnectionString, true, "string");
|
||||
validateType("storageContainerName", storageContainerName, true, "string");
|
||||
validateType("iotHubConnectionString", iotHubConnectionString, true, "string");
|
||||
validateType("options", options, false, "object");
|
||||
|
||||
const client = await EventHubClient.createFromIotHubConnectionString(iotHubConnectionString);
|
||||
/* tslint:disable:no-string-literal */
|
||||
const eventHubConnectionString = client["_context"].config.connectionString;
|
||||
const ephOptions: EventProcessorHostOptions = {
|
||||
...options,
|
||||
storageConnectionString: storageConnectionString,
|
||||
storageContainerName: storageContainerName,
|
||||
eventHubConnectionString: eventHubConnectionString,
|
||||
eventHubPath: client.eventhubName
|
||||
};
|
||||
return new EventProcessorHost(hostName, ephOptions);
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates an EventProcessorHost instance from the IotHub connection string with the given
|
||||
* checkpoint manager and lease manager.
|
||||
*
|
||||
* @param {string} hostName Name of the processor host. MUST BE UNIQUE.
|
||||
* Strongly recommend including a Guid or a prefix with a guid to ensure uniqueness. You can use
|
||||
* `EventProcessorHost.createHostName("your-prefix")`; Default: `js-host-${uuid()}`.
|
||||
* @param {string} iotHubConnectionString Connection string for the IotHub.
|
||||
* Example: 'Endpoint=iot-host-name;SharedAccessKeyName=my-SA-name;SharedAccessKey=my-SA-key'
|
||||
* @param {CheckpointManager} checkpointManager A manager to manage checkpoints.
|
||||
* @param {LeaseManager} leaseManager A manager to manage leases.
|
||||
* @param {FromIotHubConnectionStringOptions} [options] Optional parameters for creating an
|
||||
* EventProcessorHost.
|
||||
*
|
||||
* @returns {EventProcessorHost} EventProcessorHost
|
||||
*/
|
||||
static async createFromIotHubConnectionStringWithCustomCheckpointAndLeaseManager(
|
||||
hostName: string,
|
||||
iotHubConnectionString: string,
|
||||
checkpointManager: CheckpointManager,
|
||||
leaseManager: LeaseManager,
|
||||
options?: FromIotHubConnectionStringOptions
|
||||
): Promise<EventProcessorHost> {
|
||||
if (!options) options = {};
|
||||
|
||||
validateType("hostName", hostName, true, "string");
|
||||
validateType("iotHubConnectionString", iotHubConnectionString, true, "string");
|
||||
validateType("checkpointManager", checkpointManager, true, "object");
|
||||
validateType("leaseManager", leaseManager, true, "object");
|
||||
validateType("options", options, false, "object");
|
||||
|
||||
const client = await EventHubClient.createFromIotHubConnectionString(iotHubConnectionString);
|
||||
/* tslint:disable:no-string-literal */
|
||||
const eventHubConnectionString = client["_context"].config.connectionString;
|
||||
|
||||
const ephOptions: EventProcessorHostOptions = {
|
||||
...options,
|
||||
eventHubConnectionString: eventHubConnectionString,
|
||||
checkpointManager: checkpointManager,
|
||||
leaseManager: leaseManager,
|
||||
eventHubPath: client.eventhubName
|
||||
};
|
||||
return new EventProcessorHost(hostName, ephOptions);
|
||||
}
|
||||
}
|
|
@ -1,358 +0,0 @@
|
|||
// Copyright (c) Microsoft Corporation. All rights reserved.
|
||||
// Licensed under the MIT License.
|
||||
|
||||
import os from "os";
|
||||
import { v4 as uuid } from "uuid";
|
||||
import {
|
||||
EventHubClient,
|
||||
EventPosition,
|
||||
TokenProvider,
|
||||
DefaultDataTransformer,
|
||||
Dictionary,
|
||||
EventHubRuntimeInformation,
|
||||
EventHubPartitionRuntimeInformation,
|
||||
EventHubConnectionConfig
|
||||
} from "@azure/event-hubs";
|
||||
import AsyncLock from "async-lock";
|
||||
import { LeaseManager } from "./leaseManager";
|
||||
import { PumpManager } from "./pumpManager";
|
||||
import { PartitionManager } from "./partitionManager";
|
||||
import { PartitionScanner } from "./partitionScanner";
|
||||
import { BlobService } from "./blobService";
|
||||
import { AzureBlob } from "./azureBlob";
|
||||
import { AzureStorageCheckpointLeaseManager } from "./azureStorageCheckpointLeaseManager";
|
||||
import { CheckpointManager } from "./checkpointManager";
|
||||
import { validateType } from "./util/utils";
|
||||
import { PartitionContext } from "./partitionContext";
|
||||
import { BaseLease } from "./baseLease";
|
||||
import { PartitionPump } from "./partitionPump";
|
||||
import {
|
||||
EventProcessorHostOptions,
|
||||
OnEphError,
|
||||
OnReceivedMessage,
|
||||
OnReceivedError
|
||||
} from "./modelTypes";
|
||||
import {
|
||||
maxLeaseDurationInSeconds,
|
||||
minLeaseDurationInSeconds,
|
||||
defaultLeaseRenewIntervalInSeconds,
|
||||
defaultLeaseDurationInSeconds,
|
||||
defaultStartupScanDelayInSeconds,
|
||||
packageInfo,
|
||||
defaultFastScanIntervalInSeconds,
|
||||
defaultSlowScanIntervalInSeconds,
|
||||
defaultConsumerGroup
|
||||
} from "./util/constants";
|
||||
|
||||
/**
|
||||
* @hidden
|
||||
*/
|
||||
export interface BaseHostContext {
|
||||
hostName: string;
|
||||
checkpointLock: AsyncLock;
|
||||
checkpointLockId: string;
|
||||
consumerGroup: string;
|
||||
eventHubPath: string;
|
||||
storageContainerName?: string;
|
||||
eventHubConnectionString: string;
|
||||
connectionConfig: EventHubConnectionConfig;
|
||||
onEphError: OnEphError;
|
||||
leaseRenewInterval: number;
|
||||
leaseDuration: number;
|
||||
partitionIds: string[];
|
||||
blobReferenceByPartition: Dictionary<AzureBlob>;
|
||||
storageConnectionString?: string;
|
||||
tokenProvider?: TokenProvider;
|
||||
initialOffset?: EventPosition;
|
||||
storageBlobPrefix?: string;
|
||||
blobService?: BlobService;
|
||||
composedBlobPrefix: string;
|
||||
onMessage?: OnReceivedMessage;
|
||||
onError?: OnReceivedError;
|
||||
startupScanDelay?: number;
|
||||
fastScanInterval?: number;
|
||||
slowScanInterval?: number;
|
||||
pumps: Map<string, PartitionPump>;
|
||||
userAgent: string;
|
||||
withHost(msg: string): string;
|
||||
withHostAndPartition(partition: string | { partitionId: string }, msg: string): string;
|
||||
}
|
||||
|
||||
/**
|
||||
* @hidden
|
||||
*/
|
||||
export interface HostContextWithCheckpointLeaseManager extends BaseHostContext {
|
||||
leaseManager: LeaseManager;
|
||||
checkpointManager: CheckpointManager;
|
||||
getEventHubClient(): EventHubClient;
|
||||
getHubRuntimeInformation(): Promise<EventHubRuntimeInformation>;
|
||||
getPartitionInformation(
|
||||
partitionId: string | number
|
||||
): Promise<EventHubPartitionRuntimeInformation>;
|
||||
getPartitionIds(): Promise<string[]>;
|
||||
}
|
||||
|
||||
export interface HostContextWithPumpManager extends HostContextWithCheckpointLeaseManager {
|
||||
pumpManager: PumpManager;
|
||||
}
|
||||
|
||||
export interface HostContext extends HostContextWithPumpManager {
|
||||
partitionManager: PartitionManager;
|
||||
partitionScanner: PartitionScanner;
|
||||
}
|
||||
|
||||
/**
|
||||
* @hidden
|
||||
*/
|
||||
export namespace HostContext {
|
||||
function _validateLeaseDurationAndRenewInterval(duration: number, interval: number): void {
|
||||
validateType("leaseDuration", duration, true, "number");
|
||||
validateType("leaseRenewInterval", interval, true, "number");
|
||||
|
||||
if (duration <= interval) {
|
||||
throw new Error(
|
||||
`Lease duration ${duration} needs to be greater than lease ` + `renew interval ${interval}.`
|
||||
);
|
||||
}
|
||||
|
||||
if (duration > maxLeaseDurationInSeconds || duration < minLeaseDurationInSeconds) {
|
||||
throw new Error(
|
||||
`Lease duration needs to be between ${minLeaseDurationInSeconds} ` +
|
||||
`seconds and ${maxLeaseDurationInSeconds} seconds. The given value is: ${duration} seconds.`
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
function _validatestorageContainerName(name: string): void {
|
||||
if (!name || name.match(/^[a-z0-9](([a-z0-9\-[^\-])){1,61}[a-z0-9]$/gi) === null) {
|
||||
throw new Error(
|
||||
`Azure Storage lease container name "${name}" is invalid. Please check ` +
|
||||
`naming conventions at https://msdn.microsoft.com/en-us/library/azure/dd135715.aspx`
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
function _eitherStorageConnectionStringOrCheckpointLeaseManager(
|
||||
options: EventProcessorHostOptions
|
||||
): void {
|
||||
validateType("options", options, true, "object");
|
||||
const checkpointManager = options.checkpointManager;
|
||||
const leaseManager = options.leaseManager;
|
||||
const storageConnectionString = options.storageConnectionString;
|
||||
if (storageConnectionString) {
|
||||
if (checkpointManager || leaseManager) {
|
||||
throw new Error(
|
||||
"Either provide ('checkpointManager' and 'leaseManager') or " +
|
||||
"provide 'storageConnectionString'."
|
||||
);
|
||||
}
|
||||
} else if (!(checkpointManager && leaseManager)) {
|
||||
throw new Error(
|
||||
"Either provide ('checkpointManager' and 'leaseManager') or " +
|
||||
"provide 'storageConnectionString'."
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
function _createBase(hostName: string, options: EventProcessorHostOptions): BaseHostContext {
|
||||
validateType("hostName", hostName, true, "string");
|
||||
|
||||
const onEphErrorFunc: OnEphError = () => {
|
||||
// do nothing
|
||||
};
|
||||
const config = EventHubConnectionConfig.create(
|
||||
options.eventHubConnectionString!,
|
||||
options.eventHubPath
|
||||
);
|
||||
|
||||
// set defaults
|
||||
if (!options.consumerGroup) options.consumerGroup = defaultConsumerGroup;
|
||||
if (!options.eventHubPath) options.eventHubPath = config.entityPath;
|
||||
if (!options.onEphError) options.onEphError = onEphErrorFunc;
|
||||
if (!options.dataTransformer) options.dataTransformer = new DefaultDataTransformer();
|
||||
if (!options.startupScanDelay) options.startupScanDelay = defaultStartupScanDelayInSeconds;
|
||||
if (!options.fastScanInterval) options.fastScanInterval = defaultFastScanIntervalInSeconds;
|
||||
if (!options.slowScanInterval) options.slowScanInterval = defaultSlowScanIntervalInSeconds;
|
||||
|
||||
validateType("options", options, true, "object");
|
||||
validateType("options.eventHubPath", options.eventHubPath, true, "string");
|
||||
validateType(
|
||||
"options.eventHubConnectionString",
|
||||
options.eventHubConnectionString,
|
||||
true,
|
||||
"string"
|
||||
);
|
||||
validateType(
|
||||
"options.storageConnectionString",
|
||||
options.storageConnectionString,
|
||||
false,
|
||||
"string"
|
||||
);
|
||||
validateType("options.initialOffset", options.initialOffset, false, "object");
|
||||
validateType("options.consumerGroup", options.consumerGroup, false, "string");
|
||||
validateType("options.storageContainerName", options.storageContainerName, false, "string");
|
||||
validateType("options.storageBlobPrefix", options.storageBlobPrefix, false, "string");
|
||||
validateType("options.onEphError", options.onEphError, false, "function");
|
||||
_eitherStorageConnectionStringOrCheckpointLeaseManager(options);
|
||||
|
||||
if (options.leaseManager) {
|
||||
options.leaseDuration = options.leaseManager.leaseDuration;
|
||||
options.leaseRenewInterval = options.leaseManager.leaseRenewInterval;
|
||||
}
|
||||
if (!options.leaseRenewInterval)
|
||||
options.leaseRenewInterval = defaultLeaseRenewIntervalInSeconds;
|
||||
if (!options.leaseDuration) options.leaseDuration = defaultLeaseDurationInSeconds;
|
||||
|
||||
validateType("options.leaseRenewInterval", options.leaseRenewInterval, false, "number");
|
||||
validateType("options.leaseDuration", options.leaseDuration, false, "number");
|
||||
|
||||
const context: BaseHostContext = {
|
||||
hostName: hostName,
|
||||
checkpointLock: new AsyncLock({ maxPending: 100000 }),
|
||||
checkpointLockId: `checkpoint-${uuid()}`,
|
||||
eventHubConnectionString: options.eventHubConnectionString!,
|
||||
connectionConfig: config,
|
||||
eventHubPath: options.eventHubPath!,
|
||||
tokenProvider: options.tokenProvider,
|
||||
blobReferenceByPartition: {},
|
||||
partitionIds: [],
|
||||
pumps: new Map<string, PartitionPump>(),
|
||||
consumerGroup: options.consumerGroup,
|
||||
storageContainerName: options.storageContainerName,
|
||||
leaseRenewInterval: options.leaseRenewInterval,
|
||||
leaseDuration: options.leaseDuration,
|
||||
initialOffset: options.initialOffset,
|
||||
storageBlobPrefix: options.storageBlobPrefix,
|
||||
composedBlobPrefix: options.storageBlobPrefix
|
||||
? `${options.storageBlobPrefix.trim()}${options.consumerGroup}/`
|
||||
: `${options.consumerGroup}/`,
|
||||
onEphError: options.onEphError,
|
||||
startupScanDelay: options.startupScanDelay,
|
||||
fastScanInterval: options.fastScanInterval,
|
||||
slowScanInterval: options.slowScanInterval,
|
||||
userAgent: getUserAgent(options),
|
||||
withHost: (msg: string) => {
|
||||
return `[${hostName}] ${msg}`;
|
||||
},
|
||||
withHostAndPartition: (partition: string | PartitionContext | BaseLease, msg: string) => {
|
||||
let id: string = "N/A";
|
||||
if (typeof partition === "string") {
|
||||
id = partition;
|
||||
} else if (typeof partition === "object") {
|
||||
id = partition.partitionId;
|
||||
}
|
||||
return `[${hostName}] [${id}] ${msg}`;
|
||||
}
|
||||
};
|
||||
|
||||
if (options.storageConnectionString) {
|
||||
context.storageConnectionString = options.storageConnectionString;
|
||||
context.blobService = BlobService.create(hostName, options.storageConnectionString);
|
||||
}
|
||||
|
||||
_validateLeaseDurationAndRenewInterval(context.leaseDuration, context.leaseRenewInterval);
|
||||
if (context.storageContainerName) _validatestorageContainerName(context.storageContainerName);
|
||||
return context;
|
||||
}
|
||||
|
||||
function _createWithCheckpointLeaseManager(
|
||||
hostName: string,
|
||||
options: EventProcessorHostOptions
|
||||
): HostContextWithCheckpointLeaseManager {
|
||||
const ctxt = _createBase(hostName, options) as HostContextWithCheckpointLeaseManager;
|
||||
const checkpointLeaseManager = new AzureStorageCheckpointLeaseManager(ctxt);
|
||||
ctxt.leaseManager = options.leaseManager || checkpointLeaseManager;
|
||||
ctxt.checkpointManager = options.checkpointManager || checkpointLeaseManager;
|
||||
ctxt.getEventHubClient = () => {
|
||||
if (ctxt.tokenProvider) {
|
||||
return EventHubClient.createFromTokenProvider(
|
||||
ctxt.connectionConfig.host,
|
||||
ctxt.eventHubPath,
|
||||
ctxt.tokenProvider,
|
||||
{
|
||||
userAgent: ctxt.userAgent,
|
||||
webSocket: options && options.webSocket,
|
||||
webSocketConstructorOptions: options && options.webSocketConstructorOptions
|
||||
}
|
||||
);
|
||||
} else {
|
||||
return EventHubClient.createFromConnectionString(
|
||||
ctxt.eventHubConnectionString,
|
||||
ctxt.eventHubPath,
|
||||
{
|
||||
userAgent: ctxt.userAgent,
|
||||
webSocket: options && options.webSocket,
|
||||
webSocketConstructorOptions: options && options.webSocketConstructorOptions
|
||||
}
|
||||
);
|
||||
}
|
||||
};
|
||||
ctxt.getHubRuntimeInformation = async () => {
|
||||
const client = ctxt.getEventHubClient();
|
||||
try {
|
||||
return await client.getHubRuntimeInformation();
|
||||
} finally {
|
||||
client.close().catch(/* do nothing */);
|
||||
}
|
||||
};
|
||||
ctxt.getPartitionInformation = async (id: string | number) => {
|
||||
const client = ctxt.getEventHubClient();
|
||||
try {
|
||||
return await client.getPartitionInformation(id);
|
||||
} finally {
|
||||
client.close().catch(/* do nothing */);
|
||||
}
|
||||
};
|
||||
ctxt.getPartitionIds = async () => {
|
||||
if (!ctxt.partitionIds.length) {
|
||||
const client = ctxt.getEventHubClient();
|
||||
try {
|
||||
ctxt.partitionIds = await client.getPartitionIds();
|
||||
} finally {
|
||||
client.close().catch(/* do nothing */);
|
||||
}
|
||||
}
|
||||
return ctxt.partitionIds;
|
||||
};
|
||||
return ctxt;
|
||||
}
|
||||
|
||||
function _createWithPumpManager(
|
||||
hostName: string,
|
||||
options: EventProcessorHostOptions
|
||||
): HostContextWithPumpManager {
|
||||
const context = _createWithCheckpointLeaseManager(
|
||||
hostName,
|
||||
options
|
||||
) as HostContextWithPumpManager;
|
||||
context.pumpManager = new PumpManager(context);
|
||||
return context;
|
||||
}
|
||||
|
||||
/**
|
||||
* @property {string} userAgent The user agent string for the EventHubs client.
|
||||
* See guideline at https://github.com/Azure/azure-sdk/blob/main/docs/design/Telemetry.mdk
|
||||
*/
|
||||
const userAgent: string = `azsdk-js-azureeventprocessorhost/${
|
||||
packageInfo.version
|
||||
} (NODE-VERSION ${process.version}; ${os.type()} ${os.release()})`;
|
||||
|
||||
/**
|
||||
* @hidden
|
||||
*/
|
||||
export function getUserAgent(options: EventProcessorHostOptions): string {
|
||||
const finalUserAgent = options.userAgent ? `${userAgent},${options.userAgent}` : userAgent;
|
||||
return finalUserAgent;
|
||||
}
|
||||
|
||||
/**
|
||||
* @hidden
|
||||
*/
|
||||
export function create(hostName: string, options: EventProcessorHostOptions): HostContext {
|
||||
const context = _createWithPumpManager(hostName, options);
|
||||
const hostContext = context as HostContext;
|
||||
hostContext.partitionManager = new PartitionManager(context);
|
||||
hostContext.partitionScanner = new PartitionScanner(context);
|
||||
return hostContext;
|
||||
}
|
||||
}
|
|
@ -1,30 +0,0 @@
|
|||
// Copyright (c) Microsoft Corporation. All rights reserved.
|
||||
// Licensed under the MIT License.
|
||||
|
||||
export { EventProcessorHost } from "./eventProcessorHost";
|
||||
export { PartitionContext } from "./partitionContext";
|
||||
export { CheckpointInfo } from "./checkpointInfo";
|
||||
export { CompleteLease, CompleteLeaseInfo } from "./completeLease";
|
||||
export { BaseLease, BaseLeaseInfo } from "./baseLease";
|
||||
export { LeaseManager } from "./leaseManager";
|
||||
export { CheckpointManager } from "./checkpointManager";
|
||||
export {
|
||||
OnReceivedError,
|
||||
EPHDiagnosticInfo,
|
||||
EventProcessorHostOptions,
|
||||
FromConnectionStringOptions,
|
||||
FromTokenProviderOptions,
|
||||
OnEphError,
|
||||
OnReceivedMessage
|
||||
} from "./modelTypes";
|
||||
export {
|
||||
delay,
|
||||
EventData,
|
||||
OnError,
|
||||
EventPosition,
|
||||
EventHubPartitionRuntimeInformation,
|
||||
EventHubRuntimeInformation,
|
||||
MessagingError,
|
||||
DataTransformer,
|
||||
aadEventHubsAudience
|
||||
} from "@azure/event-hubs";
|
|
@ -1,134 +0,0 @@
|
|||
// Copyright (c) Microsoft Corporation. All rights reserved.
|
||||
// Licensed under the MIT License.
|
||||
|
||||
import { CompleteLease } from "./completeLease";
|
||||
import { BaseLease } from "./baseLease";
|
||||
|
||||
/**
|
||||
* @interface LeaseManager
|
||||
* If you wish to have EventProcessorHost store leases somewhere other than Azure Storage,
|
||||
* you can write your own lease manager using this interface.
|
||||
*
|
||||
* The Azure Storage managers use the same storage for both lease and checkpoints, so both
|
||||
* interfaces are implemented by the same class. You are free to do the same thing if you have
|
||||
* a unified store for both types of data.
|
||||
*
|
||||
* This interface does not specify initialization methods because we have no way of knowing what
|
||||
* information your implementation will require.
|
||||
*/
|
||||
export interface LeaseManager {
|
||||
/**
|
||||
* @property {number} leaseRenewInterval The sleep interval **in seconds** between scans.
|
||||
*
|
||||
* Allows a lease manager implementation to specify to PartitionManager how often it should
|
||||
* scan leases and renew them. In order to redistribute leases in a timely fashion after a host
|
||||
* ceases operating, we recommend a relatively short interval, such as ten seconds. Obviously it
|
||||
* should be less than half of the lease length, to prevent accidental expiration.
|
||||
*/
|
||||
leaseRenewInterval: number;
|
||||
/**
|
||||
* @property {number} leaseDuration Duration of a lease **in seconds** before it expires
|
||||
* unless renewed.
|
||||
*/
|
||||
leaseDuration: number;
|
||||
/**
|
||||
* Does the lease store exist?
|
||||
* @returns {Promise<boolean>} Promise<boolean> `true` if it exists, `false` if it does not exist.
|
||||
*/
|
||||
leaseStoreExists(): Promise<boolean>;
|
||||
/**
|
||||
* Create the lease store if it doesn't exist. Do nothing if it does exist.
|
||||
* @returns {Promise<void>} Promise<void> resolves with `undefined`; rejects with an `Error`.
|
||||
*/
|
||||
createLeaseStoreIfNotExists(): Promise<void>;
|
||||
/**
|
||||
* Delete lease store.
|
||||
* @returns {Promise<void>} Promise<void> resolves with `undefined`; rejects with an `Error`.
|
||||
*/
|
||||
deleteLeaseStore(): Promise<void>;
|
||||
/**
|
||||
* Gets the lease info for the specified partition. Can return `undefined` if no lease has been
|
||||
* created in the store for the specified partition.
|
||||
* @param {string} partitionId Partition id to get the lease for.
|
||||
* @returns {Promise<CompleteLease | undefined>} Promise<Lease | undefined>
|
||||
*/
|
||||
getLease(partitionId: string): Promise<CompleteLease | undefined>;
|
||||
/**
|
||||
* Returns lightweight BaseLease for all leases, which includes name of owning host and whether
|
||||
* lease is expired. An implementation is free to return CompleteLease or its own class derived
|
||||
* from CompleteLease, but it is important that getAllLeases run as fast as possible. If it is
|
||||
* faster to obtain only the information required for a BaseLease, we heavily recommend doing that.
|
||||
* @returns {Promise<BaseLease[]>} Promise<BaseLease[]>
|
||||
*/
|
||||
getAllLeases(): Promise<BaseLease[]>;
|
||||
/**
|
||||
* Create in the store the lease info for the given partition, if it does not exist. Do nothing
|
||||
* if it does exist in the store already.
|
||||
*
|
||||
* @param {string[]} partitionIds ids of partitions to create lease info for
|
||||
* @returns {Promise<void>} Promise<void> undefined on success, rejects on error.
|
||||
*/
|
||||
createAllLeasesIfNotExists(partitionIds: string[]): Promise<void>;
|
||||
/**
|
||||
* Delete the lease info for the given partition from the store. If there is no stored lease for
|
||||
* the given partition, that is treated as success.
|
||||
*
|
||||
* @param {CompleteLease} lease Lease info for the desired partition as previously obtained from
|
||||
* `getLease()`.
|
||||
* @returns {Promise<void>} Promise<void> resolves with `undefined`; rejects with an `Error`.
|
||||
*/
|
||||
deleteLease(lease: CompleteLease): Promise<void>;
|
||||
/**
|
||||
* Acquire the lease on the desired partition for this EventProcessorHost.
|
||||
*
|
||||
* Note that it is legal to acquire a lease that is already owned by another host.
|
||||
* Lease-stealing is how partitions are redistributed when additional hosts are started.
|
||||
*
|
||||
* The existing Azure Storage implementation can experience races between two host instances
|
||||
* attempting to acquire or steal the lease at the same time. To avoid situations where two host
|
||||
* instances both believe that they own the lease, acquisition can fail without errors by
|
||||
* returning false and should do so when there is any doubt -- the worst that can happen is that
|
||||
* no host instance owns the lease for a short time. This is qualitatively different from,
|
||||
* for example, the underlying store throwing an access exception, which is an error.
|
||||
*
|
||||
* @param {CompleteLease} lease Lease info for the desired partition as previously obtained from
|
||||
* `getLease()`.
|
||||
* @returns {Promise<boolean>} Promise<boolean> `true` if acquired successfully; `false` otherwise.
|
||||
*/
|
||||
acquireLease(lease: CompleteLease): Promise<boolean>;
|
||||
/**
|
||||
* Renew a lease currently held by this host.
|
||||
*
|
||||
* If the lease has been taken by another host instance (either stolen or after expiration)
|
||||
* or explicitly released, renewLease must return false. With the Azure Storage-based
|
||||
* implementation, it IS possible to renew an expired lease that has not been taken by another
|
||||
* host, so your implementation can allow that or not, whichever is convenient. If it does not,
|
||||
* renewLease should return false.
|
||||
*
|
||||
* @param {CompleteLease} lease lease to be renewed.
|
||||
* @returns {Promise<boolean>} Promise<boolean> `true` if renewed successfully; `false` otherwise.
|
||||
*/
|
||||
renewLease(lease: CompleteLease): Promise<boolean>;
|
||||
/**
|
||||
* Give up a lease currently held by this host.
|
||||
*
|
||||
* If the lease has been stolen, or expired, releasing it is unnecessary, and will fail if
|
||||
* attempted.
|
||||
*
|
||||
* @param {CompleteLease} lease Lease info for the desired partition as previously obtained from
|
||||
* `getLease()`.
|
||||
* @returns {Promise<void>} Promise<void> resolves with `undefined`; rejects with an `Error`.
|
||||
*/
|
||||
releaseLease(lease: CompleteLease): Promise<void>;
|
||||
/**
|
||||
* Update the store with the information in the provided lease.
|
||||
*
|
||||
* It is necessary to currently hold a lease in order to update it. If the lease has been stolen,
|
||||
* or expired, or released, it cannot be updated. Lease manager implementations should renew the
|
||||
* lease before performing the update to avoid lease expiration during the process.
|
||||
*
|
||||
* @param {CompleteLease} lease New lease information to be stored.
|
||||
* @returns {Promise<boolean>} Promise<boolean> `true` if updated successfully; `false` otherwise.
|
||||
*/
|
||||
updateLease(lease: CompleteLease): Promise<boolean>;
|
||||
}
|
|
@ -1,80 +0,0 @@
|
|||
// Copyright (c) Microsoft Corporation. All rights reserved.
|
||||
// Licensed under the MIT License.
|
||||
|
||||
import debugModule from "debug";
|
||||
/**
|
||||
* @hidden
|
||||
* log statements for error
|
||||
*/
|
||||
export const error = debugModule("azure:eph:error");
|
||||
/**
|
||||
* @hidden
|
||||
* log statements for lease
|
||||
*/
|
||||
export const baseLease = debugModule("azure:eph:baseLease");
|
||||
/**
|
||||
* @hidden
|
||||
* log statements for lease
|
||||
*/
|
||||
export const completeLease = debugModule("azure:eph:completeLease");
|
||||
/**
|
||||
* @hidden
|
||||
* log statements for azurebloblease
|
||||
*/
|
||||
export const azurebloblease = debugModule("azure:eph:azurebloblease");
|
||||
/**
|
||||
* @hidden
|
||||
* log statements for leaseManager
|
||||
*/
|
||||
export const leaseManager = debugModule("azure:eph:leaseManager");
|
||||
/**
|
||||
* @hidden
|
||||
* log statements for partitionManager
|
||||
*/
|
||||
export const partitionManager = debugModule("azure:eph:partitionManager");
|
||||
/**
|
||||
* @hidden
|
||||
* log statements for partitionManager
|
||||
*/
|
||||
export const partitionPump = debugModule("azure:eph:partitionPump");
|
||||
/**
|
||||
* @hidden
|
||||
* log statements for pumpManager
|
||||
*/
|
||||
export const pumpManager = debugModule("azure:eph:pumpManager");
|
||||
/**
|
||||
* @hidden
|
||||
* log statements for partitionScanner
|
||||
*/
|
||||
export const partitionScanner = debugModule("azure:eph:partitionScanner");
|
||||
/**
|
||||
* @hidden
|
||||
* log statements for host
|
||||
*/
|
||||
export const host = debugModule("azure:eph:host");
|
||||
/**
|
||||
* @hidden
|
||||
* log statements for blobService
|
||||
*/
|
||||
export const blobService = debugModule("azure:eph:blobService");
|
||||
/**
|
||||
* @hidden
|
||||
* log statements for partitionContext
|
||||
*/
|
||||
export const partitionContext = debugModule("azure:eph:partitionContext");
|
||||
/**
|
||||
* @hidden
|
||||
* log statements for checkpointLeaseMgr
|
||||
*/
|
||||
export const checkpointLeaseMgr = debugModule("azure:eph:checkpointLeaseMgr");
|
||||
/**
|
||||
* @hidden
|
||||
* log statements for checkpointLeaseMgr
|
||||
*/
|
||||
export const checkpoint = debugModule("azure:eph:checkpointInfo");
|
||||
|
||||
/**
|
||||
* @hidden
|
||||
* log statements for checkpointLeaseMgr
|
||||
*/
|
||||
export const util = debugModule("azure:eph:util");
|
|
@ -1,204 +0,0 @@
|
|||
// Copyright (c) Microsoft Corporation. All rights reserved.
|
||||
// Licensed under the MIT License.
|
||||
|
||||
import { PartitionContext } from "./partitionContext";
|
||||
import {
|
||||
EventData,
|
||||
MessagingError,
|
||||
EventPosition,
|
||||
TokenProvider,
|
||||
ClientOptionsBase
|
||||
} from "@azure/event-hubs";
|
||||
import { CheckpointManager } from "./checkpointManager";
|
||||
import { LeaseManager } from "./leaseManager";
|
||||
|
||||
/**
|
||||
* @hidden
|
||||
*/
|
||||
export enum CloseReason {
|
||||
leaseLost = "LeaseLost",
|
||||
shutdown = "ShutDown"
|
||||
}
|
||||
|
||||
/**
|
||||
* Provides information about internal errors that occur while managing partitions or leases for
|
||||
* the partitions.
|
||||
* @interface EPHDiagnosticInfo
|
||||
*/
|
||||
export interface EPHDiagnosticInfo {
|
||||
/**
|
||||
* @property {string} hostName The name of the host that experienced the error. Allows
|
||||
* distinguishing the error source if multiple hosts in a single process.
|
||||
*/
|
||||
hostName: string;
|
||||
/**
|
||||
* @property {string} partitionId The partitionId that experienced the error. Allows
|
||||
* distinguishing the error source if multiple hosts in a single process.
|
||||
*/
|
||||
partitionId: string;
|
||||
/**
|
||||
* @property {string} action A short string that indicates what general activity threw the
|
||||
* error.
|
||||
*/
|
||||
action: string;
|
||||
/**
|
||||
* @property {any} error The error that was thrown.
|
||||
*/
|
||||
error: any;
|
||||
}
|
||||
|
||||
/**
|
||||
* Describes the error handler signature to receive notifcation for general errors.
|
||||
*
|
||||
* Errors which occur while processing events from a particular EventHub partition are delivered
|
||||
* to the `onError` handler provided in the `start()` method. This handler is called on
|
||||
* occasions when an error occurs while managing partitions or leases for the partitions.
|
||||
* @function
|
||||
*/
|
||||
export type OnEphError = (error: EPHDiagnosticInfo) => void;
|
||||
|
||||
/**
|
||||
* Describes the message handler signature for messages received from an EventHub.
|
||||
* @function
|
||||
*/
|
||||
export type OnReceivedMessage = (context: PartitionContext, eventData: EventData) => void;
|
||||
|
||||
/**
|
||||
* Describes the message handler signature for errors that occur while receiving messages from an
|
||||
* EventHub.
|
||||
* @function
|
||||
*/
|
||||
export type OnReceivedError = (error: MessagingError | Error) => void;
|
||||
|
||||
/**
|
||||
* Describes the optional parameters that can be provided for creating an EventProcessorHost while
|
||||
* creating from an iothub connectionstring.
|
||||
* @interface FromIotHubConnectionStringOptions
|
||||
*/
|
||||
export type FromIotHubConnectionStringOptions = FromTokenProviderOptions;
|
||||
|
||||
/**
|
||||
* Describes the optional parameters that can be provided for creating an EventProcessorHost while
|
||||
* creating from the given token provider
|
||||
* @interface FromTokenProviderOptions
|
||||
*/
|
||||
export interface FromTokenProviderOptions extends ClientOptionsBase {
|
||||
/**
|
||||
* @property {EventPosition} initialOffset This is only used when then receiver is being created
|
||||
* for the very first time and there is no checkpoint data in the blob. For this option to be
|
||||
* effective please make sure to provide a new hostName that was not used previously.
|
||||
*/
|
||||
initialOffset?: EventPosition;
|
||||
/**
|
||||
* @property {string} [consumerGroup] The name of the consumer group within the Event Hub. Default
|
||||
* value: **`"$default"`**.
|
||||
*/
|
||||
consumerGroup?: string;
|
||||
/**
|
||||
* @property {string} [storageBlobPrefix] Prefix used when naming blobs within the storage
|
||||
* container.
|
||||
*/
|
||||
storageBlobPrefix?: string;
|
||||
/**
|
||||
* @property {OnEphError} [onEphError] Error handler that can be provided to receive notifcation
|
||||
* for general errors.
|
||||
*
|
||||
* Errors which occur while processing events from a particular EventHub partition are delivered
|
||||
* to the `onError` handler provided in the `start()` method. This handler is called on
|
||||
* occasions when an error occurs while managing partitions or leases for the partitions.
|
||||
*/
|
||||
onEphError?: OnEphError;
|
||||
/**
|
||||
* @property {number} [leaseRenewInterval] The sleep interval **`in seconds`** between scans.
|
||||
* Default: **`10` seconds**.
|
||||
*
|
||||
* Allows a lease manager implementation to specify to PartitionManager how often it should
|
||||
* scan leases and renew them. In order to redistribute leases in a timely fashion after a host
|
||||
* ceases operating, we recommend a relatively short interval, such as ten seconds. Obviously it
|
||||
* should be less than half of the lease length, to prevent accidental expiration.
|
||||
*
|
||||
* If `Leasemanager` is provided when creating the EventProcessorHost then this value will be ignored.
|
||||
*/
|
||||
leaseRenewInterval?: number;
|
||||
/**
|
||||
* @property {number} [leaseDuration] Duration of a lease **`in seconds`** before it expires
|
||||
* unless renewed. Default: **`30` seconds**, Min Value: **`15` seconds**,
|
||||
* Max value: **`60` seconds**.
|
||||
*
|
||||
* If `Leasemanager` is provided when creating the EventProcessorHost then this value will be ignored.
|
||||
*/
|
||||
leaseDuration?: number;
|
||||
/**
|
||||
* @property {number} [startupScanDelay] The delay time **`in seconds`** between the first scan
|
||||
* for available partitions and the second. This is part of a startup optimization which allows
|
||||
* individual hosts to become visible to other hosts, and thereby get a more accurate count
|
||||
* of the number of hosts in the system, before they try to estimate how many partitions they
|
||||
* should own. Default: **`30` seconds**.
|
||||
*/
|
||||
startupScanDelay?: number;
|
||||
/**
|
||||
* @property {number} [fastScanInterval] There are two possible interval times between scans for
|
||||
* available partitions, fast and slow. The fast (short) interval **`in seconds`** is used after
|
||||
* a scan in which lease stealing has occurred, to promote quicker rebalancing.
|
||||
* Default: **`3` seconds**.
|
||||
*/
|
||||
fastScanInterval?: number;
|
||||
/**
|
||||
* @property {number} [slowScanInterval] The slow (long) interval **`in seconds`** is used
|
||||
* after a scan in which lease stealing did not occur, to reduce unnecessary scanning when
|
||||
* the system is in steady state. Default: **`5` seconds**.
|
||||
*/
|
||||
slowScanInterval?: number;
|
||||
}
|
||||
|
||||
/**
|
||||
* Describes the optional parameters that can be provided for creating an EventProcessorHost while
|
||||
* creating from the eventhub connection string.
|
||||
* @interface FromConnectionStringOptions
|
||||
*/
|
||||
export interface FromConnectionStringOptions extends FromTokenProviderOptions {
|
||||
/**
|
||||
* @property {string} [eventHubPath] The name of the EventHub. This is optional if the
|
||||
* EventHub connection string contains EntityPath=hub-name else an Error will be thrown.
|
||||
*/
|
||||
eventHubPath?: string;
|
||||
}
|
||||
|
||||
/**
|
||||
* Describes the optional parameters that can be provided for creating an EventProcessorHost.
|
||||
* @interface EventProcessorHostOptions
|
||||
*/
|
||||
export interface EventProcessorHostOptions extends FromConnectionStringOptions {
|
||||
/**
|
||||
* @property {string} [eventHubConnectionString] Connection string for the Event Hub to receive
|
||||
* from. Example: "Endpoint=sb://my-servicebus-namespace.servicebus.windows.net/;
|
||||
* SharedAccessKeyName=my-SA-name;SharedAccessKey=my-SA-key".
|
||||
*/
|
||||
eventHubConnectionString?: string;
|
||||
/**
|
||||
* @property {string} [storageConnectionString] Connection string to Azure Storage account used
|
||||
* for leases and checkpointing. Example "DefaultEndpointsProtocol=https;AccountName=<account-name>;
|
||||
* AccountKey=<account-key>;EndpointSuffix=core.windows.net"
|
||||
*/
|
||||
storageConnectionString?: string;
|
||||
/**
|
||||
* @property {string} [storageContainerName] Azure Storage container name for use by built-in
|
||||
* lease and checkpoint manager.
|
||||
*/
|
||||
storageContainerName?: string;
|
||||
/**
|
||||
* @property {CheckpointManager} [checkpointManager] A manager to manage checkpoints.
|
||||
* Default: **`AzureStorageCheckpointLeaseManager`**.
|
||||
*/
|
||||
checkpointManager?: CheckpointManager;
|
||||
/**
|
||||
* @property {LeaseManager} [LeaseManager] A manager to manage leases. Default:
|
||||
* **`AzureStorageCheckpointLeaseManager`**.
|
||||
*/
|
||||
leaseManager?: LeaseManager;
|
||||
/**
|
||||
* @property {TokenProvider} [tokenProvider] An instance of the token provider interface that
|
||||
* provides the token for authentication. Default value: **`SasTokenProvider`**.
|
||||
*/
|
||||
tokenProvider?: TokenProvider;
|
||||
}
|
|
@ -1,192 +0,0 @@
|
|||
// Copyright (c) Microsoft Corporation. All rights reserved.
|
||||
// Licensed under the MIT License.
|
||||
|
||||
import { EventData, EventPosition } from "@azure/event-hubs";
|
||||
import { CompleteLease } from "./completeLease";
|
||||
import { CheckpointInfo } from "./checkpointInfo";
|
||||
import * as log from "./log";
|
||||
import { HostContextWithCheckpointLeaseManager } from "./hostContext";
|
||||
import { validateType } from "./util/utils";
|
||||
|
||||
/**
|
||||
* Describes the Partition Context.
|
||||
* @class PartitionContext
|
||||
*/
|
||||
export class PartitionContext {
|
||||
/**
|
||||
* @property {Lease} lease The most recdent checkpointed lease with the partitionId.
|
||||
*/
|
||||
lease: CompleteLease;
|
||||
/**
|
||||
* @property {string} partitionId The eventhub partition id.
|
||||
* @readonly
|
||||
*/
|
||||
readonly partitionId: string;
|
||||
/**
|
||||
* @property {string} owner The host/owner of the partition.
|
||||
* @readonly
|
||||
*/
|
||||
get owner(): string {
|
||||
return this.lease.owner;
|
||||
}
|
||||
/**
|
||||
* @property {string} eventhubPath The path of the eventhub
|
||||
* @readonly
|
||||
*/
|
||||
get eventhubPath(): string {
|
||||
return this._context.eventHubPath;
|
||||
}
|
||||
/**
|
||||
* @property {string} consumerGroup The name of the consumer group.
|
||||
* @readonly
|
||||
*/
|
||||
get consumerGroup(): string {
|
||||
return this._context.consumerGroup;
|
||||
}
|
||||
|
||||
private _context: HostContextWithCheckpointLeaseManager;
|
||||
private _offset: string = EventPosition.startOfStream;
|
||||
private _sequenceNumber: number = 0;
|
||||
|
||||
/**
|
||||
* Creates a new PartitionContext.
|
||||
* @param {string} partitionId The eventhub partition id.
|
||||
* @param {string} owner The name of the owner.
|
||||
* @param {CompleteLease} lease The lease object.
|
||||
*/
|
||||
constructor(
|
||||
context: HostContextWithCheckpointLeaseManager,
|
||||
partitionId: string,
|
||||
lease: CompleteLease
|
||||
) {
|
||||
this._context = context;
|
||||
this.partitionId = partitionId;
|
||||
this.lease = lease;
|
||||
}
|
||||
|
||||
/**
|
||||
* Sets the offset and sequence number of the partition context from the provided EventData.
|
||||
* @param {EventData} eventData The event data `received` from the EventHubReceiver.
|
||||
*/
|
||||
setOffsetAndSequenceNumber(eventData: EventData): void {
|
||||
validateType("eventData", eventData, true, "object");
|
||||
validateType("eventData.offset", eventData.offset, true, "string");
|
||||
validateType("eventData.sequenceNumber", eventData.sequenceNumber, true, "number");
|
||||
this._offset = eventData.offset!;
|
||||
this._sequenceNumber = eventData.sequenceNumber!;
|
||||
}
|
||||
|
||||
/**
|
||||
* Writes the current offset and sequenceNumber to the checkpoint store via the checkpoint manager.
|
||||
*
|
||||
* The checkpoint data is structured as a JSON payload (example):
|
||||
* `{ "partitionId":"0","owner":"ephtest","token":"48e209e3-55f0-41b8-a8dd-d9c09ff6c35a",
|
||||
* "epoch":1,"offset":"","SequenceNumber":0 }`.
|
||||
*
|
||||
* @return {Promise<void>}
|
||||
*/
|
||||
async checkpoint(): Promise<void> {
|
||||
const capturedCheckpoint: CheckpointInfo = {
|
||||
offset: this._offset,
|
||||
partitionId: this.partitionId,
|
||||
sequenceNumber: this._sequenceNumber
|
||||
};
|
||||
const withHostAndPartiton = this._context.withHostAndPartition;
|
||||
log.partitionContext(withHostAndPartiton(this, "Checkpointing: %O"), capturedCheckpoint);
|
||||
await this._context.checkpointLock.acquire(this._context.checkpointLockId, () => {
|
||||
return this._persistCheckpoint(capturedCheckpoint);
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Writes the current offset and sequenceNumber to the checkpoint store via the checkpoint manager.
|
||||
*
|
||||
* The checkpoint data is structured as a JSON payload (example):
|
||||
* `{ "partitionId":"0","owner":"ephtest","token":"48e209e3-55f0-41b8-a8dd-d9c09ff6c35a",
|
||||
* "epoch":1,"offset":"","SequenceNumber":0 }`.
|
||||
*
|
||||
* @param {EventData} eventData The event data received from the EventHubReceiver.
|
||||
* @return {Promise<void>}
|
||||
*/
|
||||
async checkpointFromEventData(eventData: EventData): Promise<void> {
|
||||
const data = CheckpointInfo.createFromEventData(this.partitionId, eventData);
|
||||
const withHostAndPartiton = this._context.withHostAndPartition;
|
||||
log.partitionContext(withHostAndPartiton(this, "Checkpointing from ED: %O"), data);
|
||||
await this._context.checkpointLock.acquire(this._context.checkpointLockId, () => {
|
||||
return this._persistCheckpoint(data);
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* @hidden
|
||||
*/
|
||||
async getInitialOffset(): Promise<EventPosition> {
|
||||
const startingCheckpoint = await this._context.checkpointManager.getCheckpoint(
|
||||
this.partitionId
|
||||
);
|
||||
const withHostAndPartiton = this._context.withHostAndPartition;
|
||||
let result: EventPosition;
|
||||
if (!startingCheckpoint) {
|
||||
if (this._context.initialOffset) {
|
||||
log.partitionContext(
|
||||
withHostAndPartiton(this, "User provided initial offset: %s"),
|
||||
this._context.initialOffset.getExpression()
|
||||
);
|
||||
}
|
||||
result = this._context.initialOffset || EventPosition.fromOffset(this._offset);
|
||||
} else {
|
||||
if (startingCheckpoint.offset != undefined) this._offset = startingCheckpoint.offset;
|
||||
if (startingCheckpoint.sequenceNumber != undefined)
|
||||
this._sequenceNumber = startingCheckpoint.sequenceNumber;
|
||||
result = EventPosition.fromOffset(this._offset);
|
||||
log.partitionContext(
|
||||
withHostAndPartiton(this, "Retrieved starting offset/sequence " + "number: %s/%d"),
|
||||
this._offset,
|
||||
this._sequenceNumber
|
||||
);
|
||||
}
|
||||
log.partitionContext(
|
||||
withHostAndPartiton(
|
||||
this,
|
||||
"Initial position provider offset: %s, " + "sequenceNumber: %d, enqueuedTime: %d"
|
||||
),
|
||||
result.offset,
|
||||
result.sequenceNumber,
|
||||
result.enqueuedTime
|
||||
);
|
||||
return result;
|
||||
}
|
||||
|
||||
/**
|
||||
* @hidden
|
||||
*/
|
||||
private async _persistCheckpoint(checkpoint: CheckpointInfo): Promise<void> {
|
||||
const withHostAndPartiton = this._context.withHostAndPartition;
|
||||
try {
|
||||
const inStoreCheckpoint = await this._context.checkpointManager.getCheckpoint(
|
||||
checkpoint.partitionId
|
||||
);
|
||||
if (inStoreCheckpoint && inStoreCheckpoint.sequenceNumber >= checkpoint.sequenceNumber) {
|
||||
const msg =
|
||||
`Ignoring out of date checkpoint with offset: '${checkpoint.offset}', ` +
|
||||
`sequenceNumber: ${checkpoint.sequenceNumber} because currently persisted checkpoint ` +
|
||||
` has higher offset '${inStoreCheckpoint.offset}', sequenceNumber ` +
|
||||
`${inStoreCheckpoint.sequenceNumber}.`;
|
||||
log.error(withHostAndPartiton(this, "%s"), msg);
|
||||
throw new Error(msg);
|
||||
}
|
||||
log.partitionContext(withHostAndPartiton(this, "Persisting the checkpoint: %O."), checkpoint);
|
||||
await this._context.checkpointManager.updateCheckpoint(this.lease, checkpoint);
|
||||
log.partitionContext(
|
||||
withHostAndPartiton(this, "Successfully persisted the checkpoint: %O."),
|
||||
checkpoint
|
||||
);
|
||||
} catch (err: any) {
|
||||
const msg =
|
||||
`An error occurred while checkpointing info for partition ` +
|
||||
`'${checkpoint.partitionId}': ${err ? err.stack : JSON.stringify(err)}.`;
|
||||
log.error(withHostAndPartiton(this, "%s"), msg);
|
||||
throw err;
|
||||
}
|
||||
}
|
||||
}
|
|
@ -1,251 +0,0 @@
|
|||
// Copyright (c) Microsoft Corporation. All rights reserved.
|
||||
// Licensed under the MIT License.
|
||||
|
||||
import { HostContextWithPumpManager } from "./hostContext";
|
||||
import { validateType, RetryConfig, retry, EPHActionStrings } from "./util/utils";
|
||||
import { delay } from "@azure/event-hubs";
|
||||
import * as log from "./log";
|
||||
import { OnReceivedMessage, OnReceivedError, CloseReason } from "./modelTypes";
|
||||
import { PartitionScanner } from "./partitionScanner";
|
||||
|
||||
/**
|
||||
* @hidden
|
||||
*/
|
||||
export class PartitionManager {
|
||||
private _context: HostContextWithPumpManager;
|
||||
private _partitionScanner: PartitionScanner;
|
||||
private _isCancelRequested: boolean = false;
|
||||
private _isRunning: boolean = false;
|
||||
private _runTask?: Promise<void>;
|
||||
|
||||
constructor(context: HostContextWithPumpManager) {
|
||||
this._context = context;
|
||||
this._partitionScanner = new PartitionScanner(this._context);
|
||||
}
|
||||
|
||||
/**
|
||||
* @hidden
|
||||
*/
|
||||
async start(onMessage: OnReceivedMessage, onError: OnReceivedError): Promise<void> {
|
||||
validateType("onMessage", onMessage, true, "function");
|
||||
validateType("onError", onError, true, "function");
|
||||
if (this._isRunning) {
|
||||
throw new Error("A partition manager cannot be started multiple times.");
|
||||
}
|
||||
|
||||
try {
|
||||
this._reset();
|
||||
this._isRunning = true;
|
||||
this._context.onMessage = onMessage;
|
||||
this._context.onError = onError;
|
||||
await this._cachePartitionIds();
|
||||
await this._initializeStores();
|
||||
this._runTask = this._run();
|
||||
} catch (err: any) {
|
||||
this._isRunning = false;
|
||||
throw err;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* @hidden
|
||||
*/
|
||||
async stop(): Promise<void> {
|
||||
const withHost = this._context.withHost;
|
||||
this._isCancelRequested = true;
|
||||
const localRunTask = this._runTask;
|
||||
if (localRunTask) {
|
||||
try {
|
||||
await localRunTask;
|
||||
} catch (err: any) {
|
||||
const msg =
|
||||
`An error occurred while stopping the run task: ` +
|
||||
`${err ? err.stack : JSON.stringify(err)}.`;
|
||||
log.error(withHost("%s"), msg);
|
||||
} finally {
|
||||
this._isRunning = false;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* @hidden
|
||||
*/
|
||||
shouldStop(): boolean {
|
||||
if (this._isCancelRequested) {
|
||||
log.partitionManager(
|
||||
this._context.withHost(
|
||||
"Cancellation was requested -> %s. " + "Hence stopping further execution."
|
||||
),
|
||||
this._isCancelRequested
|
||||
);
|
||||
}
|
||||
return this._isCancelRequested;
|
||||
}
|
||||
|
||||
/**
|
||||
* @hidden
|
||||
*/
|
||||
private _reset(): void {
|
||||
const withHost = this._context.withHost;
|
||||
log.partitionManager(withHost("Resetting the partition manager."));
|
||||
this._context.blobReferenceByPartition = {};
|
||||
this._context.onMessage = undefined;
|
||||
this._context.onError = undefined;
|
||||
this._isRunning = false;
|
||||
this._isCancelRequested = false;
|
||||
}
|
||||
|
||||
/**
|
||||
* @hidden
|
||||
*/
|
||||
private async _run(): Promise<void> {
|
||||
const withHost = this._context.withHost;
|
||||
try {
|
||||
await this._scan(true);
|
||||
} catch (err: any) {
|
||||
const msg =
|
||||
`An error occurred in the main loop of the partition ` +
|
||||
`manager: ${err ? err.stack : JSON.stringify(err)}. Hence shutting down.`;
|
||||
log.error(withHost("%s"), msg);
|
||||
this._context.onEphError({
|
||||
hostName: this._context.hostName,
|
||||
partitionId: "N/A",
|
||||
error: err,
|
||||
action: EPHActionStrings.partitionManagerMainLoop
|
||||
});
|
||||
}
|
||||
try {
|
||||
// clean up
|
||||
log.partitionManager(withHost("Shutting down all the receivers."));
|
||||
await this._context.pumpManager.removeAllPumps(CloseReason.shutdown);
|
||||
} catch (err: any) {
|
||||
const msg =
|
||||
`An error occurred while shutting down the partition ` +
|
||||
`manager: ${err ? err.stack : JSON.stringify(err)}.`;
|
||||
log.error(withHost("%s"), msg);
|
||||
this._context.onEphError({
|
||||
hostName: this._context.hostName,
|
||||
partitionId: "N/A",
|
||||
error: err,
|
||||
action: EPHActionStrings.partitionManagerCleanup
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* @hidden
|
||||
*/
|
||||
private async _cachePartitionIds(): Promise<void> {
|
||||
const hostName = this._context.hostName;
|
||||
const withHost = this._context.withHost;
|
||||
if (!this._context.partitionIds.length) {
|
||||
log.partitionManager(withHost("Get the list of partition ids."));
|
||||
const config: RetryConfig<string[]> = {
|
||||
hostName: hostName,
|
||||
operation: () => this._context.getPartitionIds(),
|
||||
retryMessage: "Failure getting partition ids for this Event Hub, retrying",
|
||||
finalFailureMessage: "Out of retries for getting partition ids for this Event Hub",
|
||||
action: EPHActionStrings.gettingPartitionIds,
|
||||
maxRetries: 5
|
||||
};
|
||||
await retry<string[]>(config);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* @hidden
|
||||
*/
|
||||
private async _initializeStores(): Promise<void> {
|
||||
const hostName = this._context.hostName;
|
||||
const withHost = this._context.withHost;
|
||||
const leaseManager = this._context.leaseManager;
|
||||
const checkpointManager = this._context.checkpointManager;
|
||||
|
||||
validateType("this._context.onMessage", this._context.onMessage, true, "function");
|
||||
validateType("this._context.onError", this._context.onError, true, "function");
|
||||
|
||||
log.partitionManager(withHost("Ensuring that the lease store exists."));
|
||||
if (!(await leaseManager.leaseStoreExists())) {
|
||||
const config: RetryConfig<void> = {
|
||||
hostName: hostName,
|
||||
operation: () => leaseManager.createLeaseStoreIfNotExists(),
|
||||
retryMessage: "Failure creating lease store for this Event Hub, retrying",
|
||||
finalFailureMessage: "Out of retries for creating lease store for this Event Hub",
|
||||
action: EPHActionStrings.creatingLeaseStore,
|
||||
maxRetries: 5
|
||||
};
|
||||
await retry<void>(config);
|
||||
}
|
||||
|
||||
if (this.shouldStop()) return;
|
||||
|
||||
log.partitionManager(withHost("Ensure the checkpointstore exists."));
|
||||
if (!(await checkpointManager.checkpointStoreExists())) {
|
||||
const config: RetryConfig<void> = {
|
||||
hostName: hostName,
|
||||
operation: () => checkpointManager.createCheckpointStoreIfNotExists(),
|
||||
retryMessage: "Failure creating checkpoint store for this Event Hub, retrying",
|
||||
finalFailureMessage: "Out of retries for creating checkpoint store for this Event Hub",
|
||||
action: EPHActionStrings.creatingCheckpointStore,
|
||||
maxRetries: 5
|
||||
};
|
||||
await retry<void>(config);
|
||||
}
|
||||
|
||||
if (this.shouldStop()) return;
|
||||
|
||||
log.partitionManager(withHost("Ensure that the leases exist."));
|
||||
const leaseConfig: RetryConfig<void> = {
|
||||
hostName: hostName,
|
||||
operation: () => leaseManager.createAllLeasesIfNotExists(this._context.partitionIds),
|
||||
retryMessage: "Failure creating lease for partition, retrying",
|
||||
finalFailureMessage: "Out of retries for creating lease for partition",
|
||||
action: EPHActionStrings.creatingLease,
|
||||
maxRetries: 5
|
||||
};
|
||||
await retry<void>(leaseConfig);
|
||||
|
||||
if (this.shouldStop()) return;
|
||||
|
||||
log.partitionManager(withHost("Ensure that the checkpoint exists."));
|
||||
const checkpointConfig: RetryConfig<void> = {
|
||||
hostName: hostName,
|
||||
operation: () =>
|
||||
checkpointManager.createAllCheckpointsIfNotExists(this._context.partitionIds),
|
||||
retryMessage: "Failure creating checkpoint for partition, retrying",
|
||||
finalFailureMessage: "Out of retries for creating checkpoint for partition",
|
||||
action: EPHActionStrings.creatingCheckpoint,
|
||||
maxRetries: 5
|
||||
};
|
||||
await retry<void>(checkpointConfig);
|
||||
|
||||
if (this.shouldStop()) return;
|
||||
}
|
||||
|
||||
/**
|
||||
* @hidden
|
||||
*/
|
||||
private async _scan(isFirst: boolean): Promise<void> {
|
||||
const withHost = this._context.withHost;
|
||||
while (!this.shouldStop()) {
|
||||
if (isFirst) {
|
||||
log.partitionManager(withHost("Starting the first scan."));
|
||||
}
|
||||
const didSteal = await this._partitionScanner.scan(isFirst);
|
||||
log.partitionManager(withHost("Did we steal any leases in this scan: %s."), didSteal);
|
||||
let seconds: number = didSteal
|
||||
? this._context.fastScanInterval!
|
||||
: this._context.slowScanInterval!;
|
||||
if (isFirst) {
|
||||
seconds = this._context.startupScanDelay!;
|
||||
isFirst = false;
|
||||
}
|
||||
log.partitionManager(
|
||||
withHost("Sleeping for %d seconds before starting the next scan."),
|
||||
seconds
|
||||
);
|
||||
await delay(seconds * 1000);
|
||||
}
|
||||
}
|
||||
}
|
|
@@ -1,308 +0,0 @@
|
|||
// Copyright (c) Microsoft Corporation. All rights reserved.
|
||||
// Licensed under the MIT License.
|
||||
|
||||
import * as log from "./log";
|
||||
import { HostContextWithCheckpointLeaseManager } from "./hostContext";
|
||||
import { CompleteLease } from "./completeLease";
|
||||
import {
|
||||
ReceiveHandler,
|
||||
EventHubClient,
|
||||
EventPosition,
|
||||
ReceiveOptions,
|
||||
EventData,
|
||||
MessagingError,
|
||||
OnMessage,
|
||||
OnError,
|
||||
ErrorNameConditionMapper
|
||||
} from "@azure/event-hubs";
|
||||
import { PartitionContext } from "./partitionContext";
|
||||
import { CloseReason, OnReceivedMessage, OnReceivedError } from "./modelTypes";
|
||||
import { AzureBlobLease } from "./azureBlobLease";
|
||||
import { EPHActionStrings } from "./util/utils";
|
||||
|
||||
/**
|
||||
* @hidden
|
||||
*/
|
||||
export class PartitionPump {
|
||||
private _context: HostContextWithCheckpointLeaseManager;
|
||||
private _lease: CompleteLease;
|
||||
private _partitionContext: PartitionContext;
|
||||
private _onMessage: OnReceivedMessage;
|
||||
private _onError: OnReceivedError;
|
||||
private _client?: EventHubClient;
|
||||
private _receiveHandler?: ReceiveHandler;
|
||||
private _leaseRenewalTimer?: NodeJS.Timer;
|
||||
|
||||
constructor(
|
||||
context: HostContextWithCheckpointLeaseManager,
|
||||
lease: CompleteLease,
|
||||
onMessage: OnReceivedMessage,
|
||||
onError: OnReceivedError
|
||||
) {
|
||||
this._context = context;
|
||||
this._lease = lease;
|
||||
this._partitionContext = new PartitionContext(this._context, lease.partitionId, lease);
|
||||
this._onMessage = onMessage;
|
||||
this._onError = onError;
|
||||
}
|
||||
|
||||
get lease(): CompleteLease {
|
||||
return this._lease;
|
||||
}
|
||||
set lease(newLease: CompleteLease) {
|
||||
this._lease = newLease;
|
||||
if (this._partitionContext) {
|
||||
this._partitionContext.lease = this._lease;
|
||||
}
|
||||
}
|
||||
|
||||
isOpen(): boolean {
|
||||
if (this._receiveHandler && this._receiveHandler.isReceiverOpen) {
|
||||
return true;
|
||||
} else {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
async start(): Promise<void> {
|
||||
const withHostAndPartition = this._context.withHostAndPartition;
|
||||
await this._createNewReceiver();
|
||||
await this._scheduleLeaseRenewer();
|
||||
log.partitionPump(
|
||||
withHostAndPartition(
|
||||
this._lease,
|
||||
"Successfully started the receiver and scheduled lease renewer."
|
||||
)
|
||||
);
|
||||
}
|
||||
|
||||
async stop(reason: CloseReason): Promise<void> {
|
||||
await this._removeReceiver(reason);
|
||||
}
|
||||
|
||||
private async _createNewReceiver(): Promise<void> {
|
||||
const partitionId = this._partitionContext.partitionId;
|
||||
const withHostAndPartition = this._context.withHostAndPartition;
|
||||
try {
|
||||
this._client = this._context.getEventHubClient();
|
||||
} catch (err: any) {
|
||||
log.error(
|
||||
withHostAndPartition(
|
||||
partitionId,
|
||||
"An error occurred while creating " + "the eventhub client: %O."
|
||||
),
|
||||
err
|
||||
);
|
||||
throw err;
|
||||
}
|
||||
log.partitionPump(withHostAndPartition(partitionId, "Getting the initial offset."));
|
||||
const eventPosition: EventPosition = await this._partitionContext.getInitialOffset();
|
||||
this._context.pumps.set(partitionId, this);
|
||||
log.partitionPump(withHostAndPartition(partitionId, "Added the pump to the internal map."));
|
||||
let receiveHandler: ReceiveHandler;
|
||||
const rcvrOptions: ReceiveOptions = {
|
||||
consumerGroup: this._context.consumerGroup,
|
||||
eventPosition: eventPosition,
|
||||
epoch: this._lease.epoch
|
||||
};
|
||||
const onMessage: OnMessage = (eventData: EventData) => {
|
||||
this._partitionContext.setOffsetAndSequenceNumber(eventData);
|
||||
this._onMessage(this._partitionContext, eventData);
|
||||
};
|
||||
const onError: OnError = async (error: MessagingError | Error) => {
|
||||
log.error(
|
||||
withHostAndPartition(partitionId, "Receiver '%s' received an error: %O."),
|
||||
receiveHandler.address,
|
||||
error
|
||||
);
|
||||
// Let the user know about the error only if it is not ReceiverDisconnectedError.
|
||||
// This error happens when another instance of EPH connects a receiver with a higher epoch
|
||||
// value to a partition in the same consumer group that this receiver was connected to.
|
||||
// This happens due to lease being stolen or current lease expiring, which is expected.
|
||||
// Hence, not reporting such errors to the user makes the behavior less confusing.
|
||||
if (!this._isReceiverDisconnectedError(error)) {
|
||||
this._onError!(error);
|
||||
}
|
||||
try {
|
||||
await this._removeReceiver(CloseReason.shutdown);
|
||||
} catch (err: any) {
|
||||
log.error(
|
||||
withHostAndPartition(
|
||||
partitionId,
|
||||
"Since we received an error %O " +
|
||||
"on the error handler for receiver with address '%s', we tried closing it. However, " +
|
||||
"error occurred while closing it and it is: %O."
|
||||
),
|
||||
error,
|
||||
receiveHandler.address,
|
||||
err
|
||||
);
|
||||
}
|
||||
};
|
||||
log.partitionPump(
|
||||
withHostAndPartition(
|
||||
partitionId,
|
||||
"Trying to create receiver in " + "consumergroup: '%s' with epoch %d from offset: %s."
|
||||
),
|
||||
rcvrOptions.consumerGroup,
|
||||
rcvrOptions.epoch,
|
||||
eventPosition.getExpression()
|
||||
);
|
||||
receiveHandler = this._client.receive(partitionId, onMessage, onError, rcvrOptions);
|
||||
this._receiveHandler = receiveHandler;
|
||||
log.partitionPump(
|
||||
withHostAndPartition(partitionId, "Created receiver '%s' with eventPosition: %s"),
|
||||
receiveHandler.address,
|
||||
eventPosition.getExpression()
|
||||
);
|
||||
}
|
||||
|
||||
private async _leaseRenewer(): Promise<void> {
|
||||
const withHostAndPartition = this._context.withHostAndPartition;
|
||||
let result: boolean = true;
|
||||
let error: Error | undefined;
|
||||
log.partitionPump(
|
||||
withHostAndPartition(
|
||||
this._lease,
|
||||
"Lease renewer is active after " + "%d seconds. Trying to renew the lease"
|
||||
),
|
||||
this._context.leaseRenewInterval
|
||||
);
|
||||
try {
|
||||
result = await this._context.leaseManager.renewLease(this._lease);
|
||||
if (result) {
|
||||
log.partitionPump(withHostAndPartition(this._lease, "Successfully renewed the lease."));
|
||||
}
|
||||
} catch (err: any) {
|
||||
const msg =
|
||||
`An error occurred while renewing the lease for partitionId ` +
|
||||
`'${this._lease.partitionId}': ${err ? err.stack : JSON.stringify(err)}`;
|
||||
error = new Error(msg);
|
||||
this._context.onEphError({
|
||||
hostName: this._context.hostName,
|
||||
partitionId: this._lease.partitionId,
|
||||
error: error,
|
||||
action: EPHActionStrings.renewingLease
|
||||
});
|
||||
log.error(withHostAndPartition(this._lease, msg));
|
||||
}
|
||||
if (!result) {
|
||||
log.error(
|
||||
withHostAndPartition(
|
||||
this._lease,
|
||||
"Failed to renew the lease, result: %s. " + "Shutting down the receiver."
|
||||
),
|
||||
result
|
||||
);
|
||||
await this._removeReceiver(CloseReason.leaseLost);
|
||||
} else {
|
||||
this._scheduleLeaseRenewer();
|
||||
}
|
||||
}
|
||||
|
||||
private _scheduleLeaseRenewer(): void {
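// Note: this is a single-shot timer, not an interval. _leaseRenewer() re-schedules the next
// renewal only after a successful renew, so losing the lease stops the renewal loop.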
|
||||
const withHostAndPartition = this._context.withHostAndPartition;
|
||||
const renewalTime = this._context.leaseRenewInterval * 1000;
|
||||
log.partitionPump(
|
||||
withHostAndPartition(this._lease, "Scheduling lease renewal in %d seconds."),
|
||||
this._context.leaseRenewInterval
|
||||
);
|
||||
this._leaseRenewalTimer = setTimeout(async () => {
|
||||
try {
|
||||
await this._leaseRenewer();
|
||||
} catch (err: any) {
|
||||
log.error(
|
||||
withHostAndPartition(this._lease, "An error occurred in the _leaseRenewer(): %O"),
|
||||
err
|
||||
);
|
||||
}
|
||||
}, renewalTime);
|
||||
}
|
||||
|
||||
private async _removeReceiver(reason: CloseReason): Promise<void> {
|
||||
const receiveHandler = this._receiveHandler;
|
||||
const partitionContext = this._partitionContext;
|
||||
const partitionId = partitionContext.partitionId;
|
||||
const leaseId = (this._lease as AzureBlobLease).token;
|
||||
const withHostAndPartition = this._context.withHostAndPartition;
|
||||
|
||||
if (receiveHandler && this._client) {
|
||||
try {
|
||||
this._context.pumps.delete(partitionId);
|
||||
log.partitionPump(withHostAndPartition(partitionId, "Deleted the pump from internal map."));
|
||||
clearTimeout(this._leaseRenewalTimer as NodeJS.Timer);
|
||||
log.partitionPump(
|
||||
withHostAndPartition(partitionId, "Removing receiver '%s', due to reason '%s'."),
|
||||
receiveHandler.address,
|
||||
partitionId,
|
||||
reason
|
||||
);
|
||||
await this._client.close();
|
||||
log.partitionPump(
|
||||
withHostAndPartition(
|
||||
partitionId,
|
||||
"Successfully stopped the receiver '%s' for partitionId '%s' due to reason '%s'."
|
||||
),
|
||||
receiveHandler.address,
|
||||
partitionId,
|
||||
reason
|
||||
);
|
||||
} catch (err: any) {
|
||||
const msg =
|
||||
`An error occurred while closing the receiver '${receiveHandler.address}' : ` +
|
||||
`${err ? err.stack : JSON.stringify(err)}`;
|
||||
log.error(withHostAndPartition(partitionId, "%s"), msg);
|
||||
}
|
||||
this._receiveHandler = undefined;
|
||||
this._client = undefined;
|
||||
// Release the lease if it was not lost.
|
||||
if (reason !== CloseReason.leaseLost) {
|
||||
try {
|
||||
log.partitionPump(
|
||||
withHostAndPartition(
|
||||
partitionContext,
|
||||
"Releasing lease %s after closing the receiver '%s' due to reason '%s'."
|
||||
),
|
||||
leaseId,
|
||||
receiveHandler.address,
|
||||
reason
|
||||
);
|
||||
await this._context.leaseManager.releaseLease(partitionContext.lease);
|
||||
} catch (err: any) {
|
||||
const msg =
|
||||
`An error occurred while releasing the lease ${leaseId} after closing ` +
|
||||
`the receiver '${receiveHandler.address}' : ${err ? err.stack : JSON.stringify(err)} `;
|
||||
log.error(withHostAndPartition(partitionId, "%s"), msg);
|
||||
throw err;
|
||||
}
|
||||
}
|
||||
} else {
|
||||
log.partitionPump(withHostAndPartition(partitionId, "No receiver was found to remove."));
|
||||
}
|
||||
}
|
||||
|
||||
private _isReceiverDisconnectedError(error: MessagingError | Error): boolean {
|
||||
const partitionId = this._partitionContext.partitionId;
|
||||
const withHostAndPartition = this._context.withHostAndPartition;
|
||||
let result = false;
|
||||
if (error) {
|
||||
// condition is "amqp:link:stolen"
|
||||
if (
|
||||
(error as MessagingError).condition === ErrorNameConditionMapper.ReceiverDisconnectedError
|
||||
) {
|
||||
result = true;
|
||||
} else if (error.message.match(/.*New receiver with higher epoch.*/i) !== null) {
|
||||
result = true;
|
||||
log.error(
|
||||
withHostAndPartition(
|
||||
partitionId,
|
||||
"It looks like the error should have " +
|
||||
"been a 'ReceiverDisconnectedError', however it was not translated correctly: %O."
|
||||
),
|
||||
error
|
||||
);
|
||||
}
|
||||
}
|
||||
return result;
|
||||
}
|
||||
}
|
|
@@ -1,394 +0,0 @@
|
|||
// Copyright (c) Microsoft Corporation. All rights reserved.
|
||||
// Licensed under the MIT License.
|
||||
|
||||
import { randomNumberFromInterval } from "./util/utils";
|
||||
import { HostContextWithPumpManager } from "./hostContext";
|
||||
import { CompleteLease } from "./completeLease";
|
||||
import { BaseLease } from "./baseLease";
|
||||
import { EPHActionStrings } from "./util/utils";
|
||||
import * as log from "./log";
|
||||
import { EPHDiagnosticInfo } from "./modelTypes";
|
||||
|
||||
/**
|
||||
* @hidden
|
||||
*/
|
||||
export class PartitionScanner {
|
||||
private _context: HostContextWithPumpManager;
|
||||
private _allLeaseStates: BaseLease[] = [];
|
||||
private _desiredCount: number = 0;
|
||||
private _unownedCount: number = 0;
|
||||
private _leaseOwnedByOthers: Map<string, BaseLease> = new Map<string, BaseLease>();
|
||||
|
||||
constructor(context: HostContextWithPumpManager) {
|
||||
this._context = context;
|
||||
}
|
||||
|
||||
async scan(isFirst: boolean): Promise<boolean> {
|
||||
const hostName = this._context.hostName;
|
||||
const withHost = this._context.withHost;
|
||||
let didSteal = false;
|
||||
try {
|
||||
this._reset();
|
||||
log.partitionScanner(withHost("Starting a new iteration to scan partitions."));
|
||||
log.partitionScanner(withHost("Getting lease states to find out who owns what."));
|
||||
await this._getAllLeaseStates();
|
||||
log.partitionScanner(withHost("Sorting leases to find out the desired partition count."));
|
||||
const ourLeasesCount: number = await this._sortLeasesAndCalculateDesiredCount(isFirst);
|
||||
log.partitionScanner(withHost("Our lease count: %d."), ourLeasesCount);
|
||||
const attemptToAcquire: number = this._desiredCount - ourLeasesCount;
|
||||
log.partitionScanner(
|
||||
withHost("Attempting to acquire %d leases in parallel starting from " + "position 0."),
|
||||
attemptToAcquire
|
||||
);
|
||||
const remainingNeeded = await this._acquireExpiredInParallel(0, attemptToAcquire);
|
||||
log.partitionScanner(withHost("Looking to steal: %d."), remainingNeeded);
|
||||
if (remainingNeeded > 0) {
|
||||
const stealThese = await this._findLeasesToSteal(remainingNeeded);
|
||||
log.partitionScanner(withHost("Number of lease found to steal: %d."), stealThese.length);
|
||||
didSteal = await this._stealLeases(stealThese);
|
||||
log.partitionScanner(
|
||||
withHost("Have succesfully stolen: %d leases -> %s."),
|
||||
stealThese.length,
|
||||
didSteal
|
||||
);
|
||||
} else {
|
||||
log.partitionScanner(
|
||||
withHost("No need to scan further since remaining needed: %d."),
|
||||
remainingNeeded
|
||||
);
|
||||
}
|
||||
} catch (err: any) {
|
||||
didSteal = false;
|
||||
const msg =
|
||||
`An error occurred while scanning leases: ` + `${err ? err.stack : JSON.stringify(err)}.`;
|
||||
log.error(withHost("%s"), hostName, msg);
|
||||
const info: EPHDiagnosticInfo = {
|
||||
action: EPHActionStrings.scanningLeases,
|
||||
error: new Error(msg),
|
||||
hostName: hostName,
|
||||
partitionId: "N/A"
|
||||
};
|
||||
this._context.onEphError(info);
|
||||
}
|
||||
return didSteal;
|
||||
}
|
||||
|
||||
private _reset(): void {
|
||||
this._allLeaseStates = [];
|
||||
this._desiredCount = 0;
|
||||
this._unownedCount = 0;
|
||||
this._leaseOwnedByOthers = new Map<string, BaseLease>();
|
||||
}
|
||||
|
||||
private async _getAllLeaseStates(): Promise<void> {
|
||||
const result = await this._context.leaseManager.getAllLeases();
|
||||
this._allLeaseStates = result.sort();
|
||||
log.partitionScanner(
|
||||
this._context.withHost("State of all the leases that we know about:\n%O"),
|
||||
this._allLeaseStates
|
||||
);
|
||||
return;
|
||||
}
|
||||
|
||||
private _sortLeasesAndCalculateDesiredCount(isFirst: boolean): number {
|
||||
const hostName: string = this._context.hostName;
|
||||
const withHost = this._context.withHost;
|
||||
log.partitionScanner(
|
||||
withHost("Accounting input: allLeaseStates count is: %d"),
|
||||
this._allLeaseStates.length
|
||||
);
|
||||
const uniqueOwners: Set<string> = new Set<string>();
|
||||
uniqueOwners.add(hostName);
|
||||
let ourLeasesCount = 0;
|
||||
this._unownedCount = 0;
|
||||
for (const lease of this._allLeaseStates) {
|
||||
const ownedByUs: boolean = lease.isOwned && lease.owner === hostName;
|
||||
if (lease.isOwned && lease.owner) {
|
||||
uniqueOwners.add(lease.owner);
|
||||
} else {
|
||||
this._unownedCount++;
|
||||
}
|
||||
if (ownedByUs) {
|
||||
ourLeasesCount++;
|
||||
} else if (lease.isOwned) {
|
||||
this._leaseOwnedByOthers.set(lease.partitionId, lease);
|
||||
}
|
||||
}
|
||||
const hostCount = uniqueOwners.size;
|
||||
const countPerHost = Math.floor(this._allLeaseStates.length / hostCount);
|
||||
this._desiredCount = isFirst ? 1 : countPerHost;
|
||||
if (
|
||||
!isFirst &&
|
||||
this._unownedCount > 0 &&
|
||||
this._unownedCount < hostCount &&
|
||||
this._allLeaseStates.length % hostCount !== 0
|
||||
) {
|
||||
// distribute leftovers
|
||||
this._desiredCount++;
|
||||
}
|
||||
|
||||
const sortedHosts: Array<string> = Array.from(uniqueOwners).sort();
|
||||
let hostOrdinal: number = -1;
|
||||
let startingPoint: number = 0;
|
||||
if (isFirst) {
|
||||
// If the entire system is starting up, the list of hosts is probably not complete and we
|
||||
// can not really compute a meaningful hostOrdinal. But we only want hostOrdinal to
|
||||
// calculate startingPoint. Instead, just randomly select a startingPoint.
|
||||
startingPoint = randomNumberFromInterval(0, this._allLeaseStates.length - 1);
|
||||
} else {
|
||||
for (hostOrdinal = 0; hostOrdinal < sortedHosts.length; hostOrdinal++) {
|
||||
if (sortedHosts[hostOrdinal] === hostName) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
startingPoint = countPerHost * hostOrdinal;
|
||||
}
|
||||
// rotate this._allLeaseStates
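// (Each host starts scanning at a different offset into the sorted lease list, presumably
// so that concurrent hosts do not all race to acquire the same unowned leases first.)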
|
||||
log.partitionScanner(
|
||||
withHost("Host ordinal: %d. Rotating leases to start at: %d."),
|
||||
hostOrdinal,
|
||||
startingPoint
|
||||
);
|
||||
if (startingPoint !== 0) {
|
||||
const rotatedList: Array<BaseLease> = [];
|
||||
for (let i = 0; i < this._allLeaseStates.length; i++) {
|
||||
const index = (i + startingPoint) % this._allLeaseStates.length;
|
||||
rotatedList.push(this._allLeaseStates[index]);
|
||||
}
|
||||
this._allLeaseStates = rotatedList;
|
||||
}
|
||||
log.partitionScanner(
|
||||
withHost("Host count is %d. So Desired partition count is %d."),
|
||||
hostCount,
|
||||
this._desiredCount
|
||||
);
|
||||
log.partitionScanner(
|
||||
withHost("our leases count: %d v/s leases owned by others: %d."),
|
||||
ourLeasesCount,
|
||||
this._leaseOwnedByOthers.size
|
||||
);
|
||||
log.partitionScanner(withHost("Total number of pumps: %d."), this._context.pumps.size);
|
||||
return ourLeasesCount;
|
||||
}
|
||||
|
||||
private _findExpiredLeases(startAt: number, endAt: number): BaseLease[] {
|
||||
const withHost = this._context.withHost;
|
||||
const expiredLeases: BaseLease[] = [];
|
||||
log.partitionScanner(
|
||||
withHost("Finding expired leases from '%s': [%d] upto '%s': [%d]"),
|
||||
this._allLeaseStates[startAt].partitionId,
|
||||
startAt,
|
||||
endAt < this._allLeaseStates.length ? this._allLeaseStates[endAt].partitionId : "end",
|
||||
endAt
|
||||
);
|
||||
for (const lease of this._allLeaseStates.slice(startAt, endAt)) {
|
||||
if (!lease.isOwned) {
|
||||
expiredLeases.push(lease);
|
||||
}
|
||||
}
|
||||
log.partitionScanner(withHost("Found in range: %d"), expiredLeases.length);
|
||||
return expiredLeases;
|
||||
}
|
||||
|
||||
private _acquireExpiredInParallel(startAt: number, needed: number): Promise<number> {
|
||||
const hostName = this._context.hostName;
|
||||
const withHost = this._context.withHost;
|
||||
const withHostAndPartition = this._context.withHostAndPartition;
|
||||
const resultPromise = Promise.resolve(needed);
|
||||
if (startAt < this._allLeaseStates.length) {
|
||||
const lease = this._allLeaseStates[startAt];
|
||||
const partitionId = lease ? lease.partitionId : "undefined";
|
||||
log.partitionScanner(
|
||||
withHost("Examining chunk at '%s': [%d], needed %d."),
|
||||
partitionId,
|
||||
startAt,
|
||||
needed
|
||||
);
|
||||
} else {
|
||||
log.partitionScanner(withHost("Examining chunk skipping, startAt is off end: %d"), startAt);
|
||||
}
|
||||
|
||||
if (needed > 0 && this._unownedCount > 0 && startAt < this._allLeaseStates.length) {
|
||||
let runningNeeded = needed;
|
||||
const endAt = Math.min(startAt + needed, this._allLeaseStates.length);
|
||||
log.partitionScanner(
|
||||
withHost("Finding expired leases from inclusive position range %d - %d"),
|
||||
startAt,
|
||||
endAt
|
||||
);
|
||||
const getThese: BaseLease[] = this._findExpiredLeases(startAt, endAt);
|
||||
const leaseManager = this._context.leaseManager;
|
||||
const getTheseResult: Promise<void>[] = [];
|
||||
for (const thisLease of getThese) {
|
||||
let lease: CompleteLease | undefined = undefined;
|
||||
const getThisPromise = leaseManager
|
||||
.getLease(thisLease.partitionId)
|
||||
.then((receivedLease) => {
|
||||
lease = receivedLease;
|
||||
if (lease) {
|
||||
return leaseManager.acquireLease(lease);
|
||||
} else {
|
||||
return false;
|
||||
}
|
||||
})
|
||||
.then((acquired) => {
|
||||
if (acquired) {
|
||||
runningNeeded--;
|
||||
log.partitionScanner(
|
||||
withHostAndPartition(thisLease, "Acquired unowned/expired lease.")
|
||||
);
|
||||
if (this._leaseOwnedByOthers.has(lease!.partitionId)) {
|
||||
this._leaseOwnedByOthers.delete(lease!.partitionId);
|
||||
this._unownedCount--;
|
||||
}
|
||||
return this._context.pumpManager.addPump(lease!);
|
||||
} else if (lease) {
|
||||
this._leaseOwnedByOthers.set(lease.partitionId, lease);
|
||||
}
|
||||
return Promise.resolve();
|
||||
})
|
||||
.catch((err) => {
|
||||
const msg =
|
||||
`An error occurred while getting/acquiring lease for partitionId ` +
|
||||
`'${thisLease.partitionId}': ${err ? err.stack : JSON.stringify(err)}`;
|
||||
log.error(withHostAndPartition(thisLease, "%s"), msg);
|
||||
const info: EPHDiagnosticInfo = {
|
||||
action: EPHActionStrings.checkingLeases,
|
||||
error: new Error(msg),
|
||||
hostName: hostName,
|
||||
partitionId: thisLease.partitionId
|
||||
};
|
||||
this._context.onEphError(info);
|
||||
});
|
||||
getTheseResult.push(getThisPromise);
|
||||
}
|
||||
|
||||
return resultPromise.then(() => {
|
||||
return Promise.all(getTheseResult)
|
||||
.catch((err) => {
|
||||
const msg =
|
||||
`An error occurred while getting/acquiring leases for some partitionId: ` +
|
||||
`${err ? err.stack : JSON.stringify(err)}`;
|
||||
log.error(withHost("%s"), msg);
|
||||
})
|
||||
.then(() => {
|
||||
log.partitionScanner(
|
||||
withHost("Calling _acquireExpiredInParallel with startAt %d, " + "needed %d."),
|
||||
endAt,
|
||||
runningNeeded
|
||||
);
|
||||
return this._acquireExpiredInParallel(endAt, runningNeeded);
|
||||
});
|
||||
});
|
||||
} else {
|
||||
log.partitionScanner(
|
||||
withHost("Short circuit: needed is %d, unowned is: %d, off end -> %s."),
|
||||
needed,
|
||||
this._unownedCount,
|
||||
startAt < this._allLeaseStates.length
|
||||
);
|
||||
}
|
||||
|
||||
return resultPromise;
|
||||
}
|
||||
|
||||
private _findLeasesToSteal(stealAsk: number): BaseLease[] {
|
||||
// Generate a map of hostnames and owned counts.
|
||||
const hostOwns: Map<string, number> = new Map<string, number>();
|
||||
for (const lease of this._leaseOwnedByOthers.values()) {
|
||||
if (hostOwns.has(lease.owner)) {
|
||||
const newCount = hostOwns.get(lease.owner)! + 1;
|
||||
hostOwns.set(lease.owner, newCount);
|
||||
} else {
|
||||
hostOwns.set(lease.owner, 1);
|
||||
}
|
||||
}
|
||||
|
||||
// Extract hosts which own more than the desired count
|
||||
const bigOwners: Array<string> = [];
|
||||
const withHost = this._context.withHost;
|
||||
for (const entry of hostOwns.entries()) {
|
||||
if (entry[1] > this._desiredCount) {
|
||||
bigOwners.push(entry[0]);
|
||||
log.partitionScanner(withHost("Big owner %s has %d"), entry[0], entry[1]);
|
||||
}
|
||||
}
|
||||
const stealLeases: BaseLease[] = [];
|
||||
if (bigOwners.length > 0) {
|
||||
// Randomly pick one of the big owners
|
||||
const index: number = randomNumberFromInterval(0, bigOwners.length - 1); // max is inclusive
|
||||
const bigVictim: string = bigOwners[index];
|
||||
const victimExtra = hostOwns.get(bigVictim)! - this._desiredCount - 1;
|
||||
const stealCount = Math.min(victimExtra, stealAsk);
|
||||
log.partitionScanner(withHost("Stealing %d from %s."), stealCount, bigVictim);
|
||||
|
||||
// Grab stealCount partitions owned by bigVictim and return the leases.
|
||||
for (const candidate of this._allLeaseStates) {
|
||||
if (candidate.owner != undefined && candidate.owner === bigVictim) {
|
||||
stealLeases.push(candidate);
|
||||
if (stealLeases.length >= stealCount) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
log.partitionScanner(withHost("No big owners found, skipping steal."));
|
||||
}
|
||||
return stealLeases;
|
||||
}
|
||||
|
||||
private async _stealLeases(stealThese: BaseLease[]): Promise<boolean> {
|
||||
const withHostAndPartition = this._context.withHostAndPartition;
|
||||
if (stealThese.length > 0) {
|
||||
const steals: Promise<boolean>[] = [];
|
||||
for (const stealableLease of stealThese) {
|
||||
let lease: CompleteLease | undefined = undefined;
|
||||
const tryStealPromise: Promise<boolean> = this._context.leaseManager
|
||||
.getLease(stealableLease.partitionId)
|
||||
.then((receivedLease) => {
|
||||
lease = receivedLease;
|
||||
if (receivedLease) {
|
||||
return this._context.leaseManager.acquireLease(receivedLease);
|
||||
} else {
|
||||
return false;
|
||||
}
|
||||
})
|
||||
.then((acquired) => {
|
||||
if (acquired) {
|
||||
this._context.pumpManager.addPump(lease!).catch(() => {
/* addPump reports its own errors via onEphError */
});
|
||||
log.partitionScanner(
|
||||
withHostAndPartition(stealableLease, "Successfully stolen the lease.")
|
||||
);
|
||||
}
|
||||
return acquired;
|
||||
})
|
||||
.catch((err) => {
|
||||
const msg =
|
||||
`An error occurred while stealing the lease for partitionId ` +
|
||||
`'${stealableLease.partitionId}': ${err ? err.stack : JSON.stringify(err)}`;
|
||||
log.error(withHostAndPartition(stealableLease, "%s"), msg);
|
||||
const info: EPHDiagnosticInfo = {
|
||||
action: EPHActionStrings.stealingLease,
|
||||
partitionId: stealableLease.partitionId,
|
||||
hostName: this._context.hostName,
|
||||
error: err
|
||||
};
|
||||
this._context.onEphError(info);
|
||||
return false;
|
||||
});
|
||||
steals.push(tryStealPromise);
|
||||
}
|
||||
|
||||
const stealResult = await Promise.all<boolean>(steals);
|
||||
// `.some()` returns true if at least one lease could not be stolen (i.e. some `x` is false).
|
||||
// So the final result is true only when every attempted steal succeeded.
|
||||
const result = !stealResult.some((x) => {
|
||||
return !x;
|
||||
});
|
||||
return result;
|
||||
} else {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
}
|
|
@@ -1,103 +0,0 @@
|
|||
// Copyright (c) Microsoft Corporation. All rights reserved.
|
||||
// Licensed under the MIT License.
|
||||
|
||||
import * as log from "./log";
|
||||
import { HostContextWithCheckpointLeaseManager } from "./hostContext";
|
||||
import { PartitionPump } from "./partitionPump";
|
||||
import { CompleteLease } from "./completeLease";
|
||||
import { CloseReason } from "./modelTypes";
|
||||
import { EPHActionStrings } from "./util/utils";
|
||||
|
||||
/**
|
||||
* @hidden
|
||||
*/
|
||||
export class PumpManager {
|
||||
private _context: HostContextWithCheckpointLeaseManager;
|
||||
|
||||
constructor(context: HostContextWithCheckpointLeaseManager) {
|
||||
this._context = context;
|
||||
}
|
||||
|
||||
async addPump(lease: CompleteLease): Promise<void> {
|
||||
const hostName = this._context.hostName;
|
||||
const partitionId = lease.partitionId;
|
||||
const withHostAndPartition = this._context.withHostAndPartition;
|
||||
try {
|
||||
const capturedPump = this._context.pumps.get(partitionId);
|
||||
if (capturedPump) {
|
||||
const isOpen = capturedPump.isOpen();
|
||||
if (!isOpen) {
|
||||
log.error(withHostAndPartition(partitionId, "The existing pump is open -> %s."), isOpen);
|
||||
await this.removePump(partitionId, CloseReason.shutdown);
|
||||
} else {
|
||||
log.pumpManager(
|
||||
withHostAndPartition(
|
||||
partitionId,
|
||||
"Updating lease for pump since it" + "is open -> %s."
|
||||
),
|
||||
partitionId,
|
||||
isOpen
|
||||
);
|
||||
capturedPump.lease = lease;
|
||||
}
|
||||
} else {
|
||||
log.pumpManager(
|
||||
withHostAndPartition(partitionId, "Creating a new pump with lease %o."),
|
||||
lease.getInfo()
|
||||
);
|
||||
const pump = new PartitionPump(
|
||||
this._context,
|
||||
lease,
|
||||
this._context.onMessage!,
|
||||
this._context.onError!
|
||||
);
|
||||
await pump.start();
|
||||
}
|
||||
} catch (err: any) {
|
||||
const msg =
|
||||
`An error occurred while adding/updating a pump for partitionId ` +
|
||||
`'${partitionId}': ${err ? err.stack : JSON.stringify(err)}`;
|
||||
log.error(withHostAndPartition(partitionId, "%s."), msg);
|
||||
this._context.onEphError({
|
||||
hostName: hostName,
|
||||
partitionId: partitionId,
|
||||
error: new Error(msg),
|
||||
action: EPHActionStrings.partitionReceiverManagement
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
async removePump(partitionId: string, reason: CloseReason): Promise<void> {
|
||||
const withHostAndPartition = this._context.withHostAndPartition;
|
||||
try {
|
||||
const capturedPump = this._context.pumps.get(partitionId);
|
||||
if (capturedPump) {
|
||||
log.pumpManager(withHostAndPartition(partitionId, "Stopping the pump."));
|
||||
await capturedPump.stop(reason);
|
||||
} else {
|
||||
log.pumpManager(withHostAndPartition(partitionId, "No pump was found, to remove."));
|
||||
}
|
||||
} catch (err: any) {
|
||||
const msg =
|
||||
`An error occurred while removing a pump for partitionId '${partitionId}': ` +
|
||||
`${err ? err.stack : JSON.stringify(err)}`;
|
||||
log.error(withHostAndPartition(partitionId, "%s."), msg);
|
||||
this._context.onEphError({
|
||||
hostName: this._context.hostName,
|
||||
partitionId: partitionId,
|
||||
error: new Error(msg),
|
||||
action: EPHActionStrings.partitionReceiverManagement
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
async removeAllPumps(reason: CloseReason): Promise<void> {
|
||||
const withHost = this._context.withHost;
|
||||
const tasks: Promise<void>[] = [];
|
||||
for (const id of this._context.pumps.keys()) {
|
||||
tasks.push(this.removePump(id, reason));
|
||||
}
|
||||
log.partitionManager(withHost("Removing all the pumps due to reason %s."), reason);
|
||||
await Promise.all(tasks);
|
||||
}
|
||||
}
|
|
@@ -1,22 +0,0 @@
|
|||
// Copyright (c) Microsoft Corporation. All rights reserved.
|
||||
// Licensed under the MIT License.
|
||||
|
||||
export const minLeaseDurationInSeconds = 15;
|
||||
export const maxLeaseDurationInSeconds = 60;
|
||||
export const defaultLeaseDurationInSeconds = 30;
|
||||
export const defaultLeaseRenewIntervalInSeconds = 10;
|
||||
export const defaultMaximumExecutionTimeInMs = 120000;
|
||||
export const maximumExecutionTimeInMsForLeaseRenewal = 60000;
|
||||
export const defaultCheckpointTimeoutInSeconds = 120;
|
||||
export const defaultStartupScanDelayInSeconds = 30;
|
||||
export const defaultFastScanIntervalInSeconds = 3;
|
||||
export const defaultSlowScanIntervalInSeconds = 5;
|
||||
export const metadataOwnerName = "owninghost";
|
||||
export const leaseLost = "leaselost";
|
||||
export const leaseIdMismatchWithLeaseOperation = "leaseidmismatchwithleaseoperation";
|
||||
export const leaseIdMismatchWithBlobOperation = "leaseidmismatchwithbloboperation";
|
||||
export const defaultConsumerGroup = "$default";
|
||||
export const packageInfo = {
|
||||
name: "@azure/event-processor-host",
|
||||
version: "2.1.1"
|
||||
};
|
|
@@ -1,197 +0,0 @@
|
|||
// Copyright (c) Microsoft Corporation. All rights reserved.
|
||||
// Licensed under the MIT License.
|
||||
|
||||
import * as log from "../log";
|
||||
import { StorageError } from "azure-storage";
|
||||
import { EPHDiagnosticInfo } from "../modelTypes";
|
||||
|
||||
/**
|
||||
* Generates a random number between the given interval
|
||||
* @param {number} min Min number of the range (inclusive).
|
||||
* @param {number} max Max number of the range (inclusive).
|
||||
*/
|
||||
export function randomNumberFromInterval(min: number, max: number): number {
|
||||
return Math.floor(Math.random() * (max - min + 1) + min);
|
||||
}
|
||||
|
||||
/**
|
||||
* Validates the type and requiredness of a given parameter.
|
||||
* @param paramName The name of the parameter.
|
||||
* @param paramValue The parameter value
|
||||
* @param type The type of the parameter
|
||||
*/
|
||||
export function validateType(
|
||||
paramName: string,
|
||||
paramValue: any,
|
||||
required: boolean,
|
||||
type: "string" | "number" | "boolean" | "Array" | "object" | "Date" | "function"
|
||||
): void {
|
||||
if (required && paramValue == undefined) {
|
||||
throw new TypeError(
|
||||
`${paramName} is required. Given value: ${paramValue}. Hence it cannot be null or undefined.`
|
||||
);
|
||||
}
|
||||
if (paramValue != undefined) {
|
||||
if (type === "Array") {
|
||||
if (!Array.isArray(paramValue)) {
|
||||
throw new TypeError(`${paramName} must be of type "${type}".`);
|
||||
}
|
||||
} else if (type === "Date") {
|
||||
if (!(paramValue instanceof Date)) {
|
||||
throw new TypeError(`${paramName} must be of type "${type}".`);
|
||||
}
|
||||
} else if (
|
||||
type === "string" ||
|
||||
type === "number" ||
|
||||
type === "boolean" ||
|
||||
type === "object" ||
|
||||
type === "function"
|
||||
) {
|
||||
if (typeof paramValue !== type) {
|
||||
throw new TypeError(`${paramName} must be of type "${type}".`);
|
||||
}
|
||||
} else {
|
||||
throw new Error(
|
||||
`Invalid argument. type "${type}" is not a valid type. Valid values are: ` +
|
||||
`"string", "number", "boolean", "Array", "object", "Date", "function"`
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* @hidden
|
||||
*/
|
||||
export interface StorageErrorInfo {
|
||||
name: string;
|
||||
message: string;
|
||||
statusCode: number;
|
||||
code: string;
|
||||
requestId: string;
|
||||
}
|
||||
|
||||
/**
|
||||
* @hidden
|
||||
*/
|
||||
export function getStorageError(err: StorageError): StorageErrorInfo {
|
||||
return {
|
||||
name: err.name,
|
||||
message: err.message,
|
||||
statusCode: err.statusCode!,
|
||||
code: err.code!,
|
||||
requestId: err.requestId!
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* @hidden
|
||||
*/
|
||||
export interface RetryConfig<T> {
|
||||
hostName: string;
|
||||
operation: () => Promise<T>;
|
||||
partitionId?: string;
|
||||
retryMessage: string;
|
||||
finalFailureMessage: string;
|
||||
action: string;
|
||||
maxRetries: number;
|
||||
}
|
||||
|
||||
/**
|
||||
* @hidden
|
||||
*/
|
||||
export enum EPHActionStrings {
|
||||
acquireLease = "Acquire Lease",
|
||||
gettingPartitionIds = "Getting PartitionIds",
|
||||
gettingAllLeases = "Getting All Leases",
|
||||
creatingAllLeases = "Creating All Leases",
|
||||
scanningLeases = "Scanning leases",
|
||||
checkingLeases = "Checking Leases",
|
||||
checkingExpiredLeases = "Checking Expired Leases",
|
||||
renewingLease = "Renewing Lease",
|
||||
stealingLease = "Stealing Lease",
|
||||
creatingLease = "Creating Lease",
|
||||
creatingCheckpoint = "Creating Checkpoint",
|
||||
updatingCheckpoint = "Updating Checkpoint",
|
||||
creatingCheckpointStore = "Creating Checkpoint Store",
|
||||
creatingEventProcessor = "Creating Event Processor",
|
||||
creatingLeaseStore = "Creating Lease Store",
|
||||
initializingStores = "Initializing Stores",
|
||||
partitionManagerCleanup = "Partition Manager Cleanup",
|
||||
partitionManagerMainLoop = "Partition Manager Main Loop",
|
||||
partitionReceiverManagement = "Partition Receiver Management",
|
||||
deletingLeaseStore = "Deleting Lease Store"
|
||||
}
|
||||
|
||||
/**
|
||||
* @hidden
|
||||
*/
|
||||
export async function retry<T>(config: RetryConfig<T>): Promise<T> {
|
||||
let createdOK: boolean = false;
|
||||
let retryCount: number = 0;
|
||||
let result: T = undefined as any;
|
||||
let innerError: Error | undefined = undefined;
|
||||
do {
|
||||
try {
|
||||
result = await config.operation();
|
||||
createdOK = true;
|
||||
if (config.partitionId) {
|
||||
log.util(
|
||||
"[%s] Retry attempt: %d. Action '%s' for partitionId: '%s' suceeded.",
|
||||
config.hostName,
|
||||
retryCount,
|
||||
config.action,
|
||||
config.partitionId
|
||||
);
|
||||
} else {
|
||||
log.util(
|
||||
"[%s] Retry attempt: %d. Action '%s' suceeded.",
|
||||
config.hostName,
|
||||
retryCount,
|
||||
config.action
|
||||
);
|
||||
}
|
||||
} catch (err: any) {
|
||||
innerError = err;
|
||||
if (config.partitionId) {
|
||||
log.error(
|
||||
"[%s] An error occurred. Retry attempt: %d. PartitionId: '%s'. %s: %O",
|
||||
config.hostName,
|
||||
retryCount,
|
||||
config.partitionId,
|
||||
config.retryMessage,
|
||||
err
|
||||
);
|
||||
} else {
|
||||
log.error(
|
||||
"[%s] An error occurred. Retry attempt: %d. %s: %O",
|
||||
config.hostName,
|
||||
retryCount,
|
||||
config.retryMessage,
|
||||
err
|
||||
);
|
||||
}
|
||||
retryCount++;
|
||||
}
|
||||
} while (!createdOK && retryCount < config.maxRetries);
|
||||
|
||||
if (!createdOK) {
|
||||
let msg: string;
|
||||
if (innerError) {
|
||||
msg =
|
||||
`${config.finalFailureMessage} while performing the action "${config.action}" ` +
|
||||
`due to ${innerError.stack ? innerError.stack : JSON.stringify(innerError)}`;
|
||||
} else {
|
||||
msg = `${config.finalFailureMessage} while performing the action "${config.action}"`;
|
||||
}
|
||||
|
||||
log.error("[%s] %s", config.hostName, msg);
|
||||
const info: EPHDiagnosticInfo = {
|
||||
action: config.action,
|
||||
hostName: config.hostName,
|
||||
partitionId: config.partitionId || "N/A",
|
||||
error: new Error(msg)
|
||||
};
|
||||
throw info;
|
||||
}
|
||||
return result;
|
||||
}
|
|
@@ -1,41 +0,0 @@
|
|||
# Testing
|
||||
|
||||
To test this project, make sure to build it by following our [building instructions](https://github.com/Azure/azure-sdk-for-js/blob/main/CONTRIBUTING.md#building), then follow the [testing instructions](https://github.com/Azure/azure-sdk-for-js/blob/main/CONTRIBUTING.md#testing).
|
||||
|
||||
The Azure Event Processor Host client does not have any recorded tests, so all the tests require an Azure Event Hubs namespace to be set up beforehand with at least a single Event Hub instance in it. You can use existing Azure resources for the live tests, or generate new ones by using our [New-TestResources.ps1](https://github.com/Azure/azure-sdk-for-js/blob/main/eng/common/TestResources/New-TestResources.ps1) script, which will use an [ARM template](https://github.com/Azure/azure-sdk-for-js/blob/main/sdk/eventhub/test-resources.json) that already has all of the necessary configurations.
|
||||
|
||||
The Azure resources that are used by the tests in this project are:
|
||||
|
||||
- An [Azure Event Hubs namespace](https://docs.microsoft.com/azure/event-hubs/event-hubs-features#namespace). Your Azure application needs to be assigned as the **owner** of this Event Hubs namespace. The steps are provided [below](#aad-based-authentication).
|
||||
- Some very basic Network configurations for the Event Hubs namespace.
|
||||
- A [consumer group](https://docs.microsoft.com/azure/event-hubs/event-hubs-features#consumer-groups) to publish and subscribe to the Event Hubs namespace created.
|
||||
- An [Azure Storage account](https://docs.microsoft.com/azure/storage/common/storage-account-overview) configured to provide [blob storage](https://docs.microsoft.com/azure/storage/blobs/storage-blobs-introduction#blob-storage-resources).
|
||||
- An [Azure IoT Hub account](https://docs.microsoft.com/azure/iot-hub/about-iot-hub).
|
||||
|
||||
To run the live tests, you will also need to set the following environment variables (a sample `.env` layout is sketched after this list):
|
||||
|
||||
- `TEST_MODE`: Should have `live` assigned.
|
||||
- `EVENTHUB_NAME`: The name of the Event Hub (entity) within your Azure Event Hubs namespace.
|
||||
- `EVENTHUB_CONNECTION_STRING`: The connection string of your Azure Event Hubs namespace.
- `STORAGE_CONNECTION_STRING`: The connection string of the Azure Storage account used for the lease and checkpoint blobs (the tests validate that this is set).
|
||||
- `AZURE_CLIENT_ID`: The client ID of an Azure Active Directory application.
|
||||
- `AZURE_CLIENT_SECRET`: The client secret of an Azure Active Directory application.
|
||||
- `AZURE_TENANT_ID`: The Tenant ID of your organization in Azure Active Directory.
|
||||
|
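For reference, the tests load these variables through `dotenv`, so a `.env` file in the directory from which the tests are run is a convenient way to supply them. The sketch below uses placeholder values only; substitute the details of your own resources:

```
TEST_MODE=live
EVENTHUB_NAME=<your-event-hub-name>
EVENTHUB_CONNECTION_STRING=Endpoint=sb://<your-namespace>.servicebus.windows.net/;SharedAccessKeyName=<key-name>;SharedAccessKey=<key>
STORAGE_CONNECTION_STRING=DefaultEndpointsProtocol=https;AccountName=<account-name>;AccountKey=<account-key>;EndpointSuffix=core.windows.net
AZURE_CLIENT_ID=<application-client-id>
AZURE_CLIENT_SECRET=<application-client-secret>
AZURE_TENANT_ID=<tenant-id>
```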
||||
## AAD based authentication
|
||||
|
||||
The following steps will help you set up the AAD credentials.
|
||||
|
||||
### Register a new application in AAD
|
||||
|
||||
- Follow [Documentation to register a new application](https://docs.microsoft.com/azure/active-directory/develop/quickstart-register-app) in the Azure Active Directory (in the Azure portal).
|
||||
- Note down the `CLIENT_ID` and `TENANT_ID`.
|
||||
- In the "Certificates & Secrets" tab, create a secret and note that down.
|
||||
|
||||
### Assign owner role to the registered application
|
||||
|
||||
- In the Azure portal, go to your Azure Event Hubs namespace and assign the **Owner** role to the registered application.
|
||||
- This can be done from the `Role assignment` section of the `Access control (IAM)` tab (in the left-side navigation of your Azure Event Hubs namespace in the Azure portal); a possible CLI alternative is sketched after this list.<br>
|
||||
_Doing this would allow the registered application to manage the namespace, i.e., entity creation, deletion, etc._<br>
|
||||
- For more information on securing your Azure Event Hubs namespace: [Learn more](https://docs.microsoft.com/azure/event-hubs/authorize-access-event-hubs)
|
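If you prefer the command line, the same role assignment can be made with the Azure CLI. The sketch below is only an illustration; it assumes the Azure CLI is installed and that you substitute your own application (client) ID, subscription, resource group, and namespace name:

```
az role assignment create \
  --role "Owner" \
  --assignee "<application-client-id>" \
  --scope "/subscriptions/<subscription-id>/resourceGroups/<resource-group>/providers/Microsoft.EventHub/namespaces/<namespace-name>"
```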
||||
|
||||
![Impressions](https://azure-sdk-impressions.azurewebsites.net/api/impressions/azure-sdk-for-js%2Fsdk%2Feventhub%2Fevent-processor-host%2Ftest%2FREADME.png)
|
|
@@ -1,694 +0,0 @@
|
|||
// Copyright (c) Microsoft Corporation. All rights reserved.
|
||||
// Licensed under the MIT License.
|
||||
|
||||
import chai from "chai";
|
||||
import os from "os";
|
||||
import { v4 as uuid } from "uuid";
|
||||
import chaiAsPromised from "chai-as-promised";
|
||||
chai.use(chaiAsPromised);
|
||||
import chaiString from "chai-string";
|
||||
chai.use(chaiString);
|
||||
import debugModule from "debug";
|
||||
const should = chai.should();
|
||||
const debug = debugModule("azure:eph:eph-spec");
|
||||
import { EventHubClient, EventData, EventPosition, delay, Dictionary } from "@azure/event-hubs";
|
||||
import dotenv from "dotenv";
|
||||
import { PartitionContext, OnReceivedMessage, EventProcessorHost, OnReceivedError } from "../src";
|
||||
import { packageInfo } from "../src/util/constants";
|
||||
dotenv.config();
|
||||
|
||||
describe("EPH", function(): void {
|
||||
before("validate environment", function(): void {
|
||||
should.exist(
|
||||
process.env.STORAGE_CONNECTION_STRING,
|
||||
"define STORAGE_CONNECTION_STRING in your environment before running integration tests."
|
||||
);
|
||||
should.exist(
|
||||
process.env.EVENTHUB_CONNECTION_STRING,
|
||||
"define EVENTHUB_CONNECTION_STRING in your environment before running integration tests."
|
||||
);
|
||||
should.exist(
|
||||
process.env.EVENTHUB_NAME,
|
||||
"define EVENTHUB_NAME in your environment before running integration tests."
|
||||
);
|
||||
});
|
||||
const ehConnString = process.env.EVENTHUB_CONNECTION_STRING;
|
||||
const storageConnString = process.env.STORAGE_CONNECTION_STRING;
|
||||
const hubName = process.env.EVENTHUB_NAME;
|
||||
let host: EventProcessorHost;
|
||||
|
||||
describe("user-agent", function(): void {
|
||||
it("should be populated correctly as a part of the connection property", function(done: Mocha.Done): void {
|
||||
host = EventProcessorHost.createFromConnectionString(
|
||||
EventProcessorHost.createHostName(),
|
||||
storageConnString!,
|
||||
"test-container",
|
||||
ehConnString!,
|
||||
{
|
||||
eventHubPath: hubName!
|
||||
}
|
||||
);
|
||||
const context = host["_context"];
|
||||
const uaPrefix = `azsdk-js-azureeventprocessorhost/${packageInfo.version} `;
|
||||
context.userAgent.should.include(uaPrefix);
|
||||
context.userAgent.should.include(
|
||||
`NODE-VERSION ${process.version}; ${os.type()} ${os.release()}`
|
||||
);
|
||||
const ehc: EventHubClient = context.getEventHubClient();
|
||||
const properties = ehc["_context"].connection.options.properties;
|
||||
properties["user-agent"].should.include(uaPrefix);
|
||||
should.equal(properties.product, "MSJSClient");
|
||||
done();
|
||||
});
|
||||
|
||||
it("should support appending custom user-agent", function(done: Mocha.Done): void {
|
||||
const customua = "my-custom-string";
|
||||
host = EventProcessorHost.createFromConnectionString(
|
||||
EventProcessorHost.createHostName(),
|
||||
storageConnString!,
|
||||
"test-container",
|
||||
ehConnString!,
|
||||
{
|
||||
eventHubPath: hubName!,
|
||||
userAgent: customua
|
||||
}
|
||||
);
|
||||
const context = host["_context"];
|
||||
const uaPrefix = `azsdk-js-azureeventprocessorhost/${packageInfo.version} `;
|
||||
context.userAgent.should.startWith(uaPrefix);
|
||||
context.userAgent.should.endWith(customua);
|
||||
const ehc: EventHubClient = context.getEventHubClient();
|
||||
const properties = ehc["_context"].connection.options.properties;
|
||||
properties["user-agent"].should.include(uaPrefix);
|
||||
properties["user-agent"].should.endWith(customua);
|
||||
should.equal(properties.product, "MSJSClient");
|
||||
done();
|
||||
});
|
||||
});
|
||||
|
||||
describe("single", function(): void {
|
||||
it("should checkpoint messages in order", function(done: Mocha.Done): void {
|
||||
const test = async () => {
|
||||
host = EventProcessorHost.createFromConnectionString(
|
||||
EventProcessorHost.createHostName(),
|
||||
storageConnString!,
|
||||
EventProcessorHost.createHostName("single"),
|
||||
ehConnString!,
|
||||
{
|
||||
eventHubPath: hubName!,
|
||||
initialOffset: EventPosition.fromEnqueuedTime(Date.now())
|
||||
}
|
||||
);
|
||||
const messageCount = 100;
|
||||
const datas: EventData[] = [];
|
||||
for (let i = 0; i < messageCount; i++) {
|
||||
const obj: EventData = { body: `Hello foo ${i}` };
|
||||
datas.push(obj);
|
||||
}
|
||||
const ehc = EventHubClient.createFromConnectionString(ehConnString!, hubName!);
|
||||
await ehc.sendBatch(datas, "0");
|
||||
await ehc.close();
|
||||
debug("Sent batch message successfully");
|
||||
let num = 0;
|
||||
let offset = "0";
|
||||
let sequence = 0;
|
||||
let doneCheckpointing = false;
|
||||
const onMessage = async (context: PartitionContext, data: EventData) => {
|
||||
++num;
|
||||
debug("num: %d", num);
|
||||
if (num % 10 === 0) {
|
||||
const cpointNum = num;
|
||||
try {
|
||||
await context.checkpoint();
|
||||
debug("Done checkpointing: %d", cpointNum);
|
||||
if (num === 100) {
|
||||
offset = data.offset as string;
|
||||
sequence = data.sequenceNumber as number;
|
||||
doneCheckpointing = true;
|
||||
}
|
||||
} catch (err: any) {
|
||||
debug("An error occurred while checkpointing msg number %d: %O", num, err);
|
||||
}
|
||||
}
|
||||
};
|
||||
const onError: OnReceivedError = (err) => {
|
||||
debug("An error occurred while receiving the message: %O", err);
|
||||
throw err;
|
||||
};
|
||||
await host.start(onMessage, onError);
|
||||
while (!doneCheckpointing) {
|
||||
debug("Not done checkpointing -> %s, sleeping for 10 more seconds.", doneCheckpointing);
|
||||
await delay(10000);
|
||||
}
|
||||
debug("sleeping for 10 more seconds..");
|
||||
await delay(10000);
|
||||
const stringContent = await host["_context"].blobReferenceByPartition["0"].getContent();
|
||||
const content = JSON.parse(stringContent);
|
||||
debug("Fetched content from blob is: %o", content);
|
||||
content.offset.should.equal(offset);
|
||||
content.sequenceNumber.should.equal(sequence);
|
||||
await host.stop();
|
||||
};
|
||||
test()
|
||||
.then(() => {
|
||||
done();
|
||||
})
|
||||
.catch((err) => {
|
||||
done(err);
|
||||
});
|
||||
});
|
||||
|
||||
it("should checkpoint a single received event.", function(done: Mocha.Done): void {
|
||||
const msgId = uuid();
|
||||
const ehc = EventHubClient.createFromConnectionString(ehConnString!, hubName!);
|
||||
ehc
|
||||
.getPartitionIds()
|
||||
.then((ids) => {
|
||||
debug("Test logs: Received partition ids: ", ids);
|
||||
host = EventProcessorHost.createFromConnectionString(
|
||||
EventProcessorHost.createHostName(),
|
||||
storageConnString!,
|
||||
EventProcessorHost.createHostName("single"),
|
||||
ehConnString!,
|
||||
{
|
||||
eventHubPath: hubName!,
|
||||
initialOffset: EventPosition.fromEnqueuedTime(Date.now())
|
||||
}
|
||||
);
|
||||
debug("Test logs: Sending the test message...");
|
||||
ehc
|
||||
.send({ body: "Test Message", properties: { message_id: msgId } })
|
||||
.then(() => {
|
||||
const onMessage: OnReceivedMessage = (context: PartitionContext, data: EventData) => {
|
||||
debug("Test logs: Rx message from '%s': '%s'", context.partitionId, data);
|
||||
if (data.properties!.message_id === msgId) {
|
||||
debug("Test logs: Checkpointing the received message...");
|
||||
context
|
||||
.checkpoint()
|
||||
.then(() => {
|
||||
debug("Test logs: Checkpoint succesful...");
|
||||
return context["_context"].blobReferenceByPartition[
|
||||
context.partitionId
|
||||
].getContent();
|
||||
})
|
||||
.then((content) => {
|
||||
debug("Test logs: Seen expected message. New lease contents: %s", content);
|
||||
const parsed = JSON.parse(content);
|
||||
parsed.offset.should.eql(data.offset);
|
||||
})
|
||||
.then(() => {
|
||||
return ehc.close();
|
||||
})
|
||||
.then(() => {
|
||||
return host.stop();
|
||||
})
|
||||
.then(() => {
|
||||
debug("Test logs: closed the sender and the eph...");
|
||||
return done();
|
||||
})
|
||||
.catch((err) => {
|
||||
done(err);
|
||||
});
|
||||
}
|
||||
};
|
||||
const onError: OnReceivedError = (err) => {
|
||||
debug("An error occurred while receiving the message: %O", err);
|
||||
done(err);
|
||||
};
|
||||
return host.start(onMessage, onError);
|
||||
})
|
||||
.catch((err) => {
|
||||
done(err);
|
||||
});
|
||||
})
|
||||
.catch((err) => {
|
||||
done(err);
|
||||
});
|
||||
});
|
||||
|
||||
it("should be able to receive messages from the checkpointed offset.", function(done: Mocha.Done): void {
|
||||
const test = async () => {
|
||||
const msgId = uuid();
|
||||
const ehc = EventHubClient.createFromConnectionString(ehConnString!, hubName!);
|
||||
const leasecontainerName = EventProcessorHost.createHostName("tc");
|
||||
debug("Test logs: Lease container name: %s", leasecontainerName);
|
||||
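// Test helper: sends one message (tagged with the shared msgId) to every partition and
// returns a map of partition id -> the event that was sent to that partition.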
async function sendAcrossAllPartitions(
|
||||
ehc: EventHubClient,
|
||||
ids: string[]
|
||||
): Promise<Dictionary<EventData>> {
|
||||
const result: Promise<any>[] = [];
|
||||
const idMessage: Dictionary<EventData> = {};
|
||||
for (const id of ids) {
|
||||
const data = { body: "Test Message - " + id, properties: { message_id: msgId } };
|
||||
idMessage[id] = data;
|
||||
result.push(ehc.send(data, id));
|
||||
}
|
||||
await Promise.all(result);
|
||||
debug("Test logs: Successfully finished sending messages.. %O", idMessage);
|
||||
return idMessage;
|
||||
}
|
||||
|
||||
const ids = await ehc.getPartitionIds();
|
||||
debug("Test logs: Received partition ids: ", ids);
|
||||
host = EventProcessorHost.createFromConnectionString(
|
||||
"my-eph-1",
|
||||
storageConnString!,
|
||||
leasecontainerName,
|
||||
ehConnString!,
|
||||
{
|
||||
eventHubPath: hubName!,
|
||||
initialOffset: EventPosition.fromEnqueuedTime(Date.now()),
|
||||
startupScanDelay: 15,
|
||||
leaseRenewInterval: 5,
|
||||
leaseDuration: 15
|
||||
}
|
||||
);
|
||||
await delay(1000);
|
||||
debug("Test logs: Sending the first set of test messages...");
|
||||
const firstSend = await sendAcrossAllPartitions(ehc, ids);
|
||||
let count = 0;
|
||||
const onMessage: OnReceivedMessage = async (context: PartitionContext, data: EventData) => {
|
||||
const partitionId = context.partitionId;
|
||||
debug("Test logs: Rx message from '%s': '%o'", partitionId, data);
|
||||
if (data.properties!.message_id === firstSend[partitionId].properties!.message_id) {
|
||||
debug("Test logs: Checkpointing the received message...");
|
||||
await context.checkpoint();
|
||||
count++;
|
||||
} else {
|
||||
const msg =
|
||||
`Received message id '${data.properties!.message_id}' did not match the ` +
|
||||
`sent message id '${firstSend[partitionId].properties!.message_id}' for ` +
|
||||
`partitionId '${partitionId}'.`;
|
||||
throw new Error(msg);
|
||||
}
|
||||
};
|
||||
const onError: OnReceivedError = (err) => {
|
||||
debug("An error occurred while receiving the message: %O", err);
|
||||
throw err;
|
||||
};
|
||||
debug("Test logs: Starting my-eph-1");
|
||||
await host.start(onMessage, onError);
|
||||
while (count < ids.length) {
|
||||
await delay(10000);
|
||||
debug("Test logs: number of partitionIds: %d, count: %d", ids.length, count);
|
||||
}
|
||||
await host.stop();
|
||||
|
||||
debug(
|
||||
"Test logs: Restarting the same host. This time the initial offset should be ignored, and " +
|
||||
"the EventPosition should be from the checkpointed offset.."
|
||||
);
|
||||
debug("Test logs: Sending the second set of test messages...");
|
||||
const secondSend = await sendAcrossAllPartitions(ehc, ids);
|
||||
let count2 = 0;
|
||||
const onMessage2: OnReceivedMessage = async (
|
||||
context: PartitionContext,
|
||||
data: EventData
|
||||
) => {
|
||||
const partitionId = context.partitionId;
|
||||
debug("Test logs: Rx message from '%s': '%s'", partitionId, data);
|
||||
if (data.properties!.message_id === secondSend[partitionId].properties!.message_id) {
|
||||
debug("Test logs: Checkpointing the received message...");
|
||||
await context.checkpoint();
|
||||
count2++;
|
||||
} else {
|
||||
const msg =
|
||||
`Received message id '${data.properties!.message_id}' did not match the ` +
|
||||
`sent message id '${secondSend[partitionId].properties!.message_id}' for ` +
|
||||
`partitionId '${partitionId}'.`;
|
||||
throw new Error(msg);
|
||||
}
|
||||
};
|
||||
const onError2: OnReceivedError = (err) => {
|
||||
debug("An error occurred while receiving the message: %O", err);
|
||||
throw err;
|
||||
};
|
||||
debug("Test logs: Starting my-eph-2");
|
||||
await host.start(onMessage2, onError2);
|
||||
while (count2 < ids.length) {
|
||||
await delay(10000);
|
||||
debug("Test logs: number of partitionIds: %d, count: %d", ids.length, count);
|
||||
}
|
||||
debug("Test logs: sleeping for 10 more seconds....");
|
||||
await delay(10000);
|
||||
await host.stop();
|
||||
await ehc.close();
|
||||
if (count2 > ids.length) {
|
||||
throw new Error("We received more messages than we were expecting...");
|
||||
}
|
||||
};
|
||||
test()
|
||||
.then(() => {
|
||||
done();
|
||||
})
|
||||
.catch((err) => {
|
||||
done(err);
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
describe("multiple", function(): void {
|
||||
it("should be able to run multiple eph successfully.", function(done: Mocha.Done): void {
|
||||
const test = async () => {
|
||||
const ehc = EventHubClient.createFromConnectionString(ehConnString!, hubName!);
|
||||
const containerName: string = `sharedhost-${uuid()}`;
|
||||
const now = Date.now();
|
||||
const hostByName: Dictionary<EventProcessorHost> = {};
|
||||
const sendDataByPartition: Dictionary<EventData> = {};
|
||||
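// Test helper: snapshots which partitions each EPH instance is currently receiving from,
// so the test can verify that leases end up evenly distributed across the hosts.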
const getReceivingFromPartitionsForAllEph = (): Dictionary<string[]> => {
|
||||
const receivingPartitionsByHost: Dictionary<string[]> = {};
|
||||
for (const hostName in hostByName) {
|
||||
receivingPartitionsByHost[hostName] = hostByName[hostName].receivingFromPartitions;
|
||||
}
|
||||
debug("Test logs: EPH -> Partitions: \n%O", receivingPartitionsByHost);
|
||||
return receivingPartitionsByHost;
|
||||
};
|
||||
|
||||
const sendEvents = async (ids: string[]) => {
|
||||
for (let i = 0; i < ids.length; i++) {
|
||||
const data: EventData = {
|
||||
body: `Hello World - ${ids[i]}!!`
|
||||
};
|
||||
sendDataByPartition[ids[i]] = data;
|
||||
await ehc.send(data, ids[i]);
|
||||
debug("Test logs: Sent data to partition: %s", ids[i]);
|
||||
}
|
||||
};
|
||||
|
||||
const ids = await ehc.getPartitionIds();
|
||||
for (let i = 0; i < ids.length; i++) {
|
||||
const hostName = `host-${i}`;
|
||||
hostByName[hostName] = EventProcessorHost.createFromConnectionString(
|
||||
hostName,
|
||||
storageConnString!,
|
||||
containerName,
|
||||
ehConnString!,
|
||||
{
|
||||
eventHubPath: hubName!,
|
||||
initialOffset: EventPosition.fromEnqueuedTime(now)
|
||||
}
|
||||
);
|
||||
|
||||
const onError: OnReceivedError = (error: Error) => {
|
||||
debug(`Test logs: [%s] Received error: %O`, hostName, error);
|
||||
throw error;
|
||||
};
|
||||
const onMessage: OnReceivedMessage = (context: PartitionContext, data: EventData) => {
|
||||
debug(
|
||||
"Test logs: [%s] Rx message from '%s': '%O'",
|
||||
hostName,
|
||||
context.partitionId,
|
||||
data
|
||||
);
|
||||
should.equal(sendDataByPartition[context.partitionId].body, data.body);
|
||||
};
|
||||
await hostByName[hostName].start(onMessage, onError);
|
||||
debug("Test logs: Sleeping for 8 seconds after starting %s.", hostName);
|
||||
await delay(8000);
|
||||
debug(
|
||||
"Test logs: [%s] currently receiving messages from partitions : %o",
|
||||
hostName,
|
||||
hostByName[hostName].receivingFromPartitions
|
||||
);
|
||||
}
|
||||
debug("Test logs: Sleeping for another 15 seconds.");
|
||||
await delay(15000);
|
||||
const hostToPartition = getReceivingFromPartitionsForAllEph();
|
||||
for (const host in hostToPartition) {
|
||||
should.equal(Array.isArray(hostToPartition[host]), true);
|
||||
hostToPartition[host].length.should.eql(1);
|
||||
}
|
||||
await sendEvents(ids);
|
||||
await delay(5000);
|
||||
await ehc.close();
|
||||
for (const host in hostByName) {
|
||||
await hostByName[host].stop();
|
||||
}
|
||||
};
|
||||
test()
|
||||
.then(() => {
|
||||
done();
|
||||
})
|
||||
.catch((err) => {
|
||||
done(err);
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
describe("runtimeInfo", function(): void {
|
||||
it("should get hub runtime info correctly", function(done: Mocha.Done): void {
|
||||
const test = async () => {
|
||||
host = EventProcessorHost.createFromConnectionString(
|
||||
EventProcessorHost.createHostName(),
|
||||
storageConnString!,
|
||||
EventProcessorHost.createHostName("single"),
|
||||
ehConnString!,
|
||||
{
|
||||
eventHubPath: hubName!,
|
||||
initialOffset: EventPosition.fromEnqueuedTime(Date.now())
|
||||
}
|
||||
);
|
||||
const hubRuntimeInfo = await host.getHubRuntimeInformation();
|
||||
should.equal(Array.isArray(hubRuntimeInfo.partitionIds), true);
|
||||
should.equal(typeof hubRuntimeInfo.partitionCount, "number");
|
||||
await host.stop();
|
||||
};
|
||||
test()
|
||||
.then(() => {
|
||||
done();
|
||||
})
|
||||
.catch((err) => {
|
||||
done(err);
|
||||
});
|
||||
});
|
||||
|
||||
it("should get partition runtime info correctly with partitionId as string", function(done: Mocha.Done): void {
|
||||
const test = async () => {
|
||||
host = EventProcessorHost.createFromConnectionString(
|
||||
EventProcessorHost.createHostName(),
|
||||
storageConnString!,
|
||||
EventProcessorHost.createHostName("single"),
|
||||
ehConnString!,
|
||||
{
|
||||
eventHubPath: hubName!,
|
||||
initialOffset: EventPosition.fromEnqueuedTime(Date.now())
|
||||
}
|
||||
);
|
||||
const partitionInfo = await host.getPartitionInformation("0");
|
||||
debug("Test logs: partitionInfo: %o", partitionInfo);
|
||||
partitionInfo.partitionId.should.equal("0");
|
||||
partitionInfo.type.should.equal("com.microsoft:partition");
|
||||
partitionInfo.hubPath.should.equal(hubName);
|
||||
partitionInfo.lastEnqueuedTimeUtc.should.be.instanceof(Date);
|
||||
should.exist(partitionInfo.lastSequenceNumber);
|
||||
should.exist(partitionInfo.lastEnqueuedOffset);
|
||||
await host.stop();
|
||||
};
|
||||
test()
|
||||
.then(() => {
|
||||
done();
|
||||
})
|
||||
.catch((err) => {
|
||||
done(err);
|
||||
});
|
||||
});
|
||||
|
||||
it("should get partition runtime info correctly with partitionId as number", function(done: Mocha.Done): void {
|
||||
const test = async () => {
|
||||
host = EventProcessorHost.createFromConnectionString(
|
||||
EventProcessorHost.createHostName(),
|
||||
storageConnString!,
|
||||
EventProcessorHost.createHostName("single"),
|
||||
ehConnString!,
|
||||
{
|
||||
eventHubPath: hubName!,
|
||||
initialOffset: EventPosition.fromEnqueuedTime(Date.now())
|
||||
}
|
||||
);
|
||||
const partitionInfo = await host.getPartitionInformation(0);
|
||||
partitionInfo.partitionId.should.equal("0");
|
||||
partitionInfo.type.should.equal("com.microsoft:partition");
|
||||
partitionInfo.hubPath.should.equal(hubName);
|
||||
partitionInfo.lastEnqueuedTimeUtc.should.be.instanceof(Date);
|
||||
should.exist(partitionInfo.lastSequenceNumber);
|
||||
should.exist(partitionInfo.lastEnqueuedOffset);
|
||||
await host.stop();
|
||||
};
|
||||
test()
|
||||
.then(() => {
|
||||
done();
|
||||
})
|
||||
.catch((err) => {
|
||||
done(err);
|
||||
});
|
||||
});
|
||||
|
||||
it("should fail getting partition information when partitionId is not a string or number", function(done: Mocha.Done): void {
|
||||
const test = async () => {
|
||||
host = EventProcessorHost.createFromConnectionString(
|
||||
EventProcessorHost.createHostName(),
|
||||
storageConnString!,
|
||||
EventProcessorHost.createHostName("single"),
|
||||
ehConnString!,
|
||||
{
|
||||
eventHubPath: hubName!,
|
||||
initialOffset: EventPosition.fromEnqueuedTime(Date.now())
|
||||
}
|
||||
);
|
||||
try {
|
||||
await host.getPartitionInformation(false as any);
|
||||
} catch (err: any) {
|
||||
err.message.should.equal(
|
||||
"'partitionId' is a required parameter and must be of type: 'string' | 'number'."
|
||||
);
|
||||
}
|
||||
};
|
||||
test()
|
||||
.then(() => {
|
||||
done();
|
||||
})
|
||||
.catch((err) => {
|
||||
done(err);
|
||||
});
|
||||
});
|
||||
|
||||
it("should fail getting partition information when partitionId is empty string", function(done: Mocha.Done): void {
|
||||
const test = async () => {
|
||||
host = EventProcessorHost.createFromConnectionString(
|
||||
EventProcessorHost.createHostName(),
|
||||
storageConnString!,
|
||||
EventProcessorHost.createHostName("single"),
|
||||
ehConnString!,
|
||||
{
|
||||
eventHubPath: hubName!,
|
||||
initialOffset: EventPosition.fromEnqueuedTime(Date.now())
|
||||
}
|
||||
);
|
||||
try {
|
||||
await host.getPartitionInformation("");
|
||||
} catch (err: any) {
|
||||
err.message.should.match(
|
||||
/.*The specified partition is invalid for an EventHub partition sender or receiver.*/gi
|
||||
);
|
||||
} finally {
|
||||
await host.stop();
|
||||
}
|
||||
};
|
||||
test()
|
||||
.then(() => {
|
||||
done();
|
||||
})
|
||||
.catch((err) => {
|
||||
done(err);
|
||||
});
|
||||
});
|
||||
|
||||
it("should fail getting partition information when partitionId is a negative number", function(done: Mocha.Done): void {
|
||||
const test = async () => {
|
||||
host = EventProcessorHost.createFromConnectionString(
|
||||
EventProcessorHost.createHostName(),
|
||||
storageConnString!,
|
||||
EventProcessorHost.createHostName("single"),
|
||||
ehConnString!,
|
||||
{
|
||||
eventHubPath: hubName!,
|
||||
initialOffset: EventPosition.fromEnqueuedTime(Date.now())
|
||||
}
|
||||
);
|
||||
try {
|
||||
await host.getPartitionInformation(-1);
|
||||
} catch (err: any) {
|
||||
err.message.should.match(
|
||||
/.*The specified partition is invalid for an EventHub partition sender or receiver.*/gi
|
||||
);
|
||||
} finally {
|
||||
await host.stop();
|
||||
}
|
||||
};
|
||||
test()
|
||||
.then(() => {
|
||||
done();
|
||||
})
|
||||
.catch((err) => {
|
||||
done(err);
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
describe("options", function(): void {
|
||||
it("should throw an error if the event hub name is neither provided in the connection string and nor in the options object", function(done: Mocha.Done): void {
|
||||
try {
|
||||
const ehc =
|
||||
"Endpoint=sb://foo.bar.baz.net/;SharedAccessKeyName=somekey;SharedAccessKey=somesecret";
|
||||
EventProcessorHost.createFromConnectionString(
|
||||
EventProcessorHost.createHostName(),
|
||||
storageConnString!,
|
||||
EventProcessorHost.createHostName("single"),
|
||||
ehc,
|
||||
{
|
||||
initialOffset: EventPosition.fromEnqueuedTime(Date.now())
|
||||
}
|
||||
);
|
||||
} catch (err: any) {
|
||||
should.exist(err);
|
||||
err.message.should.match(
|
||||
/.*Either provide "path" or the "connectionString": "Endpoint=sb:\/\/foo\.bar\.baz\.net\/;SharedAccessKeyName=somekey;SharedAccessKey=somesecret", must contain EntityPath="<path-to-the-entity>.*"/gi
|
||||
);
|
||||
done();
|
||||
}
|
||||
});
|
||||
|
||||
it("should get hub runtime info correctly when eventhub name is present in connection string but not as an option in the options object.", function(done: Mocha.Done): void {
|
||||
const test = async () => {
|
||||
host = EventProcessorHost.createFromConnectionString(
|
||||
EventProcessorHost.createHostName(),
|
||||
storageConnString!,
|
||||
EventProcessorHost.createHostName("single"),
|
||||
`${ehConnString!};EntityPath=${hubName!}`,
|
||||
{
|
||||
initialOffset: EventPosition.fromEnqueuedTime(Date.now())
|
||||
}
|
||||
);
|
||||
const hubRuntimeInfo = await host.getHubRuntimeInformation();
|
||||
hubRuntimeInfo.path.should.equal(hubName);
|
||||
should.equal(Array.isArray(hubRuntimeInfo.partitionIds), true);
|
||||
should.equal(typeof hubRuntimeInfo.partitionCount, "number");
|
||||
await host.stop();
|
||||
};
|
||||
test()
|
||||
.then(() => {
|
||||
done();
|
||||
})
|
||||
.catch((err) => {
|
||||
done(err);
|
||||
});
|
||||
});
|
||||
|
||||
it("when eventhub name is present in connection string and in the options object, the one in options object is selected.", function(done: Mocha.Done): void {
|
||||
const test = async () => {
|
||||
host = EventProcessorHost.createFromConnectionString(
|
||||
EventProcessorHost.createHostName(),
|
||||
storageConnString!,
|
||||
EventProcessorHost.createHostName("single"),
|
||||
`${ehConnString!};EntityPath=foo`,
|
||||
{
|
||||
eventHubPath: hubName,
|
||||
initialOffset: EventPosition.fromEnqueuedTime(Date.now())
|
||||
}
|
||||
);
|
||||
const hubRuntimeInfo = await host.getHubRuntimeInformation();
|
||||
hubRuntimeInfo.path.should.equal(hubName);
|
||||
should.equal(Array.isArray(hubRuntimeInfo.partitionIds), true);
|
||||
should.equal(typeof hubRuntimeInfo.partitionCount, "number");
|
||||
await host.stop();
|
||||
};
|
||||
test()
|
||||
.then(() => {
|
||||
done();
|
||||
})
|
||||
.catch((err) => {
|
||||
done(err);
|
||||
});
|
||||
});
|
||||
});
|
||||
}).timeout(1200000);
|
|
@@ -1,75 +0,0 @@
|
|||
// Copyright (c) Microsoft Corporation. All rights reserved.
|
||||
// Licensed under the MIT License.
|
||||
|
||||
import dotenv from "dotenv";
|
||||
import chai from "chai";
|
||||
const should = chai.should();
|
||||
import chaiAsPromised from "chai-as-promised";
|
||||
chai.use(chaiAsPromised);
|
||||
import debugModule from "debug";
|
||||
const debug = debugModule("azure:eph:iothub-spec");
|
||||
import {
|
||||
EventPosition,
|
||||
OnReceivedError,
|
||||
PartitionContext,
|
||||
EventData,
|
||||
OnReceivedMessage,
|
||||
EventProcessorHost
|
||||
} from "../src";
|
||||
import { delay } from "@azure/event-hubs";
|
||||
dotenv.config();
|
||||
|
||||
describe("EPH with iothub connection string", function(): void {
|
||||
const iothubConnString = process.env.IOTHUB_CONNECTION_STRING;
|
||||
const storageConnString = process.env.STORAGE_CONNECTION_STRING;
|
||||
const hostName = EventProcessorHost.createHostName();
|
||||
let host: EventProcessorHost;
|
||||
before("validate environment", async function(): Promise<void> {
|
||||
should.exist(
|
||||
process.env.IOTHUB_CONNECTION_STRING,
|
||||
"define IOTHUB_CONNECTION_STRING in your environment before running integration tests."
|
||||
);
|
||||
});
|
||||
|
||||
it("should be able to receive messages from the event hub associated with an iothub.", function(done: Mocha.Done): void {
|
||||
const test = async () => {
|
||||
try {
|
||||
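// createFromIotHubConnectionString resolves the Event Hub-compatible endpoint behind the
// IoT Hub before creating the host, which is why this factory is awaited.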
host = await EventProcessorHost.createFromIotHubConnectionString(
|
||||
hostName,
|
||||
storageConnString!,
|
||||
EventProcessorHost.createHostName("iot"),
|
||||
iothubConnString!,
|
||||
{
|
||||
initialOffset: EventPosition.fromEnqueuedTime(Date.now()),
|
||||
leaseDuration: 20,
|
||||
leaseRenewInterval: 10
|
||||
}
|
||||
);
|
||||
const onMessage: OnReceivedMessage = (context: PartitionContext, data: EventData) => {
|
||||
debug("Test logs: [%s] Rx message from '%s': '%O'", hostName, context.partitionId, data);
|
||||
};
|
||||
const onError: OnReceivedError = (err) => {
|
||||
debug("An error occurred while receiving the message: %O", err);
|
||||
throw err;
|
||||
};
|
||||
const runtimeInfo = await host.getHubRuntimeInformation();
|
||||
debug("Test logs: runtimeInfo: %O", runtimeInfo);
|
||||
// tslint:disable-next-line: no-unused-expression
|
||||
runtimeInfo.createdAt.should.exist;
|
||||
(typeof runtimeInfo.partitionCount).should.equal("number");
|
||||
await host.start(onMessage, onError);
|
||||
await delay(15000);
|
||||
await host.stop();
|
||||
} catch (err: any) {
|
||||
throw err;
|
||||
}
|
||||
};
|
||||
test()
|
||||
.then(() => {
|
||||
done();
|
||||
})
|
||||
.catch((err) => {
|
||||
done(err);
|
||||
});
|
||||
});
|
||||
}).timeout(60000);
|
|
@@ -1,161 +0,0 @@
|
|||
// Copyright (c) Microsoft Corporation. All rights reserved.
|
||||
// Licensed under the MIT License.
|
||||
|
||||
import chai from "chai";
|
||||
import chaiAsPromised from "chai-as-promised";
|
||||
chai.use(chaiAsPromised);
|
||||
import debugModule from "debug";
|
||||
const should = chai.should();
|
||||
const debug = debugModule("azure:eph:negative-spec");
|
||||
import dotenv from "dotenv";
|
||||
import {
|
||||
EventPosition,
|
||||
OnReceivedError,
|
||||
PartitionContext,
|
||||
EventData,
|
||||
OnReceivedMessage,
|
||||
EventProcessorHost
|
||||
} from "../src";
|
||||
dotenv.config();
|
||||
|
||||
describe("negative", function(): void {
|
||||
before("validate environment", function(): void {
|
||||
should.exist(
|
||||
process.env.STORAGE_CONNECTION_STRING,
|
||||
"define STORAGE_CONNECTION_STRING in your environment before running integration tests."
|
||||
);
|
||||
should.exist(
|
||||
process.env.EVENTHUB_CONNECTION_STRING,
|
||||
"define EVENTHUB_CONNECTION_STRING in your environment before running integration tests."
|
||||
);
|
||||
should.exist(
|
||||
process.env.EVENTHUB_NAME,
|
||||
"define EVENTHUB_NAME in your environment before running integration tests."
|
||||
);
|
||||
});
|
||||
const ehConnString = process.env.EVENTHUB_CONNECTION_STRING;
|
||||
const storageConnString = process.env.STORAGE_CONNECTION_STRING;
|
||||
const hubName = process.env.EVENTHUB_NAME;
|
||||
const hostName = EventProcessorHost.createHostName();
|
||||
let host: EventProcessorHost;
|
||||
it("should fail when trying to start an EPH that is already started.", function(done: Mocha.Done): void {
|
||||
const test = async () => {
|
||||
host = EventProcessorHost.createFromConnectionString(
|
||||
hostName,
|
||||
storageConnString!,
|
||||
EventProcessorHost.createHostName("tc"),
|
||||
ehConnString!,
|
||||
{
|
||||
eventHubPath: hubName!,
|
||||
initialOffset: EventPosition.fromEnqueuedTime(Date.now())
|
||||
}
|
||||
);
|
||||
const onMessage: OnReceivedMessage = (context: PartitionContext, data: EventData) => {
|
||||
debug("Test logs: [%s] Rx message from '%s': '%O'", hostName, context.partitionId, data);
|
||||
};
|
||||
const onError: OnReceivedError = (err) => {
|
||||
debug("An error occurred while receiving the message: %O", err);
|
||||
throw err;
|
||||
};
|
||||
await host.start(onMessage, onError);
|
||||
try {
|
||||
debug("Test logs: [%s] Trying to start second time.", hostName);
|
||||
await host.start(onMessage, onError);
|
||||
throw new Error("The second call to start() should have failed.");
|
||||
} catch (err: any) {
|
||||
err.message.should.match(/A partition manager cannot be started multiple times/gi);
|
||||
} finally {
|
||||
await host.stop();
|
||||
should.equal(host["_context"]["partitionManager"]["_isCancelRequested"], true);
|
||||
}
|
||||
};
|
||||
test()
|
||||
.then(() => {
|
||||
done();
|
||||
})
|
||||
.catch((err) => {
|
||||
done(err);
|
||||
});
|
||||
});
|
||||
|
||||
it("should fail when the eventhub name is incorrect.", function(done: Mocha.Done): void {
|
||||
host = EventProcessorHost.createFromConnectionString(
|
||||
hostName,
|
||||
storageConnString!,
|
||||
EventProcessorHost.createHostName("tc"),
|
||||
ehConnString!,
|
||||
{
|
||||
eventHubPath: "HeloooooooFooooooo",
|
||||
initialOffset: EventPosition.fromEnqueuedTime(Date.now())
|
||||
}
|
||||
);
|
||||
const onMessage: OnReceivedMessage = (context: PartitionContext, data: EventData) => {
|
||||
debug("Test logs: [%s] Rx message from '%s': '%O'", hostName, context.partitionId, data);
|
||||
};
|
||||
const onError: OnReceivedError = (err) => {
|
||||
debug("An error occurred while receiving the message: %O", err);
|
||||
throw err;
|
||||
};
|
||||
host
|
||||
.start(onMessage, onError)
|
||||
.then(() => {
|
||||
return Promise.reject(new Error("This statement should not have executed."));
|
||||
})
|
||||
.catch((err) => {
|
||||
debug("Err action: %s", err.action);
|
||||
err.action.should.equal("Getting PartitionIds");
|
||||
done();
|
||||
});
|
||||
});
|
||||
|
||||
it("should fail when the eventhub namesapce is incorrect.", function(done: Mocha.Done): void {
|
||||
host = EventProcessorHost.createFromConnectionString(
|
||||
hostName,
|
||||
storageConnString!,
|
||||
EventProcessorHost.createHostName("tc"),
|
||||
"Endpoint=sb://HelooFooo.servicebus.windows.net/;SharedAccessKeyName=Foo;SharedAccessKey=Bar",
|
||||
{
|
||||
eventHubPath: hubName!,
|
||||
initialOffset: EventPosition.fromEnqueuedTime(Date.now())
|
||||
}
|
||||
);
|
||||
const onMessage: OnReceivedMessage = (context: PartitionContext, data: EventData) => {
|
||||
debug("Test logs: [%s] Rx message from '%s': '%O'", hostName, context.partitionId, data);
|
||||
};
|
||||
const onError: OnReceivedError = (err) => {
|
||||
debug("An error occurred while receiving the message: %O", err);
|
||||
throw err;
|
||||
};
|
||||
host
|
||||
.start(onMessage, onError)
|
||||
.then(() => {
|
||||
return Promise.reject(new Error("This statement should not have executed."));
|
||||
})
|
||||
.catch((err) => {
|
||||
debug("Err action: %s", err.action);
|
||||
err.action.should.equal("Getting PartitionIds");
|
||||
done();
|
||||
});
|
||||
});
|
||||
|
||||
it("should fail when the storage connection string is incorrect.", function(done: Mocha.Done): void {
|
||||
try {
|
||||
host = EventProcessorHost.createFromConnectionString(
|
||||
hostName,
|
||||
"Hello World"!,
|
||||
EventProcessorHost.createHostName("tc"),
|
||||
ehConnString!,
|
||||
{
|
||||
eventHubPath: hubName!,
|
||||
initialOffset: EventPosition.fromEnqueuedTime(Date.now()),
|
||||
consumerGroup: "HelloWorld"
|
||||
}
|
||||
);
|
||||
done(new Error("creating eph should have failed."));
|
||||
} catch (err: any) {
|
||||
should.exist(err);
|
||||
err.message.should.match(/Connection strings must be of the form/gi);
|
||||
done();
|
||||
}
|
||||
});
|
||||
});
|
|
@@ -1,173 +0,0 @@
|
|||
// Copyright (c) Microsoft Corporation. All rights reserved.
|
||||
// Licensed under the MIT License.
|
||||
|
||||
import chai from "chai";
|
||||
import { retry, RetryConfig } from "../src/util/utils";
|
||||
import chaiAsPromised from "chai-as-promised";
|
||||
import { delay } from "@azure/event-hubs";
|
||||
chai.use(chaiAsPromised);
|
||||
import debugModule from "debug";
|
||||
const should = chai.should();
|
||||
const debug = debugModule("azure:eph:retry-spec");
|
||||
|
||||
describe("retry function", function(): void {
|
||||
it("should succeed if the operation succeeds.", function(done: Mocha.Done): void {
|
||||
const test = async () => {
|
||||
let counter = 0;
|
||||
try {
|
||||
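// The retry utility runs `operation` up to `maxRetries` times; on success it returns the
// operation's result, otherwise it rethrows the last error wrapped with host/action details
// (see the final test below for the failure shape).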
const config: RetryConfig<any> = {
|
||||
operation: async () => {
|
||||
debug("counter: %d", ++counter);
|
||||
await delay(200);
|
||||
return {
|
||||
code: 200,
|
||||
description: "OK"
|
||||
};
|
||||
},
|
||||
hostName: "eph-1",
|
||||
action: "Succeed",
|
||||
maxRetries: 5,
|
||||
retryMessage: "Retry",
|
||||
finalFailureMessage: "Out of retry attempts, still failing!!"
|
||||
};
|
||||
const result = await retry(config);
|
||||
result.code.should.equal(200);
|
||||
result.description.should.equal("OK");
|
||||
counter.should.equal(1);
|
||||
} catch (err: any) {
|
||||
debug("An error occurred in a test that should have succeeded: %O", err);
|
||||
throw err;
|
||||
}
|
||||
};
|
||||
test()
|
||||
.then(() => {
|
||||
done();
|
||||
})
|
||||
.catch((err) => {
|
||||
done(err);
|
||||
});
|
||||
});
|
||||
|
||||
it("should succeed if the operation initially fails and then succeeds.", function(done: Mocha.Done): void {
|
||||
const test = async () => {
|
||||
let counter = 0;
|
||||
try {
|
||||
const config: RetryConfig<any> = {
|
||||
operation: async () => {
|
||||
await delay(200);
|
||||
debug("counter: %d", ++counter);
|
||||
if (counter === 1) {
|
||||
throw new Error("The server is busy right now. Retry later.");
|
||||
} else {
|
||||
return ["0", "1"];
|
||||
}
|
||||
},
|
||||
hostName: "eph-1",
|
||||
action: "Initially fail then suceed",
|
||||
maxRetries: 5,
|
||||
retryMessage: "Retry",
|
||||
finalFailureMessage: "Out of retry attempts, still failing!!"
|
||||
};
|
||||
const result = await retry(config);
|
||||
should.equal(Array.isArray(result), true);
|
||||
result.toString().should.equal("0,1");
|
||||
counter.should.equal(2);
|
||||
} catch (err: any) {
|
||||
debug("An error occurred in a test that should have succeeded: %O", err);
|
||||
throw err;
|
||||
}
|
||||
};
|
||||
test()
|
||||
.then(() => {
|
||||
done();
|
||||
})
|
||||
.catch((err) => {
|
||||
done(err);
|
||||
});
|
||||
});
|
||||
|
||||
it("should succeed in the last attempt.", function(done: Mocha.Done): void {
|
||||
const test = async () => {
|
||||
let counter = 0;
|
||||
try {
|
||||
const config: RetryConfig<any> = {
|
||||
operation: async () => {
|
||||
await delay(200);
|
||||
debug("counter: %d", ++counter);
|
||||
if (counter === 1) {
|
||||
const e = new Error("Error in attempt 1.");
|
||||
throw e;
|
||||
} else if (counter === 2) {
|
||||
const e = new Error("Error in attempt 2.");
|
||||
throw e;
|
||||
} else {
|
||||
return {
|
||||
code: 200,
|
||||
description: "OK"
|
||||
};
|
||||
}
|
||||
},
|
||||
hostName: "eph-1",
|
||||
action: "Success in last attempt",
|
||||
maxRetries: 3,
|
||||
retryMessage: "Retry",
|
||||
finalFailureMessage: "Out of retry attempts, still failing!!"
|
||||
};
|
||||
const result = await retry(config);
|
||||
result.code.should.equal(200);
|
||||
result.description.should.equal("OK");
|
||||
counter.should.equal(3);
|
||||
} catch (err: any) {
|
||||
debug("An error occurred in a test that should have succeeded: %O", err);
|
||||
throw err;
|
||||
}
|
||||
};
|
||||
test()
|
||||
.then(() => {
|
||||
done();
|
||||
})
|
||||
.catch((err) => {
|
||||
done(err);
|
||||
});
|
||||
});
|
||||
|
||||
it("should fail if all attempts return an error", function(done: Mocha.Done): void {
|
||||
const test = async () => {
|
||||
let counter = 0;
|
||||
try {
|
||||
const config: RetryConfig<any> = {
|
||||
operation: async () => {
|
||||
debug("counter: %d", ++counter);
|
||||
await delay(200);
|
||||
const e = new Error("I would always like to fail, keep retrying.");
|
||||
throw e;
|
||||
},
|
||||
hostName: "eph-1",
|
||||
action: "Fail after 5 attempts",
|
||||
maxRetries: 5,
|
||||
retryMessage: "Retry",
|
||||
partitionId: "1",
|
||||
finalFailureMessage: "Out of retry attempts, still failing!!"
|
||||
};
|
||||
await retry(config);
|
||||
} catch (err: any) {
|
||||
counter.should.equal(5);
|
||||
should.exist(err);
|
||||
err.action.should.equal("Fail after 5 attempts");
|
||||
err.hostName.should.equal("eph-1");
|
||||
err.partitionId.should.equal("1");
|
||||
should.exist(err.error);
|
||||
err.error.message.should.match(
|
||||
/Out of retry attempts, still failing!! while performing the action "Fail after 5 attempts" due to Error\: I would always like to fail, keep retrying.*/gi
|
||||
);
|
||||
}
|
||||
};
|
||||
test()
|
||||
.then(() => {
|
||||
done();
|
||||
})
|
||||
.catch((err) => {
|
||||
done(err);
|
||||
});
|
||||
});
|
||||
});
|
|
@@ -1,10 +0,0 @@
|
|||
{
|
||||
"extends": "../../../tsconfig.package",
|
||||
"compilerOptions": {
|
||||
"declarationDir": "./typings",
|
||||
"outDir": "./dist-esm",
|
||||
"downlevelIteration": true
|
||||
},
|
||||
"exclude": ["node_modules", "./types/**/*.d.ts", "./samples/**/*.ts"],
|
||||
"include": ["./src/**/*.ts", "./test/**/*.ts"]
|
||||
}
|
|
@@ -1,4 +0,0 @@
|
|||
{
|
||||
"$schema": "https://developer.microsoft.com/json-schemas/tsdoc/v0/tsdoc.schema.json",
|
||||
"extends": ["../../../tsdoc.json"]
|
||||
}
|