From bcb1827548713a4d239050257a3e1023af0e4474 Mon Sep 17 00:00:00 2001
From: chradek <51000525+chradek@users.noreply.github.com>
Date: Fri, 17 Sep 2021 10:07:16 -0700
Subject: [PATCH] [event-hubs] Enable unit-test for node.js in CI (#17492)

Replaces #14568

Uses the `@azure/mock-hub` package to start a local mocked version of Event Hubs to run tests against.

There are a lot of whitespace changes (indentation), so I recommend reviewing the diff with whitespace changes hidden.
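For reviewers, here is a minimal sketch of the pattern the updated test suites follow (the helper names and relative paths match the new utilities added under `test/public/utils` in this PR; the suite body is elided):

```ts
import { testWithServiceTypes } from "./utils/testWithServiceTypes";
import { createMockServer } from "./utils/mockService";

testWithServiceTypes((serviceVersion) => {
  if (serviceVersion === "mock") {
    // When TEST_TARGET=mock, run a local Event Hubs stand-in
    // (backed by `@azure/mock-hub`) for the lifetime of the suite.
    let service: ReturnType<typeof createMockServer>;
    before("Starting mock service", () => {
      service = createMockServer();
      return service.start();
    });

    after("Stopping mock service", () => {
      return service?.stop();
    });
  }

  describe("some suite", () => {
    // ...tests run unchanged against either the live or the mocked service.
  });
});
```

The `unit-test:node` script points `NODE_EXTRA_CA_CERTS` at the self-signed root CA produced by `scripts/generateCerts.js`, so Node.js trusts the mock server's TLS certificate.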
---
 sdk/eventhub/ci.yml                           |    2 +-
 sdk/eventhub/event-hubs/karma.conf.js         |    3 +-
 sdk/eventhub/event-hubs/package.json          |   13 +-
 .../event-hubs/scripts/generateCerts.js       |   82 +
 .../event-hubs/test/internal/amqp.spec.ts     |   99 +-
 .../event-hubs/test/internal/auth.spec.ts     |  280 +-
 .../test/internal/cancellation.spec.ts        |  278 +-
 .../event-hubs/test/internal/client.spec.ts   | 1356 +++----
 .../event-hubs/test/internal/config.spec.ts   |  257 +-
 .../test/internal/dataTransformer.spec.ts     |  689 ++--
 .../internal/diagnostics/messageSpan.spec.ts  |   57 +-
 .../eventHubConsumerClientUnitTests.spec.ts   |  848 +++--
 .../test/internal/eventPosition.spec.ts       |  260 +-
 .../test/internal/eventProcessor.spec.ts      | 3299 +++++++++--------
 .../test/internal/eventdata.spec.ts           |  441 +--
 .../test/internal/impl/partitionGate.spec.ts  |   27 +-
 .../internal/loadBalancingStrategy.spec.ts    | 1097 +++---
 .../event-hubs/test/internal/misc.spec.ts     |  814 ++--
 .../test/internal/node/disconnect.spec.ts     |  340 +-
 .../test/internal/node/packageInfo.spec.ts    |   38 +-
 .../test/internal/partitionPump.spec.ts       |  247 +-
 .../test/internal/receiveBatch.spec.ts        |  268 +-
 .../event-hubs/test/internal/sender.spec.ts   | 2118 +++++------
 .../test/public/amqpAnnotatedMessage.spec.ts  |  497 +--
 .../event-hubs/test/public/auth.spec.ts       |  642 ++--
 .../test/public/cancellation.spec.ts          |  296 +-
 .../event-hubs/test/public/eventData.spec.ts  |  203 +-
 .../public/eventHubConsumerClient.spec.ts     | 2168 +++++------
 .../event-hubs/test/public/hubruntime.spec.ts |  748 ++--
 .../test/public/node/client.spec.ts           |  163 +-
 .../test/public/node/disconnects.spec.ts      |  396 +-
 .../event-hubs/test/public/receiver.spec.ts   |  853 ++---
 .../test/public/utils/mockService.browser.ts  |    6 +
 .../test/public/utils/mockService.ts          |   22 +
 .../event-hubs/test/public/utils/testUtils.ts |   29 +-
 .../test/public/utils/testWithServiceTypes.ts |   43 +
 .../samples/javascript/src/ehSample.js        |   15 +-
 .../samples/typescript/src/ehSample.ts        |   15 +-
 sdk/eventhub/mock-hub/src/index.ts            |    4 +-
 .../src/sender/streamingPartitionSender.ts    |    5 +-
 .../mock-hub/src/server/mockServer.ts         |   29 +-
 .../mock-hub/src/services/eventHubs.ts        |   30 +-
 42 files changed, 9844 insertions(+), 9233 deletions(-)
 create mode 100644 sdk/eventhub/event-hubs/scripts/generateCerts.js
 create mode 100644 sdk/eventhub/event-hubs/test/public/utils/mockService.browser.ts
 create mode 100644 sdk/eventhub/event-hubs/test/public/utils/mockService.ts
 create mode 100644 sdk/eventhub/event-hubs/test/public/utils/testWithServiceTypes.ts

diff --git a/sdk/eventhub/ci.yml b/sdk/eventhub/ci.yml
index 6ec10b06521b..125e815f1d98 100644
--- a/sdk/eventhub/ci.yml
+++ b/sdk/eventhub/ci.yml
@@ -24,7 +24,7 @@ extends:
   template: ../../eng/pipelines/templates/stages/archetype-sdk-client.yml
   parameters:
     ServiceDirectory: eventhub
-    RunUnitTests: false
+    RunUnitTests: true
     Artifacts:
       - name: azure-event-hubs
         safeName: azureeventhubs
diff --git a/sdk/eventhub/event-hubs/karma.conf.js b/sdk/eventhub/event-hubs/karma.conf.js
index df89a6b4727d..187c45ebc353 100644
--- a/sdk/eventhub/event-hubs/karma.conf.js
+++ b/sdk/eventhub/event-hubs/karma.conf.js
@@ -48,7 +48,8 @@ module.exports = function(config) {
     envPreprocessor: [
       "EVENTHUB_CONNECTION_STRING",
       "EVENTHUB_NAME",
-      "IOTHUB_EH_COMPATIBLE_CONNECTION_STRING"
+      "IOTHUB_EH_COMPATIBLE_CONNECTION_STRING",
+      "TEST_TARGET"
     ],
 
     // test results reporter to use
diff --git a/sdk/eventhub/event-hubs/package.json b/sdk/eventhub/event-hubs/package.json
index 94aa5af87590..cd3b8d030932 100644
--- a/sdk/eventhub/event-hubs/package.json
+++ b/sdk/eventhub/event-hubs/package.json
@@ -32,7 +32,8 @@
     }
   },
   "browser": {
-    "./dist-esm/src/util/runtimeInfo.js": "./dist-esm/src/util/runtimeInfo.browser.js"
+    "./dist-esm/src/util/runtimeInfo.js": "./dist-esm/src/util/runtimeInfo.browser.js",
+    "./dist-esm/test/public/utils/mockService.js": "./dist-esm/test/public/utils/mockService.browser.js"
   },
   "files": [
     "dist/",
@@ -49,7 +50,7 @@
     "build:samples": "echo Obsolete.",
     "build:test:browser": "tsc -p . && cross-env ONLY_BROWSER=true rollup -c rollup.test.config.js 2>&1",
     "build:test:node": "tsc -p . && cross-env ONLY_NODE=true rollup -c rollup.test.config.js 2>&1",
-    "build:test": "tsc -p . && rollup -c rollup.test.config.js 2>&1",
+    "build:test": "tsc -p . && rollup -c rollup.test.config.js 2>&1 && npm run generate-certs",
     "build:types": "downlevel-dts types/latest types/3.1",
     "build": "npm run clean && tsc -p . && rollup -c 2>&1 && api-extractor run --local && npm run build:types",
     "check-format": "prettier --list-different --config ../../../.prettierrc.json --ignore-path ../../../.prettierignore \"src/**/*.ts\" \"test/**/*.ts\" \"samples-dev/**/*.ts\" \"*.{js,json}\"",
@@ -57,8 +58,9 @@
     "execute:samples": "dev-tool samples run samples-dev",
     "extract-api": "tsc -p . && api-extractor run --local",
     "format": "prettier --write --config ../../../.prettierrc.json --ignore-path ../../../.prettierignore \"src/**/*.ts\" \"test/**/*.ts\" \"samples-dev/**/*.ts\" \"*.{js,json}\"",
-    "integration-test:browser": "karma start --single-run",
-    "integration-test:node": "nyc mocha -r esm --require source-map-support/register --reporter ../../../common/tools/mocha-multi-reporter.js --timeout 1200000 --full-trace \"dist-esm/test/internal/*.spec.js\" \"dist-esm/test/public/*.spec.js\" \"dist-esm/test/public/**/*.spec.js\" \"dist-esm/test/internal/**/*.spec.js\"",
+    "generate-certs": "node ./scripts/generateCerts.js",
+    "integration-test:browser": "cross-env TEST_TARGET=live DISABLE_MULTI_VERSION_TESTING=true karma start --single-run",
+    "integration-test:node": "cross-env TEST_TARGET=live DISABLE_MULTI_VERSION_TESTING=true nyc mocha -r esm --require source-map-support/register --reporter ../../../common/tools/mocha-multi-reporter.js --timeout 1200000 --full-trace \"dist-esm/test/internal/*.spec.js\" \"dist-esm/test/public/*.spec.js\" \"dist-esm/test/public/**/*.spec.js\" \"dist-esm/test/internal/**/*.spec.js\"",
     "integration-test": "npm run integration-test:node && npm run integration-test:browser",
     "lint:fix": "eslint package.json api-extractor.json src test --ext .ts --fix --fix-type [problem,suggestion]",
     "lint": "eslint package.json api-extractor.json src test --ext .ts",
@@ -68,7 +70,7 @@
     "test:node": "npm run build:test && npm run unit-test:node && npm run integration-test:node",
     "test": "npm run build:test && npm run unit-test && npm run integration-test",
     "unit-test:browser": "echo skipped",
-    "unit-test:node": "echo skipped",
+    "unit-test:node": "cross-env NODE_EXTRA_CA_CERTS=\"./certs/my-private-root-ca.crt.pem\" TEST_TARGET=mock DISABLE_MULTI_VERSION_TESTING=true nyc mocha -r esm 
--require source-map-support/register --reporter ../../../common/tools/mocha-multi-reporter.js --timeout 1200000 --full-trace \"dist-esm/test/internal/*.spec.js\" \"dist-esm/test/public/*.spec.js\" \"dist-esm/test/public/**/*.spec.js\" \"dist-esm/test/internal/**/*.spec.js\"", "unit-test": "npm run unit-test:node && npm run unit-test:browser", "docs": "typedoc --excludePrivate --excludeNotExported --excludeExternals --stripInternal --mode file --out ./dist/docs ./src" }, @@ -126,6 +128,7 @@ "@azure/dev-tool": "^1.0.0", "@azure/eslint-plugin-azure-sdk": "^3.0.0", "@azure/identity": "2.0.0-beta.6", + "@azure/mock-hub": "^1.0.0", "@azure/test-utils": "^1.0.0", "@azure/test-utils-perfstress": "^1.0.0", "@microsoft/api-extractor": "^7.18.7", diff --git a/sdk/eventhub/event-hubs/scripts/generateCerts.js b/sdk/eventhub/event-hubs/scripts/generateCerts.js new file mode 100644 index 000000000000..53ecb7917b18 --- /dev/null +++ b/sdk/eventhub/event-hubs/scripts/generateCerts.js @@ -0,0 +1,82 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +const { execFileSync } = require("child_process"); +const { mkdirSync } = require("fs"); +const { resolve: resolvePath } = require("path"); + +const cwd = process.cwd(); +const certsDirectory = resolvePath(cwd, "certs"); + +// Create `certs` directory. +console.log(`Creating ${certsDirectory}`); +try { + mkdirSync(certsDirectory); +} catch (err) { + if (err.code !== "EEXIST") { + throw err; + } +} + +// Create own Root Certificate Authority +execFileSync("openssl", [ + "genrsa", + "-out", + `${resolvePath(certsDirectory, "my-private-root-ca.key.pem")}`, + "2048" +]); + +// Self-sign Root Certificate Authority +execFileSync("openssl", [ + "req", + "-x509", + "-new", + "-nodes", + "-key", + `${resolvePath(certsDirectory, "my-private-root-ca.key.pem")}`, + "-days", + "5", + "-out", + `${resolvePath(certsDirectory, "my-private-root-ca.crt.pem")}`, + "-subj", + "/C=US/ST=Washington/L=Seattle/O=Fake Signing Authority/CN=fake.foo" +]); + +// Create a certificate for localhost +execFileSync("openssl", [ + "genrsa", + "-out", + `${resolvePath(certsDirectory, "my-server.key.pem")}`, + "2048" +]); + +// Create a request which the Root Certificate Authority will sign +execFileSync("openssl", [ + "req", + "-new", + "-key", + `${resolvePath(certsDirectory, "my-server.key.pem")}`, + "-out", + `${resolvePath(certsDirectory, "my-server.csr.pem")}`, + "-subj", + "/C=US/ST=Washington/L=Seattle/O=Fake Hubs/CN=localhost" +]); + +// Sign the request with the Root Certificate Authority +execFileSync("openssl", [ + "x509", + "-req", + "-in", + `${resolvePath(certsDirectory, "my-server.csr.pem")}`, + "-CA", + `${resolvePath(certsDirectory, "my-private-root-ca.crt.pem")}`, + "-CAkey", + `${resolvePath(certsDirectory, "my-private-root-ca.key.pem")}`, + "-CAcreateserial", + "-out", + `${resolvePath(certsDirectory, "my-server.crt.pem")}`, + "-days", + "5" +]); + +console.log(`Certs created.`); diff --git a/sdk/eventhub/event-hubs/test/internal/amqp.spec.ts b/sdk/eventhub/event-hubs/test/internal/amqp.spec.ts index 24e9c33c4512..5a520a6ed02f 100644 --- a/sdk/eventhub/event-hubs/test/internal/amqp.spec.ts +++ b/sdk/eventhub/event-hubs/test/internal/amqp.spec.ts @@ -4,58 +4,61 @@ import chai from "chai"; import { Constants } from "@azure/core-amqp"; import { fromRheaMessage, isAmqpAnnotatedMessage } from "../../src/eventData"; +import { testWithServiceTypes } from "../public/utils/testWithServiceTypes"; const assert = chai.assert; -describe("AMQP message 
encoding", () => { - it("isAmqpAnnotatedMessage", () => { - assert.isFalse(isAmqpAnnotatedMessage({})); - assert.isFalse(isAmqpAnnotatedMessage({ body: "hello world" })); - assert.isFalse( - isAmqpAnnotatedMessage( - fromRheaMessage({ - message_annotations: { - [Constants.enqueuedTime]: Date.now() - }, - body: undefined - }) - ) - ); +testWithServiceTypes(() => { + describe("AMQP message encoding", () => { + it("isAmqpAnnotatedMessage", () => { + assert.isFalse(isAmqpAnnotatedMessage({})); + assert.isFalse(isAmqpAnnotatedMessage({ body: "hello world" })); + assert.isFalse( + isAmqpAnnotatedMessage( + fromRheaMessage({ + message_annotations: { + [Constants.enqueuedTime]: Date.now() + }, + body: undefined + }) + ) + ); - assert.isTrue( - isAmqpAnnotatedMessage( - fromRheaMessage({ - message_annotations: { - [Constants.enqueuedTime]: Date.now() - }, - body: undefined - }).getRawAmqpMessage() - ) - ); + assert.isTrue( + isAmqpAnnotatedMessage( + fromRheaMessage({ + message_annotations: { + [Constants.enqueuedTime]: Date.now() + }, + body: undefined + }).getRawAmqpMessage() + ) + ); - assert.isTrue( - isAmqpAnnotatedMessage({ - body: "hello world", - bodyType: "sequence" - }) - ); - assert.isTrue( - isAmqpAnnotatedMessage({ - body: "hello world", - bodyType: "value" - }) - ); - assert.isTrue( - isAmqpAnnotatedMessage({ - body: "hello world", - bodyType: "data" - }) - ); + assert.isTrue( + isAmqpAnnotatedMessage({ + body: "hello world", + bodyType: "sequence" + }) + ); + assert.isTrue( + isAmqpAnnotatedMessage({ + body: "hello world", + bodyType: "value" + }) + ); + assert.isTrue( + isAmqpAnnotatedMessage({ + body: "hello world", + bodyType: "data" + }) + ); - assert.isTrue( - isAmqpAnnotatedMessage({ - body: "hello world", - bodyType: undefined // the property _must_ exist, but undefined is fine. We'll default to 'data' - }) - ); + assert.isTrue( + isAmqpAnnotatedMessage({ + body: "hello world", + bodyType: undefined // the property _must_ exist, but undefined is fine. We'll default to 'data' + }) + ); + }); }); }); diff --git a/sdk/eventhub/event-hubs/test/internal/auth.spec.ts b/sdk/eventhub/event-hubs/test/internal/auth.spec.ts index a72125e1133b..115bee565f05 100644 --- a/sdk/eventhub/event-hubs/test/internal/auth.spec.ts +++ b/sdk/eventhub/event-hubs/test/internal/auth.spec.ts @@ -11,172 +11,188 @@ import { EnvVarKeys, getEnvVars } from "../public/utils/testUtils"; import chai from "chai"; import { AzureNamedKeyCredential, AzureSASCredential } from "@azure/core-auth"; import { createSasTokenProvider } from "@azure/core-amqp"; +import { testWithServiceTypes } from "../public/utils/testWithServiceTypes"; +import { createMockServer } from "../public/utils/mockService"; const should = chai.should(); -const env = getEnvVars(); - -describe("Authentication via", () => { - const { - endpoint, - fullyQualifiedNamespace, - sharedAccessKey, - sharedAccessKeyName - } = parseEventHubConnectionString(env[EnvVarKeys.EVENTHUB_CONNECTION_STRING]); - const service = { - connectionString: env[EnvVarKeys.EVENTHUB_CONNECTION_STRING], - path: env[EnvVarKeys.EVENTHUB_NAME], - endpoint: endpoint.replace(/\/+$/, "") - }; - - before(() => { - should.exist( - env[EnvVarKeys.EVENTHUB_CONNECTION_STRING], - "define EVENTHUB_CONNECTION_STRING in your environment before running integration tests." - ); - should.exist( - env[EnvVarKeys.EVENTHUB_NAME], - "define EVENTHUB_NAME in your environment before running integration tests." 
- ); - }); - - describe("Keys", () => { - describe("using connection string", () => { - it("EventHubConsumerClient", async () => { - const consumerClient = new EventHubConsumerClient( - "$Default", - service.connectionString, - service.path - ); - - const properties = await consumerClient.getEventHubProperties(); - should.exist(properties); - - await consumerClient.close(); - }); - it("EventHubProducerClient", async () => { - const producerClient = new EventHubProducerClient(service.connectionString, service.path); - - const properties = await producerClient.getEventHubProperties(); - should.exist(properties); - - await producerClient.close(); - }); +testWithServiceTypes((serviceVersion) => { + const env = getEnvVars(); + if (serviceVersion === "mock") { + let service: ReturnType; + before("Starting mock service", () => { + service = createMockServer(); + return service.start(); }); - describe("using NamedKeyCredential", () => { - it("EventHubConsumerClient", async () => { - const namedKeyCredential = new AzureNamedKeyCredential( - sharedAccessKeyName!, - sharedAccessKey! - ); + after("Stopping mock service", () => { + return service?.stop(); + }); + } + + describe("Authentication via", () => { + const { + endpoint, + fullyQualifiedNamespace, + sharedAccessKey, + sharedAccessKeyName + } = parseEventHubConnectionString(env[EnvVarKeys.EVENTHUB_CONNECTION_STRING]); + const service = { + connectionString: env[EnvVarKeys.EVENTHUB_CONNECTION_STRING], + path: env[EnvVarKeys.EVENTHUB_NAME], + endpoint: endpoint.replace(/\/+$/, "") + }; + + before(() => { + should.exist( + env[EnvVarKeys.EVENTHUB_CONNECTION_STRING], + "define EVENTHUB_CONNECTION_STRING in your environment before running integration tests." + ); + should.exist( + env[EnvVarKeys.EVENTHUB_NAME], + "define EVENTHUB_NAME in your environment before running integration tests." + ); + }); - const consumerClient = new EventHubConsumerClient( - "$Default", - fullyQualifiedNamespace, - service.path, - namedKeyCredential - ); + describe("Keys", () => { + describe("using connection string", () => { + it("EventHubConsumerClient", async () => { + const consumerClient = new EventHubConsumerClient( + "$Default", + service.connectionString, + service.path + ); - const properties = await consumerClient.getEventHubProperties(); - should.exist(properties); + const properties = await consumerClient.getEventHubProperties(); + should.exist(properties); - await consumerClient.close(); - }); + await consumerClient.close(); + }); - it("EventHubProducerClient", async () => { - const namedKeyCredential = new AzureNamedKeyCredential( - sharedAccessKeyName!, - sharedAccessKey! - ); + it("EventHubProducerClient", async () => { + const producerClient = new EventHubProducerClient(service.connectionString, service.path); - const producerClient = new EventHubProducerClient( - fullyQualifiedNamespace, - service.path, - namedKeyCredential - ); + const properties = await producerClient.getEventHubProperties(); + should.exist(properties); - const properties = await producerClient.getEventHubProperties(); - should.exist(properties); + await producerClient.close(); + }); + }); - await producerClient.close(); + describe("using NamedKeyCredential", () => { + it("EventHubConsumerClient", async () => { + const namedKeyCredential = new AzureNamedKeyCredential( + sharedAccessKeyName!, + sharedAccessKey! 
+ ); + + const consumerClient = new EventHubConsumerClient( + "$Default", + fullyQualifiedNamespace, + service.path, + namedKeyCredential + ); + + const properties = await consumerClient.getEventHubProperties(); + should.exist(properties); + + await consumerClient.close(); + }); + + it("EventHubProducerClient", async () => { + const namedKeyCredential = new AzureNamedKeyCredential( + sharedAccessKeyName!, + sharedAccessKey! + ); + + const producerClient = new EventHubProducerClient( + fullyQualifiedNamespace, + service.path, + namedKeyCredential + ); + + const properties = await producerClient.getEventHubProperties(); + should.exist(properties); + + await producerClient.close(); + }); }); }); - }); - describe("SAS", () => { - function getSas(): string { - const parsed = parseEventHubConnectionString(service.connectionString) as Required< - | Pick - | Pick - >; - return createSasTokenProvider(parsed).getToken(`${service.endpoint}/${service.path}`).token; - } + describe("SAS", () => { + function getSas(): string { + const parsed = parseEventHubConnectionString(service.connectionString) as Required< + | Pick + | Pick + >; + return createSasTokenProvider(parsed).getToken(`${service.endpoint}/${service.path}`).token; + } - describe("using connection string", () => { - function getSasConnectionString(): string { - const sas = getSas(); + describe("using connection string", () => { + function getSasConnectionString(): string { + const sas = getSas(); - return `Endpoint=${service.endpoint}/;SharedAccessSignature=${sas}`; - } + return `Endpoint=${service.endpoint}/;SharedAccessSignature=${sas}`; + } - it("EventHubConsumerClient", async () => { - const sasConnectionString = getSasConnectionString(); + it("EventHubConsumerClient", async () => { + const sasConnectionString = getSasConnectionString(); - const consumerClient = new EventHubConsumerClient( - "$Default", - sasConnectionString, - service.path - ); + const consumerClient = new EventHubConsumerClient( + "$Default", + sasConnectionString, + service.path + ); - const properties = await consumerClient.getEventHubProperties(); - should.exist(properties); + const properties = await consumerClient.getEventHubProperties(); + should.exist(properties); - await consumerClient.close(); - }); + await consumerClient.close(); + }); - it("EventHubProducerClient", async () => { - const sasConnectionString = getSasConnectionString(); + it("EventHubProducerClient", async () => { + const sasConnectionString = getSasConnectionString(); - const producerClient = new EventHubProducerClient(sasConnectionString, service.path); + const producerClient = new EventHubProducerClient(sasConnectionString, service.path); - const properties = await producerClient.getEventHubProperties(); - should.exist(properties); + const properties = await producerClient.getEventHubProperties(); + should.exist(properties); - await producerClient.close(); + await producerClient.close(); + }); }); - }); - describe("using SASCredential", () => { - it("EventHubConsumerClient", async () => { - const sasCredential = new AzureSASCredential(getSas()); + describe("using SASCredential", () => { + it("EventHubConsumerClient", async () => { + const sasCredential = new AzureSASCredential(getSas()); - const consumerClient = new EventHubConsumerClient( - "$Default", - fullyQualifiedNamespace, - service.path, - sasCredential - ); + const consumerClient = new EventHubConsumerClient( + "$Default", + fullyQualifiedNamespace, + service.path, + sasCredential + ); - const properties = await 
consumerClient.getEventHubProperties(); - should.exist(properties); + const properties = await consumerClient.getEventHubProperties(); + should.exist(properties); - await consumerClient.close(); - }); + await consumerClient.close(); + }); - it("EventHubProducerClient", async () => { - const sasCredential = new AzureSASCredential(getSas()); + it("EventHubProducerClient", async () => { + const sasCredential = new AzureSASCredential(getSas()); - const producerClient = new EventHubProducerClient( - fullyQualifiedNamespace, - service.path, - sasCredential - ); + const producerClient = new EventHubProducerClient( + fullyQualifiedNamespace, + service.path, + sasCredential + ); - const properties = await producerClient.getEventHubProperties(); - should.exist(properties); + const properties = await producerClient.getEventHubProperties(); + should.exist(properties); - await producerClient.close(); + await producerClient.close(); + }); }); }); }); diff --git a/sdk/eventhub/event-hubs/test/internal/cancellation.spec.ts b/sdk/eventhub/event-hubs/test/internal/cancellation.spec.ts index a7919991b9ab..c4a9c87f7145 100644 --- a/sdk/eventhub/event-hubs/test/internal/cancellation.spec.ts +++ b/sdk/eventhub/event-hubs/test/internal/cancellation.spec.ts @@ -8,157 +8,173 @@ import chaiAsPromised from "chai-as-promised"; import { createConnectionContext } from "../../src/connectionContext"; import { EventHubReceiver } from "../../src/eventHubReceiver"; import { EventHubSender } from "../../src/eventHubSender"; +import { createMockServer } from "../public/utils/mockService"; chai.use(chaiAsPromised); import { EnvVarKeys, getEnvVars } from "../public/utils/testUtils"; -const env = getEnvVars(); - -describe("Cancellation via AbortSignal", () => { - const service = { - connectionString: env[EnvVarKeys.EVENTHUB_CONNECTION_STRING], - path: env[EnvVarKeys.EVENTHUB_NAME] - }; - before("validate environment", () => { - should.exist( - env[EnvVarKeys.EVENTHUB_CONNECTION_STRING], - "define EVENTHUB_CONNECTION_STRING in your environment before running integration tests." - ); - should.exist( - env[EnvVarKeys.EVENTHUB_NAME], - "define EVENTHUB_NAME in your environment before running integration tests." 
- ); - }); - - let context: ReturnType; - beforeEach("create connection context", function() { - context = createConnectionContext(service.connectionString, service.path); - }); - - afterEach("close connection context", function() { - return context.close(); - }); - - const TEST_FAILURE = "Test failure"; +import { testWithServiceTypes } from "../public/utils/testWithServiceTypes"; + +testWithServiceTypes((serviceVersion) => { + const env = getEnvVars(); + if (serviceVersion === "mock") { + let service: ReturnType; + before("Starting mock service", () => { + service = createMockServer(); + return service.start(); + }); - const cancellationCases = [ - { - type: "pre-aborted", - getSignal() { - const controller = new AbortController(); - controller.abort(); - return controller.signal; - } - }, - { - type: "aborted after timeout", - getSignal() { - const controller = new AbortController(); - setTimeout(() => { - controller.abort(); - }, 0); - return controller.signal; - } - } - ]; - - describe("EventHubReceiver", () => { - let client: EventHubReceiver; - beforeEach("instantiate EventHubReceiver", () => { - client = new EventHubReceiver( - context, - "$default", // consumer group - "0", // partition id - { - enqueuedOn: Date.now() - } + after("Stopping mock service", () => { + return service?.stop(); + }); + } + + describe("Cancellation via AbortSignal", () => { + const service = { + connectionString: env[EnvVarKeys.EVENTHUB_CONNECTION_STRING], + path: env[EnvVarKeys.EVENTHUB_NAME] + }; + before("validate environment", () => { + should.exist( + env[EnvVarKeys.EVENTHUB_CONNECTION_STRING], + "define EVENTHUB_CONNECTION_STRING in your environment before running integration tests." + ); + should.exist( + env[EnvVarKeys.EVENTHUB_NAME], + "define EVENTHUB_NAME in your environment before running integration tests." ); }); - afterEach("close EventHubReceiver", () => { - return client.close(); + let context: ReturnType; + beforeEach("create connection context", function() { + context = createConnectionContext(service.connectionString, service.path); }); - for (const { type: caseType, getSignal } of cancellationCases) { - it(`initialize supports cancellation (${caseType})`, async () => { - const abortSignal = getSignal(); - try { - await client.initialize({ abortSignal, timeoutInMs: 60000 }); - throw new Error(TEST_FAILURE); - } catch (err) { - should.equal(err.name, "AbortError"); - should.equal(err.message, "The operation was aborted."); - } - }); + afterEach("close connection context", function() { + return context.close(); + }); - it(`receiveBatch supports cancellation (${caseType})`, async () => { - const abortSignal = getSignal(); - try { - await client.receiveBatch(10, undefined, abortSignal); - throw new Error(TEST_FAILURE); - } catch (err) { - should.equal(err.name, "AbortError"); - should.equal(err.message, "The operation was aborted."); - } - }); + const TEST_FAILURE = "Test failure"; - it(`receiveBatch supports cancellation when connection already exists (${caseType})`, async () => { - // Open the connection. 
- await client.initialize({ abortSignal: undefined, timeoutInMs: 60000 }); - try { - const abortSignal = getSignal(); - await client.receiveBatch(10, undefined, abortSignal); - throw new Error(TEST_FAILURE); - } catch (err) { - should.equal(err.name, "AbortError"); - should.equal(err.message, "The operation was aborted."); + const cancellationCases = [ + { + type: "pre-aborted", + getSignal() { + const controller = new AbortController(); + controller.abort(); + return controller.signal; + } + }, + { + type: "aborted after timeout", + getSignal() { + const controller = new AbortController(); + setTimeout(() => { + controller.abort(); + }, 0); + return controller.signal; } + } + ]; + + describe("EventHubReceiver", () => { + let client: EventHubReceiver; + beforeEach("instantiate EventHubReceiver", () => { + client = new EventHubReceiver( + context, + "$default", // consumer group + "0", // partition id + { + enqueuedOn: Date.now() + } + ); }); - } - }); - describe("EventHubSender", () => { - let client: EventHubSender; - beforeEach("instantiate EventHubSender", () => { - client = new EventHubSender(context); - }); + afterEach("close EventHubReceiver", () => { + return client.close(); + }); - afterEach("close EventHubSender", () => { - return client.close(); + for (const { type: caseType, getSignal } of cancellationCases) { + it(`initialize supports cancellation (${caseType})`, async () => { + const abortSignal = getSignal(); + try { + await client.initialize({ abortSignal, timeoutInMs: 60000 }); + throw new Error(TEST_FAILURE); + } catch (err) { + should.equal(err.name, "AbortError"); + should.equal(err.message, "The operation was aborted."); + } + }); + + it(`receiveBatch supports cancellation (${caseType})`, async () => { + const abortSignal = getSignal(); + try { + await client.receiveBatch(10, undefined, abortSignal); + throw new Error(TEST_FAILURE); + } catch (err) { + should.equal(err.name, "AbortError"); + should.equal(err.message, "The operation was aborted."); + } + }); + + it(`receiveBatch supports cancellation when connection already exists (${caseType})`, async () => { + // Open the connection. 
+ await client.initialize({ abortSignal: undefined, timeoutInMs: 60000 }); + try { + const abortSignal = getSignal(); + await client.receiveBatch(10, undefined, abortSignal); + throw new Error(TEST_FAILURE); + } catch (err) { + should.equal(err.name, "AbortError"); + should.equal(err.message, "The operation was aborted."); + } + }); + } }); - for (const { type: caseType, getSignal } of cancellationCases) { - it(`_getLink supports cancellation (${caseType})`, async () => { - const abortSignal = getSignal(); - try { - await client["_getLink"]({ abortSignal }); - throw new Error(TEST_FAILURE); - } catch (err) { - should.equal(err.name, "AbortError"); - should.equal(err.message, "The operation was aborted."); - } + describe("EventHubSender", () => { + let client: EventHubSender; + beforeEach("instantiate EventHubSender", () => { + client = new EventHubSender(context); }); - it(`getMaxMessageSize supports cancellation (${caseType})`, async () => { - const abortSignal = getSignal(); - try { - await client.getMaxMessageSize({ abortSignal }); - throw new Error(TEST_FAILURE); - } catch (err) { - should.equal(err.name, "AbortError"); - should.equal(err.message, "The operation was aborted."); - } + afterEach("close EventHubSender", () => { + return client.close(); }); - it(`send supports cancellation (${caseType})`, async () => { - const abortSignal = getSignal(); - try { - await client.send([{ body: "unsung hero" }], { abortSignal }); - throw new Error(TEST_FAILURE); - } catch (err) { - should.equal(err.name, "AbortError"); - should.equal(err.message, "The operation was aborted."); - } - }); - } + for (const { type: caseType, getSignal } of cancellationCases) { + it(`_getLink supports cancellation (${caseType})`, async () => { + const abortSignal = getSignal(); + try { + await client["_getLink"]({ abortSignal }); + throw new Error(TEST_FAILURE); + } catch (err) { + should.equal(err.name, "AbortError"); + should.equal(err.message, "The operation was aborted."); + } + }); + + it(`getMaxMessageSize supports cancellation (${caseType})`, async () => { + const abortSignal = getSignal(); + try { + await client.getMaxMessageSize({ abortSignal }); + throw new Error(TEST_FAILURE); + } catch (err) { + should.equal(err.name, "AbortError"); + should.equal(err.message, "The operation was aborted."); + } + }); + + it(`send supports cancellation (${caseType})`, async () => { + const abortSignal = getSignal(); + try { + await client.send([{ body: "unsung hero" }], { abortSignal }); + throw new Error(TEST_FAILURE); + } catch (err) { + should.equal(err.name, "AbortError"); + should.equal(err.message, "The operation was aborted."); + } + }); + } + }); }); }); diff --git a/sdk/eventhub/event-hubs/test/internal/client.spec.ts b/sdk/eventhub/event-hubs/test/internal/client.spec.ts index 760de8f22974..e8383b49e41a 100644 --- a/sdk/eventhub/event-hubs/test/internal/client.spec.ts +++ b/sdk/eventhub/event-hubs/test/internal/client.spec.ts @@ -20,7 +20,8 @@ import { EnvVarKeys, getEnvVars, isNode } from "../public/utils/testUtils"; import { MessagingError } from "@azure/core-amqp"; import { ConnectionContext } from "../../src/connectionContext"; import { getRuntimeInfo } from "../../src/util/runtimeInfo"; -const env = getEnvVars(); +import { testWithServiceTypes } from "../public/utils/testWithServiceTypes"; +import { createMockServer } from "../public/utils/mockService"; const testFailureMessage = "Test failure"; function validateConnectionError(err: E): void { @@ -31,751 +32,774 @@ function validateConnectionError(err: E): 
v should.not.equal(err.message, testFailureMessage); } -describe("Create EventHubConsumerClient", function(): void { - it("throws when no EntityPath in connection string", function(): void { - const connectionString = "Endpoint=sb://abc"; - const test = function(): EventHubConsumerClient { - return new EventHubConsumerClient("dummy", connectionString); - }; - test.should.throw( - Error, - `Either provide "eventHubName" or the "connectionString": "${connectionString}", ` + - `must contain "EntityPath=".` - ); - }); +testWithServiceTypes((serviceVersion) => { + const env = getEnvVars(); + if (serviceVersion === "mock") { + let service: ReturnType; + before("Starting mock service", () => { + service = createMockServer(); + return service.start(); + }); - it("throws when EntityPath in connection string doesn't match with event hub name parameter", function(): void { - const connectionString = - "Endpoint=sb://a;SharedAccessKeyName=b;SharedAccessKey=c=;EntityPath=my-event-hub-name"; - const eventHubName = "event-hub-name"; - const test = function(): EventHubConsumerClient { - return new EventHubConsumerClient("dummy", connectionString, eventHubName); - }; - test.should.throw( - Error, - `The entity path "my-event-hub-name" in connectionString: "${connectionString}" ` + - `doesn't match with eventHubName: "${eventHubName}".` - ); - }); + after("Stopping mock service", () => { + return service?.stop(); + }); + } - it("sets eventHubName, fullyQualifiedNamespace properties when created from a connection string", function(): void { - const client = new EventHubConsumerClient( - "dummy", - "Endpoint=sb://test.servicebus.windows.net;SharedAccessKeyName=b;SharedAccessKey=c;EntityPath=my-event-hub-name" - ); - client.should.be.an.instanceof(EventHubConsumerClient); - should.equal(client.eventHubName, "my-event-hub-name"); - should.equal(client.fullyQualifiedNamespace, "test.servicebus.windows.net"); - }); + describe("Create EventHubConsumerClient", function(): void { + it("throws when no EntityPath in connection string", function(): void { + const connectionString = "Endpoint=sb://abc"; + const test = function(): EventHubConsumerClient { + return new EventHubConsumerClient("dummy", connectionString); + }; + test.should.throw( + Error, + `Either provide "eventHubName" or the "connectionString": "${connectionString}", ` + + `must contain "EntityPath=".` + ); + }); - it("sets eventHubName, fullyQualifiedNamespace properties when created from a connection string and event hub name", function(): void { - const client = new EventHubConsumerClient( - "dummy", - "Endpoint=sb://test.servicebus.windows.net;SharedAccessKeyName=b;SharedAccessKey=c", - "my-event-hub-name" - ); - client.should.be.an.instanceof(EventHubConsumerClient); - should.equal(client.eventHubName, "my-event-hub-name"); - should.equal(client.fullyQualifiedNamespace, "test.servicebus.windows.net"); - }); + it("throws when EntityPath in connection string doesn't match with event hub name parameter", function(): void { + const connectionString = + "Endpoint=sb://a;SharedAccessKeyName=b;SharedAccessKey=c=;EntityPath=my-event-hub-name"; + const eventHubName = "event-hub-name"; + const test = function(): EventHubConsumerClient { + return new EventHubConsumerClient("dummy", connectionString, eventHubName); + }; + test.should.throw( + Error, + `The entity path "my-event-hub-name" in connectionString: "${connectionString}" ` + + `doesn't match with eventHubName: "${eventHubName}".` + ); + }); - it("sets eventHubName, fullyQualifiedNamespace properties 
when created from a token credential", function(): void { - const dummyCredential: TokenCredential = { - getToken: async () => { - return { - token: "boo", - expiresOnTimestamp: 12324 - }; - } - }; - const client = new EventHubConsumerClient( - "dummy", - "test.servicebus.windows.net", - "my-event-hub-name", - dummyCredential - ); - client.should.be.an.instanceof(EventHubConsumerClient); - should.equal(client.eventHubName, "my-event-hub-name"); - should.equal(client.fullyQualifiedNamespace, "test.servicebus.windows.net"); - }); + it("sets eventHubName, fullyQualifiedNamespace properties when created from a connection string", function(): void { + const client = new EventHubConsumerClient( + "dummy", + "Endpoint=sb://test.servicebus.windows.net;SharedAccessKeyName=b;SharedAccessKey=c;EntityPath=my-event-hub-name" + ); + client.should.be.an.instanceof(EventHubConsumerClient); + should.equal(client.eventHubName, "my-event-hub-name"); + should.equal(client.fullyQualifiedNamespace, "test.servicebus.windows.net"); + }); - it("respects customEndpointAddress when using connection string", () => { - const client = new EventHubConsumerClient( - "dummy", - "Endpoint=sb://test.servicebus.windows.net;SharedAccessKeyName=b;SharedAccessKey=c;EntityPath=my-event-hub-name", - { customEndpointAddress: "sb://foo.private.bar:111" } - ); - client.should.be.an.instanceof(EventHubConsumerClient); - client["_context"].config.host.should.equal("foo.private.bar"); - client["_context"].config.amqpHostname!.should.equal("test.servicebus.windows.net"); - client["_context"].config.port!.should.equal(111); - }); + it("sets eventHubName, fullyQualifiedNamespace properties when created from a connection string and event hub name", function(): void { + const client = new EventHubConsumerClient( + "dummy", + "Endpoint=sb://test.servicebus.windows.net;SharedAccessKeyName=b;SharedAccessKey=c", + "my-event-hub-name" + ); + client.should.be.an.instanceof(EventHubConsumerClient); + should.equal(client.eventHubName, "my-event-hub-name"); + should.equal(client.fullyQualifiedNamespace, "test.servicebus.windows.net"); + }); - it("respects customEndpointAddress when using credentials", () => { - const dummyCredential: TokenCredential = { - getToken: async () => { - return { - token: "boo", - expiresOnTimestamp: 12324 - }; - } - }; - const client = new EventHubConsumerClient( - "dummy", - "test.servicebus.windows.net", - "my-event-hub-name", - dummyCredential, - { customEndpointAddress: "sb://foo.private.bar:111" } - ); - client.should.be.an.instanceof(EventHubConsumerClient); - client["_context"].config.host.should.equal("foo.private.bar"); - client["_context"].config.amqpHostname!.should.equal("test.servicebus.windows.net"); - client["_context"].config.port!.should.equal(111); - }); -}); + it("sets eventHubName, fullyQualifiedNamespace properties when created from a token credential", function(): void { + const dummyCredential: TokenCredential = { + getToken: async () => { + return { + token: "boo", + expiresOnTimestamp: 12324 + }; + } + }; + const client = new EventHubConsumerClient( + "dummy", + "test.servicebus.windows.net", + "my-event-hub-name", + dummyCredential + ); + client.should.be.an.instanceof(EventHubConsumerClient); + should.equal(client.eventHubName, "my-event-hub-name"); + should.equal(client.fullyQualifiedNamespace, "test.servicebus.windows.net"); + }); -describe("Create EventHubProducerClient", function(): void { - it("throws when no EntityPath in connection string ", function(): void { - const connectionString = 
"Endpoint=sb://abc"; - const test = function(): EventHubProducerClient { - return new EventHubProducerClient(connectionString); - }; - test.should.throw( - Error, - `Either provide "eventHubName" or the "connectionString": "${connectionString}", ` + - `must contain "EntityPath=".` - ); - }); + it("respects customEndpointAddress when using connection string", () => { + const client = new EventHubConsumerClient( + "dummy", + "Endpoint=sb://test.servicebus.windows.net;SharedAccessKeyName=b;SharedAccessKey=c;EntityPath=my-event-hub-name", + { customEndpointAddress: "sb://foo.private.bar:111" } + ); + client.should.be.an.instanceof(EventHubConsumerClient); + client["_context"].config.host.should.equal("foo.private.bar"); + client["_context"].config.amqpHostname!.should.equal("test.servicebus.windows.net"); + client["_context"].config.port!.should.equal(111); + }); - it("throws when EntityPath in connection string doesn't match with event hub name parameter", function(): void { - const connectionString = - "Endpoint=sb://a;SharedAccessKeyName=b;SharedAccessKey=c=;EntityPath=my-event-hub-name"; - const eventHubName = "event-hub-name"; - const test = function(): EventHubProducerClient { - return new EventHubProducerClient(connectionString, eventHubName); - }; - test.should.throw( - Error, - `The entity path "my-event-hub-name" in connectionString: "${connectionString}" ` + - `doesn't match with eventHubName: "${eventHubName}".` - ); + it("respects customEndpointAddress when using credentials", () => { + const dummyCredential: TokenCredential = { + getToken: async () => { + return { + token: "boo", + expiresOnTimestamp: 12324 + }; + } + }; + const client = new EventHubConsumerClient( + "dummy", + "test.servicebus.windows.net", + "my-event-hub-name", + dummyCredential, + { customEndpointAddress: "sb://foo.private.bar:111" } + ); + client.should.be.an.instanceof(EventHubConsumerClient); + client["_context"].config.host.should.equal("foo.private.bar"); + client["_context"].config.amqpHostname!.should.equal("test.servicebus.windows.net"); + client["_context"].config.port!.should.equal(111); + }); }); - it("sets eventHubName, fullyQualifiedNamespace properties when created from a connection string", function(): void { - const client = new EventHubProducerClient( - "Endpoint=sb://test.servicebus.windows.net;SharedAccessKeyName=b;SharedAccessKey=c;EntityPath=my-event-hub-name" - ); - client.should.be.an.instanceof(EventHubProducerClient); - should.equal(client.eventHubName, "my-event-hub-name"); - should.equal(client.fullyQualifiedNamespace, "test.servicebus.windows.net"); - }); + describe("Create EventHubProducerClient", function(): void { + it("throws when no EntityPath in connection string ", function(): void { + const connectionString = "Endpoint=sb://abc"; + const test = function(): EventHubProducerClient { + return new EventHubProducerClient(connectionString); + }; + test.should.throw( + Error, + `Either provide "eventHubName" or the "connectionString": "${connectionString}", ` + + `must contain "EntityPath=".` + ); + }); - it("sets eventHubName, fullyQualifiedNamespace properties when created from a connection string and event hub name", function(): void { - const client = new EventHubProducerClient( - "Endpoint=sb://test.servicebus.windows.net;SharedAccessKeyName=b;SharedAccessKey=c", - "my-event-hub-name" - ); - client.should.be.an.instanceof(EventHubProducerClient); - should.equal(client.eventHubName, "my-event-hub-name"); - should.equal(client.fullyQualifiedNamespace, 
"test.servicebus.windows.net"); - }); + it("throws when EntityPath in connection string doesn't match with event hub name parameter", function(): void { + const connectionString = + "Endpoint=sb://a;SharedAccessKeyName=b;SharedAccessKey=c=;EntityPath=my-event-hub-name"; + const eventHubName = "event-hub-name"; + const test = function(): EventHubProducerClient { + return new EventHubProducerClient(connectionString, eventHubName); + }; + test.should.throw( + Error, + `The entity path "my-event-hub-name" in connectionString: "${connectionString}" ` + + `doesn't match with eventHubName: "${eventHubName}".` + ); + }); - it("sets eventHubName, fullyQualifiedNamespace properties when created from a token credential", function(): void { - const dummyCredential: TokenCredential = { - getToken: async () => { - return { - token: "boo", - expiresOnTimestamp: 12324 - }; - } - }; - const client = new EventHubProducerClient( - "test.servicebus.windows.net", - "my-event-hub-name", - dummyCredential - ); - client.should.be.an.instanceof(EventHubProducerClient); - should.equal(client.eventHubName, "my-event-hub-name"); - should.equal(client.fullyQualifiedNamespace, "test.servicebus.windows.net"); - }); + it("sets eventHubName, fullyQualifiedNamespace properties when created from a connection string", function(): void { + const client = new EventHubProducerClient( + "Endpoint=sb://test.servicebus.windows.net;SharedAccessKeyName=b;SharedAccessKey=c;EntityPath=my-event-hub-name" + ); + client.should.be.an.instanceof(EventHubProducerClient); + should.equal(client.eventHubName, "my-event-hub-name"); + should.equal(client.fullyQualifiedNamespace, "test.servicebus.windows.net"); + }); - it("respects customEndpointAddress when using connection string", () => { - const client = new EventHubProducerClient( - "Endpoint=sb://test.servicebus.windows.net;SharedAccessKeyName=b;SharedAccessKey=c;EntityPath=my-event-hub-name", - { customEndpointAddress: "sb://foo.private.bar:111" } - ); - client.should.be.an.instanceof(EventHubProducerClient); - client["_context"].config.host.should.equal("foo.private.bar"); - client["_context"].config.amqpHostname!.should.equal("test.servicebus.windows.net"); - client["_context"].config.port!.should.equal(111); - }); + it("sets eventHubName, fullyQualifiedNamespace properties when created from a connection string and event hub name", function(): void { + const client = new EventHubProducerClient( + "Endpoint=sb://test.servicebus.windows.net;SharedAccessKeyName=b;SharedAccessKey=c", + "my-event-hub-name" + ); + client.should.be.an.instanceof(EventHubProducerClient); + should.equal(client.eventHubName, "my-event-hub-name"); + should.equal(client.fullyQualifiedNamespace, "test.servicebus.windows.net"); + }); - it("respects customEndpointAddress when using credentials", () => { - const dummyCredential: TokenCredential = { - getToken: async () => { - return { - token: "boo", - expiresOnTimestamp: 12324 - }; - } - }; - const client = new EventHubProducerClient( - "test.servicebus.windows.net", - "my-event-hub-name", - dummyCredential, - { customEndpointAddress: "sb://foo.private.bar:111" } - ); - client.should.be.an.instanceof(EventHubProducerClient); - client["_context"].config.host.should.equal("foo.private.bar"); - client["_context"].config.amqpHostname!.should.equal("test.servicebus.windows.net"); - client["_context"].config.port!.should.equal(111); - }); -}); + it("sets eventHubName, fullyQualifiedNamespace properties when created from a token credential", function(): void { + const 
dummyCredential: TokenCredential = { + getToken: async () => { + return { + token: "boo", + expiresOnTimestamp: 12324 + }; + } + }; + const client = new EventHubProducerClient( + "test.servicebus.windows.net", + "my-event-hub-name", + dummyCredential + ); + client.should.be.an.instanceof(EventHubProducerClient); + should.equal(client.eventHubName, "my-event-hub-name"); + should.equal(client.fullyQualifiedNamespace, "test.servicebus.windows.net"); + }); -describe("EventHubConsumerClient with non existent namespace", function(): void { - let client: EventHubConsumerClient; - beforeEach(() => { - client = new EventHubConsumerClient( - "$Default", - "Endpoint=sb://a;SharedAccessKeyName=b;SharedAccessKey=c;EntityPath=d" - ); - }); + it("respects customEndpointAddress when using connection string", () => { + const client = new EventHubProducerClient( + "Endpoint=sb://test.servicebus.windows.net;SharedAccessKeyName=b;SharedAccessKey=c;EntityPath=my-event-hub-name", + { customEndpointAddress: "sb://foo.private.bar:111" } + ); + client.should.be.an.instanceof(EventHubProducerClient); + client["_context"].config.host.should.equal("foo.private.bar"); + client["_context"].config.amqpHostname!.should.equal("test.servicebus.windows.net"); + client["_context"].config.port!.should.equal(111); + }); - afterEach(() => { - return client.close(); + it("respects customEndpointAddress when using credentials", () => { + const dummyCredential: TokenCredential = { + getToken: async () => { + return { + token: "boo", + expiresOnTimestamp: 12324 + }; + } + }; + const client = new EventHubProducerClient( + "test.servicebus.windows.net", + "my-event-hub-name", + dummyCredential, + { customEndpointAddress: "sb://foo.private.bar:111" } + ); + client.should.be.an.instanceof(EventHubProducerClient); + client["_context"].config.host.should.equal("foo.private.bar"); + client["_context"].config.amqpHostname!.should.equal("test.servicebus.windows.net"); + client["_context"].config.port!.should.equal(111); + }); }); - it("should throw ServiceCommunicationError for getEventHubProperties", async function(): Promise< - void - > { - try { - await client.getEventHubProperties(); - throw new Error(testFailureMessage); - } catch (err) { - debug(err); - validateConnectionError(err); - } - }); + describe("EventHubConsumerClient with non existent namespace", function(): void { + let client: EventHubConsumerClient; + beforeEach(() => { + client = new EventHubConsumerClient( + "$Default", + "Endpoint=sb://a;SharedAccessKeyName=b;SharedAccessKey=c;EntityPath=d" + ); + }); - it("should throw ServiceCommunicationError for getPartitionProperties", async function(): Promise< - void - > { - try { - await client.getPartitionProperties("0"); - throw new Error(testFailureMessage); - } catch (err) { - debug(err); - validateConnectionError(err); - } - }); + afterEach(() => { + return client.close(); + }); - it("should throw ServiceCommunicationError for getPartitionIds", async function(): Promise { - try { - await client.getPartitionIds(); - throw new Error(testFailureMessage); - } catch (err) { - debug(err); - validateConnectionError(err); - } - }); + it("should throw ServiceCommunicationError for getEventHubProperties", async function(): Promise< + void + > { + try { + await client.getEventHubProperties(); + throw new Error(testFailureMessage); + } catch (err) { + debug(err); + validateConnectionError(err); + } + }); - it("should throw ServiceCommunicationError while subscribe()", async function(): Promise { - let subscription: Subscription | 
undefined; - const caughtErr = await new Promise((resolve) => { - subscription = client.subscribe({ - processEvents: async () => { - /* no-op */ - }, - processError: async (err) => { - resolve(err); - } + it("should throw ServiceCommunicationError for getPartitionProperties", async function(): Promise< + void + > { + try { + await client.getPartitionProperties("0"); + throw new Error(testFailureMessage); + } catch (err) { + debug(err); + validateConnectionError(err); + } + }); + + it("should throw ServiceCommunicationError for getPartitionIds", async function(): Promise< + void + > { + try { + await client.getPartitionIds(); + throw new Error(testFailureMessage); + } catch (err) { + debug(err); + validateConnectionError(err); + } + }); + + it("should throw ServiceCommunicationError while subscribe()", async function(): Promise { + let subscription: Subscription | undefined; + const caughtErr = await new Promise((resolve) => { + subscription = client.subscribe({ + processEvents: async () => { + /* no-op */ + }, + processError: async (err) => { + resolve(err); + } + }); }); + if (subscription) { + await subscription.close(); + } + debug(caughtErr); + validateConnectionError(caughtErr); + await client.close(); }); - if (subscription) { - await subscription.close(); - } - debug(caughtErr); - validateConnectionError(caughtErr); - await client.close(); }); -}); -describe("EventHubProducerClient with non existent namespace", function(): void { - let client: EventHubProducerClient; - beforeEach(() => { - client = new EventHubProducerClient( - "Endpoint=sb://a;SharedAccessKeyName=b;SharedAccessKey=c;EntityPath=d" - ); - }); + describe("EventHubProducerClient with non existent namespace", function(): void { + let client: EventHubProducerClient; + beforeEach(() => { + client = new EventHubProducerClient( + "Endpoint=sb://a;SharedAccessKeyName=b;SharedAccessKey=c;EntityPath=d" + ); + }); - afterEach(() => { - return client.close(); - }); + afterEach(() => { + return client.close(); + }); - it("should throw ServiceCommunicationError for getEventHubProperties", async function(): Promise< - void - > { - try { - await client.getEventHubProperties(); - throw new Error(testFailureMessage); - } catch (err) { - debug(err); - validateConnectionError(err); - } - }); + it("should throw ServiceCommunicationError for getEventHubProperties", async function(): Promise< + void + > { + try { + await client.getEventHubProperties(); + throw new Error(testFailureMessage); + } catch (err) { + debug(err); + validateConnectionError(err); + } + }); - it("should throw ServiceCommunicationError for getPartitionProperties", async function(): Promise< - void - > { - try { - await client.getPartitionProperties("0"); - throw new Error(testFailureMessage); - } catch (err) { - debug(err); - validateConnectionError(err); - } - }); + it("should throw ServiceCommunicationError for getPartitionProperties", async function(): Promise< + void + > { + try { + await client.getPartitionProperties("0"); + throw new Error(testFailureMessage); + } catch (err) { + debug(err); + validateConnectionError(err); + } + }); - it("should throw ServiceCommunicationError for getPartitionIds", async function(): Promise { - try { - await client.getPartitionIds(); - throw new Error(testFailureMessage); - } catch (err) { - debug(err); - validateConnectionError(err); - } - }); + it("should throw ServiceCommunicationError for getPartitionIds", async function(): Promise< + void + > { + try { + await client.getPartitionIds(); + throw new Error(testFailureMessage); 
+ } catch (err) { + debug(err); + validateConnectionError(err); + } + }); - it("should throw ServiceCommunicationError while sending", async function(): Promise { - try { - await client.sendBatch([{ body: "Hello World" }]); - throw new Error(testFailureMessage); - } catch (err) { - debug(err); - validateConnectionError(err); - } - }); + it("should throw ServiceCommunicationError while sending", async function(): Promise { + try { + await client.sendBatch([{ body: "Hello World" }]); + throw new Error(testFailureMessage); + } catch (err) { + debug(err); + validateConnectionError(err); + } + }); - it("should throw ServiceCommunicationError while creating a batch", async function(): Promise< - void - > { - try { - await client.createBatch(); - throw new Error(testFailureMessage); - } catch (err) { - debug(err); - validateConnectionError(err); - } + it("should throw ServiceCommunicationError while creating a batch", async function(): Promise< + void + > { + try { + await client.createBatch(); + throw new Error(testFailureMessage); + } catch (err) { + debug(err); + validateConnectionError(err); + } + }); }); -}); -describe("EventHubConsumerClient with non existent event hub", function(): void { - let client: EventHubConsumerClient; - const expectedErrCode = "MessagingEntityNotFoundError"; + describe("EventHubConsumerClient with non existent event hub", function(): void { + let client: EventHubConsumerClient; + const expectedErrCode = "MessagingEntityNotFoundError"; - beforeEach(() => { - should.exist( - env[EnvVarKeys.EVENTHUB_CONNECTION_STRING], - "define EVENTHUB_CONNECTION_STRING in your environment before running integration tests." - ); + beforeEach(() => { + should.exist( + env[EnvVarKeys.EVENTHUB_CONNECTION_STRING], + "define EVENTHUB_CONNECTION_STRING in your environment before running integration tests." 
+ ); - client = new EventHubConsumerClient("dummy", env[EnvVarKeys.EVENTHUB_CONNECTION_STRING], "bad"); - }); + client = new EventHubConsumerClient( + "dummy", + env[EnvVarKeys.EVENTHUB_CONNECTION_STRING], + "bad" + ); + }); - afterEach(() => { - return client.close(); - }); + afterEach(() => { + return client.close(); + }); - it("should throw MessagingEntityNotFoundError for getEventHubProperties", async function(): Promise< - void - > { - try { - await client.getEventHubProperties(); - throw new Error(testFailureMessage); - } catch (err) { - debug(err); - should.equal(err.code, expectedErrCode); - } - }); + it("should throw MessagingEntityNotFoundError for getEventHubProperties", async function(): Promise< + void + > { + try { + await client.getEventHubProperties(); + throw new Error(testFailureMessage); + } catch (err) { + debug(err); + should.equal(err.code, expectedErrCode); + } + }); - it("should throw MessagingEntityNotFoundError for getPartitionProperties", async function(): Promise< - void - > { - try { - await client.getPartitionProperties("0"); - throw new Error(testFailureMessage); - } catch (err) { - debug(err); - should.equal(err.code, expectedErrCode); - } - }); + it("should throw MessagingEntityNotFoundError for getPartitionProperties", async function(): Promise< + void + > { + try { + await client.getPartitionProperties("0"); + throw new Error(testFailureMessage); + } catch (err) { + debug(err); + should.equal(err.code, expectedErrCode); + } + }); - it("should throw MessagingEntityNotFoundError for getPartitionIds", async function(): Promise< - void - > { - try { - await client.getPartitionIds(); - throw new Error(testFailureMessage); - } catch (err) { - debug(err); - should.equal(err.code, expectedErrCode); - } - }); + it("should throw MessagingEntityNotFoundError for getPartitionIds", async function(): Promise< + void + > { + try { + await client.getPartitionIds(); + throw new Error(testFailureMessage); + } catch (err) { + debug(err); + should.equal(err.code, expectedErrCode); + } + }); - it("should throw MessagingEntityNotFoundError while subscribe()", async function(): Promise< - void - > { - let subscription: Subscription | undefined; - const caughtErr = await new Promise((resolve) => { - subscription = client.subscribe({ - processEvents: async () => { - /* no-op */ - }, - processError: async (err) => { - resolve(err); - } + it("should throw MessagingEntityNotFoundError while subscribe()", async function(): Promise< + void + > { + let subscription: Subscription | undefined; + const caughtErr = await new Promise((resolve) => { + subscription = client.subscribe({ + processEvents: async () => { + /* no-op */ + }, + processError: async (err) => { + resolve(err); + } + }); }); + if (subscription) { + await subscription.close(); + } + debug(caughtErr); + should.equal(caughtErr instanceof MessagingError && caughtErr.code, expectedErrCode); + await client.close(); }); - if (subscription) { - await subscription.close(); - } - debug(caughtErr); - should.equal(caughtErr instanceof MessagingError && caughtErr.code, expectedErrCode); - await client.close(); }); -}); -describe("EventHubProducerClient with non existent event hub", function(): void { - let client: EventHubProducerClient; - const expectedErrCode = "MessagingEntityNotFoundError"; + describe("EventHubProducerClient with non existent event hub", function(): void { + let client: EventHubProducerClient; + const expectedErrCode = "MessagingEntityNotFoundError"; - beforeEach(() => { - should.exist( - 
env[EnvVarKeys.EVENTHUB_CONNECTION_STRING], - "define EVENTHUB_CONNECTION_STRING in your environment before running integration tests." - ); - client = new EventHubProducerClient(env[EnvVarKeys.EVENTHUB_CONNECTION_STRING], "bad"); - }); + beforeEach(() => { + should.exist( + env[EnvVarKeys.EVENTHUB_CONNECTION_STRING], + "define EVENTHUB_CONNECTION_STRING in your environment before running integration tests." + ); + client = new EventHubProducerClient(env[EnvVarKeys.EVENTHUB_CONNECTION_STRING], "bad"); + }); - afterEach(() => { - return client.close(); - }); + afterEach(() => { + return client.close(); + }); - it("should throw MessagingEntityNotFoundError for getEventHubProperties", async function(): Promise< - void - > { - try { - await client.getEventHubProperties(); - throw new Error(testFailureMessage); - } catch (err) { - debug(err); - should.equal(err.code, expectedErrCode); - } - }); + it("should throw MessagingEntityNotFoundError for getEventHubProperties", async function(): Promise< + void + > { + try { + await client.getEventHubProperties(); + throw new Error(testFailureMessage); + } catch (err) { + debug(err); + should.equal(err.code, expectedErrCode); + } + }); - it("should throw MessagingEntityNotFoundError for getPartitionProperties", async function(): Promise< - void - > { - try { - await client.getPartitionProperties("0"); - throw new Error(testFailureMessage); - } catch (err) { - debug(err); - should.equal(err.code, expectedErrCode); - } - }); + it("should throw MessagingEntityNotFoundError for getPartitionProperties", async function(): Promise< + void + > { + try { + await client.getPartitionProperties("0"); + throw new Error(testFailureMessage); + } catch (err) { + debug(err); + should.equal(err.code, expectedErrCode); + } + }); - it("should throw MessagingEntityNotFoundError for getPartitionIds", async function(): Promise< - void - > { - try { - await client.getPartitionIds(); - throw new Error(testFailureMessage); - } catch (err) { - debug(err); - should.equal(err.code, expectedErrCode); - } - }); + it("should throw MessagingEntityNotFoundError for getPartitionIds", async function(): Promise< + void + > { + try { + await client.getPartitionIds(); + throw new Error(testFailureMessage); + } catch (err) { + debug(err); + should.equal(err.code, expectedErrCode); + } + }); - it("should throw MessagingEntityNotFoundError while sending", async function(): Promise { - try { - await client.sendBatch([{ body: "Hello World" }]); - throw new Error(testFailureMessage); - } catch (err) { - debug(err); - should.equal(err.code, expectedErrCode); - } - }); + it("should throw MessagingEntityNotFoundError while sending", async function(): Promise { + try { + await client.sendBatch([{ body: "Hello World" }]); + throw new Error(testFailureMessage); + } catch (err) { + debug(err); + should.equal(err.code, expectedErrCode); + } + }); - it("should throw MessagingEntityNotFoundError while creating a batch", async function(): Promise< - void - > { - try { - await client.createBatch(); - throw new Error(testFailureMessage); - } catch (err) { - debug(err); - should.equal(err.code, expectedErrCode); - } + it("should throw MessagingEntityNotFoundError while creating a batch", async function(): Promise< + void + > { + try { + await client.createBatch(); + throw new Error(testFailureMessage); + } catch (err) { + debug(err); + should.equal(err.code, expectedErrCode); + } + }); }); -}); -describe("EventHubConsumerClient User Agent String", function(): void { - beforeEach(() => { - should.exist( - 
env[EnvVarKeys.EVENTHUB_CONNECTION_STRING], - "define EVENTHUB_CONNECTION_STRING in your environment before running integration tests." - ); + describe("EventHubConsumerClient User Agent String", function(): void { + beforeEach(() => { + should.exist( + env[EnvVarKeys.EVENTHUB_CONNECTION_STRING], + "define EVENTHUB_CONNECTION_STRING in your environment before running integration tests." + ); - should.exist( - env[EnvVarKeys.EVENTHUB_NAME], - "define EVENTHUB_NAME in your environment before running integration tests." - ); - }); + should.exist( + env[EnvVarKeys.EVENTHUB_NAME], + "define EVENTHUB_NAME in your environment before running integration tests." + ); + }); - it("should correctly populate the default user agent", async function(): Promise { - const consumerClient = new EventHubConsumerClient( - "$Default", - env[EnvVarKeys.EVENTHUB_CONNECTION_STRING], - env[EnvVarKeys.EVENTHUB_NAME] - ); - testUserAgentString(consumerClient["_context"]); - await consumerClient.close(); - }); + it("should correctly populate the default user agent", async function(): Promise { + const consumerClient = new EventHubConsumerClient( + "$Default", + env[EnvVarKeys.EVENTHUB_CONNECTION_STRING], + env[EnvVarKeys.EVENTHUB_NAME] + ); + testUserAgentString(consumerClient["_context"]); + await consumerClient.close(); + }); - it("should correctly populate the custom user agent", async function(): Promise { - const customUserAgent = "boo"; - const consumerClient = new EventHubConsumerClient( - "$Default", - env[EnvVarKeys.EVENTHUB_CONNECTION_STRING], - env[EnvVarKeys.EVENTHUB_NAME], - { userAgent: customUserAgent } - ); - testUserAgentString(consumerClient["_context"], customUserAgent); - await consumerClient.close(); + it("should correctly populate the custom user agent", async function(): Promise { + const customUserAgent = "boo"; + const consumerClient = new EventHubConsumerClient( + "$Default", + env[EnvVarKeys.EVENTHUB_CONNECTION_STRING], + env[EnvVarKeys.EVENTHUB_NAME], + { userAgent: customUserAgent } + ); + testUserAgentString(consumerClient["_context"], customUserAgent); + await consumerClient.close(); + }); }); -}); -describe("EventHubProducerClient User Agent String", function(): void { - beforeEach(() => { - should.exist( - env[EnvVarKeys.EVENTHUB_CONNECTION_STRING], - "define EVENTHUB_CONNECTION_STRING in your environment before running integration tests." - ); + describe("EventHubProducerClient User Agent String", function(): void { + beforeEach(() => { + should.exist( + env[EnvVarKeys.EVENTHUB_CONNECTION_STRING], + "define EVENTHUB_CONNECTION_STRING in your environment before running integration tests." + ); - should.exist( - env[EnvVarKeys.EVENTHUB_NAME], - "define EVENTHUB_NAME in your environment before running integration tests." - ); - }); + should.exist( + env[EnvVarKeys.EVENTHUB_NAME], + "define EVENTHUB_NAME in your environment before running integration tests." 
+ ); + }); - it("should correctly populate the default user agent", async function(): Promise { - const producerClient = new EventHubProducerClient( - env[EnvVarKeys.EVENTHUB_CONNECTION_STRING], - env[EnvVarKeys.EVENTHUB_NAME] - ); - testUserAgentString(producerClient["_context"]); - await producerClient.close(); - }); + it("should correctly populate the default user agent", async function(): Promise { + const producerClient = new EventHubProducerClient( + env[EnvVarKeys.EVENTHUB_CONNECTION_STRING], + env[EnvVarKeys.EVENTHUB_NAME] + ); + testUserAgentString(producerClient["_context"]); + await producerClient.close(); + }); - it("should correctly populate the custom user agent", async function(): Promise { - const customUserAgent = "boo"; - const producerClient = new EventHubProducerClient( - env[EnvVarKeys.EVENTHUB_CONNECTION_STRING], - env[EnvVarKeys.EVENTHUB_NAME], - { userAgent: customUserAgent } - ); - testUserAgentString(producerClient["_context"], customUserAgent); - await producerClient.close(); + it("should correctly populate the custom user agent", async function(): Promise { + const customUserAgent = "boo"; + const producerClient = new EventHubProducerClient( + env[EnvVarKeys.EVENTHUB_CONNECTION_STRING], + env[EnvVarKeys.EVENTHUB_NAME], + { userAgent: customUserAgent } + ); + testUserAgentString(producerClient["_context"], customUserAgent); + await producerClient.close(); + }); }); -}); -function testUserAgentString(context: ConnectionContext, customValue?: string): void { - const packageVersion = packageJsonInfo.version; - const properties = context.connection.options.properties; - properties!["user-agent"].should.startWith( - `azsdk-js-azureeventhubs/${packageVersion} (${getRuntimeInfo()})` - ); - should.equal(properties!.product, "MSJSClient"); - should.equal(properties!.version, packageVersion); - if (isNode) { - should.equal(properties!.framework, `Node/${process.version}`); - } else { - should.equal(properties!.framework.startsWith("Browser/"), true); - } - should.exist(properties!.platform); - if (customValue) { - properties!["user-agent"].should.endWith(customValue); - } -} - -describe("EventHubConsumerClient after close()", function(): void { - let client: EventHubConsumerClient; - const expectedErrorMsg = "The underlying AMQP connection is closed."; - - async function beforeEachTest(): Promise { - should.exist( - env[EnvVarKeys.EVENTHUB_CONNECTION_STRING], - "define EVENTHUB_CONNECTION_STRING in your environment before running integration tests." - ); - should.exist( - env[EnvVarKeys.EVENTHUB_NAME], - "define EVENTHUB_NAME in your environment before running integration tests." - ); - client = new EventHubConsumerClient( - "$Default", - env[EnvVarKeys.EVENTHUB_CONNECTION_STRING], - env[EnvVarKeys.EVENTHUB_NAME] + function testUserAgentString(context: ConnectionContext, customValue?: string): void { + const packageVersion = packageJsonInfo.version; + const properties = context.connection.options.properties; + properties!["user-agent"].should.startWith( + `azsdk-js-azureeventhubs/${packageVersion} (${getRuntimeInfo()})` ); - - // Ensure that the connection is opened - await client.getPartitionIds(); - - // close(), so that we can then test the resulting error. 
- await client.close(); - } - - it("should throw connection closed error for getEventHubProperties", async function(): Promise< - void - > { - await beforeEachTest(); - try { - await client.getEventHubProperties(); - throw new Error(testFailureMessage); - } catch (err) { - debug(err); - should.equal(err.message, expectedErrorMsg); + should.equal(properties!.product, "MSJSClient"); + should.equal(properties!.version, packageVersion); + if (isNode) { + should.equal(properties!.framework, `Node/${process.version}`); + } else { + should.equal(properties!.framework.startsWith("Browser/"), true); } - }); - - it("should throw connection closed error for getPartitionProperties", async function(): Promise< - void - > { - await beforeEachTest(); - try { - await client.getPartitionProperties("0"); - throw new Error(testFailureMessage); - } catch (err) { - debug(err); - should.equal(err.message, expectedErrorMsg); + should.exist(properties!.platform); + if (customValue) { + properties!["user-agent"].should.endWith(customValue); } - }); + } - it("should throw connection closed error for getPartitionIds", async function(): Promise { - await beforeEachTest(); - try { + describe("EventHubConsumerClient after close()", function(): void { + let client: EventHubConsumerClient; + const expectedErrorMsg = "The underlying AMQP connection is closed."; + + async function beforeEachTest(): Promise { + should.exist( + env[EnvVarKeys.EVENTHUB_CONNECTION_STRING], + "define EVENTHUB_CONNECTION_STRING in your environment before running integration tests." + ); + should.exist( + env[EnvVarKeys.EVENTHUB_NAME], + "define EVENTHUB_NAME in your environment before running integration tests." + ); + client = new EventHubConsumerClient( + "$Default", + env[EnvVarKeys.EVENTHUB_CONNECTION_STRING], + env[EnvVarKeys.EVENTHUB_NAME] + ); + + // Ensure that the connection is opened await client.getPartitionIds(); - throw new Error(testFailureMessage); - } catch (err) { - debug(err); - should.equal(err.message, expectedErrorMsg); + + // close(), so that we can then test the resulting error. 
+ await client.close(); } - }); - it("should throw connection closed error while subscribe()", async function(): Promise { - await beforeEachTest(); - let subscription: Subscription | undefined; - const caughtErr = await new Promise((resolve) => { - subscription = client.subscribe({ - processEvents: async () => { - /* no-op */ - }, - processError: async (err) => { - resolve(err); - } - }); + it("should throw connection closed error for getEventHubProperties", async function(): Promise< + void + > { + await beforeEachTest(); + try { + await client.getEventHubProperties(); + throw new Error(testFailureMessage); + } catch (err) { + debug(err); + should.equal(err.message, expectedErrorMsg); + } }); - if (subscription) { - await subscription.close(); - } - debug(caughtErr); - should.equal(caughtErr.message, expectedErrorMsg); - }); -}); -describe("EventHubProducerClient after close()", function(): void { - let client: EventHubProducerClient; - const expectedErrorMsg = "The underlying AMQP connection is closed."; + it("should throw connection closed error for getPartitionProperties", async function(): Promise< + void + > { + await beforeEachTest(); + try { + await client.getPartitionProperties("0"); + throw new Error(testFailureMessage); + } catch (err) { + debug(err); + should.equal(err.message, expectedErrorMsg); + } + }); - async function beforeEachTest(): Promise { - should.exist( - env[EnvVarKeys.EVENTHUB_CONNECTION_STRING], - "define EVENTHUB_CONNECTION_STRING in your environment before running integration tests." - ); - should.exist( - env[EnvVarKeys.EVENTHUB_NAME], - "define EVENTHUB_NAME in your environment before running integration tests." - ); - client = new EventHubProducerClient( - env[EnvVarKeys.EVENTHUB_CONNECTION_STRING], - env[EnvVarKeys.EVENTHUB_NAME] - ); + it("should throw connection closed error for getPartitionIds", async function(): Promise { + await beforeEachTest(); + try { + await client.getPartitionIds(); + throw new Error(testFailureMessage); + } catch (err) { + debug(err); + should.equal(err.message, expectedErrorMsg); + } + }); - // Ensure that the connection is opened - await client.getPartitionIds(); + it("should throw connection closed error while subscribe()", async function(): Promise { + await beforeEachTest(); + let subscription: Subscription | undefined; + const caughtErr = await new Promise((resolve) => { + subscription = client.subscribe({ + processEvents: async () => { + /* no-op */ + }, + processError: async (err) => { + resolve(err); + } + }); + }); + if (subscription) { + await subscription.close(); + } + debug(caughtErr); + should.equal(caughtErr.message, expectedErrorMsg); + }); + }); - // close(), so that we can then test the resulting error. - await client.close(); - } + describe("EventHubProducerClient after close()", function(): void { + let client: EventHubProducerClient; + const expectedErrorMsg = "The underlying AMQP connection is closed."; + + async function beforeEachTest(): Promise { + should.exist( + env[EnvVarKeys.EVENTHUB_CONNECTION_STRING], + "define EVENTHUB_CONNECTION_STRING in your environment before running integration tests." + ); + should.exist( + env[EnvVarKeys.EVENTHUB_NAME], + "define EVENTHUB_NAME in your environment before running integration tests." 
+ ); + client = new EventHubProducerClient( + env[EnvVarKeys.EVENTHUB_CONNECTION_STRING], + env[EnvVarKeys.EVENTHUB_NAME] + ); + + // Ensure that the connection is opened + await client.getPartitionIds(); - it("should throw connection closed error for getEventHubProperties", async function(): Promise< - void - > { - await beforeEachTest(); - try { - await client.getEventHubProperties(); - throw new Error(testFailureMessage); - } catch (err) { - debug(err); - should.equal(err.message, expectedErrorMsg); + // close(), so that we can then test the resulting error. + await client.close(); } - }); - it("should throw connection closed error for getPartitionProperties", async function(): Promise< - void - > { - await beforeEachTest(); - try { - await client.getPartitionProperties("0"); - throw new Error(testFailureMessage); - } catch (err) { - debug(err); - should.equal(err.message, expectedErrorMsg); - } - }); + it("should throw connection closed error for getEventHubProperties", async function(): Promise< + void + > { + await beforeEachTest(); + try { + await client.getEventHubProperties(); + throw new Error(testFailureMessage); + } catch (err) { + debug(err); + should.equal(err.message, expectedErrorMsg); + } + }); - it("should throw connection closed error for getPartitionIds", async function(): Promise { - await beforeEachTest(); - try { - await client.getPartitionIds(); - throw new Error(testFailureMessage); - } catch (err) { - debug(err); - should.equal(err.message, expectedErrorMsg); - } - }); + it("should throw connection closed error for getPartitionProperties", async function(): Promise< + void + > { + await beforeEachTest(); + try { + await client.getPartitionProperties("0"); + throw new Error(testFailureMessage); + } catch (err) { + debug(err); + should.equal(err.message, expectedErrorMsg); + } + }); - it("should throw connection closed error while sending", async function(): Promise { - await beforeEachTest(); - try { - await client.sendBatch([{ body: "Hello World" }]); - throw new Error(testFailureMessage); - } catch (err) { - debug(err); - should.equal(err.message, expectedErrorMsg); - } - }); + it("should throw connection closed error for getPartitionIds", async function(): Promise { + await beforeEachTest(); + try { + await client.getPartitionIds(); + throw new Error(testFailureMessage); + } catch (err) { + debug(err); + should.equal(err.message, expectedErrorMsg); + } + }); - it("should throw connection closed error while creating a batch", async function(): Promise< - void - > { - await beforeEachTest(); - try { - await client.createBatch(); - throw new Error(testFailureMessage); - } catch (err) { - debug(err); - should.equal(err.message, expectedErrorMsg); - } + it("should throw connection closed error while sending", async function(): Promise { + await beforeEachTest(); + try { + await client.sendBatch([{ body: "Hello World" }]); + throw new Error(testFailureMessage); + } catch (err) { + debug(err); + should.equal(err.message, expectedErrorMsg); + } + }); + + it("should throw connection closed error while creating a batch", async function(): Promise< + void + > { + await beforeEachTest(); + try { + await client.createBatch(); + throw new Error(testFailureMessage); + } catch (err) { + debug(err); + should.equal(err.message, expectedErrorMsg); + } + }); }); }); diff --git a/sdk/eventhub/event-hubs/test/internal/config.spec.ts b/sdk/eventhub/event-hubs/test/internal/config.spec.ts index 6404b99f9320..9c11a461d486 100644 --- a/sdk/eventhub/event-hubs/test/internal/config.spec.ts 
+++ b/sdk/eventhub/event-hubs/test/internal/config.spec.ts @@ -3,81 +3,25 @@ import { EventHubConnectionConfig } from "../../src/eventhubConnectionConfig"; import chai from "chai"; +import { testWithServiceTypes } from "../public/utils/testWithServiceTypes"; const should = chai.should(); -describe("ConnectionConfig", function() { - describe("EventHub", function() { - it("should fail if connection config does not contain path and the connectionstring also does not contain EntityPath", function(done) { - const connectionString = - "Endpoint=sb://hostname.servicebus.windows.net/;SharedAccessKeyName=sakName;SharedAccessKey=sak"; - try { - EventHubConnectionConfig.create(connectionString); - done(new Error("Should not have reached here.")); - } catch (err) { - err.message.should.match(/Either provide "path" or the "connectionString".*/gi); - } - done(); - }); - - it("should correctly populate config properties from an EventHubs connection string and the helper methods should work as expected", function(done) { - const config = EventHubConnectionConfig.create( - "Endpoint=sb://hostname.servicebus.windows.net/;SharedAccessKeyName=sakName;SharedAccessKey=sak;EntityPath=ep" - ); - config.should.have.property("host").that.equals("hostname.servicebus.windows.net"); - config.should.have.property("sharedAccessKeyName").that.equals("sakName"); - config.should.have.property("sharedAccessKey").that.equals("sak"); - config.should.have.property("entityPath").that.equals("ep"); - - config.getManagementAddress().should.equal("ep/$management"); - config.getSenderAddress().should.equal("ep"); - config.getSenderAddress("0").should.equal("ep/Partitions/0"); - config.getSenderAddress(0).should.equal("ep/Partitions/0"); - config.getReceiverAddress("0").should.equal("ep/ConsumerGroups/$default/Partitions/0"); - config.getReceiverAddress(0).should.equal("ep/ConsumerGroups/$default/Partitions/0"); - config.getReceiverAddress("0", "cg").should.equal("ep/ConsumerGroups/cg/Partitions/0"); - config.getReceiverAddress(0, "cg").should.equal("ep/ConsumerGroups/cg/Partitions/0"); - - config - .getManagementAudience() - .should.equal("sb://hostname.servicebus.windows.net/ep/$management"); - config.getSenderAudience().should.equal("sb://hostname.servicebus.windows.net/ep"); - config - .getSenderAudience("0") - .should.equal("sb://hostname.servicebus.windows.net/ep/Partitions/0"); - config - .getSenderAudience(0) - .should.equal("sb://hostname.servicebus.windows.net/ep/Partitions/0"); - config - .getReceiverAudience("0") - .should.equal( - "sb://hostname.servicebus.windows.net/ep/ConsumerGroups/$default/Partitions/0" - ); - config - .getReceiverAudience(0) - .should.equal( - "sb://hostname.servicebus.windows.net/ep/ConsumerGroups/$default/Partitions/0" - ); - config - .getReceiverAudience("0", "cg") - .should.equal("sb://hostname.servicebus.windows.net/ep/ConsumerGroups/cg/Partitions/0"); - config - .getReceiverAudience(0, "cg") - .should.equal("sb://hostname.servicebus.windows.net/ep/ConsumerGroups/cg/Partitions/0"); - done(); - }); - - it("requires that Endpoint be present in the connection string", (done) => { - const connectionString = `Endpoint=sb://a`; - - should.throw(() => { - EventHubConnectionConfig.create(connectionString); - }, /must contain EntityPath/); - - done(); - }); +testWithServiceTypes(() => { + describe("ConnectionConfig", function() { + describe("EventHub", function() { + it("should fail if connection config does not contain path and the connectionstring also does not contain EntityPath", function(done) { 
+ const connectionString = + "Endpoint=sb://hostname.servicebus.windows.net/;SharedAccessKeyName=sakName;SharedAccessKey=sak"; + try { + EventHubConnectionConfig.create(connectionString); + done(new Error("Should not have reached here.")); + } catch (err) { + err.message.should.match(/Either provide "path" or the "connectionString".*/gi); + } + done(); + }); - describe("setCustomEndpointAddress", () => { - it("overwrites host", () => { + it("should correctly populate config properties from an EventHubs connection string and the helper methods should work as expected", function(done) { const config = EventHubConnectionConfig.create( "Endpoint=sb://hostname.servicebus.windows.net/;SharedAccessKeyName=sakName;SharedAccessKey=sak;EntityPath=ep" ); @@ -121,65 +65,128 @@ describe("ConnectionConfig", function() { config .getReceiverAudience(0, "cg") .should.equal("sb://hostname.servicebus.windows.net/ep/ConsumerGroups/cg/Partitions/0"); - - EventHubConnectionConfig.setCustomEndpointAddress(config, "https://foo.private.endpoint"); - config.should.have.property("amqpHostname").that.equals("hostname.servicebus.windows.net"); - config.should.have.property("host").that.equals("foo.private.endpoint"); - config.should.not.have.property("port"); + done(); }); - it("overwrites host and port", () => { - const config = EventHubConnectionConfig.create( - "Endpoint=sb://hostname.servicebus.windows.net/;SharedAccessKeyName=sakName;SharedAccessKey=sak;EntityPath=ep" - ); - config.should.have.property("host").that.equals("hostname.servicebus.windows.net"); - config.should.have.property("sharedAccessKeyName").that.equals("sakName"); - config.should.have.property("sharedAccessKey").that.equals("sak"); - config.should.have.property("entityPath").that.equals("ep"); + it("requires that Endpoint be present in the connection string", (done) => { + const connectionString = `Endpoint=sb://a`; - config.getManagementAddress().should.equal("ep/$management"); - config.getSenderAddress().should.equal("ep"); - config.getSenderAddress("0").should.equal("ep/Partitions/0"); - config.getSenderAddress(0).should.equal("ep/Partitions/0"); - config.getReceiverAddress("0").should.equal("ep/ConsumerGroups/$default/Partitions/0"); - config.getReceiverAddress(0).should.equal("ep/ConsumerGroups/$default/Partitions/0"); - config.getReceiverAddress("0", "cg").should.equal("ep/ConsumerGroups/cg/Partitions/0"); - config.getReceiverAddress(0, "cg").should.equal("ep/ConsumerGroups/cg/Partitions/0"); + should.throw(() => { + EventHubConnectionConfig.create(connectionString); + }, /must contain EntityPath/); - config - .getManagementAudience() - .should.equal("sb://hostname.servicebus.windows.net/ep/$management"); - config.getSenderAudience().should.equal("sb://hostname.servicebus.windows.net/ep"); - config - .getSenderAudience("0") - .should.equal("sb://hostname.servicebus.windows.net/ep/Partitions/0"); - config - .getSenderAudience(0) - .should.equal("sb://hostname.servicebus.windows.net/ep/Partitions/0"); - config - .getReceiverAudience("0") - .should.equal( - "sb://hostname.servicebus.windows.net/ep/ConsumerGroups/$default/Partitions/0" + done(); + }); + + describe("setCustomEndpointAddress", () => { + it("overwrites host", () => { + const config = EventHubConnectionConfig.create( + "Endpoint=sb://hostname.servicebus.windows.net/;SharedAccessKeyName=sakName;SharedAccessKey=sak;EntityPath=ep" ); - config - .getReceiverAudience(0) - .should.equal( - "sb://hostname.servicebus.windows.net/ep/ConsumerGroups/$default/Partitions/0" + 
config.should.have.property("host").that.equals("hostname.servicebus.windows.net"); + config.should.have.property("sharedAccessKeyName").that.equals("sakName"); + config.should.have.property("sharedAccessKey").that.equals("sak"); + config.should.have.property("entityPath").that.equals("ep"); + + config.getManagementAddress().should.equal("ep/$management"); + config.getSenderAddress().should.equal("ep"); + config.getSenderAddress("0").should.equal("ep/Partitions/0"); + config.getSenderAddress(0).should.equal("ep/Partitions/0"); + config.getReceiverAddress("0").should.equal("ep/ConsumerGroups/$default/Partitions/0"); + config.getReceiverAddress(0).should.equal("ep/ConsumerGroups/$default/Partitions/0"); + config.getReceiverAddress("0", "cg").should.equal("ep/ConsumerGroups/cg/Partitions/0"); + config.getReceiverAddress(0, "cg").should.equal("ep/ConsumerGroups/cg/Partitions/0"); + + config + .getManagementAudience() + .should.equal("sb://hostname.servicebus.windows.net/ep/$management"); + config.getSenderAudience().should.equal("sb://hostname.servicebus.windows.net/ep"); + config + .getSenderAudience("0") + .should.equal("sb://hostname.servicebus.windows.net/ep/Partitions/0"); + config + .getSenderAudience(0) + .should.equal("sb://hostname.servicebus.windows.net/ep/Partitions/0"); + config + .getReceiverAudience("0") + .should.equal( + "sb://hostname.servicebus.windows.net/ep/ConsumerGroups/$default/Partitions/0" + ); + config + .getReceiverAudience(0) + .should.equal( + "sb://hostname.servicebus.windows.net/ep/ConsumerGroups/$default/Partitions/0" + ); + config + .getReceiverAudience("0", "cg") + .should.equal("sb://hostname.servicebus.windows.net/ep/ConsumerGroups/cg/Partitions/0"); + config + .getReceiverAudience(0, "cg") + .should.equal("sb://hostname.servicebus.windows.net/ep/ConsumerGroups/cg/Partitions/0"); + + EventHubConnectionConfig.setCustomEndpointAddress(config, "https://foo.private.endpoint"); + config.should.have + .property("amqpHostname") + .that.equals("hostname.servicebus.windows.net"); + config.should.have.property("host").that.equals("foo.private.endpoint"); + config.should.not.have.property("port"); + }); + + it("overwrites host and port", () => { + const config = EventHubConnectionConfig.create( + "Endpoint=sb://hostname.servicebus.windows.net/;SharedAccessKeyName=sakName;SharedAccessKey=sak;EntityPath=ep" ); - config - .getReceiverAudience("0", "cg") - .should.equal("sb://hostname.servicebus.windows.net/ep/ConsumerGroups/cg/Partitions/0"); - config - .getReceiverAudience(0, "cg") - .should.equal("sb://hostname.servicebus.windows.net/ep/ConsumerGroups/cg/Partitions/0"); - - EventHubConnectionConfig.setCustomEndpointAddress( - config, - "https://foo.private.endpoint:1111" - ); - config.should.have.property("amqpHostname").that.equals("hostname.servicebus.windows.net"); - config.should.have.property("host").that.equals("foo.private.endpoint"); - config.should.have.property("port").that.equals(1111); + config.should.have.property("host").that.equals("hostname.servicebus.windows.net"); + config.should.have.property("sharedAccessKeyName").that.equals("sakName"); + config.should.have.property("sharedAccessKey").that.equals("sak"); + config.should.have.property("entityPath").that.equals("ep"); + + config.getManagementAddress().should.equal("ep/$management"); + config.getSenderAddress().should.equal("ep"); + config.getSenderAddress("0").should.equal("ep/Partitions/0"); + config.getSenderAddress(0).should.equal("ep/Partitions/0"); + 
config.getReceiverAddress("0").should.equal("ep/ConsumerGroups/$default/Partitions/0"); + config.getReceiverAddress(0).should.equal("ep/ConsumerGroups/$default/Partitions/0"); + config.getReceiverAddress("0", "cg").should.equal("ep/ConsumerGroups/cg/Partitions/0"); + config.getReceiverAddress(0, "cg").should.equal("ep/ConsumerGroups/cg/Partitions/0"); + + config + .getManagementAudience() + .should.equal("sb://hostname.servicebus.windows.net/ep/$management"); + config.getSenderAudience().should.equal("sb://hostname.servicebus.windows.net/ep"); + config + .getSenderAudience("0") + .should.equal("sb://hostname.servicebus.windows.net/ep/Partitions/0"); + config + .getSenderAudience(0) + .should.equal("sb://hostname.servicebus.windows.net/ep/Partitions/0"); + config + .getReceiverAudience("0") + .should.equal( + "sb://hostname.servicebus.windows.net/ep/ConsumerGroups/$default/Partitions/0" + ); + config + .getReceiverAudience(0) + .should.equal( + "sb://hostname.servicebus.windows.net/ep/ConsumerGroups/$default/Partitions/0" + ); + config + .getReceiverAudience("0", "cg") + .should.equal("sb://hostname.servicebus.windows.net/ep/ConsumerGroups/cg/Partitions/0"); + config + .getReceiverAudience(0, "cg") + .should.equal("sb://hostname.servicebus.windows.net/ep/ConsumerGroups/cg/Partitions/0"); + + EventHubConnectionConfig.setCustomEndpointAddress( + config, + "https://foo.private.endpoint:1111" + ); + config.should.have + .property("amqpHostname") + .that.equals("hostname.servicebus.windows.net"); + config.should.have.property("host").that.equals("foo.private.endpoint"); + config.should.have.property("port").that.equals(1111); + }); }); }); }); diff --git a/sdk/eventhub/event-hubs/test/internal/dataTransformer.spec.ts b/sdk/eventhub/event-hubs/test/internal/dataTransformer.spec.ts index 14257983e175..ea02aa53f38b 100644 --- a/sdk/eventhub/event-hubs/test/internal/dataTransformer.spec.ts +++ b/sdk/eventhub/event-hubs/test/internal/dataTransformer.spec.ts @@ -7,353 +7,356 @@ const should = chai.should(); import * as assert from "assert"; import isBuffer from "is-buffer"; import { defaultDataTransformer } from "../../src/dataTransformer"; - -describe("DataTransformer", function() { - const objectBody: any = { - id: "123-456-789", - weight: 10, - isBlue: true, - siblings: [ +import { testWithServiceTypes } from "../public/utils/testWithServiceTypes"; + +testWithServiceTypes(() => { + describe("DataTransformer", function() { + const objectBody: any = { + id: "123-456-789", + weight: 10, + isBlue: true, + siblings: [ + { + id: "098-789-564", + weight: 20, + isBlue: false + } + ] + }; + const arrayBody = [ { id: "098-789-564", weight: 20, isBlue: false - } - ] - }; - const arrayBody = [ - { - id: "098-789-564", - weight: 20, - isBlue: false - }, - 10, - 20, - "some string" - ]; - const stringBody: string = "some string"; - const booleanBody: boolean = true; - const numberBody: number = 10.2; - const nullBody: null = null; - const undefinedBody: undefined = undefined; - const emptyStringBody: string = ""; - const bufferBody: Buffer = Buffer.from("zzz", "utf8"); - const hexBufferBody: Buffer = Buffer.from("7468697320697320612074c3a97374", "hex"); - const transformer = defaultDataTransformer; - - describe(`encoded bodyType: "data"`, () => { - const bodyType = "data"; - - it("should correctly encode/decode a string message body", function(done) { - const encoded: any = transformer.encode(stringBody, "data"); - encoded.typecode.should.equal(117); - isBuffer(encoded.content).should.equal(true); - const { 
body: decoded, bodyType: decodedType } = transformer.decode(encoded); - should.equal(decodedType, bodyType); - (decoded as any).should.equal(stringBody); - done(); - }); - - it("should correctly encode/decode a number message body", function(done) { - const encoded: any = transformer.encode(numberBody, "data"); - encoded.typecode.should.equal(117); - isBuffer(encoded.content).should.equal(true); - const { body: decoded, bodyType: decodedType } = transformer.decode(encoded); - should.equal(decodedType, bodyType); - (decoded as any).should.equal(numberBody); - done(); - }); - - it("should correctly encode/decode a boolean message body", function(done) { - const encoded: any = transformer.encode(booleanBody, "data"); - encoded.typecode.should.equal(117); - isBuffer(encoded.content).should.equal(true); - const { body: decoded, bodyType: decodedType } = transformer.decode(encoded); - should.equal(decodedType, bodyType); - (decoded as any).should.equal(booleanBody); - done(); - }); - - it("should correctly encode/decode a null message body", function(done) { - const encoded: any = transformer.encode(nullBody, "data"); - encoded.typecode.should.equal(117); - isBuffer(encoded.content).should.equal(false); - const { body: decoded, bodyType: decodedType } = transformer.decode(encoded); - should.equal(decodedType, bodyType); - should.equal(decoded, nullBody); - done(); - }); - - it("should correctly encode/decode an undefined message body", function(done) { - const encoded: any = transformer.encode(undefinedBody, "data"); - encoded.typecode.should.equal(117); - isBuffer(encoded.content).should.equal(false); - const { body: decoded, bodyType: decodedType } = transformer.decode(encoded); - should.equal(decodedType, bodyType); - should.equal(decoded, nullBody); - done(); - }); - - it("should correctly encode/decode an empty string message body", function(done) { - const encoded: any = transformer.encode(emptyStringBody, "data"); - encoded.typecode.should.equal(117); - isBuffer(encoded.content).should.equal(true); - const { body: decoded, bodyType: decodedType } = transformer.decode(encoded); - should.equal(decodedType, bodyType); - (decoded as any).should.equal(emptyStringBody); - done(); - }); - - it("should correctly encode/decode an array message body", function(done) { - const encoded: any = transformer.encode(arrayBody, "data"); - encoded.typecode.should.equal(117); - isBuffer(encoded.content).should.equal(true); - const { body: decoded, bodyType: decodedType } = transformer.decode(encoded); - should.equal(decodedType, bodyType); - assert.deepEqual(decoded, arrayBody); - done(); - }); - - it("should correctly encode/decode an object message body", function(done) { - const encoded: any = transformer.encode(objectBody, "data"); - encoded.typecode.should.equal(117); - isBuffer(encoded.content).should.equal(true); - const { body: decoded, bodyType: decodedType } = transformer.decode(encoded); - should.equal(decodedType, bodyType); - assert.deepEqual(decoded, objectBody); - done(); - }); - - it("should correctly encode/decode a buffer message body", function(done) { - const encoded: any = transformer.encode(bufferBody, "data"); - encoded.typecode.should.equal(117); - isBuffer(encoded.content).should.equal(true); - const { body: decoded, bodyType: decodedType } = transformer.decode(encoded); - should.equal(decodedType, bodyType); - assert.deepEqual(decoded, bufferBody); - done(); - }); - - it("should correctly encode/decode a hex buffer message body", function(done) { - const encoded: any = 
transformer.encode(hexBufferBody, "data"); - encoded.typecode.should.equal(117); - isBuffer(encoded.content).should.equal(true); - const { body: decoded, bodyType: decodedType } = transformer.decode(encoded); - should.equal(decodedType, bodyType); - assert.deepEqual(decoded, hexBufferBody); - done(); - }); - }); - - describe(`encoded bodyType: "value"`, () => { - const expectedTypeCode = 0x77; - const bodyType = "value"; - - it("should correctly encode/decode a string message body", function(done) { - const encoded: any = transformer.encode(stringBody, bodyType); - encoded.typecode.should.equal(expectedTypeCode); - const { body: decoded, bodyType: decodedType } = transformer.decode(encoded); - should.equal(decodedType, bodyType); - (decoded as any).should.equal(stringBody); - done(); - }); - - it("should correctly encode/decode a number message body", function(done) { - const encoded: any = transformer.encode(numberBody, bodyType); - encoded.typecode.should.equal(expectedTypeCode); - const { body: decoded, bodyType: decodedType } = transformer.decode(encoded); - should.equal(decodedType, bodyType); - (decoded as any).should.equal(numberBody); - done(); - }); - - it("should correctly encode/decode a boolean message body", function(done) { - const encoded: any = transformer.encode(booleanBody, bodyType); - encoded.typecode.should.equal(expectedTypeCode); - const { body: decoded, bodyType: decodedType } = transformer.decode(encoded); - should.equal(decodedType, bodyType); - (decoded as any).should.equal(booleanBody); - done(); - }); - - it("should correctly encode/decode a null message body", function(done) { - const encoded: any = transformer.encode(nullBody, bodyType); - encoded.typecode.should.equal(expectedTypeCode); - const { body: decoded, bodyType: decodedType } = transformer.decode(encoded); - should.equal(decodedType, bodyType); - should.equal(decoded, nullBody); - done(); - }); - - it("should correctly encode/decode an undefined message body", function(done) { - const encoded: any = transformer.encode(undefinedBody, bodyType); - encoded.typecode.should.equal(expectedTypeCode); - const { body: decoded, bodyType: decodedType } = transformer.decode(encoded); - should.equal(decodedType, bodyType); - should.equal(decoded, nullBody); - done(); - }); - - it("should correctly encode/decode an empty string message body", function(done) { - const encoded: any = transformer.encode(emptyStringBody, bodyType); - encoded.typecode.should.equal(expectedTypeCode); - const { body: decoded, bodyType: decodedType } = transformer.decode(encoded); - should.equal(decodedType, bodyType); - (decoded as any).should.equal(emptyStringBody); - done(); - }); - - it("should correctly encode/decode an array message body", function(done) { - const encoded: any = transformer.encode(arrayBody, bodyType); - encoded.typecode.should.equal(expectedTypeCode); - const { body: decoded, bodyType: decodedType } = transformer.decode(encoded); - should.equal(decodedType, bodyType); - assert.deepEqual(decoded, arrayBody); - done(); - }); - - it("should correctly encode/decode an object message body", function(done) { - const encoded: any = transformer.encode(objectBody, bodyType); - encoded.typecode.should.equal(expectedTypeCode); - const { body: decoded, bodyType: decodedType } = transformer.decode(encoded); - should.equal(decodedType, bodyType); - assert.deepEqual(decoded, objectBody); - done(); - }); - - it("should correctly encode/decode a buffer message body", function(done) { - const encoded: any = 
transformer.encode(bufferBody, bodyType); - encoded.typecode.should.equal(expectedTypeCode); - isBuffer(encoded.content).should.equal(true); - const { body: decoded, bodyType: decodedType } = transformer.decode(encoded); - should.equal(decodedType, bodyType); - assert.deepEqual(decoded, bufferBody); - done(); - }); - - it("should correctly encode/decode a hex buffer message body", function(done) { - const encoded: any = transformer.encode(hexBufferBody, bodyType); - encoded.typecode.should.equal(expectedTypeCode); - isBuffer(encoded.content).should.equal(true); - const { body: decoded, bodyType: decodedType } = transformer.decode(encoded); - should.equal(decodedType, bodyType); - assert.deepEqual(decoded, hexBufferBody); - done(); - }); - }); - - describe(`encoded bodyType: "sequence"`, () => { - const expectedTypeCode = 0x76; - const bodyType = "sequence"; - - it("should correctly encode/decode a null message body", function(done) { - const encoded: any = transformer.encode(nullBody, bodyType); - encoded.typecode.should.equal(expectedTypeCode); - const { body: decoded, bodyType: decodedType } = transformer.decode(encoded); - should.equal(decodedType, bodyType); - should.equal(decoded, nullBody); - done(); - }); - - it("should correctly encode/decode an undefined message body", function(done) { - const encoded: any = transformer.encode(undefinedBody, bodyType); - encoded.typecode.should.equal(expectedTypeCode); - const { body: decoded, bodyType: decodedType } = transformer.decode(encoded); - should.equal(decodedType, bodyType); - should.equal(decoded, nullBody); - done(); - }); - - it("should correctly encode/decode an array message body", function(done) { - const encoded: any = transformer.encode(arrayBody, bodyType); - encoded.typecode.should.equal(expectedTypeCode); - const { body: decoded, bodyType: decodedType } = transformer.decode(encoded); - should.equal(decodedType, bodyType); - assert.deepEqual(decoded, arrayBody); - done(); - }); - - it("should correctly encode/decode an object message body", function(done) { - const encoded: any = transformer.encode(objectBody, bodyType); - encoded.typecode.should.equal(expectedTypeCode); - const { body: decoded, bodyType: decodedType } = transformer.decode(encoded); - should.equal(decodedType, bodyType); - assert.deepEqual(decoded, objectBody); - done(); - }); - }); - - describe("decode", function() { - // It is possible that we receive an AMQP value type from the messages that were sent with - // previously shipped version of the sdk. If so then we should be able to handle those scenarios. 
- it("should correctly decode a string message body", function(done) { - const { body: decoded, bodyType: decodedType } = transformer.decode(stringBody); - should.equal(decodedType, "value"); - (decoded as any).should.equal(stringBody); - done(); - }); - - it("should correctly decode a number message body", function(done) { - const { body: decoded, bodyType: decodedType } = transformer.decode(numberBody); - should.equal(decodedType, "value"); - (decoded as any).should.equal(numberBody); - done(); - }); - - it("should correctly decode a boolean message body", function(done) { - const { body: decoded, bodyType: decodedType } = transformer.decode(booleanBody); - should.equal(decodedType, "value"); - (decoded as any).should.equal(booleanBody); - done(); - }); - - it("should correctly decode a null message body", function(done) { - const { body: decoded, bodyType: decodedType } = transformer.decode(nullBody); - should.equal(decodedType, "value"); - should.equal(decoded, nullBody); - done(); - }); - - it("should correctly decode an undefined message body", function(done) { - const { body: decoded, bodyType: decodedType } = transformer.decode(undefinedBody); - should.equal(decodedType, "value"); - should.equal(decoded, undefined); - done(); - }); - - it("should correctly decode an empty string message body", function(done) { - const { body: decoded, bodyType: decodedType } = transformer.decode(emptyStringBody); - should.equal(decodedType, "value"); - (decoded as any).should.equal(emptyStringBody); - done(); - }); - - it("should correctly decode an array message body", function(done) { - const { body: decoded, bodyType: decodedType } = transformer.decode(arrayBody); - should.equal(decodedType, "value"); - assert.deepEqual(decoded, arrayBody); - done(); - }); - - it("should correctly decode an object message body", function(done) { - const { body: decoded, bodyType: decodedType } = transformer.decode(objectBody); - should.equal(decodedType, "value"); - assert.deepEqual(decoded, objectBody); - done(); - }); - - it("should correctly decode a buffer message body", function(done) { - const { body: decoded, bodyType: decodedType } = transformer.decode(bufferBody); - should.equal(decodedType, "data"); - assert.deepEqual(decoded, bufferBody); - done(); - }); - - it("should correctly decode a hex buffer message body", function(done) { - const { body: decoded, bodyType: decodedType } = transformer.decode(hexBufferBody); - should.equal(decodedType, "data"); - assert.deepEqual(decoded, hexBufferBody); - done(); + }, + 10, + 20, + "some string" + ]; + const stringBody: string = "some string"; + const booleanBody: boolean = true; + const numberBody: number = 10.2; + const nullBody: null = null; + const undefinedBody: undefined = undefined; + const emptyStringBody: string = ""; + const bufferBody: Buffer = Buffer.from("zzz", "utf8"); + const hexBufferBody: Buffer = Buffer.from("7468697320697320612074c3a97374", "hex"); + const transformer = defaultDataTransformer; + + describe(`encoded bodyType: "data"`, () => { + const bodyType = "data"; + + it("should correctly encode/decode a string message body", function(done) { + const encoded: any = transformer.encode(stringBody, "data"); + encoded.typecode.should.equal(117); + isBuffer(encoded.content).should.equal(true); + const { body: decoded, bodyType: decodedType } = transformer.decode(encoded); + should.equal(decodedType, bodyType); + (decoded as any).should.equal(stringBody); + done(); + }); + + it("should correctly encode/decode a number message body", 
function(done) { + const encoded: any = transformer.encode(numberBody, "data"); + encoded.typecode.should.equal(117); + isBuffer(encoded.content).should.equal(true); + const { body: decoded, bodyType: decodedType } = transformer.decode(encoded); + should.equal(decodedType, bodyType); + (decoded as any).should.equal(numberBody); + done(); + }); + + it("should correctly encode/decode a boolean message body", function(done) { + const encoded: any = transformer.encode(booleanBody, "data"); + encoded.typecode.should.equal(117); + isBuffer(encoded.content).should.equal(true); + const { body: decoded, bodyType: decodedType } = transformer.decode(encoded); + should.equal(decodedType, bodyType); + (decoded as any).should.equal(booleanBody); + done(); + }); + + it("should correctly encode/decode a null message body", function(done) { + const encoded: any = transformer.encode(nullBody, "data"); + encoded.typecode.should.equal(117); + isBuffer(encoded.content).should.equal(false); + const { body: decoded, bodyType: decodedType } = transformer.decode(encoded); + should.equal(decodedType, bodyType); + should.equal(decoded, nullBody); + done(); + }); + + it("should correctly encode/decode an undefined message body", function(done) { + const encoded: any = transformer.encode(undefinedBody, "data"); + encoded.typecode.should.equal(117); + isBuffer(encoded.content).should.equal(false); + const { body: decoded, bodyType: decodedType } = transformer.decode(encoded); + should.equal(decodedType, bodyType); + should.equal(decoded, nullBody); + done(); + }); + + it("should correctly encode/decode an empty string message body", function(done) { + const encoded: any = transformer.encode(emptyStringBody, "data"); + encoded.typecode.should.equal(117); + isBuffer(encoded.content).should.equal(true); + const { body: decoded, bodyType: decodedType } = transformer.decode(encoded); + should.equal(decodedType, bodyType); + (decoded as any).should.equal(emptyStringBody); + done(); + }); + + it("should correctly encode/decode an array message body", function(done) { + const encoded: any = transformer.encode(arrayBody, "data"); + encoded.typecode.should.equal(117); + isBuffer(encoded.content).should.equal(true); + const { body: decoded, bodyType: decodedType } = transformer.decode(encoded); + should.equal(decodedType, bodyType); + assert.deepEqual(decoded, arrayBody); + done(); + }); + + it("should correctly encode/decode an object message body", function(done) { + const encoded: any = transformer.encode(objectBody, "data"); + encoded.typecode.should.equal(117); + isBuffer(encoded.content).should.equal(true); + const { body: decoded, bodyType: decodedType } = transformer.decode(encoded); + should.equal(decodedType, bodyType); + assert.deepEqual(decoded, objectBody); + done(); + }); + + it("should correctly encode/decode a buffer message body", function(done) { + const encoded: any = transformer.encode(bufferBody, "data"); + encoded.typecode.should.equal(117); + isBuffer(encoded.content).should.equal(true); + const { body: decoded, bodyType: decodedType } = transformer.decode(encoded); + should.equal(decodedType, bodyType); + assert.deepEqual(decoded, bufferBody); + done(); + }); + + it("should correctly encode/decode a hex buffer message body", function(done) { + const encoded: any = transformer.encode(hexBufferBody, "data"); + encoded.typecode.should.equal(117); + isBuffer(encoded.content).should.equal(true); + const { body: decoded, bodyType: decodedType } = transformer.decode(encoded); + should.equal(decodedType, 
bodyType); + assert.deepEqual(decoded, hexBufferBody); + done(); + }); + }); + + describe(`encoded bodyType: "value"`, () => { + const expectedTypeCode = 0x77; + const bodyType = "value"; + + it("should correctly encode/decode a string message body", function(done) { + const encoded: any = transformer.encode(stringBody, bodyType); + encoded.typecode.should.equal(expectedTypeCode); + const { body: decoded, bodyType: decodedType } = transformer.decode(encoded); + should.equal(decodedType, bodyType); + (decoded as any).should.equal(stringBody); + done(); + }); + + it("should correctly encode/decode a number message body", function(done) { + const encoded: any = transformer.encode(numberBody, bodyType); + encoded.typecode.should.equal(expectedTypeCode); + const { body: decoded, bodyType: decodedType } = transformer.decode(encoded); + should.equal(decodedType, bodyType); + (decoded as any).should.equal(numberBody); + done(); + }); + + it("should correctly encode/decode a boolean message body", function(done) { + const encoded: any = transformer.encode(booleanBody, bodyType); + encoded.typecode.should.equal(expectedTypeCode); + const { body: decoded, bodyType: decodedType } = transformer.decode(encoded); + should.equal(decodedType, bodyType); + (decoded as any).should.equal(booleanBody); + done(); + }); + + it("should correctly encode/decode a null message body", function(done) { + const encoded: any = transformer.encode(nullBody, bodyType); + encoded.typecode.should.equal(expectedTypeCode); + const { body: decoded, bodyType: decodedType } = transformer.decode(encoded); + should.equal(decodedType, bodyType); + should.equal(decoded, nullBody); + done(); + }); + + it("should correctly encode/decode an undefined message body", function(done) { + const encoded: any = transformer.encode(undefinedBody, bodyType); + encoded.typecode.should.equal(expectedTypeCode); + const { body: decoded, bodyType: decodedType } = transformer.decode(encoded); + should.equal(decodedType, bodyType); + should.equal(decoded, nullBody); + done(); + }); + + it("should correctly encode/decode an empty string message body", function(done) { + const encoded: any = transformer.encode(emptyStringBody, bodyType); + encoded.typecode.should.equal(expectedTypeCode); + const { body: decoded, bodyType: decodedType } = transformer.decode(encoded); + should.equal(decodedType, bodyType); + (decoded as any).should.equal(emptyStringBody); + done(); + }); + + it("should correctly encode/decode an array message body", function(done) { + const encoded: any = transformer.encode(arrayBody, bodyType); + encoded.typecode.should.equal(expectedTypeCode); + const { body: decoded, bodyType: decodedType } = transformer.decode(encoded); + should.equal(decodedType, bodyType); + assert.deepEqual(decoded, arrayBody); + done(); + }); + + it("should correctly encode/decode an object message body", function(done) { + const encoded: any = transformer.encode(objectBody, bodyType); + encoded.typecode.should.equal(expectedTypeCode); + const { body: decoded, bodyType: decodedType } = transformer.decode(encoded); + should.equal(decodedType, bodyType); + assert.deepEqual(decoded, objectBody); + done(); + }); + + it("should correctly encode/decode a buffer message body", function(done) { + const encoded: any = transformer.encode(bufferBody, bodyType); + encoded.typecode.should.equal(expectedTypeCode); + isBuffer(encoded.content).should.equal(true); + const { body: decoded, bodyType: decodedType } = transformer.decode(encoded); + should.equal(decodedType, bodyType); + 
assert.deepEqual(decoded, bufferBody); + done(); + }); + + it("should correctly encode/decode a hex buffer message body", function(done) { + const encoded: any = transformer.encode(hexBufferBody, bodyType); + encoded.typecode.should.equal(expectedTypeCode); + isBuffer(encoded.content).should.equal(true); + const { body: decoded, bodyType: decodedType } = transformer.decode(encoded); + should.equal(decodedType, bodyType); + assert.deepEqual(decoded, hexBufferBody); + done(); + }); + }); + + describe(`encoded bodyType: "sequence"`, () => { + const expectedTypeCode = 0x76; + const bodyType = "sequence"; + + it("should correctly encode/decode a null message body", function(done) { + const encoded: any = transformer.encode(nullBody, bodyType); + encoded.typecode.should.equal(expectedTypeCode); + const { body: decoded, bodyType: decodedType } = transformer.decode(encoded); + should.equal(decodedType, bodyType); + should.equal(decoded, nullBody); + done(); + }); + + it("should correctly encode/decode an undefined message body", function(done) { + const encoded: any = transformer.encode(undefinedBody, bodyType); + encoded.typecode.should.equal(expectedTypeCode); + const { body: decoded, bodyType: decodedType } = transformer.decode(encoded); + should.equal(decodedType, bodyType); + should.equal(decoded, nullBody); + done(); + }); + + it("should correctly encode/decode an array message body", function(done) { + const encoded: any = transformer.encode(arrayBody, bodyType); + encoded.typecode.should.equal(expectedTypeCode); + const { body: decoded, bodyType: decodedType } = transformer.decode(encoded); + should.equal(decodedType, bodyType); + assert.deepEqual(decoded, arrayBody); + done(); + }); + + it("should correctly encode/decode an object message body", function(done) { + const encoded: any = transformer.encode(objectBody, bodyType); + encoded.typecode.should.equal(expectedTypeCode); + const { body: decoded, bodyType: decodedType } = transformer.decode(encoded); + should.equal(decodedType, bodyType); + assert.deepEqual(decoded, objectBody); + done(); + }); + }); + + describe("decode", function() { + // It is possible that we receive an AMQP value type from the messages that were sent with + // previously shipped version of the sdk. If so then we should be able to handle those scenarios. 
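// A minimal sketch of that fallback path, exercising the `transformer` defined
// above. It assumes only what the tests below verify: decode() accepts a raw
// body that was never wrapped in an AMQP section, returning bodyType "value"
// for plain JS values and bodyType "data" for Buffers, so payloads from older
// SDK versions stay readable.
const legacyValue = transformer.decode("some string");
// legacyValue.bodyType === "value" and legacyValue.body === "some string"
const legacyData = transformer.decode(Buffer.from("zzz", "utf8"));
// legacyData.bodyType === "data" and legacyData.body is the original Buffer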
+ it("should correctly decode a string message body", function(done) { + const { body: decoded, bodyType: decodedType } = transformer.decode(stringBody); + should.equal(decodedType, "value"); + (decoded as any).should.equal(stringBody); + done(); + }); + + it("should correctly decode a number message body", function(done) { + const { body: decoded, bodyType: decodedType } = transformer.decode(numberBody); + should.equal(decodedType, "value"); + (decoded as any).should.equal(numberBody); + done(); + }); + + it("should correctly decode a boolean message body", function(done) { + const { body: decoded, bodyType: decodedType } = transformer.decode(booleanBody); + should.equal(decodedType, "value"); + (decoded as any).should.equal(booleanBody); + done(); + }); + + it("should correctly decode a null message body", function(done) { + const { body: decoded, bodyType: decodedType } = transformer.decode(nullBody); + should.equal(decodedType, "value"); + should.equal(decoded, nullBody); + done(); + }); + + it("should correctly decode an undefined message body", function(done) { + const { body: decoded, bodyType: decodedType } = transformer.decode(undefinedBody); + should.equal(decodedType, "value"); + should.equal(decoded, undefined); + done(); + }); + + it("should correctly decode an empty string message body", function(done) { + const { body: decoded, bodyType: decodedType } = transformer.decode(emptyStringBody); + should.equal(decodedType, "value"); + (decoded as any).should.equal(emptyStringBody); + done(); + }); + + it("should correctly decode an array message body", function(done) { + const { body: decoded, bodyType: decodedType } = transformer.decode(arrayBody); + should.equal(decodedType, "value"); + assert.deepEqual(decoded, arrayBody); + done(); + }); + + it("should correctly decode an object message body", function(done) { + const { body: decoded, bodyType: decodedType } = transformer.decode(objectBody); + should.equal(decodedType, "value"); + assert.deepEqual(decoded, objectBody); + done(); + }); + + it("should correctly decode a buffer message body", function(done) { + const { body: decoded, bodyType: decodedType } = transformer.decode(bufferBody); + should.equal(decodedType, "data"); + assert.deepEqual(decoded, bufferBody); + done(); + }); + + it("should correctly decode a hex buffer message body", function(done) { + const { body: decoded, bodyType: decodedType } = transformer.decode(hexBufferBody); + should.equal(decodedType, "data"); + assert.deepEqual(decoded, hexBufferBody); + done(); + }); }); }); }); diff --git a/sdk/eventhub/event-hubs/test/internal/diagnostics/messageSpan.spec.ts b/sdk/eventhub/event-hubs/test/internal/diagnostics/messageSpan.spec.ts index f6ff14bb4804..53ce21e00f5d 100644 --- a/sdk/eventhub/event-hubs/test/internal/diagnostics/messageSpan.spec.ts +++ b/sdk/eventhub/event-hubs/test/internal/diagnostics/messageSpan.spec.ts @@ -4,39 +4,42 @@ import chai from "chai"; import { createMessageSpan } from "../../../src/diagnostics/tracing"; import { setTracer, resetTracer } from "@azure/test-utils"; +import { testWithServiceTypes } from "../../public/utils/testWithServiceTypes"; const should = chai.should(); const assert = chai.assert; -describe("#createMessageSpan()", () => { - before(() => { - setTracer(); - }); - - after(() => { - resetTracer(); - }); +testWithServiceTypes(() => { + describe("#createMessageSpan()", () => { + before(() => { + setTracer(); + }); - it("should create a span without a parent", () => { - const { span } = createMessageSpan( - {}, - { - 
entityPath: "entity path", - host: "host" - } - ); - - should.exist(span); - should.exist(span.spanContext().spanId); - should.exist(span.spanContext().traceId); - - should.equal((span as any).name, "Azure.EventHubs.message"); - assert.deepStrictEqual((span as any).attributes, { - "az.namespace": "Microsoft.EventHub", - "message_bus.destination": "entity path", - "peer.address": "host" + after(() => { + resetTracer(); }); - span.end(); + it("should create a span without a parent", () => { + const { span } = createMessageSpan( + {}, + { + entityPath: "entity path", + host: "host" + } + ); + + should.exist(span); + should.exist(span.spanContext().spanId); + should.exist(span.spanContext().traceId); + + should.equal((span as any).name, "Azure.EventHubs.message"); + assert.deepStrictEqual((span as any).attributes, { + "az.namespace": "Microsoft.EventHub", + "message_bus.destination": "entity path", + "peer.address": "host" + }); + + span.end(); + }); }); }); diff --git a/sdk/eventhub/event-hubs/test/internal/eventHubConsumerClientUnitTests.spec.ts b/sdk/eventhub/event-hubs/test/internal/eventHubConsumerClientUnitTests.spec.ts index 382d16c8bc6f..8f5a2b61a8e1 100644 --- a/sdk/eventhub/event-hubs/test/internal/eventHubConsumerClientUnitTests.spec.ts +++ b/sdk/eventhub/event-hubs/test/internal/eventHubConsumerClientUnitTests.spec.ts @@ -10,461 +10,489 @@ import { BalancedLoadBalancingStrategy } from "../../src/loadBalancerStrategies/ import { GreedyLoadBalancingStrategy } from "../../src/loadBalancerStrategies/greedyStrategy"; import chai from "chai"; import { EnvVarKeys, getEnvVars } from "../public/utils/testUtils"; +import { testWithServiceTypes } from "../public/utils/testWithServiceTypes"; +import { createMockServer } from "../public/utils/mockService"; const should = chai.should(); -const env = getEnvVars(); - -describe("EventHubConsumerClient", () => { - const service = { - connectionString: env[EnvVarKeys.EVENTHUB_CONNECTION_STRING], - path: env[EnvVarKeys.EVENTHUB_NAME]! - }; - - before(() => { - should.exist( - env[EnvVarKeys.EVENTHUB_CONNECTION_STRING], - "define EVENTHUB_CONNECTION_STRING in your environment before running integration tests." - ); - should.exist( - env[EnvVarKeys.EVENTHUB_NAME], - "define EVENTHUB_NAME in your environment before running integration tests." - ); - }); - - describe("unit tests", () => { - it("isCheckpointStore", () => { - isCheckpointStore({ - processEvents: async () => { - /* no-op */ - }, - processClose: async () => { - /* no-op */ - } - }).should.not.equal(true); - isCheckpointStore("hello").should.not.equal(true); +testWithServiceTypes((serviceVersion) => { + const env = getEnvVars(); + if (serviceVersion === "mock") { + let service: ReturnType; + before("Starting mock service", () => { + service = createMockServer(); + return service.start(); + }); - isCheckpointStore(new InMemoryCheckpointStore()).should.equal(true); + after("Stopping mock service", () => { + return service?.stop(); + }); + } + + describe("EventHubConsumerClient", () => { + const service = { + connectionString: env[EnvVarKeys.EVENTHUB_CONNECTION_STRING], + path: env[EnvVarKeys.EVENTHUB_NAME]! + }; + + before(() => { + should.exist( + env[EnvVarKeys.EVENTHUB_CONNECTION_STRING], + "define EVENTHUB_CONNECTION_STRING in your environment before running integration tests." + ); + should.exist( + env[EnvVarKeys.EVENTHUB_NAME], + "define EVENTHUB_NAME in your environment before running integration tests." 
+ ); }); - describe("subscribe() overloads route properly", () => { - let client: EventHubConsumerClient; - let clientWithCheckpointStore: EventHubConsumerClient; - let subscriptionHandlers: SubscriptionEventHandlers; - let fakeEventProcessor: SinonStubbedInstance; - let validateOptions: (options: FullEventProcessorOptions) => void; - const fakeEventProcessorConstructor = ( - connectionContext: ConnectionContext, - subscriptionEventHandlers: SubscriptionEventHandlers, - checkpointStore: CheckpointStore, - options: FullEventProcessorOptions - ): SinonStubbedInstance => { - subscriptionEventHandlers.should.equal(subscriptionHandlers); - should.exist(connectionContext.managementSession); - isCheckpointStore(checkpointStore).should.equal(true); - - validateOptions(options); - - return fakeEventProcessor; - }; - - beforeEach(() => { - fakeEventProcessor = createStubInstance(EventProcessor); - - client = new EventHubConsumerClient( - EventHubConsumerClient.defaultConsumerGroupName, - service.connectionString!, - service.path - ); - - clientWithCheckpointStore = new EventHubConsumerClient( - EventHubConsumerClient.defaultConsumerGroupName, - service.connectionString!, - service.path, - // it doesn't actually matter _what_ checkpoint store gets passed in - new InMemoryCheckpointStore() - ); - - subscriptionHandlers = { + describe("unit tests", () => { + it("isCheckpointStore", () => { + isCheckpointStore({ processEvents: async () => { /* no-op */ }, - processError: async () => { + processClose: async () => { /* no-op */ } - }; + }).should.not.equal(true); - (client as any)["_createEventProcessor"] = fakeEventProcessorConstructor; - (clientWithCheckpointStore as any)["_createEventProcessor"] = fakeEventProcessorConstructor; - }); - - it("conflicting subscribes", () => { - validateOptions = () => { - /* no-op */ - }; + isCheckpointStore("hello").should.not.equal(true); - client.subscribe(subscriptionHandlers); - // invalid - we're already subscribed to a conflicting partition - should.throw( - () => client.subscribe("0", subscriptionHandlers), - /Partition already has a subscriber/ - ); - - clientWithCheckpointStore.subscribe("0", subscriptionHandlers); - // invalid - we're already subscribed to a conflicting partition - should.throw( - () => clientWithCheckpointStore.subscribe(subscriptionHandlers), - /Partition already has a subscriber/ - ); + isCheckpointStore(new InMemoryCheckpointStore()).should.equal(true); }); - it("subscribe to single partition, no checkpoint store, no loadBalancingOptions", () => { - validateOptions = (options) => { - // when the user doesn't pass a checkpoint store we give them a really simple set of - // defaults: - // - InMemoryCheckpointStore - // - UnbalancedLoadBalancingStrategy - // - loopIntervalInMs: 10000 - - // So we don't set an ownerlevel here - it's all in-memory and you can have as many - // as you want (the user still has the option to pass their own via SubscribeOptions). - should.not.exist(options.ownerLevel); - - // and if you don't specify a CheckpointStore we also assume you just want to read all partitions - // immediately so we use the UnbalancedLoadBalancingStrategy. 
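// For readers skimming this large, mostly mechanical diff: the pattern each spec file
// adopts is condensed below. It assumes, as the imports and the hunks above suggest,
// that testWithServiceTypes runs its callback once per configured service type (the
// local mock service or the live service, chosen through the TEST_TARGET environment
// variable), and that createMockServer returns an object with promise-returning
// start()/stop() methods.
import { testWithServiceTypes } from "../public/utils/testWithServiceTypes";
import { createMockServer } from "../public/utils/mockService";

testWithServiceTypes((serviceVersion) => {
  if (serviceVersion === "mock") {
    let service: ReturnType<typeof createMockServer>;
    // Spin up the local mocked Event Hubs endpoint before any test in the file runs...
    before("Starting mock service", () => {
      service = createMockServer();
      return service.start();
    });
    // ...and tear it down afterwards so the next spec file can bind the same port.
    after("Stopping mock service", () => {
      return service?.stop();
    });
  }
  // The original describe() blocks are nested here, re-indented but otherwise unchanged.
});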
- options.loadBalancingStrategy.constructor.name.should.equal( - "UnbalancedLoadBalancingStrategy" - ); - - options.loopIntervalInMs.should.equal(10000); - options.processingTarget!.should.equal("0"); + describe("subscribe() overloads route properly", () => { + let client: EventHubConsumerClient; + let clientWithCheckpointStore: EventHubConsumerClient; + let subscriptionHandlers: SubscriptionEventHandlers; + let fakeEventProcessor: SinonStubbedInstance; + let validateOptions: (options: FullEventProcessorOptions) => void; + const fakeEventProcessorConstructor = ( + connectionContext: ConnectionContext, + subscriptionEventHandlers: SubscriptionEventHandlers, + checkpointStore: CheckpointStore, + options: FullEventProcessorOptions + ): SinonStubbedInstance => { + subscriptionEventHandlers.should.equal(subscriptionHandlers); + should.exist(connectionContext.managementSession); + isCheckpointStore(checkpointStore).should.equal(true); + + validateOptions(options); + + return fakeEventProcessor; }; - const subscription = client.subscribe("0", subscriptionHandlers); - - subscription.close(); - fakeEventProcessor.stop.callCount.should.equal(1); - }); + beforeEach(() => { + fakeEventProcessor = createStubInstance(EventProcessor); - it("subscribe to single partition, no checkpoint store, WITH loadBalancingOptions", () => { - validateOptions = (options) => { - // When the user subscribes to a single partition, we always use the UnbalancedLoadBalancingStrategy. - // The loadBalancingOptions `strategy` and `partitionOwnershipExpirationIntervalInMs` fields are ignored. - // - InMemoryCheckpointStore - // - UnbalancedLoadBalancingStrategy - // - loopIntervalInMs: 10000 - - // So we don't set an ownerlevel here - it's all in-memory and you can have as many - // as you want (the user still has the option to pass their own via SubscribeOptions). - should.not.exist(options.ownerLevel); - - // and if you don't specify a CheckpointStore we also assume you just want to read all partitions - // immediately so we use the UnbalancedLoadBalancingStrategy. 
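// A runnable sketch of the overload exercised by the test above: subscribing to a
// single partition with no checkpoint store, in which case the client falls back to
// the in-memory store and the unbalanced strategy described in these comments. The
// connection string and hub name are placeholders, not values from this repository.
import { EventHubConsumerClient, earliestEventPosition } from "@azure/event-hubs";

const client = new EventHubConsumerClient(
  EventHubConsumerClient.defaultConsumerGroupName,
  "Endpoint=sb://<namespace>.servicebus.windows.net/;SharedAccessKeyName=<name>;SharedAccessKey=<key>", // placeholder
  "<event hub name>" // placeholder
);

const subscription = client.subscribe(
  "0", // read only partition "0"; omit this argument to read all partitions
  {
    processEvents: async (events, context) => {
      for (const event of events) {
        console.log(`partition ${context.partitionId}:`, event.body);
      }
    },
    processError: async (err) => {
      console.error(err);
    }
  },
  { startPosition: earliestEventPosition }
);
// Later: await subscription.close(); await client.close();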
- options.loadBalancingStrategy.constructor.name.should.equal( - "UnbalancedLoadBalancingStrategy" + client = new EventHubConsumerClient( + EventHubConsumerClient.defaultConsumerGroupName, + service.connectionString!, + service.path ); - options.loopIntervalInMs.should.equal(20); - options.processingTarget!.should.equal("0"); - }; + clientWithCheckpointStore = new EventHubConsumerClient( + EventHubConsumerClient.defaultConsumerGroupName, + service.connectionString!, + service.path, + // it doesn't actually matter _what_ checkpoint store gets passed in + new InMemoryCheckpointStore() + ); - client = new EventHubConsumerClient( - EventHubConsumerClient.defaultConsumerGroupName, - service.connectionString!, - service.path, - { - loadBalancingOptions: { - strategy: "greedy", // ignored - partitionOwnershipExpirationIntervalInMs: 100, // ignored - updateIntervalInMs: 20 + subscriptionHandlers = { + processEvents: async () => { + /* no-op */ + }, + processError: async () => { + /* no-op */ } - } - ); - (client as any)["_createEventProcessor"] = fakeEventProcessorConstructor; - - const subscription = client.subscribe("0", subscriptionHandlers); - - subscription.close(); - fakeEventProcessor.stop.callCount.should.equal(1); - }); + }; - it("subscribe to single partition, WITH checkpoint store, no loadBalancingOptions", () => { - validateOptions = (options) => { - // when the user gives us a checkpoint store but subscribes to a single partition, - // - they use their checkpoint store and the following defaults: - // - UnbalancedLoadBalancingStrategy - // - loopIntervalInMs: 10000 + (client as any)["_createEventProcessor"] = fakeEventProcessorConstructor; + (clientWithCheckpointStore as any)[ + "_createEventProcessor" + ] = fakeEventProcessorConstructor; + }); - // To coordinate properly we set an owner level - this lets us - // cooperate properly with other consumers within this group. - options.ownerLevel!.should.equal(0); + it("conflicting subscribes", () => { + validateOptions = () => { + /* no-op */ + }; - options.processingTarget!.should.equal("0"); - options.loadBalancingStrategy.constructor.name.should.equal( - "UnbalancedLoadBalancingStrategy" + client.subscribe(subscriptionHandlers); + // invalid - we're already subscribed to a conflicting partition + should.throw( + () => client.subscribe("0", subscriptionHandlers), + /Partition already has a subscriber/ ); - options.loopIntervalInMs.should.equal(10000); - }; - - clientWithCheckpointStore.subscribe("0", subscriptionHandlers); - }); - - it("subscribe to single partition, WITH checkpoint store, WITH loadBalancingOptions", () => { - validateOptions = (options) => { - // When the user subscribes to a single partition, we always use the UnbalancedLoadBalancingStrategy. - // The loadBalancingOptions `strategy` and `partitionOwnershipExpirationIntervalInMs` fields are ignored. - // - UnbalancedLoadBalancingStrategy - // - loopIntervalInMs: 10000 - // To coordinate properly we set an owner level - this lets us - // cooperate properly with other consumers within this group. 
- options.ownerLevel!.should.equal(0); - - options.processingTarget!.should.equal("0"); - options.loadBalancingStrategy.constructor.name.should.equal( - "UnbalancedLoadBalancingStrategy" + clientWithCheckpointStore.subscribe("0", subscriptionHandlers); + // invalid - we're already subscribed to a conflicting partition + should.throw( + () => clientWithCheckpointStore.subscribe(subscriptionHandlers), + /Partition already has a subscriber/ ); - options.loopIntervalInMs.should.equal(20); - }; - - clientWithCheckpointStore = new EventHubConsumerClient( - EventHubConsumerClient.defaultConsumerGroupName, - service.connectionString!, - service.path, - // it doesn't actually matter _what_ checkpoint store gets passed in - new InMemoryCheckpointStore(), - { - loadBalancingOptions: { - strategy: "greedy", // ignored - partitionOwnershipExpirationIntervalInMs: 100, // ignored - updateIntervalInMs: 20 + }); + + it("subscribe to single partition, no checkpoint store, no loadBalancingOptions", () => { + validateOptions = (options) => { + // when the user doesn't pass a checkpoint store we give them a really simple set of + // defaults: + // - InMemoryCheckpointStore + // - UnbalancedLoadBalancingStrategy + // - loopIntervalInMs: 10000 + + // So we don't set an ownerlevel here - it's all in-memory and you can have as many + // as you want (the user still has the option to pass their own via SubscribeOptions). + should.not.exist(options.ownerLevel); + + // and if you don't specify a CheckpointStore we also assume you just want to read all partitions + // immediately so we use the UnbalancedLoadBalancingStrategy. + options.loadBalancingStrategy.constructor.name.should.equal( + "UnbalancedLoadBalancingStrategy" + ); + + options.loopIntervalInMs.should.equal(10000); + options.processingTarget!.should.equal("0"); + }; + + const subscription = client.subscribe("0", subscriptionHandlers); + + subscription.close(); + fakeEventProcessor.stop.callCount.should.equal(1); + }); + + it("subscribe to single partition, no checkpoint store, WITH loadBalancingOptions", () => { + validateOptions = (options) => { + // When the user subscribes to a single partition, we always use the UnbalancedLoadBalancingStrategy. + // The loadBalancingOptions `strategy` and `partitionOwnershipExpirationIntervalInMs` fields are ignored. + // - InMemoryCheckpointStore + // - UnbalancedLoadBalancingStrategy + // - loopIntervalInMs: 10000 + + // So we don't set an ownerlevel here - it's all in-memory and you can have as many + // as you want (the user still has the option to pass their own via SubscribeOptions). + should.not.exist(options.ownerLevel); + + // and if you don't specify a CheckpointStore we also assume you just want to read all partitions + // immediately so we use the UnbalancedLoadBalancingStrategy. 
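// The comment above notes that a subscriber can still supply its own owner level via
// SubscribeOptions. A one-line sketch, reusing the `client` and `subscriptionHandlers`
// from the surrounding test; ownerLevel maps to the AMQP epoch, and the subscriber
// with the highest value is granted exclusive access to the partition:
const exclusiveSubscription = client.subscribe("0", subscriptionHandlers, { ownerLevel: 1 });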
+ options.loadBalancingStrategy.constructor.name.should.equal( + "UnbalancedLoadBalancingStrategy" + ); + + options.loopIntervalInMs.should.equal(20); + options.processingTarget!.should.equal("0"); + }; + + client = new EventHubConsumerClient( + EventHubConsumerClient.defaultConsumerGroupName, + service.connectionString!, + service.path, + { + loadBalancingOptions: { + strategy: "greedy", // ignored + partitionOwnershipExpirationIntervalInMs: 100, // ignored + updateIntervalInMs: 20 + } } - } - ); - (clientWithCheckpointStore as any)["_createEventProcessor"] = fakeEventProcessorConstructor; - - clientWithCheckpointStore.subscribe("0", subscriptionHandlers); - }); - - it("subscribe to all partitions, no checkpoint store, no loadBalancingOptions", () => { - validateOptions = (options) => { - // when the user doesn't pass a checkpoint store we give them a really simple set of - // defaults: - // - InMemoryCheckpointStore - // - UnbalancedLoadBalancingStrategy - // - loopIntervalInMs: 10000 - should.not.exist(options.ownerLevel); - options.loadBalancingStrategy.constructor.name.should.equal( - "UnbalancedLoadBalancingStrategy" ); - options.loopIntervalInMs.should.equal(10000); - }; - - client.subscribe(subscriptionHandlers); - }); - - it("subscribe to all partitions, no checkpoint store, WITH loadBalancingOptions", () => { - validateOptions = (options) => { - // When the user doesn't provide a checkpoint store, we always use the UnbalancedLoadBalancingStrategy. - // The loadBalancingOptions `strategy` and `partitionOwnershipExpirationIntervalInMs` fields are ignored. - // - InMemoryCheckpointStore - // - UnbalancedLoadBalancingStrategy - should.not.exist(options.ownerLevel); - options.loadBalancingStrategy.constructor.name.should.equal( - "UnbalancedLoadBalancingStrategy" - ); - options.loopIntervalInMs.should.equal(20); - }; - - client = new EventHubConsumerClient( - EventHubConsumerClient.defaultConsumerGroupName, - service.connectionString!, - service.path, - { - loadBalancingOptions: { - strategy: "greedy", // ignored - partitionOwnershipExpirationIntervalInMs: 100, // ignored - updateIntervalInMs: 20 + (client as any)["_createEventProcessor"] = fakeEventProcessorConstructor; + + const subscription = client.subscribe("0", subscriptionHandlers); + + subscription.close(); + fakeEventProcessor.stop.callCount.should.equal(1); + }); + + it("subscribe to single partition, WITH checkpoint store, no loadBalancingOptions", () => { + validateOptions = (options) => { + // when the user gives us a checkpoint store but subscribes to a single partition, + // - they use their checkpoint store and the following defaults: + // - UnbalancedLoadBalancingStrategy + // - loopIntervalInMs: 10000 + + // To coordinate properly we set an owner level - this lets us + // cooperate properly with other consumers within this group. + options.ownerLevel!.should.equal(0); + + options.processingTarget!.should.equal("0"); + options.loadBalancingStrategy.constructor.name.should.equal( + "UnbalancedLoadBalancingStrategy" + ); + options.loopIntervalInMs.should.equal(10000); + }; + + clientWithCheckpointStore.subscribe("0", subscriptionHandlers); + }); + + it("subscribe to single partition, WITH checkpoint store, WITH loadBalancingOptions", () => { + validateOptions = (options) => { + // When the user subscribes to a single partition, we always use the UnbalancedLoadBalancingStrategy. + // The loadBalancingOptions `strategy` and `partitionOwnershipExpirationIntervalInMs` fields are ignored. 
+ // - UnbalancedLoadBalancingStrategy + // - loopIntervalInMs: 10000 + + // To coordinate properly we set an owner level - this lets us + // cooperate properly with other consumers within this group. + options.ownerLevel!.should.equal(0); + + options.processingTarget!.should.equal("0"); + options.loadBalancingStrategy.constructor.name.should.equal( + "UnbalancedLoadBalancingStrategy" + ); + options.loopIntervalInMs.should.equal(20); + }; + + clientWithCheckpointStore = new EventHubConsumerClient( + EventHubConsumerClient.defaultConsumerGroupName, + service.connectionString!, + service.path, + // it doesn't actually matter _what_ checkpoint store gets passed in + new InMemoryCheckpointStore(), + { + loadBalancingOptions: { + strategy: "greedy", // ignored + partitionOwnershipExpirationIntervalInMs: 100, // ignored + updateIntervalInMs: 20 + } } - } - ); - (client as any)["_createEventProcessor"] = fakeEventProcessorConstructor; - - client.subscribe(subscriptionHandlers); - }); - - it("subscribe to all partitions, WITH checkpoint store, no loadBalancingOptions", () => { - validateOptions = (options) => { - // when the user gives us a checkpoint store we treat their consumer client as - // a "production" ready client - they use their checkpoint store and the following - // defaults: - // - BalancedLoadBalancingStrategy - // - loopIntervalInMs: 10000 - // - partitionOwnershipExpirationIntervalInMs: 60000 - options.ownerLevel!.should.equal(0); - should.not.exist(options.processingTarget); - options.loadBalancingStrategy.constructor.name.should.equal( - "BalancedLoadBalancingStrategy" ); - (options.loadBalancingStrategy as BalancedLoadBalancingStrategy)[ - "_partitionOwnershipExpirationIntervalInMs" - ].should.equal(60000); - options.loopIntervalInMs.should.equal(10000); - }; - - clientWithCheckpointStore.subscribe(subscriptionHandlers); - }); - - it("subscribe to all partitions, WITH checkpoint store, WITH loadBalancingOptions (greedy, updateInterval, expirationInterval)", () => { - validateOptions = (options) => { - // when the user gives us a checkpoint store and subscribes to all partitions, - // we use their loadBalancingOptions when provided. - options.ownerLevel!.should.equal(0); - should.not.exist(options.processingTarget); - options.loadBalancingStrategy.constructor.name.should.equal( - "GreedyLoadBalancingStrategy" + (clientWithCheckpointStore as any)[ + "_createEventProcessor" + ] = fakeEventProcessorConstructor; + + clientWithCheckpointStore.subscribe("0", subscriptionHandlers); + }); + + it("subscribe to all partitions, no checkpoint store, no loadBalancingOptions", () => { + validateOptions = (options) => { + // when the user doesn't pass a checkpoint store we give them a really simple set of + // defaults: + // - InMemoryCheckpointStore + // - UnbalancedLoadBalancingStrategy + // - loopIntervalInMs: 10000 + should.not.exist(options.ownerLevel); + options.loadBalancingStrategy.constructor.name.should.equal( + "UnbalancedLoadBalancingStrategy" + ); + options.loopIntervalInMs.should.equal(10000); + }; + + client.subscribe(subscriptionHandlers); + }); + + it("subscribe to all partitions, no checkpoint store, WITH loadBalancingOptions", () => { + validateOptions = (options) => { + // When the user doesn't provide a checkpoint store, we always use the UnbalancedLoadBalancingStrategy. + // The loadBalancingOptions `strategy` and `partitionOwnershipExpirationIntervalInMs` fields are ignored. 
+ // - InMemoryCheckpointStore + // - UnbalancedLoadBalancingStrategy + should.not.exist(options.ownerLevel); + options.loadBalancingStrategy.constructor.name.should.equal( + "UnbalancedLoadBalancingStrategy" + ); + options.loopIntervalInMs.should.equal(20); + }; + + client = new EventHubConsumerClient( + EventHubConsumerClient.defaultConsumerGroupName, + service.connectionString!, + service.path, + { + loadBalancingOptions: { + strategy: "greedy", // ignored + partitionOwnershipExpirationIntervalInMs: 100, // ignored + updateIntervalInMs: 20 + } + } ); - (options.loadBalancingStrategy as GreedyLoadBalancingStrategy)[ - "_partitionOwnershipExpirationIntervalInMs" - ].should.equal(100); - options.loopIntervalInMs.should.equal(20); - }; - - clientWithCheckpointStore = new EventHubConsumerClient( - EventHubConsumerClient.defaultConsumerGroupName, - service.connectionString!, - service.path, - // it doesn't actually matter _what_ checkpoint store gets passed in - new InMemoryCheckpointStore(), - { - loadBalancingOptions: { - strategy: "greedy", - partitionOwnershipExpirationIntervalInMs: 100, - updateIntervalInMs: 20 + (client as any)["_createEventProcessor"] = fakeEventProcessorConstructor; + + client.subscribe(subscriptionHandlers); + }); + + it("subscribe to all partitions, WITH checkpoint store, no loadBalancingOptions", () => { + validateOptions = (options) => { + // when the user gives us a checkpoint store we treat their consumer client as + // a "production" ready client - they use their checkpoint store and the following + // defaults: + // - BalancedLoadBalancingStrategy + // - loopIntervalInMs: 10000 + // - partitionOwnershipExpirationIntervalInMs: 60000 + options.ownerLevel!.should.equal(0); + should.not.exist(options.processingTarget); + options.loadBalancingStrategy.constructor.name.should.equal( + "BalancedLoadBalancingStrategy" + ); + (options.loadBalancingStrategy as BalancedLoadBalancingStrategy)[ + "_partitionOwnershipExpirationIntervalInMs" + ].should.equal(60000); + options.loopIntervalInMs.should.equal(10000); + }; + + clientWithCheckpointStore.subscribe(subscriptionHandlers); + }); + + it("subscribe to all partitions, WITH checkpoint store, WITH loadBalancingOptions (greedy, updateInterval, expirationInterval)", () => { + validateOptions = (options) => { + // when the user gives us a checkpoint store and subscribes to all partitions, + // we use their loadBalancingOptions when provided. 
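// A sketch of the "production" configuration the next few tests describe: a
// checkpoint store plus explicit loadBalancingOptions. The store is only declared
// here, since any CheckpointStore implementation (for example a blob-based one)
// would do; the interval values mirror the defaults asserted below.
import { EventHubConsumerClient, CheckpointStore } from "@azure/event-hubs";

declare const myCheckpointStore: CheckpointStore;

const balancedClient = new EventHubConsumerClient(
  EventHubConsumerClient.defaultConsumerGroupName,
  "<connection string>", // placeholder
  "<event hub name>", // placeholder
  myCheckpointStore,
  {
    loadBalancingOptions: {
      strategy: "greedy", // or "balanced" (the default)
      partitionOwnershipExpirationIntervalInMs: 60000, // default asserted in these tests
      updateIntervalInMs: 10000 // surfaces internally as loopIntervalInMs
    }
  }
);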
+ options.ownerLevel!.should.equal(0); + should.not.exist(options.processingTarget); + options.loadBalancingStrategy.constructor.name.should.equal( + "GreedyLoadBalancingStrategy" + ); + (options.loadBalancingStrategy as GreedyLoadBalancingStrategy)[ + "_partitionOwnershipExpirationIntervalInMs" + ].should.equal(100); + options.loopIntervalInMs.should.equal(20); + }; + + clientWithCheckpointStore = new EventHubConsumerClient( + EventHubConsumerClient.defaultConsumerGroupName, + service.connectionString!, + service.path, + // it doesn't actually matter _what_ checkpoint store gets passed in + new InMemoryCheckpointStore(), + { + loadBalancingOptions: { + strategy: "greedy", + partitionOwnershipExpirationIntervalInMs: 100, + updateIntervalInMs: 20 + } } - } - ); - (clientWithCheckpointStore as any)["_createEventProcessor"] = fakeEventProcessorConstructor; - - clientWithCheckpointStore.subscribe(subscriptionHandlers); - }); - - it("subscribe to all partitions, WITH checkpoint store, WITH loadBalancingOptions (balanced, updateInterval, expirationInterval)", () => { - validateOptions = (options) => { - // when the user gives us a checkpoint store and subscribes to all partitions, - // we use their loadBalancingOptions when provided. - options.ownerLevel!.should.equal(0); - should.not.exist(options.processingTarget); - options.loadBalancingStrategy.constructor.name.should.equal( - "BalancedLoadBalancingStrategy" ); - (options.loadBalancingStrategy as BalancedLoadBalancingStrategy)[ - "_partitionOwnershipExpirationIntervalInMs" - ].should.equal(100); - options.loopIntervalInMs.should.equal(20); - }; - - clientWithCheckpointStore = new EventHubConsumerClient( - EventHubConsumerClient.defaultConsumerGroupName, - service.connectionString!, - service.path, - // it doesn't actually matter _what_ checkpoint store gets passed in - new InMemoryCheckpointStore(), - { - loadBalancingOptions: { - strategy: "balanced", - partitionOwnershipExpirationIntervalInMs: 100, - updateIntervalInMs: 20 + (clientWithCheckpointStore as any)[ + "_createEventProcessor" + ] = fakeEventProcessorConstructor; + + clientWithCheckpointStore.subscribe(subscriptionHandlers); + }); + + it("subscribe to all partitions, WITH checkpoint store, WITH loadBalancingOptions (balanced, updateInterval, expirationInterval)", () => { + validateOptions = (options) => { + // when the user gives us a checkpoint store and subscribes to all partitions, + // we use their loadBalancingOptions when provided. 
+ options.ownerLevel!.should.equal(0); + should.not.exist(options.processingTarget); + options.loadBalancingStrategy.constructor.name.should.equal( + "BalancedLoadBalancingStrategy" + ); + (options.loadBalancingStrategy as BalancedLoadBalancingStrategy)[ + "_partitionOwnershipExpirationIntervalInMs" + ].should.equal(100); + options.loopIntervalInMs.should.equal(20); + }; + + clientWithCheckpointStore = new EventHubConsumerClient( + EventHubConsumerClient.defaultConsumerGroupName, + service.connectionString!, + service.path, + // it doesn't actually matter _what_ checkpoint store gets passed in + new InMemoryCheckpointStore(), + { + loadBalancingOptions: { + strategy: "balanced", + partitionOwnershipExpirationIntervalInMs: 100, + updateIntervalInMs: 20 + } } - } - ); - (clientWithCheckpointStore as any)["_createEventProcessor"] = fakeEventProcessorConstructor; - - clientWithCheckpointStore.subscribe(subscriptionHandlers); - }); - - it("subscribe to all partitions, WITH checkpoint store, WITH loadBalancingOptions (updateInterval, expirationInterval)", () => { - validateOptions = (options) => { - // when the user gives us a checkpoint store and subscribes to all partitions, - // we use their loadBalancingOptions when provided. - options.ownerLevel!.should.equal(0); - should.not.exist(options.processingTarget); - options.loadBalancingStrategy.constructor.name.should.equal( - "BalancedLoadBalancingStrategy" ); - (options.loadBalancingStrategy as BalancedLoadBalancingStrategy)[ - "_partitionOwnershipExpirationIntervalInMs" - ].should.equal(100); - options.loopIntervalInMs.should.equal(20); - }; - - clientWithCheckpointStore = new EventHubConsumerClient( - EventHubConsumerClient.defaultConsumerGroupName, - service.connectionString!, - service.path, - // it doesn't actually matter _what_ checkpoint store gets passed in - new InMemoryCheckpointStore(), - { - loadBalancingOptions: { - // default 'strategy' is 'balanced' - partitionOwnershipExpirationIntervalInMs: 100, - updateIntervalInMs: 20 + (clientWithCheckpointStore as any)[ + "_createEventProcessor" + ] = fakeEventProcessorConstructor; + + clientWithCheckpointStore.subscribe(subscriptionHandlers); + }); + + it("subscribe to all partitions, WITH checkpoint store, WITH loadBalancingOptions (updateInterval, expirationInterval)", () => { + validateOptions = (options) => { + // when the user gives us a checkpoint store and subscribes to all partitions, + // we use their loadBalancingOptions when provided. 
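// These tests keep repeating that "it doesn't actually matter _what_ checkpoint store
// gets passed in"; only the shape matters. A minimal, deliberately non-durable
// CheckpointStore sketch (the public interface methods also take
// namespace/eventHubName/consumerGroup arguments, omitted here because TypeScript
// accepts callbacks with fewer parameters):
import { CheckpointStore, PartitionOwnership, Checkpoint } from "@azure/event-hubs";

const naiveStore: CheckpointStore = {
  async listOwnership(): Promise<PartitionOwnership[]> {
    return []; // pretend nothing is owned yet
  },
  async claimOwnership(requested: PartitionOwnership[]): Promise<PartitionOwnership[]> {
    return requested; // grant every claim; a real store must enforce etags
  },
  async updateCheckpoint(): Promise<void> {
    // drop the checkpoint; a real store persists offset and sequenceNumber
  },
  async listCheckpoints(): Promise<Checkpoint[]> {
    return []; // no checkpoints, so consumers fall back to the start position
  }
};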
+ options.ownerLevel!.should.equal(0); + should.not.exist(options.processingTarget); + options.loadBalancingStrategy.constructor.name.should.equal( + "BalancedLoadBalancingStrategy" + ); + (options.loadBalancingStrategy as BalancedLoadBalancingStrategy)[ + "_partitionOwnershipExpirationIntervalInMs" + ].should.equal(100); + options.loopIntervalInMs.should.equal(20); + }; + + clientWithCheckpointStore = new EventHubConsumerClient( + EventHubConsumerClient.defaultConsumerGroupName, + service.connectionString!, + service.path, + // it doesn't actually matter _what_ checkpoint store gets passed in + new InMemoryCheckpointStore(), + { + loadBalancingOptions: { + // default 'strategy' is 'balanced' + partitionOwnershipExpirationIntervalInMs: 100, + updateIntervalInMs: 20 + } } - } - ); - (clientWithCheckpointStore as any)["_createEventProcessor"] = fakeEventProcessorConstructor; - - clientWithCheckpointStore.subscribe(subscriptionHandlers); - }); - - it("subscribe to all partitions, WITH checkpoint store, WITH loadBalancingOptions (strategy)", () => { - validateOptions = (options) => { - // when the user gives us a checkpoint store and subscribes to all partitions, - // we use their loadBalancingOptions when provided. - options.ownerLevel!.should.equal(0); - should.not.exist(options.processingTarget); - options.loadBalancingStrategy.constructor.name.should.equal( - "GreedyLoadBalancingStrategy" ); - (options.loadBalancingStrategy as GreedyLoadBalancingStrategy)[ - "_partitionOwnershipExpirationIntervalInMs" - ].should.equal(60000); - options.loopIntervalInMs.should.equal(10000); - }; - - clientWithCheckpointStore = new EventHubConsumerClient( - EventHubConsumerClient.defaultConsumerGroupName, - service.connectionString!, - service.path, - // it doesn't actually matter _what_ checkpoint store gets passed in - new InMemoryCheckpointStore(), - { - loadBalancingOptions: { - strategy: "greedy" - // defaults are used for the rest of the parameters. + (clientWithCheckpointStore as any)[ + "_createEventProcessor" + ] = fakeEventProcessorConstructor; + + clientWithCheckpointStore.subscribe(subscriptionHandlers); + }); + + it("subscribe to all partitions, WITH checkpoint store, WITH loadBalancingOptions (strategy)", () => { + validateOptions = (options) => { + // when the user gives us a checkpoint store and subscribes to all partitions, + // we use their loadBalancingOptions when provided. + options.ownerLevel!.should.equal(0); + should.not.exist(options.processingTarget); + options.loadBalancingStrategy.constructor.name.should.equal( + "GreedyLoadBalancingStrategy" + ); + (options.loadBalancingStrategy as GreedyLoadBalancingStrategy)[ + "_partitionOwnershipExpirationIntervalInMs" + ].should.equal(60000); + options.loopIntervalInMs.should.equal(10000); + }; + + clientWithCheckpointStore = new EventHubConsumerClient( + EventHubConsumerClient.defaultConsumerGroupName, + service.connectionString!, + service.path, + // it doesn't actually matter _what_ checkpoint store gets passed in + new InMemoryCheckpointStore(), + { + loadBalancingOptions: { + strategy: "greedy" + // defaults are used for the rest of the parameters. 
+ } } - } - ); - (clientWithCheckpointStore as any)["_createEventProcessor"] = fakeEventProcessorConstructor; + ); + (clientWithCheckpointStore as any)[ + "_createEventProcessor" + ] = fakeEventProcessorConstructor; - clientWithCheckpointStore.subscribe(subscriptionHandlers); - }); + clientWithCheckpointStore.subscribe(subscriptionHandlers); + }); - it("multiple subscribe calls from the same eventhubconsumerclient use the same owner ID", async () => { - let ownerId: string | undefined = undefined; + it("multiple subscribe calls from the same eventhubconsumerclient use the same owner ID", async () => { + let ownerId: string | undefined = undefined; - validateOptions = (options) => { - should.exist(options.ownerId); + validateOptions = (options) => { + should.exist(options.ownerId); - if (ownerId) { - options.ownerId!.should.equal(ownerId); - ownerId = options.ownerId; - } else { - ownerId = options.ownerId; - } - }; + if (ownerId) { + options.ownerId!.should.equal(ownerId); + ownerId = options.ownerId; + } else { + ownerId = options.ownerId; + } + }; - clientWithCheckpointStore.subscribe(subscriptionHandlers); + clientWithCheckpointStore.subscribe(subscriptionHandlers); + }); }); }); }); diff --git a/sdk/eventhub/event-hubs/test/internal/eventPosition.spec.ts b/sdk/eventhub/event-hubs/test/internal/eventPosition.spec.ts index a03fee4a9cc9..f55d36b2a033 100644 --- a/sdk/eventhub/event-hubs/test/internal/eventPosition.spec.ts +++ b/sdk/eventhub/event-hubs/test/internal/eventPosition.spec.ts @@ -6,143 +6,149 @@ chai.should(); import { earliestEventPosition, latestEventPosition } from "../../src"; import { getEventPositionFilter, validateEventPositions } from "../../src/eventPosition"; +import { testWithServiceTypes } from "../public/utils/testWithServiceTypes"; -describe("EventPosition", function(): void { - describe("happy", function(): void { - it("should create from an offset with inclusive false", function(done: Mocha.Done): void { - const result = "amqp.annotation.x-opt-offset > '1234'"; - const pos = { offset: 1234 }; - result.should.equal(getEventPositionFilter(pos)); - done(); - }); +testWithServiceTypes(() => { + describe("EventPosition", function(): void { + describe("happy", function(): void { + it("should create from an offset with inclusive false", function(done: Mocha.Done): void { + const result = "amqp.annotation.x-opt-offset > '1234'"; + const pos = { offset: 1234 }; + result.should.equal(getEventPositionFilter(pos)); + done(); + }); - it("should create from a sequence with inclusive false", function(done: Mocha.Done): void { - const result = "amqp.annotation.x-opt-sequence-number > '0'"; - const pos = { sequenceNumber: 0 }; - result.should.equal(getEventPositionFilter(pos)); - done(); - }); + it("should create from a sequence with inclusive false", function(done: Mocha.Done): void { + const result = "amqp.annotation.x-opt-sequence-number > '0'"; + const pos = { sequenceNumber: 0 }; + result.should.equal(getEventPositionFilter(pos)); + done(); + }); - it("should create from a sequence with inclusive true", function(done: Mocha.Done): void { - const result = "amqp.annotation.x-opt-sequence-number >= '0'"; - const pos = { sequenceNumber: 0, isInclusive: true }; - result.should.equal(getEventPositionFilter(pos)); - done(); - }); + it("should create from a sequence with inclusive true", function(done: Mocha.Done): void { + const result = "amqp.annotation.x-opt-sequence-number >= '0'"; + const pos = { sequenceNumber: 0, isInclusive: true }; + 
result.should.equal(getEventPositionFilter(pos)); + done(); + }); - it("should create from enqueuedTime with Date as Date", function(done: Mocha.Done): void { - const result = "amqp.annotation.x-opt-enqueued-time > '1537406052971'"; - const d = new Date("2018-09-20T01:14:12.971Z"); - const pos = { enqueuedOn: d }; - result.should.equal(getEventPositionFilter(pos)); - done(); - }); + it("should create from enqueuedTime with Date as Date", function(done: Mocha.Done): void { + const result = "amqp.annotation.x-opt-enqueued-time > '1537406052971'"; + const d = new Date("2018-09-20T01:14:12.971Z"); + const pos = { enqueuedOn: d }; + result.should.equal(getEventPositionFilter(pos)); + done(); + }); - it("should create from enqueuedTime with Date as number", function(done: Mocha.Done): void { - const result = "amqp.annotation.x-opt-enqueued-time > '1537406052971'"; - const d = new Date("2018-09-20T01:14:12.971Z").getTime(); - const pos = { enqueuedOn: d }; - result.should.equal(getEventPositionFilter(pos)); - done(); - }); + it("should create from enqueuedTime with Date as number", function(done: Mocha.Done): void { + const result = "amqp.annotation.x-opt-enqueued-time > '1537406052971'"; + const d = new Date("2018-09-20T01:14:12.971Z").getTime(); + const pos = { enqueuedOn: d }; + result.should.equal(getEventPositionFilter(pos)); + done(); + }); - // it("should create custom filter", function(done: Mocha.Done): void { - // const custom = "amqp.annotation.x-opt-custom > 'foo-bar'"; - // const pos = EventPosition.withCustomFilter(custom); - // custom.should.equal(getEventPositionFilter(pos)); - // done(); - // }); + // it("should create custom filter", function(done: Mocha.Done): void { + // const custom = "amqp.annotation.x-opt-custom > 'foo-bar'"; + // const pos = EventPosition.withCustomFilter(custom); + // custom.should.equal(getEventPositionFilter(pos)); + // done(); + // }); - it("should create from an offset from start", function(done: Mocha.Done): void { - const result = "amqp.annotation.x-opt-offset > '-1'"; - const pos = earliestEventPosition; - result.should.equal(getEventPositionFilter(pos)); - done(); - }); + it("should create from an offset from start", function(done: Mocha.Done): void { + const result = "amqp.annotation.x-opt-offset > '-1'"; + const pos = earliestEventPosition; + result.should.equal(getEventPositionFilter(pos)); + done(); + }); - it("should create from an offset from end", function(done: Mocha.Done): void { - const result = "amqp.annotation.x-opt-offset > '@latest'"; - const pos = latestEventPosition; - result.should.equal(getEventPositionFilter(pos)); - done(); + it("should create from an offset from end", function(done: Mocha.Done): void { + const result = "amqp.annotation.x-opt-offset > '@latest'"; + const pos = latestEventPosition; + result.should.equal(getEventPositionFilter(pos)); + done(); + }); }); - }); - describe("sad", function(): void { - it("throws error when empty object is passed for event position", () => { - const test = function(): void { - validateEventPositions({}); - }; - test.should.throw( - TypeError, - "Invalid value for EventPosition found. Pass an object with either of offset, sequenceNumber or enqueuedOn properties set." - ); - }); - it("throws error when event position is passed with both offset and sequence number set", () => { - const test = function(): void { - validateEventPositions({ offset: 123, sequenceNumber: 456 }); - }; - test.should.throw( - TypeError, - "Invalid value for EventPosition found. 
Set only one of offset, sequenceNumber or enqueuedOn properties." - ); - }); - it("throws error when event position is passed with both offset and enqueuedOn set", () => { - const test = function(): void { - validateEventPositions({ offset: 123, enqueuedOn: 456 }); - }; - test.should.throw( - TypeError, - "Invalid value for EventPosition found. Set only one of offset, sequenceNumber or enqueuedOn properties." - ); - }); - it("throws error when event position is passed with both sequence number and enqueuedOn set", () => { - const test = function(): void { - validateEventPositions({ sequenceNumber: 123, enqueuedOn: 456 }); - }; - test.should.throw( - TypeError, - "Invalid value for EventPosition found. Set only one of offset, sequenceNumber or enqueuedOn properties." - ); - }); - it("throws error when empty object is passed in event position map", () => { - const test = function(): void { - validateEventPositions({ "1": {}, "2": { offset: 123 } }); - }; - test.should.throw( - TypeError, - "Invalid value for EventPosition found. Pass an object with either of offset, sequenceNumber or enqueuedOn properties set." - ); - }); - it("throws error when event position map is passed with both offset and sequence number set", () => { - const test = function(): void { - validateEventPositions({ "1": { offset: 123, sequenceNumber: 456 }, "2": { offset: 123 } }); - }; - test.should.throw( - TypeError, - "Invalid value for EventPosition found. Set only one of offset, sequenceNumber or enqueuedOn properties." - ); - }); - it("throws error when event position map is passed with both offset and enqueuedOn set", () => { - const test = function(): void { - validateEventPositions({ "1": { offset: 123, enqueuedOn: 456 }, "2": { offset: 123 } }); - }; - test.should.throw( - TypeError, - "Invalid value for EventPosition found. Set only one of offset, sequenceNumber or enqueuedOn properties." - ); - }); - it("throws error when event position map is passed with both sequence number and enqueuedOn set", () => { - const test = function(): void { - validateEventPositions({ - "1": { sequenceNumber: 123, enqueuedOn: 456 }, - "2": { offset: 123 } - }); - }; - test.should.throw( - TypeError, - "Invalid value for EventPosition found. Set only one of offset, sequenceNumber or enqueuedOn properties." - ); + describe("sad", function(): void { + it("throws error when empty object is passed for event position", () => { + const test = function(): void { + validateEventPositions({}); + }; + test.should.throw( + TypeError, + "Invalid value for EventPosition found. Pass an object with either of offset, sequenceNumber or enqueuedOn properties set." + ); + }); + it("throws error when event position is passed with both offset and sequence number set", () => { + const test = function(): void { + validateEventPositions({ offset: 123, sequenceNumber: 456 }); + }; + test.should.throw( + TypeError, + "Invalid value for EventPosition found. Set only one of offset, sequenceNumber or enqueuedOn properties." + ); + }); + it("throws error when event position is passed with both offset and enqueuedOn set", () => { + const test = function(): void { + validateEventPositions({ offset: 123, enqueuedOn: 456 }); + }; + test.should.throw( + TypeError, + "Invalid value for EventPosition found. Set only one of offset, sequenceNumber or enqueuedOn properties." 
+ ); + }); + it("throws error when event position is passed with both sequence number and enqueuedOn set", () => { + const test = function(): void { + validateEventPositions({ sequenceNumber: 123, enqueuedOn: 456 }); + }; + test.should.throw( + TypeError, + "Invalid value for EventPosition found. Set only one of offset, sequenceNumber or enqueuedOn properties." + ); + }); + it("throws error when empty object is passed in event position map", () => { + const test = function(): void { + validateEventPositions({ "1": {}, "2": { offset: 123 } }); + }; + test.should.throw( + TypeError, + "Invalid value for EventPosition found. Pass an object with either of offset, sequenceNumber or enqueuedOn properties set." + ); + }); + it("throws error when event position map is passed with both offset and sequence number set", () => { + const test = function(): void { + validateEventPositions({ + "1": { offset: 123, sequenceNumber: 456 }, + "2": { offset: 123 } + }); + }; + test.should.throw( + TypeError, + "Invalid value for EventPosition found. Set only one of offset, sequenceNumber or enqueuedOn properties." + ); + }); + it("throws error when event position map is passed with both offset and enqueuedOn set", () => { + const test = function(): void { + validateEventPositions({ "1": { offset: 123, enqueuedOn: 456 }, "2": { offset: 123 } }); + }; + test.should.throw( + TypeError, + "Invalid value for EventPosition found. Set only one of offset, sequenceNumber or enqueuedOn properties." + ); + }); + it("throws error when event position map is passed with both sequence number and enqueuedOn set", () => { + const test = function(): void { + validateEventPositions({ + "1": { sequenceNumber: 123, enqueuedOn: 456 }, + "2": { offset: 123 } + }); + }; + test.should.throw( + TypeError, + "Invalid value for EventPosition found. Set only one of offset, sequenceNumber or enqueuedOn properties." + ); + }); }); }); }); diff --git a/sdk/eventhub/event-hubs/test/internal/eventProcessor.spec.ts b/sdk/eventhub/event-hubs/test/internal/eventProcessor.spec.ts index 14898c908233..9cface634c82 100644 --- a/sdk/eventhub/event-hubs/test/internal/eventProcessor.spec.ts +++ b/sdk/eventhub/event-hubs/test/internal/eventProcessor.spec.ts @@ -38,286 +38,512 @@ import { AbortController } from "@azure/abort-controller"; import { UnbalancedLoadBalancingStrategy } from "../../src/loadBalancerStrategies/unbalancedStrategy"; import { BalancedLoadBalancingStrategy } from "../../src/loadBalancerStrategies/balancedStrategy"; import { GreedyLoadBalancingStrategy } from "../../src/loadBalancerStrategies/greedyStrategy"; -const env = getEnvVars(); - -describe("Event Processor", function(): void { - const defaultOptions: FullEventProcessorOptions = { - maxBatchSize: 1, - maxWaitTimeInSeconds: 1, - ownerLevel: 0, - loopIntervalInMs: 10000, - loadBalancingStrategy: new UnbalancedLoadBalancingStrategy() - }; - - const service = { - connectionString: env[EnvVarKeys.EVENTHUB_CONNECTION_STRING], - path: env[EnvVarKeys.EVENTHUB_NAME] - }; - let producerClient: EventHubProducerClient; - let consumerClient: EventHubConsumerClient; - - before("validate environment", async function(): Promise { - should.exist( - env[EnvVarKeys.EVENTHUB_CONNECTION_STRING], - "define EVENTHUB_CONNECTION_STRING in your environment before running integration tests." - ); - should.exist( - env[EnvVarKeys.EVENTHUB_NAME], - "define EVENTHUB_NAME in your environment before running integration tests." 
-    );
-  });
-
-  beforeEach("create the client", function() {
-    producerClient = new EventHubProducerClient(service.connectionString, service.path);
-    consumerClient = new EventHubConsumerClient(
-      EventHubConsumerClient.defaultConsumerGroupName,
-      service.connectionString,
-      service.path
-    );
-  });
-
-  afterEach("close the connection", async function(): Promise<void> {
-    await producerClient.close();
-    await consumerClient.close();
-  });
-
-  describe("unit tests", () => {
-    describe("_getStartingPosition", () => {
-      function createEventProcessor(
-        checkpointStore: CheckpointStore,
-        startPosition?: FullEventProcessorOptions["startPosition"]
-      ): EventProcessor {
-        return new EventProcessor(
-          EventHubConsumerClient.defaultConsumerGroupName,
-          consumerClient["_context"],
-          {
-            processEvents: async () => {
-              /* no-op */
+import { testWithServiceTypes } from "../public/utils/testWithServiceTypes";
+import { createMockServer } from "../public/utils/mockService";
+
+testWithServiceTypes((serviceVersion) => {
+  const env = getEnvVars();
+  if (serviceVersion === "mock") {
+    let service: ReturnType<typeof createMockServer>;
+    before("Starting mock service", () => {
+      service = createMockServer();
+      return service.start();
+    });
+
+    after("Stopping mock service", () => {
+      return service?.stop();
+    });
+  }
+
+  describe("Event Processor", function(): void {
+    const defaultOptions: FullEventProcessorOptions = {
+      maxBatchSize: 1,
+      maxWaitTimeInSeconds: 1,
+      ownerLevel: 0,
+      loopIntervalInMs: 10000,
+      loadBalancingStrategy: new UnbalancedLoadBalancingStrategy()
+    };
+
+    const service = {
+      connectionString: env[EnvVarKeys.EVENTHUB_CONNECTION_STRING],
+      path: env[EnvVarKeys.EVENTHUB_NAME]
+    };
+    let producerClient: EventHubProducerClient;
+    let consumerClient: EventHubConsumerClient;
+
+    before("validate environment", async function(): Promise<void> {
+      should.exist(
+        env[EnvVarKeys.EVENTHUB_CONNECTION_STRING],
+        "define EVENTHUB_CONNECTION_STRING in your environment before running integration tests."
+      );
+      should.exist(
+        env[EnvVarKeys.EVENTHUB_NAME],
+        "define EVENTHUB_NAME in your environment before running integration tests."
+ ); + }); + + beforeEach("create the client", function() { + producerClient = new EventHubProducerClient(service.connectionString, service.path); + consumerClient = new EventHubConsumerClient( + EventHubConsumerClient.defaultConsumerGroupName, + service.connectionString, + service.path + ); + }); + + afterEach("close the connection", async function(): Promise { + await producerClient.close(); + await consumerClient.close(); + }); + + describe("unit tests", () => { + describe("_getStartingPosition", () => { + function createEventProcessor( + checkpointStore: CheckpointStore, + startPosition?: FullEventProcessorOptions["startPosition"] + ): EventProcessor { + return new EventProcessor( + EventHubConsumerClient.defaultConsumerGroupName, + consumerClient["_context"], + { + processEvents: async () => { + /* no-op */ + }, + processError: async () => { + /* no-op */ + } }, - processError: async () => { - /* no-op */ + checkpointStore, + { + startPosition, + maxBatchSize: 1, + maxWaitTimeInSeconds: 1, + loadBalancingStrategy: defaultOptions.loadBalancingStrategy, + loopIntervalInMs: defaultOptions.loopIntervalInMs } - }, - checkpointStore, - { - startPosition, - maxBatchSize: 1, - maxWaitTimeInSeconds: 1, - loadBalancingStrategy: defaultOptions.loadBalancingStrategy, - loopIntervalInMs: defaultOptions.loopIntervalInMs - } - ); - } + ); + } - const emptyCheckpointStore = createCheckpointStore([]); + const emptyCheckpointStore = createCheckpointStore([]); - function createCheckpointStore( - checkpointsForTest: Pick[] - ): CheckpointStore { - return { - claimOwnership: async () => { - return []; - }, - listCheckpoints: async () => { - return checkpointsForTest.map((cp) => { - return { - fullyQualifiedNamespace: "not-used-for-this-test", - consumerGroup: "not-used-for-this-test", - eventHubName: "not-used-for-this-test", - offset: cp.offset, - sequenceNumber: cp.sequenceNumber, - partitionId: cp.partitionId - }; + function createCheckpointStore( + checkpointsForTest: Pick[] + ): CheckpointStore { + return { + claimOwnership: async () => { + return []; + }, + listCheckpoints: async () => { + return checkpointsForTest.map((cp) => { + return { + fullyQualifiedNamespace: "not-used-for-this-test", + consumerGroup: "not-used-for-this-test", + eventHubName: "not-used-for-this-test", + offset: cp.offset, + sequenceNumber: cp.sequenceNumber, + partitionId: cp.partitionId + }; + }); + }, + listOwnership: async () => { + return []; + }, + updateCheckpoint: async () => { + /* no-op */ + } + }; + } + + before(() => { + consumerClient["_context"].managementSession!.getEventHubProperties = async () => { + return Promise.resolve({ + name: "boo", + createdOn: new Date(), + partitionIds: ["0", "1"] }); - }, - listOwnership: async () => { - return []; - }, - updateCheckpoint: async () => { - /* no-op */ - } - }; - } + }; + }); - before(() => { - consumerClient["_context"].managementSession!.getEventHubProperties = async () => { - return Promise.resolve({ - name: "boo", - createdOn: new Date(), - partitionIds: ["0", "1"] - }); - }; - }); + it("no checkpoint or user specified default", async () => { + const processor = createEventProcessor(emptyCheckpointStore); - it("no checkpoint or user specified default", async () => { - const processor = createEventProcessor(emptyCheckpointStore); + const eventPosition = await processor["_getStartingPosition"]("0"); + should.equal(isLatestPosition(eventPosition), true); + }); - const eventPosition = await processor["_getStartingPosition"]("0"); - 
should.equal(isLatestPosition(eventPosition), true); - }); + it("has a checkpoint", async () => { + const checkpointStore = createCheckpointStore([ + { + offset: 1009, + sequenceNumber: 1010, + partitionId: "0" + } + ]); - it("has a checkpoint", async () => { - const checkpointStore = createCheckpointStore([ - { - offset: 1009, - sequenceNumber: 1010, - partitionId: "0" - } - ]); + const processor = createEventProcessor( + checkpointStore, + // checkpoints always win over the user's specified position + latestEventPosition + ); - const processor = createEventProcessor( - checkpointStore, - // checkpoints always win over the user's specified position - latestEventPosition - ); + const eventPosition = await processor["_getStartingPosition"]("0"); + eventPosition!.offset!.should.equal(1009); + should.not.exist(eventPosition!.sequenceNumber); + }); - const eventPosition = await processor["_getStartingPosition"]("0"); - eventPosition!.offset!.should.equal(1009); - should.not.exist(eventPosition!.sequenceNumber); - }); + it("checkpoint with falsy values", async () => { + // this caused a bug for us before - it's a perfectly valid offset + // but we were thrown off by its falsy-ness. (actually it was + // sequence number before but the concept is the same) + const checkpointStore = createCheckpointStore([ + { + offset: 0, + sequenceNumber: 0, + partitionId: "0" + } + ]); - it("checkpoint with falsy values", async () => { - // this caused a bug for us before - it's a perfectly valid offset - // but we were thrown off by its falsy-ness. (actually it was - // sequence number before but the concept is the same) - const checkpointStore = createCheckpointStore([ - { - offset: 0, - sequenceNumber: 0, - partitionId: "0" - } - ]); + const processor = createEventProcessor(checkpointStore); + + const eventPosition = await processor["_getStartingPosition"]("0"); + eventPosition!.offset!.should.equal(0); + should.not.exist(eventPosition!.sequenceNumber); + }); - const processor = createEventProcessor(checkpointStore); + it("using a single default event position for any partition", async () => { + const processor = createEventProcessor(emptyCheckpointStore, { offset: 1009 }); - const eventPosition = await processor["_getStartingPosition"]("0"); - eventPosition!.offset!.should.equal(0); - should.not.exist(eventPosition!.sequenceNumber); - }); + const eventPosition = await processor["_getStartingPosition"]("0"); + eventPosition!.offset!.should.equal(1009); + should.not.exist(eventPosition!.sequenceNumber); + }); - it("using a single default event position for any partition", async () => { - const processor = createEventProcessor(emptyCheckpointStore, { offset: 1009 }); + it("using a fallback map", async () => { + const fallbackPositions = { "0": { offset: 2001 } }; + // we'll purposefully omit "1" which should act as "fallback to the fallback" which is earliest() - const eventPosition = await processor["_getStartingPosition"]("0"); - eventPosition!.offset!.should.equal(1009); - should.not.exist(eventPosition!.sequenceNumber); + const processor = createEventProcessor(emptyCheckpointStore, fallbackPositions); + + const eventPositionForPartitionZero = await processor["_getStartingPosition"]("0"); + eventPositionForPartitionZero!.offset!.should.equal(2001); + should.not.exist(eventPositionForPartitionZero!.sequenceNumber); + + const eventPositionForPartitionOne = await processor["_getStartingPosition"]("1"); + should.equal(isLatestPosition(eventPositionForPartitionOne), true); + }); }); - it("using a fallback map", 
async () => { - const fallbackPositions = { "0": { offset: 2001 } }; - // we'll purposefully omit "1" which should act as "fallback to the fallback" which is earliest() + describe("_handleSubscriptionError", () => { + let eventProcessor: EventProcessor; + let userCallback: (() => void) | undefined; + let errorFromCallback: Error | undefined; + let contextFromCallback: PartitionContext | undefined; + + beforeEach(() => { + userCallback = undefined; + errorFromCallback = undefined; + contextFromCallback = undefined; + + // note: we're not starting this event processor so there's nothing to stop() + // it's only here so we can call a few private methods on it. + eventProcessor = new EventProcessor( + EventHubConsumerClient.defaultConsumerGroupName, + consumerClient["_context"], + { + processEvents: async () => { + /* no-op */ + }, + processError: async (err, context) => { + // simulate the user messing up and accidentally throwing an error + // we should just log it and not kill anything. + errorFromCallback = err; + contextFromCallback = context; + + if (userCallback) { + userCallback(); + } + } + }, + new InMemoryCheckpointStore(), + defaultOptions + ); + }); - const processor = createEventProcessor(emptyCheckpointStore, fallbackPositions); + it("error thrown from user's processError handler", async () => { + // the user's error handler will throw an error - won't escape from this function + userCallback = () => { + throw new Error("Error thrown from the user's error handler"); + }; - const eventPositionForPartitionZero = await processor["_getStartingPosition"]("0"); - eventPositionForPartitionZero!.offset!.should.equal(2001); - should.not.exist(eventPositionForPartitionZero!.sequenceNumber); + await eventProcessor["_handleSubscriptionError"](new Error("test error")); - const eventPositionForPartitionOne = await processor["_getStartingPosition"]("1"); - should.equal(isLatestPosition(eventPositionForPartitionOne), true); + errorFromCallback!.message.should.equal("test error"); + contextFromCallback!.partitionId.should.equal(""); + }); + + it("non-useful errors are filtered out", async () => { + // the user's error handler will throw an error - won't escape from this function + + await eventProcessor["_handleSubscriptionError"](new AbortError("test error")); + + // we don't call the user's handler for abort errors + should.not.exist(errorFromCallback); + should.not.exist(contextFromCallback); + }); }); - }); - describe("_handleSubscriptionError", () => { - let eventProcessor: EventProcessor; - let userCallback: (() => void) | undefined; - let errorFromCallback: Error | undefined; - let contextFromCallback: PartitionContext | undefined; + it("if we fail to claim partitions we don't start up new processors", async () => { + const checkpointStore = { + claimOwnershipCalled: false, - beforeEach(() => { - userCallback = undefined; - errorFromCallback = undefined; - contextFromCallback = undefined; + // the important thing is that the EventProcessor won't be able to claim + // any partitions, causing it to go down the "I tried but failed" path. + async claimOwnership(_: PartitionOwnership[]): Promise { + checkpointStore.claimOwnershipCalled = true; + return []; + }, - // note: we're not starting this event processor so there's nothing to stop() - // it's only here so we can call a few private methods on it. 
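// The contract pinned down by the _handleSubscriptionError tests, from the handler
// author's point of view: subscription-level failures reach processError with
// context.partitionId === "", AbortErrors are filtered out before reaching the
// handler, and anything processError itself throws is swallowed rather than tearing
// down the processor. A defensive handler sketch:
async function processError(err: Error, context: { partitionId: string }): Promise<void> {
  const scope = context.partitionId === "" ? "subscription" : `partition ${context.partitionId}`;
  console.error(`[${scope}] ${err.name}: ${err.message}`);
  // Even if this function threw, the event processor would log the error and continue.
}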
- eventProcessor = new EventProcessor( + // (these aren't used for this test) + async listOwnership(): Promise { + return []; + }, + async updateCheckpoint(): Promise { + /* no-op */ + }, + async listCheckpoints(): Promise { + return []; + } + }; + + const pumpManager = { + createPumpCalled: false, + + async createPump() { + pumpManager.createPumpCalled = true; + }, + + async removeAllPumps() { + /* no-op */ + }, + + isReceivingFromPartition() { + return false; + }, + + receivingFromPartitions() { + return []; + } + }; + + const eventProcessor = new EventProcessor( EventHubConsumerClient.defaultConsumerGroupName, consumerClient["_context"], { processEvents: async () => { /* no-op */ }, - processError: async (err, context) => { - // simulate the user messing up and accidentally throwing an error - // we should just log it and not kill anything. - errorFromCallback = err; - contextFromCallback = context; - - if (userCallback) { - userCallback(); - } + processError: async () => { + /* no-op */ } }, - new InMemoryCheckpointStore(), - defaultOptions + checkpointStore, + { + ...defaultOptions, + pumpManager: pumpManager + } ); + + await eventProcessor["_claimOwnership"]( + { + consumerGroup: "cgname", + eventHubName: "ehname", + fullyQualifiedNamespace: "fqdn", + ownerId: "owner", + partitionId: "0" + }, + new AbortController().signal + ); + + // when we fail to claim a partition we should _definitely_ + // not attempt to start a pump. + should.equal(pumpManager.createPumpCalled, false); + + // we'll attempt to claim a partition (but won't succeed) + should.equal(checkpointStore.claimOwnershipCalled, true); }); - it("error thrown from user's processError handler", async () => { - // the user's error handler will throw an error - won't escape from this function - userCallback = () => { - throw new Error("Error thrown from the user's error handler"); + it("abandoned claims are treated as unowned claims", async () => { + const commonFields = { + fullyQualifiedNamespace: "irrelevant namespace", + eventHubName: "irrelevant eventhub name", + consumerGroup: "irrelevant consumer group" }; - await eventProcessor["_handleSubscriptionError"](new Error("test error")); + const handlers = new FakeSubscriptionEventHandlers(); + const checkpointStore = new InMemoryCheckpointStore(); - errorFromCallback!.message.should.equal("test error"); - contextFromCallback!.partitionId.should.equal(""); - }); + const originalClaimedPartitions = await checkpointStore.claimOwnership([ + // abandoned claim + { ...commonFields, partitionId: "1001", ownerId: "", etag: "abandoned etag" }, + // normally owned claim + { ...commonFields, partitionId: "1002", ownerId: "owned partition", etag: "owned etag" } + // 1003 - completely unowned + ]); - it("non-useful errors are filtered out", async () => { - // the user's error handler will throw an error - won't escape from this function + originalClaimedPartitions.sort((a, b) => a.partitionId.localeCompare(b.partitionId)); - await eventProcessor["_handleSubscriptionError"](new AbortError("test error")); + const partitionIds = ["1001", "1002", "1003"]; - // we don't call the user's handler for abort errors - should.not.exist(errorFromCallback); - should.not.exist(contextFromCallback); - }); - }); + const fakeConnectionContext = { + managementSession: { + getEventHubProperties: async () => { + return { + partitionIds + }; + } + }, + config: { + entityPath: commonFields.eventHubName, + host: commonFields.fullyQualifiedNamespace + } + }; - it("if we fail to claim partitions we don't start up 
new processors", async () => { - const checkpointStore = { - claimOwnershipCalled: false, + const ep = new EventProcessor( + commonFields.consumerGroup, + fakeConnectionContext as any, + handlers, + checkpointStore, + { + maxBatchSize: 1, + loopIntervalInMs: 1, + maxWaitTimeInSeconds: 1, + pumpManager: { + async createPump() { + /* no-op */ + }, + async removeAllPumps(): Promise { + /* no-op */ + }, + isReceivingFromPartition() { + return false; + } + }, + loadBalancingStrategy: new BalancedLoadBalancingStrategy(60000) + } + ); - // the important thing is that the EventProcessor won't be able to claim - // any partitions, causing it to go down the "I tried but failed" path. - async claimOwnership(_: PartitionOwnership[]): Promise { - checkpointStore.claimOwnershipCalled = true; - return []; - }, + // allow three iterations through the loop - one for each partition that + // we expect to be claimed + // + // we'll let one more go through just to make sure we're not going to + // pick up an extra surprise partition + // + // There are 6 places where the abort signal is checked during the loop: + // - while condition + // - getEventHubProperties + // - _performLoadBalancing (start) + // - _performLoadBalancing (after listOwnership) + // - _performLoadBalancing (passed to _claimOwnership) + // - delay + const numTimesAbortedIsCheckedInLoop = 6; + await ep["_runLoopWithLoadBalancing"]( + ep["_loadBalancingStrategy"], + triggerAbortedSignalAfterNumCalls(partitionIds.length * numTimesAbortedIsCheckedInLoop) + ); - // (these aren't used for this test) - async listOwnership(): Promise { - return []; - }, - async updateCheckpoint(): Promise { - /* no-op */ - }, - async listCheckpoints(): Promise { - return []; - } - }; + handlers.errors.should.deep.equal([]); - const pumpManager = { - createPumpCalled: false, + const currentOwnerships = await checkpointStore.listOwnership( + commonFields.fullyQualifiedNamespace, + commonFields.eventHubName, + commonFields.consumerGroup + ); + currentOwnerships.sort((a, b) => a.partitionId.localeCompare(b.partitionId)); - async createPump() { - pumpManager.createPumpCalled = true; - }, + currentOwnerships.should.deep.equal([ + { + ...commonFields, + partitionId: "1001", + ownerId: ep.id, + etag: currentOwnerships[0].etag, + lastModifiedTimeInMs: currentOwnerships[0].lastModifiedTimeInMs + }, + // 1002 is not going to be claimed since it's already owned so it should be untouched + originalClaimedPartitions[1], + { + ...commonFields, + partitionId: "1003", + ownerId: ep.id, + etag: currentOwnerships[2].etag, + lastModifiedTimeInMs: currentOwnerships[2].lastModifiedTimeInMs + } + ]); - async removeAllPumps() { - /* no-op */ - }, + // now let's "unclaim" everything by stopping our event processor + await ep.stop(); - isReceivingFromPartition() { - return false; - }, + // sanity check - we were previously modifying the original instances + // in place which...isn't right. 
+      currentOwnerships.should.deep.equal([
+        {
+          ...commonFields,
+          partitionId: "1001",
+          ownerId: ep.id,
+          etag: currentOwnerships[0].etag,
+          lastModifiedTimeInMs: currentOwnerships[0].lastModifiedTimeInMs
+        },
+        // 1002 is not going to be claimed since it's already owned so it should be untouched
+        originalClaimedPartitions[1],
+        {
+          ...commonFields,
+          partitionId: "1003",
+          ownerId: ep.id,
+          etag: currentOwnerships[2].etag,
+          lastModifiedTimeInMs: currentOwnerships[2].lastModifiedTimeInMs
+        }
+      ]);
 
-      receivingFromPartitions() {
-        return [];
-      }
+      const ownershipsAfterStop = await checkpointStore.listOwnership(
+        commonFields.fullyQualifiedNamespace,
+        commonFields.eventHubName,
+        commonFields.consumerGroup
+      );
+      ownershipsAfterStop.sort((a, b) => a.partitionId.localeCompare(b.partitionId));
+
+      ownershipsAfterStop.should.deep.equal([
+        {
+          ...commonFields,
+          partitionId: "1001",
+          ownerId: "",
+          etag: ownershipsAfterStop[0].etag,
+          lastModifiedTimeInMs: ownershipsAfterStop[0].lastModifiedTimeInMs
+        },
+        // 1002 is not going to be claimed since it's already owned so it should be untouched
+        originalClaimedPartitions[1],
+        {
+          ...commonFields,
+          partitionId: "1003",
+          ownerId: "",
+          etag: ownershipsAfterStop[2].etag,
+          lastModifiedTimeInMs: ownershipsAfterStop[2].lastModifiedTimeInMs
+        }
+      ]);
+    });
+  });
+
+    it("claimOwnership throws and is reported to the user", async () => {
+      const errors = [];
+      const partitionIds = await consumerClient.getPartitionIds();
+
+      const faultyCheckpointStore: CheckpointStore = {
+        listOwnership: async () => [],
+        claimOwnership: async () => {
+          throw new Error("Some random failure!");
+        },
+        updateCheckpoint: async () => {
+          /* no-op */
+        },
+        listCheckpoints: async () => []
     };
 
     const eventProcessor = new EventProcessor(
@@ -327,482 +553,219 @@ describe("Event Processor", function(): void {
         processEvents: async () => {
           /* no-op */
         },
-        processError: async () => {
-          /* no-op */
+        processError: async (err, _) => {
+          errors.push(err);
         }
       },
-      checkpointStore,
+      faultyCheckpointStore,
       {
-        ...defaultOptions,
-        pumpManager: pumpManager
+        ...defaultOptions
       }
     );
 
-    await eventProcessor["_claimOwnership"](
-      {
-        consumerGroup: "cgname",
-        eventHubName: "ehname",
-        fullyQualifiedNamespace: "fqdn",
-        ownerId: "owner",
-        partitionId: "0"
-      },
-      new AbortController().signal
-    );
+    // claimOwnership() calls that fail in the runloop of eventProcessor
+    // will get directed to the user's processError handler.
+    eventProcessor.start();
 
-    // when we fail to claim a partition we should _definitely_
-    // not attempt to start a pump.
-    should.equal(pumpManager.createPumpCalled, false);
+    try {
+      await loopUntil({
+        name: "waiting for checkpoint store errors to show up",
+        timeBetweenRunsMs: 1000,
+        maxTimes: 30,
+        until: async () => errors.length !== 0
+      });
 
-    // we'll attempt to claim a partition (but won't succeed)
-    should.equal(checkpointStore.claimOwnershipCalled, true);
+      errors.length.should.equal(partitionIds.length);
+    } finally {
+      // this will also fail - we "abandon" all claimed partitions at
+      // when a processor is stopped (which requires us to claim them
+      // with an empty owner ID).
+      //
+      // Note that this one gets thrown directly from stop(), rather
+      // than reporting to processError() since we have a direct
+      // point of contact with the user.
+      await eventProcessor.stop().should.be.rejectedWith(/Some random failure!/);
+    }
   });
 
-  it("abandoned claims are treated as unowned claims", async () => {
-    const commonFields = {
-      fullyQualifiedNamespace: "irrelevant namespace",
-      eventHubName: "irrelevant eventhub name",
-      consumerGroup: "irrelevant consumer group"
-    };
-
-    const handlers = new FakeSubscriptionEventHandlers();
-    const checkpointStore = new InMemoryCheckpointStore();
-
-    const originalClaimedPartitions = await checkpointStore.claimOwnership([
-      // abandoned claim
-      { ...commonFields, partitionId: "1001", ownerId: "", etag: "abandoned etag" },
-      // normally owned claim
-      { ...commonFields, partitionId: "1002", ownerId: "owned partition", etag: "owned etag" }
-      // 1003 - completely unowned
-    ]);
-
-    originalClaimedPartitions.sort((a, b) => a.partitionId.localeCompare(b.partitionId));
-
-    const partitionIds = ["1001", "1002", "1003"];
-
-    const fakeConnectionContext = {
-      managementSession: {
-        getEventHubProperties: async () => {
-          return {
-            partitionIds
-          };
-        }
-      },
-      config: {
-        entityPath: commonFields.eventHubName,
-        host: commonFields.fullyQualifiedNamespace
-      }
-    };
+    it("errors thrown from the user's handlers are reported to processError()", async () => {
+      const errors = new Set<Error>();
+      const partitionIds = await consumerClient.getPartitionIds();
+
+      const processCloseErrorMessage = "processClose() error";
+      const processEventsErrorMessage = "processEvents() error";
+      const processInitializeErrorMessage = "processInitialize() error";
+      const expectedErrorMessages: string[] = [];
+      for (let i = 0; i < partitionIds.length; i++) {
+        expectedErrorMessages.push(
+          processCloseErrorMessage,
+          processEventsErrorMessage,
+          processInitializeErrorMessage
+        );
+      }
+      expectedErrorMessages.sort();
 
-    const ep = new EventProcessor(
-      commonFields.consumerGroup,
-      fakeConnectionContext as any,
-      handlers,
-      checkpointStore,
+      const eventProcessor = new EventProcessor(
+        EventHubConsumerClient.defaultConsumerGroupName,
+        consumerClient["_context"],
       {
-        maxBatchSize: 1,
-        loopIntervalInMs: 1,
-        maxWaitTimeInSeconds: 1,
-        pumpManager: {
-          async createPump() {
-            /* no-op */
-          },
-          async removeAllPumps(): Promise<void> {
-            /* no-op */
-          },
-          isReceivingFromPartition() {
-            return false;
-          }
+        processClose: async () => {
+          throw new Error(processCloseErrorMessage);
        },
-        loadBalancingStrategy: new BalancedLoadBalancingStrategy(60000)
-      }
-    );
-
-    // allow three iterations through the loop - one for each partition that
-    // we expect to be claimed
-    //
-    // we'll let one more go through just to make sure we're not going to
-    // pick up an extra surprise partition
-    //
-    // There are 6 places where the abort signal is checked during the loop:
-    // - while condition
-    // - getEventHubProperties
-    // - _performLoadBalancing (start)
-    // - _performLoadBalancing (after listOwnership)
-    // - _performLoadBalancing (passed to _claimOwnership)
-    // - delay
-    const numTimesAbortedIsCheckedInLoop = 6;
-    await ep["_runLoopWithLoadBalancing"](
-      ep["_loadBalancingStrategy"],
-      triggerAbortedSignalAfterNumCalls(partitionIds.length * numTimesAbortedIsCheckedInLoop)
-    );
-
-    handlers.errors.should.deep.equal([]);
-
-    const currentOwnerships = await checkpointStore.listOwnership(
-      commonFields.fullyQualifiedNamespace,
-      commonFields.eventHubName,
-      commonFields.consumerGroup
-    );
-    currentOwnerships.sort((a, b) => a.partitionId.localeCompare(b.partitionId));
-
-    currentOwnerships.should.deep.equal([
-      {
-        ...commonFields,
-        partitionId: "1001",
-        ownerId: ep.id,
-        etag: currentOwnerships[0].etag,
-        lastModifiedTimeInMs: currentOwnerships[0].lastModifiedTimeInMs
+        processEvents: async () => {
+          throw new Error(processEventsErrorMessage);
+        },
+        processInitialize: async () => {
+          throw new Error(processInitializeErrorMessage);
+        },
+        processError: async (err, _) => {
+          errors.add(err);
+          throw new Error("These are logged but ignored");
+        }
       },
-      // 1002 is not going to be claimed since it's already owned so it should be untouched
-      originalClaimedPartitions[1],
+      new InMemoryCheckpointStore(),
       {
-        ...commonFields,
-        partitionId: "1003",
-        ownerId: ep.id,
-        etag: currentOwnerships[2].etag,
-        lastModifiedTimeInMs: currentOwnerships[2].lastModifiedTimeInMs
+        ...defaultOptions,
+        startPosition: earliestEventPosition
       }
-    ]);
+    );
 
-    // now let's "unclaim" everything by stopping our event processor
-    await ep.stop();
+    // errors that occur within the user's own event handlers will get
+    // routed to their processError() handler
+    eventProcessor.start();
+    try {
+      await loopUntil({
+        name: "waiting for errors thrown from user's handlers",
+        timeBetweenRunsMs: 1000,
+        maxTimes: 30,
+        until: async () => {
+          return errors.size >= partitionIds.length * 3;
+        }
+      });
+      const messages = [...errors].map((e) => e.message);
+      messages.sort();
+      messages.should.deep.equal(expectedErrorMessages);
+    } finally {
+      await eventProcessor.stop();
+    }
+  });
 
-    // sanity check - we were previously modifying the original instances
-    // in place which...isn't right.
-    currentOwnerships.should.deep.equal([
+    it("should expose an id", async function(): Promise<void> {
+      const processor = new EventProcessor(
+        EventHubConsumerClient.defaultConsumerGroupName,
+        consumerClient["_context"],
       {
-        ...commonFields,
-        partitionId: "1001",
-        ownerId: ep.id,
-        etag: currentOwnerships[0].etag,
-        lastModifiedTimeInMs: currentOwnerships[0].lastModifiedTimeInMs
+        processEvents: async () => {
+          /* no-op */
+        },
+        processError: async () => {
+          /* no-op */
+        }
       },
-      // 1002 is not going to be claimed since it's already owned so it should be untouched
-      originalClaimedPartitions[1],
+      new InMemoryCheckpointStore(),
       {
-        ...commonFields,
-        partitionId: "1003",
-        ownerId: ep.id,
-        etag: currentOwnerships[2].etag,
-        lastModifiedTimeInMs: currentOwnerships[2].lastModifiedTimeInMs
+        ...defaultOptions,
+        startPosition: latestEventPosition
      }
-    ]);
-
-    const ownershipsAfterStop = await checkpointStore.listOwnership(
-      commonFields.fullyQualifiedNamespace,
-      commonFields.eventHubName,
-      commonFields.consumerGroup
    );
-    ownershipsAfterStop.sort((a, b) => a.partitionId.localeCompare(b.partitionId));
 
-    ownershipsAfterStop.should.deep.equal([
-      {
-        ...commonFields,
-        partitionId: "1001",
-        ownerId: "",
-        etag: ownershipsAfterStop[0].etag,
-        lastModifiedTimeInMs: ownershipsAfterStop[0].lastModifiedTimeInMs
-      },
-      // 1002 is not going to be claimed since it's already owned so it should be untouched
-      originalClaimedPartitions[1],
-      {
-        ...commonFields,
-        partitionId: "1003",
-        ownerId: "",
-        etag: ownershipsAfterStop[2].etag,
-        lastModifiedTimeInMs: ownershipsAfterStop[2].lastModifiedTimeInMs
-      }
-    ]);
+      const id = processor.id;
+      id.length.should.be.gt(1);
    });
-  });
-
-  it("claimOwnership throws and is reported to the user", async () => {
-    const errors = [];
-    const partitionIds = await consumerClient.getPartitionIds();
-
-    const faultyCheckpointStore: CheckpointStore = {
-      listOwnership: async () => [],
-      claimOwnership: async () => {
-        throw new Error("Some random failure!");
-      },
-      updateCheckpoint: async () => {
-        /* no-op */
-      },
-      listCheckpoints: async () => []
-    };
 
-    const eventProcessor = new EventProcessor(
-      EventHubConsumerClient.defaultConsumerGroupName,
-      consumerClient["_context"],
-      {
-        processEvents: async () => {
-          /* no-op */
+    it("id can be forced to be a specific value", async function(): Promise<void> {
+      const processor = new EventProcessor(
+        EventHubConsumerClient.defaultConsumerGroupName,
+        consumerClient["_context"],
+        {
+          processEvents: async () => {
+            /* no-op */
+          },
+          processError: async () => {
+            /* no-op */
+          }
         },
-        processError: async (err, _) => {
-          errors.push(err);
-        }
-      },
-      faultyCheckpointStore,
-      {
-        ...defaultOptions
-      }
-    );
-
-    // claimOwnership() calls that fail in the runloop of eventProcessor
-    // will get directed to the user's processError handler.
-    eventProcessor.start();
-
-    try {
-      await loopUntil({
-        name: "waiting for checkpoint store errors to show up",
-        timeBetweenRunsMs: 1000,
-        maxTimes: 30,
-        until: async () => errors.length !== 0
-      });
-
-      errors.length.should.equal(partitionIds.length);
-    } finally {
-      // this will also fail - we "abandon" all claimed partitions at
-      // when a processor is stopped (which requires us to claim them
-      // with an empty owner ID).
-      //
-      // Note that this one gets thrown directly from stop(), rather
-      // than reporting to processError() since we have a direct
-      // point of contact with the user.
-      await eventProcessor.stop().should.be.rejectedWith(/Some random failure!/);
-    }
-  });
-
-  it("errors thrown from the user's handlers are reported to processError()", async () => {
-    const errors = new Set<Error>();
-    const partitionIds = await consumerClient.getPartitionIds();
-
-    const processCloseErrorMessage = "processClose() error";
-    const processEventsErrorMessage = "processEvents() error";
-    const processInitializeErrorMessage = "processInitialize() error";
-    const expectedErrorMessages: string[] = [];
-    for (let i = 0; i < partitionIds.length; i++) {
-      expectedErrorMessages.push(
-        processCloseErrorMessage,
-        processEventsErrorMessage,
-        processInitializeErrorMessage
+        new InMemoryCheckpointStore(),
+        { ...defaultOptions, ownerId: "hello", startPosition: latestEventPosition }
       );
-    }
-    expectedErrorMessages.sort();
-
-    const eventProcessor = new EventProcessor(
-      EventHubConsumerClient.defaultConsumerGroupName,
-      consumerClient["_context"],
-      {
-        processClose: async () => {
-          throw new Error(processCloseErrorMessage);
-        },
-        processEvents: async () => {
-          throw new Error(processEventsErrorMessage);
-        },
-        processInitialize: async () => {
-          throw new Error(processInitializeErrorMessage);
-        },
-        processError: async (err, _) => {
-          errors.add(err);
-          throw new Error("These are logged but ignored");
-        }
-      },
-      new InMemoryCheckpointStore(),
-      {
-        ...defaultOptions,
-        startPosition: earliestEventPosition
-      }
-    );
-
-    // errors that occur within the user's own event handlers will get
-    // routed to their processError() handler
-    eventProcessor.start();
-    try {
-      await loopUntil({
-        name: "waiting for errors thrown from user's handlers",
-        timeBetweenRunsMs: 1000,
-        maxTimes: 30,
-        until: async () => {
-          return errors.size >= partitionIds.length * 3;
-        }
-      });
-      const messages = [...errors].map((e) => e.message);
-      messages.sort();
-      messages.should.deep.equal(expectedErrorMessages);
-    } finally {
-      await eventProcessor.stop();
-    }
-  });
 
-  it("should expose an id", async function(): Promise<void> {
-    const processor = new EventProcessor(
-      EventHubConsumerClient.defaultConsumerGroupName,
-      consumerClient["_context"],
-      {
-        processEvents: async () => {
-          /* no-op */
-        },
-        processError: async () => {
-          /* no-op */
-        }
-      },
-      new InMemoryCheckpointStore(),
-      {
-        ...defaultOptions,
-        startPosition: latestEventPosition
-      }
-    );
-
-    const id = processor.id;
-    id.length.should.be.gt(1);
-  });
-
-  it("id can be forced to be a specific value", async function(): Promise<void> {
-    const processor = new EventProcessor(
-      EventHubConsumerClient.defaultConsumerGroupName,
-      consumerClient["_context"],
-      {
-        processEvents: async () => {
-          /* no-op */
-        },
-        processError: async () => {
-          /* no-op */
-        }
-      },
-      new InMemoryCheckpointStore(),
-      { ...defaultOptions, ownerId: "hello", startPosition: latestEventPosition }
-    );
-
-    processor.id.should.equal("hello");
-  });
-
-  it("should treat consecutive start invocations as idempotent", async function(): Promise<void> {
-    const partitionIds = await producerClient.getPartitionIds();
-
-    // ensure we have at least 2 partitions
-    partitionIds.length.should.gte(2);
-
-    const {
-      subscriptionEventHandler,
-      startPosition
-    } = await SubscriptionHandlerForTests.startingFromHere(producerClient);
-
-    const processor = new EventProcessor(
-      EventHubConsumerClient.defaultConsumerGroupName,
-      consumerClient["_context"],
-      subscriptionEventHandler,
-      new InMemoryCheckpointStore(),
-      {
-        ...defaultOptions,
-        startPosition: startPosition
-      }
-    );
+      processor.id.should.equal("hello");
+    });
 
-    processor.start();
-    processor.start();
-    processor.start();
+    it("should treat consecutive start invocations as idempotent", async function(): Promise<void> {
+      const partitionIds = await producerClient.getPartitionIds();
 
-    const expectedMessages = await sendOneMessagePerPartition(partitionIds, producerClient);
-    const receivedEvents = await subscriptionEventHandler.waitForEvents(partitionIds);
+      // ensure we have at least 2 partitions
+      partitionIds.length.should.gte(2);
 
-    // shutdown the processor
-    await processor.stop();
+      const {
+        subscriptionEventHandler,
+        startPosition
+      } = await SubscriptionHandlerForTests.startingFromHere(producerClient);
 
-    receivedEvents.should.deep.equal(expectedMessages);
+      const processor = new EventProcessor(
+        EventHubConsumerClient.defaultConsumerGroupName,
+        consumerClient["_context"],
+        subscriptionEventHandler,
+        new InMemoryCheckpointStore(),
+        {
+          ...defaultOptions,
+          startPosition: startPosition
        }
-    subscriptionEventHandler.hasErrors(partitionIds).should.equal(false);
-    subscriptionEventHandler.allShutdown(partitionIds).should.equal(true);
-  });
+      );
 
-  it("should not throw if stop is called without start", async function(): Promise<void> {
-    let didPartitionProcessorStart = false;
+      processor.start();
+      processor.start();
+      processor.start();
 
-    const processor = new EventProcessor(
-      EventHubConsumerClient.defaultConsumerGroupName,
-      consumerClient["_context"],
-      {
-        processInitialize: async () => {
-          didPartitionProcessorStart = true;
-        },
-        processEvents: async () => {
-          /* no-op */
-        },
-        processError: async () => {
-          /* no-op */
+      const expectedMessages = await sendOneMessagePerPartition(partitionIds, producerClient);
+      const receivedEvents = await subscriptionEventHandler.waitForEvents(partitionIds);
-        }
-      },
-      new InMemoryCheckpointStore(),
-      {
-        ...defaultOptions,
-        startPosition: latestEventPosition
-      }
-    );
+      // shutdown the processor
+      await processor.stop();
 
-    // shutdown the processor
-    await processor.stop();
+      receivedEvents.should.deep.equal(expectedMessages);
 
-    didPartitionProcessorStart.should.equal(false);
-  });
+      subscriptionEventHandler.hasErrors(partitionIds).should.equal(false);
+      subscriptionEventHandler.allShutdown(partitionIds).should.equal(true);
+    });
 
-  it("should support start after stopping", async function(): Promise<void> {
-    const partitionIds = await producerClient.getPartitionIds();
+    it("should not throw if stop is called without start", async function(): Promise<void> {
+      let didPartitionProcessorStart = false;
 
-    // ensure we have at least 2 partitions
-    partitionIds.length.should.gte(2);
+      const processor = new EventProcessor(
+        EventHubConsumerClient.defaultConsumerGroupName,
+        consumerClient["_context"],
+        {
+          processInitialize: async () => {
+            didPartitionProcessorStart = true;
+          },
+          processEvents: async () => {
+            /* no-op */
+          },
+          processError: async () => {
+            /* no-op */
+          }
+        },
+        new InMemoryCheckpointStore(),
+        {
+          ...defaultOptions,
+          startPosition: latestEventPosition
+        }
+      );
 
-    const {
-      subscriptionEventHandler,
-      startPosition
-    } = await SubscriptionHandlerForTests.startingFromHere(producerClient);
+      // shutdown the processor
+      await processor.stop();
 
-    const processor = new EventProcessor(
-      EventHubConsumerClient.defaultConsumerGroupName,
-      consumerClient["_context"],
-      subscriptionEventHandler,
-      new InMemoryCheckpointStore(),
-      {
-        ...defaultOptions,
-        startPosition: startPosition
-      }
-    );
+      didPartitionProcessorStart.should.equal(false);
+    });
 
-    loggerForTest(`Starting processor for the first time`);
-    processor.start();
+    it("should support start after stopping", async function(): Promise<void> {
+      const partitionIds = await producerClient.getPartitionIds();
 
-    const expectedMessages = await sendOneMessagePerPartition(partitionIds, producerClient);
-    const receivedEvents = await subscriptionEventHandler.waitForEvents(partitionIds);
+      // ensure we have at least 2 partitions
+      partitionIds.length.should.gte(2);
 
-    loggerForTest(`Stopping processor for the first time`);
-    await processor.stop();
-
-    receivedEvents.should.deep.equal(expectedMessages);
-
-    subscriptionEventHandler.hasErrors(partitionIds).should.equal(false);
-    subscriptionEventHandler.allShutdown(partitionIds).should.equal(true);
-
-    // validate correct events captured for each partition
-
-    // start it again
-    loggerForTest(`Starting processor again`);
-    subscriptionEventHandler.clear();
-
-    processor.start();
-
-    await subscriptionEventHandler.waitUntilInitialized(partitionIds);
-
-    loggerForTest(`Stopping processor again`);
-    await processor.stop();
-
-    subscriptionEventHandler.hasErrors(partitionIds).should.equal(false);
-    subscriptionEventHandler.allShutdown(partitionIds).should.equal(true);
-  });
-
-  describe("Partition processor", function(): void {
-    it("should support processing events across multiple partitions", async function(): Promise<
-      void
-    > {
-      const partitionIds = await producerClient.getPartitionIds();
       const {
         subscriptionEventHandler,
         startPosition
@@ -819,652 +782,588 @@ describe("Event Processor", function(): void {
        }
      );
 
+      loggerForTest(`Starting processor for the first time`);
      processor.start();
 
      const expectedMessages = await sendOneMessagePerPartition(partitionIds, producerClient);
      const receivedEvents = await subscriptionEventHandler.waitForEvents(partitionIds);
 
-      // shutdown the processor
+      loggerForTest(`Stopping processor for the first time`);
      await processor.stop();
 
-      subscriptionEventHandler.hasErrors(partitionIds).should.equal(false);
-      subscriptionEventHandler.allShutdown(partitionIds).should.equal(true);
-
-      receivedEvents.should.deep.equal(expectedMessages);
-    });
-  });
-
-  describe("InMemory Partition Manager", function(): void {
-    it("should claim ownership, get a list of ownership and update checkpoint", async function(): Promise<
-      void
-    > {
-      const inMemoryCheckpointStore = new InMemoryCheckpointStore();
-      const partitionOwnership1: PartitionOwnership = {
-        fullyQualifiedNamespace: "myNamespace.servicebus.windows.net",
-        eventHubName: "myEventHub",
-        consumerGroup: EventHubConsumerClient.defaultConsumerGroupName,
-        ownerId: generate_uuid(),
-        partitionId: "0"
-      };
-      const partitionOwnership2: PartitionOwnership = {
-        fullyQualifiedNamespace: "myNamespace.servicebus.windows.net",
-        eventHubName: "myEventHub",
-        consumerGroup: EventHubConsumerClient.defaultConsumerGroupName,
-        ownerId: generate_uuid(),
-        partitionId: "1"
-      };
-      const partitionOwnership = await inMemoryCheckpointStore.claimOwnership([
-        partitionOwnership1,
-        partitionOwnership2
-      ]);
-      partitionOwnership.length.should.equals(2);
-      const ownershiplist = await inMemoryCheckpointStore.listOwnership(
-        "myNamespace.servicebus.windows.net",
-        "myEventHub",
-        EventHubConsumerClient.defaultConsumerGroupName
-      );
-      ownershiplist.length.should.equals(2);
-
-      const checkpoint: Checkpoint = {
-        fullyQualifiedNamespace: "myNamespace.servicebus.windows.net",
-        eventHubName: "myEventHub",
-        consumerGroup: EventHubConsumerClient.defaultConsumerGroupName,
-        partitionId: "0",
-        sequenceNumber: 10,
-        offset: 50
-      };
-
-      await inMemoryCheckpointStore.updateCheckpoint(checkpoint);
-      const partitionOwnershipList = await inMemoryCheckpointStore.listOwnership(
-        "myNamespace.servicebus.windows.net",
-        "myEventHub",
-        EventHubConsumerClient.defaultConsumerGroupName
-      );
-      partitionOwnershipList[0].partitionId.should.equals(checkpoint.partitionId);
-      partitionOwnershipList[0].fullyQualifiedNamespace!.should.equals(
-        "myNamespace.servicebus.windows.net"
-      );
-      partitionOwnershipList[0].eventHubName!.should.equals("myEventHub");
-      partitionOwnershipList[0].consumerGroup!.should.equals(
-        EventHubConsumerClient.defaultConsumerGroupName
-      );
-    });
-    it("should receive events from the checkpoint", async function(): Promise<void> {
-      const partitionIds = await producerClient.getPartitionIds();
-
-      // ensure we have at least 2 partitions
-      partitionIds.length.should.gte(2);
-
-      let checkpointMap = new Map<string, ReceivedEventData[]>();
-      partitionIds.forEach((id) => checkpointMap.set(id, []));
+      subscriptionEventHandler.hasErrors(partitionIds).should.equal(false);
+      subscriptionEventHandler.allShutdown(partitionIds).should.equal(true);
 
-      let didError = false;
-      let processedAtLeastOneEvent = new Set<string>();
-      const checkpointSequenceNumbers: Map<string, number> = new Map();
+      // validate correct events captured for each partition
 
-      let partionCount: { [x: string]: number } = {};
+      // start it again
+      loggerForTest(`Starting processor again`);
+      subscriptionEventHandler.clear();
 
-      class FooPartitionProcessor {
-        async processEvents(events: ReceivedEventData[], context: PartitionContext): Promise<void> {
-          processedAtLeastOneEvent.add(context.partitionId);
+      processor.start();
 
-          if (!partionCount[context.partitionId]) {
-            partionCount[context.partitionId] = 0;
-          }
-          partionCount[context.partitionId]++;
+      await subscriptionEventHandler.waitUntilInitialized(partitionIds);
 
-          const existingEvents = checkpointMap.get(context.partitionId)!;
+      loggerForTest(`Stopping processor again`);
+      await processor.stop();
 
-          for (const event of events) {
-            debug("Received event: '%s' from partition: '%s'", event.body, context.partitionId);
+      subscriptionEventHandler.hasErrors(partitionIds).should.equal(false);
+      subscriptionEventHandler.allShutdown(partitionIds).should.equal(true);
+    });
 
-            if (partionCount[context.partitionId] <= 50) {
-              checkpointSequenceNumbers.set(context.partitionId, event.sequenceNumber);
-              await context.updateCheckpoint(event);
-              existingEvents.push(event);
-            }
+    describe("Partition processor", function(): void {
+      it("should support processing events across multiple partitions", async function(): Promise<
+        void
+      > {
+        const partitionIds = await producerClient.getPartitionIds();
+        const {
+          subscriptionEventHandler,
+          startPosition
+        } = await SubscriptionHandlerForTests.startingFromHere(producerClient);
+
+        const processor = new EventProcessor(
+          EventHubConsumerClient.defaultConsumerGroupName,
+          consumerClient["_context"],
+          subscriptionEventHandler,
+          new InMemoryCheckpointStore(),
+          {
+            ...defaultOptions,
+            startPosition: startPosition
          }
-        }
-        async processError(): Promise<void> {
-          didError = true;
-        }
-      }
+        );
 
-      const inMemoryCheckpointStore = new InMemoryCheckpointStore();
-      const processor1 = new EventProcessor(
-        EventHubConsumerClient.defaultConsumerGroupName,
-        consumerClient["_context"],
-        new FooPartitionProcessor(),
-        inMemoryCheckpointStore,
-        {
-          ...defaultOptions,
-          startPosition: earliestEventPosition
-        }
-      );
+        processor.start();
 
-      // start first processor
-      processor1.start();
+        const expectedMessages = await sendOneMessagePerPartition(partitionIds, producerClient);
+        const receivedEvents = await subscriptionEventHandler.waitForEvents(partitionIds);
 
-      // create messages
-      const expectedMessagePrefix = "EventProcessor test - checkpoint - ";
-      const events: EventData[] = [];
+        // shutdown the processor
+        await processor.stop();
 
-      for (const partitionId of partitionIds) {
-        for (let index = 1; index <= 100; index++) {
-          events.push({ body: `${expectedMessagePrefix} ${index} ${partitionId}` });
-        }
-        await producerClient.sendBatch(events, { partitionId });
-      }
+        subscriptionEventHandler.hasErrors(partitionIds).should.equal(false);
+        subscriptionEventHandler.allShutdown(partitionIds).should.equal(true);
 
-      // set a delay to give a consumers a chance to receive a message
-      while (checkpointSequenceNumbers.size !== partitionIds.length) {
-        await delay(5000);
-      }
+        receivedEvents.should.deep.equal(expectedMessages);
+      });
+    });
 
-      // shutdown the first processor
-      await processor1.stop();
+    describe("InMemory Partition Manager", function(): void {
+      it("should claim ownership, get a list of ownership and update checkpoint", async function(): Promise<
+        void
+      > {
+        const inMemoryCheckpointStore = new InMemoryCheckpointStore();
+        const partitionOwnership1: PartitionOwnership = {
+          fullyQualifiedNamespace: "myNamespace.servicebus.windows.net",
+          eventHubName: "myEventHub",
+          consumerGroup: EventHubConsumerClient.defaultConsumerGroupName,
+          ownerId: generate_uuid(),
+          partitionId: "0"
+        };

-      const lastEventsReceivedFromProcessor1: ReceivedEventData[] = [];
-      let index = 0;
+        const partitionOwnership2: PartitionOwnership = {
+          fullyQualifiedNamespace: "myNamespace.servicebus.windows.net",
+          eventHubName: "myEventHub",
+          consumerGroup: EventHubConsumerClient.defaultConsumerGroupName,
+          ownerId: generate_uuid(),
+          partitionId: "1"
+        };
+        const partitionOwnership = await inMemoryCheckpointStore.claimOwnership([
+          partitionOwnership1,
+          partitionOwnership2
+        ]);
+        partitionOwnership.length.should.equals(2);
+        const ownershiplist = await inMemoryCheckpointStore.listOwnership(
+          "myNamespace.servicebus.windows.net",
+          "myEventHub",
+          EventHubConsumerClient.defaultConsumerGroupName
+        );
+        ownershiplist.length.should.equals(2);
+
+        const checkpoint: Checkpoint = {
+          fullyQualifiedNamespace: "myNamespace.servicebus.windows.net",
+          eventHubName: "myEventHub",
+          consumerGroup: EventHubConsumerClient.defaultConsumerGroupName,
+          partitionId: "0",
+          sequenceNumber: 10,
+          offset: 50
+        };
 
-      for (const partitionId of partitionIds) {
-        const receivedEvents = checkpointMap.get(partitionId)!;
-        lastEventsReceivedFromProcessor1[index++] = receivedEvents[receivedEvents.length - 1];
-      }
+        await inMemoryCheckpointStore.updateCheckpoint(checkpoint);
+        const partitionOwnershipList = await inMemoryCheckpointStore.listOwnership(
+          "myNamespace.servicebus.windows.net",
+          "myEventHub",
+          EventHubConsumerClient.defaultConsumerGroupName
+        );
+        partitionOwnershipList[0].partitionId.should.equals(checkpoint.partitionId);
+        partitionOwnershipList[0].fullyQualifiedNamespace!.should.equals(
+          "myNamespace.servicebus.windows.net"
+        );
+        partitionOwnershipList[0].eventHubName!.should.equals("myEventHub");
+        partitionOwnershipList[0].consumerGroup!.should.equals(
+          EventHubConsumerClient.defaultConsumerGroupName
+        );
+      });
 
-      checkpointMap = new Map();
-      partitionIds.forEach((id) => checkpointMap.set(id, []));
-      partionCount = {};
-      processedAtLeastOneEvent = new Set();
+      it("should receive events from the checkpoint", async function(): Promise<void> {
+        const partitionIds = await producerClient.getPartitionIds();
 
-      const processor2 = new EventProcessor(
-        EventHubConsumerClient.defaultConsumerGroupName,
-        consumerClient["_context"],
-        new FooPartitionProcessor(),
-        inMemoryCheckpointStore,
-        { ...defaultOptions, startPosition: earliestEventPosition }
-      );
+        // ensure we have at least 2 partitions
+        partitionIds.length.should.gte(2);
 
-      const checkpoints = await inMemoryCheckpointStore.listCheckpoints(
-        consumerClient.fullyQualifiedNamespace,
-        consumerClient.eventHubName,
-        EventHubConsumerClient.defaultConsumerGroupName
-      );
+        let checkpointMap = new Map<string, ReceivedEventData[]>();
+        partitionIds.forEach((id) => checkpointMap.set(id, []));
 
-      checkpoints.sort((a, b) => a.partitionId.localeCompare(b.partitionId));
+        let didError = false;
+        let processedAtLeastOneEvent = new Set<string>();
+        const checkpointSequenceNumbers: Map<string, number> = new Map();
 
-      for (const checkpoint of checkpoints) {
-        const expectedSequenceNumber = checkpointSequenceNumbers.get(checkpoint.partitionId);
-        should.exist(expectedSequenceNumber);
+        let partionCount: { [x: string]: number } = {};
 
-        expectedSequenceNumber!.should.equal(checkpoint.sequenceNumber);
-      }
+        class FooPartitionProcessor {
+          async processEvents(
+            events: ReceivedEventData[],
+            context: PartitionContext
+          ): Promise<void> {
+            processedAtLeastOneEvent.add(context.partitionId);
 
-      // start second processor
-      processor2.start();
+            if (!partionCount[context.partitionId]) {
+              partionCount[context.partitionId] = 0;
+            }
+            partionCount[context.partitionId]++;
 
-      // set a delay to give a consumers a chance to receive a message
-      while (processedAtLeastOneEvent.size !== partitionIds.length) {
-        await delay(5000);
-      }
+            const existingEvents = checkpointMap.get(context.partitionId)!;
 
-      // shutdown the second processor
-      await processor2.stop();
+            for (const event of events) {
+              debug("Received event: '%s' from partition: '%s'", event.body, context.partitionId);
 
-      index = 0;
-      const firstEventsReceivedFromProcessor2: ReceivedEventData[] = [];
-      for (const partitionId of partitionIds) {
-        const receivedEvents = checkpointMap.get(partitionId)!;
-        firstEventsReceivedFromProcessor2[index++] = receivedEvents[0];
-      }
+              if (partionCount[context.partitionId] <= 50) {
+                checkpointSequenceNumbers.set(context.partitionId, event.sequenceNumber);
+                await context.updateCheckpoint(event);
+                existingEvents.push(event);
+              }
+            }
+          }
+          async processError(): Promise<void> {
+            didError = true;
+          }
+        }
 
-      didError.should.equal(false);
-      index = 0;
-      // validate correct events captured for each partition using checkpoint
-      for (const partitionId of partitionIds) {
-        debug(`Validate events for partition: ${partitionId}`);
-        lastEventsReceivedFromProcessor1[index].sequenceNumber.should.equal(
-          firstEventsReceivedFromProcessor2[index].sequenceNumber - 1
+        const inMemoryCheckpointStore = new InMemoryCheckpointStore();
+        const processor1 = new EventProcessor(
+          EventHubConsumerClient.defaultConsumerGroupName,
+          consumerClient["_context"],
+          new FooPartitionProcessor(),
+          inMemoryCheckpointStore,
+          {
+            ...defaultOptions,
+            startPosition: earliestEventPosition
+          }
        );
-        index++;
-      }
-    });
 
-    it("makes copies and never returns internal instances directly", async () => {
-      const checkpointStore = new InMemoryCheckpointStore();
-      const allObjects = new Set();
+        // start first processor
+        processor1.start();
 
-      const assertUnique = (...objects: any[]): void => {
-        const size = allObjects.size;
+        // create messages
+        const expectedMessagePrefix = "EventProcessor test - checkpoint - ";
+        const events: EventData[] = [];
 
-        for (const obj of objects) {
-          allObjects.add(obj);
-          size.should.be.lessThan(allObjects.size);
+        for (const partitionId of partitionIds) {
+          for (let index = 1; index <= 100; index++) {
+            events.push({ body: `${expectedMessagePrefix} ${index} ${partitionId}` });
+          }
+          await producerClient.sendBatch(events, { partitionId });
        }
-      };
-
-      const basicProperties = {
-        consumerGroup: "initial consumer group",
-        eventHubName: "initial event hub name",
-        fullyQualifiedNamespace: "initial fully qualified namespace"
-      };
+        // set a delay to give a consumers a chance to receive a message
+        while (checkpointSequenceNumbers.size !== partitionIds.length) {
+          await delay(5000);
+        }
 
-      const originalPartitionOwnership = {
-        ...basicProperties,
-        ownerId: "initial owner ID",
-        partitionId: "1001"
-      };
+        // shutdown the first processor
+        await processor1.stop();
 
-      const copyOfPartitionOwnership = {
-        ...originalPartitionOwnership
-      };
+        const lastEventsReceivedFromProcessor1: ReceivedEventData[] = [];
+        let index = 0;
 
-      assertUnique(originalPartitionOwnership);
+        for (const partitionId of partitionIds) {
+          const receivedEvents = checkpointMap.get(partitionId)!;
+          lastEventsReceivedFromProcessor1[index++] = receivedEvents[receivedEvents.length - 1];
+        }
 
-      for (let i = 0; i < 2; ++i) {
-        const ownerships = await checkpointStore.claimOwnership([originalPartitionOwnership]);
+        checkpointMap = new Map();
+        partitionIds.forEach((id) => checkpointMap.set(id, []));
+        partionCount = {};
+        processedAtLeastOneEvent = new Set();
 
-        // second sanity check - we were also modifying the input parameter
-        // (which was also bad)
-        copyOfPartitionOwnership.should.deep.equal(originalPartitionOwnership);
+        const processor2 = new EventProcessor(
+          EventHubConsumerClient.defaultConsumerGroupName,
+          consumerClient["_context"],
+          new FooPartitionProcessor(),
+
inMemoryCheckpointStore, + { ...defaultOptions, startPosition: earliestEventPosition } ); - assertUnique(...ownerships); - } - const originalCheckpoint: Checkpoint = { - ...basicProperties, - sequenceNumber: 1, - partitionId: "1", - offset: 101 - }; + const checkpoints = await inMemoryCheckpointStore.listCheckpoints( + consumerClient.fullyQualifiedNamespace, + consumerClient.eventHubName, + EventHubConsumerClient.defaultConsumerGroupName + ); - const copyOfOriginalCheckpoint = { - ...originalCheckpoint - }; + checkpoints.sort((a, b) => a.partitionId.localeCompare(b.partitionId)); - await checkpointStore.updateCheckpoint(originalCheckpoint); + for (const checkpoint of checkpoints) { + const expectedSequenceNumber = checkpointSequenceNumbers.get(checkpoint.partitionId); + should.exist(expectedSequenceNumber); - // checking that we don't modify input parameters - copyOfOriginalCheckpoint.should.deep.equal(originalCheckpoint); + expectedSequenceNumber!.should.equal(checkpoint.sequenceNumber); + } - for (let i = 0; i < 2; ++i) { - const checkpoints = await checkpointStore.listCheckpoints( - basicProperties.fullyQualifiedNamespace, - basicProperties.eventHubName, - basicProperties.consumerGroup - ); - assertUnique(...checkpoints); - } - }); - }); + // start second processor + processor2.start(); - describe("Load balancing", function(): void { - beforeEach("validate partitions", async function(): Promise { - const partitionIds = await producerClient.getPartitionIds(); - // ensure we have at least 3 partitions - partitionIds.length.should.gte( - 3, - "The load balancing tests must be ran on an Event Hub with at least 3 partitions" - ); - }); + // set a delay to give a consumers a chance to receive a message + while (processedAtLeastOneEvent.size !== partitionIds.length) { + await delay(5000); + } - it("should 'steal' partitions until all the processors have reached a steady-state (BalancedLoadBalancingStrategy)", async function(): Promise< - void - > { - loggerForTest("starting up the stealing test"); + // shutdown the second processor + await processor2.stop(); - const processorByName: Dictionary = {}; - const checkpointStore = new InMemoryCheckpointStore(); - const partitionIds = await producerClient.getPartitionIds(); - const partitionOwnershipArr = new Set(); - - const partitionResultsMap = new Map< - string, - { events: string[]; initialized: boolean; closeReason?: CloseReason } - >(); - partitionIds.forEach((id) => partitionResultsMap.set(id, { events: [], initialized: false })); - let didGetReceiverDisconnectedError = false; - - // The partitionProcess will need to add events to the partitionResultsMap as they are received - class FooPartitionProcessor implements Required { - async processInitialize(context: PartitionContext): Promise { - loggerForTest(`processInitialize(${context.partitionId})`); - partitionResultsMap.get(context.partitionId)!.initialized = true; + index = 0; + const firstEventsReceivedFromProcessor2: ReceivedEventData[] = []; + for (const partitionId of partitionIds) { + const receivedEvents = checkpointMap.get(partitionId)!; + firstEventsReceivedFromProcessor2[index++] = receivedEvents[0]; } - async processClose(reason: CloseReason, context: PartitionContext): Promise { - loggerForTest(`processClose(${context.partitionId})`); - partitionResultsMap.get(context.partitionId)!.closeReason = reason; - } - async processEvents(events: ReceivedEventData[], context: PartitionContext): Promise { - partitionOwnershipArr.add(context.partitionId); - const existingEvents = 
partitionResultsMap.get(context.partitionId)!.events; - existingEvents.push(...events.map((event) => event.body)); + + didError.should.equal(false); + index = 0; + // validate correct events captured for each partition using checkpoint + for (const partitionId of partitionIds) { + debug(`Validate events for partition: ${partitionId}`); + lastEventsReceivedFromProcessor1[index].sequenceNumber.should.equal( + firstEventsReceivedFromProcessor2[index].sequenceNumber - 1 + ); + index++; } - async processError(err: Error, context: PartitionContext): Promise { - loggerForTest(`processError(${context.partitionId})`); - const errorName = (err as any).code; - if (errorName === "ReceiverDisconnectedError") { - didGetReceiverDisconnectedError = true; + }); + + it("makes copies and never returns internal instances directly", async () => { + const checkpointStore = new InMemoryCheckpointStore(); + const allObjects = new Set(); + + const assertUnique = (...objects: any[]): void => { + const size = allObjects.size; + + for (const obj of objects) { + allObjects.add(obj); + size.should.be.lessThan(allObjects.size); } - } - } + }; - // create messages - const expectedMessagePrefix = "EventProcessor test - multiple partitions - "; - for (const partitionId of partitionIds) { - await producerClient.sendBatch([{ body: expectedMessagePrefix + partitionId }], { - partitionId - }); - } + const basicProperties = { + consumerGroup: "initial consumer group", + eventHubName: "initial event hub name", + fullyQualifiedNamespace: "initial fully qualified namespace" + }; - const processor1LoadBalancingInterval = { - loopIntervalInMs: 1000 - }; + const originalPartitionOwnership = { + ...basicProperties, + ownerId: "initial owner ID", + partitionId: "1001" + }; - // working around a potential deadlock - this allows `processor-2` to more - // aggressively pursue getting its required partitions and avoid being in - // lockstep with `processor-1` - const processor2LoadBalancingInterval = { - loopIntervalInMs: processor1LoadBalancingInterval.loopIntervalInMs / 2 - }; + const copyOfPartitionOwnership = { + ...originalPartitionOwnership + }; - processorByName[`processor-1`] = new EventProcessor( - EventHubConsumerClient.defaultConsumerGroupName, - consumerClient["_context"], - new FooPartitionProcessor(), - checkpointStore, - { - ...defaultOptions, - startPosition: earliestEventPosition, - ...processor1LoadBalancingInterval, - loadBalancingStrategy: new BalancedLoadBalancingStrategy(60000) - } - ); + assertUnique(originalPartitionOwnership); - processorByName[`processor-1`].start(); + for (let i = 0; i < 2; ++i) { + const ownerships = await checkpointStore.claimOwnership([originalPartitionOwnership]); - await loopUntil({ - name: "All partitions are owned", - maxTimes: 60, - timeBetweenRunsMs: 1000, - until: async () => partitionOwnershipArr.size === partitionIds.length, - errorMessageFn: () => `${partitionOwnershipArr.size}/${partitionIds.length}` - }); + // second sanity check - we were also modifying the input parameter + // (which was also bad) + copyOfPartitionOwnership.should.deep.equal(originalPartitionOwnership); - processorByName[`processor-2`] = new EventProcessor( - EventHubConsumerClient.defaultConsumerGroupName, - consumerClient["_context"], - new FooPartitionProcessor(), - checkpointStore, - { - ...defaultOptions, - startPosition: earliestEventPosition, - ...processor2LoadBalancingInterval, - loadBalancingStrategy: new BalancedLoadBalancingStrategy(60000) + assertUnique(...ownerships); } - ); - - 
partitionOwnershipArr.size.should.equal(partitionIds.length); - processorByName[`processor-2`].start(); - - await loopUntil({ - name: "Processors are balanced", - maxTimes: 60, - timeBetweenRunsMs: 1000, - until: async () => { - // it should be impossible for 'processor-2' to have obtained the number of - // partitions it needed without having stolen some from 'processor-1' - // so if we haven't see any `ReceiverDisconnectedError`'s then that stealing - // hasn't occurred yet. - if (!didGetReceiverDisconnectedError) { - return false; - } - const partitionOwnership = await checkpointStore.listOwnership( - consumerClient.fullyQualifiedNamespace, - consumerClient.eventHubName, - EventHubConsumerClient.defaultConsumerGroupName + for (let i = 0; i < 2; ++i) { + const ownerships = await checkpointStore.listOwnership( + basicProperties.fullyQualifiedNamespace, + basicProperties.eventHubName, + basicProperties.consumerGroup ); + assertUnique(...ownerships); + } - // map of ownerId as a key and partitionIds as a value - const partitionOwnershipMap: Map = ownershipListToMap( - partitionOwnership - ); + const originalCheckpoint: Checkpoint = { + ...basicProperties, + sequenceNumber: 1, + partitionId: "1", + offset: 101 + }; - // if stealing has occurred we just want to make sure that _all_ - // the stealing has completed. - const isBalanced = (friendlyName: string): boolean => { - const n = Math.floor(partitionIds.length / 2); - const numPartitions = partitionOwnershipMap.get(processorByName[friendlyName].id)! - .length; - return numPartitions === n || numPartitions === n + 1; - }; + const copyOfOriginalCheckpoint = { + ...originalCheckpoint + }; - if (!isBalanced(`processor-1`) || !isBalanced(`processor-2`)) { - return false; - } + await checkpointStore.updateCheckpoint(originalCheckpoint); - return true; + // checking that we don't modify input parameters + copyOfOriginalCheckpoint.should.deep.equal(originalCheckpoint); + + for (let i = 0; i < 2; ++i) { + const checkpoints = await checkpointStore.listCheckpoints( + basicProperties.fullyQualifiedNamespace, + basicProperties.eventHubName, + basicProperties.consumerGroup + ); + assertUnique(...checkpoints); } }); - - for (const processor in processorByName) { - await processorByName[processor].stop(); - } - - // now that all the dust has settled let's make sure that - // a. we received some events from each partition (doesn't matter which processor) - // did the work - // b. each partition was initialized - // c. 
each partition should have received at least one shutdown event - for (const partitionId of partitionIds) { - const results = partitionResultsMap.get(partitionId)!; - results.events.length.should.be.gte(1); - results.initialized.should.equal(true); - (results.closeReason === CloseReason.Shutdown).should.equal(true); - } }); - it("should 'steal' partitions until all the processors have reached a steady-state (GreedyLoadBalancingStrategy)", async function(): Promise< - void - > { - loggerForTest("starting up the stealing test"); + describe("Load balancing", function(): void { + beforeEach("validate partitions", async function(): Promise { + const partitionIds = await producerClient.getPartitionIds(); + // ensure we have at least 3 partitions + partitionIds.length.should.gte( + 3, + "The load balancing tests must be ran on an Event Hub with at least 3 partitions" + ); + }); + + it("should 'steal' partitions until all the processors have reached a steady-state (BalancedLoadBalancingStrategy)", async function(): Promise< + void + > { + loggerForTest("starting up the stealing test"); + + const processorByName: Dictionary = {}; + const checkpointStore = new InMemoryCheckpointStore(); + const partitionIds = await producerClient.getPartitionIds(); + const partitionOwnershipArr = new Set(); + + const partitionResultsMap = new Map< + string, + { events: string[]; initialized: boolean; closeReason?: CloseReason } + >(); + partitionIds.forEach((id) => + partitionResultsMap.set(id, { events: [], initialized: false }) + ); + let didGetReceiverDisconnectedError = false; - const processorByName: Dictionary = {}; - const checkpointStore = new InMemoryCheckpointStore(); - const partitionIds = await producerClient.getPartitionIds(); - const partitionOwnershipArr = new Set(); - - const partitionResultsMap = new Map< - string, - { events: string[]; initialized: boolean; closeReason?: CloseReason } - >(); - partitionIds.forEach((id) => partitionResultsMap.set(id, { events: [], initialized: false })); - let didGetReceiverDisconnectedError = false; - - // The partitionProcess will need to add events to the partitionResultsMap as they are received - class FooPartitionProcessor implements Required { - async processInitialize(context: PartitionContext): Promise { - loggerForTest(`processInitialize(${context.partitionId})`); - partitionResultsMap.get(context.partitionId)!.initialized = true; - } - async processClose(reason: CloseReason, context: PartitionContext): Promise { - loggerForTest(`processClose(${context.partitionId})`); - partitionResultsMap.get(context.partitionId)!.closeReason = reason; - } - async processEvents(events: ReceivedEventData[], context: PartitionContext): Promise { - partitionOwnershipArr.add(context.partitionId); - const existingEvents = partitionResultsMap.get(context.partitionId)!.events; - existingEvents.push(...events.map((event) => event.body)); - } - async processError(err: Error, context: PartitionContext): Promise { - loggerForTest(`processError(${context.partitionId})`); - const errorName = (err as any).code; - if (errorName === "ReceiverDisconnectedError") { - didGetReceiverDisconnectedError = true; + // The partitionProcess will need to add events to the partitionResultsMap as they are received + class FooPartitionProcessor implements Required { + async processInitialize(context: PartitionContext): Promise { + loggerForTest(`processInitialize(${context.partitionId})`); + partitionResultsMap.get(context.partitionId)!.initialized = true; + } + async processClose(reason: CloseReason, 
context: PartitionContext): Promise { + loggerForTest(`processClose(${context.partitionId})`); + partitionResultsMap.get(context.partitionId)!.closeReason = reason; + } + async processEvents( + events: ReceivedEventData[], + context: PartitionContext + ): Promise { + partitionOwnershipArr.add(context.partitionId); + const existingEvents = partitionResultsMap.get(context.partitionId)!.events; + existingEvents.push(...events.map((event) => event.body)); + } + async processError(err: Error, context: PartitionContext): Promise { + loggerForTest(`processError(${context.partitionId})`); + const errorName = (err as any).code; + if (errorName === "ReceiverDisconnectedError") { + didGetReceiverDisconnectedError = true; + } } } - } - - // create messages - const expectedMessagePrefix = "EventProcessor test - multiple partitions - "; - for (const partitionId of partitionIds) { - await producerClient.sendBatch([{ body: expectedMessagePrefix + partitionId }], { - partitionId - }); - } - const processor1LoadBalancingInterval = { - loopIntervalInMs: 1000 - }; + // create messages + const expectedMessagePrefix = "EventProcessor test - multiple partitions - "; + for (const partitionId of partitionIds) { + await producerClient.sendBatch([{ body: expectedMessagePrefix + partitionId }], { + partitionId + }); + } - // working around a potential deadlock - this allows `processor-2` to more - // aggressively pursue getting its required partitions and avoid being in - // lockstep with `processor-1` - const processor2LoadBalancingInterval = { - loopIntervalInMs: processor1LoadBalancingInterval.loopIntervalInMs / 2 - }; + const processor1LoadBalancingInterval = { + loopIntervalInMs: 1000 + }; - processorByName[`processor-1`] = new EventProcessor( - EventHubConsumerClient.defaultConsumerGroupName, - consumerClient["_context"], - new FooPartitionProcessor(), - checkpointStore, - { - ...defaultOptions, - startPosition: earliestEventPosition, - ...processor1LoadBalancingInterval, - loadBalancingStrategy: new GreedyLoadBalancingStrategy(60000) - } - ); + // working around a potential deadlock - this allows `processor-2` to more + // aggressively pursue getting its required partitions and avoid being in + // lockstep with `processor-1` + const processor2LoadBalancingInterval = { + loopIntervalInMs: processor1LoadBalancingInterval.loopIntervalInMs / 2 + }; - processorByName[`processor-1`].start(); + processorByName[`processor-1`] = new EventProcessor( + EventHubConsumerClient.defaultConsumerGroupName, + consumerClient["_context"], + new FooPartitionProcessor(), + checkpointStore, + { + ...defaultOptions, + startPosition: earliestEventPosition, + ...processor1LoadBalancingInterval, + loadBalancingStrategy: new BalancedLoadBalancingStrategy(60000) + } + ); - await loopUntil({ - name: "All partitions are owned", - maxTimes: 60, - timeBetweenRunsMs: 1000, - until: async () => partitionOwnershipArr.size === partitionIds.length, - errorMessageFn: () => `${partitionOwnershipArr.size}/${partitionIds.length}` - }); + processorByName[`processor-1`].start(); - processorByName[`processor-2`] = new EventProcessor( - EventHubConsumerClient.defaultConsumerGroupName, - consumerClient["_context"], - new FooPartitionProcessor(), - checkpointStore, - { - ...defaultOptions, - startPosition: earliestEventPosition, - ...processor2LoadBalancingInterval, - loadBalancingStrategy: new GreedyLoadBalancingStrategy(60000) - } - ); + await loopUntil({ + name: "All partitions are owned", + maxTimes: 60, + timeBetweenRunsMs: 1000, + until: async () => 
partitionOwnershipArr.size === partitionIds.length, + errorMessageFn: () => `${partitionOwnershipArr.size}/${partitionIds.length}` + }); - partitionOwnershipArr.size.should.equal(partitionIds.length); - processorByName[`processor-2`].start(); - - await loopUntil({ - name: "Processors are balanced", - maxTimes: 60, - timeBetweenRunsMs: 1000, - until: async () => { - // it should be impossible for 'processor-2' to have obtained the number of - // partitions it needed without having stolen some from 'processor-1' - // so if we haven't see any `ReceiverDisconnectedError`'s then that stealing - // hasn't occurred yet. - if (!didGetReceiverDisconnectedError) { - return false; + processorByName[`processor-2`] = new EventProcessor( + EventHubConsumerClient.defaultConsumerGroupName, + consumerClient["_context"], + new FooPartitionProcessor(), + checkpointStore, + { + ...defaultOptions, + startPosition: earliestEventPosition, + ...processor2LoadBalancingInterval, + loadBalancingStrategy: new BalancedLoadBalancingStrategy(60000) } + ); - const partitionOwnership = await checkpointStore.listOwnership( - consumerClient.fullyQualifiedNamespace, - consumerClient.eventHubName, - EventHubConsumerClient.defaultConsumerGroupName - ); + partitionOwnershipArr.size.should.equal(partitionIds.length); + processorByName[`processor-2`].start(); - // map of ownerId as a key and partitionIds as a value - const partitionOwnershipMap: Map = ownershipListToMap( - partitionOwnership - ); + await loopUntil({ + name: "Processors are balanced", + maxTimes: 60, + timeBetweenRunsMs: 1000, + until: async () => { + // it should be impossible for 'processor-2' to have obtained the number of + // partitions it needed without having stolen some from 'processor-1' + // so if we haven't seen any `ReceiverDisconnectedError`'s then that stealing + // hasn't occurred yet. + if (!didGetReceiverDisconnectedError) { + return false; + } - // if stealing has occurred we just want to make sure that _all_ - // the stealing has completed. - const isBalanced = (friendlyName: string): boolean => { - const n = Math.floor(partitionIds.length / 2); - const numPartitions = partitionOwnershipMap.get(processorByName[friendlyName].id)! - .length; - return numPartitions === n || numPartitions === n + 1; - }; + const partitionOwnership = await checkpointStore.listOwnership( + consumerClient.fullyQualifiedNamespace, + consumerClient.eventHubName, + EventHubConsumerClient.defaultConsumerGroupName + ); + + // map of ownerId as a key and partitionIds as a value + const partitionOwnershipMap: Map = ownershipListToMap( + partitionOwnership + ); + + // if stealing has occurred we just want to make sure that _all_ + // the stealing has completed. + const isBalanced = (friendlyName: string): boolean => { + const n = Math.floor(partitionIds.length / 2); + const numPartitions = partitionOwnershipMap.get(processorByName[friendlyName].id)! + .length; + return numPartitions === n || numPartitions === n + 1; + }; + + if (!isBalanced(`processor-1`) || !isBalanced(`processor-2`)) { + return false; + } - if (!isBalanced(`processor-1`) || !isBalanced(`processor-2`)) { - return false + return true; } + }); - return true; + for (const processor in processorByName) { + await processorByName[processor].stop(); } - }); - for (const processor in processorByName) { - await processorByName[processor].stop(); - } + // now that all the dust has settled let's make sure that + // a. we received some events from each partition (doesn't matter which processor) + // did the work + // b. 
each partition was initialized + // c. each partition should have received at least one shutdown event + for (const partitionId of partitionIds) { + const results = partitionResultsMap.get(partitionId)!; + results.events.length.should.be.gte(1); + results.initialized.should.equal(true); + (results.closeReason === CloseReason.Shutdown).should.equal(true); + } + }); - // now that all the dust has settled let's make sure that - // a. we received some events from each partition (doesn't matter which processor) - // did the work - // b. each partition was initialized - // c. each partition should have received at least one shutdown event - for (const partitionId of partitionIds) { - const results = partitionResultsMap.get(partitionId)!; - results.events.length.should.be.gte(1); - results.initialized.should.equal(true); - (results.closeReason === CloseReason.Shutdown).should.equal(true); - } - }); + it("should 'steal' partitions until all the processors have reached a steady-state (GreedyLoadBalancingStrategy)", async function(): Promise< + void + > { + loggerForTest("starting up the stealing test"); + + const processorByName: Dictionary = {}; + const checkpointStore = new InMemoryCheckpointStore(); + const partitionIds = await producerClient.getPartitionIds(); + const partitionOwnershipArr = new Set(); + + const partitionResultsMap = new Map< + string, + { events: string[]; initialized: boolean; closeReason?: CloseReason } + >(); + partitionIds.forEach((id) => + partitionResultsMap.set(id, { events: [], initialized: false }) + ); + let didGetReceiverDisconnectedError = false; - it("should ensure that all the processors reach a steady-state where all partitions are being processed (BalancedLoadBalancingStrategy)", async function(): Promise< - void - > { - const processorByName: Dictionary = {}; - const partitionIds = await producerClient.getPartitionIds(); - const checkpointStore = new InMemoryCheckpointStore(); - const partitionOwnershipArr = new Set(); - let didError = false; - - // The partitionProcess will need to add events to the partitionResultsMap as they are received - class FooPartitionProcessor { - async processEvents( - _events: ReceivedEventData[], - context: PartitionContext - ): Promise { - partitionOwnershipArr.add(context.partitionId); + // The partitionProcess will need to add events to the partitionResultsMap as they are received + class FooPartitionProcessor implements Required { + async processInitialize(context: PartitionContext): Promise { + loggerForTest(`processInitialize(${context.partitionId})`); + partitionResultsMap.get(context.partitionId)!.initialized = true; + } + async processClose(reason: CloseReason, context: PartitionContext): Promise { + loggerForTest(`processClose(${context.partitionId})`); + partitionResultsMap.get(context.partitionId)!.closeReason = reason; + } + async processEvents( + events: ReceivedEventData[], + context: PartitionContext + ): Promise { + partitionOwnershipArr.add(context.partitionId); + const existingEvents = partitionResultsMap.get(context.partitionId)!.events; + existingEvents.push(...events.map((event) => event.body)); + } + async processError(err: Error, context: PartitionContext): Promise { + loggerForTest(`processError(${context.partitionId})`); + const errorName = (err as any).code; + if (errorName === "ReceiverDisconnectedError") { + didGetReceiverDisconnectedError = true; + } + } } - async processError(): Promise { - didError = true; + + // create messages + const expectedMessagePrefix = "EventProcessor test - multiple 
partitions - "; + for (const partitionId of partitionIds) { + await producerClient.sendBatch([{ body: expectedMessagePrefix + partitionId }], { + partitionId + }); } - } - // create messages - const expectedMessagePrefix = "EventProcessor test - multiple partitions - "; - for (const partitionId of partitionIds) { - await producerClient.sendBatch([{ body: expectedMessagePrefix + partitionId }], { - partitionId - }); - } + const processor1LoadBalancingInterval = { + loopIntervalInMs: 1000 + }; - for (let i = 0; i < 2; i++) { - const processorName = `processor-${i}`; - processorByName[processorName] = new EventProcessor( + // working around a potential deadlock - this allows `processor-2` to more + // aggressively pursue getting its required partitions and avoid being in + // lockstep with `processor-1` + const processor2LoadBalancingInterval = { + loopIntervalInMs: processor1LoadBalancingInterval.loopIntervalInMs / 2 + }; + + processorByName[`processor-1`] = new EventProcessor( EventHubConsumerClient.defaultConsumerGroupName, consumerClient["_context"], new FooPartitionProcessor(), @@ -1472,82 +1371,22 @@ describe("Event Processor", function(): void { { ...defaultOptions, startPosition: earliestEventPosition, - loadBalancingStrategy: new BalancedLoadBalancingStrategy(60000) + ...processor1LoadBalancingInterval, + loadBalancingStrategy: new GreedyLoadBalancingStrategy(60000) } ); - processorByName[processorName].start(); - await delay(12000); - } - - await loopUntil({ - name: "partitionownership", - timeBetweenRunsMs: 5000, - maxTimes: 10, - until: async () => partitionOwnershipArr.size === partitionIds.length - }); - - // map of ownerId as a key and partitionIds as a value - const partitionOwnershipMap: Map = new Map(); - const partitionOwnership = await checkpointStore.listOwnership( - consumerClient.fullyQualifiedNamespace, - consumerClient.eventHubName, - EventHubConsumerClient.defaultConsumerGroupName - ); - - partitionOwnershipArr.size.should.equal(partitionIds.length); - for (const processor in processorByName) { - await processorByName[processor].stop(); - } - - for (const ownership of partitionOwnership) { - if (!partitionOwnershipMap.has(ownership.ownerId)) { - partitionOwnershipMap.set(ownership.ownerId, [ownership.partitionId]); - } else { - const arr = partitionOwnershipMap.get(ownership.ownerId); - arr!.push(ownership.partitionId); - partitionOwnershipMap.set(ownership.ownerId, arr!); - } - } - - didError.should.equal(false); - const n = Math.floor(partitionIds.length / 2); - partitionOwnershipMap.get(processorByName[`processor-0`].id)!.length.should.oneOf([n, n + 1]); - partitionOwnershipMap.get(processorByName[`processor-1`].id)!.length.should.oneOf([n, n + 1]); - }); + processorByName[`processor-1`].start(); - it("should ensure that all the processors reach a steady-state where all partitions are being processed (GreedyLoadBalancingStrategy)", async function(): Promise< - void - > { - const processorByName: Dictionary = {}; - const partitionIds = await producerClient.getPartitionIds(); - const checkpointStore = new InMemoryCheckpointStore(); - const partitionOwnershipArr = new Set(); - - // The partitionProcess will need to add events to the partitionResultsMap as they are received - class FooPartitionProcessor { - async processEvents( - _events: ReceivedEventData[], - context: PartitionContext - ): Promise { - partitionOwnershipArr.add(context.partitionId); - } - async processError(): Promise { - /* no-op */ - } - } - - // create messages - const expectedMessagePrefix = 
"EventProcessor test - multiple partitions - "; - for (const partitionId of partitionIds) { - await producerClient.sendBatch([{ body: expectedMessagePrefix + partitionId }], { - partitionId + await loopUntil({ + name: "All partitions are owned", + maxTimes: 60, + timeBetweenRunsMs: 1000, + until: async () => partitionOwnershipArr.size === partitionIds.length, + errorMessageFn: () => `${partitionOwnershipArr.size}/${partitionIds.length}` }); - } - for (let i = 0; i < 2; i++) { - const processorName = `processor-${i}`; - processorByName[processorName] = new EventProcessor( + processorByName[`processor-2`] = new EventProcessor( EventHubConsumerClient.defaultConsumerGroupName, consumerClient["_context"], new FooPartitionProcessor(), @@ -1555,425 +1394,623 @@ describe("Event Processor", function(): void { { ...defaultOptions, startPosition: earliestEventPosition, + ...processor2LoadBalancingInterval, loadBalancingStrategy: new GreedyLoadBalancingStrategy(60000) } ); - processorByName[processorName].start(); - await delay(12000); - } - - await loopUntil({ - name: "partitionownership", - timeBetweenRunsMs: 5000, - maxTimes: 10, - until: async () => partitionOwnershipArr.size === partitionIds.length - }); - // map of ownerId as a key and partitionIds as a value - const partitionOwnershipMap: Map = new Map(); + partitionOwnershipArr.size.should.equal(partitionIds.length); + processorByName[`processor-2`].start(); - const partitionOwnership = await checkpointStore.listOwnership( - consumerClient.fullyQualifiedNamespace, - consumerClient.eventHubName, - EventHubConsumerClient.defaultConsumerGroupName - ); + await loopUntil({ + name: "Processors are balanced", + maxTimes: 60, + timeBetweenRunsMs: 1000, + until: async () => { + // it should be impossible for 'processor-2' to have obtained the number of + // partitions it needed without having stolen some from 'processor-1' + // so if we haven't see any `ReceiverDisconnectedError`'s then that stealing + // hasn't occurred yet. + if (!didGetReceiverDisconnectedError) { + return false; + } - partitionOwnershipArr.size.should.equal(partitionIds.length); - for (const processor in processorByName) { - await processorByName[processor].stop(); - } + const partitionOwnership = await checkpointStore.listOwnership( + consumerClient.fullyQualifiedNamespace, + consumerClient.eventHubName, + EventHubConsumerClient.defaultConsumerGroupName + ); - for (const ownership of partitionOwnership) { - if (!partitionOwnershipMap.has(ownership.ownerId)) { - partitionOwnershipMap.set(ownership.ownerId, [ownership.partitionId]); - } else { - const arr = partitionOwnershipMap.get(ownership.ownerId); - arr!.push(ownership.partitionId); - partitionOwnershipMap.set(ownership.ownerId, arr!); - } - } + // map of ownerId as a key and partitionIds as a value + const partitionOwnershipMap: Map = ownershipListToMap( + partitionOwnership + ); - const n = Math.floor(partitionIds.length / 2); - partitionOwnershipMap.get(processorByName[`processor-0`].id)!.length.should.oneOf([n, n + 1]); - partitionOwnershipMap.get(processorByName[`processor-1`].id)!.length.should.oneOf([n, n + 1]); - }); + // if stealing has occurred we just want to make sure that _all_ + // the stealing has completed. + const isBalanced = (friendlyName: string): boolean => { + const n = Math.floor(partitionIds.length / 2); + const numPartitions = partitionOwnershipMap.get(processorByName[friendlyName].id)! 
+ .length; + return numPartitions === n || numPartitions === n + 1; + }; - it("should ensure that all the processors maintain a steady-state when all partitions are being processed (BalancedLoadBalancingStrategy)", async function(): Promise< - void - > { - const partitionIds = await producerClient.getPartitionIds(); - const checkpointStore = new InMemoryCheckpointStore(); - const claimedPartitionsMap = {} as { [eventProcessorId: string]: Set }; + if (!isBalanced(`processor-1`) || !isBalanced(`processor-2`)) { + return false; + } - const partitionOwnershipHistory: string[] = []; + return true; + } + }); - let allPartitionsClaimed = false; - let thrashAfterSettling = false; - const handlers: SubscriptionEventHandlers = { - async processInitialize(context) { - const eventProcessorId: string = (context as any).eventProcessorId; - const partitionId = context.partitionId; + for (const processor in processorByName) { + await processorByName[processor].stop(); + } - partitionOwnershipHistory.push(`${eventProcessorId}: init ${partitionId}`); + // now that all the dust has settled let's make sure that + // a. we received some events from each partition (doesn't matter which processor) + // did the work + // b. each partition was initialized + // c. each partition should have received at least one shutdown event + for (const partitionId of partitionIds) { + const results = partitionResultsMap.get(partitionId)!; + results.events.length.should.be.gte(1); + results.initialized.should.equal(true); + (results.closeReason === CloseReason.Shutdown).should.equal(true); + } + }); - loggerForTest(`[${eventProcessorId}] Claimed partition ${partitionId}`); - if (allPartitionsClaimed) { - thrashAfterSettling = true; - return; + it("should ensure that all the processors reach a steady-state where all partitions are being processed (BalancedLoadBalancingStrategy)", async function(): Promise< + void + > { + const processorByName: Dictionary = {}; + const partitionIds = await producerClient.getPartitionIds(); + const checkpointStore = new InMemoryCheckpointStore(); + const partitionOwnershipArr = new Set(); + let didError = false; + + // The partitionProcess will need to add events to the partitionResultsMap as they are received + class FooPartitionProcessor { + async processEvents( + _events: ReceivedEventData[], + context: PartitionContext + ): Promise { + partitionOwnershipArr.add(context.partitionId); } - - const claimedPartitions = claimedPartitionsMap[eventProcessorId] || new Set(); - claimedPartitions.add(partitionId); - claimedPartitionsMap[eventProcessorId] = claimedPartitions; - }, - async processEvents() { - /* no-op */ - }, - async processError() { - /* no-op */ - }, - async processClose(reason, context) { - const eventProcessorId: string = (context as any).eventProcessorId; - const partitionId = context.partitionId; - const claimedPartitions = claimedPartitionsMap[eventProcessorId]; - claimedPartitions.delete(partitionId); - loggerForTest( - `[${(context as any).eventProcessorId}] processClose(${reason}) on partition ${ - context.partitionId - }` - ); - if (reason === CloseReason.OwnershipLost && allPartitionsClaimed) { - loggerForTest( - `[${(context as any).eventProcessorId}] Lost partition ${context.partitionId}` - ); - thrashAfterSettling = true; + async processError(): Promise { + didError = true; } } - }; - const eventProcessorOptions: FullEventProcessorOptions = { - maxBatchSize: 1, - maxWaitTimeInSeconds: 5, - loopIntervalInMs: 1000, - inactiveTimeLimitInMs: 3000, - ownerLevel: 0, - // For this 
test we don't want to actually checkpoint, just test ownership. - startPosition: latestEventPosition, - loadBalancingStrategy: new BalancedLoadBalancingStrategy(60000) - }; + // create messages + const expectedMessagePrefix = "EventProcessor test - multiple partitions - "; + for (const partitionId of partitionIds) { + await producerClient.sendBatch([{ body: expectedMessagePrefix + partitionId }], { + partitionId + }); + } - const processor1 = new EventProcessor( - EventHubConsumerClient.defaultConsumerGroupName, - consumerClient["_context"], - handlers, - checkpointStore, - eventProcessorOptions - ); + for (let i = 0; i < 2; i++) { + const processorName = `processor-${i}`; + processorByName[processorName] = new EventProcessor( + EventHubConsumerClient.defaultConsumerGroupName, + consumerClient["_context"], + new FooPartitionProcessor(), + checkpointStore, + { + ...defaultOptions, + startPosition: earliestEventPosition, + loadBalancingStrategy: new BalancedLoadBalancingStrategy(60000) + } + ); + processorByName[processorName].start(); + await delay(12000); + } - const processor2 = new EventProcessor( - EventHubConsumerClient.defaultConsumerGroupName, - consumerClient["_context"], - handlers, - checkpointStore, - eventProcessorOptions - ); + await loopUntil({ + name: "partitionownership", + timeBetweenRunsMs: 5000, + maxTimes: 10, + until: async () => partitionOwnershipArr.size === partitionIds.length + }); - processor1.start(); - processor2.start(); + // map of ownerId as a key and partitionIds as a value + const partitionOwnershipMap: Map = new Map(); - // loop until all partitions are claimed - try { - let lastLoopError: Record = {}; + const partitionOwnership = await checkpointStore.listOwnership( + consumerClient.fullyQualifiedNamespace, + consumerClient.eventHubName, + EventHubConsumerClient.defaultConsumerGroupName + ); - await loopUntil({ - name: "partitionOwnership", - maxTimes: 30, - timeBetweenRunsMs: 10000, + partitionOwnershipArr.size.should.equal(partitionIds.length); + for (const processor in processorByName) { + await processorByName[processor].stop(); + } - errorMessageFn: () => JSON.stringify(lastLoopError, undefined, " "), - until: async () => { - // Ensure the partition ownerships are balanced. - const eventProcessorIds = Object.keys(claimedPartitionsMap); - - // There are 2 processors, so we should see 2 entries. - if (eventProcessorIds.length !== 2) { - lastLoopError = { - reason: "Not all event processors have shown up", - eventProcessorIds, - partitionOwnershipHistory - }; - return false; - } + for (const ownership of partitionOwnership) { + if (!partitionOwnershipMap.has(ownership.ownerId)) { + partitionOwnershipMap.set(ownership.ownerId, [ownership.partitionId]); + } else { + const arr = partitionOwnershipMap.get(ownership.ownerId); + arr!.push(ownership.partitionId); + partitionOwnershipMap.set(ownership.ownerId, arr!); + } + } - const aProcessorPartitions = claimedPartitionsMap[eventProcessorIds[0]]; - const bProcessorPartitions = claimedPartitionsMap[eventProcessorIds[1]]; + didError.should.equal(false); + const n = Math.floor(partitionIds.length / 2); + partitionOwnershipMap + .get(processorByName[`processor-0`].id)! + .length.should.oneOf([n, n + 1]); + partitionOwnershipMap + .get(processorByName[`processor-1`].id)! + .length.should.oneOf([n, n + 1]); + }); - // The delta between number of partitions each processor owns can't be more than 1. 
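// Editor's note: these tests poll through the `loopUntil` helper imported from
// the test utilities. Its implementation is not part of this patch, so the
// sketch below is only a plausible reconstruction inferred from the call sites
// in this file (`name`, `maxTimes`, `timeBetweenRunsMs`, `until`,
// `errorMessageFn`); treat every detail as an assumption, not the actual code.
interface LoopUntilOptions {
  name: string;
  maxTimes: number;
  timeBetweenRunsMs: number;
  until: () => Promise<boolean>;
  errorMessageFn?: () => string;
}

async function loopUntilSketch(options: LoopUntilOptions): Promise<void> {
  for (let attempt = 1; attempt <= options.maxTimes; attempt++) {
    // Stop polling as soon as the condition reports success.
    if (await options.until()) {
      return;
    }
    await new Promise((resolve) => setTimeout(resolve, options.timeBetweenRunsMs));
  }
  // Surface caller-provided diagnostics when the condition never holds.
  throw new Error(
    `${options.name}: condition not met after ${options.maxTimes} attempts. ${
      options.errorMessageFn ? options.errorMessageFn() : ""
    }`
  );
}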
- if (Math.abs(aProcessorPartitions.size - bProcessorPartitions.size) > 1) { - lastLoopError = { - reason: "Delta between partitions is greater than 1", - a: Array.from(aProcessorPartitions), - b: Array.from(bProcessorPartitions), - partitionOwnershipHistory - }; - return false; - } + it("should ensure that all the processors reach a steady-state where all partitions are being processed (GreedyLoadBalancingStrategy)", async function(): Promise< + void + > { + const processorByName: Dictionary = {}; + const partitionIds = await producerClient.getPartitionIds(); + const checkpointStore = new InMemoryCheckpointStore(); + const partitionOwnershipArr = new Set(); + + // The partitionProcess will need to add events to the partitionResultsMap as they are received + class FooPartitionProcessor { + async processEvents( + _events: ReceivedEventData[], + context: PartitionContext + ): Promise { + partitionOwnershipArr.add(context.partitionId); + } + async processError(): Promise { + /* no-op */ + } + } - // All partitions must be claimed. - const innerAllPartitionsClaimed = - aProcessorPartitions.size + bProcessorPartitions.size === partitionIds.length; - - if (!innerAllPartitionsClaimed) { - lastLoopError = { - reason: "All partitions not claimed", - partitionIds, - a: Array.from(aProcessorPartitions), - b: Array.from(bProcessorPartitions), - partitionOwnershipHistory - }; + // create messages + const expectedMessagePrefix = "EventProcessor test - multiple partitions - "; + for (const partitionId of partitionIds) { + await producerClient.sendBatch([{ body: expectedMessagePrefix + partitionId }], { + partitionId + }); + } + + for (let i = 0; i < 2; i++) { + const processorName = `processor-${i}`; + processorByName[processorName] = new EventProcessor( + EventHubConsumerClient.defaultConsumerGroupName, + consumerClient["_context"], + new FooPartitionProcessor(), + checkpointStore, + { + ...defaultOptions, + startPosition: earliestEventPosition, + loadBalancingStrategy: new GreedyLoadBalancingStrategy(60000) } + ); + processorByName[processorName].start(); + await delay(12000); + } - return innerAllPartitionsClaimed; - } + await loopUntil({ + name: "partitionownership", + timeBetweenRunsMs: 5000, + maxTimes: 10, + until: async () => partitionOwnershipArr.size === partitionIds.length }); - } catch (err) { - // close processors - await Promise.all([processor1.stop(), processor2.stop()]); - throw err; - } - loggerForTest(`All partitions have been claimed.`); - allPartitionsClaimed = true; + // map of ownerId as a key and partitionIds as a value + const partitionOwnershipMap: Map = new Map(); - try { - // loop for some time to see if thrashing occurs - await loopUntil({ - name: "partitionThrash", - maxTimes: 4, - timeBetweenRunsMs: 1000, - until: async () => thrashAfterSettling - }); - } catch (err) { - // swallow error, check trashAfterSettling for the condition in finally - } finally { - await Promise.all([processor1.stop(), processor2.stop()]); - should.equal( - thrashAfterSettling, - false, - "Detected PartitionOwnership thrashing after load-balancing has settled." 
+ const partitionOwnership = await checkpointStore.listOwnership( + consumerClient.fullyQualifiedNamespace, + consumerClient.eventHubName, + EventHubConsumerClient.defaultConsumerGroupName ); - } - }); - it("should ensure that all the processors maintain a steady-state when all partitions are being processed (GreedyLoadBalancingStrategy)", async function(): Promise< - void - > { - const partitionIds = await producerClient.getPartitionIds(); - const checkpointStore = new InMemoryCheckpointStore(); - const claimedPartitionsMap = {} as { [eventProcessorId: string]: Set }; + partitionOwnershipArr.size.should.equal(partitionIds.length); + for (const processor in processorByName) { + await processorByName[processor].stop(); + } - const partitionOwnershipHistory: string[] = []; + for (const ownership of partitionOwnership) { + if (!partitionOwnershipMap.has(ownership.ownerId)) { + partitionOwnershipMap.set(ownership.ownerId, [ownership.partitionId]); + } else { + const arr = partitionOwnershipMap.get(ownership.ownerId); + arr!.push(ownership.partitionId); + partitionOwnershipMap.set(ownership.ownerId, arr!); + } + } - let allPartitionsClaimed = false; - let thrashAfterSettling = false; - const handlers: SubscriptionEventHandlers = { - async processInitialize(context) { - const eventProcessorId: string = (context as any).eventProcessorId; - const partitionId = context.partitionId; + const n = Math.floor(partitionIds.length / 2); + partitionOwnershipMap + .get(processorByName[`processor-0`].id)! + .length.should.oneOf([n, n + 1]); + partitionOwnershipMap + .get(processorByName[`processor-1`].id)! + .length.should.oneOf([n, n + 1]); + }); - partitionOwnershipHistory.push(`${eventProcessorId}: init ${partitionId}`); + it("should ensure that all the processors maintain a steady-state when all partitions are being processed (BalancedLoadBalancingStrategy)", async function(): Promise< + void + > { + const partitionIds = await producerClient.getPartitionIds(); + const checkpointStore = new InMemoryCheckpointStore(); + const claimedPartitionsMap = {} as { [eventProcessorId: string]: Set }; - loggerForTest(`[${eventProcessorId}] Claimed partition ${partitionId}`); - if (allPartitionsClaimed) { - thrashAfterSettling = true; - return; - } + const partitionOwnershipHistory: string[] = []; - const claimedPartitions = claimedPartitionsMap[eventProcessorId] || new Set(); - claimedPartitions.add(partitionId); - claimedPartitionsMap[eventProcessorId] = claimedPartitions; - }, - async processEvents() { - /* no-op */ - }, - async processError() { - /* no-op */ - }, - async processClose(reason, context) { - const eventProcessorId: string = (context as any).eventProcessorId; - const partitionId = context.partitionId; - const claimedPartitions = claimedPartitionsMap[eventProcessorId]; - claimedPartitions.delete(partitionId); - loggerForTest( - `[${(context as any).eventProcessorId}] processClose(${reason}) on partition ${ - context.partitionId - }` - ); - if (reason === CloseReason.OwnershipLost && allPartitionsClaimed) { + let allPartitionsClaimed = false; + let thrashAfterSettling = false; + const handlers: SubscriptionEventHandlers = { + async processInitialize(context) { + const eventProcessorId: string = (context as any).eventProcessorId; + const partitionId = context.partitionId; + + partitionOwnershipHistory.push(`${eventProcessorId}: init ${partitionId}`); + + loggerForTest(`[${eventProcessorId}] Claimed partition ${partitionId}`); + if (allPartitionsClaimed) { + thrashAfterSettling = true; + return; + } + + 
const claimedPartitions = claimedPartitionsMap[eventProcessorId] || new Set(); + claimedPartitions.add(partitionId); + claimedPartitionsMap[eventProcessorId] = claimedPartitions; + }, + async processEvents() { + /* no-op */ + }, + async processError() { + /* no-op */ + }, + async processClose(reason, context) { + const eventProcessorId: string = (context as any).eventProcessorId; + const partitionId = context.partitionId; + const claimedPartitions = claimedPartitionsMap[eventProcessorId]; + claimedPartitions.delete(partitionId); loggerForTest( - `[${(context as any).eventProcessorId}] Lost partition ${context.partitionId}` + `[${(context as any).eventProcessorId}] processClose(${reason}) on partition ${ + context.partitionId + }` ); - thrashAfterSettling = true; + if (reason === CloseReason.OwnershipLost && allPartitionsClaimed) { + loggerForTest( + `[${(context as any).eventProcessorId}] Lost partition ${context.partitionId}` + ); + thrashAfterSettling = true; + } } - } - }; + }; - const eventProcessorOptions: FullEventProcessorOptions = { - maxBatchSize: 1, - maxWaitTimeInSeconds: 5, - loopIntervalInMs: 1000, - inactiveTimeLimitInMs: 3000, - ownerLevel: 0, - // For this test we don't want to actually checkpoint, just test ownership. - startPosition: latestEventPosition, - loadBalancingStrategy: new GreedyLoadBalancingStrategy(60000) - }; + const eventProcessorOptions: FullEventProcessorOptions = { + maxBatchSize: 1, + maxWaitTimeInSeconds: 5, + loopIntervalInMs: 1000, + inactiveTimeLimitInMs: 3000, + ownerLevel: 0, + // For this test we don't want to actually checkpoint, just test ownership. + startPosition: latestEventPosition, + loadBalancingStrategy: new BalancedLoadBalancingStrategy(60000) + }; - const processor1 = new EventProcessor( - EventHubConsumerClient.defaultConsumerGroupName, - consumerClient["_context"], - handlers, - checkpointStore, - eventProcessorOptions - ); + const processor1 = new EventProcessor( + EventHubConsumerClient.defaultConsumerGroupName, + consumerClient["_context"], + handlers, + checkpointStore, + eventProcessorOptions + ); - const processor2 = new EventProcessor( - EventHubConsumerClient.defaultConsumerGroupName, - consumerClient["_context"], - handlers, - checkpointStore, - eventProcessorOptions - ); + const processor2 = new EventProcessor( + EventHubConsumerClient.defaultConsumerGroupName, + consumerClient["_context"], + handlers, + checkpointStore, + eventProcessorOptions + ); - processor1.start(); - processor2.start(); + processor1.start(); + processor2.start(); + + // loop until all partitions are claimed + try { + let lastLoopError: Record = {}; + + await loopUntil({ + name: "partitionOwnership", + maxTimes: 30, + timeBetweenRunsMs: 10000, + + errorMessageFn: () => JSON.stringify(lastLoopError, undefined, " "), + until: async () => { + // Ensure the partition ownerships are balanced. + const eventProcessorIds = Object.keys(claimedPartitionsMap); + + // There are 2 processors, so we should see 2 entries. + if (eventProcessorIds.length !== 2) { + lastLoopError = { + reason: "Not all event processors have shown up", + eventProcessorIds, + partitionOwnershipHistory + }; + return false; + } - // loop until all partitions are claimed - try { - let lastLoopError: Record = {}; + const aProcessorPartitions = claimedPartitionsMap[eventProcessorIds[0]]; + const bProcessorPartitions = claimedPartitionsMap[eventProcessorIds[1]]; + + // The delta between number of partitions each processor owns can't be more than 1. 
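// Editor's note: the comment above states the invariant the following checks
// encode. As a standalone illustration (a hypothetical helper, not part of
// this patch): with `totalPartitions` split across two processors, a settled
// owner must hold either floor(total / 2) or ceil(total / 2) partitions.
function ownsFairShare(ownedCount: number, totalPartitions: number): boolean {
  const lower = Math.floor(totalPartitions / 2); // e.g. 3 partitions -> 1
  const upper = Math.ceil(totalPartitions / 2); // e.g. 3 partitions -> 2
  return ownedCount === lower || ownedCount === upper;
}
// For example, ownsFairShare(1, 3) and ownsFairShare(2, 3) hold, while
// ownsFairShare(3, 3) would mean one processor still owns everything.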
+ if (Math.abs(aProcessorPartitions.size - bProcessorPartitions.size) > 1) { + lastLoopError = { + reason: "Delta between partitions is greater than 1", + a: Array.from(aProcessorPartitions), + b: Array.from(bProcessorPartitions), + partitionOwnershipHistory + }; + return false; + } - // All partitions must be claimed. - const innerAllPartitionsClaimed = - aProcessorPartitions.size + bProcessorPartitions.size === partitionIds.length; - - if (!innerAllPartitionsClaimed) { - lastLoopError = { - reason: "All partitions not claimed", - partitionIds, - a: Array.from(aProcessorPartitions), - b: Array.from(bProcessorPartitions), - partitionOwnershipHistory - }; + // All partitions must be claimed. + const innerAllPartitionsClaimed = + aProcessorPartitions.size + bProcessorPartitions.size === partitionIds.length; + + if (!innerAllPartitionsClaimed) { + lastLoopError = { + reason: "All partitions not claimed", + partitionIds, + a: Array.from(aProcessorPartitions), + b: Array.from(bProcessorPartitions), + partitionOwnershipHistory + }; + } - return innerAllPartitionsClaimed; - } + return innerAllPartitionsClaimed; + } + }); + } catch (err) { + // close processors + await Promise.all([processor1.stop(), processor2.stop()]); + throw err; + } - loggerForTest(`All partitions have been claimed.`); - allPartitionsClaimed = true; + loggerForTest(`All partitions have been claimed.`); + allPartitionsClaimed = true; - try { - // loop for some time to see if thrashing occurs - await loopUntil({ - name: "partitionThrash", - maxTimes: 4, - timeBetweenRunsMs: 1000, - until: async () => thrashAfterSettling - }); - } catch (err) { - // swallow error, check trashAfterSettling for the condition in finally - } finally { - await Promise.all([processor1.stop(), processor2.stop()]); - should.equal( - thrashAfterSettling, - false, - "Detected PartitionOwnership thrashing after load-balancing has settled." + try { + // loop for some time to see if thrashing occurs + await loopUntil({ + name: "partitionThrash", + maxTimes: 4, + timeBetweenRunsMs: 1000, + until: async () => thrashAfterSettling + }); + } catch (err) { + // swallow error, check thrashAfterSettling for the condition in finally + } finally { + await Promise.all([processor1.stop(), processor2.stop()]); + should.equal( + thrashAfterSettling, + false, + "Detected PartitionOwnership thrashing after load-balancing has settled." + ); + } + }); - it("should ensure that all the processors maintain a steady-state when all partitions are being processed (GreedyLoadBalancingStrategy)", async function(): Promise< - void - > { - const partitionIds = await producerClient.getPartitionIds(); - const checkpointStore = new InMemoryCheckpointStore(); - const claimedPartitionsMap = {} as { [eventProcessorId: string]: Set }; - const partitionOwnershipHistory: string[] = []; - let allPartitionsClaimed = false; - let thrashAfterSettling = false; - const handlers: SubscriptionEventHandlers = { - async processInitialize(context) { - const eventProcessorId: string = (context as any).eventProcessorId; - const partitionId = context.partitionId; - partitionOwnershipHistory.push(`${eventProcessorId}: init ${partitionId}`); - loggerForTest(`[${eventProcessorId}] Claimed partition ${partitionId}`); - if (allPartitionsClaimed) { - thrashAfterSettling = true; - return; - } - const claimedPartitions = claimedPartitionsMap[eventProcessorId] || new Set(); - claimedPartitions.add(partitionId); - claimedPartitionsMap[eventProcessorId] = claimedPartitions; - }, - async processEvents() { - /* no-op */ - }, - async processError() { - /* no-op */ - }, - async processClose(reason, context) { - const eventProcessorId: string = (context as any).eventProcessorId; - const partitionId = context.partitionId; - const claimedPartitions = claimedPartitionsMap[eventProcessorId]; - claimedPartitions.delete(partitionId); - loggerForTest( - `[${(context as any).eventProcessorId}] processClose(${reason}) on partition ${ - context.partitionId - }` - ); - if (reason === CloseReason.OwnershipLost && allPartitionsClaimed) { - loggerForTest( - `[${(context as any).eventProcessorId}] Lost partition ${context.partitionId}` - ); - thrashAfterSettling = true; - } - } - }; - const eventProcessorOptions: FullEventProcessorOptions = { - maxBatchSize: 1, - maxWaitTimeInSeconds: 5, - loopIntervalInMs: 1000, - inactiveTimeLimitInMs: 3000, - ownerLevel: 0, - // For this test we don't want to actually checkpoint, just test ownership. - startPosition: latestEventPosition, - loadBalancingStrategy: new GreedyLoadBalancingStrategy(60000) - }; - const processor1 = new EventProcessor( - EventHubConsumerClient.defaultConsumerGroupName, - consumerClient["_context"], - handlers, - checkpointStore, - eventProcessorOptions - ); - const processor2 = new EventProcessor( - EventHubConsumerClient.defaultConsumerGroupName, - consumerClient["_context"], - handlers, - checkpointStore, - eventProcessorOptions - ); - processor1.start(); - processor2.start(); - // loop until all partitions are claimed - try { - let lastLoopError: Record = {}; - await loopUntil({ - name: "partitionOwnership", - maxTimes: 30, - timeBetweenRunsMs: 10000, - errorMessageFn: () => JSON.stringify(lastLoopError, undefined, " "), - until: async () => { - // Ensure the partition ownerships are balanced. - const eventProcessorIds = Object.keys(claimedPartitionsMap); - // There are 2 processors, so we should see 2 entries. - if (eventProcessorIds.length !== 2) { - lastLoopError = { - reason: "Not all event processors have shown up", - eventProcessorIds, - partitionOwnershipHistory - }; - return false; - } - const aProcessorPartitions = claimedPartitionsMap[eventProcessorIds[0]]; - const bProcessorPartitions = claimedPartitionsMap[eventProcessorIds[1]]; - // The delta between number of partitions each processor owns can't be more than 1. 
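// Editor's note: the steady-state tests in this hunk share a "settle, then
// watch" shape. A condensed sketch of that pattern (names are illustrative,
// not from the patch): wait until ownership stabilizes, flip a flag so any
// further claim or OwnershipLost close counts as thrashing, then observe for
// a fixed window before asserting.
async function settleThenWatch(
  waitUntilSettled: () => Promise<void>,
  markSettled: () => void,
  sawThrashing: () => boolean,
  watchWindowMs: number
): Promise<boolean> {
  await waitUntilSettled(); // e.g. poll until both processors split the partitions
  markSettled(); // from here on, ownership churn is treated as thrashing
  await new Promise((resolve) => setTimeout(resolve, watchWindowMs));
  return sawThrashing();
}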
- const innerAllPartitionsClaimed = - aProcessorPartitions.size + bProcessorPartitions.size === partitionIds.length; - - if (!innerAllPartitionsClaimed) { - lastLoopError = { - reason: "All partitions not claimed", - partitionIds, - a: Array.from(aProcessorPartitions), - b: Array.from(bProcessorPartitions), - partitionOwnershipHistory - }; + it("should ensure that all the processors maintain a steady-state when all partitions are being processed (GreedyLoadBalancingStrategy)", async function(): Promise< + void + > { + const partitionIds = await producerClient.getPartitionIds(); + const checkpointStore = new InMemoryCheckpointStore(); + const claimedPartitionsMap = {} as { [eventProcessorId: string]: Set }; + + const partitionOwnershipHistory: string[] = []; + + let allPartitionsClaimed = false; + let thrashAfterSettling = false; + const handlers: SubscriptionEventHandlers = { + async processInitialize(context) { + const eventProcessorId: string = (context as any).eventProcessorId; + const partitionId = context.partitionId; + + partitionOwnershipHistory.push(`${eventProcessorId}: init ${partitionId}`); + + loggerForTest(`[${eventProcessorId}] Claimed partition ${partitionId}`); + if (allPartitionsClaimed) { + thrashAfterSettling = true; + return; } - return innerAllPartitionsClaimed; + const claimedPartitions = claimedPartitionsMap[eventProcessorId] || new Set(); + claimedPartitions.add(partitionId); + claimedPartitionsMap[eventProcessorId] = claimedPartitions; + }, + async processEvents() { + /* no-op */ + }, + async processError() { + /* no-op */ + }, + async processClose(reason, context) { + const eventProcessorId: string = (context as any).eventProcessorId; + const partitionId = context.partitionId; + const claimedPartitions = claimedPartitionsMap[eventProcessorId]; + claimedPartitions.delete(partitionId); + loggerForTest( + `[${(context as any).eventProcessorId}] processClose(${reason}) on partition ${ + context.partitionId + }` + ); + if (reason === CloseReason.OwnershipLost && allPartitionsClaimed) { + loggerForTest( + `[${(context as any).eventProcessorId}] Lost partition ${context.partitionId}` + ); + thrashAfterSettling = true; + } } - }); - } catch (err) { - // close processors - await Promise.all([processor1.stop(), processor2.stop()]); - throw err; - } + }; - loggerForTest(`All partitions have been claimed.`); - allPartitionsClaimed = true; + const eventProcessorOptions: FullEventProcessorOptions = { + maxBatchSize: 1, + maxWaitTimeInSeconds: 5, + loopIntervalInMs: 1000, + inactiveTimeLimitInMs: 3000, + ownerLevel: 0, + // For this test we don't want to actually checkpoint, just test ownership. + startPosition: latestEventPosition, + loadBalancingStrategy: new GreedyLoadBalancingStrategy(60000) + }; - try { - // loop for some time to see if thrashing occurs - await loopUntil({ - name: "partitionThrash", - maxTimes: 4, - timeBetweenRunsMs: 1000, - until: async () => thrashAfterSettling - }); - } catch (err) { - // swallow error, check trashAfterSettling for the condition in finally - } finally { - await Promise.all([processor1.stop(), processor2.stop()]); - should.equal( - thrashAfterSettling, - false, - "Detected PartitionOwnership thrashing after load-balancing has settled." 
+ const processor1 = new EventProcessor( + EventHubConsumerClient.defaultConsumerGroupName, + consumerClient["_context"], + handlers, + checkpointStore, + eventProcessorOptions ); + + const processor2 = new EventProcessor( + EventHubConsumerClient.defaultConsumerGroupName, + consumerClient["_context"], + handlers, + checkpointStore, + eventProcessorOptions + ); + + processor1.start(); + processor2.start(); + + // loop until all partitions are claimed + try { + let lastLoopError: Record = {}; + + await loopUntil({ + name: "partitionOwnership", + maxTimes: 30, + timeBetweenRunsMs: 10000, + + errorMessageFn: () => JSON.stringify(lastLoopError, undefined, " "), + until: async () => { + // Ensure the partition ownerships are balanced. + const eventProcessorIds = Object.keys(claimedPartitionsMap); + + // There are 2 processors, so we should see 2 entries. + if (eventProcessorIds.length !== 2) { + lastLoopError = { + reason: "Not all event processors have shown up", + eventProcessorIds, + partitionOwnershipHistory + }; + return false; + } + + const aProcessorPartitions = claimedPartitionsMap[eventProcessorIds[0]]; + const bProcessorPartitions = claimedPartitionsMap[eventProcessorIds[1]]; + + // The delta between number of partitions each processor owns can't be more than 1. + if (Math.abs(aProcessorPartitions.size - bProcessorPartitions.size) > 1) { + lastLoopError = { + reason: "Delta between partitions is greater than 1", + a: Array.from(aProcessorPartitions), + b: Array.from(bProcessorPartitions), + partitionOwnershipHistory + }; + return false; + } + + // All partitions must be claimed. + const innerAllPartitionsClaimed = + aProcessorPartitions.size + bProcessorPartitions.size === partitionIds.length; + + if (!innerAllPartitionsClaimed) { + lastLoopError = { + reason: "All partitions not claimed", + partitionIds, + a: Array.from(aProcessorPartitions), + b: Array.from(bProcessorPartitions), + partitionOwnershipHistory + }; + } + + return innerAllPartitionsClaimed; + } + }); + } catch (err) { + // close processors + await Promise.all([processor1.stop(), processor2.stop()]); + throw err; + } + + loggerForTest(`All partitions have been claimed.`); + allPartitionsClaimed = true; + + try { + // loop for some time to see if thrashing occurs + await loopUntil({ + name: "partitionThrash", + maxTimes: 4, + timeBetweenRunsMs: 1000, + until: async () => thrashAfterSettling + }); + } catch (err) { + // swallow error, check thrashAfterSettling for the condition in finally + } finally { + await Promise.all([processor1.stop(), processor2.stop()]); + should.equal( + thrashAfterSettling, + false, + "Detected PartitionOwnership thrashing after load-balancing has settled." 
+ ); + } + }); }); - }); -}).timeout(100000); - -function ownershipListToMap(partitionOwnership: PartitionOwnership[]): Map { - const partitionOwnershipMap: Map = new Map(); - - for (const ownership of partitionOwnership) { - if (!partitionOwnershipMap.has(ownership.ownerId)) { - partitionOwnershipMap.set(ownership.ownerId, [ownership.partitionId]); - } else { - const arr = partitionOwnershipMap.get(ownership.ownerId); - arr!.push(ownership.partitionId); - partitionOwnershipMap.set(ownership.ownerId, arr!); + }).timeout(100000); + + function ownershipListToMap(partitionOwnership: PartitionOwnership[]): Map { + const partitionOwnershipMap: Map = new Map(); + + for (const ownership of partitionOwnership) { + if (!partitionOwnershipMap.has(ownership.ownerId)) { + partitionOwnershipMap.set(ownership.ownerId, [ownership.partitionId]); + } else { + const arr = partitionOwnershipMap.get(ownership.ownerId); + arr!.push(ownership.partitionId); + partitionOwnershipMap.set(ownership.ownerId, arr!); + } } + + return partitionOwnershipMap; } - return partitionOwnershipMap; -} + function triggerAbortedSignalAfterNumCalls(maxCalls: number): AbortSignal { + let count = 0; -function triggerAbortedSignalAfterNumCalls(maxCalls: number): AbortSignal { - let count = 0; + const abortSignal: AbortSignal = { + get aborted(): boolean { + ++count; - const abortSignal: AbortSignal = { - get aborted(): boolean { - ++count; + if (count >= maxCalls) { + return true; + } - if (count >= maxCalls) { - return true; - } + return false; + }, + addEventListener: () => { + /* no-op */ + }, + removeEventListener: () => { + /* no-op */ + }, + onabort: () => { + /* no-op */ + }, + dispatchEvent: () => true + }; - return false; - }, - addEventListener: () => { - /* no-op */ - }, - removeEventListener: () => { - /* no-op */ - }, - onabort: () => { - /* no-op */ - }, - dispatchEvent: () => true - }; - - return abortSignal; -} + return abortSignal; + } +}); diff --git a/sdk/eventhub/event-hubs/test/internal/eventdata.spec.ts b/sdk/eventhub/event-hubs/test/internal/eventdata.spec.ts index a8b1bd7a76b8..a32841c70e3b 100644 --- a/sdk/eventhub/event-hubs/test/internal/eventdata.spec.ts +++ b/sdk/eventhub/event-hubs/test/internal/eventdata.spec.ts @@ -12,6 +12,7 @@ import { valueSectionTypeCode } from "../../src/dataTransformer"; import { AmqpAnnotatedMessage } from "@azure/core-amqp"; +import { testWithServiceTypes } from "../public/utils/testWithServiceTypes"; const testAnnotations = { "x-opt-enqueued-time": Date.now(), @@ -43,58 +44,135 @@ const testSourceEventData: EventData = { const messageFromED = toRheaMessage(testSourceEventData); -describe("EventData", function(): void { - describe("fromRheaMessage", function(): void { - it("populates body with the message body", function(): void { - const testEventData = fromRheaMessage(testMessage); - testEventData.body.should.equal(testBody); - }); - - it("populates top-level fields", () => { - const testEventData = fromRheaMessage({ - ...testMessage, - ...{ content_type: "application/json", correlation_id: "cid", message_id: 1 } - }); - should().equal(testEventData.messageId, 1, "Unexpected messageId found."); - should().equal( - testEventData.contentType, - "application/json", - "Unexpected contentType found." 
- ); - should().equal(testEventData.correlationId, "cid", "Unexpected correlationId found."); - }); - - describe("properties", function(): void { - it("enqueuedTimeUtc gets the enqueued time from system properties", function(): void { +testWithServiceTypes(() => { + describe("EventData", function(): void { + describe("fromRheaMessage", function(): void { + it("populates body with the message body", function(): void { const testEventData = fromRheaMessage(testMessage); - testEventData - .enqueuedTimeUtc!.getTime() - .should.equal(testAnnotations["x-opt-enqueued-time"]); + testEventData.body.should.equal(testBody); }); - it("offset gets the offset from system properties", function(): void { - const testEventData = fromRheaMessage(testMessage); - testEventData.offset!.should.equal(testAnnotations["x-opt-offset"]); + it("populates top-level fields", () => { + const testEventData = fromRheaMessage({ + ...testMessage, + ...{ content_type: "application/json", correlation_id: "cid", message_id: 1 } + }); + should().equal(testEventData.messageId, 1, "Unexpected messageId found."); + should().equal( + testEventData.contentType, + "application/json", + "Unexpected contentType found." + ); + should().equal(testEventData.correlationId, "cid", "Unexpected correlationId found."); }); - it("sequenceNumber gets the sequence number from system properties", function(): void { - const testEventData = fromRheaMessage(testMessage); - testEventData.sequenceNumber!.should.equal(testAnnotations["x-opt-sequence-number"]); - }); + describe("properties", function(): void { + it("enqueuedTimeUtc gets the enqueued time from system properties", function(): void { + const testEventData = fromRheaMessage(testMessage); + testEventData + .enqueuedTimeUtc!.getTime() + .should.equal(testAnnotations["x-opt-enqueued-time"]); + }); - it("partitionKey gets the sequence number from system properties", function(): void { - const testEventData = fromRheaMessage(testMessage); - testEventData.partitionKey!.should.equal(testAnnotations["x-opt-partition-key"]); + it("offset gets the offset from system properties", function(): void { + const testEventData = fromRheaMessage(testMessage); + testEventData.offset!.should.equal(testAnnotations["x-opt-offset"]); + }); + + it("sequenceNumber gets the sequence number from system properties", function(): void { + const testEventData = fromRheaMessage(testMessage); + testEventData.sequenceNumber!.should.equal(testAnnotations["x-opt-sequence-number"]); + }); + + it("partitionKey gets the partition key from system properties", function(): void { + const testEventData = fromRheaMessage(testMessage); + testEventData.partitionKey!.should.equal(testAnnotations["x-opt-partition-key"]); + }); + + it("returns systemProperties for unknown message annotations", function(): void { + const extraAnnotations = { + "x-iot-foo-prop": "just-a-foo", + "x-iot-bar-prop": "bar-above-the-rest" + }; + const testEventData = fromRheaMessage({ + body: testBody, + application_properties: applicationProperties, + message_annotations: { + ...testAnnotations, + ...extraAnnotations + } + }); + testEventData + .enqueuedTimeUtc!.getTime() + .should.equal(testAnnotations["x-opt-enqueued-time"]); + testEventData.offset!.should.equal(testAnnotations["x-opt-offset"]); + testEventData.sequenceNumber!.should.equal(testAnnotations["x-opt-sequence-number"]); + testEventData.partitionKey!.should.equal(testAnnotations["x-opt-partition-key"]); + testEventData.systemProperties!["x-iot-foo-prop"].should.eql( 
extraAnnotations["x-iot-foo-prop"] + ); + testEventData.systemProperties!["x-iot-bar-prop"].should.eql( + extraAnnotations["x-iot-bar-prop"] + ); + }); + + it("returns systemProperties for special known properties", function(): void { + const testEventData = fromRheaMessage({ + body: testBody, + application_properties: applicationProperties, + message_annotations: testAnnotations, + message_id: "messageId", + user_id: "userId", + to: "to", + subject: "subject", + reply_to: "replyTo", + reply_to_group_id: "replyToGroupId", + content_encoding: "utf-8", + content_type: "application/json", + correlation_id: "id2", + absolute_expiry_time: new Date(0), + creation_time: new Date(0), + group_id: "groupId", + group_sequence: 1 + }); + + testEventData + .enqueuedTimeUtc!.getTime() + .should.equal(testAnnotations["x-opt-enqueued-time"]); + testEventData.offset!.should.equal(testAnnotations["x-opt-offset"]); + testEventData.sequenceNumber!.should.equal(testAnnotations["x-opt-sequence-number"]); + testEventData.partitionKey!.should.equal(testAnnotations["x-opt-partition-key"]); + testEventData.systemProperties!["messageId"].should.equal("messageId"); + testEventData.systemProperties!["userId"].should.equal("userId"); + testEventData.systemProperties!["to"].should.equal("to"); + testEventData.systemProperties!["subject"].should.equal("subject"); + testEventData.systemProperties!["replyTo"].should.equal("replyTo"); + testEventData.systemProperties!["replyToGroupId"].should.equal("replyToGroupId"); + testEventData.systemProperties!["contentEncoding"].should.equal("utf-8"); + testEventData.systemProperties!["contentType"].should.equal("application/json"); + testEventData.systemProperties!["correlationId"].should.equal("id2"); + testEventData.systemProperties!["absoluteExpiryTime"].should.equal(0); + testEventData.systemProperties!["creationTime"].should.equal(0); + testEventData.systemProperties!["groupId"].should.equal("groupId"); + testEventData.systemProperties!["groupSequence"].should.equal(1); + }); }); - it("returns systemProperties for unknown message annotations", function(): void { + it("deserializes Dates to numbers in properties and annotations", () => { + const timestamp = new Date(); const extraAnnotations = { - "x-iot-foo-prop": "just-a-foo", - "x-iot-bar-prop": "bar-above-the-rest" + "x-date": timestamp, + "x-number": timestamp.getTime() }; const testEventData = fromRheaMessage({ body: testBody, - application_properties: applicationProperties, + application_properties: { + topLevelDate: timestamp, + child: { + nestedDate: timestamp, + children: [timestamp, { deepDate: timestamp }] + } + }, message_annotations: { ...testAnnotations, ...extraAnnotations @@ -106,210 +184,137 @@ describe("EventData", function(): void { testEventData.offset!.should.equal(testAnnotations["x-opt-offset"]); testEventData.sequenceNumber!.should.equal(testAnnotations["x-opt-sequence-number"]); testEventData.partitionKey!.should.equal(testAnnotations["x-opt-partition-key"]); - testEventData.systemProperties!["x-iot-foo-prop"].should.eql( - extraAnnotations["x-iot-foo-prop"] + testEventData.systemProperties!["x-date"].should.eql(extraAnnotations["x-date"].getTime()); + testEventData.systemProperties!["x-number"].should.eql(extraAnnotations["x-number"]); + testEventData.properties!.should.eql({ + topLevelDate: timestamp.getTime(), + child: { + nestedDate: timestamp.getTime(), + children: [timestamp.getTime(), { deepDate: timestamp.getTime() }] + } + }); + }); + }); + describe("toAmqpMessage", function(): void { + 
it("populates body with the message body encoded", function(): void { + const expectedTestBodyContents = Buffer.from(JSON.stringify(testBody)); + should().equal( + expectedTestBodyContents.equals(messageFromED.body.content), + true, + "Encoded body does not match expected result." ); - testEventData.systemProperties!["x-iot-bar-prop"].should.eql( - extraAnnotations["x-iot-bar-prop"] + should().equal( + messageFromED.body.typecode, + dataSectionTypeCode, + "Unexpected typecode encountered on body." ); }); - it("returns systemProperties for special known properties", function(): void { - const testEventData = fromRheaMessage({ - body: testBody, - application_properties: applicationProperties, - message_annotations: testAnnotations, - message_id: "messageId", - user_id: "userId", - to: "to", - subject: "subject", - reply_to: "replyTo", - reply_to_group_id: "replyToGroupId", - content_encoding: "utf-8", - content_type: "application/json", - correlation_id: "id2", - absolute_expiry_time: new Date(0), - creation_time: new Date(0), - group_id: "groupId", - group_sequence: 1 + it("populates top-level fields", () => { + const message = toRheaMessage({ + ...testSourceEventData, + ...{ contentType: "application/json", correlationId: "cid", messageId: 1 } }); - - testEventData - .enqueuedTimeUtc!.getTime() - .should.equal(testAnnotations["x-opt-enqueued-time"]); - testEventData.offset!.should.equal(testAnnotations["x-opt-offset"]); - testEventData.sequenceNumber!.should.equal(testAnnotations["x-opt-sequence-number"]); - testEventData.partitionKey!.should.equal(testAnnotations["x-opt-partition-key"]); - testEventData.systemProperties!["messageId"].should.equal("messageId"); - testEventData.systemProperties!["userId"].should.equal("userId"); - testEventData.systemProperties!["to"].should.equal("to"); - testEventData.systemProperties!["subject"].should.equal("subject"); - testEventData.systemProperties!["replyTo"].should.equal("replyTo"); - testEventData.systemProperties!["replyToGroupId"].should.equal("replyToGroupId"); - testEventData.systemProperties!["contentEncoding"].should.equal("utf-8"); - testEventData.systemProperties!["contentType"].should.equal("application/json"); - testEventData.systemProperties!["correlationId"].should.equal("id2"); - testEventData.systemProperties!["absoluteExpiryTime"].should.equal(0); - testEventData.systemProperties!["creationTime"].should.equal(0); - testEventData.systemProperties!["groupId"].should.equal("groupId"); - testEventData.systemProperties!["groupSequence"].should.equal(1); - }); - }); - - it("deserializes Dates to numbers in properties and annotations", () => { - const timestamp = new Date(); - const extraAnnotations = { - "x-date": timestamp, - "x-number": timestamp.getTime() - }; - const testEventData = fromRheaMessage({ - body: testBody, - application_properties: { - topLevelDate: timestamp, - child: { - nestedDate: timestamp, - children: [timestamp, { deepDate: timestamp }] - } - }, - message_annotations: { - ...testAnnotations, - ...extraAnnotations - } - }); - testEventData.enqueuedTimeUtc!.getTime().should.equal(testAnnotations["x-opt-enqueued-time"]); - testEventData.offset!.should.equal(testAnnotations["x-opt-offset"]); - testEventData.sequenceNumber!.should.equal(testAnnotations["x-opt-sequence-number"]); - testEventData.partitionKey!.should.equal(testAnnotations["x-opt-partition-key"]); - testEventData.systemProperties!["x-date"].should.eql(extraAnnotations["x-date"].getTime()); - 
testEventData.systemProperties!["x-number"].should.eql(extraAnnotations["x-number"]); - testEventData.properties!.should.eql({ - topLevelDate: timestamp.getTime(), - child: { - nestedDate: timestamp.getTime(), - children: [timestamp.getTime(), { deepDate: timestamp.getTime() }] - } + should().equal(message.message_id, 1, "Unexpected message_id found."); + should().equal(message.content_type, "application/json", "Unexpected content_type found."); + should().equal(message.correlation_id, "cid", "Unexpected correlation_id found."); }); - }); - }); - describe("toAmqpMessage", function(): void { - it("populates body with the message body encoded", function(): void { - const expectedTestBodyContents = Buffer.from(JSON.stringify(testBody)); - should().equal( - expectedTestBodyContents.equals(messageFromED.body.content), - true, - "Encoded body does not match expected result." - ); - should().equal( - messageFromED.body.typecode, - dataSectionTypeCode, - "Unexpected typecode encountered on body." - ); - }); - it("populates top-level fields", () => { - const message = toRheaMessage({ - ...testSourceEventData, - ...{ contentType: "application/json", correlationId: "cid", messageId: 1 } + it("populates application_properties of the message", function(): void { + messageFromED.application_properties!.should.equal(properties); }); - should().equal(message.message_id, 1, "Unexpected message_id found."); - should().equal(message.content_type, "application/json", "Unexpected content_type found."); - should().equal(message.correlation_id, "cid", "Unexpected correlation_id found."); - }); - - it("populates application_properties of the message", function(): void { - messageFromED.application_properties!.should.equal(properties); - }); - it("AmqpAnnotatedMessage (explicit type)", () => { - const amqpAnnotatedMessage: AmqpAnnotatedMessage = { - body: "hello", - bodyType: "value" - }; + it("AmqpAnnotatedMessage (explicit type)", () => { + const amqpAnnotatedMessage: AmqpAnnotatedMessage = { + body: "hello", + bodyType: "value" + }; - const rheaMessage = toRheaMessage(amqpAnnotatedMessage); + const rheaMessage = toRheaMessage(amqpAnnotatedMessage); - assert.equal(rheaMessage.body.typecode, valueSectionTypeCode); - }); + assert.equal(rheaMessage.body.typecode, valueSectionTypeCode); + }); - it("AmqpAnnotatedMessage (implicit type)", () => { - const amqpAnnotatedMessage: AmqpAnnotatedMessage = { - body: "hello", - bodyType: undefined - }; + it("AmqpAnnotatedMessage (implicit type)", () => { + const amqpAnnotatedMessage: AmqpAnnotatedMessage = { + body: "hello", + bodyType: undefined + }; - const rheaMessage = toRheaMessage(amqpAnnotatedMessage); + const rheaMessage = toRheaMessage(amqpAnnotatedMessage); - assert.equal(rheaMessage.body.typecode, dataSectionTypeCode); - }); + assert.equal(rheaMessage.body.typecode, dataSectionTypeCode); + }); - it("EventData", () => { - const event: EventData = { - body: "hello" - }; + it("EventData", () => { + const event: EventData = { + body: "hello" + }; - const rheaMessage = toRheaMessage(event); + const rheaMessage = toRheaMessage(event); - assert.equal(rheaMessage.body.typecode, dataSectionTypeCode); - }); + assert.equal(rheaMessage.body.typecode, dataSectionTypeCode); + }); - it("ReceivedEventData (sequence)", () => { - const event: ReceivedEventData = { - enqueuedTimeUtc: new Date(), - offset: 100, - partitionKey: null, - sequenceNumber: 1, - body: ["foo", "bar"], - getRawAmqpMessage() { - return { - body: this.body, - bodyType: "sequence" - }; - } - }; + 
it("ReceivedEventData (sequence)", () => { + const event: ReceivedEventData = { + enqueuedTimeUtc: new Date(), + offset: 100, + partitionKey: null, + sequenceNumber: 1, + body: ["foo", "bar"], + getRawAmqpMessage() { + return { + body: this.body, + bodyType: "sequence" + }; + } + }; - const rheaMessage = toRheaMessage(event); + const rheaMessage = toRheaMessage(event); - assert.equal(rheaMessage.body.typecode, sequenceSectionTypeCode); - }); + assert.equal(rheaMessage.body.typecode, sequenceSectionTypeCode); + }); - it("ReceivedEventData (data)", () => { - const event: ReceivedEventData = { - enqueuedTimeUtc: new Date(), - offset: 100, - partitionKey: null, - sequenceNumber: 1, - body: ["foo", "bar"], - getRawAmqpMessage() { - return { - body: this.body, - bodyType: "data" - }; - } - }; + it("ReceivedEventData (data)", () => { + const event: ReceivedEventData = { + enqueuedTimeUtc: new Date(), + offset: 100, + partitionKey: null, + sequenceNumber: 1, + body: ["foo", "bar"], + getRawAmqpMessage() { + return { + body: this.body, + bodyType: "data" + }; + } + }; - const rheaMessage = toRheaMessage(event); + const rheaMessage = toRheaMessage(event); - assert.equal(rheaMessage.body.typecode, dataSectionTypeCode); - }); + assert.equal(rheaMessage.body.typecode, dataSectionTypeCode); + }); - it("ReceivedEventData (value)", () => { - const event: ReceivedEventData = { - enqueuedTimeUtc: new Date(), - offset: 100, - partitionKey: null, - sequenceNumber: 1, - body: ["foo", "bar"], - getRawAmqpMessage() { - return { - body: this.body, - bodyType: "value" - }; - } - }; + it("ReceivedEventData (value)", () => { + const event: ReceivedEventData = { + enqueuedTimeUtc: new Date(), + offset: 100, + partitionKey: null, + sequenceNumber: 1, + body: ["foo", "bar"], + getRawAmqpMessage() { + return { + body: this.body, + bodyType: "value" + }; + } + }; - const rheaMessage = toRheaMessage(event); + const rheaMessage = toRheaMessage(event); - assert.equal(rheaMessage.body.typecode, valueSectionTypeCode); + assert.equal(rheaMessage.body.typecode, valueSectionTypeCode); + }); }); }); }); diff --git a/sdk/eventhub/event-hubs/test/internal/impl/partitionGate.spec.ts b/sdk/eventhub/event-hubs/test/internal/impl/partitionGate.spec.ts index 9ed7e1efb4c7..8893db9cabad 100644 --- a/sdk/eventhub/event-hubs/test/internal/impl/partitionGate.spec.ts +++ b/sdk/eventhub/event-hubs/test/internal/impl/partitionGate.spec.ts @@ -3,24 +3,27 @@ import { PartitionGate } from "../../../src/impl/partitionGate"; import chai from "chai"; +import { testWithServiceTypes } from "../../public/utils/testWithServiceTypes"; const should = chai.should(); -describe("PartitionGate", () => { - it("add", () => { - const gate = new PartitionGate(); +testWithServiceTypes(() => { + describe("PartitionGate", () => { + it("add", () => { + const gate = new PartitionGate(); - gate.add("all"); + gate.add("all"); - // all supercedes everything else - should.throw(() => gate.add("all"), /Partition already has a subscriber/); - should.throw(() => gate.add("0"), /Partition already has a subscriber/); + // all supercedes everything else + should.throw(() => gate.add("all"), /Partition already has a subscriber/); + should.throw(() => gate.add("0"), /Partition already has a subscriber/); - gate.remove("all"); + gate.remove("all"); - gate.add("0"); - gate.add("1"); // and it's okay to add non-conflicting partitions + gate.add("0"); + gate.add("1"); // and it's okay to add non-conflicting partitions - should.throw(() => gate.add("all"), /Partition already has a 
subscriber/); - should.throw(() => gate.add("0"), /Partition already has a subscriber/); + should.throw(() => gate.add("all"), /Partition already has a subscriber/); + should.throw(() => gate.add("0"), /Partition already has a subscriber/); + }); }); }); diff --git a/sdk/eventhub/event-hubs/test/internal/loadBalancingStrategy.spec.ts b/sdk/eventhub/event-hubs/test/internal/loadBalancingStrategy.spec.ts index 016e62cfc33a..205081f7c060 100644 --- a/sdk/eventhub/event-hubs/test/internal/loadBalancingStrategy.spec.ts +++ b/sdk/eventhub/event-hubs/test/internal/loadBalancingStrategy.spec.ts @@ -6,600 +6,603 @@ import { PartitionOwnership } from "../../src"; import { BalancedLoadBalancingStrategy } from "../../src/loadBalancerStrategies/balancedStrategy"; import { GreedyLoadBalancingStrategy } from "../../src/loadBalancerStrategies/greedyStrategy"; import { UnbalancedLoadBalancingStrategy } from "../../src/loadBalancerStrategies/unbalancedStrategy"; +import { testWithServiceTypes } from "../public/utils/testWithServiceTypes"; const should = chai.should(); -describe("LoadBalancingStrategy", () => { - function createOwnershipMap( - partitionToOwner: Record - ): Map { - const ownershipMap = new Map(); - - for (const partitionId in partitionToOwner) { - ownershipMap.set(partitionId, { - consumerGroup: "$Default", - eventHubName: "eventhubname1", - fullyQualifiedNamespace: "fqdn", - ownerId: partitionToOwner[partitionId], - partitionId: partitionId, - etag: "etag", - lastModifiedTimeInMs: Date.now() - }); - } +testWithServiceTypes(() => { + describe("LoadBalancingStrategy", () => { + function createOwnershipMap( + partitionToOwner: Record + ): Map { + const ownershipMap = new Map(); + + for (const partitionId in partitionToOwner) { + ownershipMap.set(partitionId, { + consumerGroup: "$Default", + eventHubName: "eventhubname1", + fullyQualifiedNamespace: "fqdn", + ownerId: partitionToOwner[partitionId], + partitionId: partitionId, + etag: "etag", + lastModifiedTimeInMs: Date.now() + }); + } - return ownershipMap; - } + return ownershipMap; + } - describe("UnbalancedLoadBalancingStrategy", () => { - it("all", () => { - const m = new Map(); - const lb = new UnbalancedLoadBalancingStrategy(); + describe("UnbalancedLoadBalancingStrategy", () => { + it("all", () => { + const m = new Map(); + const lb = new UnbalancedLoadBalancingStrategy(); - lb.getPartitionsToCliam("ownerId", m, ["1", "2", "3"]).should.deep.eq(["1", "2", "3"]); - should.equal(m.size, 0); - }); - - it("claim partitions we already own", () => { - const m = new Map(); - - m.set("1", { - consumerGroup: "", - fullyQualifiedNamespace: "", - eventHubName: "", - // we already own this so we won't - // try to reclaim it. - ownerId: "ownerId", - partitionId: "" + lb.getPartitionsToCliam("ownerId", m, ["1", "2", "3"]).should.deep.eq(["1", "2", "3"]); + should.equal(m.size, 0); }); - m.set("2", { - consumerGroup: "", - fullyQualifiedNamespace: "", - eventHubName: "", - // owned by someone else - we'll steal this - // partition - ownerId: "someOtherOwnerId", - partitionId: "" + it("claim partitions we already own", () => { + const m = new Map(); + + m.set("1", { + consumerGroup: "", + fullyQualifiedNamespace: "", + eventHubName: "", + // we already own this so we won't + // try to reclaim it. 
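// (Aside on the fixture: PartitionOwnership also carries optional etag and
// lastModifiedTimeInMs bookkeeping, as createOwnershipMap above shows, which the
// time-based strategies rely on. The unbalanced strategy ignores ownership
// entirely and, as the assertion at the end of this test shows, claims every
// partition it is offered.)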
+ ownerId: "ownerId",
+ partitionId: ""
+ });
+
+ m.set("2", {
+ consumerGroup: "",
+ fullyQualifiedNamespace: "",
+ eventHubName: "",
+ // owned by someone else - we'll steal this
+ // partition
+ ownerId: "someOtherOwnerId",
+ partitionId: ""
+ });
+
+ const lb = new UnbalancedLoadBalancingStrategy();
+
+ lb.getPartitionsToCliam("ownerId", m, ["1", "2", "3"]).should.deep.eq(["1", "2", "3"]);
});
-
- const lb = new UnbalancedLoadBalancingStrategy();
-
- lb.getPartitionsToCliam("ownerId", m, ["1", "2", "3"]).should.deep.eq(["1", "2", "3"]);
});
- });

- describe("BalancedLoadBalancingStrategy", () => {
- const lb = new BalancedLoadBalancingStrategy(1000 * 60);
-
- it("odd number of partitions per processor", () => {
- const allPartitions = ["0", "1", "2"];
-
- // at this point 'a' has it's fair share of partitions (there are 3 total)
- // and it's okay to have 1 extra.
- let partitionsToOwn = lb.getPartitionsToCliam(
- "a",
- createOwnershipMap({
- "1": "b",
- "2": "a",
- "3": "a"
- }),
- allPartitions
- );
- partitionsToOwn.sort();
- partitionsToOwn.should.be.deep.equal(
- [],
- "we've gotten our fair share, shouldn't claim anything new"
- );
-
- // now the other side of this is when we're fighting for the ownership of an
- // extra partition
- partitionsToOwn = lb.getPartitionsToCliam(
- "a",
- createOwnershipMap({
- "1": "b",
- "2": "a"
- }),
- allPartitions
- );
- partitionsToOwn.sort();
- partitionsToOwn.should.be.deep.equal(
- ["0"],
- "we had our minimum fair share (1) but there's still one extra (uneven number of partitions per processor) and we should snag it"
- );
- });
+ describe("BalancedLoadBalancingStrategy", () => {
+ const lb = new BalancedLoadBalancingStrategy(1000 * 60);
+
+ it("odd number of partitions per processor", () => {
+ const allPartitions = ["0", "1", "2"];
+
+ // at this point 'a' has its fair share of partitions (there are 3 total)
+ // and it's okay to have 1 extra.
+ let partitionsToOwn = lb.getPartitionsToCliam(
+ "a",
+ createOwnershipMap({
+ "1": "b",
+ "2": "a",
+ "3": "a"
+ }),
+ allPartitions
+ );
+ partitionsToOwn.sort();
+ partitionsToOwn.should.be.deep.equal(
+ [],
+ "we've gotten our fair share, shouldn't claim anything new"
+ );
+
+ // now the other side of this is when we're fighting for the ownership of an
+ // extra partition
+ partitionsToOwn = lb.getPartitionsToCliam(
+ "a",
+ createOwnershipMap({
+ "1": "b",
+ "2": "a"
+ }),
+ allPartitions
+ );
+ partitionsToOwn.sort();
+ partitionsToOwn.should.be.deep.equal(
+ ["0"],
+ "we had our minimum fair share (1) but there's still one extra (uneven number of partitions per processor) and we should snag it"
+ );
+ });

- it("even number of partitions per processor", () => {
- const allPartitions = ["0", "1", "2", "3"];
+ it("even number of partitions per processor", () => {
+ const allPartitions = ["0", "1", "2", "3"];
+
+ // at this point 'a' has its fair share of partitions (there are 4 total)
+ // so it'll stop claiming additional partitions.
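// (Fair-share arithmetic for this case: floor(4 partitions / 2 owners) = 2 with
// no remainder, so no processor is entitled to a "+1"; 'a' already owns "2" and
// "3", leaving nothing for it to claim.)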
+ let partitionsToOwn = lb.getPartitionsToCliam( + "a", + createOwnershipMap({ + "1": "b", + "2": "a", + "3": "a" + }), + allPartitions + ); + partitionsToOwn.sort(); + partitionsToOwn.should.be.deep.equal( + [], + "we've gotten our fair share, shouldn't claim anything new" + ); + + partitionsToOwn = lb.getPartitionsToCliam( + "a", + createOwnershipMap({ + "0": "b", + "1": "b", + "2": "a", + "3": "a" + }), + allPartitions + ); + partitionsToOwn.sort(); + partitionsToOwn.should.be.deep.equal([], "load is balanced, won't grab any more."); + }); - // at this point 'a' has it's fair share of partitions (there are 4 total) - // so it'll stop claiming additional partitions. - let partitionsToOwn = lb.getPartitionsToCliam( - "a", - createOwnershipMap({ - "1": "b", - "2": "a", - "3": "a" - }), - allPartitions - ); - partitionsToOwn.sort(); - partitionsToOwn.should.be.deep.equal( - [], - "we've gotten our fair share, shouldn't claim anything new" - ); - - partitionsToOwn = lb.getPartitionsToCliam( - "a", - createOwnershipMap({ - "0": "b", - "1": "b", - "2": "a", - "3": "a" - }), - allPartitions - ); - partitionsToOwn.sort(); - partitionsToOwn.should.be.deep.equal([], "load is balanced, won't grab any more."); - }); + // when there are no freely available partitions (partitions that have either expired or are literally unowned) + // we'll need to steal from an existing processor. + // This can happen in a few ways: + // 1. we were simply racing against other processors + // 2. we're coming in later after all partitions have been allocated (ie, scaling out) + // 3. timing issues, death of a processor, etc... + it("stealing", () => { + // something like this could happen if 'a' were just the only processor + // and now we're spinning up 'b' + let partitionsToOwn = lb.getPartitionsToCliam( + "b", + createOwnershipMap({ + "0": "a", + "1": "a", + "2": "a" + }), + ["0", "1", "2"] + ); + partitionsToOwn.sort(); + // we'll attempt to steal a partition from 'a'. + partitionsToOwn.length.should.equal( + 1, + "stealing with an odd number of partitions per processor" + ); + + // and now the same case as above, but with an even number of partitions per processor. + partitionsToOwn = lb.getPartitionsToCliam( + "b", + createOwnershipMap({ + "0": "a", + "1": "a", + "2": "a", + "3": "a" + }), + ["0", "1", "2", "3"] + ); + partitionsToOwn.sort(); + // we'll attempt to steal a partition from 'a'. + partitionsToOwn.length.should.equal( + 1, + "stealing with an even number of partitions per processor" + ); + }); - // when there are no freely available partitions (partitions that have either expired or are literally unowned) - // we'll need to steal from an existing processor. - // This can happen in a few ways: - // 1. we were simply racing against other processors - // 2. we're coming in later after all partitions have been allocated (ie, scaling out) - // 3. timing issues, death of a processor, etc... - it("stealing", () => { - // something like this could happen if 'a' were just the only processor - // and now we're spinning up 'b' - let partitionsToOwn = lb.getPartitionsToCliam( - "b", - createOwnershipMap({ - "0": "a", - "1": "a", - "2": "a" - }), - ["0", "1", "2"] - ); - partitionsToOwn.sort(); - // we'll attempt to steal a partition from 'a'. - partitionsToOwn.length.should.equal( - 1, - "stealing with an odd number of partitions per processor" - ); - - // and now the same case as above, but with an even number of partitions per processor. 
- partitionsToOwn = lb.getPartitionsToCliam( - "b", - createOwnershipMap({ + it("don't steal when you can just wait", () => { + // @chradek's case: let's say we have this partition layout: + // AAAABBBCCD + // + // Before, we'd let 'C' steal from 'A' - we see that we don't have enough + // +1 processors(exact match) and so 'C' attempts to become one. This can + // lead to some unnecessary thrash as 'A' loses partitions to a processor + // that has technically already met it's quota. + // + // Instead, we treat 'A' is a +1-ish specifically for when we ('C') + // are checking if we want to grab more partitions. + // + // This allows 'A' to just naturally decline as _actual_ processors grab + // their minimum required partitions rather than forcing it and possibly + // having a partition have to juggle between partitions as they try to + // meet the minimum. + const partitions = ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"]; + + const lbs = new BalancedLoadBalancingStrategy(1000 * 60); + + // we'll do 4 consumers + const initialOwnershipMap = createOwnershipMap({ "0": "a", "1": "a", "2": "a", - "3": "a" - }), - ["0", "1", "2", "3"] - ); - partitionsToOwn.sort(); - // we'll attempt to steal a partition from 'a'. - partitionsToOwn.length.should.equal( - 1, - "stealing with an even number of partitions per processor" - ); - }); + "3": "a", - it("don't steal when you can just wait", () => { - // @chradek's case: let's say we have this partition layout: - // AAAABBBCCD - // - // Before, we'd let 'C' steal from 'A' - we see that we don't have enough - // +1 processors(exact match) and so 'C' attempts to become one. This can - // lead to some unnecessary thrash as 'A' loses partitions to a processor - // that has technically already met it's quota. - // - // Instead, we treat 'A' is a +1-ish specifically for when we ('C') - // are checking if we want to grab more partitions. - // - // This allows 'A' to just naturally decline as _actual_ processors grab - // their minimum required partitions rather than forcing it and possibly - // having a partition have to juggle between partitions as they try to - // meet the minimum. - const partitions = ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"]; - - const lbs = new BalancedLoadBalancingStrategy(1000 * 60); - - // we'll do 4 consumers - const initialOwnershipMap = createOwnershipMap({ - "0": "a", - "1": "a", - "2": "a", - "3": "a", - - "4": "b", - "5": "b", - "6": "b", - - "7": "c", - "8": "c", - - "9": "d" - }); - - const requestedPartitions = lbs.getPartitionsToCliam("c", initialOwnershipMap, partitions); - requestedPartitions.sort(); + "4": "b", + "5": "b", + "6": "b", - requestedPartitions.should.deep.equal( - [], - "c will not steal one partition since it sees that, eventually, 'a' will lose its partitions and become a +1 processor on it's own" - ); - }); + "7": "c", + "8": "c", - it("avoid thrash", () => { - // this is a case where we shouldn't steal - we have - // the minimum number of partitions and stealing at this - // point will just keep thrashing both processors. 
- const partitionsToOwn = lb.getPartitionsToCliam( - "b", - createOwnershipMap({ - "0": "a", - "1": "b", - "2": "a" - }), - ["0", "1", "2"] - ); - - partitionsToOwn.sort(); - partitionsToOwn.should.deep.equal([], "should not re-steal when things are balanced"); - }); + "9": "d" + }); - it("general cases", () => { - const allPartitions = ["0", "1", "2", "3"]; - - // in the presence of no owners we claim a random partition - let partitionsToOwn = lb.getPartitionsToCliam("a", createOwnershipMap({}), allPartitions); - partitionsToOwn.length.should.be.equal(1, "nothing is owned, claim one"); - - // if there are other owners we should claim up to #partitions/#owners - partitionsToOwn = lb.getPartitionsToCliam( - "a", - createOwnershipMap({ - "1": "b", - "3": "a" - }), - allPartitions - ); - partitionsToOwn.length.should.be.equal(1, "1 and 1 with another owner, should claim one"); - // better not try to claim 'b's partition when there are unowned partitions - partitionsToOwn.filter((p) => p === "1").length.should.equal(0); - - // 'b' should claim the last unowned partition - partitionsToOwn = lb.getPartitionsToCliam( - "b", - createOwnershipMap({ - "1": "b", - "2": "a", - "3": "a" - }), - allPartitions - ); - partitionsToOwn.sort(); - partitionsToOwn.should.be.deep.equal(["0"], "b grabbed the last available partition"); - - // we're balanced - processors now only grab the partitions that they own - partitionsToOwn = lb.getPartitionsToCliam( - "b", - createOwnershipMap({ - "0": "b", - "1": "a", - "2": "b", - "3": "a" - }), - allPartitions - ); - partitionsToOwn.sort(); - partitionsToOwn.should.be.deep.equal([], "balanced: b should not grab anymore partitions"); - }); + const requestedPartitions = lbs.getPartitionsToCliam("c", initialOwnershipMap, partitions); + requestedPartitions.sort(); - it("honors the partitionOwnershipExpirationIntervalInMs", () => { - const intervalInMs = 1000; - const lbs = new BalancedLoadBalancingStrategy(intervalInMs); - const allPartitions = ["0", "1"]; - const ownershipMap = createOwnershipMap({ - "0": "b", - "1": "a" + requestedPartitions.should.deep.equal( + [], + "c will not steal one partition since it sees that, eventually, 'a' will lose its partitions and become a +1 processor on it's own" + ); }); - // At this point, 'a' has its fair share of partitions, and none should be returned. - let partitionsToOwn = lbs.getPartitionsToCliam("a", ownershipMap, allPartitions); - partitionsToOwn.length.should.equal(0, "Expected to not claim any new partitions."); + it("avoid thrash", () => { + // this is a case where we shouldn't steal - we have + // the minimum number of partitions and stealing at this + // point will just keep thrashing both processors. + const partitionsToOwn = lb.getPartitionsToCliam( + "b", + createOwnershipMap({ + "0": "a", + "1": "b", + "2": "a" + }), + ["0", "1", "2"] + ); + + partitionsToOwn.sort(); + partitionsToOwn.should.deep.equal([], "should not re-steal when things are balanced"); + }); - // Change the ownership of partition "0" so it is older than the interval. - const ownership = ownershipMap.get("0")!; - ownership.lastModifiedTimeInMs = Date.now() - (intervalInMs + 1); // Add 1 to the interval to ensure it has just expired. 
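(The fair-share rule that the Balanced tests above keep exercising can be modeled in a few lines. The sketch below is illustrative only, not the library's implementation: it assumes every ownership record is still current and ignores expiry, and note that the misspelled getPartitionsToCliam seen throughout these tests is the actual identifier the strategies expose.)

    // Illustrative sketch: the fair share a processor is entitled to, given an
    // ownership map shaped like the ones createOwnershipMap builds above.
    function fairShare(
      ownerId: string,
      ownershipMap: Map<string, { ownerId: string }>,
      allPartitions: string[]
    ): number {
      const owners = new Set<string>([ownerId]);
      for (const ownership of ownershipMap.values()) {
        owners.add(ownership.ownerId);
      }
      // Every processor gets floor(partitions / owners); the remainder
      // (partitions % owners) is why an "odd" layout tolerates a +1 above.
      return Math.floor(allPartitions.length / owners.size);
    }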
+ it("general cases", () => { + const allPartitions = ["0", "1", "2", "3"]; + + // in the presence of no owners we claim a random partition + let partitionsToOwn = lb.getPartitionsToCliam("a", createOwnershipMap({}), allPartitions); + partitionsToOwn.length.should.be.equal(1, "nothing is owned, claim one"); + + // if there are other owners we should claim up to #partitions/#owners + partitionsToOwn = lb.getPartitionsToCliam( + "a", + createOwnershipMap({ + "1": "b", + "3": "a" + }), + allPartitions + ); + partitionsToOwn.length.should.be.equal(1, "1 and 1 with another owner, should claim one"); + // better not try to claim 'b's partition when there are unowned partitions + partitionsToOwn.filter((p) => p === "1").length.should.equal(0); + + // 'b' should claim the last unowned partition + partitionsToOwn = lb.getPartitionsToCliam( + "b", + createOwnershipMap({ + "1": "b", + "2": "a", + "3": "a" + }), + allPartitions + ); + partitionsToOwn.sort(); + partitionsToOwn.should.be.deep.equal(["0"], "b grabbed the last available partition"); + + // we're balanced - processors now only grab the partitions that they own + partitionsToOwn = lb.getPartitionsToCliam( + "b", + createOwnershipMap({ + "0": "b", + "1": "a", + "2": "b", + "3": "a" + }), + allPartitions + ); + partitionsToOwn.sort(); + partitionsToOwn.should.be.deep.equal([], "balanced: b should not grab anymore partitions"); + }); - partitionsToOwn = lbs.getPartitionsToCliam("a", ownershipMap, allPartitions); - partitionsToOwn.should.deep.equal(["0"]); - }); - }); + it("honors the partitionOwnershipExpirationIntervalInMs", () => { + const intervalInMs = 1000; + const lbs = new BalancedLoadBalancingStrategy(intervalInMs); + const allPartitions = ["0", "1"]; + const ownershipMap = createOwnershipMap({ + "0": "b", + "1": "a" + }); - describe("GreedyLoadBalancingStrategy", () => { - const lb = new GreedyLoadBalancingStrategy(1000 * 60); + // At this point, 'a' has its fair share of partitions, and none should be returned. + let partitionsToOwn = lbs.getPartitionsToCliam("a", ownershipMap, allPartitions); + partitionsToOwn.length.should.equal(0, "Expected to not claim any new partitions."); - it("odd number of partitions per processor", () => { - const allPartitions = ["0", "1", "2"]; + // Change the ownership of partition "0" so it is older than the interval. + const ownership = ownershipMap.get("0")!; + ownership.lastModifiedTimeInMs = Date.now() - (intervalInMs + 1); // Add 1 to the interval to ensure it has just expired. - // at this point 'a' has it's fair share of partitions (there are 3 total) - // and it's okay to have 1 extra. 
- let partitionsToOwn = lb.getPartitionsToCliam( - "a", - createOwnershipMap({ - "1": "b", - "2": "a", - "3": "a" - }), - allPartitions - ); - partitionsToOwn.sort(); - partitionsToOwn.should.be.deep.equal( - [], - "we've gotten our fair share, shouldn't claim anything new" - ); - - // now the other side of this is when we're fighting for the ownership of an - // extra partition - partitionsToOwn = lb.getPartitionsToCliam( - "a", - createOwnershipMap({ - "1": "b", - "2": "a" - }), - allPartitions - ); - partitionsToOwn.sort(); - partitionsToOwn.should.be.deep.equal( - ["0"], - "we had our minimum fair share (1) but there's still one extra (uneven number of partitions per processor) and we should snag it" - ); + partitionsToOwn = lbs.getPartitionsToCliam("a", ownershipMap, allPartitions); + partitionsToOwn.should.deep.equal(["0"]); + }); }); - it("even number of partitions per processor", () => { - const allPartitions = ["0", "1", "2", "3"]; + describe("GreedyLoadBalancingStrategy", () => { + const lb = new GreedyLoadBalancingStrategy(1000 * 60); + + it("odd number of partitions per processor", () => { + const allPartitions = ["0", "1", "2"]; + + // at this point 'a' has it's fair share of partitions (there are 3 total) + // and it's okay to have 1 extra. + let partitionsToOwn = lb.getPartitionsToCliam( + "a", + createOwnershipMap({ + "1": "b", + "2": "a", + "3": "a" + }), + allPartitions + ); + partitionsToOwn.sort(); + partitionsToOwn.should.be.deep.equal( + [], + "we've gotten our fair share, shouldn't claim anything new" + ); + + // now the other side of this is when we're fighting for the ownership of an + // extra partition + partitionsToOwn = lb.getPartitionsToCliam( + "a", + createOwnershipMap({ + "1": "b", + "2": "a" + }), + allPartitions + ); + partitionsToOwn.sort(); + partitionsToOwn.should.be.deep.equal( + ["0"], + "we had our minimum fair share (1) but there's still one extra (uneven number of partitions per processor) and we should snag it" + ); + }); - // at this point 'a' has it's fair share of partitions (there are 4 total) - // so it'll stop claiming additional partitions. - let partitionsToOwn = lb.getPartitionsToCliam( - "a", - createOwnershipMap({ - "1": "b", - "2": "a", - "3": "a" - }), - allPartitions - ); - partitionsToOwn.sort(); - partitionsToOwn.should.be.deep.equal( - [], - "we've gotten our fair share, shouldn't claim anything new" - ); - - partitionsToOwn = lb.getPartitionsToCliam( - "a", - createOwnershipMap({ - "0": "b", - "1": "b", - "2": "a", - "3": "a" - }), - allPartitions - ); - partitionsToOwn.sort(); - partitionsToOwn.should.be.deep.equal([], "load is balanced, won't grab any more."); - }); + it("even number of partitions per processor", () => { + const allPartitions = ["0", "1", "2", "3"]; + + // at this point 'a' has it's fair share of partitions (there are 4 total) + // so it'll stop claiming additional partitions. 
+ let partitionsToOwn = lb.getPartitionsToCliam( + "a", + createOwnershipMap({ + "1": "b", + "2": "a", + "3": "a" + }), + allPartitions + ); + partitionsToOwn.sort(); + partitionsToOwn.should.be.deep.equal( + [], + "we've gotten our fair share, shouldn't claim anything new" + ); + + partitionsToOwn = lb.getPartitionsToCliam( + "a", + createOwnershipMap({ + "0": "b", + "1": "b", + "2": "a", + "3": "a" + }), + allPartitions + ); + partitionsToOwn.sort(); + partitionsToOwn.should.be.deep.equal([], "load is balanced, won't grab any more."); + }); - // when there are no freely available partitions (partitions that have either expired or are literally unowned) - // we'll need to steal from an existing processor. - // This can happen in a few ways: - // 1. we were simply racing against other processors - // 2. we're coming in later after all partitions have been allocated (ie, scaling out) - // 3. timing issues, death of a processor, etc... - it("stealing", () => { - // something like this could happen if 'a' were just the only processor - // and now we're spinning up 'b' - let partitionsToOwn = lb.getPartitionsToCliam( - "b", - createOwnershipMap({ - "0": "a", - "1": "a", - "2": "a" - }), - ["0", "1", "2"] - ); - partitionsToOwn.sort(); - // we'll attempt to steal a partition from 'a'. - partitionsToOwn.length.should.equal( - 1, - "stealing with an odd number of partitions per processor" - ); - - // and now the same case as above, but with an even number of partitions per processor. - partitionsToOwn = lb.getPartitionsToCliam( - "b", - createOwnershipMap({ + // when there are no freely available partitions (partitions that have either expired or are literally unowned) + // we'll need to steal from an existing processor. + // This can happen in a few ways: + // 1. we were simply racing against other processors + // 2. we're coming in later after all partitions have been allocated (ie, scaling out) + // 3. timing issues, death of a processor, etc... + it("stealing", () => { + // something like this could happen if 'a' were just the only processor + // and now we're spinning up 'b' + let partitionsToOwn = lb.getPartitionsToCliam( + "b", + createOwnershipMap({ + "0": "a", + "1": "a", + "2": "a" + }), + ["0", "1", "2"] + ); + partitionsToOwn.sort(); + // we'll attempt to steal a partition from 'a'. + partitionsToOwn.length.should.equal( + 1, + "stealing with an odd number of partitions per processor" + ); + + // and now the same case as above, but with an even number of partitions per processor. + partitionsToOwn = lb.getPartitionsToCliam( + "b", + createOwnershipMap({ + "0": "a", + "1": "a", + "2": "a", + "3": "a" + }), + ["0", "1", "2", "3"] + ); + partitionsToOwn.sort(); + // we'll attempt to steal a partition from 'a'. + partitionsToOwn.length.should.equal( + 2, + "stealing with an even number of partitions per processor" + ); + }); + + it("claims unowned then steals", () => { + const allPartitions = []; + for (let i = 0; i < 8; i++) { + allPartitions.push(`${i}`); + } + + const partitionsToOwn = lb.getPartitionsToCliam( + "a", + createOwnershipMap({ + "0": "", + // skip 1, 2 + "3": "b", + "4": "b", + "5": "b", + "6": "b", + "7": "b" + }), + allPartitions + ); + partitionsToOwn.sort(); + // "a" should have 4 partitions in order to be balanced. + // Partitions "0", "1", "2" should be chosen before any are stolen. 
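// (Arithmetic: 8 partitions / 2 active owners = 4 each. Three partitions are
// effectively unowned - "0" has an empty ownerId and "1"/"2" have no record at
// all - so those are claimed first, and the fourth must be stolen from "b".)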
+ partitionsToOwn.length.should.equal(4, "should have claimed half of the partitions."); + partitionsToOwn + .slice(0, 3) + .should.deep.equal(["0", "1", "2"], "should have claimed unclaimed partitions first."); + }); + + it("don't steal when you can just wait", () => { + // @chradek's case: let's say we have this partition layout: + // AAAABBBCCD + // + // Before, we'd let 'C' steal from 'A' - we see that we don't have enough + // +1 processors(exact match) and so 'C' attempts to become one. This can + // lead to some unnecessary thrash as 'A' loses partitions to a processor + // that has technically already met it's quota. + // + // Instead, we treat 'A' is a +1-ish specifically for when we ('C') + // are checking if we want to grab more partitions. + // + // This allows 'A' to just naturally decline as _actual_ processors grab + // their minimum required partitions rather than forcing it and possibly + // having a partition have to juggle between partitions as they try to + // meet the minimum. + const partitions = ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"]; + + const lbs = new BalancedLoadBalancingStrategy(1000 * 60); + + // we'll do 4 consumers + const initialOwnershipMap = createOwnershipMap({ "0": "a", "1": "a", "2": "a", - "3": "a" - }), - ["0", "1", "2", "3"] - ); - partitionsToOwn.sort(); - // we'll attempt to steal a partition from 'a'. - partitionsToOwn.length.should.equal( - 2, - "stealing with an even number of partitions per processor" - ); - }); - - it("claims unowned then steals", () => { - const allPartitions = []; - for (let i = 0; i < 8; i++) { - allPartitions.push(`${i}`); - } + "3": "a", - const partitionsToOwn = lb.getPartitionsToCliam( - "a", - createOwnershipMap({ - "0": "", - // skip 1, 2 - "3": "b", "4": "b", "5": "b", "6": "b", - "7": "b" - }), - allPartitions - ); - partitionsToOwn.sort(); - // "a" should have 4 partitions in order to be balanced. - // Partitions "0", "1", "2" should be chosen before any are stolen. - partitionsToOwn.length.should.equal(4, "should have claimed half of the partitions."); - partitionsToOwn - .slice(0, 3) - .should.deep.equal(["0", "1", "2"], "should have claimed unclaimed partitions first."); - }); - it("don't steal when you can just wait", () => { - // @chradek's case: let's say we have this partition layout: - // AAAABBBCCD - // - // Before, we'd let 'C' steal from 'A' - we see that we don't have enough - // +1 processors(exact match) and so 'C' attempts to become one. This can - // lead to some unnecessary thrash as 'A' loses partitions to a processor - // that has technically already met it's quota. - // - // Instead, we treat 'A' is a +1-ish specifically for when we ('C') - // are checking if we want to grab more partitions. - // - // This allows 'A' to just naturally decline as _actual_ processors grab - // their minimum required partitions rather than forcing it and possibly - // having a partition have to juggle between partitions as they try to - // meet the minimum. 
- const partitions = ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"]; - - const lbs = new BalancedLoadBalancingStrategy(1000 * 60); - - // we'll do 4 consumers - const initialOwnershipMap = createOwnershipMap({ - "0": "a", - "1": "a", - "2": "a", - "3": "a", - - "4": "b", - "5": "b", - "6": "b", - - "7": "c", - "8": "c", - - "9": "d" - }); + "7": "c", + "8": "c", - const requestedPartitions = lbs.getPartitionsToCliam("c", initialOwnershipMap, partitions); - requestedPartitions.sort(); - - requestedPartitions.should.deep.equal( - [], - "c will not steal one partition since it sees that, eventually, 'a' will lose its partitions and become a +1 processor on it's own" - ); - }); - - it("avoid thrash", () => { - // this is a case where we shouldn't steal - we have - // the minimum number of partitions and stealing at this - // point will just keep thrashing both processors. - const partitionsToOwn = lb.getPartitionsToCliam( - "b", - createOwnershipMap({ - "0": "a", - "1": "b", - "2": "a" - }), - ["0", "1", "2"] - ); - - partitionsToOwn.sort(); - partitionsToOwn.should.deep.equal([], "should not re-steal when things are balanced"); - }); + "9": "d" + }); - it("general cases", () => { - const allPartitions = ["0", "1", "2", "3"]; - - // in the presence of no owners we claim a random partition - let partitionsToOwn = lb.getPartitionsToCliam("a", createOwnershipMap({}), allPartitions); - partitionsToOwn.length.should.be.equal(4, "nothing is owned, claim all"); - - // if there are other owners we should claim up to #partitions/#owners - partitionsToOwn = lb.getPartitionsToCliam( - "a", - createOwnershipMap({ - "1": "b", - "3": "a" - }), - allPartitions - ); - partitionsToOwn.length.should.be.equal(1, "1 and 1 with another owner, should claim one"); - // better not try to claim 'b's partition when there are unowned partitions - partitionsToOwn.filter((p) => p === "1").length.should.equal(0); - - // 'b' should claim the last unowned partition - partitionsToOwn = lb.getPartitionsToCliam( - "b", - createOwnershipMap({ - "1": "b", - "2": "a", - "3": "a" - }), - allPartitions - ); - partitionsToOwn.sort(); - partitionsToOwn.should.be.deep.equal(["0"], "b grabbed the last available partition"); - - // we're balanced - processors now only grab the partitions that they own - partitionsToOwn = lb.getPartitionsToCliam( - "b", - createOwnershipMap({ - "0": "b", - "1": "a", - "2": "b", - "3": "a" - }), - allPartitions - ); - partitionsToOwn.sort(); - partitionsToOwn.should.be.deep.equal([], "balanced: b should not grab anymore partitions"); - }); + const requestedPartitions = lbs.getPartitionsToCliam("c", initialOwnershipMap, partitions); + requestedPartitions.sort(); - it("honors the partitionOwnershipExpirationIntervalInMs", () => { - const intervalInMs = 1000; - const lbs = new GreedyLoadBalancingStrategy(intervalInMs); - const allPartitions = ["0", "1", "2", "3"]; - const ownershipMap = createOwnershipMap({ - "0": "b", - "1": "a" + requestedPartitions.should.deep.equal( + [], + "c will not steal one partition since it sees that, eventually, 'a' will lose its partitions and become a +1 processor on it's own" + ); }); - // At this point, "a" should only grab 1 partition since both "a" and "b" should end up with 2 partitions each. 
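// (Expiry semantics: an ownership record only counts toward its owner while
// Date.now() - lastModifiedTimeInMs is within the configured
// partitionOwnershipExpirationIntervalInMs; once it ages past the interval the
// partition is treated as unowned, which is what backdating
// lastModifiedTimeInMs by intervalInMs + 1 in this test simulates.)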
- let partitionsToOwn = lbs.getPartitionsToCliam("a", ownershipMap, allPartitions); - partitionsToOwn.length.should.equal(1, "Expected to claim 1 new partitions."); + it("avoid thrash", () => { + // this is a case where we shouldn't steal - we have + // the minimum number of partitions and stealing at this + // point will just keep thrashing both processors. + const partitionsToOwn = lb.getPartitionsToCliam( + "b", + createOwnershipMap({ + "0": "a", + "1": "b", + "2": "a" + }), + ["0", "1", "2"] + ); + + partitionsToOwn.sort(); + partitionsToOwn.should.deep.equal([], "should not re-steal when things are balanced"); + }); - // Change the ownership of partition "0" so it is older than the interval. - const ownership = ownershipMap.get("0")!; - ownership.lastModifiedTimeInMs = Date.now() - (intervalInMs + 1); // Add 1 to the interval to ensure it has just expired. + it("general cases", () => { + const allPartitions = ["0", "1", "2", "3"]; + + // in the presence of no owners we claim a random partition + let partitionsToOwn = lb.getPartitionsToCliam("a", createOwnershipMap({}), allPartitions); + partitionsToOwn.length.should.be.equal(4, "nothing is owned, claim all"); + + // if there are other owners we should claim up to #partitions/#owners + partitionsToOwn = lb.getPartitionsToCliam( + "a", + createOwnershipMap({ + "1": "b", + "3": "a" + }), + allPartitions + ); + partitionsToOwn.length.should.be.equal(1, "1 and 1 with another owner, should claim one"); + // better not try to claim 'b's partition when there are unowned partitions + partitionsToOwn.filter((p) => p === "1").length.should.equal(0); + + // 'b' should claim the last unowned partition + partitionsToOwn = lb.getPartitionsToCliam( + "b", + createOwnershipMap({ + "1": "b", + "2": "a", + "3": "a" + }), + allPartitions + ); + partitionsToOwn.sort(); + partitionsToOwn.should.be.deep.equal(["0"], "b grabbed the last available partition"); + + // we're balanced - processors now only grab the partitions that they own + partitionsToOwn = lb.getPartitionsToCliam( + "b", + createOwnershipMap({ + "0": "b", + "1": "a", + "2": "b", + "3": "a" + }), + allPartitions + ); + partitionsToOwn.sort(); + partitionsToOwn.should.be.deep.equal([], "balanced: b should not grab anymore partitions"); + }); - // At this point, "a" should grab partitions 0, 2, and 3. - // This is because "b" only owned 1 partition and that claim is expired, - // so "a" as treated as if it is the only owner. - partitionsToOwn = lbs.getPartitionsToCliam("a", ownershipMap, allPartitions); - partitionsToOwn.sort(); - partitionsToOwn.should.deep.equal(["0", "2", "3"]); + it("honors the partitionOwnershipExpirationIntervalInMs", () => { + const intervalInMs = 1000; + const lbs = new GreedyLoadBalancingStrategy(intervalInMs); + const allPartitions = ["0", "1", "2", "3"]; + const ownershipMap = createOwnershipMap({ + "0": "b", + "1": "a" + }); + + // At this point, "a" should only grab 1 partition since both "a" and "b" should end up with 2 partitions each. + let partitionsToOwn = lbs.getPartitionsToCliam("a", ownershipMap, allPartitions); + partitionsToOwn.length.should.equal(1, "Expected to claim 1 new partitions."); + + // Change the ownership of partition "0" so it is older than the interval. + const ownership = ownershipMap.get("0")!; + ownership.lastModifiedTimeInMs = Date.now() - (intervalInMs + 1); // Add 1 to the interval to ensure it has just expired. + + // At this point, "a" should grab partitions 0, 2, and 3. 
+ // This is because "b" only owned 1 partition and that claim is expired, + // so "a" as treated as if it is the only owner. + partitionsToOwn = lbs.getPartitionsToCliam("a", ownershipMap, allPartitions); + partitionsToOwn.sort(); + partitionsToOwn.should.deep.equal(["0", "2", "3"]); + }); }); }); }); diff --git a/sdk/eventhub/event-hubs/test/internal/misc.spec.ts b/sdk/eventhub/event-hubs/test/internal/misc.spec.ts index 4b82855c07c6..ca6d5617dace 100644 --- a/sdk/eventhub/event-hubs/test/internal/misc.spec.ts +++ b/sdk/eventhub/event-hubs/test/internal/misc.spec.ts @@ -24,453 +24,469 @@ import { } from "../../src/diagnostics/instrumentEventData"; import { TraceFlags } from "@azure/core-tracing"; import { SubscriptionHandlerForTests } from "../public/utils/subscriptionHandlerForTests"; -const env = getEnvVars(); - -describe("Misc tests", function(): void { - const service = { - connectionString: env[EnvVarKeys.EVENTHUB_CONNECTION_STRING], - path: env[EnvVarKeys.EVENTHUB_NAME] - }; - let consumerClient: EventHubConsumerClient; - let producerClient: EventHubProducerClient; - let hubInfo: EventHubProperties; - let partitionId: string; - let lastEnqueuedOffset: number; - - before("validate environment", async function(): Promise { - should.exist( - env[EnvVarKeys.EVENTHUB_CONNECTION_STRING], - "define EVENTHUB_CONNECTION_STRING in your environment before running integration tests." - ); - should.exist( - env[EnvVarKeys.EVENTHUB_NAME], - "define EVENTHUB_NAME in your environment before running integration tests." - ); - }); +import { testWithServiceTypes } from "../public/utils/testWithServiceTypes"; +import { createMockServer } from "../public/utils/mockService"; + +testWithServiceTypes((serviceVersion) => { + const env = getEnvVars(); + if (serviceVersion === "mock") { + let service: ReturnType; + before("Starting mock service", () => { + service = createMockServer(); + return service.start(); + }); - beforeEach(async () => { - debug("Creating the clients.."); - producerClient = new EventHubProducerClient(service.connectionString, service.path); - consumerClient = new EventHubConsumerClient( - EventHubConsumerClient.defaultConsumerGroupName, - service.connectionString, - service.path - ); - hubInfo = await consumerClient.getEventHubProperties(); - partitionId = hubInfo.partitionIds[0]; - lastEnqueuedOffset = (await consumerClient.getPartitionProperties(partitionId)) - .lastEnqueuedOffset; - }); + after("Stopping mock service", () => { + return service?.stop(); + }); + } - afterEach(async () => { - debug("Closing the clients.."); - await producerClient.close(); - await consumerClient.close(); - }); + describe("Misc tests", function(): void { + const service = { + connectionString: env[EnvVarKeys.EVENTHUB_CONNECTION_STRING], + path: env[EnvVarKeys.EVENTHUB_NAME] + }; + let consumerClient: EventHubConsumerClient; + let producerClient: EventHubProducerClient; + let hubInfo: EventHubProperties; + let partitionId: string; + let lastEnqueuedOffset: number; + + before("validate environment", async function(): Promise { + should.exist( + env[EnvVarKeys.EVENTHUB_CONNECTION_STRING], + "define EVENTHUB_CONNECTION_STRING in your environment before running integration tests." + ); + should.exist( + env[EnvVarKeys.EVENTHUB_NAME], + "define EVENTHUB_NAME in your environment before running integration tests." 
+ ); + }); - it("should be able to send and receive a large message correctly", async function(): Promise< - void - > { - const bodysize = 220 * 1024; - const msgString = "A".repeat(220 * 1024); - const msgBody = Buffer.from(msgString); - const obj: EventData = { body: msgBody }; - debug(`Partition ${partitionId} has last message with offset ${lastEnqueuedOffset}.`); - debug("Sending one message with %d bytes.", bodysize); - await producerClient.sendBatch([obj], { partitionId }); - debug("Successfully sent the large message."); - - let subscription: Subscription | undefined; - await new Promise((resolve, reject) => { - subscription = consumerClient.subscribe( - partitionId, - { - processEvents: async (data) => { - debug("received message: ", data.length); - should.exist(data); - should.equal(data.length, 1); - should.equal(data[0].body.toString(), msgString); - should.not.exist((data[0].properties || {}).message_id); - resolve(); - }, - processError: async (err) => { - reject(err); - } - }, - { - startPosition: { offset: lastEnqueuedOffset } - } + beforeEach(async () => { + debug("Creating the clients.."); + producerClient = new EventHubProducerClient(service.connectionString, service.path); + consumerClient = new EventHubConsumerClient( + EventHubConsumerClient.defaultConsumerGroupName, + service.connectionString, + service.path ); + hubInfo = await consumerClient.getEventHubProperties(); + partitionId = hubInfo.partitionIds[0]; + lastEnqueuedOffset = (await consumerClient.getPartitionProperties(partitionId)) + .lastEnqueuedOffset; }); - await subscription!.close(); - }); - it("should be able to send and receive a JSON object as a message correctly", async function(): Promise< - void - > { - const msgBody = { - id: "123-456-789", - weight: 10, - isBlue: true, - siblings: [ - { - id: "098-789-564", - weight: 20, - isBlue: false - } - ] - }; - const obj: EventData = { body: msgBody }; - debug(`Partition ${partitionId} has last message with offset ${lastEnqueuedOffset}.`); - debug("Sending one message %O", obj); - await producerClient.sendBatch([obj], { partitionId }); - debug("Successfully sent the large message."); - - let subscription: Subscription | undefined; - await new Promise((resolve, reject) => { - subscription = consumerClient.subscribe( - partitionId, - { - processEvents: async (data) => { - debug("received message: ", data.length); - should.exist(data); - should.equal(data.length, 1); - assert.deepEqual(data[0].body, msgBody); - should.not.exist((data[0].properties || {}).message_id); - resolve(); + afterEach(async () => { + debug("Closing the clients.."); + await producerClient.close(); + await consumerClient.close(); + }); + + it("should be able to send and receive a large message correctly", async function(): Promise< + void + > { + const bodysize = 220 * 1024; + const msgString = "A".repeat(220 * 1024); + const msgBody = Buffer.from(msgString); + const obj: EventData = { body: msgBody }; + debug(`Partition ${partitionId} has last message with offset ${lastEnqueuedOffset}.`); + debug("Sending one message with %d bytes.", bodysize); + await producerClient.sendBatch([obj], { partitionId }); + debug("Successfully sent the large message."); + + let subscription: Subscription | undefined; + await new Promise((resolve, reject) => { + subscription = consumerClient.subscribe( + partitionId, + { + processEvents: async (data) => { + debug("received message: ", data.length); + should.exist(data); + should.equal(data.length, 1); + should.equal(data[0].body.toString(), msgString); + 
should.not.exist((data[0].properties || {}).message_id); + resolve(); + }, + processError: async (err) => { + reject(err); + } }, - processError: async (err) => { - reject(err); + { + startPosition: { offset: lastEnqueuedOffset } } - }, - { - startPosition: { offset: lastEnqueuedOffset } - } - ); + ); + }); + await subscription!.close(); }); - await subscription!.close(); - }); - it("should be able to send and receive an array as a message correctly", async function(): Promise< - void - > { - const msgBody = [ - { - id: "098-789-564", - weight: 20, - isBlue: false - }, - 10, - 20, - "some string" - ]; - const obj: EventData = { body: msgBody, properties: { message_id: uuid() } }; - debug(`Partition ${partitionId} has last message with offset ${lastEnqueuedOffset}.`); - debug("Sending one message %O", obj); - await producerClient.sendBatch([obj], { partitionId }); - debug("Successfully sent the large message."); - - let subscription: Subscription | undefined; - await new Promise((resolve, reject) => { - subscription = consumerClient.subscribe( - partitionId, - { - processEvents: async (data) => { - debug("received message: ", data.length); - should.exist(data); - should.equal(data.length, 1); - assert.deepEqual(data[0].body, msgBody); - assert.strictEqual(data[0].properties!.message_id, obj.properties!.message_id); - resolve(); + it("should be able to send and receive a JSON object as a message correctly", async function(): Promise< + void + > { + const msgBody = { + id: "123-456-789", + weight: 10, + isBlue: true, + siblings: [ + { + id: "098-789-564", + weight: 20, + isBlue: false + } + ] + }; + const obj: EventData = { body: msgBody }; + debug(`Partition ${partitionId} has last message with offset ${lastEnqueuedOffset}.`); + debug("Sending one message %O", obj); + await producerClient.sendBatch([obj], { partitionId }); + debug("Successfully sent the large message."); + + let subscription: Subscription | undefined; + await new Promise((resolve, reject) => { + subscription = consumerClient.subscribe( + partitionId, + { + processEvents: async (data) => { + debug("received message: ", data.length); + should.exist(data); + should.equal(data.length, 1); + assert.deepEqual(data[0].body, msgBody); + should.not.exist((data[0].properties || {}).message_id); + resolve(); + }, + processError: async (err) => { + reject(err); + } }, - processError: async (err) => { - reject(err); + { + startPosition: { offset: lastEnqueuedOffset } } - }, - { - startPosition: { offset: lastEnqueuedOffset } - } - ); + ); + }); + await subscription!.close(); }); - await subscription!.close(); - }); - it("should be able to send a boolean as a message correctly", async function(): Promise { - const msgBody = true; - const obj: EventData = { body: msgBody }; - debug(`Partition ${partitionId} has last message with offset ${lastEnqueuedOffset}.`); - debug("Sending one message %O", obj); - await producerClient.sendBatch([obj], { partitionId }); - debug("Successfully sent the large message."); - - let subscription: Subscription | undefined; - await new Promise((resolve, reject) => { - subscription = consumerClient.subscribe( - partitionId, + it("should be able to send and receive an array as a message correctly", async function(): Promise< + void + > { + const msgBody = [ { - processEvents: async (data) => { - debug("received message: ", data.length); - should.exist(data); - should.equal(data.length, 1); - assert.deepEqual(data[0].body, msgBody); - should.not.exist((data[0].properties || {}).message_id); - resolve(); + id: 
"098-789-564", + weight: 20, + isBlue: false + }, + 10, + 20, + "some string" + ]; + const obj: EventData = { body: msgBody, properties: { message_id: uuid() } }; + debug(`Partition ${partitionId} has last message with offset ${lastEnqueuedOffset}.`); + debug("Sending one message %O", obj); + await producerClient.sendBatch([obj], { partitionId }); + debug("Successfully sent the large message."); + + let subscription: Subscription | undefined; + await new Promise((resolve, reject) => { + subscription = consumerClient.subscribe( + partitionId, + { + processEvents: async (data) => { + debug("received message: ", data.length); + should.exist(data); + should.equal(data.length, 1); + assert.deepEqual(data[0].body, msgBody); + assert.strictEqual(data[0].properties!.message_id, obj.properties!.message_id); + resolve(); + }, + processError: async (err) => { + reject(err); + } }, - processError: async (err) => { - reject(err); + { + startPosition: { offset: lastEnqueuedOffset } } - }, - { - startPosition: { offset: lastEnqueuedOffset } - } - ); + ); + }); + await subscription!.close(); }); - await subscription!.close(); - }); - it("should be able to send and receive batched messages correctly ", async function(): Promise< - void - > { - debug(`Partition ${partitionId} has last message with offset ${lastEnqueuedOffset}.`); - const messageCount = 5; - const d: EventData[] = []; - for (let i = 0; i < messageCount; i++) { - const obj: EventData = { body: `Hello EH ${i}` }; - d.push(obj); - } - - await producerClient.sendBatch(d, { partitionId }); - debug("Successfully sent 5 messages batched together."); - - let subscription: Subscription | undefined; - const receivedMsgs: ReceivedEventData[] = []; - await new Promise((resolve, reject) => { - subscription = consumerClient.subscribe( - partitionId, - { - processEvents: async (data) => { - debug("received message: ", data.length); - receivedMsgs.push(...data); - if (receivedMsgs.length === 5) { + it("should be able to send a boolean as a message correctly", async function(): Promise { + const msgBody = true; + const obj: EventData = { body: msgBody }; + debug(`Partition ${partitionId} has last message with offset ${lastEnqueuedOffset}.`); + debug("Sending one message %O", obj); + await producerClient.sendBatch([obj], { partitionId }); + debug("Successfully sent the large message."); + + let subscription: Subscription | undefined; + await new Promise((resolve, reject) => { + subscription = consumerClient.subscribe( + partitionId, + { + processEvents: async (data) => { + debug("received message: ", data.length); + should.exist(data); + should.equal(data.length, 1); + assert.deepEqual(data[0].body, msgBody); + should.not.exist((data[0].properties || {}).message_id); resolve(); + }, + processError: async (err) => { + reject(err); } }, - processError: async (err) => { - reject(err); + { + startPosition: { offset: lastEnqueuedOffset } } - }, - { - startPosition: { offset: lastEnqueuedOffset } - } - ); + ); + }); + await subscription!.close(); }); - await subscription!.close(); - receivedMsgs.length.should.equal(5); - for (const message of receivedMsgs) { - should.not.exist((message.properties || {}).message_id); - } - }); - it("should be able to send and receive batched messages as JSON objects correctly ", async function(): Promise< - void - > { - debug(`Partition ${partitionId} has last message with offset ${lastEnqueuedOffset}.`); - const messageCount = 5; - const d: EventData[] = []; - for (let i = 0; i < messageCount; i++) { - const obj: EventData = { - 
body: { - id: "123-456-789", - count: i, - weight: 10, - isBlue: true, - siblings: [ - { - id: "098-789-564", - weight: 20, - isBlue: false + it("should be able to send and receive batched messages correctly ", async function(): Promise< + void + > { + debug(`Partition ${partitionId} has last message with offset ${lastEnqueuedOffset}.`); + const messageCount = 5; + const d: EventData[] = []; + for (let i = 0; i < messageCount; i++) { + const obj: EventData = { body: `Hello EH ${i}` }; + d.push(obj); + } + + await producerClient.sendBatch(d, { partitionId }); + debug("Successfully sent 5 messages batched together."); + + let subscription: Subscription | undefined; + const receivedMsgs: ReceivedEventData[] = []; + await new Promise((resolve, reject) => { + subscription = consumerClient.subscribe( + partitionId, + { + processEvents: async (data) => { + debug("received message: ", data.length); + receivedMsgs.push(...data); + if (receivedMsgs.length === 5) { + resolve(); + } + }, + processError: async (err) => { + reject(err); } - ] - }, - properties: { - message_id: uuid() - } - }; - d.push(obj); - } + }, + { + startPosition: { offset: lastEnqueuedOffset } + } + ); + }); + await subscription!.close(); + receivedMsgs.length.should.equal(5); + for (const message of receivedMsgs) { + should.not.exist((message.properties || {}).message_id); + } + }); - await producerClient.sendBatch(d, { partitionId }); - debug("Successfully sent 5 messages batched together."); + it("should be able to send and receive batched messages as JSON objects correctly ", async function(): Promise< + void + > { + debug(`Partition ${partitionId} has last message with offset ${lastEnqueuedOffset}.`); + const messageCount = 5; + const d: EventData[] = []; + for (let i = 0; i < messageCount; i++) { + const obj: EventData = { + body: { + id: "123-456-789", + count: i, + weight: 10, + isBlue: true, + siblings: [ + { + id: "098-789-564", + weight: 20, + isBlue: false + } + ] + }, + properties: { + message_id: uuid() + } + }; + d.push(obj); + } - let subscription: Subscription | undefined; - const receivedMsgs: ReceivedEventData[] = []; - await new Promise((resolve, reject) => { - subscription = consumerClient.subscribe( - partitionId, - { - processEvents: async (data) => { - debug("received message: ", data.length); - receivedMsgs.push(...data); - if (receivedMsgs.length === 5) { - resolve(); + await producerClient.sendBatch(d, { partitionId }); + debug("Successfully sent 5 messages batched together."); + + let subscription: Subscription | undefined; + const receivedMsgs: ReceivedEventData[] = []; + await new Promise((resolve, reject) => { + subscription = consumerClient.subscribe( + partitionId, + { + processEvents: async (data) => { + debug("received message: ", data.length); + receivedMsgs.push(...data); + if (receivedMsgs.length === 5) { + resolve(); + } + }, + processError: async (err) => { + reject(err); } }, - processError: async (err) => { - reject(err); + { + startPosition: { offset: lastEnqueuedOffset } } - }, - { - startPosition: { offset: lastEnqueuedOffset } - } - ); + ); + }); + await subscription!.close(); + should.equal(receivedMsgs[0].body.count, 0); + should.equal(receivedMsgs.length, 5); + for (const [index, message] of receivedMsgs.entries()) { + assert.strictEqual(message.properties!.message_id, d[index].properties!.message_id); + } }); - await subscription!.close(); - should.equal(receivedMsgs[0].body.count, 0); - should.equal(receivedMsgs.length, 5); - for (const [index, message] of receivedMsgs.entries()) 
{ - assert.strictEqual(message.properties!.message_id, d[index].properties!.message_id); - } - }); - it("should consistently send messages with partitionkey to a partitionId", async function(): Promise< - void - > { - const { - subscriptionEventHandler, - startPosition - } = await SubscriptionHandlerForTests.startingFromHere(consumerClient); - - const msgToSendCount = 50; - debug("Sending %d messages.", msgToSendCount); - - function getRandomInt(max: number): number { - return Math.floor(Math.random() * Math.floor(max)); - } - - const senderPromises = []; - - for (let i = 0; i < msgToSendCount; i++) { - const partitionKey = getRandomInt(10); - senderPromises.push( - producerClient.sendBatch([{ body: "Hello EventHub " + i }], { - partitionKey: partitionKey.toString() - }) - ); - } + it("should consistently send messages with partitionkey to a partitionId", async function(): Promise< + void + > { + const { + subscriptionEventHandler, + startPosition + } = await SubscriptionHandlerForTests.startingFromHere(consumerClient); - await Promise.all(senderPromises); + const msgToSendCount = 50; + debug("Sending %d messages.", msgToSendCount); - debug("Starting to receive all messages from each partition."); - const partitionMap: any = {}; + function getRandomInt(max: number): number { + return Math.floor(Math.random() * Math.floor(max)); + } - let subscription: Subscription | undefined = undefined; + const senderPromises = []; - try { - subscription = consumerClient.subscribe(subscriptionEventHandler, { - startPosition - }); - const receivedEvents = await subscriptionEventHandler.waitForFullEvents( - hubInfo.partitionIds, - msgToSendCount - ); + for (let i = 0; i < msgToSendCount; i++) { + const partitionKey = getRandomInt(10); + senderPromises.push( + producerClient.sendBatch([{ body: "Hello EventHub " + i }], { + partitionKey: partitionKey.toString() + }) + ); + } + + await Promise.all(senderPromises); + + debug("Starting to receive all messages from each partition."); + const partitionMap: any = {}; - for (const d of receivedEvents) { - debug(">>>> _raw_amqp_mesage: ", (d as any)._raw_amqp_mesage); - const pk = d.event.partitionKey as string; - debug("pk: ", pk); + let subscription: Subscription | undefined = undefined; - if (partitionMap[pk] && partitionMap[pk] !== d.partitionId) { - debug( - `#### Error: Received a message from partition ${d.partitionId} with partition key ${pk}, whereas the same key was observed on partition ${partitionMap[pk]} before.` - ); - assert(partitionMap[pk] === d.partitionId); + try { + subscription = consumerClient.subscribe(subscriptionEventHandler, { + startPosition + }); + const receivedEvents = await subscriptionEventHandler.waitForFullEvents( + hubInfo.partitionIds, + msgToSendCount + ); + + for (const d of receivedEvents) { + debug(">>>> _raw_amqp_mesage: ", (d as any)._raw_amqp_mesage); + const pk = d.event.partitionKey as string; + debug("pk: ", pk); + + if (partitionMap[pk] && partitionMap[pk] !== d.partitionId) { + debug( + `#### Error: Received a message from partition ${d.partitionId} with partition key ${pk}, whereas the same key was observed on partition ${partitionMap[pk]} before.` + ); + assert(partitionMap[pk] === d.partitionId); + } + partitionMap[pk] = d.partitionId; + debug("partitionMap ", partitionMap); } - partitionMap[pk] = d.partitionId; - debug("partitionMap ", partitionMap); - } - } finally { - if (subscription) { - await subscription.close(); - } - await consumerClient.close(); - } - }); -}).timeout(60000); - 
-describe("extractSpanContextFromEventData", function() { - it("should extract a SpanContext from a properly instrumented EventData", function() { - const traceId = "11111111111111111111111111111111"; - const spanId = "2222222222222222"; - const flags = "00"; - const eventData: ReceivedEventData = { - body: "This is a test.", - enqueuedTimeUtc: new Date(), - offset: 0, - sequenceNumber: 0, - partitionKey: null, - properties: { - [TRACEPARENT_PROPERTY]: `00-${traceId}-${spanId}-${flags}` - }, - getRawAmqpMessage() { - return {} as any; + } finally { + if (subscription) { + await subscription.close(); + } + await consumerClient.close(); } - }; + }); + }).timeout(60000); + + describe("extractSpanContextFromEventData", function() { + it("should extract a SpanContext from a properly instrumented EventData", function() { + const traceId = "11111111111111111111111111111111"; + const spanId = "2222222222222222"; + const flags = "00"; + const eventData: ReceivedEventData = { + body: "This is a test.", + enqueuedTimeUtc: new Date(), + offset: 0, + sequenceNumber: 0, + partitionKey: null, + properties: { + [TRACEPARENT_PROPERTY]: `00-${traceId}-${spanId}-${flags}` + }, + getRawAmqpMessage() { + return {} as any; + } + }; - const spanContext = extractSpanContextFromEventData(eventData); + const spanContext = extractSpanContextFromEventData(eventData); - should.exist(spanContext, "Extracted spanContext should be defined."); - should.equal(spanContext!.traceId, traceId, "Extracted traceId does not match expectation."); - should.equal(spanContext!.spanId, spanId, "Extracted spanId does not match expectation."); - should.equal( - spanContext!.traceFlags, - TraceFlags.NONE, - "Extracted traceFlags do not match expectations." - ); - }); + should.exist(spanContext, "Extracted spanContext should be defined."); + should.equal(spanContext!.traceId, traceId, "Extracted traceId does not match expectation."); + should.equal(spanContext!.spanId, spanId, "Extracted spanId does not match expectation."); + should.equal( + spanContext!.traceFlags, + TraceFlags.NONE, + "Extracted traceFlags do not match expectations." + ); + }); - it("should return undefined when EventData is not properly instrumented", function() { - const traceId = "11111111111111111111111111111111"; - const spanId = "2222222222222222"; - const flags = "00"; - const eventData: ReceivedEventData = { - body: "This is a test.", - enqueuedTimeUtc: new Date(), - offset: 0, - sequenceNumber: 0, - partitionKey: null, - properties: { - [TRACEPARENT_PROPERTY]: `99-${traceId}-${spanId}-${flags}` - }, - getRawAmqpMessage() { - return {} as any; - } - }; + it("should return undefined when EventData is not properly instrumented", function() { + const traceId = "11111111111111111111111111111111"; + const spanId = "2222222222222222"; + const flags = "00"; + const eventData: ReceivedEventData = { + body: "This is a test.", + enqueuedTimeUtc: new Date(), + offset: 0, + sequenceNumber: 0, + partitionKey: null, + properties: { + [TRACEPARENT_PROPERTY]: `99-${traceId}-${spanId}-${flags}` + }, + getRawAmqpMessage() { + return {} as any; + } + }; - const spanContext = extractSpanContextFromEventData(eventData); + const spanContext = extractSpanContextFromEventData(eventData); - should.not.exist( - spanContext, - "Invalid diagnosticId version should return undefined spanContext." - ); - }); + should.not.exist( + spanContext, + "Invalid diagnosticId version should return undefined spanContext." 
+ ); + }); - it("should return undefined when EventData is not instrumented", function() { - const eventData: ReceivedEventData = { - body: "This is a test.", - enqueuedTimeUtc: new Date(), - offset: 0, - sequenceNumber: 0, - partitionKey: null, - getRawAmqpMessage() { - return {} as any; - } - }; + it("should return undefined when EventData is not instrumented", function() { + const eventData: ReceivedEventData = { + body: "This is a test.", + enqueuedTimeUtc: new Date(), + offset: 0, + sequenceNumber: 0, + partitionKey: null, + getRawAmqpMessage() { + return {} as any; + } + }; - const spanContext = extractSpanContextFromEventData(eventData); + const spanContext = extractSpanContextFromEventData(eventData); - should.not.exist( - spanContext, - `Missing property "${TRACEPARENT_PROPERTY}" should return undefined spanContext.` - ); + should.not.exist( + spanContext, + `Missing property "${TRACEPARENT_PROPERTY}" should return undefined spanContext.` + ); + }); }); }); diff --git a/sdk/eventhub/event-hubs/test/internal/node/disconnect.spec.ts b/sdk/eventhub/event-hubs/test/internal/node/disconnect.spec.ts index 1d711ec0dbd0..78d3ff99f37a 100644 --- a/sdk/eventhub/event-hubs/test/internal/node/disconnect.spec.ts +++ b/sdk/eventhub/event-hubs/test/internal/node/disconnect.spec.ts @@ -12,185 +12,201 @@ import { stub } from "sinon"; import { MessagingError } from "@azure/core-amqp"; import { EventHubReceiver } from "../../../src/eventHubReceiver"; import { EventHubConsumerClient, latestEventPosition } from "../../../src"; -const env = getEnvVars(); - -describe("disconnected", function() { - let partitionIds: string[] = []; - const service = { - connectionString: env[EnvVarKeys.EVENTHUB_CONNECTION_STRING], - path: env[EnvVarKeys.EVENTHUB_NAME] - }; - before("validate environment", function(): void { - should.exist( - env[EnvVarKeys.EVENTHUB_CONNECTION_STRING], - "define EVENTHUB_CONNECTION_STRING in your environment before running integration tests." - ); - should.exist( - env[EnvVarKeys.EVENTHUB_NAME], - "define EVENTHUB_NAME in your environment before running integration tests." - ); - }); - - before("get partition ids", async function() { - const client = new EventHubConsumerClient( - EventHubConsumerClient.defaultConsumerGroupName, - service.connectionString, - service.path - ); - partitionIds = await client.getPartitionIds(); - return client.close(); - }); - - describe("EventHubSender", function() { - /** - * Test added for issue https://github.com/Azure/azure-sdk-for-js/issues/15002 - * Prior to fixing this issue, a TypeError would be thrown when this test was ran. - */ - it("send works after disconnect", async () => { - const context = createConnectionContext(service.connectionString, service.path); - const sender = EventHubSender.create(context); - - // Create the sender link via getMaxMessageSize() so we can check when 'send' is about to be called on it. - await sender.getMaxMessageSize(); - should.equal(sender.isOpen(), true, "Expected sender to be open."); - - // Here we stub out the 'send' call on the AwaitableSender. - // We do 2 things: - // 1. Call `idle()` on the underlying rhea connection so that a disconnect is triggered. - // 2. Reject with a MessagingError. - // The MessagingError is thrown so that the send operation will be retried. - // The disconnect that's triggered will cause the existing AwaitableSender to be closed. 
-
- // If everything works as expected, then a new AwaitableSender should be created on the next
- // retry attempt and the event should be successfully sent.
- const senderLink = sender["_sender"]!;
- const sendStub = stub(senderLink, "send");
- sendStub.callsFake(async () => {
- context.connection["_connection"].idle();
- throw new MessagingError("Fake rejection!");
- });
+import { createMockServer } from "../../public/utils/mockService";
+import { testWithServiceTypes } from "../../public/utils/testWithServiceTypes";
+
+testWithServiceTypes((serviceVersion) => {
+ const env = getEnvVars();
+ if (serviceVersion === "mock") {
+ let service: ReturnType;
+ before("Starting mock service", () => {
+ service = createMockServer();
+ return service.start();
+ });
- await sender.send([{ body: "foo" }]);
+ after("Stopping mock service", () => {
+ return service?.stop();
+ });
+ }
+
+ describe("disconnected", function() {
+ let partitionIds: string[] = [];
+ const service = {
+ connectionString: env[EnvVarKeys.EVENTHUB_CONNECTION_STRING],
+ path: env[EnvVarKeys.EVENTHUB_NAME]
+ };
+ before("validate environment", function(): void {
+ should.exist(
+ env[EnvVarKeys.EVENTHUB_CONNECTION_STRING],
+ "define EVENTHUB_CONNECTION_STRING in your environment before running integration tests."
+ );
+ should.exist(
+ env[EnvVarKeys.EVENTHUB_NAME],
+ "define EVENTHUB_NAME in your environment before running integration tests."
+ );
+ });
- await context.close();
+ before("get partition ids", async function() {
+ const client = new EventHubConsumerClient(
+ EventHubConsumerClient.defaultConsumerGroupName,
+ service.connectionString,
+ service.path
+ );
+ partitionIds = await client.getPartitionIds();
+ return client.close();
});
- });
- describe("ConnectionContext", function() {
- describe("onDisconnected", function() {
- it("does not fail when entities are closed concurrently", async () => {
+ describe("EventHubSender", function() {
+ /**
+ * Test added for issue https://github.com/Azure/azure-sdk-for-js/issues/15002
+ * Prior to fixing this issue, a TypeError would be thrown when this test was run.
+ */
+ it("send works after disconnect", async () => {
const context = createConnectionContext(service.connectionString, service.path);
-
- // Add 2 receivers.
- const receiver1 = new EventHubReceiver(
- context,
- EventHubConsumerClient.defaultConsumerGroupName,
- partitionIds[0],
- latestEventPosition
- );
- const receiver2 = new EventHubReceiver(
- context,
- EventHubConsumerClient.defaultConsumerGroupName,
- partitionIds[1],
- latestEventPosition
- );
-
- // Add 2 senders.
- const sender1 = new EventHubSender(context);
- const sender2 = new EventHubSender(context);
-
- // Initialize sender links
- await sender1["_getLink"]();
- await sender2["_getLink"]();
-
- // Initialize receiver links
- await receiver1.initialize({
- abortSignal: undefined,
- timeoutInMs: 60000
- });
- await receiver2.initialize({
- abortSignal: undefined,
- timeoutInMs: 60000
+ const sender = EventHubSender.create(context);
+
+ // Create the sender link via getMaxMessageSize() so we can check when 'send' is about to be called on it.
+ await sender.getMaxMessageSize();
+ should.equal(sender.isOpen(), true, "Expected sender to be open.");
+
+ // Here we stub out the 'send' call on the AwaitableSender.
+ // We do 2 things:
+ // 1. Call `idle()` on the underlying rhea connection so that a disconnect is triggered.
+ // 2. Reject with a MessagingError.
+ // The MessagingError is thrown so that the send operation will be retried.
+ // The disconnect that's triggered will cause the existing AwaitableSender to be closed.
+
+ // If everything works as expected, then a new AwaitableSender should be created on the next
+ // retry attempt and the event should be successfully sent.
+ const senderLink = sender["_sender"]!;
+ const sendStub = stub(senderLink, "send");
+ sendStub.callsFake(async () => {
+ context.connection["_connection"].idle();
+ throw new MessagingError("Fake rejection!");
+ });
- // We are going to override sender1's close method so that it also invokes receiver2's close method.
- const sender1Close = sender1.close.bind(sender1);
- sender1.close = async function() {
- sender2.close().catch(() => {
- /* no-op */
- });
- return sender1Close();
- };
-
- // We are going to override receiver1's close method so that it also invokes receiver2's close method.
- const receiver1Close = receiver1.close.bind(receiver1);
- receiver1.close = async function() {
- receiver2.close().catch(() => {
- /* no-op */
- });
- return receiver1Close();
- };
+ await sender.send([{ body: "foo" }]);
- context.connection["_connection"].idle();
- await context.readyToOpenLink();
await context.close();
});
});
- describe("close", function() {
- it("does not fail when entities are closed concurrently", async () => {
- const context = createConnectionContext(service.connectionString, service.path);
+ describe("ConnectionContext", function() {
+ describe("onDisconnected", function() {
+ it("does not fail when entities are closed concurrently", async () => {
+ const context = createConnectionContext(service.connectionString, service.path);
+
+ // Add 2 receivers.
+ const receiver1 = new EventHubReceiver(
+ context,
+ EventHubConsumerClient.defaultConsumerGroupName,
+ partitionIds[0],
+ latestEventPosition
+ );
+ const receiver2 = new EventHubReceiver(
+ context,
+ EventHubConsumerClient.defaultConsumerGroupName,
+ partitionIds[1],
+ latestEventPosition
+ );
+
+ // Add 2 senders.
+ const sender1 = new EventHubSender(context);
+ const sender2 = new EventHubSender(context);
+
+ // Initialize sender links
+ await sender1["_getLink"]();
+ await sender2["_getLink"]();
+
+ // Initialize receiver links
+ await receiver1.initialize({
+ abortSignal: undefined,
+ timeoutInMs: 60000
+ });
+ await receiver2.initialize({
+ abortSignal: undefined,
+ timeoutInMs: 60000
+ });
+ // We are going to override sender1's close method so that it also invokes sender2's close method.
+ const sender1Close = sender1.close.bind(sender1);
+ sender1.close = async function() {
+ sender2.close().catch(() => {
+ /* no-op */
+ });
+ return sender1Close();
+ };
+
+ // We are going to override receiver1's close method so that it also invokes receiver2's close method.
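+ // Keeping a bound reference to the original close lets the override still run receiver1's own teardown after kicking off receiver2's close.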
+ const receiver1Close = receiver1.close.bind(receiver1);
+ receiver1.close = async function() {
+ receiver2.close().catch(() => {
+ /* no-op */
+ });
+ return receiver1Close();
+ };
+
+ context.connection["_connection"].idle();
+ await context.readyToOpenLink();
+ await context.close();
});
+ });
- // We are going to override sender1's close method so that it also invokes receiver2's close method.
- const sender1Close = sender1.close.bind(sender1);
- sender1.close = async function() {
- sender2.close().catch(() => {
- /* no-op */
+ describe("close", function() {
+ it("does not fail when entities are closed concurrently", async () => {
+ const context = createConnectionContext(service.connectionString, service.path);
+
+ // Add 2 receivers.
+ const receiver1 = new EventHubReceiver(
+ context,
+ EventHubConsumerClient.defaultConsumerGroupName,
+ partitionIds[0],
+ latestEventPosition
+ );
+ const receiver2 = new EventHubReceiver(
+ context,
+ EventHubConsumerClient.defaultConsumerGroupName,
+ partitionIds[1],
+ latestEventPosition
+ );
+
+ // Add 2 senders.
+ const sender1 = new EventHubSender(context);
+ const sender2 = new EventHubSender(context);
+
+ // Initialize sender links
+ await sender1["_getLink"]();
+ await sender2["_getLink"]();
+
+ // Initialize receiver links
+ await receiver1.initialize({
+ abortSignal: undefined,
+ timeoutInMs: 60000 });
- return sender1Close();
- };
-
- // We are going to override receiver1's close method so that it also invokes receiver2's close method.
- const originalClose = receiver1.close.bind(receiver1);
- receiver1.close = async function() {
- receiver2.close().catch(() => {
- /* no-op */
+ await receiver2.initialize({
+ abortSignal: undefined,
+ timeoutInMs: 60000 });
- return originalClose();
- };
- await context.close();
+
+ // We are going to override sender1's close method so that it also invokes sender2's close method.
+ const sender1Close = sender1.close.bind(sender1);
+ sender1.close = async function() {
+ sender2.close().catch(() => {
+ /* no-op */
+ });
+ return sender1Close();
+ };
+
+ // We are going to override receiver1's close method so that it also invokes receiver2's close method.
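+ // Note that receiver2.close() is deliberately not awaited, so both receivers tear down concurrently; that race is what this test exercises.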
+ const originalClose = receiver1.close.bind(receiver1); + receiver1.close = async function() { + receiver2.close().catch(() => { + /* no-op */ + }); + return originalClose(); + }; + await context.close(); + }); }); }); }); diff --git a/sdk/eventhub/event-hubs/test/internal/node/packageInfo.spec.ts b/sdk/eventhub/event-hubs/test/internal/node/packageInfo.spec.ts index 6355ca5d7335..c26cf4938c71 100644 --- a/sdk/eventhub/event-hubs/test/internal/node/packageInfo.spec.ts +++ b/sdk/eventhub/event-hubs/test/internal/node/packageInfo.spec.ts @@ -6,27 +6,31 @@ const should = chai.should(); import fs from "fs"; import path from "path"; import { packageJsonInfo } from "../../../src/util/constants"; +import { testWithServiceTypes } from "../../public/utils/testWithServiceTypes"; // Since we currently hardcode package name and version in `constants.ts` file, // following test is in place to ensure the values in package.json and in this file are consistent -describe("Ensure package name and version are consistent in SDK and package.json", function(): void { - it("Ensure constants.ts file is consistent with package.json", () => { - const packageJsonFilePath = path.join(__dirname, "..", "..", "..", "..", "package.json"); - const rawFileContents = fs.readFileSync(packageJsonFilePath, { encoding: "utf-8" }); - const packageJsonContents = JSON.parse(rawFileContents); - const name = packageJsonContents.name; - const version = packageJsonContents.version; +testWithServiceTypes(() => { + describe("Ensure package name and version are consistent in SDK and package.json", function(): void { + it("Ensure constants.ts file is consistent with package.json", () => { + const packageJsonFilePath = path.join(__dirname, "..", "..", "..", "..", "package.json"); + const rawFileContents = fs.readFileSync(packageJsonFilePath, { encoding: "utf-8" }); + const packageJsonContents = JSON.parse(rawFileContents); - should.equal( - packageJsonInfo.name, - name, - `${name} from package.json is not same as 'name' used in constants.ts` - ); - should.equal( - packageJsonInfo.version, - version, - `${version} from package.json is not same as 'version' used in constants.ts` - ); + const name = packageJsonContents.name; + const version = packageJsonContents.version; + + should.equal( + packageJsonInfo.name, + name, + `${name} from package.json is not same as 'name' used in constants.ts` + ); + should.equal( + packageJsonInfo.version, + version, + `${version} from package.json is not same as 'version' used in constants.ts` + ); + }); }); }); diff --git a/sdk/eventhub/event-hubs/test/internal/partitionPump.spec.ts b/sdk/eventhub/event-hubs/test/internal/partitionPump.spec.ts index f8c77f374fe5..19cca3013964 100644 --- a/sdk/eventhub/event-hubs/test/internal/partitionPump.spec.ts +++ b/sdk/eventhub/event-hubs/test/internal/partitionPump.spec.ts @@ -15,143 +15,150 @@ import chai from "chai"; import { ReceivedEventData } from "../../src/eventData"; import { instrumentEventData } from "../../src/diagnostics/instrumentEventData"; import { setTracerForTest } from "../public/utils/testUtils"; +import { testWithServiceTypes } from "../public/utils/testWithServiceTypes"; const should = chai.should(); -describe("PartitionPump", () => { - describe("telemetry", () => { - const eventHubProperties = { - host: "thehost", - entityPath: "theeventhubname" - }; - - class TestTracer2 extends TestTracer { - public spanOptions: SpanOptions | undefined; - public spanName: string | undefined; - public context: Context | undefined; - - startSpan(nameArg: string, 
optionsArg?: SpanOptions, contextArg?: Context): TestSpan { - this.spanName = nameArg; - this.spanOptions = optionsArg; - this.context = contextArg; - return super.startSpan(nameArg, optionsArg, this.context); - } - } - - it("basic span properties are set", async () => { - const { tracer, resetTracer } = setTracerForTest(new TestTracer2()); - const fakeParentSpanContext = setSpanContext( - context.active(), - tracer.startSpan("test").spanContext() - ); - - await createProcessingSpan([], eventHubProperties, { - tracingOptions: { - tracingContext: fakeParentSpanContext +testWithServiceTypes(() => { + describe("PartitionPump", () => { + describe("telemetry", () => { + const eventHubProperties = { + host: "thehost", + entityPath: "theeventhubname" + }; + + class TestTracer2 extends TestTracer { + public spanOptions: SpanOptions | undefined; + public spanName: string | undefined; + public context: Context | undefined; + + startSpan(nameArg: string, optionsArg?: SpanOptions, contextArg?: Context): TestSpan { + this.spanName = nameArg; + this.spanOptions = optionsArg; + this.context = contextArg; + return super.startSpan(nameArg, optionsArg, this.context); } - }); + } - should.equal(tracer.spanName, "Azure.EventHubs.process"); + it("basic span properties are set", async () => { + const { tracer, resetTracer } = setTracerForTest(new TestTracer2()); + const fakeParentSpanContext = setSpanContext( + context.active(), + tracer.startSpan("test").spanContext() + ); - should.exist(tracer.spanOptions); - tracer.spanOptions!.kind!.should.equal(SpanKind.CONSUMER); - tracer.context!.should.equal(fakeParentSpanContext); + await createProcessingSpan([], eventHubProperties, { + tracingOptions: { + tracingContext: fakeParentSpanContext + } + }); - const attributes = tracer.getActiveSpans().find((s) => s.name === "Azure.EventHubs.process") - ?.attributes; + should.equal(tracer.spanName, "Azure.EventHubs.process"); - attributes!.should.deep.equal({ - "az.namespace": "Microsoft.EventHub", - "message_bus.destination": eventHubProperties.entityPath, - "peer.address": eventHubProperties.host - }); + should.exist(tracer.spanOptions); + tracer.spanOptions!.kind!.should.equal(SpanKind.CONSUMER); + tracer.context!.should.equal(fakeParentSpanContext); - resetTracer(); - }); + const attributes = tracer.getActiveSpans().find((s) => s.name === "Azure.EventHubs.process") + ?.attributes; - it("received events are linked to this span using Diagnostic-Id", async () => { - const requiredEventProperties = { - body: "", - enqueuedTimeUtc: new Date(), - offset: 0, - partitionKey: null, - sequenceNumber: 0, - getRawAmqpMessage() { - return {} as any; - } - }; + attributes!.should.deep.equal({ + "az.namespace": "Microsoft.EventHub", + "message_bus.destination": eventHubProperties.entityPath, + "peer.address": eventHubProperties.host + }); - const { tracer, resetTracer } = setTracerForTest(new TestTracer2()); - - const firstEvent = tracer.startSpan("a"); - const thirdEvent = tracer.startSpan("c"); - - const receivedEvents: ReceivedEventData[] = [ - instrumentEventData( - { ...requiredEventProperties }, - { - tracingOptions: { - tracingContext: setSpanContext(context.active(), firstEvent.spanContext()) - } - }, - "entityPath", - "host" - ).event as ReceivedEventData, - { properties: {}, ...requiredEventProperties }, // no diagnostic ID means it gets skipped - instrumentEventData( - { ...requiredEventProperties }, - { - tracingOptions: { - tracingContext: setSpanContext(context.active(), thirdEvent.spanContext()) - } - }, - "entityPath", 
- "host" - ).event as ReceivedEventData - ]; - - await createProcessingSpan(receivedEvents, eventHubProperties, {}); - - // middle event, since it has no trace information, doesn't get included - // in the telemetry - tracer.spanOptions!.links!.length.should.equal(3 - 1); - // the test tracer just hands out a string integer that just gets - // incremented - tracer.spanOptions!.links![0]!.context.traceId.should.equal(firstEvent.spanContext().traceId); - (tracer.spanOptions!.links![0]!.attributes!.enqueuedTime as number).should.equal( - requiredEventProperties.enqueuedTimeUtc.getTime() - ); - tracer.spanOptions!.links![1]!.context.traceId.should.equal(thirdEvent.spanContext().traceId); - (tracer.spanOptions!.links![1]!.attributes!.enqueuedTime as number).should.equal( - requiredEventProperties.enqueuedTimeUtc.getTime() - ); - - resetTracer(); - }); + resetTracer(); + }); + + it("received events are linked to this span using Diagnostic-Id", async () => { + const requiredEventProperties = { + body: "", + enqueuedTimeUtc: new Date(), + offset: 0, + partitionKey: null, + sequenceNumber: 0, + getRawAmqpMessage() { + return {} as any; + } + }; + + const { tracer, resetTracer } = setTracerForTest(new TestTracer2()); + + const firstEvent = tracer.startSpan("a"); + const thirdEvent = tracer.startSpan("c"); + + const receivedEvents: ReceivedEventData[] = [ + instrumentEventData( + { ...requiredEventProperties }, + { + tracingOptions: { + tracingContext: setSpanContext(context.active(), firstEvent.spanContext()) + } + }, + "entityPath", + "host" + ).event as ReceivedEventData, + { properties: {}, ...requiredEventProperties }, // no diagnostic ID means it gets skipped + instrumentEventData( + { ...requiredEventProperties }, + { + tracingOptions: { + tracingContext: setSpanContext(context.active(), thirdEvent.spanContext()) + } + }, + "entityPath", + "host" + ).event as ReceivedEventData + ]; + + await createProcessingSpan(receivedEvents, eventHubProperties, {}); + + // middle event, since it has no trace information, doesn't get included + // in the telemetry + tracer.spanOptions!.links!.length.should.equal(3 - 1); + // the test tracer just hands out a string integer that just gets + // incremented + tracer.spanOptions!.links![0]!.context.traceId.should.equal( + firstEvent.spanContext().traceId + ); + (tracer.spanOptions!.links![0]!.attributes!.enqueuedTime as number).should.equal( + requiredEventProperties.enqueuedTimeUtc.getTime() + ); + tracer.spanOptions!.links![1]!.context.traceId.should.equal( + thirdEvent.spanContext().traceId + ); + (tracer.spanOptions!.links![1]!.attributes!.enqueuedTime as number).should.equal( + requiredEventProperties.enqueuedTimeUtc.getTime() + ); + + resetTracer(); + }); - it("trace - normal", async () => { - const tracer = new TestTracer(); - const span = tracer.startSpan("whatever"); + it("trace - normal", async () => { + const tracer = new TestTracer(); + const span = tracer.startSpan("whatever"); - await trace(async () => { - /* no-op */ - }, span); + await trace(async () => { + /* no-op */ + }, span); - span.status!.code.should.equal(SpanStatusCode.OK); - should.equal(span.endCalled, true); - }); + span.status!.code.should.equal(SpanStatusCode.OK); + should.equal(span.endCalled, true); + }); - it("trace - throws", async () => { - const tracer = new TestTracer(); - const span = tracer.startSpan("whatever"); + it("trace - throws", async () => { + const tracer = new TestTracer(); + const span = tracer.startSpan("whatever"); - await trace(async () => { - throw new 
Error("error thrown from fn"); - }, span).should.be.rejectedWith(/error thrown from fn/); + await trace(async () => { + throw new Error("error thrown from fn"); + }, span).should.be.rejectedWith(/error thrown from fn/); - span.status!.code.should.equal(SpanStatusCode.ERROR); - span.status!.message!.should.equal("error thrown from fn"); - should.equal(span.endCalled, true); + span.status!.code.should.equal(SpanStatusCode.ERROR); + span.status!.message!.should.equal("error thrown from fn"); + should.equal(span.endCalled, true); + }); }); }); }); diff --git a/sdk/eventhub/event-hubs/test/internal/receiveBatch.spec.ts b/sdk/eventhub/event-hubs/test/internal/receiveBatch.spec.ts index 7778db857ea2..4e104229a31c 100644 --- a/sdk/eventhub/event-hubs/test/internal/receiveBatch.spec.ts +++ b/sdk/eventhub/event-hubs/test/internal/receiveBatch.spec.ts @@ -16,142 +16,158 @@ import { import { EnvVarKeys, getEnvVars } from "../public/utils/testUtils"; import { EventHubReceiver } from "../../src/eventHubReceiver"; import { translate } from "@azure/core-amqp"; -const env = getEnvVars(); - -describe("EventHubConsumerClient", function(): void { - const service = { - connectionString: env[EnvVarKeys.EVENTHUB_CONNECTION_STRING], - path: env[EnvVarKeys.EVENTHUB_NAME] - }; - let producerClient: EventHubProducerClient; - let consumerClient: EventHubConsumerClient; - let partitionIds: string[]; - before("validate environment", async function(): Promise { - should.exist( - env[EnvVarKeys.EVENTHUB_CONNECTION_STRING], - "define EVENTHUB_CONNECTION_STRING in your environment before running integration tests." - ); - should.exist( - env[EnvVarKeys.EVENTHUB_NAME], - "define EVENTHUB_NAME in your environment before running integration tests." - ); - }); - - beforeEach("Creating the clients", async () => { - producerClient = new EventHubProducerClient(service.connectionString, service.path); - consumerClient = new EventHubConsumerClient( - EventHubConsumerClient.defaultConsumerGroupName, - service.connectionString, - service.path - ); - partitionIds = await producerClient.getPartitionIds({}); - }); - - afterEach("Closing the clients", async () => { - await producerClient.close(); - await consumerClient.close(); - }); - - describe("EventHubConsumer receiveBatch", function(): void { - it("should not lose messages on error", async () => { - const partitionId = partitionIds[0]; - const { lastEnqueuedSequenceNumber } = await producerClient.getPartitionProperties( - partitionId - ); - - // Ensure the receiver only looks at new messages. - const startPosition: EventPosition = { - sequenceNumber: lastEnqueuedSequenceNumber, - isInclusive: false - }; - - // Send a message we expect to receive. - const message: EventData = { body: "remember me!" }; - await producerClient.sendBatch([message], { partitionId }); +import { testWithServiceTypes } from "../public/utils/testWithServiceTypes"; +import { createMockServer } from "../public/utils/mockService"; + +testWithServiceTypes((serviceVersion) => { + const env = getEnvVars(); + if (serviceVersion === "mock") { + let service: ReturnType; + before("Starting mock service", () => { + service = createMockServer(); + return service.start(); + }); - // Disable retries to make it easier to test scenario. 
- const receiver = new EventHubReceiver( - consumerClient["_context"], - EventHubConsumerClient.defaultConsumerGroupName, - partitionId, - startPosition, - { - retryOptions: { - maxRetries: 0 - } - } + after("Stopping mock service", () => { + return service?.stop(); + }); + } + + describe("EventHubConsumerClient", function(): void { + const service = { + connectionString: env[EnvVarKeys.EVENTHUB_CONNECTION_STRING], + path: env[EnvVarKeys.EVENTHUB_NAME] + }; + let producerClient: EventHubProducerClient; + let consumerClient: EventHubConsumerClient; + let partitionIds: string[]; + before("validate environment", async function(): Promise { + should.exist( + env[EnvVarKeys.EVENTHUB_CONNECTION_STRING], + "define EVENTHUB_CONNECTION_STRING in your environment before running integration tests." + ); + should.exist( + env[EnvVarKeys.EVENTHUB_NAME], + "define EVENTHUB_NAME in your environment before running integration tests." ); - - // Periodically check that the receiver's checkpoint has been updated. - const checkpointInterval = setInterval(() => { - if (receiver.checkpoint > -1) { - clearInterval(checkpointInterval); - const error = translate(new Error("I break receivers for fun.")); - receiver["_onError"]!(error); - } - }, 50); - - try { - // There is only 1 message. - // We expect to see an error. - await receiver.receiveBatch(2, 60); - throw new Error(`Test failure`); - } catch (err) { - err.message.should.not.equal("Test failure"); - receiver.checkpoint.should.be.greaterThan(-1, "Did not see a message come through."); - } finally { - clearInterval(checkpointInterval); - } - - const events = await receiver.receiveBatch(1); - events.length.should.equal(1, "Unexpected number of events received."); - events[0].body.should.equal(message.body, "Unexpected message received."); }); - it("should not lose messages between retries", async () => { - const partitionId = partitionIds[0]; - const { lastEnqueuedSequenceNumber } = await producerClient.getPartitionProperties( - partitionId + beforeEach("Creating the clients", async () => { + producerClient = new EventHubProducerClient(service.connectionString, service.path); + consumerClient = new EventHubConsumerClient( + EventHubConsumerClient.defaultConsumerGroupName, + service.connectionString, + service.path ); + partitionIds = await producerClient.getPartitionIds({}); + }); - // Ensure the receiver only looks at new messages. - const startPosition: EventPosition = { - sequenceNumber: lastEnqueuedSequenceNumber, - isInclusive: false - }; - - // Send a message we expect to receive. - const message: EventData = { body: "remember me!" }; - await producerClient.sendBatch([message], { partitionId }); + afterEach("Closing the clients", async () => { + await producerClient.close(); + await consumerClient.close(); + }); - // Disable retries to make it easier to test scenario. - const receiver = new EventHubReceiver( - consumerClient["_context"], - EventHubConsumerClient.defaultConsumerGroupName, - partitionId, - startPosition, - { - retryOptions: { - maxRetries: 1 + describe("EventHubConsumer receiveBatch", function(): void { + it("should not lose messages on error", async () => { + const partitionId = partitionIds[0]; + const { lastEnqueuedSequenceNumber } = await producerClient.getPartitionProperties( + partitionId + ); + + // Ensure the receiver only looks at new messages. + const startPosition: EventPosition = { + sequenceNumber: lastEnqueuedSequenceNumber, + isInclusive: false + }; + + // Send a message we expect to receive. 
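+ // The start position captured above is the partition's last enqueued sequence number (exclusive), so this event is the first one the receiver will see.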
+ const message: EventData = { body: "remember me!" }; + await producerClient.sendBatch([message], { partitionId }); + + // Disable retries to make it easier to test scenario. + const receiver = new EventHubReceiver( + consumerClient["_context"], + EventHubConsumerClient.defaultConsumerGroupName, + partitionId, + startPosition, + { + retryOptions: { + maxRetries: 0 + } } - } - ); - - // Periodically check that the receiver's checkpoint has been updated. - const checkpointInterval = setInterval(() => { - if (receiver.checkpoint > -1) { + ); + + // Periodically check that the receiver's checkpoint has been updated. + const checkpointInterval = setInterval(() => { + if (receiver.checkpoint > -1) { + clearInterval(checkpointInterval); + const error = translate(new Error("I break receivers for fun.")); + receiver["_onError"]!(error); + } + }, 50); + + try { + // There is only 1 message. + // We expect to see an error. + await receiver.receiveBatch(2, 60); + throw new Error(`Test failure`); + } catch (err) { + err.message.should.not.equal("Test failure"); + receiver.checkpoint.should.be.greaterThan(-1, "Did not see a message come through."); + } finally { clearInterval(checkpointInterval); - const error = translate(new Error("I break receivers for fun.")) as MessagingError; - error.retryable = true; - receiver["_onError"]!(error); } - }, 50); - // There is only 1 message. - const events = await receiver.receiveBatch(2, 20); + const events = await receiver.receiveBatch(1); + events.length.should.equal(1, "Unexpected number of events received."); + events[0].body.should.equal(message.body, "Unexpected message received."); + }); + + it("should not lose messages between retries", async () => { + const partitionId = partitionIds[0]; + const { lastEnqueuedSequenceNumber } = await producerClient.getPartitionProperties( + partitionId + ); + + // Ensure the receiver only looks at new messages. + const startPosition: EventPosition = { + sequenceNumber: lastEnqueuedSequenceNumber, + isInclusive: false + }; + + // Send a message we expect to receive. + const message: EventData = { body: "remember me!" }; + await producerClient.sendBatch([message], { partitionId }); + + // Disable retries to make it easier to test scenario. + const receiver = new EventHubReceiver( + consumerClient["_context"], + EventHubConsumerClient.defaultConsumerGroupName, + partitionId, + startPosition, + { + retryOptions: { + maxRetries: 1 + } + } + ); + + // Periodically check that the receiver's checkpoint has been updated. + const checkpointInterval = setInterval(() => { + if (receiver.checkpoint > -1) { + clearInterval(checkpointInterval); + const error = translate(new Error("I break receivers for fun.")) as MessagingError; + error.retryable = true; + receiver["_onError"]!(error); + } + }, 50); + + // There is only 1 message. 
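+ // receiveBatch below asks for up to 2 events with a 20 second wait; only one event exists, so the receiver should retry past the injected retryable error and still return it.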
+ const events = await receiver.receiveBatch(2, 20); - events.length.should.equal(1, "Unexpected number of events received."); - events[0].body.should.equal(message.body, "Unexpected message received."); + events.length.should.equal(1, "Unexpected number of events received."); + events[0].body.should.equal(message.body, "Unexpected message received."); + }); }); - }); -}).timeout(90000); + }).timeout(90000); +}); diff --git a/sdk/eventhub/event-hubs/test/internal/sender.spec.ts b/sdk/eventhub/event-hubs/test/internal/sender.spec.ts index d83c22e8ea36..8fd70aad66cf 100644 --- a/sdk/eventhub/event-hubs/test/internal/sender.spec.ts +++ b/sdk/eventhub/event-hubs/test/internal/sender.spec.ts @@ -27,1228 +27,1256 @@ import { SpanGraph, TestSpan } from "@azure/test-utils"; import { TRACEPARENT_PROPERTY } from "../../src/diagnostics/instrumentEventData"; import { SubscriptionHandlerForTests } from "../public/utils/subscriptionHandlerForTests"; import { setSpan, context } from "@azure/core-tracing"; -const env = getEnvVars(); - -describe("EventHub Sender", function(): void { - const service = { - connectionString: env[EnvVarKeys.EVENTHUB_CONNECTION_STRING], - path: env[EnvVarKeys.EVENTHUB_NAME] - }; - let producerClient: EventHubProducerClient; - let consumerClient: EventHubConsumerClient; - let startPosition: { [partitionId: string]: EventPosition }; - - before("validate environment", function(): void { - should.exist( - env[EnvVarKeys.EVENTHUB_CONNECTION_STRING], - "define EVENTHUB_CONNECTION_STRING in your environment before running integration tests." - ); - should.exist( - env[EnvVarKeys.EVENTHUB_NAME], - "define EVENTHUB_NAME in your environment before running integration tests." - ); - }); - - beforeEach(async () => { - debug("Creating the clients.."); - producerClient = new EventHubProducerClient(service.connectionString, service.path); - consumerClient = new EventHubConsumerClient( - EventHubConsumerClient.defaultConsumerGroupName, - service.connectionString, - service.path - ); - startPosition = await getStartingPositionsForTests(consumerClient); - }); - - afterEach(async () => { - debug("Closing the clients.."); - await producerClient.close(); - await consumerClient.close(); - }); - - describe("Create batch", function(): void { - describe("tryAdd", function() { - it("doesn't grow if invalid events are added", async () => { - const batch = await producerClient.createBatch({ maxSizeInBytes: 20 }); - const event = { body: Buffer.alloc(30).toString() }; - - const numToAdd = 5; - let failures = 0; - for (let i = 0; i < numToAdd; i++) { - if (!batch.tryAdd(event)) { - failures++; - } - } +import { testWithServiceTypes } from "../public/utils/testWithServiceTypes"; +import { createMockServer } from "../public/utils/mockService"; + +testWithServiceTypes((serviceVersion) => { + const env = getEnvVars(); + if (serviceVersion === "mock") { + let service: ReturnType; + before("Starting mock service", () => { + service = createMockServer(); + return service.start(); + }); - failures.should.equal(5); - batch.sizeInBytes.should.equal(0); - }); + after("Stopping mock service", () => { + return service?.stop(); + }); + } + + describe("EventHub Sender", function(): void { + const service = { + connectionString: env[EnvVarKeys.EVENTHUB_CONNECTION_STRING], + path: env[EnvVarKeys.EVENTHUB_NAME] + }; + let producerClient: EventHubProducerClient; + let consumerClient: EventHubConsumerClient; + let startPosition: { [partitionId: string]: EventPosition }; + + before("validate environment", function(): void { + 
should.exist( + env[EnvVarKeys.EVENTHUB_CONNECTION_STRING], + "define EVENTHUB_CONNECTION_STRING in your environment before running integration tests." + ); + should.exist( + env[EnvVarKeys.EVENTHUB_NAME], + "define EVENTHUB_NAME in your environment before running integration tests." + ); }); - it("partitionId is set as expected", async () => { - const batch = await producerClient.createBatch({ - partitionId: "0" - }); - should.equal(batch.partitionId, "0"); + beforeEach(async () => { + debug("Creating the clients.."); + producerClient = new EventHubProducerClient(service.connectionString, service.path); + consumerClient = new EventHubConsumerClient( + EventHubConsumerClient.defaultConsumerGroupName, + service.connectionString, + service.path + ); + startPosition = await getStartingPositionsForTests(consumerClient); }); - it("partitionId is set as expected when it is 0 i.e. falsy", async () => { - const batch = await producerClient.createBatch({ - // @ts-expect-error Testing the value 0 is not ignored. - partitionId: 0 - }); - should.equal(batch.partitionId, "0"); + afterEach(async () => { + debug("Closing the clients.."); + await producerClient.close(); + await consumerClient.close(); }); - it("partitionKey is set as expected", async () => { - const batch = await producerClient.createBatch({ - partitionKey: "boo" + describe("Create batch", function(): void { + describe("tryAdd", function() { + it("doesn't grow if invalid events are added", async () => { + const batch = await producerClient.createBatch({ maxSizeInBytes: 20 }); + const event = { body: Buffer.alloc(30).toString() }; + + const numToAdd = 5; + let failures = 0; + for (let i = 0; i < numToAdd; i++) { + if (!batch.tryAdd(event)) { + failures++; + } + } + + failures.should.equal(5); + batch.sizeInBytes.should.equal(0); + }); }); - should.equal(batch.partitionKey, "boo"); - }); - it("partitionKey is set as expected when it is 0 i.e. falsy", async () => { - const batch = await producerClient.createBatch({ - // @ts-expect-error Testing the value 0 is not ignored. - partitionKey: 0 + it("partitionId is set as expected", async () => { + const batch = await producerClient.createBatch({ + partitionId: "0" + }); + should.equal(batch.partitionId, "0"); }); - should.equal(batch.partitionKey, "0"); - }); - it("maxSizeInBytes is set as expected", async () => { - const batch = await producerClient.createBatch({ maxSizeInBytes: 30 }); - should.equal(batch.maxSizeInBytes, 30); - }); + it("partitionId is set as expected when it is 0 i.e. falsy", async () => { + const batch = await producerClient.createBatch({ + // @ts-expect-error Testing the value 0 is not ignored. + partitionId: 0 + }); + should.equal(batch.partitionId, "0"); + }); - it("should be sent successfully", async function(): Promise { - const list = ["Albert", `${Buffer.from("Mike".repeat(1300000))}`, "Marie"]; + it("partitionKey is set as expected", async () => { + const batch = await producerClient.createBatch({ + partitionKey: "boo" + }); + should.equal(batch.partitionKey, "boo"); + }); - const batch = await producerClient.createBatch({ - partitionId: "0" + it("partitionKey is set as expected when it is 0 i.e. falsy", async () => { + const batch = await producerClient.createBatch({ + // @ts-expect-error Testing the value 0 is not ignored. 
+ partitionKey: 0 + }); + should.equal(batch.partitionKey, "0"); }); - batch.partitionId!.should.equal("0"); - should.not.exist(batch.partitionKey); - batch.maxSizeInBytes.should.be.gt(0); + it("maxSizeInBytes is set as expected", async () => { + const batch = await producerClient.createBatch({ maxSizeInBytes: 30 }); + should.equal(batch.maxSizeInBytes, 30); + }); - should.equal(batch.tryAdd({ body: list[0] }), true); - should.equal(batch.tryAdd({ body: list[1] }), false); // The Mike message will be rejected - it's over the limit. - should.equal(batch.tryAdd({ body: list[2] }), true); // Marie should get added"; + it("should be sent successfully", async function(): Promise { + const list = ["Albert", `${Buffer.from("Mike".repeat(1300000))}`, "Marie"]; - const { subscriptionEventHandler } = await SubscriptionHandlerForTests.startingFromHere( - producerClient - ); + const batch = await producerClient.createBatch({ + partitionId: "0" + }); - const subscriber = consumerClient.subscribe("0", subscriptionEventHandler, { startPosition }); - await producerClient.sendBatch(batch); + batch.partitionId!.should.equal("0"); + should.not.exist(batch.partitionKey); + batch.maxSizeInBytes.should.be.gt(0); - let receivedEvents; + should.equal(batch.tryAdd({ body: list[0] }), true); + should.equal(batch.tryAdd({ body: list[1] }), false); // The Mike message will be rejected - it's over the limit. + should.equal(batch.tryAdd({ body: list[2] }), true); // Marie should get added"; - try { - receivedEvents = await subscriptionEventHandler.waitForEvents(["0"], 2); - } finally { - await subscriber.close(); - } + const { subscriptionEventHandler } = await SubscriptionHandlerForTests.startingFromHere( + producerClient + ); - // Mike didn't make it - the message was too big for the batch - // and was rejected above. - [list[0], list[2]].should.be.deep.eq( - receivedEvents.map((event) => event.body), - "Received messages should be equal to our sent messages" - ); - }); + const subscriber = consumerClient.subscribe("0", subscriptionEventHandler, { + startPosition + }); + await producerClient.sendBatch(batch); + + let receivedEvents; - it("should be sent successfully when partitionId is 0 i.e. falsy", async function(): Promise< - void - > { - const list = ["Albert", "Marie"]; + try { + receivedEvents = await subscriptionEventHandler.waitForEvents(["0"], 2); + } finally { + await subscriber.close(); + } - const batch = await producerClient.createBatch({ - // @ts-expect-error Testing the value 0 is not ignored. - partitionId: 0 + // Mike didn't make it - the message was too big for the batch + // and was rejected above. + [list[0], list[2]].should.be.deep.eq( + receivedEvents.map((event) => event.body), + "Received messages should be equal to our sent messages" + ); }); - batch.partitionId!.should.equal("0"); - should.not.exist(batch.partitionKey); - batch.maxSizeInBytes.should.be.gt(0); + it("should be sent successfully when partitionId is 0 i.e. falsy", async function(): Promise< + void + > { + const list = ["Albert", "Marie"]; - should.equal(batch.tryAdd({ body: list[0] }), true); - should.equal(batch.tryAdd({ body: list[1] }), true); + const batch = await producerClient.createBatch({ + // @ts-expect-error Testing the value 0 is not ignored. 
+ partitionId: 0 + }); - const { subscriptionEventHandler } = await SubscriptionHandlerForTests.startingFromHere( - producerClient - ); + batch.partitionId!.should.equal("0"); + should.not.exist(batch.partitionKey); + batch.maxSizeInBytes.should.be.gt(0); - const subscriber = consumerClient.subscribe("0", subscriptionEventHandler, { startPosition }); - await producerClient.sendBatch(batch); + should.equal(batch.tryAdd({ body: list[0] }), true); + should.equal(batch.tryAdd({ body: list[1] }), true); - let receivedEvents; + const { subscriptionEventHandler } = await SubscriptionHandlerForTests.startingFromHere( + producerClient + ); - try { - receivedEvents = await subscriptionEventHandler.waitForEvents(["0"], 2); - } finally { - await subscriber.close(); - } + const subscriber = consumerClient.subscribe("0", subscriptionEventHandler, { + startPosition + }); + await producerClient.sendBatch(batch); - list.should.be.deep.eq( - receivedEvents.map((event) => event.body), - "Received messages should be equal to our sent messages" - ); - }); + let receivedEvents; - it("should be sent successfully when partitionKey is 0 i.e. falsy", async function(): Promise< - void - > { - const list = ["Albert", "Marie"]; + try { + receivedEvents = await subscriptionEventHandler.waitForEvents(["0"], 2); + } finally { + await subscriber.close(); + } - const batch = await producerClient.createBatch({ - // @ts-expect-error Testing the value 0 is not ignored. - partitionKey: 0 + list.should.be.deep.eq( + receivedEvents.map((event) => event.body), + "Received messages should be equal to our sent messages" + ); }); - batch.partitionKey!.should.equal("0"); - should.not.exist(batch.partitionId); - batch.maxSizeInBytes.should.be.gt(0); + it("should be sent successfully when partitionKey is 0 i.e. falsy", async function(): Promise< + void + > { + const list = ["Albert", "Marie"]; - should.equal(batch.tryAdd({ body: list[0] }), true); - should.equal(batch.tryAdd({ body: list[1] }), true); + const batch = await producerClient.createBatch({ + // @ts-expect-error Testing the value 0 is not ignored. 
+ partitionKey: 0 + }); - const { subscriptionEventHandler } = await SubscriptionHandlerForTests.startingFromHere( - producerClient - ); + batch.partitionKey!.should.equal("0"); + should.not.exist(batch.partitionId); + batch.maxSizeInBytes.should.be.gt(0); - const subscriber = consumerClient.subscribe(subscriptionEventHandler, { - startPosition - }); - await producerClient.sendBatch(batch); + should.equal(batch.tryAdd({ body: list[0] }), true); + should.equal(batch.tryAdd({ body: list[1] }), true); - let receivedEvents; - const allPartitionIds = await producerClient.getPartitionIds(); - try { - receivedEvents = await subscriptionEventHandler.waitForEvents(allPartitionIds, 2); - } finally { - await subscriber.close(); - } + const { subscriptionEventHandler } = await SubscriptionHandlerForTests.startingFromHere( + producerClient + ); - list.should.be.deep.eq( - receivedEvents.map((event) => event.body), - "Received messages should be equal to our sent messages" - ); - }); + const subscriber = consumerClient.subscribe(subscriptionEventHandler, { + startPosition + }); + await producerClient.sendBatch(batch); - it("should be sent successfully with properties", async function(): Promise { - const properties = { test: "super" }; - const list = [ - { body: "Albert-With-Properties", properties }, - { body: "Mike-With-Properties", properties }, - { body: "Marie-With-Properties", properties } - ]; + let receivedEvents; + const allPartitionIds = await producerClient.getPartitionIds(); + try { + receivedEvents = await subscriptionEventHandler.waitForEvents(allPartitionIds, 2); + } finally { + await subscriber.close(); + } - const batch = await producerClient.createBatch({ - partitionId: "0" + list.should.be.deep.eq( + receivedEvents.map((event) => event.body), + "Received messages should be equal to our sent messages" + ); }); - batch.maxSizeInBytes.should.be.gt(0); + it("should be sent successfully with properties", async function(): Promise { + const properties = { test: "super" }; + const list = [ + { body: "Albert-With-Properties", properties }, + { body: "Mike-With-Properties", properties }, + { body: "Marie-With-Properties", properties } + ]; + + const batch = await producerClient.createBatch({ + partitionId: "0" + }); - should.equal(batch.tryAdd(list[0]), true); - should.equal(batch.tryAdd(list[1]), true); - should.equal(batch.tryAdd(list[2]), true); + batch.maxSizeInBytes.should.be.gt(0); - const receivedEvents: ReceivedEventData[] = []; - let waitUntilEventsReceivedResolver: (value?: any) => void; - const waitUntilEventsReceived = new Promise( - (resolve) => (waitUntilEventsReceivedResolver = resolve) - ); + should.equal(batch.tryAdd(list[0]), true); + should.equal(batch.tryAdd(list[1]), true); + should.equal(batch.tryAdd(list[2]), true); - const sequenceNumber = (await consumerClient.getPartitionProperties("0")) - .lastEnqueuedSequenceNumber; + const receivedEvents: ReceivedEventData[] = []; + let waitUntilEventsReceivedResolver: (value?: any) => void; + const waitUntilEventsReceived = new Promise( + (resolve) => (waitUntilEventsReceivedResolver = resolve) + ); - const subscriber = consumerClient.subscribe( - "0", - { - async processError() { - /* no-op */ - }, - async processEvents(events) { - receivedEvents.push(...events); - if (receivedEvents.length >= 3) { - waitUntilEventsReceivedResolver(); + const sequenceNumber = (await consumerClient.getPartitionProperties("0")) + .lastEnqueuedSequenceNumber; + + const subscriber = consumerClient.subscribe( + "0", + { + async processError() { + /* 
no-op */ + }, + async processEvents(events) { + receivedEvents.push(...events); + if (receivedEvents.length >= 3) { + waitUntilEventsReceivedResolver(); + } } - } - }, - { - startPosition: { - sequenceNumber }, - maxBatchSize: 3 - } - ); + { + startPosition: { + sequenceNumber + }, + maxBatchSize: 3 + } + ); - await producerClient.sendBatch(batch); - await waitUntilEventsReceived; - await subscriber.close(); + await producerClient.sendBatch(batch); + await waitUntilEventsReceived; + await subscriber.close(); - sequenceNumber.should.be.lessThan(receivedEvents[0].sequenceNumber); - sequenceNumber.should.be.lessThan(receivedEvents[1].sequenceNumber); - sequenceNumber.should.be.lessThan(receivedEvents[2].sequenceNumber); + sequenceNumber.should.be.lessThan(receivedEvents[0].sequenceNumber); + sequenceNumber.should.be.lessThan(receivedEvents[1].sequenceNumber); + sequenceNumber.should.be.lessThan(receivedEvents[2].sequenceNumber); + + [list[0], list[1], list[2]].should.be.deep.eq( + receivedEvents.map((event) => { + return { + body: event.body, + properties: event.properties + }; + }), + "Received messages should be equal to our sent messages" + ); + }); - [list[0], list[1], list[2]].should.be.deep.eq( - receivedEvents.map((event) => { - return { - body: event.body, - properties: event.properties - }; - }), - "Received messages should be equal to our sent messages" - ); - }); + it("can be manually traced", async function(): Promise { + const { tracer, resetTracer } = setTracerForTest(); - it("can be manually traced", async function(): Promise { - const { tracer, resetTracer } = setTracerForTest(); + const rootSpan = tracer.startSpan("root"); - const rootSpan = tracer.startSpan("root"); + const list = [{ name: "Albert" }, { name: "Marie" }]; - const list = [{ name: "Albert" }, { name: "Marie" }]; + const eventDataBatch = await producerClient.createBatch({ + partitionId: "0" + }); - const eventDataBatch = await producerClient.createBatch({ - partitionId: "0" - }); + for (let i = 0; i < 2; i++) { + eventDataBatch.tryAdd( + { body: `${list[i].name}` }, + { + tracingOptions: { + tracingContext: setSpan(context.active(), rootSpan) + } + } + ); + } + await producerClient.sendBatch(eventDataBatch); + rootSpan.end(); - for (let i = 0; i < 2; i++) { - eventDataBatch.tryAdd( - { body: `${list[i].name}` }, - { - tracingOptions: { - tracingContext: setSpan(context.active(), rootSpan) + const rootSpans = tracer.getRootSpans(); + rootSpans.length.should.equal(2, "Should only have two root spans."); + rootSpans[0].should.equal(rootSpan, "The root span should match what was passed in."); + + const expectedGraph: SpanGraph = { + roots: [ + { + name: rootSpan.name, + children: [ + { + name: "Azure.EventHubs.message", + children: [] + }, + { + name: "Azure.EventHubs.message", + children: [] + } + ] } - } - ); - } - await producerClient.sendBatch(eventDataBatch); - rootSpan.end(); + ] + }; - const rootSpans = tracer.getRootSpans(); - rootSpans.length.should.equal(2, "Should only have two root spans."); - rootSpans[0].should.equal(rootSpan, "The root span should match what was passed in."); + tracer.getSpanGraph(rootSpan.spanContext().traceId).should.eql(expectedGraph); + tracer.getActiveSpans().length.should.equal(0, "All spans should have had end called."); + resetTracer(); + }); - const expectedGraph: SpanGraph = { - roots: [ - { - name: rootSpan.name, - children: [ - { - name: "Azure.EventHubs.message", - children: [] - }, - { - name: "Azure.EventHubs.message", - children: [] - } - ] - } - ] - }; + it("doesn't 
create empty spans when tracing is disabled", async () => { + const events: EventData[] = [{ body: "foo" }, { body: "bar" }]; - tracer.getSpanGraph(rootSpan.spanContext().traceId).should.eql(expectedGraph); - tracer.getActiveSpans().length.should.equal(0, "All spans should have had end called."); - resetTracer(); - }); + const eventDataBatch = await producerClient.createBatch(); - it("doesn't create empty spans when tracing is disabled", async () => { - const events: EventData[] = [{ body: "foo" }, { body: "bar" }]; + for (const event of events) { + eventDataBatch.tryAdd(event); + } - const eventDataBatch = await producerClient.createBatch(); + should.equal(eventDataBatch.count, 2, "Unexpected number of events in batch."); + should.equal( + eventDataBatch["_messageSpanContexts"].length, + 0, + "Unexpected number of span contexts in batch." + ); + }); - for (const event of events) { - eventDataBatch.tryAdd(event); + function legacyOptionsUsingSpanContext( + rootSpan: TestSpan + ): Pick { + return { + parentSpan: rootSpan.spanContext() + }; } - should.equal(eventDataBatch.count, 2, "Unexpected number of events in batch."); - should.equal( - eventDataBatch["_messageSpanContexts"].length, - 0, - "Unexpected number of span contexts in batch." - ); - }); + function legacyOptionsUsingSpan(rootSpan: TestSpan): Pick { + return { + parentSpan: rootSpan + }; + } - function legacyOptionsUsingSpanContext(rootSpan: TestSpan): Pick { - return { - parentSpan: rootSpan.spanContext() - }; - } - - function legacyOptionsUsingSpan(rootSpan: TestSpan): Pick { - return { - parentSpan: rootSpan - }; - } - - function modernOptions(rootSpan: TestSpan): OperationOptions { - return { - tracingOptions: { - tracingContext: setSpan(context.active(), rootSpan) - } - }; - } + function modernOptions(rootSpan: TestSpan): OperationOptions { + return { + tracingOptions: { + tracingContext: setSpan(context.active(), rootSpan) + } + }; + } - [legacyOptionsUsingSpan, legacyOptionsUsingSpanContext, modernOptions].forEach((optionsFn) => { - describe(`tracing (${optionsFn.name})`, () => { - it("will not instrument already instrumented events", async function(): Promise { - const { tracer, resetTracer } = setTracerForTest(); + [legacyOptionsUsingSpan, legacyOptionsUsingSpanContext, modernOptions].forEach( + (optionsFn) => { + describe(`tracing (${optionsFn.name})`, () => { + it("will not instrument already instrumented events", async function(): Promise { + const { tracer, resetTracer } = setTracerForTest(); - const rootSpan = tracer.startSpan("test"); + const rootSpan = tracer.startSpan("test"); - const list = [ - { name: "Albert" }, - { - name: "Marie", - properties: { - [TRACEPARENT_PROPERTY]: "foo" + const list = [ + { name: "Albert" }, + { + name: "Marie", + properties: { + [TRACEPARENT_PROPERTY]: "foo" + } + } + ]; + + const eventDataBatch = await producerClient.createBatch({ + partitionId: "0" + }); + + for (let i = 0; i < 2; i++) { + eventDataBatch.tryAdd( + { body: `${list[i].name}`, properties: list[i].properties }, + optionsFn(rootSpan) + ); } - } - ]; + await producerClient.sendBatch(eventDataBatch); + rootSpan.end(); - const eventDataBatch = await producerClient.createBatch({ - partitionId: "0" - }); + const rootSpans = tracer.getRootSpans(); + rootSpans.length.should.equal(2, "Should only have two root spans."); + rootSpans[0].should.equal(rootSpan, "The root span should match what was passed in."); - for (let i = 0; i < 2; i++) { - eventDataBatch.tryAdd( - { body: `${list[i].name}`, properties: list[i].properties }, - 
optionsFn(rootSpan) - ); - } - await producerClient.sendBatch(eventDataBatch); - rootSpan.end(); - - const rootSpans = tracer.getRootSpans(); - rootSpans.length.should.equal(2, "Should only have two root spans."); - rootSpans[0].should.equal(rootSpan, "The root span should match what was passed in."); - - const expectedGraph: SpanGraph = { - roots: [ - { - name: rootSpan.name, - children: [ + const expectedGraph: SpanGraph = { + roots: [ { - name: "Azure.EventHubs.message", - children: [] + name: rootSpan.name, + children: [ + { + name: "Azure.EventHubs.message", + children: [] + } + ] } ] - } - ] - }; - - tracer.getSpanGraph(rootSpan.spanContext().traceId).should.eql(expectedGraph); - tracer.getActiveSpans().length.should.equal(0, "All spans should have had end called."); - resetTracer(); - }); - - it("will support tracing batch and send", async function(): Promise { - const { tracer, resetTracer } = setTracerForTest(); + }; - const rootSpan = tracer.startSpan("root"); + tracer.getSpanGraph(rootSpan.spanContext().traceId).should.eql(expectedGraph); + tracer + .getActiveSpans() + .length.should.equal(0, "All spans should have had end called."); + resetTracer(); + }); - const list = [{ name: "Albert" }, { name: "Marie" }]; + it("will support tracing batch and send", async function(): Promise { + const { tracer, resetTracer } = setTracerForTest(); - const eventDataBatch = await producerClient.createBatch({ - partitionId: "0" - }); - for (let i = 0; i < 2; i++) { - eventDataBatch.tryAdd({ body: `${list[i].name}` }, optionsFn(rootSpan)); - } - await producerClient.sendBatch(eventDataBatch, { - tracingOptions: { - tracingContext: setSpan(context.active(), rootSpan) - } - }); - rootSpan.end(); + const rootSpan = tracer.startSpan("root"); - const rootSpans = tracer.getRootSpans(); - rootSpans.length.should.equal(1, "Should only have one root span."); - rootSpans[0].should.equal(rootSpan, "The root span should match what was passed in."); + const list = [{ name: "Albert" }, { name: "Marie" }]; - const expectedGraph: SpanGraph = { - roots: [ - { - name: rootSpan.name, - children: [ - { - name: "Azure.EventHubs.message", - children: [] - }, - { - name: "Azure.EventHubs.message", - children: [] - }, + const eventDataBatch = await producerClient.createBatch({ + partitionId: "0" + }); + for (let i = 0; i < 2; i++) { + eventDataBatch.tryAdd({ body: `${list[i].name}` }, optionsFn(rootSpan)); + } + await producerClient.sendBatch(eventDataBatch, { + tracingOptions: { + tracingContext: setSpan(context.active(), rootSpan) + } + }); + rootSpan.end(); + + const rootSpans = tracer.getRootSpans(); + rootSpans.length.should.equal(1, "Should only have one root span."); + rootSpans[0].should.equal(rootSpan, "The root span should match what was passed in."); + + const expectedGraph: SpanGraph = { + roots: [ { - name: "Azure.EventHubs.send", - children: [] + name: rootSpan.name, + children: [ + { + name: "Azure.EventHubs.message", + children: [] + }, + { + name: "Azure.EventHubs.message", + children: [] + }, + { + name: "Azure.EventHubs.send", + children: [] + } + ] } ] - } - ] - }; - - tracer.getSpanGraph(rootSpan.spanContext().traceId).should.eql(expectedGraph); - tracer.getActiveSpans().length.should.equal(0, "All spans should have had end called."); - resetTracer(); - }); - }); - }); - - it("with partition key should be sent successfully.", async function(): Promise { - const eventDataBatch = await producerClient.createBatch({ partitionKey: "1" }); - for (let i = 0; i < 5; i++) { - eventDataBatch.tryAdd({ body: 
`Hello World ${i}` }); - } - await producerClient.sendBatch(eventDataBatch); - }); + }; - it("with max message size should be sent successfully.", async function(): Promise { - const eventDataBatch = await producerClient.createBatch({ - maxSizeInBytes: 5000, - partitionId: "0" - }); - const message = { body: `${Buffer.from("Z".repeat(4096))}` }; - for (let i = 1; i <= 3; i++) { - const isAdded = eventDataBatch.tryAdd(message); - if (!isAdded) { - debug(`Unable to add ${i} event to the batch`); - break; + tracer.getSpanGraph(rootSpan.spanContext().traceId).should.eql(expectedGraph); + tracer + .getActiveSpans() + .length.should.equal(0, "All spans should have had end called."); + resetTracer(); + }); + }); } - } - await producerClient.sendBatch(eventDataBatch); - eventDataBatch.count.should.equal(1); - }); - }); - - describe("Multiple sendBatch calls", function(): void { - it("should be sent successfully in parallel", async function(): Promise { - const { subscriptionEventHandler } = await SubscriptionHandlerForTests.startingFromHere( - consumerClient ); - const promises = []; - for (let i = 0; i < 5; i++) { - promises.push(producerClient.sendBatch([{ body: `Hello World ${i}` }])); - } - await Promise.all(promises); + it("with partition key should be sent successfully.", async function(): Promise { + const eventDataBatch = await producerClient.createBatch({ partitionKey: "1" }); + for (let i = 0; i < 5; i++) { + eventDataBatch.tryAdd({ body: `Hello World ${i}` }); + } + await producerClient.sendBatch(eventDataBatch); + }); - const subscription = await consumerClient.subscribe(subscriptionEventHandler, { - startPosition + it("with max message size should be sent successfully.", async function(): Promise { + const eventDataBatch = await producerClient.createBatch({ + maxSizeInBytes: 5000, + partitionId: "0" + }); + const message = { body: `${Buffer.from("Z".repeat(4096))}` }; + for (let i = 1; i <= 3; i++) { + const isAdded = eventDataBatch.tryAdd(message); + if (!isAdded) { + debug(`Unable to add ${i} event to the batch`); + break; + } + } + await producerClient.sendBatch(eventDataBatch); + eventDataBatch.count.should.equal(1); }); + }); - try { - const events = await subscriptionEventHandler.waitForEvents( - await consumerClient.getPartitionIds({}), - 5 + describe("Multiple sendBatch calls", function(): void { + it("should be sent successfully in parallel", async function(): Promise { + const { subscriptionEventHandler } = await SubscriptionHandlerForTests.startingFromHere( + consumerClient ); - // we've allowed the server to choose which partition the messages are distributed to - // so our expectation here is just that all the bodies have arrived - const bodiesOnly = events.map((evt) => evt.body); - bodiesOnly.sort(); - - bodiesOnly.should.deep.equal([ - "Hello World 0", - "Hello World 1", - "Hello World 2", - "Hello World 3", - "Hello World 4" - ]); - } finally { - subscription.close(); - } - }); - - it("should be sent successfully in parallel, even when exceeding max event listener count of 1000", async function(): Promise< - void - > { - const senderCount = 1200; - try { const promises = []; - for (let i = 0; i < senderCount; i++) { + for (let i = 0; i < 5; i++) { promises.push(producerClient.sendBatch([{ body: `Hello World ${i}` }])); } await Promise.all(promises); - } catch (err) { - debug("An error occurred while running the test: ", err); - throw err; - } - }); - it("should be sent successfully in parallel by multiple clients", async function(): Promise< - void - > { - const 
senderCount = 3; - try { - const promises = []; - for (let i = 0; i < senderCount; i++) { - if (i === 0) { - debug(">>>>> Sending a message to partition %d", i); - promises.push( - await producerClient.sendBatch([{ body: `Hello World ${i}` }], { partitionId: "0" }) - ); - } else if (i === 1) { - debug(">>>>> Sending a message to partition %d", i); - promises.push( - await producerClient.sendBatch([{ body: `Hello World ${i}` }], { partitionId: "1" }) - ); - } else { - debug(">>>>> Sending a message to the hub when i == %d", i); - promises.push(await producerClient.sendBatch([{ body: `Hello World ${i}` }])); - } - } - await Promise.all(promises); - } catch (err) { - debug("An error occurred while running the test: ", err); - throw err; - } - }); + const subscription = await consumerClient.subscribe(subscriptionEventHandler, { + startPosition + }); - it("should fail when a message greater than 1 MB is sent and succeed when a normal message is sent after that on the same link.", async function(): Promise< - void - > { - const data: EventData = { - body: Buffer.from("Z".repeat(1300000)) - }; - try { - debug("Sending a message of 300KB..."); - await producerClient.sendBatch([data], { partitionId: "0" }); - throw new Error("Test failure"); - } catch (err) { - debug(err); - should.exist(err); - should.equal(err.code, "MessageTooLargeError"); - err.message.should.match( - /.*The received message \(delivery-id:(\d+), size:(\d+) bytes\) exceeds the limit \((\d+) bytes\) currently allowed on the link\..*/gi - ); - } - await producerClient.sendBatch([{ body: "Hello World EventHub!!" }], { partitionId: "0" }); - debug("Sent the message successfully on the same link.."); - }); + try { + const events = await subscriptionEventHandler.waitForEvents( + await consumerClient.getPartitionIds({}), + 5 + ); + + // we've allowed the server to choose which partition the messages are distributed to + // so our expectation here is just that all the bodies have arrived + const bodiesOnly = events.map((evt) => evt.body); + bodiesOnly.sort(); + + bodiesOnly.should.deep.equal([ + "Hello World 0", + "Hello World 1", + "Hello World 2", + "Hello World 3", + "Hello World 4" + ]); + } finally { + subscription.close(); + } + }); - it("can be manually traced", async function(): Promise { - const { tracer, resetTracer } = setTracerForTest(); + it("should be sent successfully in parallel, even when exceeding max event listener count of 1000", async function(): Promise< + void + > { + const senderCount = 1200; + try { + const promises = []; + for (let i = 0; i < senderCount; i++) { + promises.push(producerClient.sendBatch([{ body: `Hello World ${i}` }])); + } + await Promise.all(promises); + } catch (err) { + debug("An error occurred while running the test: ", err); + throw err; + } + }); - const rootSpan = tracer.startSpan("root"); + it("should be sent successfully in parallel by multiple clients", async function(): Promise< + void + > { + const senderCount = 3; + try { + const promises = []; + for (let i = 0; i < senderCount; i++) { + if (i === 0) { + debug(">>>>> Sending a message to partition %d", i); + promises.push( + await producerClient.sendBatch([{ body: `Hello World ${i}` }], { partitionId: "0" }) + ); + } else if (i === 1) { + debug(">>>>> Sending a message to partition %d", i); + promises.push( + await producerClient.sendBatch([{ body: `Hello World ${i}` }], { partitionId: "1" }) + ); + } else { + debug(">>>>> Sending a message to the hub when i == %d", i); + promises.push(await producerClient.sendBatch([{ body: `Hello 
World ${i}` }]));
+ }
+ }
+ await Promise.all(promises);
+ } catch (err) {
+ debug("An error occurred while running the test: ", err);
+ throw err;
+ }
+ });

+ it("should fail when a message greater than 1 MB is sent and succeed when a normal message is sent after that on the same link.", async function(): Promise<
+ void
+ > {
+ const data: EventData = {
+ body: Buffer.from("Z".repeat(1300000))
+ };
+ try {
+ debug("Sending a message of ~1.3 MB...");
+ await producerClient.sendBatch([data], { partitionId: "0" });
+ throw new Error("Test failure");
+ } catch (err) {
+ debug(err);
+ should.exist(err);
+ should.equal(err.code, "MessageTooLargeError");
+ err.message.should.match(
+ /.*The received message \(delivery-id:(\d+), size:(\d+) bytes\) exceeds the limit \((\d+) bytes\) currently allowed on the link\..*/gi
+ );
}
+ await producerClient.sendBatch([{ body: "Hello World EventHub!!" }], { partitionId: "0" });
+ debug("Sent the message successfully on the same link.");
});

- it("can be manually traced", async function(): Promise<void> {
- const { tracer, resetTracer } = setTracerForTest();

+ it("can be manually traced", async function(): Promise<void> {
+ const { tracer, resetTracer } = setTracerForTest();

+ const rootSpan = tracer.startSpan("root");

- const events = [];
- for (let i = 0; i < 5; i++) {
- events.push({ body: `multiple messages - manual trace propgation: ${i}` });
- }
- await producerClient.sendBatch(events, {
- partitionId: "0",
- tracingOptions: {
- tracingContext: setSpan(context.active(), rootSpan)
+ const events = [];
+ for (let i = 0; i < 5; i++) {
+ events.push({ body: `multiple messages - manual trace propagation: ${i}` });
}
+ await producerClient.sendBatch(events, {
+ partitionId: "0",
+ tracingOptions: {
+ tracingContext: setSpan(context.active(), rootSpan)
+ }
});
- rootSpan.end();
+ rootSpan.end();

- const rootSpans = tracer.getRootSpans();
- rootSpans.length.should.equal(1, "Should only have one root spans.");
- rootSpans[0].should.equal(rootSpan, "The root span should match what was passed in.");
+ const rootSpans = tracer.getRootSpans();
+ rootSpans.length.should.equal(1, "Should only have one root span.");
+ rootSpans[0].should.equal(rootSpan, "The root span should match what was passed in.");

- const expectedGraph: SpanGraph = {
- roots: [
- {
- name: rootSpan.name,
- children: [
- {
- name: "Azure.EventHubs.message",
- children: []
- },
- {
- name: "Azure.EventHubs.message",
- children: []
- },
- {
- name: "Azure.EventHubs.message",
- children: []
- },
- {
- name: "Azure.EventHubs.message",
- children: []
- },
- {
- name: "Azure.EventHubs.message",
- children: []
- },
- {
- name: "Azure.EventHubs.send",
- children: []
- }
- ]
- }
- ]
- };

+ const expectedGraph: SpanGraph = {
+ roots: [
+ {
+ name: rootSpan.name,
+ children: [
+ {
+ name: "Azure.EventHubs.message",
+ children: []
+ },
+ {
+ name: "Azure.EventHubs.message",
+ children: []
+ },
+ {
+ name: "Azure.EventHubs.message",
+ children: []
+ },
+ {
+ name: "Azure.EventHubs.message",
+ children: []
+ },
+ {
+ name: "Azure.EventHubs.message",
+ children: []
+ },
+ {
+ name: "Azure.EventHubs.send",
+ children: []
+ }
+ ]
+ }
+ ]
+ };

- tracer.getSpanGraph(rootSpan.spanContext().traceId).should.eql(expectedGraph);
- tracer.getActiveSpans().length.should.equal(0, "All spans should have had end called.");

- resetTracer();
- });

+ tracer.getSpanGraph(rootSpan.spanContext().traceId).should.eql(expectedGraph);
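+ // The test tracer tracks every span it creates; tryAdd and sendBatch should
+ // have ended all of theirs by this point, which the assertion below verifies.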
+ tracer.getActiveSpans().length.should.equal(0, "All spans should have had end called.");

- const events: EventData[] = [];
- for (let i = 0; i < 5; i++) {
- events.push({ body: `multiple messages - manual trace propgation: ${i}` });
- }
- events[0].properties = { [TRACEPARENT_PROPERTY]: "foo" };
- await producerClient.sendBatch(events, {
- partitionId: "0",
- tracingOptions: {
- tracingContext: setSpan(context.active(), rootSpan)
- }
+ resetTracer();
});
- rootSpan.end();

- const rootSpans = tracer.getRootSpans();
- rootSpans.length.should.equal(1, "Should only have one root spans.");
- rootSpans[0].should.equal(rootSpan, "The root span should match what was passed in.");
-
- const expectedGraph: SpanGraph = {
- roots: [
- {
- name: rootSpan.name,
- children: [
- {
- name: "Azure.EventHubs.message",
- children: []
- },
- {
- name: "Azure.EventHubs.message",
- children: []
- },
- {
- name: "Azure.EventHubs.message",
- children: []
- },
- {
- name: "Azure.EventHubs.message",
- children: []
- },
- {
- name: "Azure.EventHubs.send",
- children: []
- }
- ]
- }
- ]
- };

+ it("skips already instrumented events when manually traced", async function(): Promise<void> {
+ const { tracer, resetTracer } = setTracerForTest();

- tracer.getSpanGraph(rootSpan.spanContext().traceId).should.eql(expectedGraph);
- tracer.getActiveSpans().length.should.equal(0, "All spans should have had end called.");
+ const rootSpan = tracer.startSpan("root");

- resetTracer();
- });
- });
-
- describe("Array of events", function() {
- it("should be sent successfully", async () => {
- const data: EventData[] = [{ body: "Hello World 1" }, { body: "Hello World 2" }];
- const receivedEvents: ReceivedEventData[] = [];
- let receivingResolver: (value?: unknown) => void;
-
- const receivingPromise = new Promise((resolve) => (receivingResolver = resolve));
- const subscription = consumerClient.subscribe(
- {
- async processError() {
- /* no-op */
- },
- async processEvents(events) {
- receivedEvents.push(...events);
- receivingResolver();
- }
- },
- {
- startPosition,
- maxBatchSize: data.length
+ const events: EventData[] = [];
+ for (let i = 0; i < 5; i++) {
+ events.push({ body: `multiple messages - manual trace propagation: ${i}` });
}
- );
+ events[0].properties = { [TRACEPARENT_PROPERTY]: "foo" };
+ await producerClient.sendBatch(events, {
+ partitionId: "0",
+ tracingOptions: {
+ tracingContext: setSpan(context.active(), rootSpan)
+ }
+ });
+ rootSpan.end();

- await producerClient.sendBatch(data);

+ const rootSpans = tracer.getRootSpans();
+ rootSpans.length.should.equal(1, "Should only have one root span.");
+ rootSpans[0].should.equal(rootSpan, "The root span should match what was passed in.");
+
+ const expectedGraph: SpanGraph = {
+ roots: [
+ {
+ name: rootSpan.name,
+ children: [
+ {
+ name: "Azure.EventHubs.message",
+ children: []
+ },
+ {
+ name: "Azure.EventHubs.message",
+ children: []
+ },
+ {
+ name: "Azure.EventHubs.message",
+ children: []
+ },
+ {
+ name: "Azure.EventHubs.message",
+ children: []
+ },
+ {
+ name: "Azure.EventHubs.send",
+ children: []
+ }
+ ]
+ }
+ ]
+ };

- await receivingPromise;
- await subscription.close();
+ tracer.getSpanGraph(rootSpan.spanContext().traceId).should.eql(expectedGraph);
+ tracer.getActiveSpans().length.should.equal(0, "All spans should have had end called.");

- receivedEvents.length.should.equal(data.length);
- receivedEvents.map((e) => e.body).should.eql(data.map((d) => d.body));
+ resetTracer();
+ });
});

- it("should be sent successfully with partitionKey", async () => {
- 
const data: EventData[] = [{ body: "Hello World 1" }, { body: "Hello World 2" }]; - const receivedEvents: ReceivedEventData[] = []; - let receivingResolver: (value?: unknown) => void; - const receivingPromise = new Promise((resolve) => (receivingResolver = resolve)); - const subscription = consumerClient.subscribe( - { - async processError() { - /* no-op */ + describe("Array of events", function() { + it("should be sent successfully", async () => { + const data: EventData[] = [{ body: "Hello World 1" }, { body: "Hello World 2" }]; + const receivedEvents: ReceivedEventData[] = []; + let receivingResolver: (value?: unknown) => void; + + const receivingPromise = new Promise((resolve) => (receivingResolver = resolve)); + const subscription = consumerClient.subscribe( + { + async processError() { + /* no-op */ + }, + async processEvents(events) { + receivedEvents.push(...events); + receivingResolver(); + } }, - async processEvents(events) { - receivedEvents.push(...events); - receivingResolver(); + { + startPosition, + maxBatchSize: data.length } - }, - { - startPosition, - maxBatchSize: data.length - } - ); + ); - await producerClient.sendBatch(data, { partitionKey: "foo" }); + await producerClient.sendBatch(data); - await receivingPromise; - await subscription.close(); + await receivingPromise; + await subscription.close(); - receivedEvents.length.should.equal(data.length); - receivedEvents.map((e) => e.body).should.eql(data.map((d) => d.body)); - for (let i = 0; i < receivedEvents.length; i++) { - receivedEvents[i].body.should.equal(data[i].body); - } - }); + receivedEvents.length.should.equal(data.length); + receivedEvents.map((e) => e.body).should.eql(data.map((d) => d.body)); + }); - it("should be sent successfully with partitionId", async () => { - const partitionId = "0"; - const data: EventData[] = [{ body: "Hello World 1" }, { body: "Hello World 2" }]; - const receivedEvents: ReceivedEventData[] = []; - let receivingResolver: (value?: unknown) => void; - const receivingPromise = new Promise((resolve) => (receivingResolver = resolve)); - const subscription = consumerClient.subscribe( - partitionId, - { - async processError() { - /* no-op */ + it("should be sent successfully with partitionKey", async () => { + const data: EventData[] = [{ body: "Hello World 1" }, { body: "Hello World 2" }]; + const receivedEvents: ReceivedEventData[] = []; + let receivingResolver: (value?: unknown) => void; + const receivingPromise = new Promise((resolve) => (receivingResolver = resolve)); + const subscription = consumerClient.subscribe( + { + async processError() { + /* no-op */ + }, + async processEvents(events) { + receivedEvents.push(...events); + receivingResolver(); + } }, - async processEvents(events) { - receivedEvents.push(...events); - receivingResolver(); + { + startPosition, + maxBatchSize: data.length } - }, - { - startPosition, - maxBatchSize: data.length - } - ); - - await producerClient.sendBatch(data, { partitionId }); - - await receivingPromise; - await subscription.close(); - - receivedEvents.length.should.equal(data.length); - receivedEvents.map((e) => e.body).should.eql(data.map((d) => d.body)); - for (let i = 0; i < receivedEvents.length; i++) { - receivedEvents[i].body.should.equal(data[i].body); - } - }); + ); - it("can be manually traced", async function(): Promise { - const { tracer, resetTracer } = setTracerForTest(); + await producerClient.sendBatch(data, { partitionKey: "foo" }); - const rootSpan = tracer.startSpan("root"); + await receivingPromise; + await 
subscription.close(); - const events = []; - for (let i = 0; i < 5; i++) { - events.push({ body: `multiple messages - manual trace propgation: ${i}` }); - } - await producerClient.sendBatch(events, { - tracingOptions: { - tracingContext: setSpan(context.active(), rootSpan) + receivedEvents.length.should.equal(data.length); + receivedEvents.map((e) => e.body).should.eql(data.map((d) => d.body)); + for (let i = 0; i < receivedEvents.length; i++) { + receivedEvents[i].body.should.equal(data[i].body); } }); - rootSpan.end(); - const rootSpans = tracer.getRootSpans(); - rootSpans.length.should.equal(1, "Should only have one root spans."); - rootSpans[0].should.equal(rootSpan, "The root span should match what was passed in."); - - const expectedGraph: SpanGraph = { - roots: [ + it("should be sent successfully with partitionId", async () => { + const partitionId = "0"; + const data: EventData[] = [{ body: "Hello World 1" }, { body: "Hello World 2" }]; + const receivedEvents: ReceivedEventData[] = []; + let receivingResolver: (value?: unknown) => void; + const receivingPromise = new Promise((resolve) => (receivingResolver = resolve)); + const subscription = consumerClient.subscribe( + partitionId, { - name: rootSpan.name, - children: [ - { - name: "Azure.EventHubs.message", - children: [] - }, - { - name: "Azure.EventHubs.message", - children: [] - }, - { - name: "Azure.EventHubs.message", - children: [] - }, - { - name: "Azure.EventHubs.message", - children: [] - }, - { - name: "Azure.EventHubs.message", - children: [] - }, - { - name: "Azure.EventHubs.send", - children: [] - } - ] + async processError() { + /* no-op */ + }, + async processEvents(events) { + receivedEvents.push(...events); + receivingResolver(); + } + }, + { + startPosition, + maxBatchSize: data.length } - ] - }; - - tracer.getSpanGraph(rootSpan.spanContext().traceId).should.eql(expectedGraph); - tracer.getActiveSpans().length.should.equal(0, "All spans should have had end called."); - - const knownSendSpans = tracer - .getKnownSpans() - .filter((span: TestSpan) => span.name === "Azure.EventHubs.send"); - knownSendSpans.length.should.equal(1, "There should have been one send span."); - knownSendSpans[0].attributes.should.deep.equal({ - "az.namespace": "Microsoft.EventHub", - "message_bus.destination": producerClient.eventHubName, - "peer.address": producerClient.fullyQualifiedNamespace - }); - resetTracer(); - }); + ); - it("skips already instrumented events when manually traced", async function(): Promise { - const { tracer, resetTracer } = setTracerForTest(); + await producerClient.sendBatch(data, { partitionId }); - const rootSpan = tracer.startSpan("root"); + await receivingPromise; + await subscription.close(); - const events: EventData[] = []; - for (let i = 0; i < 5; i++) { - events.push({ body: `multiple messages - manual trace propgation: ${i}` }); - } - events[0].properties = { [TRACEPARENT_PROPERTY]: "foo" }; - await producerClient.sendBatch(events, { - tracingOptions: { - tracingContext: setSpan(context.active(), rootSpan) + receivedEvents.length.should.equal(data.length); + receivedEvents.map((e) => e.body).should.eql(data.map((d) => d.body)); + for (let i = 0; i < receivedEvents.length; i++) { + receivedEvents[i].body.should.equal(data[i].body); } }); - rootSpan.end(); - const rootSpans = tracer.getRootSpans(); - rootSpans.length.should.equal(1, "Should only have one root spans."); - rootSpans[0].should.equal(rootSpan, "The root span should match what was passed in."); + it("can be manually traced", async 
function(): Promise<void> {
+ const { tracer, resetTracer } = setTracerForTest();

- const expectedGraph: SpanGraph = {
- roots: [
- {
- name: rootSpan.name,
- children: [
- {
- name: "Azure.EventHubs.message",
- children: []
- },
- {
- name: "Azure.EventHubs.message",
- children: []
- },
- {
- name: "Azure.EventHubs.message",
- children: []
- },
- {
- name: "Azure.EventHubs.message",
- children: []
- },
- {
- name: "Azure.EventHubs.message",
- children: []
- },
- {
- name: "Azure.EventHubs.send",
- children: []
- }
- ]
+ const rootSpan = tracer.startSpan("root");
+
+ const events = [];
+ for (let i = 0; i < 5; i++) {
+ events.push({ body: `multiple messages - manual trace propagation: ${i}` });
+ }
+ await producerClient.sendBatch(events, {
+ tracingOptions: {
+ tracingContext: setSpan(context.active(), rootSpan)
}
- ]
- };
+ });
+ rootSpan.end();

- tracer.getSpanGraph(rootSpan.spanContext().traceId).should.eql(expectedGraph);
- tracer.getActiveSpans().length.should.equal(0, "All spans should have had end called.");
+ const rootSpans = tracer.getRootSpans();
+ rootSpans.length.should.equal(1, "Should only have one root span.");
+ rootSpans[0].should.equal(rootSpan, "The root span should match what was passed in.");

- const knownSendSpans = tracer
- .getKnownSpans()
- .filter((span: TestSpan) => span.name === "Azure.EventHubs.send");
- knownSendSpans.length.should.equal(1, "There should have been one send span.");
- knownSendSpans[0].attributes.should.deep.equal({
- "az.namespace": "Microsoft.EventHub",
- "message_bus.destination": producerClient.eventHubName,
- "peer.address": producerClient.fullyQualifiedNamespace
- });
- resetTracer();
- });

+ const expectedGraph: SpanGraph = {
+ roots: [
+ {
+ name: rootSpan.name,
+ children: [
+ {
+ name: "Azure.EventHubs.message",
+ children: []
+ },
+ {
+ name: "Azure.EventHubs.message",
+ children: []
+ },
+ {
+ name: "Azure.EventHubs.message",
+ children: []
+ },
+ {
+ name: "Azure.EventHubs.message",
+ children: []
+ },
+ {
+ name: "Azure.EventHubs.message",
+ children: []
+ },
+ {
+ name: "Azure.EventHubs.send",
+ children: []
+ }
+ ]
+ }
+ ]
+ };

- it("skips already instrumented events when manually traced", async function(): Promise<void> {
- const { tracer, resetTracer } = setTracerForTest();

- const rootSpan = tracer.startSpan("root");

- const events: EventData[] = [];
- for (let i = 0; i < 5; i++) {
- events.push({ body: `multiple messages - manual trace propgation: ${i}` });
- }
- events[0].properties = { [TRACEPARENT_PROPERTY]: "foo" };
- await producerClient.sendBatch(events, {
- tracingOptions: {
- tracingContext: setSpan(context.active(), rootSpan)
+ tracer.getSpanGraph(rootSpan.spanContext().traceId).should.eql(expectedGraph);
+ tracer.getActiveSpans().length.should.equal(0, "All spans should have had end called.");
+
+ const knownSendSpans = tracer
+ .getKnownSpans()
+ .filter((span: TestSpan) => span.name === "Azure.EventHubs.send");
+ knownSendSpans.length.should.equal(1, "There should have been one send span.");
+ knownSendSpans[0].attributes.should.deep.equal({
+ "az.namespace": "Microsoft.EventHub",
+ "message_bus.destination": producerClient.eventHubName,
+ "peer.address": producerClient.fullyQualifiedNamespace
+ });
+ resetTracer();
});
- rootSpan.end();

- const rootSpans = tracer.getRootSpans();
- rootSpans.length.should.equal(1, "Should only have one root spans.");
- rootSpans[0].should.equal(rootSpan, "The root span should match what was passed in.");
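+ // An event that already carries a traceparent entry in .properties is treated
+ // as pre-instrumented: no new message span is created for it, so five events
+ // with one pre-instrumented entry produce only four message spans below.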
+ it("skips already instrumented events when manually traced", async function(): Promise<void> {
+ const { tracer, resetTracer } = setTracerForTest();

- const expectedGraph: SpanGraph = {
- roots: [
- {
- name: rootSpan.name,
- children: [
- {
- name: "Azure.EventHubs.message",
- children: []
- },
- {
- name: "Azure.EventHubs.message",
- children: []
- },
- {
- name: "Azure.EventHubs.message",
- children: []
- },
- {
- name: "Azure.EventHubs.message",
- children: []
- },
- {
- name: "Azure.EventHubs.send",
- children: []
- }
- ]
+ const rootSpan = tracer.startSpan("root");
+
+ const events: EventData[] = [];
+ for (let i = 0; i < 5; i++) {
+ events.push({ body: `multiple messages - manual trace propagation: ${i}` });
}
- ]
- };
+ events[0].properties = { [TRACEPARENT_PROPERTY]: "foo" };
+ await producerClient.sendBatch(events, {
+ tracingOptions: {
+ tracingContext: setSpan(context.active(), rootSpan)
+ }
+ });
+ rootSpan.end();

- tracer.getSpanGraph(rootSpan.spanContext().traceId).should.eql(expectedGraph);
- tracer.getActiveSpans().length.should.equal(0, "All spans should have had end called.");
- resetTracer();
- });

+ const rootSpans = tracer.getRootSpans();
+ rootSpans.length.should.equal(1, "Should only have one root span.");
+ rootSpans[0].should.equal(rootSpan, "The root span should match what was passed in.");

- it("should throw when partitionId and partitionKey are provided", async function(): Promise<
- void
- > {
- try {
- const data: EventData[] = [
- {
- body: "Sender paritition id and partition key"
- }
- ];
- await producerClient.sendBatch(data, { partitionKey: "1", partitionId: "0" });
- throw new Error("Test Failure");
- } catch (err) {
- err.message.should.equal(
- "The partitionId (0) and partitionKey (1) cannot both be specified."
- );
- }
- });
- });

+ const expectedGraph: SpanGraph = {
+ roots: [
+ {
+ name: rootSpan.name,
+ children: [
+ {
+ name: "Azure.EventHubs.message",
+ children: []
+ },
+ {
+ name: "Azure.EventHubs.message",
+ children: []
+ },
+ {
+ name: "Azure.EventHubs.message",
+ children: []
+ },
+ {
+ name: "Azure.EventHubs.message",
+ children: []
+ },
+ {
+ name: "Azure.EventHubs.send",
+ children: []
+ }
+ ]
+ }
+ ]
+ };

- describe("Validation", function() {
- describe("createBatch", function() {
- it("throws an error if partitionId and partitionKey are set", async () => {
- try {
- await producerClient.createBatch({ partitionId: "0", partitionKey: "boo" });
- throw new Error("Test failure");
- } catch (error) {
- error.message.should.equal(
- "partitionId and partitionKey cannot both be set when creating a batch"
- );
- }
+ tracer.getSpanGraph(rootSpan.spanContext().traceId).should.eql(expectedGraph);
+ tracer.getActiveSpans().length.should.equal(0, "All spans should have had end called.");
+ resetTracer();
});

- it("throws an error if partitionId and partitionKey are set and partitionId is 0 i.e. falsy", async () => {
- try {
- await producerClient.createBatch({
- // @ts-expect-error Testing the value 0 is not ignored. 
throw new Error("Test failure"); - } catch (err) { - err.message.should.equal( - "The partitionKey (bar) set on sendBatch does not match the partitionKey (foo) set when creating the batch." - ); - } - }); - it("throws an error if partitionKeys don't match (undefined)", async () => { - const badOptions: SendBatchOptions = { - partitionKey: "bar" - }; - const batch = await producerClient.createBatch(); - try { - await producerClient.sendBatch(batch, badOptions); - throw new Error("Test failure"); - } catch (err) { - err.message.should.equal( - "The partitionKey (bar) set on sendBatch does not match the partitionKey (undefined) set when creating the batch." - ); - } - }); - it("throws an error if partitionIds don't match", async () => { - const badOptions: SendBatchOptions = { - partitionId: "0" - }; - const batch = await producerClient.createBatch({ partitionId: "1" }); - try { - await producerClient.sendBatch(batch, badOptions); - throw new Error("Test failure"); - } catch (err) { - err.message.should.equal( - "The partitionId (0) set on sendBatch does not match the partitionId (1) set when creating the batch." - ); - } - }); - it("throws an error if partitionIds don't match (undefined)", async () => { - const badOptions: SendBatchOptions = { - partitionId: "0" - }; - const batch = await producerClient.createBatch(); - try { - await producerClient.sendBatch(batch, badOptions); - throw new Error("Test failure"); } catch (err) { err.message.should.equal( - "The partitionId (0) set on sendBatch does not match the partitionId (undefined) set when creating the batch." + "The partitionId (0) and partitionKey (1) cannot both be specified." ); } }); - it("throws an error if partitionId and partitionKey are set (create, send)", async () => { - const badOptions: SendBatchOptions = { - partitionKey: "foo" - }; - const batch = await producerClient.createBatch({ partitionId: "0" }); - try { - await producerClient.sendBatch(batch, badOptions); - throw new Error("Test failure"); - } catch (err) { - err.message.should.not.equal("Test failure"); - } + }); + + describe("Validation", function() { + describe("createBatch", function() { + it("throws an error if partitionId and partitionKey are set", async () => { + try { + await producerClient.createBatch({ partitionId: "0", partitionKey: "boo" }); + throw new Error("Test failure"); + } catch (error) { + error.message.should.equal( + "partitionId and partitionKey cannot both be set when creating a batch" + ); + } + }); + + it("throws an error if partitionId and partitionKey are set and partitionId is 0 i.e. falsy", async () => { + try { + await producerClient.createBatch({ + // @ts-expect-error Testing the value 0 is not ignored. + partitionId: 0, + partitionKey: "boo" + }); + throw new Error("Test failure"); + } catch (error) { + error.message.should.equal( + "partitionId and partitionKey cannot both be set when creating a batch" + ); + } + }); + + it("throws an error if partitionId and partitionKey are set and partitionKey is 0 i.e. falsy", async () => { + try { + await producerClient.createBatch({ + partitionId: "1", + // @ts-expect-error Testing the value 0 is not ignored. 
+ partitionKey: 0 + }); + throw new Error("Test failure"); + } catch (error) { + error.message.should.equal( + "partitionId and partitionKey cannot both be set when creating a batch" + ); + } + }); + + it("should throw when maxMessageSize is greater than maximum message size on the AMQP sender link", async function(): Promise< + void + > { + try { + await producerClient.createBatch({ maxSizeInBytes: 2046528 }); + throw new Error("Test Failure"); + } catch (err) { + err.message.should.match( + /.*Max message size \((\d+) bytes\) is greater than maximum message size \((\d+) bytes\) on the AMQP sender link.*/gi + ); + } + }); }); - it("throws an error if partitionId and partitionKey are set (send, create)", async () => { - const badOptions: SendBatchOptions = { - partitionId: "0" - }; - const batch = await producerClient.createBatch({ partitionKey: "foo" }); - try { - await producerClient.sendBatch(batch, badOptions); - throw new Error("Test failure"); - } catch (err) { - err.message.should.not.equal("Test failure"); - } + describe("sendBatch with EventDataBatch", function() { + it("works if partitionKeys match", async () => { + const misconfiguredOptions: SendBatchOptions = { + partitionKey: "foo" + }; + const batch = await producerClient.createBatch({ partitionKey: "foo" }); + await producerClient.sendBatch(batch, misconfiguredOptions); + }); + it("works if partitionIds match", async () => { + const misconfiguredOptions: SendBatchOptions = { + partitionId: "0" + }; + const batch = await producerClient.createBatch({ partitionId: "0" }); + await producerClient.sendBatch(batch, misconfiguredOptions); + }); + it("throws an error if partitionKeys don't match", async () => { + const badOptions: SendBatchOptions = { + partitionKey: "bar" + }; + const batch = await producerClient.createBatch({ partitionKey: "foo" }); + try { + await producerClient.sendBatch(batch, badOptions); + throw new Error("Test failure"); + } catch (err) { + err.message.should.equal( + "The partitionKey (bar) set on sendBatch does not match the partitionKey (foo) set when creating the batch." + ); + } + }); + it("throws an error if partitionKeys don't match (undefined)", async () => { + const badOptions: SendBatchOptions = { + partitionKey: "bar" + }; + const batch = await producerClient.createBatch(); + try { + await producerClient.sendBatch(batch, badOptions); + throw new Error("Test failure"); + } catch (err) { + err.message.should.equal( + "The partitionKey (bar) set on sendBatch does not match the partitionKey (undefined) set when creating the batch." + ); + } + }); + it("throws an error if partitionIds don't match", async () => { + const badOptions: SendBatchOptions = { + partitionId: "0" + }; + const batch = await producerClient.createBatch({ partitionId: "1" }); + try { + await producerClient.sendBatch(batch, badOptions); + throw new Error("Test failure"); + } catch (err) { + err.message.should.equal( + "The partitionId (0) set on sendBatch does not match the partitionId (1) set when creating the batch." + ); + } + }); + it("throws an error if partitionIds don't match (undefined)", async () => { + const badOptions: SendBatchOptions = { + partitionId: "0" + }; + const batch = await producerClient.createBatch(); + try { + await producerClient.sendBatch(batch, badOptions); + throw new Error("Test failure"); + } catch (err) { + err.message.should.equal( + "The partitionId (0) set on sendBatch does not match the partitionId (undefined) set when creating the batch." 
+ ); + } + }); + it("throws an error if partitionId and partitionKey are set (create, send)", async () => { + const badOptions: SendBatchOptions = { + partitionKey: "foo" + }; + const batch = await producerClient.createBatch({ partitionId: "0" }); + try { + await producerClient.sendBatch(batch, badOptions); + throw new Error("Test failure"); + } catch (err) { + err.message.should.not.equal("Test failure"); + } + }); + it("throws an error if partitionId and partitionKey are set (send, create)", async () => { + const badOptions: SendBatchOptions = { + partitionId: "0" + }; + const batch = await producerClient.createBatch({ partitionKey: "foo" }); + try { + await producerClient.sendBatch(batch, badOptions); + throw new Error("Test failure"); + } catch (err) { + err.message.should.not.equal("Test failure"); + } + }); + it("throws an error if partitionId and partitionKey are set (send, send)", async () => { + const badOptions: SendBatchOptions = { + partitionKey: "foo", + partitionId: "0" + }; + const batch = await producerClient.createBatch(); + try { + await producerClient.sendBatch(batch, badOptions); + throw new Error("Test failure"); + } catch (err) { + err.message.should.not.equal("Test failure"); + } + }); }); - it("throws an error if partitionId and partitionKey are set (send, send)", async () => { - const badOptions: SendBatchOptions = { - partitionKey: "foo", - partitionId: "0" - }; - const batch = await producerClient.createBatch(); - try { - await producerClient.sendBatch(batch, badOptions); - throw new Error("Test failure"); - } catch (err) { - err.message.should.not.equal("Test failure"); - } + + describe("sendBatch with EventDataBatch with events array", function() { + it("throws an error if partitionId and partitionKey are set", async () => { + const badOptions: SendBatchOptions = { + partitionKey: "foo", + partitionId: "0" + }; + const batch = [{ body: "Hello 1" }, { body: "Hello 2" }]; + try { + await producerClient.sendBatch(batch, badOptions); + throw new Error("Test failure"); + } catch (err) { + err.message.should.equal( + "The partitionId (0) and partitionKey (foo) cannot both be specified." + ); + } + }); + it("throws an error if partitionId and partitionKey are set with partitionId set to 0 i.e. falsy", async () => { + const badOptions: SendBatchOptions = { + partitionKey: "foo", + // @ts-expect-error Testing the value 0 is not ignored. + partitionId: 0 + }; + const batch = [{ body: "Hello 1" }, { body: "Hello 2" }]; + try { + await producerClient.sendBatch(batch, badOptions); + throw new Error("Test failure"); + } catch (err) { + err.message.should.equal( + "The partitionId (0) and partitionKey (foo) cannot both be specified." + ); + } + }); + it("throws an error if partitionId and partitionKey are set with partitionKey set to 0 i.e. falsy", async () => { + const badOptions: SendBatchOptions = { + // @ts-expect-error Testing the value 0 is not ignored. + partitionKey: 0, + partitionId: "0" + }; + const batch = [{ body: "Hello 1" }, { body: "Hello 2" }]; + try { + await producerClient.sendBatch(batch, badOptions); + throw new Error("Test failure"); + } catch (err) { + err.message.should.equal( + "The partitionId (0) and partitionKey (0) cannot both be specified." 
+ ); + } + }); }); }); - describe("sendBatch with EventDataBatch with events array", function() { - it("throws an error if partitionId and partitionKey are set", async () => { - const badOptions: SendBatchOptions = { - partitionKey: "foo", - partitionId: "0" - }; - const batch = [{ body: "Hello 1" }, { body: "Hello 2" }]; - try { - await producerClient.sendBatch(batch, badOptions); - throw new Error("Test failure"); - } catch (err) { - err.message.should.equal( - "The partitionId (0) and partitionKey (foo) cannot both be specified." - ); - } - }); - it("throws an error if partitionId and partitionKey are set with partitionId set to 0 i.e. falsy", async () => { - const badOptions: SendBatchOptions = { - partitionKey: "foo", - // @ts-expect-error Testing the value 0 is not ignored. - partitionId: 0 + describe("Negative scenarios", function(): void { + it("a message greater than 1 MB should fail.", async function(): Promise { + const data: EventData = { + body: Buffer.from("Z".repeat(1300000)) }; - const batch = [{ body: "Hello 1" }, { body: "Hello 2" }]; try { - await producerClient.sendBatch(batch, badOptions); + await producerClient.sendBatch([data]); throw new Error("Test failure"); } catch (err) { - err.message.should.equal( - "The partitionId (0) and partitionKey (foo) cannot both be specified." - ); - } - }); - it("throws an error if partitionId and partitionKey are set with partitionKey set to 0 i.e. falsy", async () => { - const badOptions: SendBatchOptions = { - // @ts-expect-error Testing the value 0 is not ignored. - partitionKey: 0, - partitionId: "0" - }; - const batch = [{ body: "Hello 1" }, { body: "Hello 2" }]; - try { - await producerClient.sendBatch(batch, badOptions); - throw new Error("Test failure"); - } catch (err) { - err.message.should.equal( - "The partitionId (0) and partitionKey (0) cannot both be specified." + debug(err); + should.exist(err); + should.equal(err.code, "MessageTooLargeError"); + err.message.should.match( + /.*The received message \(delivery-id:(\d+), size:(\d+) bytes\) exceeds the limit \((\d+) bytes\) currently allowed on the link\..*/gi ); } }); - }); - }); - - describe("Negative scenarios", function(): void { - it("a message greater than 1 MB should fail.", async function(): Promise { - const data: EventData = { - body: Buffer.from("Z".repeat(1300000)) - }; - try { - await producerClient.sendBatch([data]); - throw new Error("Test failure"); - } catch (err) { - debug(err); - should.exist(err); - should.equal(err.code, "MessageTooLargeError"); - err.message.should.match( - /.*The received message \(delivery-id:(\d+), size:(\d+) bytes\) exceeds the limit \((\d+) bytes\) currently allowed on the link\..*/gi - ); - } - }); - describe("on invalid partition ids like", function(): void { - // tslint:disable-next-line: no-null-keyword - const invalidIds = ["XYZ", "-1", "1000", "-"]; - invalidIds.forEach(function(id: string | null): void { - it(`"${id}" should throw an error`, async function(): Promise { - try { - debug("Created sender and will be sending a message to partition id ...", id); - await producerClient.sendBatch([{ body: "Hello world!" 
}], { - partitionId: id as any - }); - debug("sent the message."); - throw new Error("Test failure"); - } catch (err) { - debug(`>>>> Received error for invalid partition id "${id}" - `, err); - should.exist(err); - err.message.should.match( - /.*The specified partition is invalid for an EventHub partition sender or receiver.*/gi - ); - } + describe("on invalid partition ids like", function(): void { + // tslint:disable-next-line: no-null-keyword + const invalidIds = ["XYZ", "-1", "1000", "-"]; + invalidIds.forEach(function(id: string | null): void { + it(`"${id}" should throw an error`, async function(): Promise { + try { + debug("Created sender and will be sending a message to partition id ...", id); + await producerClient.sendBatch([{ body: "Hello world!" }], { + partitionId: id as any + }); + debug("sent the message."); + throw new Error("Test failure"); + } catch (err) { + debug(`>>>> Received error for invalid partition id "${id}" - `, err); + should.exist(err); + err.message.should.match( + /.*The specified partition is invalid for an EventHub partition sender or receiver.*/gi + ); + } + }); }); }); }); - }); -}).timeout(20000); + }).timeout(20000); +}); diff --git a/sdk/eventhub/event-hubs/test/public/amqpAnnotatedMessage.spec.ts b/sdk/eventhub/event-hubs/test/public/amqpAnnotatedMessage.spec.ts index 72c39e50b247..79e4aac5b5fe 100644 --- a/sdk/eventhub/event-hubs/test/public/amqpAnnotatedMessage.spec.ts +++ b/sdk/eventhub/event-hubs/test/public/amqpAnnotatedMessage.spec.ts @@ -16,279 +16,296 @@ import { Subscription } from "../../src"; import { BodyTypes } from "../../src/dataTransformer"; +import { testWithServiceTypes } from "./utils/testWithServiceTypes"; +import { createMockServer } from "./utils/mockService"; const should = chai.should(); chai.use(chaiAsPromised); chai.use(chaiExclude); const assert = chai.assert; -const env = getEnvVars(); - -describe("AmqpAnnotatedMessage", function(): void { - let producerClient: EventHubProducerClient; - let consumerClient: EventHubConsumerClient; - const service = { - connectionString: env[EnvVarKeys.EVENTHUB_CONNECTION_STRING], - path: env[EnvVarKeys.EVENTHUB_NAME] - }; - - before("validate environment", function(): void { - should.exist( - env[EnvVarKeys.EVENTHUB_CONNECTION_STRING], - "define EVENTHUB_CONNECTION_STRING in your environment before running integration tests." - ); - should.exist( - env[EnvVarKeys.EVENTHUB_NAME], - "define EVENTHUB_NAME in your environment before running integration tests." 
- );
- });

- beforeEach(async () => {
- producerClient = new EventHubProducerClient(service.connectionString, service.path);
- consumerClient = new EventHubConsumerClient(
- EventHubConsumerClient.defaultConsumerGroupName,
- service.connectionString,
- service.path
- );
- });

- afterEach("close the connection", async function(): Promise<void> {
- await producerClient.close();
- await consumerClient.close();
- });
+testWithServiceTypes((serviceVersion) => {
+ const env = getEnvVars();
+ if (serviceVersion === "mock") {
+ let service: ReturnType<typeof createMockServer>;
+ before("Starting mock service", () => {
+ service = createMockServer();
+ return service.start();
+ });

- function getSampleAmqpAnnotatedMessage(): AmqpAnnotatedMessage {
- const randomTag = Math.random().toString();
-
- return {
- body: `message body ${randomTag}`,
- bodyType: "data",
- applicationProperties: {
- propOne: 1,
- propTwo: "two",
- propThree: true,
- propFour: Date()
- },
- footer: {
- propFooter: "foot"
- },
- messageAnnotations: { propMsgAnnotate: "annotation" },
- properties: {
- contentEncoding: "application/json; charset=utf-8",
- correlationId: randomTag,
- messageId: v4()
- }
- } as AmqpAnnotatedMessage;
+ after("Stopping mock service", () => {
+ return service?.stop();
+ });
}

+ describe("AmqpAnnotatedMessage", function(): void {
+ let producerClient: EventHubProducerClient;
+ let consumerClient: EventHubConsumerClient;
+ const service = {
+ connectionString: env[EnvVarKeys.EVENTHUB_CONNECTION_STRING],
+ path: env[EnvVarKeys.EVENTHUB_NAME]
+ };
+
+ before("validate environment", function(): void {
+ should.exist(
+ env[EnvVarKeys.EVENTHUB_CONNECTION_STRING],
+ "define EVENTHUB_CONNECTION_STRING in your environment before running integration tests."
+ );
+ should.exist(
+ env[EnvVarKeys.EVENTHUB_NAME],
+ "define EVENTHUB_NAME in your environment before running integration tests."
+ );
+ });

+ beforeEach(async () => {
+ producerClient = new EventHubProducerClient(service.connectionString, service.path);
+ consumerClient = new EventHubConsumerClient(
+ EventHubConsumerClient.defaultConsumerGroupName,
+ service.connectionString,
+ service.path
+ );
+ });

+ afterEach("close the connection", async function(): Promise<void> {
+ await producerClient.close();
+ await consumerClient.close();
+ });

+ function getSampleAmqpAnnotatedMessage(): AmqpAnnotatedMessage {
+ const randomTag = Math.random().toString();
+
+ return {
+ body: `message body ${randomTag}`,
+ bodyType: "data",
+ applicationProperties: {
+ propOne: 1,
+ propTwo: "two",
+ propThree: true,
+ propFour: Date()
+ },
+ footer: {
+ propFooter: "foot"
+ },
+ messageAnnotations: { propMsgAnnotate: "annotation" },
+ properties: {
+ contentEncoding: "application/json; charset=utf-8",
+ correlationId: randomTag,
+ messageId: v4()
+ }
+ } as AmqpAnnotatedMessage;
}

/**
* Helper function that will receive a single event that comes after the starting positions. 
- * - * Note: Call this after sending a single event to Event Hubs to validate - * @internal - */ - async function receiveEvent(startingPositions: { - [partitionId: string]: EventPosition; - }): Promise { - return new Promise((resolve, reject) => { - const subscription: Subscription = consumerClient.subscribe( - { - async processError(err) { - reject(err); - return subscription.close(); - }, - async processEvents(events) { - if (events.length) { - resolve(events[0]); - return subscription.close(); - } - } - }, - { - startPosition: startingPositions - } + beforeEach(async () => { + producerClient = new EventHubProducerClient(service.connectionString, service.path); + consumerClient = new EventHubConsumerClient( + EventHubConsumerClient.defaultConsumerGroupName, + service.connectionString, + service.path ); }); - } - async function sendEvents(messages: AmqpAnnotatedMessage[], { useBatch }: { useBatch: boolean }) { - if (!useBatch) { - return producerClient.sendBatch(messages); - } + afterEach("close the connection", async function(): Promise { + await producerClient.close(); + await consumerClient.close(); + }); - const batch = await producerClient.createBatch(); - for (const message of messages) { - assert.isTrue(batch.tryAdd(message)); + function getSampleAmqpAnnotatedMessage(): AmqpAnnotatedMessage { + const randomTag = Math.random().toString(); + + return { + body: `message body ${randomTag}`, + bodyType: "data", + applicationProperties: { + propOne: 1, + propTwo: "two", + propThree: true, + propFour: Date() + }, + footer: { + propFooter: "foot" + }, + messageAnnotations: { propMsgAnnotate: "annotation" }, + properties: { + contentEncoding: "application/json; charset=utf-8", + correlationId: randomTag, + messageId: v4() + } + } as AmqpAnnotatedMessage; } - return producerClient.sendBatch(batch); - } - - describe("round-tripping AMQP encoding/decoding", () => { - [{ useBatch: true }, { useBatch: false }].forEach(({ useBatch }) => { - it(`props (useBatch: ${useBatch})`, async () => { - const startingPositions = await getStartingPositionsForTests(consumerClient); - const testMessage = getSampleAmqpAnnotatedMessage(); - await sendEvents([testMessage], { useBatch }); - - const event = await receiveEvent(startingPositions); - should.equal(event.body, testMessage.body, "Unexpected body on the received event."); - should.equal( - event.getRawAmqpMessage().messageAnnotations!["propMsgAnnotate"], - testMessage.messageAnnotations!["propMsgAnnotate"], - "Unexpected messageAnnotations on the received event." - ); - assert.deepEqualExcluding( - event.getRawAmqpMessage(), - testMessage, - ["deliveryAnnotations", "body", "messageAnnotations", "header", "properties"], - "Unexpected on the AmqpAnnotatedMessage" - ); - assert.deepEqualExcluding( - event.getRawAmqpMessage().footer!, - testMessage.footer!, - ["deliveryCount"], - "Unexpected header on the AmqpAnnotatedMessage" - ); - assert.deepEqualExcluding( - event.getRawAmqpMessage().properties!, - testMessage.properties!, - ["creationTime", "absoluteExpiryTime", "groupId"], - "Unexpected properties on the AmqpAnnotatedMessage" - ); - assert.equal( - event.getRawAmqpMessage().properties!.groupId, - testMessage.properties!.groupId, - "Unexpected session-id on the AmqpAnnotatedMessage" + /** + * Helper function that will receive a single event that comes after the starting positions. 
+ * + * Note: Call this after sending a single event to Event Hubs to validate + * @internal + */ + async function receiveEvent(startingPositions: { + [partitionId: string]: EventPosition; + }): Promise { + return new Promise((resolve, reject) => { + const subscription: Subscription = consumerClient.subscribe( + { + async processError(err) { + reject(err); + return subscription.close(); + }, + async processEvents(events) { + if (events.length) { + resolve(events[0]); + return subscription.close(); + } + } + }, + { + startPosition: startingPositions + } ); }); + } - it(`values (useBatch: ${useBatch})`, async () => { - const valueTypes = [[1, 2, 3], 1, 1.5, "hello", { hello: "world" }]; - for (const valueType of valueTypes) { - const startingPositions = await getStartingPositionsForTests(consumerClient); - await sendEvents( - [ - { - body: valueType, - bodyType: "value" - } - ], - { useBatch } - ); - - const event = await receiveEvent(startingPositions); - assert.deepEqual( - event.getRawAmqpMessage().bodyType, - "value", - `Should be identified as a value: ${valueType.toString()}` - ); + async function sendEvents( + messages: AmqpAnnotatedMessage[], + { useBatch }: { useBatch: boolean } + ) { + if (!useBatch) { + return producerClient.sendBatch(messages); + } - assert.deepEqual( - event.body, - valueType, - `Deserialized body should be equal: ${valueType.toString()}` - ); - } - }); + const batch = await producerClient.createBatch(); + for (const message of messages) { + assert.isTrue(batch.tryAdd(message)); + } - it(`sequences (useBatch: ${useBatch})`, async () => { - const sequenceTypes = [ - [[1], [2], [3]], - [1, 2, 3] - ]; + return producerClient.sendBatch(batch); + } - for (const sequenceType of sequenceTypes) { + describe("round-tripping AMQP encoding/decoding", () => { + [{ useBatch: true }, { useBatch: false }].forEach(({ useBatch }) => { + it(`props (useBatch: ${useBatch})`, async () => { const startingPositions = await getStartingPositionsForTests(consumerClient); - await sendEvents( - [ - { - body: sequenceType, - bodyType: "sequence" - } - ], - { useBatch } - ); + const testMessage = getSampleAmqpAnnotatedMessage(); + await sendEvents([testMessage], { useBatch }); const event = await receiveEvent(startingPositions); - assert.deepEqual( - event.getRawAmqpMessage().bodyType, - "sequence", - `Should be identified as a value: ${sequenceType.toString()}` + should.equal(event.body, testMessage.body, "Unexpected body on the received event."); + should.equal( + event.getRawAmqpMessage().messageAnnotations!["propMsgAnnotate"], + testMessage.messageAnnotations!["propMsgAnnotate"], + "Unexpected messageAnnotations on the received event." 
);
+ assert.deepEqualExcluding(
+ event.getRawAmqpMessage(),
+ testMessage,
+ ["deliveryAnnotations", "body", "messageAnnotations", "header", "properties"],
+ "Unexpected fields on the AmqpAnnotatedMessage"
);
+ assert.deepEqualExcluding(
+ event.getRawAmqpMessage().footer!,
+ testMessage.footer!,
+ ["deliveryCount"],
+ "Unexpected footer on the AmqpAnnotatedMessage"
);
+ assert.deepEqualExcluding(
+ event.getRawAmqpMessage().properties!,
+ testMessage.properties!,
+ ["creationTime", "absoluteExpiryTime", "groupId"],
+ "Unexpected properties on the AmqpAnnotatedMessage"
);
+ assert.equal(
+ event.getRawAmqpMessage().properties!.groupId,
+ testMessage.properties!.groupId,
+ "Unexpected session-id on the AmqpAnnotatedMessage"
);
+ });

+ it(`values (useBatch: ${useBatch})`, async () => {
+ const valueTypes = [[1, 2, 3], 1, 1.5, "hello", { hello: "world" }];
+ for (const valueType of valueTypes) {
+ const startingPositions = await getStartingPositionsForTests(consumerClient);
+ await sendEvents(
+ [
+ {
+ body: valueType,
+ bodyType: "value"
+ }
+ ],
+ { useBatch }
+ );
+
+ const event = await receiveEvent(startingPositions);
+ assert.deepEqual(
+ event.getRawAmqpMessage().bodyType,
+ "value",
+ `Should be identified as a value: ${valueType.toString()}`
+ );
+
+ assert.deepEqual(
+ event.body,
+ valueType,
+ `Deserialized body should be equal: ${valueType.toString()}`
+ );
+ }
+ });

+ it(`sequences (useBatch: ${useBatch})`, async () => {
+ const sequenceTypes = [
+ [[1], [2], [3]],
+ [1, 2, 3]
+ ];
+
+ for (const sequenceType of sequenceTypes) {
+ const startingPositions = await getStartingPositionsForTests(consumerClient);
+ await sendEvents(
+ [
+ {
+ body: sequenceType,
+ bodyType: "sequence"
+ }
+ ],
+ { useBatch }
+ );
+
+ const event = await receiveEvent(startingPositions);
+ assert.deepEqual(
+ event.getRawAmqpMessage().bodyType,
+ "sequence",
+ `Should be identified as a sequence: ${sequenceType.toString()}`
-        await sendEvents(
-          [
-            {
-              body: expectedBody,
-              bodyType: expectedBodyType
-            }
-          ],
-          { useBatch }
+        assert.equal(
+          event.getRawAmqpMessage().properties!.groupId,
+          testMessage.properties!.groupId,
+          "Unexpected group-id on the AmqpAnnotatedMessage"
         );
+      });
 
-        const event = await receiveEvent(startingPositions);
-
-        assert.equal(event.getRawAmqpMessage().bodyType, expectedBodyType);
+      it(`values (useBatch: ${useBatch})`, async () => {
+        const valueTypes = [[1, 2, 3], 1, 1.5, "hello", { hello: "world" }];
+        for (const valueType of valueTypes) {
+          const startingPositions = await getStartingPositionsForTests(consumerClient);
+          await sendEvents(
+            [
+              {
+                body: valueType,
+                bodyType: "value"
+              }
+            ],
+            { useBatch }
+          );
+
+          const event = await receiveEvent(startingPositions);
+          assert.deepEqual(
+            event.getRawAmqpMessage().bodyType,
+            "value",
+            `Should be identified as a value: ${valueType.toString()}`
+          );
+
+          assert.deepEqual(
+            event.body,
+            valueType,
+            `Deserialized body should be equal: ${valueType.toString()}`
+          );
+        }
+      });
 
-        startingPositions = await getStartingPositionsForTests(consumerClient);
-        // now let's just resend it, unaltered
-        await sendEvents([event], { useBatch });
+      it(`sequences (useBatch: ${useBatch})`, async () => {
+        const sequenceTypes = [
+          [[1], [2], [3]],
+          [1, 2, 3]
+        ];
+
+        for (const sequenceType of sequenceTypes) {
+          const startingPositions = await getStartingPositionsForTests(consumerClient);
+          await sendEvents(
+            [
+              {
+                body: sequenceType,
+                bodyType: "sequence"
+              }
+            ],
+            { useBatch }
+          );
+
+          const event = await receiveEvent(startingPositions);
+          assert.deepEqual(
+            event.getRawAmqpMessage().bodyType,
+            "sequence",
+            `Should be identified as a sequence: ${sequenceType.toString()}`
+          );
+
+          assert.deepEqual(
+            event.body,
+            sequenceType,
+            `Deserialized body should be equal: ${sequenceType.toString()}`
+          );
+        }
+      });
 
-        const reencodedEvent = await receiveEvent(startingPositions);
+      it(`data (useBatch: ${useBatch})`, async () => {
+        const buff = Buffer.from("hello", "utf8");
+
+        const dataTypes = [1, 1.5, "hello", { hello: "world" }, buff, [1, 2, 3]];
+
+        for (const dataType of dataTypes) {
+          const startingPositions = await getStartingPositionsForTests(consumerClient);
+          await sendEvents(
+            [
+              {
+                body: dataType,
+                bodyType: "data"
+              }
+            ],
+            { useBatch }
+          );
+
+          const event = await receiveEvent(startingPositions);
+
+          assert.deepEqual(
+            event.getRawAmqpMessage().bodyType,
+            "data",
+            `Should be identified as data: ${dataType.toString()}`
+          );
+          assert.deepEqual(
+            event.body,
+            dataType,
+            `Deserialized body should be equal: ${dataType.toString()}`
+          );
+        }
+      });
 
-        assert.equal(reencodedEvent.getRawAmqpMessage().bodyType, expectedBodyType);
-        assert.deepEqual(reencodedEvent.body, expectedBody);
+      ([
+        ["sequence", [1, 2, 3]],
+        ["value", "hello"],
+        ["data", "hello"]
+      ] as [BodyTypes, any][]).forEach(([expectedBodyType, expectedBody]) => {
+        it(`receive ${expectedBodyType} EventData and resend (useBatch: ${useBatch})`, async () => {
+          let startingPositions = await getStartingPositionsForTests(consumerClient);
+          // if we receive an event that was encoded to a non-data section
+          // and then re-send it (again, as an EventData) we should
+          // respect it.
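These tests hinge on `bodyType`, which selects the AMQP body section used on the wire: `"data"` (opaque binary), `"value"` (a single AMQP value), or `"sequence"` (a list of values), and assert that the same `bodyType` comes back on `getRawAmqpMessage()`. Roughly, sending a message with an explicit `bodyType` looks like this; the connection string and hub name are placeholders:

```ts
import { EventHubProducerClient } from "@azure/event-hubs";

async function sendSequenceBody(connectionString: string, eventHubName: string): Promise<void> {
  const producer = new EventHubProducerClient(connectionString, eventHubName);
  try {
    // Encoded as an AMQP sequence section; a receiver should see
    // getRawAmqpMessage().bodyType === "sequence" on the resulting event.
    await producer.sendBatch([{ body: [[1], [2], [3]], bodyType: "sequence" }]);
  } finally {
    await producer.close();
  }
}
```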
+          await sendEvents(
+            [
+              {
+                body: expectedBody,
+                bodyType: expectedBodyType
+              }
+            ],
+            { useBatch }
+          );
+
+          const event = await receiveEvent(startingPositions);
+
+          assert.equal(event.getRawAmqpMessage().bodyType, expectedBodyType);
+
+          startingPositions = await getStartingPositionsForTests(consumerClient);
+          // now let's just resend it, unaltered
+          await sendEvents([event], { useBatch });
+
+          const reencodedEvent = await receiveEvent(startingPositions);
+
+          assert.equal(reencodedEvent.getRawAmqpMessage().bodyType, expectedBodyType);
+          assert.deepEqual(reencodedEvent.body, expectedBody);
+        });
       });
     });
   });
diff --git a/sdk/eventhub/event-hubs/test/public/auth.spec.ts b/sdk/eventhub/event-hubs/test/public/auth.spec.ts
index dd8fce06f393..3b9ae3168c26 100644
--- a/sdk/eventhub/event-hubs/test/public/auth.spec.ts
+++ b/sdk/eventhub/event-hubs/test/public/auth.spec.ts
@@ -11,369 +11,385 @@ import chai from "chai";
 import { AzureNamedKeyCredential, AzureSASCredential } from "@azure/core-auth";
 import { createSasTokenProvider } from "@azure/core-amqp";
 import { SinonFakeTimers, useFakeTimers } from "sinon";
+import { testWithServiceTypes } from "./utils/testWithServiceTypes";
+import { createMockServer } from "./utils/mockService";
 
 const should = chai.should();
-const env = getEnvVars();
 
 const TEST_FAILURE = "test failure";
 
-describe("Authentication via", () => {
-  const {
-    endpoint,
-    fullyQualifiedNamespace,
-    sharedAccessKey,
-    sharedAccessKeyName
-  } = parseEventHubConnectionString(env[EnvVarKeys.EVENTHUB_CONNECTION_STRING]);
-  const service = {
-    connectionString: env[EnvVarKeys.EVENTHUB_CONNECTION_STRING],
-    path: env[EnvVarKeys.EVENTHUB_NAME],
-    endpoint: endpoint.replace(/\/+$/, "")
-  };
-
-  before(() => {
-    should.exist(
-      env[EnvVarKeys.EVENTHUB_CONNECTION_STRING],
-      "define EVENTHUB_CONNECTION_STRING in your environment before running integration tests."
-    );
-    should.exist(
-      env[EnvVarKeys.EVENTHUB_NAME],
-      "define EVENTHUB_NAME in your environment before running integration tests."
-    );
-  });
+testWithServiceTypes((serviceVersion, onVersions) => {
+  const env = getEnvVars();
+  if (serviceVersion === "mock") {
+    let service: ReturnType<typeof createMockServer>;
+    before("Starting mock service", () => {
+      service = createMockServer();
+      return service.start();
+    });
 
-  let clock: SinonFakeTimers;
-  beforeEach("setup new space-time continuum", () => {
-    clock = useFakeTimers({
-      now: new Date(),
-      shouldAdvanceTime: true
+    after("Stopping mock service", () => {
+      return service?.stop();
+    });
+  }
+
+  onVersions(["live"]).describe("Authentication via", () => {
+    const {
+      endpoint,
+      fullyQualifiedNamespace,
+      sharedAccessKey,
+      sharedAccessKeyName
+    } = parseEventHubConnectionString(env[EnvVarKeys.EVENTHUB_CONNECTION_STRING]);
+    const service = {
+      connectionString: env[EnvVarKeys.EVENTHUB_CONNECTION_STRING],
+      path: env[EnvVarKeys.EVENTHUB_NAME],
+      endpoint: endpoint.replace(/\/+$/, "")
+    };
+
+    before(() => {
+      should.exist(
+        env[EnvVarKeys.EVENTHUB_CONNECTION_STRING],
+        "define EVENTHUB_CONNECTION_STRING in your environment before running integration tests."
+      );
+      should.exist(
+        env[EnvVarKeys.EVENTHUB_NAME],
+        "define EVENTHUB_NAME in your environment before running integration tests."
+ ); }); - }); - afterEach("returning back to current space-time variant", () => { - clock.restore(); - }); + let clock: SinonFakeTimers; + beforeEach("setup new space-time continuum", () => { + clock = useFakeTimers({ + now: new Date(), + shouldAdvanceTime: true + }); + }); - describe("AzureNamedKeyCredential", () => { - describe("supports key rotation", () => { - it("EventHubConsumerClient $management calls", async () => { - const namedKeyCredential = new AzureNamedKeyCredential( - sharedAccessKeyName!, - sharedAccessKey! - ); - - const consumerClient = new EventHubConsumerClient( - "$Default", - fullyQualifiedNamespace, - service.path, - namedKeyCredential - ); - - const properties = await consumerClient.getEventHubProperties(); - should.exist(properties); - - // Rotate credential to invalid value. - namedKeyCredential.update("foo", "bar"); - try { - await consumerClient.getEventHubProperties(); - throw new Error(TEST_FAILURE); - } catch (err) { - should.equal(err.code, "UnauthorizedError"); - } + afterEach("returning back to current space-time variant", () => { + clock.restore(); + }); - // Rotate credential to valid value. - namedKeyCredential.update(sharedAccessKeyName!, sharedAccessKey!); - await consumerClient.getEventHubProperties(); - should.exist(properties); + describe("AzureNamedKeyCredential", () => { + describe("supports key rotation", () => { + it("EventHubConsumerClient $management calls", async () => { + const namedKeyCredential = new AzureNamedKeyCredential( + sharedAccessKeyName!, + sharedAccessKey! + ); - return consumerClient.close(); - }); + const consumerClient = new EventHubConsumerClient( + "$Default", + fullyQualifiedNamespace, + service.path, + namedKeyCredential + ); - it("EventHubConsumerClient receive calls", async () => { - const namedKeyCredential = new AzureNamedKeyCredential( - sharedAccessKeyName!, - sharedAccessKey! - ); - - const consumerClient = new EventHubConsumerClient( - "$Default", - fullyQualifiedNamespace, - service.path, - namedKeyCredential, - { - retryOptions: { - maxRetries: 0 - } + const properties = await consumerClient.getEventHubProperties(); + should.exist(properties); + + // Rotate credential to invalid value. + namedKeyCredential.update("foo", "bar"); + try { + await consumerClient.getEventHubProperties(); + throw new Error(TEST_FAILURE); + } catch (err) { + should.equal(err.code, "UnauthorizedError"); } - ); - - await new Promise((resolve, reject) => { - // My attempt at defining the order of operations I expect to see. - const steps: Array<(...args: any[]) => void> = [ - // 1: wait for a `processEvents` to be called, then rotate the credentials to an invalid value and fast forward the clock! - (events: []) => { - if (!Array.isArray(events)) { - reject(new Error("Step 1 failed. Expected to see a list of events.")); - } - // Rotate credentials to invalid values and fast forward past the token refresh. - namedKeyCredential.update("foo", "bar"); - clock.tick(1000 * 60 * 45); - }, - // 2: observe another `processEvents` call. We should see this because the maxWaitTimeInSeconds is set to 5 seconds, and we fast forwarded the clock 45 minutes. - (events: []) => { - if (!Array.isArray(events)) { - reject(new Error("Step 2 failed. Expected to see a list of events.")); - } - }, - // 3: Since the token renewal has occurred, we should start seeing `UnauthorizedError` being thrown from our `processError` handler. - // Rotate the credentials back to valid values. 
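The `steps` arrays used in these receive tests are an ordered queue of expectations: every `processEvents`/`processError` invocation consumes exactly one step, so the test can script "receive, rotate credentials, observe the error, recover" as a sequence. A distilled sketch of the pattern (the real tests simply ignore callbacks that arrive after the last step; rejecting here is one stricter variant):

```ts
// Each handler invocation consumes one expected step, in order.
type Step = (arg: unknown) => void;

function stepRunner(steps: Step[], reject: (err: Error) => void): (arg: unknown) => void {
  return (arg) => {
    const step = steps.shift();
    if (step) {
      step(arg);
    } else {
      reject(new Error("Received a callback after all expected steps were consumed."));
    }
  };
}
```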
- (err: any) => { - if (err.code !== "UnauthorizedError") { - reject( - new Error(`Step 3 failed. Expected ${err.code} to equal "UnauthorizedError".`) - ); - } - // Rotate the credentials back to valid values. - namedKeyCredential.update(sharedAccessKeyName!, sharedAccessKey!); - }, - // 4: observe another `processEvents` call. - // If the credentials were still invalid, we'd expect to see `processError` thrown instead. - (events: []) => { - if (!Array.isArray(events)) { - reject(new Error("Step 4 failed. Expected to see a list of events.")); - } - resolve(); - } - ]; - consumerClient.subscribe( - "0", + // Rotate credential to valid value. + namedKeyCredential.update(sharedAccessKeyName!, sharedAccessKey!); + await consumerClient.getEventHubProperties(); + should.exist(properties); + + return consumerClient.close(); + }); + + it("EventHubConsumerClient receive calls", async () => { + const namedKeyCredential = new AzureNamedKeyCredential( + sharedAccessKeyName!, + sharedAccessKey! + ); + + const consumerClient = new EventHubConsumerClient( + "$Default", + fullyQualifiedNamespace, + service.path, + namedKeyCredential, { - async processError(err) { - const step = steps.shift(); - if (step) step(err); - }, - async processEvents(events) { - const step = steps.shift(); - if (step) step(events); + retryOptions: { + maxRetries: 0 } - }, - { - maxWaitTimeInSeconds: 5 } ); + + await new Promise((resolve, reject) => { + // My attempt at defining the order of operations I expect to see. + const steps: Array<(...args: any[]) => void> = [ + // 1: wait for a `processEvents` to be called, then rotate the credentials to an invalid value and fast forward the clock! + (events: []) => { + if (!Array.isArray(events)) { + reject(new Error("Step 1 failed. Expected to see a list of events.")); + } + // Rotate credentials to invalid values and fast forward past the token refresh. + namedKeyCredential.update("foo", "bar"); + clock.tick(1000 * 60 * 45); + }, + // 2: observe another `processEvents` call. We should see this because the maxWaitTimeInSeconds is set to 5 seconds, and we fast forwarded the clock 45 minutes. + (events: []) => { + if (!Array.isArray(events)) { + reject(new Error("Step 2 failed. Expected to see a list of events.")); + } + }, + // 3: Since the token renewal has occurred, we should start seeing `UnauthorizedError` being thrown from our `processError` handler. + // Rotate the credentials back to valid values. + (err: any) => { + if (err.code !== "UnauthorizedError") { + reject( + new Error(`Step 3 failed. Expected ${err.code} to equal "UnauthorizedError".`) + ); + } + // Rotate the credentials back to valid values. + namedKeyCredential.update(sharedAccessKeyName!, sharedAccessKey!); + }, + // 4: observe another `processEvents` call. + // If the credentials were still invalid, we'd expect to see `processError` thrown instead. + (events: []) => { + if (!Array.isArray(events)) { + reject(new Error("Step 4 failed. Expected to see a list of events.")); + } + resolve(); + } + ]; + + consumerClient.subscribe( + "0", + { + async processError(err) { + const step = steps.shift(); + if (step) step(err); + }, + async processEvents(events) { + const step = steps.shift(); + if (step) step(events); + } + }, + { + maxWaitTimeInSeconds: 5 + } + ); + }); + + return consumerClient.close(); }); - return consumerClient.close(); - }); + it("EventHubProducerClient send calls", async () => { + const namedKeyCredential = new AzureNamedKeyCredential( + sharedAccessKeyName!, + sharedAccessKey! 
+ ); - it("EventHubProducerClient send calls", async () => { - const namedKeyCredential = new AzureNamedKeyCredential( - sharedAccessKeyName!, - sharedAccessKey! - ); - - const producerClient = new EventHubProducerClient( - fullyQualifiedNamespace, - service.path, - namedKeyCredential, - { - retryOptions: { - maxRetries: 0 + const producerClient = new EventHubProducerClient( + fullyQualifiedNamespace, + service.path, + namedKeyCredential, + { + retryOptions: { + maxRetries: 0 + } } - } - ); + ); - // The 1st sendBatch is called with valid credentials, so it should succeed. - await producerClient.sendBatch([{ body: "test" }]); + // The 1st sendBatch is called with valid credentials, so it should succeed. + await producerClient.sendBatch([{ body: "test" }]); - // Rotate credential to invalid value. - namedKeyCredential.update("foo", "bar"); - // Fast forward through time to after the token refresh. - clock.tick(1000 * 60 * 45); + // Rotate credential to invalid value. + namedKeyCredential.update("foo", "bar"); + // Fast forward through time to after the token refresh. + clock.tick(1000 * 60 * 45); - try { - // This sendBatch should fail because we've updated the credential to invalid values and allowed the cbs link to refresh. - await producerClient.sendBatch([{ body: "I don't have access." }]); - throw new Error(TEST_FAILURE); - } catch (err) { - should.equal(err.code, "UnauthorizedError"); - } + try { + // This sendBatch should fail because we've updated the credential to invalid values and allowed the cbs link to refresh. + await producerClient.sendBatch([{ body: "I don't have access." }]); + throw new Error(TEST_FAILURE); + } catch (err) { + should.equal(err.code, "UnauthorizedError"); + } - // Rotate credential to valid value. - namedKeyCredential.update(sharedAccessKeyName!, sharedAccessKey!); + // Rotate credential to valid value. + namedKeyCredential.update(sharedAccessKeyName!, sharedAccessKey!); - // This last sendBatch should succeed because we've updated our credentials again. - // Notice that we didn't have to fast forward through time to move past a token refresh! - await producerClient.sendBatch([{ body: "test2" }]); + // This last sendBatch should succeed because we've updated our credentials again. + // Notice that we didn't have to fast forward through time to move past a token refresh! + await producerClient.sendBatch([{ body: "test2" }]); - return producerClient.close(); + return producerClient.close(); + }); }); }); - }); - describe("AzureSASCredential", () => { - function getSas(): string { - return createSasTokenProvider({ - sharedAccessKeyName: sharedAccessKeyName!, - sharedAccessKey: sharedAccessKey! - }).getToken(`${service.endpoint}/${service.path}`).token; - } - - describe("supports key rotation", () => { - it("EventHubConsumerClient $management calls", async () => { - const sasCredential = new AzureSASCredential(getSas()); - - const consumerClient = new EventHubConsumerClient( - "$Default", - fullyQualifiedNamespace, - service.path, - sasCredential, - { - retryOptions: { - maxRetries: 0 + describe("AzureSASCredential", () => { + function getSas(): string { + return createSasTokenProvider({ + sharedAccessKeyName: sharedAccessKeyName!, + sharedAccessKey: sharedAccessKey! 
+ }).getToken(`${service.endpoint}/${service.path}`).token; + } + + describe("supports key rotation", () => { + it("EventHubConsumerClient $management calls", async () => { + const sasCredential = new AzureSASCredential(getSas()); + + const consumerClient = new EventHubConsumerClient( + "$Default", + fullyQualifiedNamespace, + service.path, + sasCredential, + { + retryOptions: { + maxRetries: 0 + } } - } - ); + ); - const properties = await consumerClient.getEventHubProperties(); - should.exist(properties); + const properties = await consumerClient.getEventHubProperties(); + should.exist(properties); - // Rotate credential to invalid value. - sasCredential.update( - `SharedAccessSignature sr=fake&sig=foo&se=${Date.now() / 1000}&skn=FakeKey` - ); - try { + // Rotate credential to invalid value. + sasCredential.update( + `SharedAccessSignature sr=fake&sig=foo&se=${Date.now() / 1000}&skn=FakeKey` + ); + try { + await consumerClient.getEventHubProperties(); + throw new Error(TEST_FAILURE); + } catch (err) { + should.equal(err.code, "UnauthorizedError"); + } + + // Rotate credential to valid value. + sasCredential.update(getSas()); await consumerClient.getEventHubProperties(); - throw new Error(TEST_FAILURE); - } catch (err) { - should.equal(err.code, "UnauthorizedError"); - } + should.exist(properties); - // Rotate credential to valid value. - sasCredential.update(getSas()); - await consumerClient.getEventHubProperties(); - should.exist(properties); + return consumerClient.close(); + }); - return consumerClient.close(); - }); + it("EventHubConsumerClient receive calls", async () => { + const sasCredential = new AzureSASCredential(getSas()); - it("EventHubConsumerClient receive calls", async () => { - const sasCredential = new AzureSASCredential(getSas()); - - const consumerClient = new EventHubConsumerClient( - "$Default", - fullyQualifiedNamespace, - service.path, - sasCredential, - { - retryOptions: { - maxRetries: 0 - } - } - ); - - await new Promise((resolve, reject) => { - // My attempt at defining the order of operations I expect to see. - const steps: Array<(...args: any[]) => void> = [ - // 1: wait for a `processEvents` to be called, then rotate the credentials to an invalid value and fast forward the clock! - (events: []) => { - if (!Array.isArray(events)) { - reject(new Error("Step 1 failed. Expected to see a list of events.")); - } - // Rotate credentials to invalid values and fast forward past the token refresh. - sasCredential.update( - `SharedAccessSignature sr=fake&sig=foo&se=${Date.now() / 1000}&skn=FakeKey` - ); - clock.tick(1000 * 60 * 45); - }, - // 2: observe another `processEvents` call. We should see this because the maxWaitTimeInSeconds is set to 5 seconds, and we fast forwarded the clock 45 minutes. - (events: []) => { - if (!Array.isArray(events)) { - reject(new Error("Step 2 failed. Expected to see a list of events.")); + const consumerClient = new EventHubConsumerClient( + "$Default", + fullyQualifiedNamespace, + service.path, + sasCredential, + { + retryOptions: { + maxRetries: 0 } - }, - // 3: Since the token renewal has occurred, we should start seeing `UnauthorizedError` being thrown from our `processError` handler. - // Rotate the credentials back to valid values. - (err: any) => { - if (err.code !== "UnauthorizedError") { - reject( - new Error(`Step 3 failed. Expected ${err.code} to equal "UnauthorizedError".`) + } + ); + + await new Promise((resolve, reject) => { + // My attempt at defining the order of operations I expect to see. 
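`getSas()` mints a signature for the hub-scoped audience (`<endpoint>/<path>`) from the same key material the connection string carries, and `AzureSASCredential.update` then swaps signatures without rebuilding clients. A standalone sketch of the same flow; the four inputs stand in for the parsed connection-string fields:

```ts
import { AzureSASCredential } from "@azure/core-auth";
import { createSasTokenProvider } from "@azure/core-amqp";

function mintSas(endpoint: string, path: string, keyName: string, key: string): string {
  // Same helper the test uses: sign the hub-scoped audience with the shared key.
  return createSasTokenProvider({
    sharedAccessKeyName: keyName,
    sharedAccessKey: key
  }).getToken(`${endpoint}/${path}`).token;
}

const credential = new AzureSASCredential(
  mintSas("sb://example.servicebus.windows.net", "my-hub", "RootManageSharedAccessKey", "<key>")
);

// Later: rotate in place; clients pick up the new signature on their next token fetch.
credential.update(
  mintSas("sb://example.servicebus.windows.net", "my-hub", "RootManageSharedAccessKey", "<key>")
);
```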
+ const steps: Array<(...args: any[]) => void> = [ + // 1: wait for a `processEvents` to be called, then rotate the credentials to an invalid value and fast forward the clock! + (events: []) => { + if (!Array.isArray(events)) { + reject(new Error("Step 1 failed. Expected to see a list of events.")); + } + // Rotate credentials to invalid values and fast forward past the token refresh. + sasCredential.update( + `SharedAccessSignature sr=fake&sig=foo&se=${Date.now() / 1000}&skn=FakeKey` ); - } + clock.tick(1000 * 60 * 45); + }, + // 2: observe another `processEvents` call. We should see this because the maxWaitTimeInSeconds is set to 5 seconds, and we fast forwarded the clock 45 minutes. + (events: []) => { + if (!Array.isArray(events)) { + reject(new Error("Step 2 failed. Expected to see a list of events.")); + } + }, + // 3: Since the token renewal has occurred, we should start seeing `UnauthorizedError` being thrown from our `processError` handler. // Rotate the credentials back to valid values. - sasCredential.update(getSas()); - }, - // 4: observe another `processEvents` call. - // If the credentials were still invalid, we'd expect to see `processError` thrown instead. - (events: []) => { - if (!Array.isArray(events)) { - reject(new Error("Step 4 failed. Expected to see a list of events.")); + (err: any) => { + if (err.code !== "UnauthorizedError") { + reject( + new Error(`Step 3 failed. Expected ${err.code} to equal "UnauthorizedError".`) + ); + } + // Rotate the credentials back to valid values. + sasCredential.update(getSas()); + }, + // 4: observe another `processEvents` call. + // If the credentials were still invalid, we'd expect to see `processError` thrown instead. + (events: []) => { + if (!Array.isArray(events)) { + reject(new Error("Step 4 failed. Expected to see a list of events.")); + } + resolve(); } - resolve(); - } - ]; - - consumerClient.subscribe( - "0", - { - async processError(err) { - const step = steps.shift(); - if (step) step(err); + ]; + + consumerClient.subscribe( + "0", + { + async processError(err) { + const step = steps.shift(); + if (step) step(err); + }, + async processEvents(events) { + const step = steps.shift(); + if (step) step(events); + } }, - async processEvents(events) { - const step = steps.shift(); - if (step) step(events); + { + maxWaitTimeInSeconds: 5 } - }, + ); + }); + + return consumerClient.close(); + }); + + it("EventHubProducerClient send calls", async () => { + const sasCredential = new AzureSASCredential(getSas()); + + const producerClient = new EventHubProducerClient( + fullyQualifiedNamespace, + service.path, + sasCredential, { - maxWaitTimeInSeconds: 5 + retryOptions: { + maxRetries: 0 + } } ); - }); - return consumerClient.close(); - }); + // The 1st sendBatch is called with valid credentials, so it should succeed. + await producerClient.sendBatch([{ body: "test" }]); - it("EventHubProducerClient send calls", async () => { - const sasCredential = new AzureSASCredential(getSas()); - - const producerClient = new EventHubProducerClient( - fullyQualifiedNamespace, - service.path, - sasCredential, - { - retryOptions: { - maxRetries: 0 - } + // Rotate credential to invalid value. + sasCredential.update( + `SharedAccessSignature sr=fake&sig=foo&se=${Date.now() / 1000}&skn=FakeKey` + ); + // Fast forward through time to after the token refresh. + clock.tick(1000 * 60 * 45); + + try { + // This sendBatch should fail because we've updated the credential to invalid values and allowed the cbs link to refresh. 
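The 45-minute `clock.tick` jumps matter because the client refreshes its CBS token on a timer; sinon's fake timers with `shouldAdvanceTime: true` keep real I/O flowing while letting the test leap past the next scheduled refresh. The clock scaffolding, in isolation:

```ts
import { SinonFakeTimers, useFakeTimers } from "sinon";

let clock: SinonFakeTimers;

beforeEach(() => {
  // shouldAdvanceTime keeps timers ticking normally so AMQP I/O still works;
  // tick() then lets the test jump ahead on demand.
  clock = useFakeTimers({ now: new Date(), shouldAdvanceTime: true });
});

afterEach(() => {
  clock.restore();
});

it("forces a token refresh", async () => {
  // ...do work with valid credentials...
  clock.tick(1000 * 60 * 45); // leap past the scheduled CBS token refresh
  // ...subsequent calls now re-authenticate with whatever the credential holds...
});
```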
+ await producerClient.sendBatch([{ body: "I don't have access." }]); + throw new Error(TEST_FAILURE); + } catch (err) { + should.equal(err.code, "UnauthorizedError"); } - ); - - // The 1st sendBatch is called with valid credentials, so it should succeed. - await producerClient.sendBatch([{ body: "test" }]); - - // Rotate credential to invalid value. - sasCredential.update( - `SharedAccessSignature sr=fake&sig=foo&se=${Date.now() / 1000}&skn=FakeKey` - ); - // Fast forward through time to after the token refresh. - clock.tick(1000 * 60 * 45); - - try { - // This sendBatch should fail because we've updated the credential to invalid values and allowed the cbs link to refresh. - await producerClient.sendBatch([{ body: "I don't have access." }]); - throw new Error(TEST_FAILURE); - } catch (err) { - should.equal(err.code, "UnauthorizedError"); - } - - // Rotate credential to valid value. - sasCredential.update(getSas()); - - // This last sendBatch should succeed because we've updated our credentials again. - // Notice that we didn't have to fast forward through time to move past a token refresh! - await producerClient.sendBatch([{ body: "test2" }]); - - return producerClient.close(); + + // Rotate credential to valid value. + sasCredential.update(getSas()); + + // This last sendBatch should succeed because we've updated our credentials again. + // Notice that we didn't have to fast forward through time to move past a token refresh! + await producerClient.sendBatch([{ body: "test2" }]); + + return producerClient.close(); + }); }); }); }); diff --git a/sdk/eventhub/event-hubs/test/public/cancellation.spec.ts b/sdk/eventhub/event-hubs/test/public/cancellation.spec.ts index 38074e548b34..00d2169d6849 100644 --- a/sdk/eventhub/event-hubs/test/public/cancellation.spec.ts +++ b/sdk/eventhub/event-hubs/test/public/cancellation.spec.ts @@ -6,165 +6,181 @@ import chai from "chai"; const should = chai.should(); import chaiAsPromised from "chai-as-promised"; import { EventHubConsumerClient, EventHubProducerClient } from "../../src"; +import { createMockServer } from "./utils/mockService"; chai.use(chaiAsPromised); import { EnvVarKeys, getEnvVars } from "./utils/testUtils"; -const env = getEnvVars(); - -describe("Cancellation via AbortSignal", () => { - const service = { - connectionString: env[EnvVarKeys.EVENTHUB_CONNECTION_STRING], - path: env[EnvVarKeys.EVENTHUB_NAME] - }; - before("validate environment", () => { - should.exist( - env[EnvVarKeys.EVENTHUB_CONNECTION_STRING], - "define EVENTHUB_CONNECTION_STRING in your environment before running integration tests." - ); - should.exist( - env[EnvVarKeys.EVENTHUB_NAME], - "define EVENTHUB_NAME in your environment before running integration tests." 
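The cancellation suite below exercises every client method against two signal shapes: one aborted before the call starts and one aborted from a zero-delay timer while the call is in flight. Factored out, assuming Node's global `AbortController`:

```ts
// Aborted before the operation starts: should fail fast with an AbortError.
function preAbortedSignal(): AbortSignal {
  const controller = new AbortController();
  controller.abort();
  return controller.signal;
}

// Aborted asynchronously: the operation begins, then gets cancelled.
function abortedAfterTimeout(): AbortSignal {
  const controller = new AbortController();
  setTimeout(() => controller.abort(), 0);
  return controller.signal;
}

// Usage: await consumerClient.getEventHubProperties({ abortSignal: preAbortedSignal() })
// is expected to reject with err.name === "AbortError".
```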
- ); - }); - - const TEST_FAILURE = "Test failure"; +import { testWithServiceTypes } from "./utils/testWithServiceTypes"; + +testWithServiceTypes((serviceVersion) => { + const env = getEnvVars(); + if (serviceVersion === "mock") { + let service: ReturnType; + before("Starting mock service", () => { + service = createMockServer(); + return service.start(); + }); - const cancellationCases = [ - { - type: "pre-aborted", - getSignal() { - const controller = new AbortController(); - controller.abort(); - return controller.signal; - } - }, - { - type: "aborted after timeout", - getSignal() { - const controller = new AbortController(); - setTimeout(() => { - controller.abort(); - }, 0); - return controller.signal; - } - } - ]; - - describe("EventHubConsumerClient", () => { - let consumerClient: EventHubConsumerClient; - beforeEach("instantiate EventHubConsumerClient", () => { - consumerClient = new EventHubConsumerClient( - EventHubConsumerClient.defaultConsumerGroupName, - service.connectionString, - service.path + after("Stopping mock service", () => { + return service?.stop(); + }); + } + + describe("Cancellation via AbortSignal", () => { + const service = { + connectionString: env[EnvVarKeys.EVENTHUB_CONNECTION_STRING], + path: env[EnvVarKeys.EVENTHUB_NAME] + }; + before("validate environment", () => { + should.exist( + env[EnvVarKeys.EVENTHUB_CONNECTION_STRING], + "define EVENTHUB_CONNECTION_STRING in your environment before running integration tests." + ); + should.exist( + env[EnvVarKeys.EVENTHUB_NAME], + "define EVENTHUB_NAME in your environment before running integration tests." ); }); - afterEach("close EventHubConsumerClient", () => { - return consumerClient.close(); - }); + const TEST_FAILURE = "Test failure"; - for (const { type: caseType, getSignal } of cancellationCases) { - it(`getEventHubProperties supports cancellation (${caseType})`, async () => { - const abortSignal = getSignal(); - try { - await consumerClient.getEventHubProperties({ abortSignal }); - throw new Error(TEST_FAILURE); - } catch (err) { - should.equal(err.name, "AbortError"); - should.equal(err.message, "The operation was aborted."); + const cancellationCases = [ + { + type: "pre-aborted", + getSignal() { + const controller = new AbortController(); + controller.abort(); + return controller.signal; } - }); - - it(`getPartitionIds supports cancellation (${caseType})`, async () => { - const abortSignal = getSignal(); - try { - await consumerClient.getPartitionIds({ abortSignal }); - throw new Error(TEST_FAILURE); - } catch (err) { - should.equal(err.name, "AbortError"); - should.equal(err.message, "The operation was aborted."); + }, + { + type: "aborted after timeout", + getSignal() { + const controller = new AbortController(); + setTimeout(() => { + controller.abort(); + }, 0); + return controller.signal; } + } + ]; + + describe("EventHubConsumerClient", () => { + let consumerClient: EventHubConsumerClient; + beforeEach("instantiate EventHubConsumerClient", () => { + consumerClient = new EventHubConsumerClient( + EventHubConsumerClient.defaultConsumerGroupName, + service.connectionString, + service.path + ); }); - it(`getPartitionProperties supports cancellation (${caseType})`, async () => { - const abortSignal = getSignal(); - try { - await consumerClient.getPartitionProperties("0", { abortSignal }); - throw new Error(TEST_FAILURE); - } catch (err) { - should.equal(err.name, "AbortError"); - should.equal(err.message, "The operation was aborted."); - } + afterEach("close EventHubConsumerClient", () => { + return 
consumerClient.close(); }); - } - }); - describe("EventHubProducerClient", () => { - let producerClient: EventHubProducerClient; - beforeEach("instantiate EventHubProducerClient", () => { - producerClient = new EventHubProducerClient(service.connectionString, service.path); - }); - - afterEach("close EventHubProducerClient", () => { - return producerClient.close(); + for (const { type: caseType, getSignal } of cancellationCases) { + it(`getEventHubProperties supports cancellation (${caseType})`, async () => { + const abortSignal = getSignal(); + try { + await consumerClient.getEventHubProperties({ abortSignal }); + throw new Error(TEST_FAILURE); + } catch (err) { + should.equal(err.name, "AbortError"); + should.equal(err.message, "The operation was aborted."); + } + }); + + it(`getPartitionIds supports cancellation (${caseType})`, async () => { + const abortSignal = getSignal(); + try { + await consumerClient.getPartitionIds({ abortSignal }); + throw new Error(TEST_FAILURE); + } catch (err) { + should.equal(err.name, "AbortError"); + should.equal(err.message, "The operation was aborted."); + } + }); + + it(`getPartitionProperties supports cancellation (${caseType})`, async () => { + const abortSignal = getSignal(); + try { + await consumerClient.getPartitionProperties("0", { abortSignal }); + throw new Error(TEST_FAILURE); + } catch (err) { + should.equal(err.name, "AbortError"); + should.equal(err.message, "The operation was aborted."); + } + }); + } }); - for (const { type: caseType, getSignal } of cancellationCases) { - it(`getEventHubProperties supports cancellation (${caseType})`, async () => { - const abortSignal = getSignal(); - try { - await producerClient.getEventHubProperties({ abortSignal }); - throw new Error(TEST_FAILURE); - } catch (err) { - should.equal(err.name, "AbortError"); - should.equal(err.message, "The operation was aborted."); - } - }); - - it(`getPartitionIds supports cancellation (${caseType})`, async () => { - const abortSignal = getSignal(); - try { - await producerClient.getPartitionIds({ abortSignal }); - throw new Error(TEST_FAILURE); - } catch (err) { - should.equal(err.name, "AbortError"); - should.equal(err.message, "The operation was aborted."); - } + describe("EventHubProducerClient", () => { + let producerClient: EventHubProducerClient; + beforeEach("instantiate EventHubProducerClient", () => { + producerClient = new EventHubProducerClient(service.connectionString, service.path); }); - it(`getPartitionProperties supports cancellation (${caseType})`, async () => { - const abortSignal = getSignal(); - try { - await producerClient.getPartitionProperties("0", { abortSignal }); - throw new Error(TEST_FAILURE); - } catch (err) { - should.equal(err.name, "AbortError"); - should.equal(err.message, "The operation was aborted."); - } + afterEach("close EventHubProducerClient", () => { + return producerClient.close(); }); - it(`createBatch supports cancellation (${caseType})`, async () => { - const abortSignal = getSignal(); - try { - await producerClient.createBatch({ abortSignal }); - throw new Error(TEST_FAILURE); - } catch (err) { - should.equal(err.name, "AbortError"); - should.equal(err.message, "The operation was aborted."); - } - }); - - it(`sendBatch supports cancellation (${caseType})`, async () => { - const abortSignal = getSignal(); - try { - await producerClient.sendBatch([{ body: "unsung hero" }], { abortSignal }); - throw new Error(TEST_FAILURE); - } catch (err) { - should.equal(err.name, "AbortError"); - should.equal(err.message, "The operation was 
aborted."); - } - }); - } + for (const { type: caseType, getSignal } of cancellationCases) { + it(`getEventHubProperties supports cancellation (${caseType})`, async () => { + const abortSignal = getSignal(); + try { + await producerClient.getEventHubProperties({ abortSignal }); + throw new Error(TEST_FAILURE); + } catch (err) { + should.equal(err.name, "AbortError"); + should.equal(err.message, "The operation was aborted."); + } + }); + + it(`getPartitionIds supports cancellation (${caseType})`, async () => { + const abortSignal = getSignal(); + try { + await producerClient.getPartitionIds({ abortSignal }); + throw new Error(TEST_FAILURE); + } catch (err) { + should.equal(err.name, "AbortError"); + should.equal(err.message, "The operation was aborted."); + } + }); + + it(`getPartitionProperties supports cancellation (${caseType})`, async () => { + const abortSignal = getSignal(); + try { + await producerClient.getPartitionProperties("0", { abortSignal }); + throw new Error(TEST_FAILURE); + } catch (err) { + should.equal(err.name, "AbortError"); + should.equal(err.message, "The operation was aborted."); + } + }); + + it(`createBatch supports cancellation (${caseType})`, async () => { + const abortSignal = getSignal(); + try { + await producerClient.createBatch({ abortSignal }); + throw new Error(TEST_FAILURE); + } catch (err) { + should.equal(err.name, "AbortError"); + should.equal(err.message, "The operation was aborted."); + } + }); + + it(`sendBatch supports cancellation (${caseType})`, async () => { + const abortSignal = getSignal(); + try { + await producerClient.sendBatch([{ body: "unsung hero" }], { abortSignal }); + throw new Error(TEST_FAILURE); + } catch (err) { + should.equal(err.name, "AbortError"); + should.equal(err.message, "The operation was aborted."); + } + }); + } + }); }); }); diff --git a/sdk/eventhub/event-hubs/test/public/eventData.spec.ts b/sdk/eventhub/event-hubs/test/public/eventData.spec.ts index 16105d656921..5b00ebf02e7d 100644 --- a/sdk/eventhub/event-hubs/test/public/eventData.spec.ts +++ b/sdk/eventhub/event-hubs/test/public/eventData.spec.ts @@ -14,119 +14,134 @@ import { ReceivedEventData, Subscription } from "../../src"; +import { testWithServiceTypes } from "./utils/testWithServiceTypes"; +import { createMockServer } from "./utils/mockService"; const should = chai.should(); chai.use(chaiAsPromised); chai.use(chaiExclude); -const env = getEnvVars(); +testWithServiceTypes((serviceVersion) => { + const env = getEnvVars(); + if (serviceVersion === "mock") { + let service: ReturnType; + before("Starting mock service", () => { + service = createMockServer(); + return service.start(); + }); -describe("EventData", function(): void { - let producerClient: EventHubProducerClient; - let consumerClient: EventHubConsumerClient; - const service = { - connectionString: env[EnvVarKeys.EVENTHUB_CONNECTION_STRING], - path: env[EnvVarKeys.EVENTHUB_NAME] - }; + after("Stopping mock service", () => { + return service?.stop(); + }); + } - before("validate environment", function(): void { - should.exist( - env[EnvVarKeys.EVENTHUB_CONNECTION_STRING], - "define EVENTHUB_CONNECTION_STRING in your environment before running integration tests." - ); - should.exist( - env[EnvVarKeys.EVENTHUB_NAME], - "define EVENTHUB_NAME in your environment before running integration tests." 
- ); - }); + describe("EventData", function(): void { + let producerClient: EventHubProducerClient; + let consumerClient: EventHubConsumerClient; + const service = { + connectionString: env[EnvVarKeys.EVENTHUB_CONNECTION_STRING], + path: env[EnvVarKeys.EVENTHUB_NAME] + }; - beforeEach(async () => { - producerClient = new EventHubProducerClient(service.connectionString, service.path); - consumerClient = new EventHubConsumerClient( - EventHubConsumerClient.defaultConsumerGroupName, - service.connectionString, - service.path - ); - }); + before("validate environment", function(): void { + should.exist( + env[EnvVarKeys.EVENTHUB_CONNECTION_STRING], + "define EVENTHUB_CONNECTION_STRING in your environment before running integration tests." + ); + should.exist( + env[EnvVarKeys.EVENTHUB_NAME], + "define EVENTHUB_NAME in your environment before running integration tests." + ); + }); - afterEach("close the connection", async function(): Promise { - await producerClient.close(); - await consumerClient.close(); - }); + beforeEach(async () => { + producerClient = new EventHubProducerClient(service.connectionString, service.path); + consumerClient = new EventHubConsumerClient( + EventHubConsumerClient.defaultConsumerGroupName, + service.connectionString, + service.path + ); + }); - function getSampleEventData(): EventData { - const randomTag = Math.random().toString(); + afterEach("close the connection", async function(): Promise { + await producerClient.close(); + await consumerClient.close(); + }); - return { - body: `message body ${randomTag}`, - contentEncoding: "application/json; charset=utf-8", - correlationId: randomTag, - messageId: v4() - } as EventData; - } + function getSampleEventData(): EventData { + const randomTag = Math.random().toString(); - /** - * Helper function that will receive a single event that comes after the starting positions. - * - * Note: Call this after sending a single event to Event Hubs to validate - * @internal - */ - async function receiveEvent(startingPositions: { - [partitionId: string]: EventPosition; - }): Promise { - return new Promise((resolve, reject) => { - const subscription: Subscription = consumerClient.subscribe( - { - async processError(err) { - reject(err); - return subscription.close(); - }, - async processEvents(events) { - if (events.length) { - resolve(events[0]); + return { + body: `message body ${randomTag}`, + contentEncoding: "application/json; charset=utf-8", + correlationId: randomTag, + messageId: v4() + } as EventData; + } + + /** + * Helper function that will receive a single event that comes after the starting positions. 
+ * + * Note: Call this after sending a single event to Event Hubs to validate + * @internal + */ + async function receiveEvent(startingPositions: { + [partitionId: string]: EventPosition; + }): Promise { + return new Promise((resolve, reject) => { + const subscription: Subscription = consumerClient.subscribe( + { + async processError(err) { + reject(err); return subscription.close(); + }, + async processEvents(events) { + if (events.length) { + resolve(events[0]); + return subscription.close(); + } } + }, + { + startPosition: startingPositions } - }, - { - startPosition: startingPositions - } - ); - }); - } + ); + }); + } - describe("round-tripping AMQP encoding/decoding", () => { - it(`props`, async () => { - const startingPositions = await getStartingPositionsForTests(consumerClient); - const testEvent = getSampleEventData(); - await producerClient.sendBatch([testEvent]); + describe("round-tripping AMQP encoding/decoding", () => { + it(`props`, async () => { + const startingPositions = await getStartingPositionsForTests(consumerClient); + const testEvent = getSampleEventData(); + await producerClient.sendBatch([testEvent]); - const event = await receiveEvent(startingPositions); - should.equal(event.body, testEvent.body, "Unexpected body on the received event."); - should.equal( - event.contentType, - testEvent.contentType, - "Unexpected contentType on the received event." - ); - should.equal( - event.correlationId, - testEvent.correlationId, - "Unexpected correlationId on the received event." - ); - should.equal( - event.messageId, - testEvent.messageId, - "Unexpected messageId on the received event." - ); - }); + const event = await receiveEvent(startingPositions); + should.equal(event.body, testEvent.body, "Unexpected body on the received event."); + should.equal( + event.contentType, + testEvent.contentType, + "Unexpected contentType on the received event." + ); + should.equal( + event.correlationId, + testEvent.correlationId, + "Unexpected correlationId on the received event." + ); + should.equal( + event.messageId, + testEvent.messageId, + "Unexpected messageId on the received event." 
+ ); + }); - it(`null body`, async () => { - const startingPositions = await getStartingPositionsForTests(consumerClient); - const testEvent: EventData = { body: null }; - await producerClient.sendBatch([testEvent]); + it(`null body`, async () => { + const startingPositions = await getStartingPositionsForTests(consumerClient); + const testEvent: EventData = { body: null }; + await producerClient.sendBatch([testEvent]); - const event = await receiveEvent(startingPositions); - should.equal(event.body, testEvent.body, "Unexpected body on the received event."); + const event = await receiveEvent(startingPositions); + should.equal(event.body, testEvent.body, "Unexpected body on the received event."); + }); }); }); }); diff --git a/sdk/eventhub/event-hubs/test/public/eventHubConsumerClient.spec.ts b/sdk/eventhub/event-hubs/test/public/eventHubConsumerClient.spec.ts index df503eeca73d..d037f84e70e6 100644 --- a/sdk/eventhub/event-hubs/test/public/eventHubConsumerClient.spec.ts +++ b/sdk/eventhub/event-hubs/test/public/eventHubConsumerClient.spec.ts @@ -16,1261 +16,1285 @@ import { } from "../../src"; import debugModule from "debug"; const debug = debugModule("azure:event-hubs:receiver-spec"); -import { EnvVarKeys, getEnvVars, loopUntil, getStartingPositionsForTests } from "./utils/testUtils"; +import { EnvVarKeys, loopUntil, getStartingPositionsForTests, getEnvVars } from "./utils/testUtils"; import chai from "chai"; import { ReceivedMessagesTester } from "./utils/receivedMessagesTester"; import { LogTester } from "./utils/logHelpers"; import { TestInMemoryCheckpointStore } from "./utils/testInMemoryCheckpointStore"; +import { testWithServiceTypes } from "./utils/testWithServiceTypes"; +import { createMockServer } from "./utils/mockService"; const should = chai.should(); -const env = getEnvVars(); - -describe("EventHubConsumerClient", () => { - const service = { - connectionString: env[EnvVarKeys.EVENTHUB_CONNECTION_STRING], - path: env[EnvVarKeys.EVENTHUB_NAME] - }; - - let producerClient: EventHubProducerClient; - let consumerClient: EventHubConsumerClient; - let partitionIds: string[]; - - before(() => { - should.exist( - env[EnvVarKeys.EVENTHUB_CONNECTION_STRING], - "define EVENTHUB_CONNECTION_STRING in your environment before running integration tests." - ); - should.exist( - env[EnvVarKeys.EVENTHUB_NAME], - "define EVENTHUB_NAME in your environment before running integration tests." 
- ); - }); - - beforeEach("Creating the clients", async () => { - producerClient = new EventHubProducerClient(service.connectionString, service.path); - consumerClient = new EventHubConsumerClient( - EventHubConsumerClient.defaultConsumerGroupName, - service.connectionString, - service.path - ); - partitionIds = await producerClient.getPartitionIds({}); - }); - - afterEach("Closing the clients", () => { - return Promise.all([producerClient.close(), consumerClient.close()]); - }); - - describe("functional tests", () => { - let clients: EventHubConsumerClient[]; - let subscriptions: Subscription[]; - - beforeEach(() => { - // ensure we have at least 2 partitions - partitionIds.length.should.gte(2); - - clients = []; - subscriptions = []; + +testWithServiceTypes((serviceVersion) => { + const env = getEnvVars(); + if (serviceVersion === "mock") { + let service: ReturnType; + before("Starting mock service", () => { + service = createMockServer(); + return service.start(); }); - afterEach(async () => { - for (const subscription of subscriptions) { - await subscription.close(); - } + after("Stopping mock service", () => { + return service?.stop(); + }); + } + + describe("EventHubConsumerClient", () => { + const service = { + connectionString: env[EnvVarKeys.EVENTHUB_CONNECTION_STRING], + path: env[EnvVarKeys.EVENTHUB_NAME] + }; + + let producerClient: EventHubProducerClient; + let consumerClient: EventHubConsumerClient; + let partitionIds: string[]; + + before(() => { + should.exist( + env[EnvVarKeys.EVENTHUB_CONNECTION_STRING], + "define EVENTHUB_CONNECTION_STRING in your environment before running integration tests." + ); + should.exist( + env[EnvVarKeys.EVENTHUB_NAME], + "define EVENTHUB_NAME in your environment before running integration tests." + ); + }); - await Promise.all(clients.map((client) => client.close())); - clients = []; + beforeEach("Creating the clients", async () => { + producerClient = new EventHubProducerClient(service.connectionString, service.path); + consumerClient = new EventHubConsumerClient( + EventHubConsumerClient.defaultConsumerGroupName, + service.connectionString, + service.path + ); + partitionIds = await producerClient.getPartitionIds({}); }); - describe("#close()", function(): void { - it("stops any actively running subscriptions", async function(): Promise { - const client = new EventHubConsumerClient( - EventHubConsumerClient.defaultConsumerGroupName, - service.connectionString, - service.path - ); + afterEach("Closing the clients", () => { + return Promise.all([producerClient.close(), consumerClient.close()]); + }); - // Spin up multiple subscriptions. - for (const partitionId of partitionIds) { - subscriptions.push( - client.subscribe(partitionId, { - async processError() { - /* no-op for test */ - }, - async processEvents() { - /* no-op for test */ - } - }) - ); - } + describe("functional tests", () => { + let clients: EventHubConsumerClient[]; + let subscriptions: Subscription[]; - // Assert that the subscriptions are all running. - for (const subscription of subscriptions) { - subscription.isRunning.should.equal(true, "The subscription should be running."); - } + beforeEach(() => { + // ensure we have at least 2 partitions + partitionIds.length.should.gte(2); - // Stop the client, which should stop the subscriptions. - await client.close(); + clients = []; + subscriptions = []; + }); - // Assert that the subscriptions are all not running. 
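Every public spec in this PR gets the same wrapper: `testWithServiceTypes` parameterizes the suite by service flavor, and when the flavor is `"mock"` the file boots the local mocked Event Hubs for its whole lifetime. The recurring skeleton, extracted:

```ts
import { testWithServiceTypes } from "./utils/testWithServiceTypes";
import { createMockServer } from "./utils/mockService";

testWithServiceTypes((serviceVersion) => {
  if (serviceVersion === "mock") {
    let service: ReturnType<typeof createMockServer>;

    before("Starting mock service", () => {
      service = createMockServer();
      return service.start();
    });

    after("Stopping mock service", () => {
      return service?.stop();
    });
  }

  // describe(...) blocks with the actual tests follow here.
});
```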
+ afterEach(async () => { for (const subscription of subscriptions) { - subscription.isRunning.should.equal(false, "The subscription should not be running."); + await subscription.close(); } - client["_subscriptions"].size.should.equal( - 0, - "Some dangling subscriptions are still hanging around!" - ); + await Promise.all(clients.map((client) => client.close())); + clients = []; }); - it("gracefully stops running subscriptions", async function(): Promise { - const client = new EventHubConsumerClient( - EventHubConsumerClient.defaultConsumerGroupName, - service.connectionString, - service.path - ); - - const startingPositions = await getStartingPositionsForTests(client); + describe("#close()", function(): void { + it("stops any actively running subscriptions", async function(): Promise { + const client = new EventHubConsumerClient( + EventHubConsumerClient.defaultConsumerGroupName, + service.connectionString, + service.path + ); - let waitForInitializeResolver: () => void; - const waitForInitialize = new Promise( - (resolve) => (waitForInitializeResolver = resolve) - ); - let waitForCloseResolver: (reason: CloseReason) => void; - const waitForClose = new Promise( - (resolve) => (waitForCloseResolver = resolve) - ); - let unexpectedError: Error | undefined; - let eventsWereReceived = false; - - const subscription = client.subscribe( - partitionIds[0], - { - async processInitialize() { - waitForInitializeResolver(); - }, - async processError(err) { - unexpectedError = err; - }, - async processEvents() { - eventsWereReceived = true; - }, - async processClose(reason) { - waitForCloseResolver(reason); - } - }, - { - startPosition: startingPositions + // Spin up multiple subscriptions. + for (const partitionId of partitionIds) { + subscriptions.push( + client.subscribe(partitionId, { + async processError() { + /* no-op for test */ + }, + async processEvents() { + /* no-op for test */ + } + }) + ); } - ); - - // Assert that the subscription is running. - subscription.isRunning.should.equal(true, "The subscription should be running."); - - // Wait until we see a `processInitialze` handler get invoked. - // This lets us know that the subscription is starting to read from a partition. - await waitForInitialize; - - // Stop the client, which should stop the subscriptions. - await client.close(); - - // Ensure that the `processClose` handler was invoked with the expected reason. - const closeReason = await waitForClose; - closeReason.should.equal( - CloseReason.Shutdown, - "Subscription closed for an unexpected reason." - ); - - // Ensure no errors were thrown. - should.not.exist(unexpectedError, "Did not expect to observe an error."); - // Ensure the event handler wasn't called. - eventsWereReceived.should.equal(false, "Should not have received events."); + // Assert that the subscriptions are all running. + for (const subscription of subscriptions) { + subscription.isRunning.should.equal(true, "The subscription should be running."); + } - // Assert that the subscription is not running. - subscription.isRunning.should.equal(false, "The subscription should not be running."); + // Stop the client, which should stop the subscriptions. + await client.close(); - client["_subscriptions"].size.should.equal( - 0, - "Some dangling subscriptions are still hanging around!" - ); - }); - }); + // Assert that the subscriptions are all not running. 
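The `#close()` tests rely on three observable facts: `subscribe` returns a `Subscription` whose `isRunning` is true while the pump is active, closing the client flips that flag on every subscription it owns, and the client's internal `_subscriptions` set drains to zero. In outline:

```ts
import { EventHubConsumerClient } from "@azure/event-hubs";

async function demonstrateClose(client: EventHubConsumerClient, partitionId: string): Promise<void> {
  const subscription = client.subscribe(partitionId, {
    async processEvents() {
      /* no-op */
    },
    async processError() {
      /* no-op */
    }
  });

  // true while the receive pump is active
  console.log(subscription.isRunning);

  // Closing the client tears down every subscription it created...
  await client.close();

  // ...so the flag flips without an explicit subscription.close().
  console.log(subscription.isRunning);
}
```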
+ for (const subscription of subscriptions) { + subscription.isRunning.should.equal(false, "The subscription should not be running."); + } - describe("Reinitialize partition processing after error", function(): void { - it("when subscribed to single partition", async function(): Promise { - const partitionId = "0"; - const consumerClient1 = new EventHubConsumerClient( - EventHubConsumerClient.defaultConsumerGroupName, - service.connectionString, - service.path - ); - const consumerClient2 = new EventHubConsumerClient( - EventHubConsumerClient.defaultConsumerGroupName, - service.connectionString, - service.path - ); + client["_subscriptions"].size.should.equal( + 0, + "Some dangling subscriptions are still hanging around!" + ); + }); - clients.push(consumerClient1, consumerClient2); - let subscription2: Subscription | undefined; - const subscriptionHandlers2: SubscriptionEventHandlers = { - async processError() { - /* no-op */ - }, - async processEvents() { - // stop this subscription since it already should have forced the 1st subscription to have an error. - await subscription2!.close(); - } - }; + it("gracefully stops running subscriptions", async function(): Promise { + const client = new EventHubConsumerClient( + EventHubConsumerClient.defaultConsumerGroupName, + service.connectionString, + service.path + ); - // keep track of the handlers called on subscription 1 - const handlerCalls = { - initialize: 0, - close: 0 - }; + const startingPositions = await getStartingPositionsForTests(client); - const subscription1 = consumerClient1.subscribe( - partitionId, - { - async processError() { - /* no-op */ - }, - async processEvents() { - if (!handlerCalls.close) { - // start the 2nd subscription that will kick the 1st subscription off - subscription2 = consumerClient2.subscribe(partitionId, subscriptionHandlers2, { - ownerLevel: 1, - maxBatchSize: 1, - maxWaitTimeInSeconds: 1 - }); - } else { - // stop this subscription, we know close was called so we've restarted - await subscription1.close(); + let waitForInitializeResolver: () => void; + const waitForInitialize = new Promise( + (resolve) => (waitForInitializeResolver = resolve) + ); + let waitForCloseResolver: (reason: CloseReason) => void; + const waitForClose = new Promise( + (resolve) => (waitForCloseResolver = resolve) + ); + let unexpectedError: Error | undefined; + let eventsWereReceived = false; + + const subscription = client.subscribe( + partitionIds[0], + { + async processInitialize() { + waitForInitializeResolver(); + }, + async processError(err) { + unexpectedError = err; + }, + async processEvents() { + eventsWereReceived = true; + }, + async processClose(reason) { + waitForCloseResolver(reason); } }, - async processClose() { - handlerCalls.close++; - }, - async processInitialize() { - handlerCalls.initialize++; + { + startPosition: startingPositions } - }, - { - maxBatchSize: 1, - maxWaitTimeInSeconds: 1 - } - ); + ); - await loopUntil({ - maxTimes: 10, - name: "Wait for subscription1 to recover", - timeBetweenRunsMs: 5000, - async until() { - return !subscription1.isRunning && !subscription2!.isRunning; - } - }); + // Assert that the subscription is running. + subscription.isRunning.should.equal(true, "The subscription should be running."); - // Initialize may be called multiple times while the 2nd subscription is running. - // We want to make sure it has been called at least twice to verify that subscription1 - // attempts to recover. 
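The graceful-shutdown test above captures `resolve` functions out of Promise executors (`waitForInitializeResolver`, `waitForCloseResolver`) so the subscription handlers can signal checkpoints back to the test body. That "deferred" idiom as a small helper; the helper name is mine, not the SDK's:

```ts
interface Deferred<T> {
  promise: Promise<T>;
  resolve: (value: T) => void;
}

function deferred<T>(): Deferred<T> {
  let resolve!: (value: T) => void;
  const promise = new Promise<T>((res) => {
    resolve = res;
  });
  return { promise, resolve };
}

// const initialized = deferred<void>();
// ...inside the handlers: async processInitialize() { initialized.resolve(); }
// ...in the test body: await initialized.promise;
```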
- handlerCalls.initialize.should.be.greaterThan(1); - handlerCalls.close.should.be.greaterThan(1); - }); + // Wait until we see a `processInitialze` handler get invoked. + // This lets us know that the subscription is starting to read from a partition. + await waitForInitialize; - it("when subscribed to multiple partitions", async function(): Promise { - const consumerClient1 = new EventHubConsumerClient( - EventHubConsumerClient.defaultConsumerGroupName, - service.connectionString, - service.path, - { loadBalancingOptions: { updateIntervalInMs: 1000 } } - ); - const consumerClient2 = new EventHubConsumerClient( - EventHubConsumerClient.defaultConsumerGroupName, - service.connectionString, - service.path, - { loadBalancingOptions: { updateIntervalInMs: 1000 } } - ); + // Stop the client, which should stop the subscriptions. + await client.close(); - clients.push(consumerClient1, consumerClient2); + // Ensure that the `processClose` handler was invoked with the expected reason. + const closeReason = await waitForClose; + closeReason.should.equal( + CloseReason.Shutdown, + "Subscription closed for an unexpected reason." + ); - const partitionHandlerCalls: { - [partitionId: string]: { - initialize: number; - processEvents: boolean; - close: number; - }; - } = {}; + // Ensure no errors were thrown. + should.not.exist(unexpectedError, "Did not expect to observe an error."); - // keep track of the handlers called on subscription 1 - for (const id of partitionIds) { - partitionHandlerCalls[id] = { initialize: 0, processEvents: false, close: 0 }; - } + // Ensure the event handler wasn't called. + eventsWereReceived.should.equal(false, "Should not have received events."); - const subscriptionHandlers1: SubscriptionEventHandlers = { - async processError() { - /* no-op */ - }, - async processEvents(_, context) { - partitionHandlerCalls[context.partitionId].processEvents = true; - }, - async processClose(_, context) { - partitionHandlerCalls[context.partitionId].close++; - // reset processEvents count - partitionHandlerCalls[context.partitionId].processEvents = false; - }, - async processInitialize(context) { - partitionHandlerCalls[context.partitionId].initialize++; - } - }; + // Assert that the subscription is not running. + subscription.isRunning.should.equal(false, "The subscription should not be running."); - const subscription1 = consumerClient1.subscribe(subscriptionHandlers1, { - maxBatchSize: 1, - maxWaitTimeInSeconds: 1 + client["_subscriptions"].size.should.equal( + 0, + "Some dangling subscriptions are still hanging around!" + ); }); + }); - await loopUntil({ - maxTimes: 10, - name: "Wait for subscription1 to read from all partitions", - timeBetweenRunsMs: 1000, - async until() { - // wait until we've seen processEvents invoked for each partition. 
- return ( - partitionIds.filter((id) => { - return partitionHandlerCalls[id].processEvents; - }).length === partitionIds.length - ); - } - }); + describe("Reinitialize partition processing after error", function(): void { + it("when subscribed to single partition", async function(): Promise { + const partitionId = "0"; + const consumerClient1 = new EventHubConsumerClient( + EventHubConsumerClient.defaultConsumerGroupName, + service.connectionString, + service.path + ); + const consumerClient2 = new EventHubConsumerClient( + EventHubConsumerClient.defaultConsumerGroupName, + service.connectionString, + service.path + ); - const partitionsReadFromSub2 = new Set(); - const subscriptionHandlers2: SubscriptionEventHandlers = { - async processError() { - /* no-op */ - }, - async processEvents(_, context) { - partitionsReadFromSub2.add(context.partitionId); - } - }; + clients.push(consumerClient1, consumerClient2); + let subscription2: Subscription | undefined; + const subscriptionHandlers2: SubscriptionEventHandlers = { + async processError() { + /* no-op */ + }, + async processEvents() { + // stop this subscription since it already should have forced the 1st subscription to have an error. + await subscription2!.close(); + } + }; - // start 2nd subscription with an ownerLevel so it triggers the close handlers on the 1st subscription. - const subscription2 = consumerClient2.subscribe(subscriptionHandlers2, { - maxBatchSize: 1, - maxWaitTimeInSeconds: 1, - ownerLevel: 1 - }); + // keep track of the handlers called on subscription 1 + const handlerCalls = { + initialize: 0, + close: 0 + }; - await loopUntil({ - maxTimes: 10, - name: - "Wait for subscription2 to read from all partitions and subscription1 to invoke close handlers", - timeBetweenRunsMs: 1000, - async until() { - const sub1CloseHandlersCalled = Boolean( - partitionIds.filter((id) => { - return partitionHandlerCalls[id].close > 0; - }).length === partitionIds.length - ); - return partitionsReadFromSub2.size === partitionIds.length && sub1CloseHandlersCalled; - } - }); + const subscription1 = consumerClient1.subscribe( + partitionId, + { + async processError() { + /* no-op */ + }, + async processEvents() { + if (!handlerCalls.close) { + // start the 2nd subscription that will kick the 1st subscription off + subscription2 = consumerClient2.subscribe(partitionId, subscriptionHandlers2, { + ownerLevel: 1, + maxBatchSize: 1, + maxWaitTimeInSeconds: 1 + }); + } else { + // stop this subscription, we know close was called so we've restarted + await subscription1.close(); + } + }, + async processClose() { + handlerCalls.close++; + }, + async processInitialize() { + handlerCalls.initialize++; + } + }, + { + maxBatchSize: 1, + maxWaitTimeInSeconds: 1 + } + ); - // close subscription2 so subscription1 can recover. - await subscription2.close(); + await loopUntil({ + maxTimes: 10, + name: "Wait for subscription1 to recover", + timeBetweenRunsMs: 5000, + async until() { + return !subscription1.isRunning && !subscription2!.isRunning; + } + }); - await loopUntil({ - maxTimes: 10, - name: "Wait for subscription1 to recover", - timeBetweenRunsMs: 1000, - async until() { - // wait until we've seen an additional processEvent for each partition. - return ( - partitionIds.filter((id) => { - return partitionHandlerCalls[id].processEvents; - }).length === partitionIds.length - ); - } + // Initialize may be called multiple times while the 2nd subscription is running. 
+ // We want to make sure it has been called at least twice to verify that subscription1 + // attempts to recover. + handlerCalls.initialize.should.be.greaterThan(1); + handlerCalls.close.should.be.greaterThan(1); }); - await subscription1.close(); - - for (const id of partitionIds) { - partitionHandlerCalls[id].initialize.should.be.greaterThan( - 1, - `Initialize on partition ${id} was not called more than 1 time.` + it("when subscribed to multiple partitions", async function(): Promise { + const consumerClient1 = new EventHubConsumerClient( + EventHubConsumerClient.defaultConsumerGroupName, + service.connectionString, + service.path, + { loadBalancingOptions: { updateIntervalInMs: 1000 } } ); - partitionHandlerCalls[id].close.should.be.greaterThan( - 1, - `Close on partition ${id} was not called more than 1 time.` + const consumerClient2 = new EventHubConsumerClient( + EventHubConsumerClient.defaultConsumerGroupName, + service.connectionString, + service.path, + { loadBalancingOptions: { updateIntervalInMs: 1000 } } ); - } - }); - }); - it("Receive from specific partitions, no coordination", async function(): Promise { - const logTester = new LogTester( - [ - "EventHubConsumerClient subscribing to specific partition (0), no checkpoint store.", - "Single partition target: 0", - "No partitions owned, skipping abandoning." - ], - [ - logger.verbose as debug.Debugger, - logger.verbose as debug.Debugger, - logger.verbose as debug.Debugger - ] - ); + clients.push(consumerClient1, consumerClient2); - const tester = new ReceivedMessagesTester(["0"], false); + const partitionHandlerCalls: { + [partitionId: string]: { + initialize: number; + processEvents: boolean; + close: number; + }; + } = {}; - clients.push( - new EventHubConsumerClient( - EventHubConsumerClient.defaultConsumerGroupName, - service.connectionString!, - service.path - ) - ); + // keep track of the handlers called on subscription 1 + for (const id of partitionIds) { + partitionHandlerCalls[id] = { initialize: 0, processEvents: false, close: 0 }; + } - const startPosition = await getStartingPositionsForTests(clients[0]); - const subscription = clients[0].subscribe("0", tester, { startPosition }); + const subscriptionHandlers1: SubscriptionEventHandlers = { + async processError() { + /* no-op */ + }, + async processEvents(_, context) { + partitionHandlerCalls[context.partitionId].processEvents = true; + }, + async processClose(_, context) { + partitionHandlerCalls[context.partitionId].close++; + // reset processEvents count + partitionHandlerCalls[context.partitionId].processEvents = false; + }, + async processInitialize(context) { + partitionHandlerCalls[context.partitionId].initialize++; + } + }; - subscriptions.push(subscription); + const subscription1 = consumerClient1.subscribe(subscriptionHandlers1, { + maxBatchSize: 1, + maxWaitTimeInSeconds: 1 + }); - await tester.runTestAndPoll(producerClient); - await subscription.close(); // or else we won't see the partition abandoning messages + await loopUntil({ + maxTimes: 10, + name: "Wait for subscription1 to read from all partitions", + timeBetweenRunsMs: 1000, + async until() { + // wait until we've seen processEvents invoked for each partition. 
+ return ( + partitionIds.filter((id) => { + return partitionHandlerCalls[id].processEvents; + }).length === partitionIds.length + ); + } + }); - logTester.assert(); - }); + const partitionsReadFromSub2 = new Set(); + const subscriptionHandlers2: SubscriptionEventHandlers = { + async processError() { + /* no-op */ + }, + async processEvents(_, context) { + partitionsReadFromSub2.add(context.partitionId); + } + }; - it("Receive from all partitions, no coordination", async function(): Promise { - const logTester = new LogTester( - ["EventHubConsumerClient subscribing to all partitions, no checkpoint store."], - [ - logger.verbose as debug.Debugger, - logger.verbose as debug.Debugger, - logger.verbose as debug.Debugger - ] - ); + // start 2nd subscription with an ownerLevel so it triggers the close handlers on the 1st subscription. + const subscription2 = consumerClient2.subscribe(subscriptionHandlers2, { + maxBatchSize: 1, + maxWaitTimeInSeconds: 1, + ownerLevel: 1 + }); - const tester = new ReceivedMessagesTester(partitionIds, false); + await loopUntil({ + maxTimes: 10, + name: + "Wait for subscription2 to read from all partitions and subscription1 to invoke close handlers", + timeBetweenRunsMs: 1000, + async until() { + const sub1CloseHandlersCalled = Boolean( + partitionIds.filter((id) => { + return partitionHandlerCalls[id].close > 0; + }).length === partitionIds.length + ); + return partitionsReadFromSub2.size === partitionIds.length && sub1CloseHandlersCalled; + } + }); - clients.push( - new EventHubConsumerClient( - EventHubConsumerClient.defaultConsumerGroupName, - service.connectionString!, - service.path - ) - ); + // close subscription2 so subscription1 can recover. + await subscription2.close(); + + await loopUntil({ + maxTimes: 10, + name: "Wait for subscription1 to recover", + timeBetweenRunsMs: 1000, + async until() { + // wait until we've seen an additional processEvent for each partition. + return ( + partitionIds.filter((id) => { + return partitionHandlerCalls[id].processEvents; + }).length === partitionIds.length + ); + } + }); - const startPosition = await getStartingPositionsForTests(clients[0]); - const subscription = clients[0].subscribe(tester, { startPosition }); + await subscription1.close(); - await tester.runTestAndPoll(producerClient); - subscriptions.push(subscription); + for (const id of partitionIds) { + partitionHandlerCalls[id].initialize.should.be.greaterThan( + 1, + `Initialize on partition ${id} was not called more than 1 time.` + ); + partitionHandlerCalls[id].close.should.be.greaterThan( + 1, + `Close on partition ${id} was not called more than 1 time.` + ); + } + }); + }); - logTester.assert(); - }); + it("Receive from specific partitions, no coordination", async function(): Promise { + const logTester = new LogTester( + [ + "EventHubConsumerClient subscribing to specific partition (0), no checkpoint store.", + "Single partition target: 0", + "No partitions owned, skipping abandoning." 
+ ], + [ + logger.verbose as debug.Debugger, + logger.verbose as debug.Debugger, + logger.verbose as debug.Debugger + ] + ); - it("Receive from all partitions, no coordination but through multiple subscribe() calls", async function(): Promise< - void - > { - const logTester = new LogTester( - [ - ...partitionIds.map( - (partitionId) => - `EventHubConsumerClient subscribing to specific partition (${partitionId}), no checkpoint store.`, - `Abandoning owned partitions` - ), - ...partitionIds.map((partitionId) => `Single partition target: ${partitionId}`) - ], - [ - logger.verbose as debug.Debugger, - logger.verbose as debug.Debugger, - logger.verbose as debug.Debugger - ] - ); + const tester = new ReceivedMessagesTester(["0"], false); - const tester = new ReceivedMessagesTester(partitionIds, false); + clients.push( + new EventHubConsumerClient( + EventHubConsumerClient.defaultConsumerGroupName, + service.connectionString!, + service.path + ) + ); - clients.push( - new EventHubConsumerClient( - EventHubConsumerClient.defaultConsumerGroupName, - service.connectionString!, - service.path - ) - ); + const startPosition = await getStartingPositionsForTests(clients[0]); + const subscription = clients[0].subscribe("0", tester, { startPosition }); - const startPosition = await getStartingPositionsForTests(clients[0]); - for (const partitionId of await partitionIds) { - const subscription = clients[0].subscribe(partitionId, tester, { startPosition }); subscriptions.push(subscription); - } - - await tester.runTestAndPoll(producerClient); - - logTester.assert(); - }); - - it("Receive from all partitions, coordinating with the same partition manager and using the default LoadBalancingStrategy", async function(): Promise< - void - > { - // fast forward our partition manager so it starts reading from the latest offset - // instead of the beginning of time. 
- const logTester = new LogTester( - [ - "EventHubConsumerClient subscribing to all partitions, using a checkpoint store.", - /Starting event processor with ID /, - "Abandoning owned partitions" - ], - [ - logger.verbose as debug.Debugger, - logger.verbose as debug.Debugger, - logger.verbose as debug.Debugger - ] - ); - - const checkpointStore = new TestInMemoryCheckpointStore(); - clients.push( - new EventHubConsumerClient( - EventHubConsumerClient.defaultConsumerGroupName, - service.connectionString!, - service.path, - // specifying your own checkpoint store activates the "production ready" code path that - checkpointStore - // also uses the BalancedLoadBalancingStrategy - ) - ); - const startPosition = await getStartingPositionsForTests(clients[0]); + await tester.runTestAndPoll(producerClient); + await subscription.close(); // or else we won't see the partition abandoning messages - const tester = new ReceivedMessagesTester(partitionIds, true); + logTester.assert(); + }); - const subscriber1 = clients[0].subscribe(tester, { startPosition }); - subscriptions.push(subscriber1); + it("Receive from all partitions, no coordination", async function(): Promise { + const logTester = new LogTester( + ["EventHubConsumerClient subscribing to all partitions, no checkpoint store."], + [ + logger.verbose as debug.Debugger, + logger.verbose as debug.Debugger, + logger.verbose as debug.Debugger + ] + ); - clients.push( - new EventHubConsumerClient( - EventHubConsumerClient.defaultConsumerGroupName, - service.connectionString!, - service.path - // specifying your own checkpoint store activates the "production ready" code path that - // also uses the BalancedLoadBalancingStrategy - ) - ); + const tester = new ReceivedMessagesTester(partitionIds, false); - const subscriber2 = clients[1].subscribe(tester, { startPosition }); - subscriptions.push(subscriber2); + clients.push( + new EventHubConsumerClient( + EventHubConsumerClient.defaultConsumerGroupName, + service.connectionString!, + service.path + ) + ); - await tester.runTestAndPoll(producerClient); + const startPosition = await getStartingPositionsForTests(clients[0]); + const subscription = clients[0].subscribe(tester, { startPosition }); - // or else we won't see the abandoning message - for (const subscription of subscriptions) { - await subscription.close(); - } - logTester.assert(); - }); - - it("Receive from all partitions, coordinating with the same partition manager and using the GreedyLoadBalancingStrategy", async function(): Promise< - void - > { - // fast forward our partition manager so it starts reading from the latest offset - // instead of the beginning of time. 
- const logTester = new LogTester( - [ - "EventHubConsumerClient subscribing to all partitions, using a checkpoint store.", - /Starting event processor with ID /, - "Abandoning owned partitions" - ], - [ - logger.verbose as debug.Debugger, - logger.verbose as debug.Debugger, - logger.verbose as debug.Debugger - ] - ); + await tester.runTestAndPoll(producerClient); + subscriptions.push(subscription); - const checkpointStore = new TestInMemoryCheckpointStore(); + logTester.assert(); + }); - clients.push( - new EventHubConsumerClient( - EventHubConsumerClient.defaultConsumerGroupName, - service.connectionString!, - service.path, - // specifying your own checkpoint store activates the "production ready" code path that - { - loadBalancingOptions: { - strategy: "greedy" - } - } - ) - ); + it("Receive from all partitions, no coordination but through multiple subscribe() calls", async function(): Promise< + void + > { + const logTester = new LogTester( + [ + ...partitionIds.map( + (partitionId) => + `EventHubConsumerClient subscribing to specific partition (${partitionId}), no checkpoint store.`, + `Abandoning owned partitions` + ), + ...partitionIds.map((partitionId) => `Single partition target: ${partitionId}`) + ], + [ + logger.verbose as debug.Debugger, + logger.verbose as debug.Debugger, + logger.verbose as debug.Debugger + ] + ); - const tester = new ReceivedMessagesTester(partitionIds, true); + const tester = new ReceivedMessagesTester(partitionIds, false); - const startPosition = await getStartingPositionsForTests(clients[0]); - const subscriber1 = clients[0].subscribe(tester, { startPosition }); - subscriptions.push(subscriber1); + clients.push( + new EventHubConsumerClient( + EventHubConsumerClient.defaultConsumerGroupName, + service.connectionString!, + service.path + ) + ); - clients.push( - new EventHubConsumerClient( - EventHubConsumerClient.defaultConsumerGroupName, - service.connectionString!, - service.path, - // specifying your own checkpoint store activates the "production ready" code path that - checkpointStore, - { - loadBalancingOptions: { - strategy: "greedy" - } - } - ) - ); + const startPosition = await getStartingPositionsForTests(clients[0]); + for (const partitionId of await partitionIds) { + const subscription = clients[0].subscribe(partitionId, tester, { startPosition }); + subscriptions.push(subscription); + } - const subscriber2 = clients[1].subscribe(tester, { startPosition }); - subscriptions.push(subscriber2); + await tester.runTestAndPoll(producerClient); - await tester.runTestAndPoll(producerClient); + logTester.assert(); + }); - // or else we won't see the abandoning message - for (const subscription of subscriptions) { - await subscription.close(); - } - logTester.assert(); - }); + it("Receive from all partitions, coordinating with the same partition manager and using the default LoadBalancingStrategy", async function(): Promise< + void + > { + // fast forward our partition manager so it starts reading from the latest offset + // instead of the beginning of time. 
+ const logTester = new LogTester( + [ + "EventHubConsumerClient subscribing to all partitions, using a checkpoint store.", + /Starting event processor with ID /, + "Abandoning owned partitions" + ], + [ + logger.verbose as debug.Debugger, + logger.verbose as debug.Debugger, + logger.verbose as debug.Debugger + ] + ); - it("Stops receiving events if close is immediately called, single partition.", async function(): Promise< - void - > { - const partitionId = "0"; - const client = new EventHubConsumerClient( - EventHubConsumerClient.defaultConsumerGroupName, - service.connectionString, - service.path - ); + const checkpointStore = new TestInMemoryCheckpointStore(); + + clients.push( + new EventHubConsumerClient( + EventHubConsumerClient.defaultConsumerGroupName, + service.connectionString!, + service.path, + // specifying your own checkpoint store activates the "production ready" code path that + checkpointStore + // also uses the BalancedLoadBalancingStrategy + ) + ); + const startPosition = await getStartingPositionsForTests(clients[0]); - clients.push(client); - - let initializeCalled = 0; - let closeCalled = 0; - - const subscription = client.subscribe(partitionId, { - async processError() { - /* no-op */ - }, - async processEvents() { - /* no-op */ - }, - async processClose() { - closeCalled++; - }, - async processInitialize() { - initializeCalled++; - } - }); + const tester = new ReceivedMessagesTester(partitionIds, true); - await subscription.close(); + const subscriber1 = clients[0].subscribe(tester, { startPosition }); + subscriptions.push(subscriber1); - await loopUntil({ - maxTimes: 10, - name: "Wait for the subscription to stop running.", - timeBetweenRunsMs: 100, - async until() { - return !subscription.isRunning; - } - }); + clients.push( + new EventHubConsumerClient( + EventHubConsumerClient.defaultConsumerGroupName, + service.connectionString!, + service.path + // specifying your own checkpoint store activates the "production ready" code path that + // also uses the BalancedLoadBalancingStrategy + ) + ); - // If `processInitialize` is called, then `processClose` should be called as well. - // Otherwise, we shouldn't see either called. - initializeCalled.should.equal( - closeCalled, - "processClose was not called the same number of times as processInitialize." - ); - }); + const subscriber2 = clients[1].subscribe(tester, { startPosition }); + subscriptions.push(subscriber2); - it("Stops receiving events if close is immediately called, multiple partitions.", async function(): Promise< - void - > { - const client = new EventHubConsumerClient( - EventHubConsumerClient.defaultConsumerGroupName, - service.connectionString, - service.path - ); + await tester.runTestAndPoll(producerClient); - clients.push(client); - - let initializeCalled = 0; - let closeCalled = 0; - - const subscription = client.subscribe({ - async processError() { - /* no-op */ - }, - async processEvents() { - /* no-op */ - }, - async processClose() { - closeCalled++; - }, - async processInitialize() { - initializeCalled++; + // or else we won't see the abandoning message + for (const subscription of subscriptions) { + await subscription.close(); } + logTester.assert(); }); - await subscription.close(); + it("Receive from all partitions, coordinating with the same partition manager and using the GreedyLoadBalancingStrategy", async function(): Promise< + void + > { + // fast forward our partition manager so it starts reading from the latest offset + // instead of the beginning of time. 
+ const logTester = new LogTester( + [ + "EventHubConsumerClient subscribing to all partitions, using a checkpoint store.", + /Starting event processor with ID /, + "Abandoning owned partitions" + ], + [ + logger.verbose as debug.Debugger, + logger.verbose as debug.Debugger, + logger.verbose as debug.Debugger + ] + ); + + const checkpointStore = new TestInMemoryCheckpointStore(); + + clients.push( + new EventHubConsumerClient( + EventHubConsumerClient.defaultConsumerGroupName, + service.connectionString!, + service.path, + // specifying your own checkpoint store activates the "production ready" code path that + { + loadBalancingOptions: { + strategy: "greedy" + } + } + ) + ); + + const tester = new ReceivedMessagesTester(partitionIds, true); + + const startPosition = await getStartingPositionsForTests(clients[0]); + const subscriber1 = clients[0].subscribe(tester, { startPosition }); + subscriptions.push(subscriber1); + + clients.push( + new EventHubConsumerClient( + EventHubConsumerClient.defaultConsumerGroupName, + service.connectionString!, + service.path, + // specifying your own checkpoint store activates the "production ready" code path that + checkpointStore, + { + loadBalancingOptions: { + strategy: "greedy" + } + } + ) + ); + + const subscriber2 = clients[1].subscribe(tester, { startPosition }); + subscriptions.push(subscriber2); - await loopUntil({ - maxTimes: 10, - name: "Wait for the subscription to stop running.", - timeBetweenRunsMs: 100, - async until() { - return !subscription.isRunning; + await tester.runTestAndPoll(producerClient); + + // or else we won't see the abandoning message + for (const subscription of subscriptions) { + await subscription.close(); } + logTester.assert(); }); - // If `processInitialize` is called, then `processClose` should be called as well. - // Otherwise, we shouldn't see either called. - initializeCalled.should.equal( - closeCalled, - "processClose was not called the same number of times as processInitialize." - ); - }); - - describe("processError", function(): void { - it("supports awaiting subscription.close on non partition-specific errors", async function(): Promise< + it("Stops receiving events if close is immediately called, single partition.", async function(): Promise< void > { - // Use an invalid Event Hub name to trigger a non partition-specific error. + const partitionId = "0"; const client = new EventHubConsumerClient( EventHubConsumerClient.defaultConsumerGroupName, service.connectionString, - "Fake-Hub" + service.path ); - let subscription: Subscription; - const caughtErr: Error = await new Promise((resolve) => { - subscription = client.subscribe({ - processEvents: async () => { - /* no-op */ - }, - processError: async (err, context) => { - if (!context.partitionId) { - await subscription.close(); - resolve(err); - } - } - }); + clients.push(client); + + let initializeCalled = 0; + let closeCalled = 0; + + const subscription = client.subscribe(partitionId, { + async processError() { + /* no-op */ + }, + async processEvents() { + /* no-op */ + }, + async processClose() { + closeCalled++; + }, + async processInitialize() { + initializeCalled++; + } }); - should.exist(caughtErr); + await subscription.close(); + + await loopUntil({ + maxTimes: 10, + name: "Wait for the subscription to stop running.", + timeBetweenRunsMs: 100, + async until() { + return !subscription.isRunning; + } + }); - await client.close(); + // If `processInitialize` is called, then `processClose` should be called as well. 
+ // Otherwise, we shouldn't see either called. + initializeCalled.should.equal( + closeCalled, + "processClose was not called the same number of times as processInitialize." + ); }); - it("supports awaiting subscription.close on partition-specific errors", async function(): Promise< + it("Stops receiving events if close is immediately called, multiple partitions.", async function(): Promise< void > { - // Use an invalid Event Hub name to trigger a non partition-specific error. const client = new EventHubConsumerClient( EventHubConsumerClient.defaultConsumerGroupName, service.connectionString, service.path ); - let subscription: Subscription; - const caughtErr: Error = await new Promise((resolve) => { - // Subscribe to an invalid partition id to trigger a partition-specific error. - subscription = client.subscribe("-1", { - processEvents: async () => { - /* no-op */ - }, - processError: async (err, context) => { - if (context.partitionId) { - await subscription.close(); - resolve(err); - } - } - }); - }); + clients.push(client); - should.exist(caughtErr); + let initializeCalled = 0; + let closeCalled = 0; - await client.close(); - }); - }); - }); - - describe("subscribe() with partitionId 0 as number", function(): void { - it("should not throw an error", async function(): Promise { - let subscription: Subscription | undefined; - await new Promise((resolve, reject) => { - subscription = consumerClient.subscribe( - // @ts-expect-error number for partitionId should work even if type is string - 0, - { - processEvents: async () => { - resolve(); - }, - processError: async (err) => { - reject(err); - } + const subscription = client.subscribe({ + async processError() { + /* no-op */ }, - { - startPosition: latestEventPosition, - maxWaitTimeInSeconds: 0 // Set timeout of 0 to resolve the promise ASAP - } - ); - }); - await subscription!.close(); - }); - }); - - describe("subscribe() with EventPosition specified as", function(): void { - let partitionId: string; - let eventSentBeforeSubscribe: EventData; - let eventsSentAfterSubscribe: EventData[]; - - beforeEach(async () => { - partitionId = partitionIds[0]; - - eventSentBeforeSubscribe = { - body: "Hello awesome world " + Math.random() - }; - await producerClient.sendBatch([eventSentBeforeSubscribe], { partitionId }); - - eventsSentAfterSubscribe = []; - for (let i = 0; i < 5; i++) { - eventsSentAfterSubscribe.push({ - body: "Hello awesome world " + Math.random(), - properties: { - stamp: Math.random() + async processEvents() { + /* no-op */ + }, + async processClose() { + closeCalled++; + }, + async processInitialize() { + initializeCalled++; } }); - } - }); - it("'from end of stream' should receive messages correctly", async function(): Promise { - let subscription: Subscription | undefined; - let processEventsCalled = false; - const eventsReceived: ReceivedEventData[] = []; - - await new Promise((resolve, reject) => { - subscription = consumerClient.subscribe( - partitionId, - { - processEvents: async (data) => { - if (!processEventsCalled) { - processEventsCalled = true; - should.equal(data.length, 0, "Received events when none were sent yet."); - await producerClient.sendBatch(eventsSentAfterSubscribe, { partitionId }); - return; - } - eventsReceived.push(...data); - if (eventsReceived.length === eventsSentAfterSubscribe.length) { - resolve(); - } - }, - processError: async (err) => { - reject(err); - } - }, - { - startPosition: latestEventPosition, - maxWaitTimeInSeconds: 30 + await subscription.close(); + + await loopUntil({ + maxTimes: 
10, + name: "Wait for the subscription to stop running.", + timeBetweenRunsMs: 100, + async until() { + return !subscription.isRunning; } + }); + + // If `processInitialize` is called, then `processClose` should be called as well. + // Otherwise, we shouldn't see either called. + initializeCalled.should.equal( + closeCalled, + "processClose was not called the same number of times as processInitialize." ); }); - await subscription!.close(); - if (eventsReceived.find((event) => event.body === eventSentBeforeSubscribe.body)) { - should.fail("Received event sent before subscribe call with latestEventPosition."); - } + describe("processError", function(): void { + it("supports awaiting subscription.close on non partition-specific errors", async function(): Promise< + void + > { + // Use an invalid Event Hub name to trigger a non partition-specific error. + const client = new EventHubConsumerClient( + EventHubConsumerClient.defaultConsumerGroupName, + service.connectionString, + "Fake-Hub" + ); - should.equal( - eventsReceived.length, - eventsSentAfterSubscribe.length, - "Not received the same number of events that were sent." - ); - for (let i = 0; i < eventsSentAfterSubscribe.length; i++) { - eventsReceived[i].body.should.equal(eventsSentAfterSubscribe[i].body); - eventsReceived[i].properties!.stamp.should.equal( - eventsSentAfterSubscribe[i].properties!.stamp - ); - } - }); + let subscription: Subscription; + const caughtErr: Error = await new Promise((resolve) => { + subscription = client.subscribe({ + processEvents: async () => { + /* no-op */ + }, + processError: async (err, context) => { + if (!context.partitionId) { + await subscription.close(); + resolve(err); + } + } + }); + }); + + should.exist(caughtErr); + + await client.close(); + }); + + it("supports awaiting subscription.close on partition-specific errors", async function(): Promise< + void + > { + // Use a valid Event Hub name; the partition-specific error is triggered by subscribing to an invalid partition id below. + const client = new EventHubConsumerClient( + EventHubConsumerClient.defaultConsumerGroupName, + service.connectionString, + service.path + ); - it("'after a particular sequence number' should receive messages correctly", async function(): Promise< - void - > { - const partitionInfo = await consumerClient.getPartitionProperties(partitionId); - let subscription: Subscription | undefined; - let processEventsCalled = false; - const eventsReceived: ReceivedEventData[] = []; - - await new Promise((resolve, reject) => { - subscription = consumerClient.subscribe( - partitionId, - { - processEvents: async (data) => { - if (!processEventsCalled) { - processEventsCalled = true; - should.equal(data.length, 0, "Received events when none were sent yet."); - await producerClient.sendBatch(eventsSentAfterSubscribe, { partitionId }); - return; + let subscription: Subscription; + const caughtErr: Error = await new Promise((resolve) => { + // Subscribe to an invalid partition id to trigger a partition-specific error.
+ subscription = client.subscribe("-1", { + processEvents: async () => { + /* no-op */ + }, + processError: async (err, context) => { + if (context.partitionId) { + await subscription.close(); + resolve(err); + } } - eventsReceived.push(...data); - if (eventsReceived.length === eventsSentAfterSubscribe.length) { + }); + }); + + should.exist(caughtErr); + + await client.close(); + }); + }); + }); + + describe("subscribe() with partitionId 0 as number", function(): void { + it("should not throw an error", async function(): Promise { + let subscription: Subscription | undefined; + await new Promise((resolve, reject) => { + subscription = consumerClient.subscribe( + // @ts-expect-error number for partitionId should work even if type is string + 0, + { + processEvents: async () => { resolve(); + }, + processError: async (err) => { + reject(err); } }, - processError: async (err) => { - reject(err); + { + startPosition: latestEventPosition, + maxWaitTimeInSeconds: 0 // Set timeout of 0 to resolve the promise ASAP } - }, - { - startPosition: { sequenceNumber: partitionInfo.lastEnqueuedSequenceNumber }, - maxWaitTimeInSeconds: 30 - } - ); + ); + }); + await subscription!.close(); }); - await subscription!.close(); - - if (eventsReceived.find((event) => event.body === eventSentBeforeSubscribe.body)) { - should.fail("Received event sent before subscribe call with last sequence number."); - } - - should.equal( - eventsReceived.length, - eventsSentAfterSubscribe.length, - "Not received the same number of events that were sent." - ); - for (let i = 0; i < eventsSentAfterSubscribe.length; i++) { - eventsReceived[i].body.should.equal(eventsSentAfterSubscribe[i].body); - eventsReceived[i].properties!.stamp.should.equal( - eventsSentAfterSubscribe[i].properties!.stamp - ); - } }); - it("'after a particular sequence number' with isInclusive should receive messages correctly", async function(): Promise< - void - > { - const partitionInfo = await consumerClient.getPartitionProperties(partitionId); - let subscription: Subscription | undefined; - let processEventsCalled = false; - const eventsReceived: ReceivedEventData[] = []; - - await new Promise((resolve, reject) => { - subscription = consumerClient.subscribe( - partitionId, - { - processEvents: async (data) => { - if (!processEventsCalled) { - processEventsCalled = true; - should.equal(data.length, 1, "Expected 1 event sent right before subscribe call."); - should.equal( - data[0].body, - eventSentBeforeSubscribe.body, - "Should have received only the 1 event sent right before subscribe call." 
- ); + describe("subscribe() with EventPosition specified as", function(): void { + let partitionId: string; + let eventSentBeforeSubscribe: EventData; + let eventsSentAfterSubscribe: EventData[]; - await producerClient.sendBatch(eventsSentAfterSubscribe, { partitionId }); - return; - } + beforeEach(async () => { + partitionId = partitionIds[0]; - eventsReceived.push(...data); - if (eventsReceived.length === eventsSentAfterSubscribe.length) { - resolve(); + eventSentBeforeSubscribe = { + body: "Hello awesome world " + Math.random() + }; + await producerClient.sendBatch([eventSentBeforeSubscribe], { partitionId }); + + eventsSentAfterSubscribe = []; + for (let i = 0; i < 5; i++) { + eventsSentAfterSubscribe.push({ + body: "Hello awesome world " + Math.random(), + properties: { + stamp: Math.random() + } + }); + } + }); + + it("'from end of stream' should receive messages correctly", async function(): Promise { + let subscription: Subscription | undefined; + let processEventsCalled = false; + const eventsReceived: ReceivedEventData[] = []; + + await new Promise((resolve, reject) => { + subscription = consumerClient.subscribe( + partitionId, + { + processEvents: async (data) => { + if (!processEventsCalled) { + processEventsCalled = true; + should.equal(data.length, 0, "Received events when none were sent yet."); + await producerClient.sendBatch(eventsSentAfterSubscribe, { partitionId }); + return; + } + eventsReceived.push(...data); + if (eventsReceived.length === eventsSentAfterSubscribe.length) { + resolve(); + } + }, + processError: async (err) => { + reject(err); } }, - processError: async (err) => { - reject(err); + { + startPosition: latestEventPosition, + maxWaitTimeInSeconds: 30 } - }, - { - startPosition: { - sequenceNumber: partitionInfo.lastEnqueuedSequenceNumber, - isInclusive: true - }, - maxWaitTimeInSeconds: 30 - } - ); - }); - await subscription!.close(); + ); + }); + await subscription!.close(); - should.equal( - eventsReceived.length, - eventsSentAfterSubscribe.length, - "Not received the same number of events that were sent." - ); + if (eventsReceived.find((event) => event.body === eventSentBeforeSubscribe.body)) { + should.fail("Received event sent before subscribe call with latestEventPosition."); + } - for (let i = 0; i < eventsSentAfterSubscribe.length; i++) { - eventsReceived[i].body.should.equal(eventsSentAfterSubscribe[i].body); - eventsReceived[i].properties!.stamp.should.equal( - eventsSentAfterSubscribe[i].properties!.stamp + should.equal( + eventsReceived.length, + eventsSentAfterSubscribe.length, + "Not received the same number of events that were sent." 
); - } - }); + for (let i = 0; i < eventsSentAfterSubscribe.length; i++) { + eventsReceived[i].body.should.equal(eventsSentAfterSubscribe[i].body); + eventsReceived[i].properties!.stamp.should.equal( + eventsSentAfterSubscribe[i].properties!.stamp + ); + } + }); - it("'after a particular offset' should receive messages correctly", async function(): Promise< - void - > { - const partitionInfo = await consumerClient.getPartitionProperties(partitionId); - let subscription: Subscription | undefined; - let processEventsCalled = false; - const eventsReceived: ReceivedEventData[] = []; - - await new Promise((resolve, reject) => { - subscription = consumerClient.subscribe( - partitionId, - { - processEvents: async (data) => { - if (!processEventsCalled) { - processEventsCalled = true; - should.equal(data.length, 0, "Received events when none were sent yet."); - await producerClient.sendBatch(eventsSentAfterSubscribe, { partitionId }); - return; - } - eventsReceived.push(...data); - if (eventsReceived.length === eventsSentAfterSubscribe.length) { - resolve(); + it("'after a particular sequence number' should receive messages correctly", async function(): Promise< + void + > { + const partitionInfo = await consumerClient.getPartitionProperties(partitionId); + let subscription: Subscription | undefined; + let processEventsCalled = false; + const eventsReceived: ReceivedEventData[] = []; + + await new Promise((resolve, reject) => { + subscription = consumerClient.subscribe( + partitionId, + { + processEvents: async (data) => { + if (!processEventsCalled) { + processEventsCalled = true; + should.equal(data.length, 0, "Received events when none were sent yet."); + await producerClient.sendBatch(eventsSentAfterSubscribe, { partitionId }); + return; + } + eventsReceived.push(...data); + if (eventsReceived.length === eventsSentAfterSubscribe.length) { + resolve(); + } + }, + processError: async (err) => { + reject(err); } }, - processError: async (err) => { - reject(err); + { + startPosition: { sequenceNumber: partitionInfo.lastEnqueuedSequenceNumber }, + maxWaitTimeInSeconds: 30 } - }, - { - startPosition: { offset: partitionInfo.lastEnqueuedOffset }, - maxWaitTimeInSeconds: 30 - } + ); + }); + await subscription!.close(); + + if (eventsReceived.find((event) => event.body === eventSentBeforeSubscribe.body)) { + should.fail("Received event sent before subscribe call with last sequence number."); + } + + should.equal( + eventsReceived.length, + eventsSentAfterSubscribe.length, + "Not received the same number of events that were sent." 
); + for (let i = 0; i < eventsSentAfterSubscribe.length; i++) { + eventsReceived[i].body.should.equal(eventsSentAfterSubscribe[i].body); + eventsReceived[i].properties!.stamp.should.equal( + eventsSentAfterSubscribe[i].properties!.stamp + ); + } }); - await subscription!.close(); - if (eventsReceived.find((event) => event.body === eventSentBeforeSubscribe.body)) { - should.fail("Received event sent before subscribe call with last offset."); - } + it("'after a particular sequence number' with isInclusive should receive messages correctly", async function(): Promise< + void + > { + const partitionInfo = await consumerClient.getPartitionProperties(partitionId); + let subscription: Subscription | undefined; + let processEventsCalled = false; + const eventsReceived: ReceivedEventData[] = []; + + await new Promise((resolve, reject) => { + subscription = consumerClient.subscribe( + partitionId, + { + processEvents: async (data) => { + if (!processEventsCalled) { + processEventsCalled = true; + should.equal( + data.length, + 1, + "Expected 1 event sent right before subscribe call." + ); + should.equal( + data[0].body, + eventSentBeforeSubscribe.body, + "Should have received only the 1 event sent right before subscribe call." + ); + + await producerClient.sendBatch(eventsSentAfterSubscribe, { partitionId }); + return; + } + + eventsReceived.push(...data); + if (eventsReceived.length === eventsSentAfterSubscribe.length) { + resolve(); + } + }, + processError: async (err) => { + reject(err); + } + }, + { + startPosition: { + sequenceNumber: partitionInfo.lastEnqueuedSequenceNumber, + isInclusive: true + }, + maxWaitTimeInSeconds: 30 + } + ); + }); + await subscription!.close(); - should.equal( - eventsReceived.length, - eventsSentAfterSubscribe.length, - "Not received the same number of events that were sent." - ); - for (let i = 0; i < eventsSentAfterSubscribe.length; i++) { - eventsReceived[i].body.should.equal(eventsSentAfterSubscribe[i].body); - eventsReceived[i].properties!.stamp.should.equal( - eventsSentAfterSubscribe[i].properties!.stamp + should.equal( + eventsReceived.length, + eventsSentAfterSubscribe.length, + "Not received the same number of events that were sent." ); - } - }); - it("'after a particular offset' with isInclusive should receive messages correctly", async function(): Promise< - void - > { - const partitionInfo = await consumerClient.getPartitionProperties(partitionId); - let subscription: Subscription | undefined; - let processEventsCalled = false; - const eventsReceived: ReceivedEventData[] = []; - - await new Promise((resolve, reject) => { - subscription = consumerClient.subscribe( - partitionId, - { - processEvents: async (data) => { - if (!processEventsCalled) { - processEventsCalled = true; - should.equal(data.length, 1, "Expected 1 event sent right before subscribe call."); - should.equal( - data[0].body, - eventSentBeforeSubscribe.body, - "Should have received only the 1 event sent right before subscribe call." 
- ); + for (let i = 0; i < eventsSentAfterSubscribe.length; i++) { + eventsReceived[i].body.should.equal(eventsSentAfterSubscribe[i].body); + eventsReceived[i].properties!.stamp.should.equal( + eventsSentAfterSubscribe[i].properties!.stamp + ); + } + }); - await producerClient.sendBatch(eventsSentAfterSubscribe, { - partitionId - }); - return; + it("'after a particular offset' should receive messages correctly", async function(): Promise< + void + > { + const partitionInfo = await consumerClient.getPartitionProperties(partitionId); + let subscription: Subscription | undefined; + let processEventsCalled = false; + const eventsReceived: ReceivedEventData[] = []; + + await new Promise((resolve, reject) => { + subscription = consumerClient.subscribe( + partitionId, + { + processEvents: async (data) => { + if (!processEventsCalled) { + processEventsCalled = true; + should.equal(data.length, 0, "Received events when none were sent yet."); + await producerClient.sendBatch(eventsSentAfterSubscribe, { partitionId }); + return; + } + eventsReceived.push(...data); + if (eventsReceived.length === eventsSentAfterSubscribe.length) { + resolve(); + } + }, + processError: async (err) => { + reject(err); } + }, + { + startPosition: { offset: partitionInfo.lastEnqueuedOffset }, + maxWaitTimeInSeconds: 30 + } + ); + }); + await subscription!.close(); - eventsReceived.push(...data); - if (eventsReceived.length === eventsSentAfterSubscribe.length) { - resolve(); + if (eventsReceived.find((event) => event.body === eventSentBeforeSubscribe.body)) { + should.fail("Received event sent before subscribe call with last offset."); + } + + should.equal( + eventsReceived.length, + eventsSentAfterSubscribe.length, + "Not received the same number of events that were sent." + ); + for (let i = 0; i < eventsSentAfterSubscribe.length; i++) { + eventsReceived[i].body.should.equal(eventsSentAfterSubscribe[i].body); + eventsReceived[i].properties!.stamp.should.equal( + eventsSentAfterSubscribe[i].properties!.stamp + ); + } + }); + + it("'after a particular offset' with isInclusive should receive messages correctly", async function(): Promise< + void + > { + const partitionInfo = await consumerClient.getPartitionProperties(partitionId); + let subscription: Subscription | undefined; + let processEventsCalled = false; + const eventsReceived: ReceivedEventData[] = []; + + await new Promise((resolve, reject) => { + subscription = consumerClient.subscribe( + partitionId, + { + processEvents: async (data) => { + if (!processEventsCalled) { + processEventsCalled = true; + should.equal( + data.length, + 1, + "Expected 1 event sent right before subscribe call." + ); + should.equal( + data[0].body, + eventSentBeforeSubscribe.body, + "Should have received only the 1 event sent right before subscribe call." 
+ ); + + await producerClient.sendBatch(eventsSentAfterSubscribe, { + partitionId + }); + return; + } + + eventsReceived.push(...data); + if (eventsReceived.length === eventsSentAfterSubscribe.length) { + resolve(); + } + }, + processError: async (err) => { + reject(err); } }, - processError: async (err) => { - reject(err); + { + startPosition: { + offset: partitionInfo.lastEnqueuedOffset, + isInclusive: true + }, + maxWaitTimeInSeconds: 30 } - }, - { - startPosition: { - offset: partitionInfo.lastEnqueuedOffset, - isInclusive: true - }, - maxWaitTimeInSeconds: 30 - } + ); + }); + await subscription!.close(); + + should.equal( + eventsReceived.length, + eventsSentAfterSubscribe.length, + "Not received the same number of events that were sent." ); + + for (let i = 0; i < eventsSentAfterSubscribe.length; i++) { + eventsReceived[i].body.should.equal(eventsSentAfterSubscribe[i].body); + eventsReceived[i].properties!.stamp.should.equal( + eventsSentAfterSubscribe[i].properties!.stamp + ); + } }); - await subscription!.close(); - should.equal( - eventsReceived.length, - eventsSentAfterSubscribe.length, - "Not received the same number of events that were sent." - ); + it("'after a particular enqueued time' should receive messages correctly", async function(): Promise< + void + > { + const partitionInfo = await consumerClient.getPartitionProperties(partitionId); + let subscription: Subscription | undefined; + let processEventsCalled = false; + const eventsReceived: ReceivedEventData[] = []; + + await new Promise((resolve, reject) => { + subscription = consumerClient.subscribe( + partitionId, + { + processEvents: async (data) => { + if (!processEventsCalled) { + processEventsCalled = true; + should.equal(data.length, 0, "Received events when none were sent yet."); + await producerClient.sendBatch(eventsSentAfterSubscribe, { + partitionId + }); + return; + } + + eventsReceived.push(...data); + if (eventsReceived.length === eventsSentAfterSubscribe.length) { + resolve(); + } + }, + processError: async (err) => { + reject(err); + } + }, + { + startPosition: { enqueuedOn: partitionInfo.lastEnqueuedOnUtc }, + maxWaitTimeInSeconds: 30 + } + ); + }); + await subscription!.close(); + + if (eventsReceived.find((event) => event.body === eventSentBeforeSubscribe.body)) { + should.fail("Received event sent before subscribe call with last offset."); + } - for (let i = 0; i < eventsSentAfterSubscribe.length; i++) { - eventsReceived[i].body.should.equal(eventsSentAfterSubscribe[i].body); - eventsReceived[i].properties!.stamp.should.equal( - eventsSentAfterSubscribe[i].properties!.stamp + should.equal( + eventsReceived.length, + eventsSentAfterSubscribe.length, + "Not received the same number of events that were sent." 
); - } + for (let i = 0; i < eventsSentAfterSubscribe.length; i++) { + eventsReceived[i].body.should.equal(eventsSentAfterSubscribe[i].body); + eventsReceived[i].properties!.stamp.should.equal( + eventsSentAfterSubscribe[i].properties!.stamp + ); + } + }); }); - it("'after a particular enqueued time' should receive messages correctly", async function(): Promise< - void - > { - const partitionInfo = await consumerClient.getPartitionProperties(partitionId); - let subscription: Subscription | undefined; - let processEventsCalled = false; - const eventsReceived: ReceivedEventData[] = []; - - await new Promise((resolve, reject) => { - subscription = consumerClient.subscribe( - partitionId, - { - processEvents: async (data) => { - if (!processEventsCalled) { - processEventsCalled = true; - should.equal(data.length, 0, "Received events when none were sent yet."); - await producerClient.sendBatch(eventsSentAfterSubscribe, { - partitionId - }); - return; - } + describe("subscribe() with trackLastEnqueuedEventProperties", function(): void { + it("should have lastEnqueuedEventProperties populated", async function(): Promise { + const partitionId = partitionIds[0]; + + const eventData = { body: "Hello awesome world " + Math.random() }; + await producerClient.sendBatch([eventData], { partitionId }); + debug("sent: ", eventData); + + const pInfo = await consumerClient.getPartitionProperties(partitionId); + debug("partition info: ", pInfo); + + let subscription: Subscription | undefined; + await new Promise((resolve, reject) => { + subscription = consumerClient.subscribe( + partitionId, + { + processEvents: async (data, context) => { + data.length.should.equal(1); + should.exist(context.lastEnqueuedEventProperties); + context.lastEnqueuedEventProperties!.offset!.should.equal(pInfo.lastEnqueuedOffset); + context.lastEnqueuedEventProperties!.sequenceNumber!.should.equal( + pInfo.lastEnqueuedSequenceNumber + ); + context + .lastEnqueuedEventProperties!.enqueuedOn!.getTime() + .should.equal(pInfo.lastEnqueuedOnUtc.getTime()); + context + .lastEnqueuedEventProperties!.retrievedOn!.getTime() + .should.be.greaterThan(Date.now() - 60000); - eventsReceived.push(...data); - if (eventsReceived.length === eventsSentAfterSubscribe.length) { resolve(); + }, + processError: async (err) => { + reject(err); } }, - processError: async (err) => { - reject(err); + { + startPosition: earliestEventPosition, + maxBatchSize: 1, + trackLastEnqueuedEventProperties: true } - }, - { - startPosition: { enqueuedOn: partitionInfo.lastEnqueuedOnUtc }, - maxWaitTimeInSeconds: 30 - } - ); + ); + }); + await subscription!.close(); }); - await subscription!.close(); - - if (eventsReceived.find((event) => event.body === eventSentBeforeSubscribe.body)) { - should.fail("Received event sent before subscribe call with last offset."); - } + }); - should.equal( - eventsReceived.length, - eventsSentAfterSubscribe.length, - "Not received the same number of events that were sent." 
- ); - for (let i = 0; i < eventsSentAfterSubscribe.length; i++) { - eventsReceived[i].body.should.equal(eventsSentAfterSubscribe[i].body); - eventsReceived[i].properties!.stamp.should.equal( - eventsSentAfterSubscribe[i].properties!.stamp + describe("Negative scenarios", function(): void { + it("should throw MessagingEntityNotFoundError for non existing consumer group", async function(): Promise< + void + > { + const badConsumerClient = new EventHubConsumerClient( + "boo", + service.connectionString, + service.path ); - } - }); - }); - - describe("subscribe() with trackLastEnqueuedEventProperties", function(): void { - it("should have lastEnqueuedEventProperties populated", async function(): Promise { - const partitionId = partitionIds[0]; - - const eventData = { body: "Hello awesome world " + Math.random() }; - await producerClient.sendBatch([eventData], { partitionId }); - debug("sent: ", eventData); - - const pInfo = await consumerClient.getPartitionProperties(partitionId); - debug("partition info: ", pInfo); - - let subscription: Subscription | undefined; - await new Promise((resolve, reject) => { - subscription = consumerClient.subscribe( - partitionId, - { - processEvents: async (data, context) => { - data.length.should.equal(1); - should.exist(context.lastEnqueuedEventProperties); - context.lastEnqueuedEventProperties!.offset!.should.equal(pInfo.lastEnqueuedOffset); - context.lastEnqueuedEventProperties!.sequenceNumber!.should.equal( - pInfo.lastEnqueuedSequenceNumber - ); - context - .lastEnqueuedEventProperties!.enqueuedOn!.getTime() - .should.equal(pInfo.lastEnqueuedOnUtc.getTime()); - context - .lastEnqueuedEventProperties!.retrievedOn!.getTime() - .should.be.greaterThan(Date.now() - 60000); - - resolve(); + let subscription: Subscription | undefined; + const caughtErr = await new Promise((resolve) => { + subscription = badConsumerClient.subscribe({ + processEvents: async () => { + /** Nothing to do here */ }, processError: async (err) => { - reject(err); + resolve(err); } - }, - { - startPosition: earliestEventPosition, - maxBatchSize: 1, - trackLastEnqueuedEventProperties: true - } - ); - }); - await subscription!.close(); - }); - }); - - describe("Negative scenarios", function(): void { - it("should throw MessagingEntityNotFoundError for non existing consumer group", async function(): Promise< - void - > { - const badConsumerClient = new EventHubConsumerClient( - "boo", - service.connectionString, - service.path - ); - let subscription: Subscription | undefined; - const caughtErr = await new Promise((resolve) => { - subscription = badConsumerClient.subscribe({ - processEvents: async () => { - /** Nothing to do here */ - }, - processError: async (err) => { - resolve(err); - } + }); }); - }); - await subscription!.close(); - await badConsumerClient.close(); + await subscription!.close(); + await badConsumerClient.close(); - should.exist(caughtErr); - should.equal((caughtErr as MessagingError).code, "MessagingEntityNotFoundError"); - }); + should.exist(caughtErr); + should.equal((caughtErr as MessagingError).code, "MessagingEntityNotFoundError"); + }); - it(`should throw an invalid EventHub address error for invalid partition`, async function(): Promise< - void - > { - let subscription: Subscription | undefined; - const caughtErr = await new Promise((resolve) => { - subscription = consumerClient.subscribe("boo", { - processEvents: async () => { - /** Nothing to do here */ - }, - processError: async (err) => { - resolve(err); - } + it(`should throw an invalid EventHub address 
error for invalid partition`, async function(): Promise< + void + > { + let subscription: Subscription | undefined; + const caughtErr = await new Promise((resolve) => { + subscription = consumerClient.subscribe("boo", { + processEvents: async () => { + /** Nothing to do here */ + }, + processError: async (err) => { + resolve(err); + } + }); + }); + await subscription!.close(); + should.exist(caughtErr); + should.equal((caughtErr as MessagingError).code, "ArgumentOutOfRangeError"); + }); }); - }); -}).timeout(120000); + }).timeout(120000); +}); diff --git a/sdk/eventhub/event-hubs/test/public/hubruntime.spec.ts b/sdk/eventhub/event-hubs/test/public/hubruntime.spec.ts index fee9f2f05dca..2914c1567841 100644 --- a/sdk/eventhub/event-hubs/test/public/hubruntime.spec.ts +++ b/sdk/eventhub/event-hubs/test/public/hubruntime.spec.ts @@ -9,411 +9,435 @@ import debugModule from "debug"; const debug = debugModule("azure:event-hubs:hubruntime-spec"); import { EnvVarKeys, getEnvVars, setTracerForTest } from "./utils/testUtils"; import { setSpan, context } from "@azure/core-tracing"; -const env = getEnvVars(); import { SpanGraph } from "@azure/test-utils"; import { EventHubProducerClient, EventHubConsumerClient, MessagingError } from "../../src"; +import { testWithServiceTypes } from "./utils/testWithServiceTypes"; +import { createMockServer } from "./utils/mockService"; + +testWithServiceTypes((serviceVersion) => { + const env = getEnvVars(); + if (serviceVersion === "mock") { + let service: ReturnType<typeof createMockServer>; + before("Starting mock service", () => { + service = createMockServer(); + return service.start(); + }); -describe("RuntimeInformation", function(): void { - let producerClient: EventHubProducerClient; - let consumerClient: EventHubConsumerClient; - const service = { - connectionString: env[EnvVarKeys.EVENTHUB_CONNECTION_STRING], - path: env[EnvVarKeys.EVENTHUB_NAME] - }; - before("validate environment", function(): void { - should.exist( - env[EnvVarKeys.EVENTHUB_CONNECTION_STRING], - "define EVENTHUB_CONNECTION_STRING in your environment before running integration tests." - ); - should.exist( - env[EnvVarKeys.EVENTHUB_NAME], - "define EVENTHUB_NAME in your environment before running integration tests."
- ); - }); - - beforeEach(async () => { - debug("Creating the clients.."); - producerClient = new EventHubProducerClient(service.connectionString, service.path); - consumerClient = new EventHubConsumerClient( - EventHubConsumerClient.defaultConsumerGroupName, - service.connectionString, - service.path - ); - }); - - afterEach("close the connection", async function(): Promise { - await producerClient.close(); - await consumerClient.close(); - }); - - function arrayOfIncreasingNumbersFromZero(length: any): Array { - const result = new Array(length); - for (let i = 0; i < length; i++) { - result[i] = `${i}`; - } - return result; + after("Stopping mock service", () => { + return service?.stop(); + }); } - describe("getPartitionIds", function(): void { - it("EventHubProducerClient returns an array of partition IDs", async function(): Promise { - const ids = await producerClient.getPartitionIds({}); - ids.should.have.members(arrayOfIncreasingNumbersFromZero(ids.length)); + describe("RuntimeInformation", function(): void { + let producerClient: EventHubProducerClient; + let consumerClient: EventHubConsumerClient; + const service = { + connectionString: env[EnvVarKeys.EVENTHUB_CONNECTION_STRING], + path: env[EnvVarKeys.EVENTHUB_NAME] + }; + before("validate environment", function(): void { + should.exist( + env[EnvVarKeys.EVENTHUB_CONNECTION_STRING], + "define EVENTHUB_CONNECTION_STRING in your environment before running integration tests." + ); + should.exist( + env[EnvVarKeys.EVENTHUB_NAME], + "define EVENTHUB_NAME in your environment before running integration tests." + ); }); - it("EventHubConsumerClient returns an array of partition IDs", async function(): Promise { - const ids = await consumerClient.getPartitionIds({}); - ids.should.have.members(arrayOfIncreasingNumbersFromZero(ids.length)); + beforeEach(async () => { + debug("Creating the clients.."); + producerClient = new EventHubProducerClient(service.connectionString, service.path); + consumerClient = new EventHubConsumerClient( + EventHubConsumerClient.defaultConsumerGroupName, + service.connectionString, + service.path + ); }); - it("EventHubProducerClient can be manually traced", async function(): Promise { - const { tracer, resetTracer } = setTracerForTest(); - - const rootSpan = tracer.startSpan("root"); - const ids = await producerClient.getPartitionIds({ - tracingOptions: { - tracingContext: setSpan(context.active(), rootSpan) - } - }); - ids.should.have.members(arrayOfIncreasingNumbersFromZero(ids.length)); - rootSpan.end(); - - const rootSpans = tracer.getRootSpans(); - rootSpans.length.should.equal(1, "Should only have one root span."); - rootSpans[0].should.equal(rootSpan, "The root span should match what was passed in."); - - const expectedGraph: SpanGraph = { - roots: [ - { - name: rootSpan.name, - children: [ - { - name: "Azure.EventHubs.getEventHubProperties", - children: [] - } - ] - } - ] - }; - - tracer.getSpanGraph(rootSpan.spanContext().traceId).should.eql(expectedGraph); - tracer.getActiveSpans().length.should.equal(0, "All spans should have had end called."); - resetTracer(); + afterEach("close the connection", async function(): Promise { + await producerClient.close(); + await consumerClient.close(); }); - it("EventHubConsumerClient can be manually traced", async function(): Promise { - const { tracer, resetTracer } = setTracerForTest(); + function arrayOfIncreasingNumbersFromZero(length: any): Array { + const result = new Array(length); + for (let i = 0; i < length; i++) { + result[i] = `${i}`; + } + return 
result; + } - const rootSpan = tracer.startSpan("root"); - const ids = await consumerClient.getPartitionIds({ - tracingOptions: { - tracingContext: setSpan(context.active(), rootSpan) - } + describe("getPartitionIds", function(): void { + it("EventHubProducerClient returns an array of partition IDs", async function(): Promise< + void + > { + const ids = await producerClient.getPartitionIds({}); + ids.should.have.members(arrayOfIncreasingNumbersFromZero(ids.length)); }); - ids.should.have.members(arrayOfIncreasingNumbersFromZero(ids.length)); - rootSpan.end(); - - const rootSpans = tracer.getRootSpans(); - rootSpans.length.should.equal(1, "Should only have one root span."); - rootSpans[0].should.equal(rootSpan, "The root span should match what was passed in."); - - const expectedGraph: SpanGraph = { - roots: [ - { - name: rootSpan.name, - children: [ - { - name: "Azure.EventHubs.getEventHubProperties", - children: [] - } - ] - } - ] - }; - tracer.getSpanGraph(rootSpan.spanContext().traceId).should.eql(expectedGraph); - tracer.getActiveSpans().length.should.equal(0, "All spans should have had end called."); - resetTracer(); - }); - }); + it("EventHubConsumerClient returns an array of partition IDs", async function(): Promise< + void + > { + const ids = await consumerClient.getPartitionIds({}); + ids.should.have.members(arrayOfIncreasingNumbersFromZero(ids.length)); + }); - describe("hub runtime information", function(): void { - it("EventHubProducerClient gets the hub runtime information", async function(): Promise<void> { - const hubRuntimeInfo = await producerClient.getEventHubProperties(); - debug(hubRuntimeInfo); - hubRuntimeInfo.name.should.equal(service.path); + it("EventHubProducerClient can be manually traced", async function(): Promise<void> { + const { tracer, resetTracer } = setTracerForTest(); - hubRuntimeInfo.partitionIds.should.have.members( - arrayOfIncreasingNumbersFromZero(hubRuntimeInfo.partitionIds.length) - ); - hubRuntimeInfo.createdOn.should.be.instanceof(Date); - }); + const rootSpan = tracer.startSpan("root"); + const ids = await producerClient.getPartitionIds({ + tracingOptions: { + tracingContext: setSpan(context.active(), rootSpan) + } + }); + ids.should.have.members(arrayOfIncreasingNumbersFromZero(ids.length)); + rootSpan.end(); + + const rootSpans = tracer.getRootSpans(); + rootSpans.length.should.equal(1, "Should only have one root span."); + rootSpans[0].should.equal(rootSpan, "The root span should match what was passed in."); + + const expectedGraph: SpanGraph = { + roots: [ + { + name: rootSpan.name, + children: [ + { + name: "Azure.EventHubs.getEventHubProperties", + children: [] + } + ] + } + ] + }; + + tracer.getSpanGraph(rootSpan.spanContext().traceId).should.eql(expectedGraph); + tracer.getActiveSpans().length.should.equal(0, "All spans should have had end called."); + resetTracer(); + }); - it("EventHubConsumerClient gets the hub runtime information", async function(): Promise<void> { - const hubRuntimeInfo = await consumerClient.getEventHubProperties(); - debug(hubRuntimeInfo); - hubRuntimeInfo.name.should.equal(service.path); + it("EventHubConsumerClient can be manually traced", async function(): Promise<void> { + const { tracer, resetTracer } = setTracerForTest(); - hubRuntimeInfo.partitionIds.should.have.members( - arrayOfIncreasingNumbersFromZero(hubRuntimeInfo.partitionIds.length) - ); - hubRuntimeInfo.createdOn.should.be.instanceof(Date); + const rootSpan = tracer.startSpan("root"); + const ids = await consumerClient.getPartitionIds({ + tracingOptions: {
tracingContext: setSpan(context.active(), rootSpan) + } + }); + ids.should.have.members(arrayOfIncreasingNumbersFromZero(ids.length)); + rootSpan.end(); + + const rootSpans = tracer.getRootSpans(); + rootSpans.length.should.equal(1, "Should only have one root span."); + rootSpans[0].should.equal(rootSpan, "The root span should match what was passed in."); + + const expectedGraph: SpanGraph = { + roots: [ + { + name: rootSpan.name, + children: [ + { + name: "Azure.EventHubs.getEventHubProperties", + children: [] + } + ] + } + ] + }; + + tracer.getSpanGraph(rootSpan.spanContext().traceId).should.eql(expectedGraph); + tracer.getActiveSpans().length.should.equal(0, "All spans should have had end called."); + resetTracer(); + }); }); - describe("hub runtime information", function(): void { - it("EventHubProducerClient can be manually traced", async function(): Promise<void> { - const { tracer, resetTracer } = setTracerForTest(); + describe("hub runtime information", function(): void { + it("EventHubProducerClient gets the hub runtime information", async function(): Promise< + void + > { + const hubRuntimeInfo = await producerClient.getEventHubProperties(); + debug(hubRuntimeInfo); + hubRuntimeInfo.name.should.equal(service.path); - const rootSpan = tracer.startSpan("root"); - const hubRuntimeInfo = await producerClient.getEventHubProperties({ - tracingOptions: { - tracingContext: setSpan(context.active(), rootSpan) - } + hubRuntimeInfo.partitionIds.should.have.members( + arrayOfIncreasingNumbersFromZero(hubRuntimeInfo.partitionIds.length) + ); + hubRuntimeInfo.createdOn.should.be.instanceof(Date); }); - hubRuntimeInfo.partitionIds.should.have.members( - arrayOfIncreasingNumbersFromZero(hubRuntimeInfo.partitionIds.length) - ); - rootSpan.end(); - - const rootSpans = tracer.getRootSpans(); - rootSpans.length.should.equal(1, "Should only have one root span."); - rootSpans[0].should.equal(rootSpan, "The root span should match what was passed in."); - - const expectedGraph: SpanGraph = { - roots: [ - { - name: rootSpan.name, - children: [ - { - name: "Azure.EventHubs.getEventHubProperties", - children: [] - } - ] - } - ] - }; - - tracer.getSpanGraph(rootSpan.spanContext().traceId).should.eql(expectedGraph); - tracer.getActiveSpans().length.should.equal(0, "All spans should have had end called."); - resetTracer(); - }); - it("EventHubConsumerClient can be manually traced", async function(): Promise<void> { - const { tracer, resetTracer } = setTracerForTest(); + it("EventHubConsumerClient gets the hub runtime information", async function(): Promise< + void + > { + const hubRuntimeInfo = await consumerClient.getEventHubProperties(); + debug(hubRuntimeInfo); + hubRuntimeInfo.name.should.equal(service.path); - const rootSpan = tracer.startSpan("root"); - const hubRuntimeInfo = await consumerClient.getEventHubProperties({ - tracingOptions: { - tracingContext: setSpan(context.active(), rootSpan) - } + hubRuntimeInfo.partitionIds.should.have.members( + arrayOfIncreasingNumbersFromZero(hubRuntimeInfo.partitionIds.length) + ); + hubRuntimeInfo.createdOn.should.be.instanceof(Date); }); - hubRuntimeInfo.partitionIds.should.have.members( - arrayOfIncreasingNumbersFromZero(hubRuntimeInfo.partitionIds.length) - ); - rootSpan.end(); - - const rootSpans = tracer.getRootSpans(); - rootSpans.length.should.equal(1, "Should only have one root span."); - rootSpans[0].should.equal(rootSpan, "The root span should match what was passed in."); - - const expectedGraph: SpanGraph = { - roots: [ - { - name: rootSpan.name, - children: [ - { - name: "Azure.EventHubs.getEventHubProperties", - 
children: [] - } - ] - } - ] - }; - tracer.getSpanGraph(rootSpan.spanContext().traceId).should.eql(expectedGraph); - tracer.getActiveSpans().length.should.equal(0, "All spans should have had end called."); - resetTracer(); - }); - }); - - describe("partition runtime information", function(): void { - it("EventHubProducerClient should throw an error if partitionId is missing", async function(): Promise< - void - > { - try { - await producerClient.getPartitionProperties(undefined as any); - throw new Error("Test failure"); - } catch (err) { - err.name.should.equal("TypeError"); - err.message.should.equal( - `getPartitionProperties called without required argument "partitionId"` - ); - } - }); + it("EventHubProducerClient can be manually traced", async function(): Promise<void> { + const { tracer, resetTracer } = setTracerForTest(); - it("EventHubConsumerClient should throw an error if partitionId is missing", async function(): Promise< - void - > { - try { - await consumerClient.getPartitionProperties(undefined as any); - throw new Error("Test failure"); - } catch (err) { - err.name.should.equal("TypeError"); - err.message.should.equal( - `getPartitionProperties called without required argument "partitionId"` + const rootSpan = tracer.startSpan("root"); + const hubRuntimeInfo = await producerClient.getEventHubProperties({ + tracingOptions: { + tracingContext: setSpan(context.active(), rootSpan) + } + }); + hubRuntimeInfo.partitionIds.should.have.members( + arrayOfIncreasingNumbersFromZero(hubRuntimeInfo.partitionIds.length) ); - } - }); - - it("EventHubProducerClient gets the partition runtime information with partitionId as a string", async function(): Promise< - void - > { - const partitionRuntimeInfo = await producerClient.getPartitionProperties("0"); - debug(partitionRuntimeInfo); - partitionRuntimeInfo.partitionId.should.equal("0"); - partitionRuntimeInfo.eventHubName.should.equal(service.path); - partitionRuntimeInfo.lastEnqueuedOnUtc.should.be.instanceof(Date); - should.exist(partitionRuntimeInfo.lastEnqueuedSequenceNumber); - should.exist(partitionRuntimeInfo.lastEnqueuedOffset); - }); + rootSpan.end(); + + const rootSpans = tracer.getRootSpans(); + rootSpans.length.should.equal(1, "Should only have one root span."); + rootSpans[0].should.equal(rootSpan, "The root span should match what was passed in."); + + const expectedGraph: SpanGraph = { + roots: [ + { + name: rootSpan.name, + children: [ + { + name: "Azure.EventHubs.getEventHubProperties", + children: [] + } + ] + } + ] + }; + + tracer.getSpanGraph(rootSpan.spanContext().traceId).should.eql(expectedGraph); + tracer.getActiveSpans().length.should.equal(0, "All spans should have had end called."); + resetTracer(); + }); - it("EventHubConsumerClient gets the partition runtime information with partitionId as a string", async function(): Promise< - void - > { - const partitionRuntimeInfo = await consumerClient.getPartitionProperties("0"); - debug(partitionRuntimeInfo); - partitionRuntimeInfo.partitionId.should.equal("0"); - partitionRuntimeInfo.eventHubName.should.equal(service.path); - partitionRuntimeInfo.lastEnqueuedOnUtc.should.be.instanceof(Date); - should.exist(partitionRuntimeInfo.lastEnqueuedSequenceNumber); - should.exist(partitionRuntimeInfo.lastEnqueuedOffset); - }); + it("EventHubConsumerClient can be manually traced", async function(): Promise<void> { + const { tracer, resetTracer } = setTracerForTest(); - it("EventHubProducerClient gets the partition runtime information with partitionId as a number", async function(): Promise<
void - > { - const partitionRuntimeInfo = await producerClient.getPartitionProperties(0 as any); - debug(partitionRuntimeInfo); - partitionRuntimeInfo.partitionId.should.equal("0"); - partitionRuntimeInfo.eventHubName.should.equal(service.path); - partitionRuntimeInfo.lastEnqueuedOnUtc.should.be.instanceof(Date); - should.exist(partitionRuntimeInfo.lastEnqueuedSequenceNumber); - should.exist(partitionRuntimeInfo.lastEnqueuedOffset); + const rootSpan = tracer.startSpan("root"); + const hubRuntimeInfo = await consumerClient.getEventHubProperties({ + tracingOptions: { + tracingContext: setSpan(context.active(), rootSpan) + } + }); + hubRuntimeInfo.partitionIds.should.have.members( + arrayOfIncreasingNumbersFromZero(hubRuntimeInfo.partitionIds.length) + ); + rootSpan.end(); + + const rootSpans = tracer.getRootSpans(); + rootSpans.length.should.equal(1, "Should only have one root span."); + rootSpans[0].should.equal(rootSpan, "The root span should match what was passed in."); + + const expectedGraph: SpanGraph = { + roots: [ + { + name: rootSpan.name, + children: [ + { + name: "Azure.EventHubs.getEventHubProperties", + children: [] + } + ] + } + ] + }; + + tracer.getSpanGraph(rootSpan.spanContext().traceId).should.eql(expectedGraph); + tracer.getActiveSpans().length.should.equal(0, "All spans should have had end called."); + resetTracer(); + }); }); - it("EventHubConsumerClient gets the partition runtime information with partitionId as a number", async function(): Promise< - void - > { - const partitionRuntimeInfo = await consumerClient.getPartitionProperties(0 as any); - debug(partitionRuntimeInfo); - partitionRuntimeInfo.partitionId.should.equal("0"); - partitionRuntimeInfo.eventHubName.should.equal(service.path); - partitionRuntimeInfo.lastEnqueuedOnUtc.should.be.instanceof(Date); - should.exist(partitionRuntimeInfo.lastEnqueuedSequenceNumber); - should.exist(partitionRuntimeInfo.lastEnqueuedOffset); - }); + describe("partition runtime information", function(): void { + it("EventHubProducerClient should throw an error if partitionId is missing", async function(): Promise< + void + > { + try { + await producerClient.getPartitionProperties(undefined as any); + throw new Error("Test failure"); + } catch (err) { + err.name.should.equal("TypeError"); + err.message.should.equal( + `getPartitionProperties called without required argument "partitionId"` + ); + } + }); - it("EventHubProducerClient bubbles up error from service for invalid partitionId", async function(): Promise< - void - > { - try { - await producerClient.getPartitionProperties("boo"); - throw new Error("Test failure"); - } catch (err) { - debug(`>>>> Received error - `, err); - should.exist(err); - should.equal((err as MessagingError).code, "ArgumentOutOfRangeError"); - } - }); + it("EventHubConsumerClient should throw an error if partitionId is missing", async function(): Promise< + void + > { + try { + await consumerClient.getPartitionProperties(undefined as any); + throw new Error("Test failure"); + } catch (err) { + err.name.should.equal("TypeError"); + err.message.should.equal( + `getPartitionProperties called without required argument "partitionId"` + ); + } + }); - it("EventHubConsumerClient bubbles up error from service for invalid partitionId", async function(): Promise< - void - > { - try { - await consumerClient.getPartitionProperties("boo"); - throw new Error("Test failure"); - } catch (err) { - debug(`>>>> Received error - `, err); - should.exist(err); - should.equal((err as MessagingError).code, 
"ArgumentOutOfRangeError"); - } - }); + it("EventHubProducerClient gets the partition runtime information with partitionId as a string", async function(): Promise< + void + > { + const partitionRuntimeInfo = await producerClient.getPartitionProperties("0"); + debug(partitionRuntimeInfo); + partitionRuntimeInfo.partitionId.should.equal("0"); + partitionRuntimeInfo.eventHubName.should.equal(service.path); + partitionRuntimeInfo.lastEnqueuedOnUtc.should.be.instanceof(Date); + should.exist(partitionRuntimeInfo.lastEnqueuedSequenceNumber); + should.exist(partitionRuntimeInfo.lastEnqueuedOffset); + }); - it("EventHubProducerClient can be manually traced", async function(): Promise { - const { tracer, resetTracer } = setTracerForTest(); + it("EventHubConsumerClient gets the partition runtime information with partitionId as a string", async function(): Promise< + void + > { + const partitionRuntimeInfo = await consumerClient.getPartitionProperties("0"); + debug(partitionRuntimeInfo); + partitionRuntimeInfo.partitionId.should.equal("0"); + partitionRuntimeInfo.eventHubName.should.equal(service.path); + partitionRuntimeInfo.lastEnqueuedOnUtc.should.be.instanceof(Date); + should.exist(partitionRuntimeInfo.lastEnqueuedSequenceNumber); + should.exist(partitionRuntimeInfo.lastEnqueuedOffset); + }); - const rootSpan = tracer.startSpan("root"); - const partitionRuntimeInfo = await producerClient.getPartitionProperties("0", { - tracingOptions: { - tracingContext: setSpan(context.active(), rootSpan) - } + it("EventHubProducerClient gets the partition runtime information with partitionId as a number", async function(): Promise< + void + > { + const partitionRuntimeInfo = await producerClient.getPartitionProperties(0 as any); + debug(partitionRuntimeInfo); + partitionRuntimeInfo.partitionId.should.equal("0"); + partitionRuntimeInfo.eventHubName.should.equal(service.path); + partitionRuntimeInfo.lastEnqueuedOnUtc.should.be.instanceof(Date); + should.exist(partitionRuntimeInfo.lastEnqueuedSequenceNumber); + should.exist(partitionRuntimeInfo.lastEnqueuedOffset); }); - partitionRuntimeInfo.partitionId.should.equal("0"); - partitionRuntimeInfo.eventHubName.should.equal(service.path); - partitionRuntimeInfo.lastEnqueuedOnUtc.should.be.instanceof(Date); - should.exist(partitionRuntimeInfo.lastEnqueuedSequenceNumber); - should.exist(partitionRuntimeInfo.lastEnqueuedOffset); - rootSpan.end(); - - const rootSpans = tracer.getRootSpans(); - rootSpans.length.should.equal(1, "Should only have one root span."); - rootSpans[0].should.equal(rootSpan, "The root span should match what was passed in."); - - const expectedGraph: SpanGraph = { - roots: [ - { - name: rootSpan.name, - children: [ - { - name: "Azure.EventHubs.getPartitionProperties", - children: [] - } - ] - } - ] - }; - tracer.getSpanGraph(rootSpan.spanContext().traceId).should.eql(expectedGraph); - tracer.getActiveSpans().length.should.equal(0, "All spans should have had end called."); - resetTracer(); - }); + it("EventHubConsumerClient gets the partition runtime information with partitionId as a number", async function(): Promise< + void + > { + const partitionRuntimeInfo = await consumerClient.getPartitionProperties(0 as any); + debug(partitionRuntimeInfo); + partitionRuntimeInfo.partitionId.should.equal("0"); + partitionRuntimeInfo.eventHubName.should.equal(service.path); + partitionRuntimeInfo.lastEnqueuedOnUtc.should.be.instanceof(Date); + should.exist(partitionRuntimeInfo.lastEnqueuedSequenceNumber); + should.exist(partitionRuntimeInfo.lastEnqueuedOffset); + 
}); - it("EventHubConsumerClient can be manually traced", async function(): Promise<void> { - const { tracer, resetTracer } = setTracerForTest(); + it("EventHubProducerClient bubbles up error from service for invalid partitionId", async function(): Promise< + void + > { + try { + await producerClient.getPartitionProperties("boo"); + throw new Error("Test failure"); + } catch (err) { + debug(`>>>> Received error - `, err); + should.exist(err); + should.equal((err as MessagingError).code, "ArgumentOutOfRangeError"); + } + }); - const rootSpan = tracer.startSpan("root"); - const partitionRuntimeInfo = await consumerClient.getPartitionProperties("0", { - tracingOptions: { - tracingContext: setSpan(context.active(), rootSpan) + it("EventHubConsumerClient bubbles up error from service for invalid partitionId", async function(): Promise< + void + > { + try { + await consumerClient.getPartitionProperties("boo"); + throw new Error("Test failure"); + } catch (err) { + debug(`>>>> Received error - `, err); + should.exist(err); + should.equal((err as MessagingError).code, "ArgumentOutOfRangeError"); } }); - partitionRuntimeInfo.partitionId.should.equal("0"); - partitionRuntimeInfo.eventHubName.should.equal(service.path); - partitionRuntimeInfo.lastEnqueuedOnUtc.should.be.instanceof(Date); - should.exist(partitionRuntimeInfo.lastEnqueuedSequenceNumber); - should.exist(partitionRuntimeInfo.lastEnqueuedOffset); - rootSpan.end(); - - const rootSpans = tracer.getRootSpans(); - rootSpans.length.should.equal(1, "Should only have one root span."); - rootSpans[0].should.equal(rootSpan, "The root span should match what was passed in."); - - const expectedGraph: SpanGraph = { - roots: [ - { - name: rootSpan.name, - children: [ - { - name: "Azure.EventHubs.getPartitionProperties", - children: [] - } - ] + + it("EventHubProducerClient can be manually traced", async function(): Promise<void> { + const { tracer, resetTracer } = setTracerForTest(); + + const rootSpan = tracer.startSpan("root"); + const partitionRuntimeInfo = await producerClient.getPartitionProperties("0", { + tracingOptions: { + tracingContext: setSpan(context.active(), rootSpan) } - ] - }; + }); + partitionRuntimeInfo.partitionId.should.equal("0"); + partitionRuntimeInfo.eventHubName.should.equal(service.path); + partitionRuntimeInfo.lastEnqueuedOnUtc.should.be.instanceof(Date); + should.exist(partitionRuntimeInfo.lastEnqueuedSequenceNumber); + should.exist(partitionRuntimeInfo.lastEnqueuedOffset); + rootSpan.end(); + + const rootSpans = tracer.getRootSpans(); + rootSpans.length.should.equal(1, "Should only have one root span."); + rootSpans[0].should.equal(rootSpan, "The root span should match what was passed in."); + + const expectedGraph: SpanGraph = { + roots: [ + { + name: rootSpan.name, + children: [ + { + name: "Azure.EventHubs.getPartitionProperties", + children: [] + } + ] + } + ] + }; + + tracer.getSpanGraph(rootSpan.spanContext().traceId).should.eql(expectedGraph); + tracer.getActiveSpans().length.should.equal(0, "All spans should have had end called."); + resetTracer(); + }); + + it("EventHubConsumerClient can be manually traced", async function(): Promise<void> { + const { tracer, resetTracer } = setTracerForTest(); - tracer.getSpanGraph(rootSpan.spanContext().traceId).should.eql(expectedGraph); - tracer.getActiveSpans().length.should.equal(0, "All spans should have had end called."); - resetTracer(); + const rootSpan = tracer.startSpan("root"); + const partitionRuntimeInfo = await consumerClient.getPartitionProperties("0", { + tracingOptions: {
tracingContext: setSpan(context.active(), rootSpan) + } + }); + partitionRuntimeInfo.partitionId.should.equal("0"); + partitionRuntimeInfo.eventHubName.should.equal(service.path); + partitionRuntimeInfo.lastEnqueuedOnUtc.should.be.instanceof(Date); + should.exist(partitionRuntimeInfo.lastEnqueuedSequenceNumber); + should.exist(partitionRuntimeInfo.lastEnqueuedOffset); + rootSpan.end(); + + const rootSpans = tracer.getRootSpans(); + rootSpans.length.should.equal(1, "Should only have one root span."); + rootSpans[0].should.equal(rootSpan, "The root span should match what was passed in."); + + const expectedGraph: SpanGraph = { + roots: [ + { + name: rootSpan.name, + children: [ + { + name: "Azure.EventHubs.getPartitionProperties", + children: [] + } + ] + } + ] + }; + + tracer.getSpanGraph(rootSpan.spanContext().traceId).should.eql(expectedGraph); + tracer.getActiveSpans().length.should.equal(0, "All spans should have had end called."); + resetTracer(); + }); }); - }); -}).timeout(60000); + }).timeout(60000); +}); diff --git a/sdk/eventhub/event-hubs/test/public/node/client.spec.ts b/sdk/eventhub/event-hubs/test/public/node/client.spec.ts index c1751eac0a4f..3414a5e9255a 100644 --- a/sdk/eventhub/event-hubs/test/public/node/client.spec.ts +++ b/sdk/eventhub/event-hubs/test/public/node/client.spec.ts @@ -11,75 +11,73 @@ import { EnvVarKeys, getEnvVars } from "../utils/testUtils"; import { EnvironmentCredential, TokenCredential } from "@azure/identity"; import { EventHubProducerClient, EventHubConsumerClient } from "../../../src"; import { TestTracer, setTracer, resetTracer } from "@azure/test-utils"; -const env = getEnvVars(); - -describe("Create clients using Azure Identity", function(): void { - let endpoint: string; - let credential: TokenCredential; - before("validate environment", function() { - should.exist( - env[EnvVarKeys.AZURE_CLIENT_ID], - "define AZURE_CLIENT_ID in your environment before running integration tests." - ); - should.exist( - env[EnvVarKeys.AZURE_TENANT_ID], - "define AZURE_TENANT_ID in your environment before running integration tests." - ); - should.exist( - env[EnvVarKeys.AZURE_CLIENT_SECRET], - "define AZURE_CLIENT_SECRET in your environment before running integration tests." - ); - should.exist( - env[EnvVarKeys.EVENTHUB_CONNECTION_STRING], - "define EVENTHUB_CONNECTION_STRING in your environment before running integration tests." 
- ); - // This is of the form <your-namespace>.servicebus.windows.net - endpoint = (env.EVENTHUB_CONNECTION_STRING.match("Endpoint=sb://(.*)/;") || "")[1]; - credential = new EnvironmentCredential(); - }); +import { testWithServiceTypes } from "../utils/testWithServiceTypes"; +import { createMockServer } from "../utils/mockService"; - it("creates an EventHubProducerClient from an Azure.Identity credential", async function(): Promise< - void - > { - const client = new EventHubProducerClient(endpoint, env.EVENTHUB_NAME, credential); - should.equal(client.fullyQualifiedNamespace, endpoint); +testWithServiceTypes((serviceVersion) => { + const env = getEnvVars(); + if (serviceVersion === "mock") { + let service: ReturnType<typeof createMockServer>; + before("Starting mock service", () => { + service = createMockServer(); + return service.start(); + }); - // Extra check involving actual call to the service to ensure this works - const hubInfo = await client.getEventHubProperties(); - should.equal(hubInfo.name, client.eventHubName); + after("Stopping mock service", () => { + return service?.stop(); + }); + } - await client.close(); - }); + describe("Create clients using Azure Identity", function(): void { + let endpoint: string; + let credential: TokenCredential; + before("validate environment", function() { + should.exist( + env[EnvVarKeys.AZURE_CLIENT_ID], + "define AZURE_CLIENT_ID in your environment before running integration tests." + ); + should.exist( + env[EnvVarKeys.AZURE_TENANT_ID], + "define AZURE_TENANT_ID in your environment before running integration tests." + ); + should.exist( + env[EnvVarKeys.AZURE_CLIENT_SECRET], + "define AZURE_CLIENT_SECRET in your environment before running integration tests." + ); + should.exist( + env[EnvVarKeys.EVENTHUB_CONNECTION_STRING], + "define EVENTHUB_CONNECTION_STRING in your environment before running integration tests." + ); + // This is of the form <your-namespace>.servicebus.windows.net + endpoint = (env.EVENTHUB_CONNECTION_STRING.match("Endpoint=sb://(.*)/;") || "")[1]; + if (serviceVersion === "mock") { + // Create a mock credential that implements the TokenCredential interface.
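+ // The static token below is enough here: the local mock service is not expected to validate token contents, so only the TokenCredential shape matters to the client.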
+ credential = { + getToken(_args) { + return Promise.resolve({ token: "token", expiresOnTimestamp: Date.now() + 360000 }); + } + }; + } else { + credential = new EnvironmentCredential(); + } + }); - it("creates an EventHubConsumerClient from an Azure.Identity credential", async function(): Promise< - void - > { - const client = new EventHubConsumerClient( - EventHubConsumerClient.defaultConsumerGroupName, - endpoint, - env.EVENTHUB_NAME, - credential - ); - should.equal(client.fullyQualifiedNamespace, endpoint); - - // Extra check involving actual call to the service to ensure this works - const hubInfo = await client.getEventHubProperties(); - should.equal(hubInfo.name, client.eventHubName); - - await client.close(); - }); + it("creates an EventHubProducerClient from an Azure.Identity credential", async function(): Promise< + void + > { + const client = new EventHubProducerClient(endpoint, env.EVENTHUB_NAME, credential); + should.equal(client.fullyQualifiedNamespace, endpoint); - describe("tracing", () => { - let tracer: TestTracer; - before(() => { - tracer = setTracer(); - }); + // Extra check involving actual call to the service to ensure this works + const hubInfo = await client.getEventHubProperties(); + should.equal(hubInfo.name, client.eventHubName); - after(() => { - resetTracer(); + await client.close(); }); - it("getEventHubProperties() creates a span with a peer.address attribute as the FQNS", async () => { + it("creates an EventHubConsumerClient from an Azure.Identity credential", async function(): Promise< + void + > { const client = new EventHubConsumerClient( EventHubConsumerClient.defaultConsumerGroupName, endpoint, @@ -93,16 +91,43 @@ describe("Create clients using Azure Identity", function(): void { should.equal(hubInfo.name, client.eventHubName); await client.close(); + }); + + describe("tracing", () => { + let tracer: TestTracer; + before(() => { + tracer = setTracer(); + }); + + after(() => { + resetTracer(); + }); + + it("getEventHubProperties() creates a span with a peer.address attribute as the FQNS", async () => { + const client = new EventHubConsumerClient( + EventHubConsumerClient.defaultConsumerGroupName, + endpoint, + env.EVENTHUB_NAME, + credential + ); + should.equal(client.fullyQualifiedNamespace, endpoint); + + // Extra check involving actual call to the service to ensure this works + const hubInfo = await client.getEventHubProperties(); + should.equal(hubInfo.name, client.eventHubName); + + await client.close(); - const spans = tracer - .getKnownSpans() - .filter((s) => s.name === "Azure.EventHubs.getEventHubProperties"); + const spans = tracer + .getKnownSpans() + .filter((s) => s.name === "Azure.EventHubs.getEventHubProperties"); - spans.length.should.equal(1); - spans[0].attributes.should.deep.equal({ - "az.namespace": "Microsoft.EventHub", - "message_bus.destination": client.eventHubName, - "peer.address": client.fullyQualifiedNamespace + spans.length.should.equal(1); + spans[0].attributes.should.deep.equal({ + "az.namespace": "Microsoft.EventHub", + "message_bus.destination": client.eventHubName, + "peer.address": client.fullyQualifiedNamespace + }); }); }); }); diff --git a/sdk/eventhub/event-hubs/test/public/node/disconnects.spec.ts b/sdk/eventhub/event-hubs/test/public/node/disconnects.spec.ts index c567857c8293..a8f84c303575 100644 --- a/sdk/eventhub/event-hubs/test/public/node/disconnects.spec.ts +++ b/sdk/eventhub/event-hubs/test/public/node/disconnects.spec.ts @@ -7,224 +7,240 @@ import chaiAsPromised from "chai-as-promised"; 
chai.use(chaiAsPromised); import { EnvVarKeys, getEnvVars } from "../utils/testUtils"; import { EventHubConsumerClient, EventHubProducerClient, Subscription } from "../../../src"; -const env = getEnvVars(); - -describe("disconnected", function() { - const service = { - connectionString: env[EnvVarKeys.EVENTHUB_CONNECTION_STRING], - path: env[EnvVarKeys.EVENTHUB_NAME] - }; - before("validate environment", function(): void { - should.exist( - env[EnvVarKeys.EVENTHUB_CONNECTION_STRING], - "define EVENTHUB_CONNECTION_STRING in your environment before running integration tests." - ); - should.exist( - env[EnvVarKeys.EVENTHUB_NAME], - "define EVENTHUB_NAME in your environment before running integration tests." - ); - }); +import { testWithServiceTypes } from "../utils/testWithServiceTypes"; +import { createMockServer } from "../utils/mockService"; + +testWithServiceTypes((serviceVersion, onVersions) => { + const env = getEnvVars(); + if (serviceVersion === "mock") { + let service: ReturnType<typeof createMockServer>; + before("Starting mock service", () => { + service = createMockServer(); + return service.start(); + }); - describe("EventHubConsumerClient", function() { - it("runtimeInfo work after disconnect", async () => { - const client = new EventHubConsumerClient( - EventHubConsumerClient.defaultConsumerGroupName, - service.connectionString, - service.path + after("Stopping mock service", () => { + return service?.stop(); + }); + } + + describe("disconnected", function() { + const service = { + connectionString: env[EnvVarKeys.EVENTHUB_CONNECTION_STRING], + path: env[EnvVarKeys.EVENTHUB_NAME] + }; + before("validate environment", function(): void { + should.exist( + env[EnvVarKeys.EVENTHUB_CONNECTION_STRING], + "define EVENTHUB_CONNECTION_STRING in your environment before running integration tests." + ); + should.exist( + env[EnvVarKeys.EVENTHUB_NAME], + "define EVENTHUB_NAME in your environment before running integration tests." ); - const clientConnectionContext = client["_context"]; + }); - await client.getPartitionIds({}); - const originalConnectionId = clientConnectionContext.connectionId; + describe("EventHubConsumerClient", function() { + it("runtimeInfo work after disconnect", async () => { + const client = new EventHubConsumerClient( + EventHubConsumerClient.defaultConsumerGroupName, + service.connectionString, + service.path + ); + const clientConnectionContext = client["_context"]; - // Trigger a disconnect on the underlying connection. - clientConnectionContext.connection["_connection"].idle(); + await client.getPartitionIds({}); + const originalConnectionId = clientConnectionContext.connectionId; - const partitionIds = await client.getPartitionIds({}); - const newConnectionId = clientConnectionContext.connectionId; + // Trigger a disconnect on the underlying connection. + clientConnectionContext.connection["_connection"].idle(); - should.not.equal(originalConnectionId, newConnectionId); - partitionIds.length.should.greaterThan(0, "Invalid number of partition ids returned."); + const partitionIds = await client.getPartitionIds({}); + const newConnectionId = clientConnectionContext.connectionId; - await client.close(); - }); + should.not.equal(originalConnectionId, newConnectionId); + partitionIds.length.should.greaterThan(0, "Invalid number of partition ids returned."); - it("should receive after a disconnect", async () => { - /** - * This test validates that an `EventHubConsumerClient.subscribe()` call continues - * receiving events after a `disconnected` event occurs on the underlying connection.
- * - * https://github.com/Azure/azure-sdk-for-js/pull/12280 describes an issue where `processEvents` - * would be invoked with 0 events and ignoring the `maxWaitTimeInSeconds` after a `disconnected` event. - * - * For a single `subscribe()` call, this test does the following: - * 1. Ensure events can be received normally before the `disconnected` event. - * 2. Ensure that the `maxWaitTimeInSeconds` is honoured after a `disconnected` event. - * 3. Ensure that events can be received normally after the `disconnected` event. - */ - const consumer = new EventHubConsumerClient( - EventHubConsumerClient.defaultConsumerGroupName, - service.connectionString, - service.path - ); + await client.close(); + }); + + it("should receive after a disconnect", async () => { + /** + * This test validates that an `EventHubConsumerClient.subscribe()` call continues + * receiving events after a `disconnected` event occurs on the underlying connection. + * + * https://github.com/Azure/azure-sdk-for-js/pull/12280 describes an issue where `processEvents` + * would be invoked with 0 events and ignoring the `maxWaitTimeInSeconds` after a `disconnected` event. + * + * For a single `subscribe()` call, this test does the following: + * 1. Ensure events can be received normally before the `disconnected` event. + * 2. Ensure that the `maxWaitTimeInSeconds` is honoured after a `disconnected` event. + * 3. Ensure that events can be received normally after the `disconnected` event. + */ + const consumer = new EventHubConsumerClient( + EventHubConsumerClient.defaultConsumerGroupName, + service.connectionString, + service.path + ); - const producer = new EventHubProducerClient(service.connectionString, service.path); - const eventSentBeforeDisconnect = { body: "the first event" }; - const eventSentAfterDisconnect = { body: "the second event" }; - - const maxWaitTimeInSeconds = 10; - const partitionId = "0"; - const partitionProperties = await consumer.getPartitionProperties(partitionId); - const clientConnectionContext = consumer["_context"]; - - // Send the first event after getting partition properties so that we can expect to receive it. - await producer.sendBatch([eventSentBeforeDisconnect], { partitionId }); - - let subscription: Subscription | undefined; - let originalConnectionId: string; - - let processEventsInvocationCount = 0; - let firstInvocationEndTime = 0; - await new Promise((resolve, reject) => { - subscription = consumer.subscribe( - partitionId, - { - processEvents: async (data) => { - processEventsInvocationCount++; - should.exist(data); - if (processEventsInvocationCount === 1) { - // 1. Ensure events can be received normally before the `disconnected` event. - should.equal( - data.length, - 1, - "Expected to receive 1 event in first processEvents invocation." - ); - should.equal(data[0].body, eventSentBeforeDisconnect.body); - originalConnectionId = clientConnectionContext.connectionId; - // Trigger a disconnect on the underlying connection. - clientConnectionContext.connection["_connection"].idle(); - firstInvocationEndTime = Date.now(); - } else if (processEventsInvocationCount === 2) { - // 2. Ensure that the `maxWaitTimeInSeconds` is honoured after a `disconnected` event. - // No new events should have been received at this point since we received the last event in the previous invocation. - should.equal( - data.length, - 0, - "Expected to receive 0 events in second processEvents invocation." 
- ); - // The elapsed time since the last processEvents invocation should be >= maxWaitTimeInSeconds - should.equal( - Date.now() - firstInvocationEndTime >= maxWaitTimeInSeconds, - true, - "Expected elapsed time between first and second processEvents invocations to be >= maxWaitTimeInSeconds." - ); - const newConnectionId = clientConnectionContext.connectionId; - should.not.equal(originalConnectionId, newConnectionId); - // Send a new event that will be immediately receivable. - await producer.sendBatch([eventSentAfterDisconnect], { partitionId }); - } else if (processEventsInvocationCount === 3) { - // 3. Ensure that events can be received normally after the `disconnected` event. - should.equal( - data.length, - 1, - "Expected to receive 1 event in third processEvents invocation." - ); - should.equal(data[0].body, eventSentAfterDisconnect.body); - const newConnectionId = clientConnectionContext.connectionId; - should.not.equal(originalConnectionId, newConnectionId); - resolve(); + const producer = new EventHubProducerClient(service.connectionString, service.path); + const eventSentBeforeDisconnect = { body: "the first event" }; + const eventSentAfterDisconnect = { body: "the second event" }; + + const maxWaitTimeInSeconds = 10; + const partitionId = "0"; + const partitionProperties = await consumer.getPartitionProperties(partitionId); + const clientConnectionContext = consumer["_context"]; + + // Send the first event after getting partition properties so that we can expect to receive it. + await producer.sendBatch([eventSentBeforeDisconnect], { partitionId }); + + let subscription: Subscription | undefined; + let originalConnectionId: string; + + let processEventsInvocationCount = 0; + let firstInvocationEndTime = 0; + await new Promise((resolve, reject) => { + subscription = consumer.subscribe( + partitionId, + { + processEvents: async (data) => { + processEventsInvocationCount++; + should.exist(data); + if (processEventsInvocationCount === 1) { + // 1. Ensure events can be received normally before the `disconnected` event. + should.equal( + data.length, + 1, + "Expected to receive 1 event in first processEvents invocation." + ); + should.equal(data[0].body, eventSentBeforeDisconnect.body); + originalConnectionId = clientConnectionContext.connectionId; + // Trigger a disconnect on the underlying connection. + clientConnectionContext.connection["_connection"].idle(); + firstInvocationEndTime = Date.now(); + } else if (processEventsInvocationCount === 2) { + // 2. Ensure that the `maxWaitTimeInSeconds` is honoured after a `disconnected` event. + // No new events should have been received at this point since we received the last event in the previous invocation. + should.equal( + data.length, + 0, + "Expected to receive 0 events in second processEvents invocation." + ); + // The elapsed time since the last processEvents invocation should be >= maxWaitTimeInSeconds + should.equal( + Date.now() - firstInvocationEndTime >= maxWaitTimeInSeconds, + true, + "Expected elapsed time between first and second processEvents invocations to be >= maxWaitTimeInSeconds." + ); + const newConnectionId = clientConnectionContext.connectionId; + should.not.equal(originalConnectionId, newConnectionId); + // Send a new event that will be immediately receivable. + await producer.sendBatch([eventSentAfterDisconnect], { partitionId }); + } else if (processEventsInvocationCount === 3) { + // 3. Ensure that events can be received normally after the `disconnected` event. 
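+ // The assertions below also re-check the connection id, confirming these events arrived over the reconnected (new) connection.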
+ should.equal( + data.length, + 1, + "Expected to receive 1 event in third processEvents invocation." + ); + should.equal(data[0].body, eventSentAfterDisconnect.body); + const newConnectionId = clientConnectionContext.connectionId; + should.not.equal(originalConnectionId, newConnectionId); + resolve(); + } + }, + processError: async (err) => { + reject(err); } }, - processError: async (err) => { - reject(err); + { + startPosition: { + sequenceNumber: partitionProperties.lastEnqueuedSequenceNumber + }, + maxWaitTimeInSeconds } - }, - { - startPosition: { - sequenceNumber: partitionProperties.lastEnqueuedSequenceNumber - }, - maxWaitTimeInSeconds - } - ); + ); + }); + await subscription!.close(); + await consumer.close(); + await producer.close(); }); - await subscription!.close(); - await consumer.close(); - await producer.close(); - }); - }); - - describe("EventHubProducerClient", function() { - it("runtimeInfo work after disconnect", async () => { - const client = new EventHubProducerClient(service.connectionString, service.path); - const clientConnectionContext = client["_context"]; - - await client.getPartitionIds({}); - const originalConnectionId = clientConnectionContext.connectionId; - - // Trigger a disconnect on the underlying connection. - clientConnectionContext.connection["_connection"].idle(); - - const partitionIds = await client.getPartitionIds({}); - const newConnectionId = clientConnectionContext.connectionId; - - should.not.equal(originalConnectionId, newConnectionId); - partitionIds.length.should.greaterThan(0, "Invalid number of partition ids returned."); - - await client.close(); }); - it("should send after a disconnect", async () => { - const client = new EventHubProducerClient(service.connectionString, service.path); - const clientConnectionContext = client["_context"]; + describe("EventHubProducerClient", function() { + it("runtimeInfo work after disconnect", async () => { + const client = new EventHubProducerClient(service.connectionString, service.path); + const clientConnectionContext = client["_context"]; - await client.sendBatch([{ body: "test" }]); - const originalConnectionId = clientConnectionContext.connectionId; + await client.getPartitionIds({}); + const originalConnectionId = clientConnectionContext.connectionId; - // Trigger a disconnect on the underlying connection. - clientConnectionContext.connection["_connection"].idle(); - - await client.sendBatch([{ body: "test2" }]); - const newConnectionId = clientConnectionContext.connectionId; + // Trigger a disconnect on the underlying connection. + clientConnectionContext.connection["_connection"].idle(); - should.not.equal(originalConnectionId, newConnectionId); + const partitionIds = await client.getPartitionIds({}); + const newConnectionId = clientConnectionContext.connectionId; - await client.close(); - }); + should.not.equal(originalConnectionId, newConnectionId); + partitionIds.length.should.greaterThan(0, "Invalid number of partition ids returned."); - it("should not throw an uncaught exception", async () => { - const client = new EventHubProducerClient(service.connectionString, service.path, { - retryOptions: { - timeoutInMs: 0 - } + await client.close(); }); - const clientConnectionContext = client["_context"]; - // Send an event to open the connection. 
- await client.sendBatch([{ body: "test" }]); - const originalConnectionId = clientConnectionContext.connectionId; + it("should send after a disconnect", async () => { + const client = new EventHubProducerClient(service.connectionString, service.path); + const clientConnectionContext = client["_context"]; + + await client.sendBatch([{ body: "test" }]); + const originalConnectionId = clientConnectionContext.connectionId; - let thirdSend: Promise<void>; - // Ensure that the connection will disconnect, and another sendBatch occurs while a sendBatch is in-flight. - setTimeout(() => { - // Trigger a disconnect on the underlying connection while the `sendBatch` is in flight. + // Trigger a disconnect on the underlying connection. clientConnectionContext.connection["_connection"].idle(); - // Triggering another sendBatch immediately after an idle - // used to cause the rhea connection remote state to be cleared. - // This caused the in-flight sendBatch to throw an uncaught error - // if it timed out. - thirdSend = client.sendBatch([{ body: "test3" }]); - }, 0); - await client.sendBatch([{ body: "test2" }]); - const newConnectionId = clientConnectionContext.connectionId; + await client.sendBatch([{ body: "test2" }]); + const newConnectionId = clientConnectionContext.connectionId; - should.not.equal(originalConnectionId, newConnectionId); + should.not.equal(originalConnectionId, newConnectionId); - // ensure the sendBatch from the setTimeout succeeded. - // Wait for the connectionContext to be ready for opening. - await thirdSend!; + await client.close(); + }); - await client.close(); + onVersions(["live"]).it("should not throw an uncaught exception", async () => { + const client = new EventHubProducerClient(service.connectionString, service.path, { + retryOptions: { + timeoutInMs: 0 + } + }); + const clientConnectionContext = client["_context"]; + + // Send an event to open the connection. + await client.sendBatch([{ body: "test" }]); + const originalConnectionId = clientConnectionContext.connectionId; + + let thirdSend: Promise<void>; + // Ensure that the connection will disconnect, and another sendBatch occurs while a sendBatch is in-flight. + setTimeout(async () => { + // Trigger a disconnect on the underlying connection while the `sendBatch` is in flight. + clientConnectionContext.connection["_connection"].idle(); + // Triggering another sendBatch immediately after an idle + // used to cause the rhea connection remote state to be cleared. + // This caused the in-flight sendBatch to throw an uncaught error + // if it timed out. + thirdSend = client.sendBatch([{ body: "test3" }]); + }, 0); + + await client.sendBatch([{ body: "test2" }]); + const newConnectionId = clientConnectionContext.connectionId; + + should.not.equal(originalConnectionId, newConnectionId); + + // ensure the sendBatch from the setTimeout succeeded. + // Wait for the connectionContext to be ready for opening.
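+ // thirdSend is assigned inside the setTimeout callback above, which fires while the awaited sendBatch is still in flight, so the non-null assertion is safe.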
+ await thirdSend!; + + await client.close(); + }); }); }); }); diff --git a/sdk/eventhub/event-hubs/test/public/receiver.spec.ts b/sdk/eventhub/event-hubs/test/public/receiver.spec.ts index 9e8f7038958b..179ff7b233f1 100644 --- a/sdk/eventhub/event-hubs/test/public/receiver.spec.ts +++ b/sdk/eventhub/event-hubs/test/public/receiver.spec.ts @@ -16,471 +16,494 @@ import { EventHubProducerClient, Subscription } from "../../src"; +import { createMockServer } from "./utils/mockService"; import { EnvVarKeys, getEnvVars } from "./utils/testUtils"; -const env = getEnvVars(); - -describe("EventHubConsumerClient", function(): void { - const service = { - connectionString: env[EnvVarKeys.EVENTHUB_CONNECTION_STRING], - path: env[EnvVarKeys.EVENTHUB_NAME] - }; - let producerClient: EventHubProducerClient; - let consumerClient: EventHubConsumerClient; - let partitionIds: string[]; - before("validate environment", async function(): Promise<void> { - should.exist( - env[EnvVarKeys.EVENTHUB_CONNECTION_STRING], - "define EVENTHUB_CONNECTION_STRING in your environment before running integration tests." - ); - should.exist( - env[EnvVarKeys.EVENTHUB_NAME], - "define EVENTHUB_NAME in your environment before running integration tests." - ); - }); - - beforeEach("Creating the clients", async () => { - producerClient = new EventHubProducerClient(service.connectionString, service.path); - consumerClient = new EventHubConsumerClient( - EventHubConsumerClient.defaultConsumerGroupName, - service.connectionString, - service.path - ); - partitionIds = await producerClient.getPartitionIds({}); - }); - - afterEach("Closing the clients", async () => { - await producerClient.close(); - await consumerClient.close(); - }); - - describe("subscribe() with partitionId 0 as number", function(): void { - it("should not throw an error", async function(): Promise<void> { - let subscription: Subscription | undefined; - await new Promise((resolve, reject) => { - subscription = consumerClient.subscribe( - // @ts-expect-error Testing the value 0 can be provided as a number for JS users.
- 0, - { - processEvents: async () => { - resolve(); - }, - processError: async (err) => { - reject(err); - } - }, - { - startPosition: latestEventPosition, - maxWaitTimeInSeconds: 0 // Set timeout of 0 to resolve the promise ASAP - } - ); - }); - await subscription!.close(); - }); - }); - - describe("subscribe() with EventPosition specified as", function(): void { - let partitionId: string; - let eventSentBeforeSubscribe: EventData; - let eventsSentAfterSubscribe: EventData[]; - - beforeEach(async () => { - partitionId = partitionIds[0]; - - eventSentBeforeSubscribe = { - body: "Hello awesome world " + Math.random() - }; - await producerClient.sendBatch([eventSentBeforeSubscribe], { partitionId }); - - eventsSentAfterSubscribe = []; - for (let i = 0; i < 5; i++) { - eventsSentAfterSubscribe.push({ - body: "Hello awesome world " + Math.random(), - properties: { - stamp: Math.random() - } - }); - } +import { testWithServiceTypes } from "./utils/testWithServiceTypes"; + +testWithServiceTypes((serviceVersion) => { + const env = getEnvVars(); + if (serviceVersion === "mock") { + let service: ReturnType<typeof createMockServer>; + before("Starting mock service", () => { + service = createMockServer(); + return service.start(); }); - it("'from end of stream' should receive messages correctly", async function(): Promise<void> { - let subscription: Subscription | undefined; - let processEventsCalled = false; - const eventsReceived: ReceivedEventData[] = []; - - await new Promise((resolve, reject) => { - subscription = consumerClient.subscribe( - partitionId, - { - processEvents: async (data) => { - if (!processEventsCalled) { - processEventsCalled = true; - should.equal(data.length, 0, "Received events when none were sent yet."); - await producerClient.sendBatch(eventsSentAfterSubscribe, { partitionId }); - return; - } - eventsReceived.push(...data); - if (eventsReceived.length === eventsSentAfterSubscribe.length) { - resolve(); - } - }, - processError: async (err) => { - reject(err); - } - }, - { - startPosition: latestEventPosition, - maxWaitTimeInSeconds: 30 - } - ); - }); - await subscription!.close(); - - if (eventsReceived.find((event) => event.body === eventSentBeforeSubscribe.body)) { - should.fail("Received event sent before subscribe call with latestEventPosition."); - } + after("Stopping mock service", () => { + return service?.stop(); + }); + } + describe("EventHubConsumerClient", function(): void { + const service = { + connectionString: env[EnvVarKeys.EVENTHUB_CONNECTION_STRING], + path: env[EnvVarKeys.EVENTHUB_NAME] + }; + let producerClient: EventHubProducerClient; + let consumerClient: EventHubConsumerClient; + let partitionIds: string[]; + before("validate environment", async function(): Promise<void> { + should.exist( + env[EnvVarKeys.EVENTHUB_CONNECTION_STRING], + "define EVENTHUB_CONNECTION_STRING in your environment before running integration tests." + ); + should.exist( + env[EnvVarKeys.EVENTHUB_NAME], + "define EVENTHUB_NAME in your environment before running integration tests." + ); + }); - should.equal( - eventsReceived.length, - eventsSentAfterSubscribe.length, - "Not received the same number of events that were sent."
+ beforeEach("Creating the clients", async () => { + producerClient = new EventHubProducerClient(service.connectionString, service.path); + consumerClient = new EventHubConsumerClient( + EventHubConsumerClient.defaultConsumerGroupName, + service.connectionString, + service.path ); - for (let i = 0; i < eventsSentAfterSubscribe.length; i++) { - eventsReceived[i].body.should.equal(eventsSentAfterSubscribe[i].body); - eventsReceived[i].properties!.stamp.should.equal( - eventsSentAfterSubscribe[i].properties!.stamp - ); - } + partitionIds = await producerClient.getPartitionIds({}); }); - it("'after a particular sequence number' should receive messages correctly", async function(): Promise< - void - > { - const partitionInfo = await consumerClient.getPartitionProperties(partitionId); - let subscription: Subscription | undefined; - let processEventsCalled = false; - const eventsReceived: ReceivedEventData[] = []; - - await new Promise((resolve, reject) => { - subscription = consumerClient.subscribe( - partitionId, - { - processEvents: async (data) => { - if (!processEventsCalled) { - processEventsCalled = true; - should.equal(data.length, 0, "Received events when none were sent yet."); - await producerClient.sendBatch(eventsSentAfterSubscribe, { partitionId }); - return; - } - eventsReceived.push(...data); - if (eventsReceived.length === eventsSentAfterSubscribe.length) { + afterEach("Closing the clients", async () => { + await producerClient.close(); + await consumerClient.close(); + }); + + describe("subscribe() with partitionId 0 as number", function(): void { + it("should not throw an error", async function(): Promise<void> { + let subscription: Subscription | undefined; + await new Promise((resolve, reject) => { + subscription = consumerClient.subscribe( + // @ts-expect-error Testing the value 0 can be provided as a number for JS users. + 0, + { + processEvents: async () => { resolve(); + }, + processError: async (err) => { + reject(err); } }, - processError: async (err) => { - reject(err); + { + startPosition: latestEventPosition, + maxWaitTimeInSeconds: 0 // Set timeout of 0 to resolve the promise ASAP } - }, - { - startPosition: { sequenceNumber: partitionInfo.lastEnqueuedSequenceNumber }, - maxWaitTimeInSeconds: 30 - } - ); + ); + }); + await subscription!.close(); }); - await subscription!.close(); - - if (eventsReceived.find((event) => event.body === eventSentBeforeSubscribe.body)) { - should.fail("Received event sent before subscribe call with last sequence number."); - } - - should.equal( - eventsReceived.length, - eventsSentAfterSubscribe.length, - "Not received the same number of events that were sent."
- ); - for (let i = 0; i < eventsSentAfterSubscribe.length; i++) { - eventsReceived[i].body.should.equal(eventsSentAfterSubscribe[i].body); - eventsReceived[i].properties!.stamp.should.equal( - eventsSentAfterSubscribe[i].properties!.stamp - ); - } }); - it("'after a particular sequence number' with isInclusive should receive messages correctly", async function(): Promise< - void - > { - const partitionInfo = await consumerClient.getPartitionProperties(partitionId); - let subscription: Subscription | undefined; - let processEventsCalled = false; - const eventsReceived: ReceivedEventData[] = []; - - await new Promise((resolve, reject) => { - subscription = consumerClient.subscribe( - partitionId, - { - processEvents: async (data) => { - if (!processEventsCalled) { - processEventsCalled = true; - should.equal(data.length, 1, "Expected 1 event sent right before subscribe call."); - should.equal( - data[0].body, - eventSentBeforeSubscribe.body, - "Should have received only the 1 event sent right before subscribe call." - ); - - await producerClient.sendBatch(eventsSentAfterSubscribe, { partitionId }); - return; - } + describe("subscribe() with EventPosition specified as", function(): void { + let partitionId: string; + let eventSentBeforeSubscribe: EventData; + let eventsSentAfterSubscribe: EventData[]; + + beforeEach(async () => { + partitionId = partitionIds[0]; + + eventSentBeforeSubscribe = { + body: "Hello awesome world " + Math.random() + }; + await producerClient.sendBatch([eventSentBeforeSubscribe], { partitionId }); + + eventsSentAfterSubscribe = []; + for (let i = 0; i < 5; i++) { + eventsSentAfterSubscribe.push({ + body: "Hello awesome world " + Math.random(), + properties: { + stamp: Math.random() + } + }); + } + }); - eventsReceived.push(...data); - if (eventsReceived.length === eventsSentAfterSubscribe.length) { - resolve(); + it("'from end of stream' should receive messages correctly", async function(): Promise { + let subscription: Subscription | undefined; + let processEventsCalled = false; + const eventsReceived: ReceivedEventData[] = []; + + await new Promise((resolve, reject) => { + subscription = consumerClient.subscribe( + partitionId, + { + processEvents: async (data) => { + if (!processEventsCalled) { + processEventsCalled = true; + should.equal(data.length, 0, "Received events when none were sent yet."); + await producerClient.sendBatch(eventsSentAfterSubscribe, { partitionId }); + return; + } + eventsReceived.push(...data); + if (eventsReceived.length === eventsSentAfterSubscribe.length) { + resolve(); + } + }, + processError: async (err) => { + reject(err); } }, - processError: async (err) => { - reject(err); + { + startPosition: latestEventPosition, + maxWaitTimeInSeconds: 30 } - }, - { - startPosition: { - sequenceNumber: partitionInfo.lastEnqueuedSequenceNumber, - isInclusive: true - }, - maxWaitTimeInSeconds: 30 - } - ); - }); - await subscription!.close(); + ); + }); + await subscription!.close(); - should.equal( - eventsReceived.length, - eventsSentAfterSubscribe.length, - "Not received the same number of events that were sent." 
- ); + if (eventsReceived.find((event) => event.body === eventSentBeforeSubscribe.body)) { + should.fail("Received event sent before subscribe call with latestEventPosition."); + } - for (let i = 0; i < eventsSentAfterSubscribe.length; i++) { - eventsReceived[i].body.should.equal(eventsSentAfterSubscribe[i].body); - eventsReceived[i].properties!.stamp.should.equal( - eventsSentAfterSubscribe[i].properties!.stamp + should.equal( + eventsReceived.length, + eventsSentAfterSubscribe.length, + "Not received the same number of events that were sent." ); - } - }); + for (let i = 0; i < eventsSentAfterSubscribe.length; i++) { + eventsReceived[i].body.should.equal(eventsSentAfterSubscribe[i].body); + eventsReceived[i].properties!.stamp.should.equal( + eventsSentAfterSubscribe[i].properties!.stamp + ); + } + }); - it("'after a particular offset' should receive messages correctly", async function(): Promise< - void - > { - const partitionInfo = await consumerClient.getPartitionProperties(partitionId); - let subscription: Subscription | undefined; - let processEventsCalled = false; - const eventsReceived: ReceivedEventData[] = []; - - await new Promise((resolve, reject) => { - subscription = consumerClient.subscribe( - partitionId, - { - processEvents: async (data) => { - if (!processEventsCalled) { - processEventsCalled = true; - should.equal(data.length, 0, "Received events when none were sent yet."); - await producerClient.sendBatch(eventsSentAfterSubscribe, { partitionId }); - return; - } - eventsReceived.push(...data); - if (eventsReceived.length === eventsSentAfterSubscribe.length) { - resolve(); + it("'after a particular sequence number' should receive messages correctly", async function(): Promise< + void + > { + const partitionInfo = await consumerClient.getPartitionProperties(partitionId); + let subscription: Subscription | undefined; + let processEventsCalled = false; + const eventsReceived: ReceivedEventData[] = []; + + await new Promise((resolve, reject) => { + subscription = consumerClient.subscribe( + partitionId, + { + processEvents: async (data) => { + if (!processEventsCalled) { + processEventsCalled = true; + should.equal(data.length, 0, "Received events when none were sent yet."); + await producerClient.sendBatch(eventsSentAfterSubscribe, { partitionId }); + return; + } + eventsReceived.push(...data); + if (eventsReceived.length === eventsSentAfterSubscribe.length) { + resolve(); + } + }, + processError: async (err) => { + reject(err); } }, - processError: async (err) => { - reject(err); + { + startPosition: { sequenceNumber: partitionInfo.lastEnqueuedSequenceNumber }, + maxWaitTimeInSeconds: 30 } - }, - { - startPosition: { offset: partitionInfo.lastEnqueuedOffset }, - maxWaitTimeInSeconds: 30 - } + ); + }); + await subscription!.close(); + + if (eventsReceived.find((event) => event.body === eventSentBeforeSubscribe.body)) { + should.fail("Received event sent before subscribe call with last sequence number."); + } + + should.equal( + eventsReceived.length, + eventsSentAfterSubscribe.length, + "Not received the same number of events that were sent." 
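For reference, this `describe` block exercises every supported `startPosition` shape side by side (the literal values below are illustrative, not taken from a real partition):

import { earliestEventPosition, latestEventPosition } from "@azure/event-hubs";

const startPositions = [
  earliestEventPosition,                           // from the first retained event in the partition
  latestEventPosition,                             // only events enqueued after the receiver attaches
  { sequenceNumber: 100 },                         // strictly after sequence number 100
  { offset: 4096 },                                // strictly after the event at this offset
  { enqueuedOn: new Date(Date.now() - 60 * 1000) } // events enqueued after this instant
];
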
); + for (let i = 0; i < eventsSentAfterSubscribe.length; i++) { + eventsReceived[i].body.should.equal(eventsSentAfterSubscribe[i].body); + eventsReceived[i].properties!.stamp.should.equal( + eventsSentAfterSubscribe[i].properties!.stamp + ); + } }); - await subscription!.close(); - if (eventsReceived.find((event) => event.body === eventSentBeforeSubscribe.body)) { - should.fail("Received event sent before subscribe call with last offset."); - } + it("'after a particular sequence number' with isInclusive should receive messages correctly", async function(): Promise< + void + > { + const partitionInfo = await consumerClient.getPartitionProperties(partitionId); + let subscription: Subscription | undefined; + let processEventsCalled = false; + const eventsReceived: ReceivedEventData[] = []; + + await new Promise((resolve, reject) => { + subscription = consumerClient.subscribe( + partitionId, + { + processEvents: async (data) => { + if (!processEventsCalled) { + processEventsCalled = true; + should.equal( + data.length, + 1, + "Expected 1 event sent right before subscribe call." + ); + should.equal( + data[0].body, + eventSentBeforeSubscribe.body, + "Should have received only the 1 event sent right before subscribe call." + ); + + await producerClient.sendBatch(eventsSentAfterSubscribe, { partitionId }); + return; + } + + eventsReceived.push(...data); + if (eventsReceived.length === eventsSentAfterSubscribe.length) { + resolve(); + } + }, + processError: async (err) => { + reject(err); + } + }, + { + startPosition: { + sequenceNumber: partitionInfo.lastEnqueuedSequenceNumber, + isInclusive: true + }, + maxWaitTimeInSeconds: 30 + } + ); + }); + await subscription!.close(); - should.equal( - eventsReceived.length, - eventsSentAfterSubscribe.length, - "Not received the same number of events that were sent." - ); - for (let i = 0; i < eventsSentAfterSubscribe.length; i++) { - eventsReceived[i].body.should.equal(eventsSentAfterSubscribe[i].body); - eventsReceived[i].properties!.stamp.should.equal( - eventsSentAfterSubscribe[i].properties!.stamp + should.equal( + eventsReceived.length, + eventsSentAfterSubscribe.length, + "Not received the same number of events that were sent." ); - } - }); - it("'after a particular offset' with isInclusive should receive messages correctly", async function(): Promise< - void - > { - const partitionInfo = await consumerClient.getPartitionProperties(partitionId); - let subscription: Subscription | undefined; - let processEventsCalled = false; - const eventsReceived: ReceivedEventData[] = []; - - await new Promise((resolve, reject) => { - subscription = consumerClient.subscribe( - partitionId, - { - processEvents: async (data) => { - if (!processEventsCalled) { - processEventsCalled = true; - should.equal(data.length, 1, "Expected 1 event sent right before subscribe call."); - should.equal( - data[0].body, - eventSentBeforeSubscribe.body, - "Should have received only the 1 event sent right before subscribe call." 
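The only delta between the previous test and the `isInclusive` one is whether the event at the named position is itself re-delivered. Schematically (assuming `partitionInfo` fetched via `getPartitionProperties` as in the tests):

const lastSeq = partitionInfo.lastEnqueuedSequenceNumber;

const exclusive = { sequenceNumber: lastSeq };                    // first delivery: lastSeq + 1
const inclusive = { sequenceNumber: lastSeq, isInclusive: true }; // first delivery: lastSeq itself

This is why the inclusive variant expects the event sent right before subscribing to arrive in the very first batch.
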
- ); - - await producerClient.sendBatch(eventsSentAfterSubscribe, { - partitionId - }); - return; - } + for (let i = 0; i < eventsSentAfterSubscribe.length; i++) { + eventsReceived[i].body.should.equal(eventsSentAfterSubscribe[i].body); + eventsReceived[i].properties!.stamp.should.equal( + eventsSentAfterSubscribe[i].properties!.stamp + ); + } + }); - eventsReceived.push(...data); - if (eventsReceived.length === eventsSentAfterSubscribe.length) { - resolve(); + it("'after a particular offset' should receive messages correctly", async function(): Promise< + void + > { + const partitionInfo = await consumerClient.getPartitionProperties(partitionId); + let subscription: Subscription | undefined; + let processEventsCalled = false; + const eventsReceived: ReceivedEventData[] = []; + + await new Promise((resolve, reject) => { + subscription = consumerClient.subscribe( + partitionId, + { + processEvents: async (data) => { + if (!processEventsCalled) { + processEventsCalled = true; + should.equal(data.length, 0, "Received events when none were sent yet."); + await producerClient.sendBatch(eventsSentAfterSubscribe, { partitionId }); + return; + } + eventsReceived.push(...data); + if (eventsReceived.length === eventsSentAfterSubscribe.length) { + resolve(); + } + }, + processError: async (err) => { + reject(err); } }, - processError: async (err) => { - reject(err); + { + startPosition: { offset: partitionInfo.lastEnqueuedOffset }, + maxWaitTimeInSeconds: 30 } - }, - { - startPosition: { - offset: partitionInfo.lastEnqueuedOffset, - isInclusive: true - }, - maxWaitTimeInSeconds: 30 - } + ); + }); + await subscription!.close(); + + if (eventsReceived.find((event) => event.body === eventSentBeforeSubscribe.body)) { + should.fail("Received event sent before subscribe call with last offset."); + } + + should.equal( + eventsReceived.length, + eventsSentAfterSubscribe.length, + "Not received the same number of events that were sent." ); + for (let i = 0; i < eventsSentAfterSubscribe.length; i++) { + eventsReceived[i].body.should.equal(eventsSentAfterSubscribe[i].body); + eventsReceived[i].properties!.stamp.should.equal( + eventsSentAfterSubscribe[i].properties!.stamp + ); + } }); - await subscription!.close(); - should.equal( - eventsReceived.length, - eventsSentAfterSubscribe.length, - "Not received the same number of events that were sent." - ); + it("'after a particular offset' with isInclusive should receive messages correctly", async function(): Promise< + void + > { + const partitionInfo = await consumerClient.getPartitionProperties(partitionId); + let subscription: Subscription | undefined; + let processEventsCalled = false; + const eventsReceived: ReceivedEventData[] = []; + + await new Promise((resolve, reject) => { + subscription = consumerClient.subscribe( + partitionId, + { + processEvents: async (data) => { + if (!processEventsCalled) { + processEventsCalled = true; + should.equal( + data.length, + 1, + "Expected 1 event sent right before subscribe call." + ); + should.equal( + data[0].body, + eventSentBeforeSubscribe.body, + "Should have received only the 1 event sent right before subscribe call." 
+ ); + + await producerClient.sendBatch(eventsSentAfterSubscribe, { + partitionId + }); + return; + } + + eventsReceived.push(...data); + if (eventsReceived.length === eventsSentAfterSubscribe.length) { + resolve(); + } + }, + processError: async (err) => { + reject(err); + } + }, + { + startPosition: { + offset: partitionInfo.lastEnqueuedOffset, + isInclusive: true + }, + maxWaitTimeInSeconds: 30 + } + ); + }); + await subscription!.close(); - for (let i = 0; i < eventsSentAfterSubscribe.length; i++) { - eventsReceived[i].body.should.equal(eventsSentAfterSubscribe[i].body); - eventsReceived[i].properties!.stamp.should.equal( - eventsSentAfterSubscribe[i].properties!.stamp + should.equal( + eventsReceived.length, + eventsSentAfterSubscribe.length, + "Not received the same number of events that were sent." ); - } - }); - it("'after a particular enqueued time' should receive messages correctly", async function(): Promise< - void - > { - const partitionInfo = await consumerClient.getPartitionProperties(partitionId); - let subscription: Subscription | undefined; - let processEventsCalled = false; - const eventsReceived: ReceivedEventData[] = []; - - await new Promise((resolve, reject) => { - subscription = consumerClient.subscribe( - partitionId, - { - processEvents: async (data) => { - if (!processEventsCalled) { - processEventsCalled = true; - should.equal(data.length, 0, "Received events when none were sent yet."); - await producerClient.sendBatch(eventsSentAfterSubscribe, { - partitionId - }); - return; - } + for (let i = 0; i < eventsSentAfterSubscribe.length; i++) { + eventsReceived[i].body.should.equal(eventsSentAfterSubscribe[i].body); + eventsReceived[i].properties!.stamp.should.equal( + eventsSentAfterSubscribe[i].properties!.stamp + ); + } + }); - eventsReceived.push(...data); - if (eventsReceived.length === eventsSentAfterSubscribe.length) { - resolve(); + it("'after a particular enqueued time' should receive messages correctly", async function(): Promise< + void + > { + const partitionInfo = await consumerClient.getPartitionProperties(partitionId); + let subscription: Subscription | undefined; + let processEventsCalled = false; + const eventsReceived: ReceivedEventData[] = []; + + await new Promise((resolve, reject) => { + subscription = consumerClient.subscribe( + partitionId, + { + processEvents: async (data) => { + if (!processEventsCalled) { + processEventsCalled = true; + should.equal(data.length, 0, "Received events when none were sent yet."); + await producerClient.sendBatch(eventsSentAfterSubscribe, { + partitionId + }); + return; + } + + eventsReceived.push(...data); + if (eventsReceived.length === eventsSentAfterSubscribe.length) { + resolve(); + } + }, + processError: async (err) => { + reject(err); } }, - processError: async (err) => { - reject(err); + { + startPosition: { enqueuedOn: partitionInfo.lastEnqueuedOnUtc }, + maxWaitTimeInSeconds: 30 } - }, - { - startPosition: { enqueuedOn: partitionInfo.lastEnqueuedOnUtc }, - maxWaitTimeInSeconds: 30 - } - ); - }); - await subscription!.close(); + ); + }); + await subscription!.close(); - if (eventsReceived.find((event) => event.body === eventSentBeforeSubscribe.body)) { - should.fail("Received event sent before subscribe call with last offset."); - } + if (eventsReceived.find((event) => event.body === eventSentBeforeSubscribe.body)) { + should.fail("Received event sent before subscribe call with last offset."); + } - should.equal( - eventsReceived.length, - eventsSentAfterSubscribe.length, - "Not received the same number 
of events that were sent." - ); - for (let i = 0; i < eventsSentAfterSubscribe.length; i++) { - eventsReceived[i].body.should.equal(eventsSentAfterSubscribe[i].body); - eventsReceived[i].properties!.stamp.should.equal( - eventsSentAfterSubscribe[i].properties!.stamp + should.equal( + eventsReceived.length, + eventsSentAfterSubscribe.length, + "Not received the same number of events that were sent." ); - } + for (let i = 0; i < eventsSentAfterSubscribe.length; i++) { + eventsReceived[i].body.should.equal(eventsSentAfterSubscribe[i].body); + eventsReceived[i].properties!.stamp.should.equal( + eventsSentAfterSubscribe[i].properties!.stamp + ); + } + }); }); - }); - - describe("subscribe() with trackLastEnqueuedEventProperties", function(): void { - it("should have lastEnqueuedEventProperties populated", async function(): Promise { - const partitionId = partitionIds[0]; - - const eventData = { body: "Hello awesome world " + Math.random() }; - await producerClient.sendBatch([eventData], { partitionId }); - debug("sent: ", eventData); - - const pInfo = await consumerClient.getPartitionProperties(partitionId); - debug("partition info: ", pInfo); - - let subscription: Subscription | undefined; - await new Promise((resolve, reject) => { - subscription = consumerClient.subscribe( - partitionId, - { - processEvents: async (data, context) => { - data.length.should.equal(1); - should.exist(context.lastEnqueuedEventProperties); - context.lastEnqueuedEventProperties!.offset!.should.equal(pInfo.lastEnqueuedOffset); - context.lastEnqueuedEventProperties!.sequenceNumber!.should.equal( - pInfo.lastEnqueuedSequenceNumber - ); - context - .lastEnqueuedEventProperties!.enqueuedOn!.getTime() - .should.equal(pInfo.lastEnqueuedOnUtc.getTime()); - context - .lastEnqueuedEventProperties!.retrievedOn!.getTime() - .should.be.greaterThan(Date.now() - 60000); - - resolve(); + + describe("subscribe() with trackLastEnqueuedEventProperties", function(): void { + it("should have lastEnqueuedEventProperties populated", async function(): Promise { + const partitionId = partitionIds[0]; + + const eventData = { body: "Hello awesome world " + Math.random() }; + await producerClient.sendBatch([eventData], { partitionId }); + debug("sent: ", eventData); + + const pInfo = await consumerClient.getPartitionProperties(partitionId); + debug("partition info: ", pInfo); + + let subscription: Subscription | undefined; + await new Promise((resolve, reject) => { + subscription = consumerClient.subscribe( + partitionId, + { + processEvents: async (data, context) => { + data.length.should.equal(1); + should.exist(context.lastEnqueuedEventProperties); + context.lastEnqueuedEventProperties!.offset!.should.equal(pInfo.lastEnqueuedOffset); + context.lastEnqueuedEventProperties!.sequenceNumber!.should.equal( + pInfo.lastEnqueuedSequenceNumber + ); + context + .lastEnqueuedEventProperties!.enqueuedOn!.getTime() + .should.equal(pInfo.lastEnqueuedOnUtc.getTime()); + context + .lastEnqueuedEventProperties!.retrievedOn!.getTime() + .should.be.greaterThan(Date.now() - 60000); + + resolve(); + }, + processError: async (err) => { + reject(err); + } }, - processError: async (err) => { - reject(err); + { + startPosition: earliestEventPosition, + maxBatchSize: 1, + trackLastEnqueuedEventProperties: true } - }, - { - startPosition: earliestEventPosition, - maxBatchSize: 1, - trackLastEnqueuedEventProperties: true - } - ); + ); + }); + await subscription!.close(); }); - await subscription!.close(); }); - }); -}).timeout(90000); + }).timeout(90000); +}); diff 
--git a/sdk/eventhub/event-hubs/test/public/utils/mockService.browser.ts b/sdk/eventhub/event-hubs/test/public/utils/mockService.browser.ts new file mode 100644 index 000000000000..c099a5de1b59 --- /dev/null +++ b/sdk/eventhub/event-hubs/test/public/utils/mockService.browser.ts @@ -0,0 +1,6 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +export function createMockServer(): void { + /* no-op in browsers */ +} diff --git a/sdk/eventhub/event-hubs/test/public/utils/mockService.ts b/sdk/eventhub/event-hubs/test/public/utils/mockService.ts new file mode 100644 index 000000000000..3ec15d12a0a8 --- /dev/null +++ b/sdk/eventhub/event-hubs/test/public/utils/mockService.ts @@ -0,0 +1,22 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +import { readFileSync } from "fs"; +import { resolve as resolvePath } from "path"; +import { MockEventHub, MockServerOptions } from "@azure/mock-hub"; +import { getEnvVars } from "./testUtils"; + +export function createMockServer(options: MockServerOptions = {}): MockEventHub { + const { EVENTHUB_NAME } = getEnvVars(); + return new MockEventHub({ + name: EVENTHUB_NAME, + partitionCount: 4, + connectionInactivityTimeoutInMs: 300000, // 5 minutes + port: 5671, + tlsOptions: { + cert: readFileSync(resolvePath(process.cwd(), "certs", "my-server.crt.pem")), + key: readFileSync(resolvePath(process.cwd(), "certs", "my-server.key.pem")) + }, + ...options + }); +} diff --git a/sdk/eventhub/event-hubs/test/public/utils/testUtils.ts b/sdk/eventhub/event-hubs/test/public/utils/testUtils.ts index f35d305fa253..55d8f6a47bfd 100644 --- a/sdk/eventhub/event-hubs/test/public/utils/testUtils.ts +++ b/sdk/eventhub/event-hubs/test/public/utils/testUtils.ts @@ -19,10 +19,11 @@ export enum EnvVarKeys { EVENTHUB_NAME = "EVENTHUB_NAME", AZURE_TENANT_ID = "AZURE_TENANT_ID", AZURE_CLIENT_ID = "AZURE_CLIENT_ID", - AZURE_CLIENT_SECRET = "AZURE_CLIENT_SECRET" + AZURE_CLIENT_SECRET = "AZURE_CLIENT_SECRET", + TEST_TARGET = "TEST_TARGET" } -function getEnvVarValue(name: string): string | undefined { +export function getEnvVarValue(name: string): string | undefined { if (isNode) { return process.env[name]; } else { @@ -30,7 +31,29 @@ function getEnvVarValue(name: string): string | undefined { } } -export function getEnvVars(): { [key in EnvVarKeys]: any } { +function injectEnvironmentVariables( + envVars: Omit<{ [key in EnvVarKeys]: string }, EnvVarKeys.TEST_TARGET> +): void { + for (const key of Object.keys(envVars) as Exclude<EnvVarKeys, EnvVarKeys.TEST_TARGET>[]) { + if (isNode) { + process.env[key] = envVars[key]; + } else { + self.__env__[key] = envVars[key]; + } + } +} + +export function getEnvVars(): Omit<{ [key in EnvVarKeys]: any }, EnvVarKeys.TEST_TARGET> { + if (getEnvVarValue(EnvVarKeys.TEST_TARGET) === "mock") { + injectEnvironmentVariables({ + [EnvVarKeys.EVENTHUB_CONNECTION_STRING]: `Endpoint=sb://localhost/;SharedAccessKeyName=Foo;SharedAccessKey=Bar`, + [EnvVarKeys.EVENTHUB_NAME]: "mock-hub", + [EnvVarKeys.AZURE_TENANT_ID]: "AzureTenantId", + [EnvVarKeys.AZURE_CLIENT_ID]: "AzureClientId", + [EnvVarKeys.AZURE_CLIENT_SECRET]: "AzureClientSecret" + }); + } + return { [EnvVarKeys.EVENTHUB_CONNECTION_STRING]: getEnvVarValue(EnvVarKeys.EVENTHUB_CONNECTION_STRING), [EnvVarKeys.EVENTHUB_NAME]: getEnvVarValue(EnvVarKeys.EVENTHUB_NAME), diff --git a/sdk/eventhub/event-hubs/test/public/utils/testWithServiceTypes.ts b/sdk/eventhub/event-hubs/test/public/utils/testWithServiceTypes.ts new file mode 100644 index 000000000000..f143b7ba3693 --- /dev/null +++
b/sdk/eventhub/event-hubs/test/public/utils/testWithServiceTypes.ts @@ -0,0 +1,43 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +import { isNode, TestFunctionWrapper, versionsToTest } from "@azure/test-utils"; +import { getEnvVarValue } from "./testUtils"; + +export type SupportedTargets = "mock" | "live"; +const serviceVersions: SupportedTargets[] = ["mock", "live"]; +const testTarget = getEnvVarValue("TEST_TARGET") || "live"; +export function testWithServiceTypes( + handler: ( + serviceVersion: SupportedTargets, + onVersions: (supported: SupportedTargets[]) => TestFunctionWrapper + ) => void +): void { + // Wrap within an empty `describe` so that nested functions get the mocha + // context object for the current suite being run. + describe("", function() { + // this.file comes from the current mocha suite context. + // eslint-disable-next-line @typescript-eslint/no-invalid-this + describe(this.file ?? "", function() { + versionsToTest(serviceVersions, { versionForRecording: testTarget }, function( + serviceVersion, + ...rest + ) { + if (serviceVersion !== testTarget) { + // The min-max tests don't currently allow us to set the environment variables + // we use to disable running all targets when TEST_MODE is live. + // This ensures we only run the tests against the target version we want. + return; + } + + if (serviceVersion === "mock" && !isNode) { + // We don't currently support running tests against the mock service in browsers. + // This can be revisited once the mock service supports websockets. + return; + } + + handler(serviceVersion as SupportedTargets, ...rest); + }); + }); + }); +} diff --git a/sdk/eventhub/mock-hub/samples/javascript/src/ehSample.js b/sdk/eventhub/mock-hub/samples/javascript/src/ehSample.js index a731ae25a082..a6c087f600bb 100644 --- a/sdk/eventhub/mock-hub/samples/javascript/src/ehSample.js +++ b/sdk/eventhub/mock-hub/samples/javascript/src/ehSample.js @@ -1,8 +1,8 @@ // Copyright (c) Microsoft Corporation. // Licensed under the MIT license. -const { readFileSync } = require('fs'); -const { resolve: resolvePath } = require('path'); +const { readFileSync } = require("fs"); +const { resolve: resolvePath } = require("path"); const { MockEventHub } = require("@azure/mock-hub"); // Load the .env file if it exists @@ -14,19 +14,18 @@ async function main() { name: "mock-hub", partitionCount: 4, consumerGroups: ["foo"], - connectionInactivityTimeoutInMs: 300000 // 5 minutes + connectionInactivityTimeoutInMs: 300000, // 5 minutes + port: 5671, + tlsOptions: { + pfx: readFileSync(resolvePath(__dirname, "certs", "my-cert.pfx")), + passphrase: process.env["CERT_PASSPHRASE"] + } }); - await service.start({ - port: 5671, - tlsOptions: { - pfx: readFileSync(resolvePath(__dirname, 'certs', 'my-cert.pfx')), - passphrase: process.env["CERT_PASSPHRASE"] - } - }); + await service.start(); // Wait a minute then shut the service down. - await new Promise(resolve => setTimeout(resolve, 60000)); + await new Promise((resolve) => setTimeout(resolve, 60000)); return service.stop(); } diff --git a/sdk/eventhub/mock-hub/samples/typescript/src/ehSample.ts b/sdk/eventhub/mock-hub/samples/typescript/src/ehSample.ts index a813ca9db326..a8ccdb1d2ee8 100644 --- a/sdk/eventhub/mock-hub/samples/typescript/src/ehSample.ts +++ b/sdk/eventhub/mock-hub/samples/typescript/src/ehSample.ts @@ -1,8 +1,8 @@ // Copyright (c) Microsoft Corporation. // Licensed under the MIT license.
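To show how the helper above is consumed: the large indentation-only churn in the `*.spec.ts` files earlier in this diff comes from wrapping each suite in `testWithServiceTypes`; suites that need the mock service start it themselves via `createMockServer` (a no-op in browsers). A hedged sketch of that wiring — the exact per-file setup may differ slightly:

import { createMockServer } from "./utils/mockService";
import { testWithServiceTypes } from "./utils/testWithServiceTypes";

testWithServiceTypes((serviceVersion) => {
  if (serviceVersion === "mock") {
    // Only Node.js runs against the mock service (see the isNode check above).
    const service = createMockServer();
    before("start mock service", () => service.start());
    after("stop mock service", () => service.stop());
  }

  describe("EventHubConsumerClient", function(): void {
    // ...the pre-existing tests, one indentation level deeper than before...
  });
});
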
-import { readFileSync } from 'fs'; -import { resolve as resolvePath } from 'path'; +import { readFileSync } from "fs"; +import { resolve as resolvePath } from "path"; import { MockEventHub } from "@azure/mock-hub"; // Load the .env file if it exists @@ -14,19 +14,18 @@ export async function main(): Promise { name: "mock-hub", partitionCount: 4, consumerGroups: ["foo"], - connectionInactivityTimeoutInMs: 300000 // 5 minutes - }); - - await service.start({ + connectionInactivityTimeoutInMs: 300000, // 5 minutes port: 5671, tlsOptions: { - pfx: readFileSync(resolvePath(__dirname, 'certs', 'my-cert.pfx')), + pfx: readFileSync(resolvePath(__dirname, "certs", "my-cert.pfx")), passphrase: process.env["CERT_PASSPHRASE"] } }); + await service.start(); + // Wait a minute then shut the service down. - await new Promise(resolve => setTimeout(resolve, 60000)); + await new Promise((resolve) => setTimeout(resolve, 60000)); return service.stop(); } diff --git a/sdk/eventhub/mock-hub/src/index.ts b/sdk/eventhub/mock-hub/src/index.ts index 7ff25dd9c331..975d8b0f817f 100644 --- a/sdk/eventhub/mock-hub/src/index.ts +++ b/sdk/eventhub/mock-hub/src/index.ts @@ -1,5 +1,5 @@ // Copyright (c) Microsoft Corporation. // Licensed under the MIT license. -export { MockEventHub, MockEventHubOptions } from "./services/eventHubs"; -export { StartOptions } from "./server/mockServer"; +export { MockEventHub, MockEventHubOptions, IMockEventHub } from "./services/eventHubs"; +export { MockServerOptions } from "./server/mockServer"; diff --git a/sdk/eventhub/mock-hub/src/sender/streamingPartitionSender.ts b/sdk/eventhub/mock-hub/src/sender/streamingPartitionSender.ts index a83de4090389..bd09fc975fab 100644 --- a/sdk/eventhub/mock-hub/src/sender/streamingPartitionSender.ts +++ b/sdk/eventhub/mock-hub/src/sender/streamingPartitionSender.ts @@ -112,8 +112,9 @@ export class StreamingPartitionSender { // And away it goes! sender.send(outgoingMessage); } catch (err) { - console.error(`Unexpected error while streaming events: `, err); - // swallow errors + if ((err as any)?.name !== "AbortError") { + console.error(`Unexpected error while streaming events: `, err); + } } } while (!abortSignal.aborted && !nextResult?.done); } diff --git a/sdk/eventhub/mock-hub/src/server/mockServer.ts b/sdk/eventhub/mock-hub/src/server/mockServer.ts index 38ed33eadd55..a83437ab5b02 100644 --- a/sdk/eventhub/mock-hub/src/server/mockServer.ts +++ b/sdk/eventhub/mock-hub/src/server/mockServer.ts @@ -18,7 +18,7 @@ import { } from "rhea"; import { convertBufferToMessages } from "../utils/convertBufferToMessage"; -export interface StartOptions { +export interface MockServerOptions { /** * The port number the server should listen on. * If not specified, an open port will be chosen at random. @@ -121,9 +121,11 @@ export interface OnMessagesEvent { export class MockServer extends EventEmitter { private _container: Container; private _listener?: ReturnType; + private _options: MockServerOptions; - constructor() { + constructor(options: MockServerOptions = {}) { super(); + this._options = options; this._container = create_container(); } @@ -140,11 +142,11 @@ export class MockServer extends EventEmitter { } /** - * Starts the server using the specified options. - * @param options + * Starts the server. 
*/ - public start(options: StartOptions = {}): Promise<void> { + public start(): Promise<void> { return new Promise((resolve, reject) => { + const options = this._options; const ONE_MB = 1024 * 1024; const listenOptions: ListenOptions & ConnectionOptions & any = { port: options.port ?? 0, @@ -254,11 +256,13 @@ } return new Promise((resolve, reject) => { listener.close((err) => { - if (err) { - reject(err); - } else { - resolve(); - } + setTimeout(() => { + if (err) { + reject(err); + } else { + resolve(); + } + }, 0); }); }); } @@ -343,7 +347,10 @@ : [message]; for (const m of incomingMessages) { - if (m.body?.content) { + // The multiple check detects if an AMQP message is actually a batch of messages. + // If it is, then content is an array of individual AMQP messages. + // Otherwise, it's the content of a single AMQP message (e.g. sequence body type). + if (m.body.multiple && m.body?.content) { m.body = m.body.content; } } diff --git a/sdk/eventhub/mock-hub/src/services/eventHubs.ts b/sdk/eventhub/mock-hub/src/services/eventHubs.ts index 304b903de9a0..621d67f8e5e0 100644 --- a/sdk/eventhub/mock-hub/src/services/eventHubs.ts +++ b/sdk/eventhub/mock-hub/src/services/eventHubs.ts @@ -13,7 +13,7 @@ } from "rhea"; import { MockServer, - StartOptions, + MockServerOptions, SenderOpenEvent, ReceiverOpenEvent, OnMessagesEvent, @@ -34,7 +34,16 @@ import { generateBadPartitionInfoResponse } from "../messages/event-hubs/partitionInfo"; -export interface MockEventHubOptions { +export interface IMockEventHub { + readonly partitionIds: string[]; + readonly consumerGroups: Set<string>; + readonly port: number; + + start: () => Promise<void>; + stop: () => Promise<void>; +} + +export interface MockEventHubOptions extends MockServerOptions { /** * The number of partitions for the Event Hub. * Defaults to 2. @@ -73,7 +82,7 @@ interface PartionReceiverEntityComponents { * * It stores events in memory and does not perform any auth verification. */ -export class MockEventHub { +export class MockEventHub implements IMockEventHub { /** * When the EventHub was 'created'. */ @@ -151,7 +160,7 @@ this._consumerGroups = options.consumerGroups ?? []; this._connectionInactivityTimeoutInMs = options.connectionInactivityTimeoutInMs ?? 0; - this._mockServer = new MockServer(); + this._mockServer = new MockServer(options); this._mockServer.on("receiverOpen", this._handleReceiverOpen); this._mockServer.on("senderOpen", this._handleSenderOpen); this._mockServer.on("senderClose", this._handleSenderClose); @@ -199,7 +208,6 @@ * @param event */ private _handleReceiverOpen = (event: ReceiverOpenEvent) => { - console.log(`Attempting to open receiver: ${event.entityPath}`); event.receiver.set_source(event.receiver.source); event.receiver.set_target(event.receiver.target); if (this._isReceiverPartitionEntityPath(event.entityPath)) { @@ -228,7 +236,6 @@ * @param event */ private _handleSenderOpen = (event: SenderOpenEvent) => { - console.log(`Attempting to open sender: ${event.entityPath}`); event.sender.set_source(event.sender.source); event.sender.set_target(event.sender.target); if (event.entityPath === "$cbs") { @@ -294,7 +301,7 @@ // Probably should close the sender at this point. event.sender.close({ condition: "amqp:internal-error", - description: err?.message ?? "" + description: (err as any)?.message ??
"" }); } } @@ -363,8 +370,6 @@ export class MockEventHub { * @param event */ private _handleOnMessages = (event: OnMessagesEvent) => { - console.log(`message entityPath: "${event.entityPath}"`); - // Handle batched messages first. if (event.entityPath === this._name) { // received a message without a partition id @@ -481,7 +486,6 @@ export class MockEventHub { const maxMessageSize = event.context.receiver?.get_option("max_message_size", 1024 * 1024) ?? 1024 * 1024; if (deliverySize >= maxMessageSize) { - console.log("too large!"); delivery.reject({ condition: "amqp:link:message-size-exceeded", description: `The received message (delivery-id:${ @@ -697,11 +701,9 @@ export class MockEventHub { /** * Starts the service. - * @param options */ - start(options: StartOptions) { - // this.enableDebug(1000); - return this._mockServer.start(options); + start() { + return this._mockServer.start(); } /**