diff --git a/Aspire.sln b/Aspire.sln
index 54c9d0e814..ac4e8cda51 100644
--- a/Aspire.sln
+++ b/Aspire.sln
@@ -186,6 +186,10 @@ Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "OrleansServer", "samples\or
EndProject
Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "ServiceDefaults", "samples\orleans\ServiceDefaults\ServiceDefaults.csproj", "{F7D9FA54-1F64-4A36-961A-0087F8E88D07}"
EndProject
+Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Aspire.Confluent.Kafka", "src\Components\Aspire.Confluent.Kafka\Aspire.Confluent.Kafka.csproj", "{174E0507-3BB0-4CDC-829E-9CA75DA66473}"
+EndProject
+Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Aspire.Confluent.Kafka.Tests", "tests\Aspire.Confluent.Kafka.Tests\Aspire.Confluent.Kafka.Tests.csproj", "{A8CB331A-1247-41D9-8118-538E5A2CC9DF}"
+EndProject
Global
GlobalSection(SolutionConfigurationPlatforms) = preSolution
Debug|Any CPU = Debug|Any CPU
@@ -496,6 +500,14 @@ Global
{F7D9FA54-1F64-4A36-961A-0087F8E88D07}.Debug|Any CPU.Build.0 = Debug|Any CPU
{F7D9FA54-1F64-4A36-961A-0087F8E88D07}.Release|Any CPU.ActiveCfg = Release|Any CPU
{F7D9FA54-1F64-4A36-961A-0087F8E88D07}.Release|Any CPU.Build.0 = Release|Any CPU
+ {174E0507-3BB0-4CDC-829E-9CA75DA66473}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
+ {174E0507-3BB0-4CDC-829E-9CA75DA66473}.Debug|Any CPU.Build.0 = Debug|Any CPU
+ {174E0507-3BB0-4CDC-829E-9CA75DA66473}.Release|Any CPU.ActiveCfg = Release|Any CPU
+ {174E0507-3BB0-4CDC-829E-9CA75DA66473}.Release|Any CPU.Build.0 = Release|Any CPU
+ {A8CB331A-1247-41D9-8118-538E5A2CC9DF}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
+ {A8CB331A-1247-41D9-8118-538E5A2CC9DF}.Debug|Any CPU.Build.0 = Debug|Any CPU
+ {A8CB331A-1247-41D9-8118-538E5A2CC9DF}.Release|Any CPU.ActiveCfg = Release|Any CPU
+ {A8CB331A-1247-41D9-8118-538E5A2CC9DF}.Release|Any CPU.Build.0 = Release|Any CPU
EndGlobalSection
GlobalSection(SolutionProperties) = preSolution
HideSolutionNode = FALSE
@@ -582,6 +594,8 @@ Global
{04B03D1C-45C5-44D4-AEE5-BC315F3D9D26} = {8BAF2119-8370-4E9E-A887-D92506F8C727}
{20758E81-7316-49AC-8E1B-A5461397530A} = {8BAF2119-8370-4E9E-A887-D92506F8C727}
{F7D9FA54-1F64-4A36-961A-0087F8E88D07} = {8BAF2119-8370-4E9E-A887-D92506F8C727}
+ {174E0507-3BB0-4CDC-829E-9CA75DA66473} = {27381127-6C45-4B4C-8F18-41FF48DFE4B2}
+ {A8CB331A-1247-41D9-8118-538E5A2CC9DF} = {4981B3A5-4AFD-4191-BF7D-8692D9783D60}
EndGlobalSection
GlobalSection(ExtensibilityGlobals) = postSolution
SolutionGuid = {6DCEDFEC-988E-4CB3-B45B-191EB5086E0C}
diff --git a/Directory.Packages.props b/Directory.Packages.props
index 376258cf03..cc23dcb1c7 100644
--- a/Directory.Packages.props
+++ b/Directory.Packages.props
@@ -37,6 +37,7 @@
+
@@ -64,6 +65,7 @@
+
@@ -111,5 +113,7 @@
+
+
\ No newline at end of file
diff --git a/src/Aspire.Hosting/Kafka/KafkaBuilderExtensions.cs b/src/Aspire.Hosting/Kafka/KafkaBuilderExtensions.cs
new file mode 100644
index 0000000000..31d2fa17ac
--- /dev/null
+++ b/src/Aspire.Hosting/Kafka/KafkaBuilderExtensions.cs
@@ -0,0 +1,80 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+
+using Aspire.Hosting.ApplicationModel;
+using Aspire.Hosting.Publishing;
+
+namespace Aspire.Hosting;
+
+public static class KafkaBuilderExtensions
+{
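+ // 9092 is the conventional Kafka broker port; it is used as the container port for the resources below.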
+ private const int KafkaBrokerPort = 9092;
+ /// <summary>
+ /// Adds a Kafka broker container to the application.
+ /// </summary>
+ /// <param name="builder">The <see cref="IDistributedApplicationBuilder"/>.</param>
+ /// <param name="name">The name of the resource. This name will be used as the connection string name when referenced in a dependency.</param>
+ /// <param name="port">The host port of Kafka broker.</param>
+ /// <returns>A reference to the <see cref="IResourceBuilder{T}"/> for the <see cref="KafkaContainerResource"/>.</returns>
+ public static IResourceBuilder<KafkaContainerResource> AddKafkaContainer(this IDistributedApplicationBuilder builder, string name, int? port = null)
+ {
+ var kafka = new KafkaContainerResource(name);
+ return builder.AddResource(kafka)
+ .WithEndpoint(hostPort: port, containerPort: KafkaBrokerPort)
+ .WithAnnotation(new ContainerImageAnnotation { Image = "confluentinc/confluent-local", Tag = "latest" })
+ .WithManifestPublishingCallback(context => WriteKafkaContainerToManifest(context, kafka))
+ .WithEnvironment(context => ConfigureKafkaContainer(context, kafka));
+
+ static void WriteKafkaContainerToManifest(ManifestPublishingContext context, KafkaContainerResource resource)
+ {
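+ // Emit the container definition plus a connection string expression that resolves to the broker's host:port when the manifest is deployed.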
+ context.WriteContainer(resource);
+ context.Writer.WriteString("connectionString", $"{{{resource.Name}.bindings.tcp.host}}:{{{resource.Name}.bindings.tcp.port}}");
+ }
+ }
+
+ /// <summary>
+ /// Adds a Kafka resource to the application. A container is used for local development.
+ /// </summary>
+ /// <param name="builder">The <see cref="IDistributedApplicationBuilder"/>.</param>
+ /// <param name="name">The name of the resource. This name will be used as the connection string name when referenced in a dependency.</param>
+ /// <returns>A reference to the <see cref="IResourceBuilder{T}"/> for the <see cref="KafkaServerResource"/>.</returns>
+ public static IResourceBuilder<KafkaServerResource> AddKafka(this IDistributedApplicationBuilder builder, string name)
+ {
+ var kafka = new KafkaServerResource(name);
+ return builder.AddResource(kafka)
+ .WithEndpoint(containerPort: KafkaBrokerPort)
+ .WithAnnotation(new ContainerImageAnnotation{ Image = "confluentinc/confluent-local", Tag = "latest" })
+ .WithManifestPublishingCallback(WriteKafkaServerToManifest)
+ .WithEnvironment(context => ConfigureKafkaContainer(context, kafka));
+
+ static void WriteKafkaServerToManifest(ManifestPublishingContext context)
+ {
+ context.Writer.WriteString("type", "kafka.server.v0");
+ }
+ }
+
+ private static void ConfigureKafkaContainer(EnvironmentCallbackContext context, IResource resource)
+ {
+ // confluentinc/confluent-local is a docker image that contains a Kafka broker started with KRaft to avoid pulling a separate image for ZooKeeper.
+ // See https://github.com/confluentinc/kafka-images/blob/master/local/README.md.
+ // When a setting is not explicitly provided, the image's default configuration is applied.
+ // See https://github.com/confluentinc/kafka-images/blob/master/local/include/etc/confluent/docker/configureDefaults for more details.
+
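+ // When publishing the manifest no endpoint has been allocated yet, so the container port is advertised; at run time the allocated host port is used.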
+ var hostPort = context.PublisherName == "manifest"
+ ? KafkaBrokerPort
+ : GetResourcePort(resource);
+ context.EnvironmentVariables.Add("KAFKA_ADVERTISED_LISTENERS",
+ $"PLAINTEXT://localhost:29092,PLAINTEXT_HOST://localhost:{hostPort}");
+
+ static int GetResourcePort(IResource resource)
+ {
+ if (!resource.TryGetAllocatedEndPoints(out var allocatedEndpoints))
+ {
+ throw new DistributedApplicationException(
+ $"Kafka resource \"{resource.Name}\" does not have endpoint annotation.");
+ }
+
+ return allocatedEndpoints.Single().Port;
+ }
+ }
+}
diff --git a/src/Aspire.Hosting/Kafka/KafkaContainerResource.cs b/src/Aspire.Hosting/Kafka/KafkaContainerResource.cs
new file mode 100644
index 0000000000..f1b0fc0fb1
--- /dev/null
+++ b/src/Aspire.Hosting/Kafka/KafkaContainerResource.cs
@@ -0,0 +1,37 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+
+using Aspire.Hosting.ApplicationModel;
+
+namespace Aspire.Hosting;
+
+/// <summary>
+/// A resource that represents a Kafka broker container.
+/// </summary>
+/// <param name="name">The name of the resource.</param>
+public class KafkaContainerResource(string name) : ContainerResource(name), IResourceWithConnectionString, IResourceWithEnvironment
+{
+ /// <summary>
+ /// Gets the connection string for Kafka broker.
+ /// </summary>
+ /// <returns>A connection string for the Kafka broker in the form "host:port" to be passed as BootstrapServers.</returns>
+ public string? GetConnectionString()
+ {
+ if (!this.TryGetAllocatedEndPoints(out var allocatedEndpoints))
+ {
+ throw new DistributedApplicationException($"Kafka resource \"{Name}\" does not have endpoint annotation.");
+ }
+
+ return allocatedEndpoints.SingleOrDefault()?.EndPointString;
+ }
+
+ internal int GetPort()
+ {
+ if (!this.TryGetAllocatedEndPoints(out var allocatedEndpoints))
+ {
+ throw new DistributedApplicationException($"Kafka resource \"{Name}\" does not have endpoint annotation.");
+ }
+
+ return allocatedEndpoints.Single().Port;
+ }
+}
diff --git a/src/Aspire.Hosting/Kafka/KafkaServerResource.cs b/src/Aspire.Hosting/Kafka/KafkaServerResource.cs
new file mode 100644
index 0000000000..ac96db91af
--- /dev/null
+++ b/src/Aspire.Hosting/Kafka/KafkaServerResource.cs
@@ -0,0 +1,35 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+
+namespace Aspire.Hosting.ApplicationModel;
+
+/// <summary>
+/// A resource that represents a Kafka broker.
+/// </summary>
+/// <param name="name">The name of the resource.</param>
+public class KafkaServerResource(string name) : Resource(name), IResourceWithConnectionString, IResourceWithEnvironment
+{
+ /// <summary>
+ /// Gets the connection string for Kafka broker.
+ /// </summary>
+ /// <returns>A connection string for the Kafka broker in the form "host:port" to be passed as BootstrapServers.</returns>
+ public string? GetConnectionString()
+ {
+ if (!this.TryGetAllocatedEndPoints(out var allocatedEndpoints))
+ {
+ throw new DistributedApplicationException($"Kafka resource \"{Name}\" does not have endpoint annotation.");
+ }
+
+ return allocatedEndpoints.SingleOrDefault()?.EndPointString;
+ }
+
+ internal int GetPort()
+ {
+ if (!this.TryGetAllocatedEndPoints(out var allocatedEndpoints))
+ {
+ throw new DistributedApplicationException($"Kafka resource \"{Name}\" does not have endpoint annotation.");
+ }
+
+ return allocatedEndpoints.Single().Port;
+ }
+}
diff --git a/src/Components/Aspire.Confluent.Kafka/Aspire.Confluent.Kafka.csproj b/src/Components/Aspire.Confluent.Kafka/Aspire.Confluent.Kafka.csproj
new file mode 100644
index 0000000000..08e93806f1
--- /dev/null
+++ b/src/Components/Aspire.Confluent.Kafka/Aspire.Confluent.Kafka.csproj
@@ -0,0 +1,23 @@
+<Project Sdk="Microsoft.NET.Sdk">
+
+  <PropertyGroup>
+    <TargetFramework>$(NetCurrent)</TargetFramework>
+    <IsPackable>true</IsPackable>
+    <PackageTags>$(ComponentCommonPackageTags) kafka</PackageTags>
+    <Description>Confluent.Kafka based Kafka generic consumer and producer that integrates with Aspire, including healthchecks and metrics.</Description>
+    <NoWarn>$(NoWarn);SYSLIB1100</NoWarn>
+  </PropertyGroup>
+
+
+
+
+
+
+
+
+
+
+
+
+
+</Project>
diff --git a/src/Components/Aspire.Confluent.Kafka/AspireKafkaConsumerExtensions.cs b/src/Components/Aspire.Confluent.Kafka/AspireKafkaConsumerExtensions.cs
new file mode 100644
index 0000000000..52495eefd8
--- /dev/null
+++ b/src/Components/Aspire.Confluent.Kafka/AspireKafkaConsumerExtensions.cs
@@ -0,0 +1,174 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+
+using Aspire;
+using Aspire.Confluent.Kafka;
+using Confluent.Kafka;
+using HealthChecks.Kafka;
+using Microsoft.Extensions.Configuration;
+using Microsoft.Extensions.DependencyInjection;
+using Microsoft.Extensions.DependencyInjection.Extensions;
+using Microsoft.Extensions.Diagnostics.HealthChecks;
+using Microsoft.Extensions.Logging;
+
+namespace Microsoft.Extensions.Hosting;
+
+/// <summary>
+/// Extension methods for connecting to a Kafka broker.
+/// </summary>
+public static class AspireKafkaConsumerExtensions
+{
+ private const string DefaultConfigSectionName = "Aspire:Confluent:Kafka:Consumer";
+
+ /// <summary>
+ /// Registers <see cref="IConsumer{TKey,TValue}"/> as a singleton in the services provided by the <paramref name="builder"/>.
+ /// </summary>
+ /// <param name="builder">The <see cref="IHostApplicationBuilder"/> to read config from and add services to.</param>
+ /// <param name="connectionName">A name used to retrieve the connection string from the ConnectionStrings configuration section.</param>
+ /// <param name="configureSettings">An optional method for customizing the <see cref="KafkaConsumerSettings"/>.</param>
+ /// <param name="configureBuilder">An optional method used for customizing the <see cref="ConsumerBuilder{TKey,TValue}"/>.</param>
+ /// <remarks>Reads the configuration from "Aspire:Confluent:Kafka:Consumer" section.</remarks>
+ public static void AddKafkaConsumer<TKey, TValue>(this IHostApplicationBuilder builder, string connectionName, Action<KafkaConsumerSettings>? configureSettings = null, Action<ConsumerBuilder<TKey, TValue>>? configureBuilder = null)
+ => AddKafkaConsumer(builder, DefaultConfigSectionName, configureSettings, configureBuilder, connectionName, serviceKey: null);
+
+ /// <summary>
+ /// Registers <see cref="IConsumer{TKey,TValue}"/> as a keyed singleton for the given <paramref name="name"/> in the services provided by the <paramref name="builder"/>.
+ /// </summary>
+ /// <param name="builder">The <see cref="IHostApplicationBuilder"/> to read config from and add services to.</param>
+ /// <param name="name">The name of the component, which is used as the <see cref="ServiceDescriptor.ServiceKey"/> of the service and also to retrieve the connection string from the ConnectionStrings configuration section.</param>
+ /// <param name="configureSettings">An optional method for customizing the <see cref="KafkaConsumerSettings"/>.</param>
+ /// <param name="configureBuilder">An optional method used for customizing the <see cref="ConsumerBuilder{TKey,TValue}"/>.</param>
+ /// <remarks>Reads the configuration from "Aspire:Confluent:Kafka:Consumer:{name}" section.</remarks>
+ public static void AddKeyedKafkaConsumer<TKey, TValue>(this IHostApplicationBuilder builder, string name, Action<KafkaConsumerSettings>? configureSettings = null, Action<ConsumerBuilder<TKey, TValue>>? configureBuilder = null)
+ {
+ ArgumentException.ThrowIfNullOrEmpty(name);
+
+ AddKafkaConsumer(builder, $"{DefaultConfigSectionName}:{name}", configureSettings, configureBuilder, connectionName: name, serviceKey: name);
+ }
+
+ private static void AddKafkaConsumer<TKey, TValue>(
+ IHostApplicationBuilder builder,
+ string configurationSectionName,
+ Action<KafkaConsumerSettings>? configureSettings,
+ Action<ConsumerBuilder<TKey, TValue>>? configureBuilder,
+ string connectionName,
+ string? serviceKey)
+ {
+ ArgumentNullException.ThrowIfNull(builder);
+
+ var settings = BuildConsumerSettings(builder, configurationSectionName, configureSettings, connectionName);
+
+ if (serviceKey is null)
+ {
+ builder.Services.AddSingleton<ConsumerConnectionFactory<TKey, TValue>>(sp => CreateConsumerConnectionFactory(sp, configureBuilder, settings));
+ builder.Services.AddSingleton<IConsumer<TKey, TValue>>(sp => sp.GetRequiredService<ConsumerConnectionFactory<TKey, TValue>>().Create());
+ }
+ else
+ {
+ builder.Services.AddKeyedSingleton<ConsumerConnectionFactory<TKey, TValue>>(serviceKey, (sp, key) => CreateConsumerConnectionFactory(sp, configureBuilder, settings));
+ builder.Services.AddKeyedSingleton<IConsumer<TKey, TValue>>(serviceKey, (sp, key) => sp.GetRequiredKeyedService<ConsumerConnectionFactory<TKey, TValue>>(key).Create());
+ }
+
+ if (settings.Metrics)
+ {
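+ // Register the metrics pipeline and expose the component's meter through OpenTelemetry.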
+ builder.Services.TryAddSingleton<MetricsChannel>();
+ builder.Services.AddHostedService<MetricsService>();
+ builder.Services.TryAddSingleton<ConfluentKafkaMetrics>();
+ builder.Services.AddOpenTelemetry().WithMetrics(metricBuilderProvider => metricBuilderProvider.AddMeter(ConfluentKafkaCommon.MeterName));
+ }
+
+ if (settings.HealthChecks)
+ {
+ string healthCheckName = serviceKey is null
+ ? ConfluentKafkaCommon.ConsumerHealthCheckName
+ : string.Concat(ConfluentKafkaCommon.KeyedConsumerHealthCheckName, connectionName);
+
+ builder.TryAddHealthCheck(new HealthCheckRegistration(healthCheckName,
+ sp =>
+ {
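+ // Probe the broker with short socket/message timeouts so an unreachable broker fails the health check quickly.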
+ var connectionFactory = serviceKey is null
+ ? sp.GetRequiredService<ConsumerConnectionFactory<TKey, TValue>>()
+ : sp.GetRequiredKeyedService<ConsumerConnectionFactory<TKey, TValue>>(serviceKey);
+
+ var options = new KafkaHealthCheckOptions();
+ options.Configuration = new ProducerConfig(connectionFactory.Config.ToDictionary());
+ options.Configuration.SocketTimeoutMs = 1000;
+ options.Configuration.MessageTimeoutMs = 1000;
+ options.Configuration.StatisticsIntervalMs = 0;
+ return new KafkaHealthCheck(options);
+ },
+ failureStatus: default,
+ tags: default));
+ }
+ }
+
+ private static ConsumerConnectionFactory<TKey, TValue> CreateConsumerConnectionFactory<TKey, TValue>(IServiceProvider serviceProvider, Action<ConsumerBuilder<TKey, TValue>>? configureBuilder, KafkaConsumerSettings settings)
+ => new(CreateConsumerBuilder(serviceProvider, configureBuilder, settings), settings.Config);
+
+ private static ConsumerBuilder<TKey, TValue> CreateConsumerBuilder<TKey, TValue>(IServiceProvider serviceProvider, Action<ConsumerBuilder<TKey, TValue>>? configureBuilder, KafkaConsumerSettings settings)
+ {
+ settings.Validate();
+
+ ConsumerBuilder<TKey, TValue> builder = new(settings.Config);
+ ILogger logger = serviceProvider.GetRequiredService<ILoggerFactory>().CreateLogger(ConfluentKafkaCommon.LogCategoryName);
+ configureBuilder?.Invoke(builder);
+
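+ // Confluent.Kafka allows only a single log handler; if configureBuilder already set one, keep the user's handler and log a warning instead.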
+ try
+ {
+ void OnLog(IConsumer<TKey, TValue> _, LogMessage logMessage) =>
+ logger.Log((LogLevel)logMessage.LevelAs(LogLevelType.MicrosoftExtensionsLogging), logMessage.Facility?.GetHashCode() ?? 0, logMessage.Message, null, static (value, ex) => value);
+
+ builder.SetLogHandler(OnLog);
+ }
+ catch (InvalidOperationException)
+ {
+ logger.LogWarning("LogHandler is already set. Skipping... No logs will be written.");
+ }
+
+ if (settings.Metrics)
+ {
+ MetricsChannel channel = serviceProvider.GetRequiredService<MetricsChannel>();
+ void OnStatistics(IConsumer<TKey, TValue> _, string json)
+ {
+ if (string.IsNullOrEmpty(json))
+ {
+ return;
+ }
+
+ // StatisticsHandler is called on the consume thread, so we need to offload the processing
+ // to avoid slowing the consumer down.
+ channel.Writer.TryWrite(json);
+ }
+
+ try
+ {
+ builder.SetStatisticsHandler(OnStatistics);
+ }
+ catch (InvalidOperationException)
+ {
+ logger.LogWarning("StatisticsHandler is already set. Skipping... No metrics will be exposed.");
+ }
+ }
+ return builder;
+ }
+
+ private static KafkaConsumerSettings BuildConsumerSettings(IHostApplicationBuilder builder, string configurationSectionName, Action<KafkaConsumerSettings>? configureSettings, string connectionName)
+ {
+ var configSection = builder.Configuration.GetSection(configurationSectionName);
+ KafkaConsumerSettings settings = new();
+ configSection.Bind(settings);
+
+ // Manually bind the ConsumerConfig until https://github.com/dotnet/runtime/issues/96652 is fixed
+ configSection.GetSection(nameof(KafkaConsumerSettings.Config)).Bind(settings.Config);
+
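+ // Pick up the connection string registered under the connection name, if one is present.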
+ if (builder.Configuration.GetConnectionString(connectionName) is string connectionString)
+ {
+ settings.ConnectionString = connectionString;
+ }
+
+ configureSettings?.Invoke(settings);
+
+ settings.Consolidate();
+ return settings;
+ }
+}
diff --git a/src/Components/Aspire.Confluent.Kafka/AspireKafkaProducerExtensions.cs b/src/Components/Aspire.Confluent.Kafka/AspireKafkaProducerExtensions.cs
new file mode 100644
index 0000000000..42fbd06416
--- /dev/null
+++ b/src/Components/Aspire.Confluent.Kafka/AspireKafkaProducerExtensions.cs
@@ -0,0 +1,174 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+
+using Aspire;
+using Aspire.Confluent.Kafka;
+using Confluent.Kafka;
+using HealthChecks.Kafka;
+using Microsoft.Extensions.Configuration;
+using Microsoft.Extensions.DependencyInjection;
+using Microsoft.Extensions.DependencyInjection.Extensions;
+using Microsoft.Extensions.Diagnostics.HealthChecks;
+using Microsoft.Extensions.Logging;
+
+namespace Microsoft.Extensions.Hosting;
+
+/// <summary>
+/// Extension methods for connecting to a Kafka broker.
+/// </summary>
+public static class AspireKafkaProducerExtensions
+{
+ private const string DefaultConfigSectionName = "Aspire:Confluent:Kafka:Producer";
+
+ /// <summary>
+ /// Registers <see cref="IProducer{TKey,TValue}"/> as a singleton in the services provided by the <paramref name="builder"/>.
+ /// </summary>
+ /// <param name="builder">The <see cref="IHostApplicationBuilder"/> to read config from and add services to.</param>
+ /// <param name="connectionName">A name used to retrieve the connection string from the ConnectionStrings configuration section.</param>
+ /// <param name="configureSettings">An optional method used for customizing the <see cref="KafkaProducerSettings"/>.</param>
+ /// <param name="configureBuilder">An optional method used for customizing the <see cref="ProducerBuilder{TKey,TValue}"/>.</param>
+ /// <remarks>Reads the configuration from "Aspire:Confluent:Kafka:Producer" section.</remarks>
+ public static void AddKafkaProducer<TKey, TValue>(this IHostApplicationBuilder builder, string connectionName, Action<KafkaProducerSettings>? configureSettings = null, Action<ProducerBuilder<TKey, TValue>>? configureBuilder = null)
+ => AddKafkaProducer(builder, DefaultConfigSectionName, configureSettings, configureBuilder, connectionName, serviceKey: null);
+
+ /// <summary>
+ /// Registers <see cref="IProducer{TKey,TValue}"/> as a keyed singleton for the given <paramref name="name"/> in the services provided by the <paramref name="builder"/>.
+ /// </summary>
+ /// <param name="builder">The <see cref="IHostApplicationBuilder"/> to read config from and add services to.</param>
+ /// <param name="name">The name of the component, which is used as the <see cref="ServiceDescriptor.ServiceKey"/> of the service and also to retrieve the connection string from the ConnectionStrings configuration section.</param>
+ /// <param name="configureSettings">An optional method used for customizing the <see cref="KafkaProducerSettings"/>.</param>
+ /// <param name="configureBuilder">An optional method used for customizing the <see cref="ProducerBuilder{TKey,TValue}"/>.</param>
+ /// <remarks>Reads the configuration from "Aspire:Confluent:Kafka:Producer:{name}" section.</remarks>
+ public static void AddKeyedKafkaProducer<TKey, TValue>(this IHostApplicationBuilder builder, string name, Action<KafkaProducerSettings>? configureSettings = null, Action<ProducerBuilder<TKey, TValue>>? configureBuilder = null)
+ {
+ ArgumentException.ThrowIfNullOrEmpty(name);
+
+ AddKafkaProducer(builder, $"{DefaultConfigSectionName}:{name}", configureSettings, configureBuilder, connectionName: name, serviceKey: name);
+ }
+
+ private static void AddKafkaProducer<TKey, TValue>(
+ IHostApplicationBuilder builder,
+ string configurationSectionName,
+ Action<KafkaProducerSettings>? configureSettings,
+ Action<ProducerBuilder<TKey, TValue>>? configureBuilder,
+ string connectionName,
+ string? serviceKey)
+ {
+ ArgumentNullException.ThrowIfNull(builder);
+
+ var settings = BuildProducerSettings(builder, configurationSectionName, configureSettings, connectionName);
+
+ if (serviceKey is null)
+ {
+ builder.Services.AddSingleton<ProducerConnectionFactory<TKey, TValue>>(sp => CreateProducerConnectionFactory(sp, configureBuilder, settings));
+ builder.Services.AddSingleton<IProducer<TKey, TValue>>(sp => sp.GetRequiredService<ProducerConnectionFactory<TKey, TValue>>().Create());
+ }
+ else
+ {
+ builder.Services.AddKeyedSingleton<ProducerConnectionFactory<TKey, TValue>>(serviceKey, (sp, key) => CreateProducerConnectionFactory(sp, configureBuilder, settings));
+ builder.Services.AddKeyedSingleton<IProducer<TKey, TValue>>(serviceKey, (sp, key) => sp.GetRequiredKeyedService<ProducerConnectionFactory<TKey, TValue>>(key).Create());
+ }
+
+ if (settings.Metrics)
+ {
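+ // Register the metrics pipeline and expose the component's meter through OpenTelemetry.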
+ builder.Services.TryAddSingleton<MetricsChannel>();
+ builder.Services.AddHostedService<MetricsService>();
+ builder.Services.TryAddSingleton<ConfluentKafkaMetrics>();
+ builder.Services.AddOpenTelemetry().WithMetrics(metricBuilderProvider => metricBuilderProvider.AddMeter(ConfluentKafkaCommon.MeterName));
+ }
+
+ if (settings.HealthChecks)
+ {
+ string healthCheckName = serviceKey is null
+ ? ConfluentKafkaCommon.ProducerHealthCheckName
+ : string.Concat(ConfluentKafkaCommon.KeyedProducerHealthCheckName, connectionName);
+
+ builder.TryAddHealthCheck(new HealthCheckRegistration(healthCheckName,
+ sp =>
+ {
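+ // Probe the broker with short socket/message timeouts so an unreachable broker fails the health check quickly.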
+ var connectionFactory = serviceKey is null
+ ? sp.GetRequiredService<ProducerConnectionFactory<TKey, TValue>>()
+ : sp.GetRequiredKeyedService<ProducerConnectionFactory<TKey, TValue>>(serviceKey);
+
+ var options = new KafkaHealthCheckOptions();
+ options.Configuration = new ProducerConfig(connectionFactory.Config.ToDictionary());
+ options.Configuration.SocketTimeoutMs = 1000;
+ options.Configuration.MessageTimeoutMs = 1000;
+ options.Configuration.StatisticsIntervalMs = 0;
+ return new KafkaHealthCheck(options);
+ },
+ failureStatus: default,
+ tags: default));
+ }
+ }
+
+ private static ProducerConnectionFactory<TKey, TValue> CreateProducerConnectionFactory<TKey, TValue>(IServiceProvider serviceProvider, Action<ProducerBuilder<TKey, TValue>>? configureBuilder, KafkaProducerSettings settings)
+ => new(CreateProducerBuilder(serviceProvider, configureBuilder, settings), settings.Config);
+
+ private static ProducerBuilder<TKey, TValue> CreateProducerBuilder<TKey, TValue>(IServiceProvider serviceProvider, Action<ProducerBuilder<TKey, TValue>>? configureBuilder, KafkaProducerSettings settings)
+ {
+ settings.Validate();
+
+ ProducerBuilder<TKey, TValue> builder = new(settings.Config);
+ ILogger logger = serviceProvider.GetRequiredService<ILoggerFactory>().CreateLogger(ConfluentKafkaCommon.LogCategoryName);
+ configureBuilder?.Invoke(builder);
+
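+ // Confluent.Kafka allows only a single log handler; if configureBuilder already set one, keep the user's handler and log a warning instead.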
+ try
+ {
+ void OnLog(IProducer<TKey, TValue> _, LogMessage logMessage) =>
+ logger.Log((LogLevel)logMessage.LevelAs(LogLevelType.MicrosoftExtensionsLogging), logMessage.Facility?.GetHashCode() ?? 0, logMessage.Message, null, static (value, ex) => value);
+
+ builder.SetLogHandler(OnLog);
+ }
+ catch (InvalidOperationException)
+ {
+ logger.LogWarning("LogHandler is already set. Skipping... No logs will be written.");
+ }
+
+ if (settings.Metrics)
+ {
+ MetricsChannel channel = serviceProvider.GetRequiredService<MetricsChannel>();
+ void OnStatistics(IProducer<TKey, TValue> _, string json)
+ {
+ if (string.IsNullOrEmpty(json))
+ {
+ return;
+ }
+
+ // StatisticsHandler is called on the producer poll thread, we need to offload the processing
+ // to avoid slowing the producer down.
+ channel.Writer.TryWrite(json);
+ }
+
+ try
+ {
+ builder.SetStatisticsHandler(OnStatistics);
+ }
+ catch (InvalidOperationException)
+ {
+ logger.LogWarning("StatisticsHandler is already set. Skipping... No metrics will be exposed.");
+ }
+ }
+ return builder;
+ }
+
+ private static KafkaProducerSettings BuildProducerSettings(IHostApplicationBuilder builder, string configurationSectionName, Action<KafkaProducerSettings>? configureSettings, string connectionName)
+ {
+ var configSection = builder.Configuration.GetSection(configurationSectionName);
+ KafkaProducerSettings settings = new();
+ configSection.Bind(settings);
+
+ // Manually bind the ProducerConfig until https://github.com/dotnet/runtime/issues/96652 is fixed
+ configSection.GetSection(nameof(KafkaProducerSettings.Config)).Bind(settings.Config);
+
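+ // Pick up the connection string registered under the connection name, if one is present.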
+ if (builder.Configuration.GetConnectionString(connectionName) is string connectionString)
+ {
+ settings.ConnectionString = connectionString;
+ }
+
+ configureSettings?.Invoke(settings);
+
+ settings.Consolidate();
+ return settings;
+ }
+}
diff --git a/src/Components/Aspire.Confluent.Kafka/ConfigurationSchema.json b/src/Components/Aspire.Confluent.Kafka/ConfigurationSchema.json
new file mode 100644
index 0000000000..0ca3396de0
--- /dev/null
+++ b/src/Components/Aspire.Confluent.Kafka/ConfigurationSchema.json
@@ -0,0 +1,961 @@
+{
+ "definitions": {
+ "logLevel": {
+ "properties": {
+ "Aspire.Confluent.Kafka": {
+ "$ref": "#/definitions/logLevelThreshold"
+ }
+ }
+ }
+ },
+ "properties": {
+ "Aspire": {
+ "type": "object",
+ "properties": {
+ "Confluent": {
+ "type": "object",
+ "properties": {
+ "Kafka": {
+ "type": "object",
+ "properties": {
+ "Consumer": {
+ "type": "object",
+ "properties": {
+ "Config": {
+ "type": "object",
+ "properties": {
+ "Acks": {
+ "enum": [
+ "None",
+ "Leader",
+ "All"
+ ],
+ "description": "This field indicates the number of acknowledgements the leader broker must receive from ISR brokers\nbefore responding to the request: Zero=Broker does not send any response/ack to client, One=The\nleader will write the record to its local log but will respond without awaiting full acknowledgement\nfrom all followers. All=Broker will block until message is committed by all in sync replicas (ISRs).\nIf there are less than min.insync.replicas (broker configuration) in the ISR set the produce request\nwill fail."
+ },
+ "AllowAutoCreateTopics": {
+ "type": "boolean",
+ "description": "Allow automatic topic creation on the broker when subscribing to or assigning non-existent topics. The broker must also be configured with `auto.create.topics.enable=true` for this configuration to take effect. Note: the default value (true) for the producer is different from the default value (false) for the consumer. Further, the consumer default value is different from the Java consumer (true), and this property is not supported by the Java producer. Requires broker version >= 0.11.0.0, for older broker versions only the broker configuration applies.\n\ndefault: false\nimportance: low"
+ },
+ "ApiVersionFallbackMs": {
+ "type": "integer",
+ "description": "Dictates how long the `broker.version.fallback` fallback is used in the case the ApiVersionRequest fails. **NOTE**: The ApiVersionRequest is only issued when a new connection to the broker is made (such as after an upgrade).\n\ndefault: 0\nimportance: medium"
+ },
+ "ApiVersionRequest": {
+ "type": "boolean",
+ "description": "Request broker's supported API versions to adjust functionality to available protocol features. If set to false, or the ApiVersionRequest fails, the fallback version `broker.version.fallback` will be used. **NOTE**: Depends on broker version >=0.10.0. If the request is not supported by (an older) broker the `broker.version.fallback` fallback is used.\n\ndefault: true\nimportance: high"
+ },
+ "ApiVersionRequestTimeoutMs": {
+ "type": "integer",
+ "description": "Timeout for broker API version requests.\n\ndefault: 10000\nimportance: low"
+ },
+ "AutoCommitIntervalMs": {
+ "type": "integer",
+ "description": "The frequency in milliseconds that the consumer offsets are committed (written) to offset storage. (0 = disable). This setting is used by the high-level consumer.\n\ndefault: 5000\nimportance: medium"
+ },
+ "AutoOffsetReset": {
+ "enum": [
+ "Latest",
+ "Earliest",
+ "Error"
+ ],
+ "description": "Action to take when there is no initial offset in offset store or the desired offset is out of range: 'smallest','earliest' - automatically reset the offset to the smallest offset, 'largest','latest' - automatically reset the offset to the largest offset, 'error' - trigger an error (ERR__AUTO_OFFSET_RESET) which is retrieved by consuming messages and checking 'message->err'.\n\ndefault: largest\nimportance: high"
+ },
+ "BootstrapServers": {
+ "type": "string",
+ "description": "Initial list of brokers as a CSV list of broker host or host:port. The application may also use `rd_kafka_brokers_add()` to add brokers during runtime.\n\ndefault: ''\nimportance: high"
+ },
+ "BrokerAddressFamily": {
+ "enum": [
+ "Any",
+ "V4",
+ "V6"
+ ],
+ "description": "Allowed broker IP address families: any, v4, v6\n\ndefault: any\nimportance: low"
+ },
+ "BrokerAddressTtl": {
+ "type": "integer",
+ "description": "How long to cache the broker address resolving results (milliseconds).\n\ndefault: 1000\nimportance: low"
+ },
+ "BrokerVersionFallback": {
+ "type": "string",
+ "description": "Older broker versions (before 0.10.0) provide no way for a client to query for supported protocol features (ApiVersionRequest, see `api.version.request`) making it impossible for the client to know what features it may use. As a workaround a user may set this property to the expected broker version and the client will automatically adjust its feature set accordingly if the ApiVersionRequest fails (or is disabled). The fallback broker version will be used for `api.version.fallback.ms`. Valid values are: 0.9.0, 0.8.2, 0.8.1, 0.8.0. Any other value >= 0.10, such as 0.10.2.1, enables ApiVersionRequests.\n\ndefault: 0.10.0\nimportance: medium"
+ },
+ "CancellationDelayMaxMs": {
+ "type": "integer",
+ "description": "The maximum length of time (in milliseconds) before a cancellation request\nis acted on. Low values may result in measurably higher CPU usage.\n\ndefault: 100\nrange: 1 <= dotnet.cancellation.delay.max.ms <= 10000\nimportance: low"
+ },
+ "CheckCrcs": {
+ "type": "boolean",
+ "description": "Verify CRC32 of consumed messages, ensuring no on-the-wire or on-disk corruption to the messages occurred. This check comes at slightly increased CPU usage.\n\ndefault: false\nimportance: medium"
+ },
+ "ClientDnsLookup": {
+ "enum": [
+ "UseAllDnsIps",
+ "ResolveCanonicalBootstrapServersOnly"
+ ],
+ "description": "Controls how the client uses DNS lookups. By default, when the lookup returns multiple IP addresses for a hostname, they will all be attempted for connection before the connection is considered failed. This applies to both bootstrap and advertised servers. If the value is set to `resolve_canonical_bootstrap_servers_only`, each entry will be resolved and expanded into a list of canonical names. NOTE: Default here is different from the Java client's default behavior, which connects only to the first IP address returned for a hostname.\n\ndefault: use_all_dns_ips\nimportance: low"
+ },
+ "ClientId": {
+ "type": "string",
+ "description": "Client identifier.\n\ndefault: rdkafka\nimportance: low"
+ },
+ "ClientRack": {
+ "type": "string",
+ "description": "A rack identifier for this client. This can be any string value which indicates where this client is physically located. It corresponds with the broker config `broker.rack`.\n\ndefault: ''\nimportance: low"
+ },
+ "ConnectionsMaxIdleMs": {
+ "type": "integer",
+ "description": "Close broker connections after the specified time of inactivity. Disable with 0. If this property is left at its default value some heuristics are performed to determine a suitable default value, this is currently limited to identifying brokers on Azure (see librdkafka issue #3109 for more info).\n\ndefault: 0\nimportance: medium"
+ },
+ "ConsumeResultFields": {
+ "type": "string",
+ "description": "A comma separated list of fields that may be optionally set\nin 'Confluent.Kafka.ConsumeResult`2' objects returned by the 'Confluent.Kafka.Consumer`2.Consume(System.TimeSpan)' method. Disabling fields that you do not require will improve\nthroughput and reduce memory consumption. Allowed values:\nheaders, timestamp, topic, all, none\n\ndefault: all\nimportance: low"
+ },
+ "CoordinatorQueryIntervalMs": {
+ "type": "integer",
+ "description": "How often to query for the current client group coordinator. If the currently assigned coordinator is down the configured query interval will be divided by ten to more quickly recover in case of coordinator reassignment.\n\ndefault: 600000\nimportance: low"
+ },
+ "Debug": {
+ "type": "string",
+ "description": "A comma-separated list of debug contexts to enable. Detailed Producer debugging: broker,topic,msg. Consumer: consumer,cgrp,topic,fetch\n\ndefault: ''\nimportance: medium"
+ },
+ "EnableAutoCommit": {
+ "type": "boolean",
+ "description": "Automatically and periodically commit offsets in the background. Note: setting this to false does not prevent the consumer from fetching previously committed start offsets. To circumvent this behaviour set specific start offsets per partition in the call to assign().\n\ndefault: true\nimportance: high"
+ },
+ "EnableAutoOffsetStore": {
+ "type": "boolean",
+ "description": "Automatically store offset of last message provided to application. The offset store is an in-memory store of the next offset to (auto-)commit for each partition.\n\ndefault: true\nimportance: high"
+ },
+ "EnablePartitionEof": {
+ "type": "boolean",
+ "description": "Emit RD_KAFKA_RESP_ERR__PARTITION_EOF event whenever the consumer reaches the end of a partition.\n\ndefault: false\nimportance: low"
+ },
+ "EnableRandomSeed": {
+ "type": "boolean",
+ "description": "If enabled librdkafka will initialize the PRNG with srand(current_time.milliseconds) on the first invocation of rd_kafka_new() (required only if rand_r() is not available on your platform). If disabled the application must call srand() prior to calling rd_kafka_new().\n\ndefault: true\nimportance: low"
+ },
+ "EnableSaslOauthbearerUnsecureJwt": {
+ "type": "boolean",
+ "description": "Enable the builtin unsecure JWT OAUTHBEARER token handler if no oauthbearer_refresh_cb has been set. This builtin handler should only be used for development or testing, and not in production.\n\ndefault: false\nimportance: low"
+ },
+ "EnableSslCertificateVerification": {
+ "type": "boolean",
+ "description": "Enable OpenSSL's builtin broker (server) certificate verification. This verification can be extended by the application by implementing a certificate_verify_cb.\n\ndefault: true\nimportance: low"
+ },
+ "FetchErrorBackoffMs": {
+ "type": "integer",
+ "description": "How long to postpone the next fetch request for a topic+partition in case of a fetch error.\n\ndefault: 500\nimportance: medium"
+ },
+ "FetchMaxBytes": {
+ "type": "integer",
+ "description": "Maximum amount of data the broker shall return for a Fetch request. Messages are fetched in batches by the consumer and if the first message batch in the first non-empty partition of the Fetch request is larger than this value, then the message batch will still be returned to ensure the consumer can make progress. The maximum message batch size accepted by the broker is defined via `message.max.bytes` (broker config) or `max.message.bytes` (broker topic config). `fetch.max.bytes` is automatically adjusted upwards to be at least `message.max.bytes` (consumer config).\n\ndefault: 52428800\nimportance: medium"
+ },
+ "FetchMinBytes": {
+ "type": "integer",
+ "description": "Minimum number of bytes the broker responds with. If fetch.wait.max.ms expires the accumulated data will be sent to the client regardless of this setting.\n\ndefault: 1\nimportance: low"
+ },
+ "FetchQueueBackoffMs": {
+ "type": "integer",
+ "description": "How long to postpone the next fetch request for a topic+partition in case the current fetch queue thresholds (queued.min.messages or queued.max.messages.kbytes) have been exceded. This property may need to be decreased if the queue thresholds are set low and the application is experiencing long (~1s) delays between messages. Low values may increase CPU utilization.\n\ndefault: 1000\nimportance: medium"
+ },
+ "FetchWaitMaxMs": {
+ "type": "integer",
+ "description": "Maximum time the broker may wait to fill the Fetch response with fetch.min.bytes of messages.\n\ndefault: 500\nimportance: low"
+ },
+ "GroupId": {
+ "type": "string",
+ "description": "Client group id string. All clients sharing the same group.id belong to the same group.\n\ndefault: ''\nimportance: high"
+ },
+ "GroupInstanceId": {
+ "type": "string",
+ "description": "Enable static group membership. Static group members are able to leave and rejoin a group within the configured `session.timeout.ms` without prompting a group rebalance. This should be used in combination with a larger `session.timeout.ms` to avoid group rebalances caused by transient unavailability (e.g. process restarts). Requires broker version >= 2.3.0.\n\ndefault: ''\nimportance: medium"
+ },
+ "GroupProtocolType": {
+ "type": "string",
+ "description": "Group protocol type. NOTE: Currently, the only supported group protocol type is `consumer`.\n\ndefault: consumer\nimportance: low"
+ },
+ "HeartbeatIntervalMs": {
+ "type": "integer",
+ "description": "Group session keepalive heartbeat interval.\n\ndefault: 3000\nimportance: low"
+ },
+ "InternalTerminationSignal": {
+ "type": "integer",
+ "description": "Signal that librdkafka will use to quickly terminate on rd_kafka_destroy(). If this signal is not set then there will be a delay before rd_kafka_wait_destroyed() returns true as internal threads are timing out their system calls. If this signal is set however the delay will be minimal. The application should mask this signal as an internal signal handler is installed.\n\ndefault: 0\nimportance: low"
+ },
+ "IsolationLevel": {
+ "enum": [
+ "ReadUncommitted",
+ "ReadCommitted"
+ ],
+ "description": "Controls how to read messages written transactionally: `read_committed` - only return transactional messages which have been committed. `read_uncommitted` - return all messages, even transactional messages which have been aborted.\n\ndefault: read_committed\nimportance: high"
+ },
+ "LogConnectionClose": {
+ "type": "boolean",
+ "description": "Log broker disconnects. It might be useful to turn this off when interacting with 0.9 brokers with an aggressive `connections.max.idle.ms` value.\n\ndefault: true\nimportance: low"
+ },
+ "LogQueue": {
+ "type": "boolean",
+ "description": "Disable spontaneous log_cb from internal librdkafka threads, instead enqueue log messages on queue set with `rd_kafka_set_log_queue()` and serve log callbacks or events through the standard poll APIs. **NOTE**: Log messages will linger in a temporary queue until the log queue has been set.\n\ndefault: false\nimportance: low"
+ },
+ "LogThreadName": {
+ "type": "boolean",
+ "description": "Print internal thread name in log messages (useful for debugging librdkafka internals)\n\ndefault: true\nimportance: low"
+ },
+ "MaxInFlight": {
+ "type": "integer",
+ "description": "Maximum number of in-flight requests per broker connection. This is a generic property applied to all broker communication, however it is primarily relevant to produce requests. In particular, note that other mechanisms limit the number of outstanding consumer fetch request per broker to one.\n\ndefault: 1000000\nimportance: low"
+ },
+ "MaxPartitionFetchBytes": {
+ "type": "integer",
+ "description": "Initial maximum number of bytes per topic+partition to request when fetching messages from the broker. If the client encounters a message larger than this value it will gradually try to increase it until the entire message can be fetched.\n\ndefault: 1048576\nimportance: medium"
+ },
+ "MaxPollIntervalMs": {
+ "type": "integer",
+ "description": "Maximum allowed time between calls to consume messages (e.g., rd_kafka_consumer_poll()) for high-level consumers. If this interval is exceeded the consumer is considered failed and the group will rebalance in order to reassign the partitions to another consumer group member. Warning: Offset commits may be not possible at this point. Note: It is recommended to set `enable.auto.offset.store=false` for long-time processing applications and then explicitly store offsets (using offsets_store()) *after* message processing, to make sure offsets are not auto-committed prior to processing has finished. The interval is checked two times per second. See KIP-62 for more information.\n\ndefault: 300000\nimportance: high"
+ },
+ "MessageCopyMaxBytes": {
+ "type": "integer",
+ "description": "Maximum size for message to be copied to buffer. Messages larger than this will be passed by reference (zero-copy) at the expense of larger iovecs.\n\ndefault: 65535\nimportance: low"
+ },
+ "MessageMaxBytes": {
+ "type": "integer",
+ "description": "Maximum Kafka protocol request message size. Due to differing framing overhead between protocol versions the producer is unable to reliably enforce a strict max message limit at produce time and may exceed the maximum size by one message in protocol ProduceRequests, the broker will enforce the the topic's `max.message.bytes` limit (see Apache Kafka documentation).\n\ndefault: 1000000\nimportance: medium"
+ },
+ "MetadataMaxAgeMs": {
+ "type": "integer",
+ "description": "Metadata cache max age. Defaults to topic.metadata.refresh.interval.ms * 3\n\ndefault: 900000\nimportance: low"
+ },
+ "PartitionAssignmentStrategy": {
+ "enum": [
+ "Range",
+ "RoundRobin",
+ "CooperativeSticky"
+ ],
+ "description": "The name of one or more partition assignment strategies. The elected group leader will use a strategy supported by all members of the group to assign partitions to group members. If there is more than one eligible strategy, preference is determined by the order of this list (strategies earlier in the list have higher priority). Cooperative and non-cooperative (eager) strategies must not be mixed. Available strategies: range, roundrobin, cooperative-sticky.\n\ndefault: range,roundrobin\nimportance: medium"
+ },
+ "PluginLibraryPaths": {
+ "type": "string",
+ "description": "List of plugin libraries to load (; separated). The library search path is platform dependent (see dlopen(3) for Unix and LoadLibrary() for Windows). If no filename extension is specified the platform-specific extension (such as .dll or .so) will be appended automatically.\n\ndefault: ''\nimportance: low"
+ },
+ "QueuedMaxMessagesKbytes": {
+ "type": "integer",
+ "description": "Maximum number of kilobytes of queued pre-fetched messages in the local consumer queue. If using the high-level consumer this setting applies to the single consumer queue, regardless of the number of partitions. When using the legacy simple consumer or when separate partition queues are used this setting applies per partition. This value may be overshot by fetch.message.max.bytes. This property has higher priority than queued.min.messages.\n\ndefault: 65536\nimportance: medium"
+ },
+ "QueuedMinMessages": {
+ "type": "integer",
+ "description": "Minimum number of messages per topic+partition librdkafka tries to maintain in the local consumer queue.\n\ndefault: 100000\nimportance: medium"
+ },
+ "ReceiveMessageMaxBytes": {
+ "type": "integer",
+ "description": "Maximum Kafka protocol response message size. This serves as a safety precaution to avoid memory exhaustion in case of protocol hickups. This value must be at least `fetch.max.bytes` + 512 to allow for protocol overhead; the value is adjusted automatically unless the configuration property is explicitly set.\n\ndefault: 100000000\nimportance: medium"
+ },
+ "ReconnectBackoffMaxMs": {
+ "type": "integer",
+ "description": "The maximum time to wait before reconnecting to a broker after the connection has been closed.\n\ndefault: 10000\nimportance: medium"
+ },
+ "ReconnectBackoffMs": {
+ "type": "integer",
+ "description": "The initial time to wait before reconnecting to a broker after the connection has been closed. The time is increased exponentially until `reconnect.backoff.max.ms` is reached. -25% to +50% jitter is applied to each reconnect backoff. A value of 0 disables the backoff and reconnects immediately.\n\ndefault: 100\nimportance: medium"
+ },
+ "SaslKerberosKeytab": {
+ "type": "string",
+ "description": "Path to Kerberos keytab file. This configuration property is only used as a variable in `sasl.kerberos.kinit.cmd` as ` ... -t \"%{sasl.kerberos.keytab}\"`.\n\ndefault: ''\nimportance: low"
+ },
+ "SaslKerberosKinitCmd": {
+ "type": "string",
+ "description": "Shell command to refresh or acquire the client's Kerberos ticket. This command is executed on client creation and every sasl.kerberos.min.time.before.relogin (0=disable). %{config.prop.name} is replaced by corresponding config object value.\n\ndefault: kinit -R -t \"%{sasl.kerberos.keytab}\" -k %{sasl.kerberos.principal} || kinit -t \"%{sasl.kerberos.keytab}\" -k %{sasl.kerberos.principal}\nimportance: low"
+ },
+ "SaslKerberosMinTimeBeforeRelogin": {
+ "type": "integer",
+ "description": "Minimum time in milliseconds between key refresh attempts. Disable automatic key refresh by setting this property to 0.\n\ndefault: 60000\nimportance: low"
+ },
+ "SaslKerberosPrincipal": {
+ "type": "string",
+ "description": "This client's Kerberos principal name. (Not supported on Windows, will use the logon user's principal).\n\ndefault: kafkaclient\nimportance: low"
+ },
+ "SaslKerberosServiceName": {
+ "type": "string",
+ "description": "Kerberos principal name that Kafka runs as, not including /hostname@REALM\n\ndefault: kafka\nimportance: low"
+ },
+ "SaslMechanism": {
+ "enum": [
+ "Gssapi",
+ "Plain",
+ "ScramSha256",
+ "ScramSha512",
+ "OAuthBearer"
+ ],
+ "description": "SASL mechanism to use for authentication. Supported: GSSAPI, PLAIN, SCRAM-SHA-256, SCRAM-SHA-512. **NOTE**: Despite the name, you may not configure more than one mechanism."
+ },
+ "SaslOauthbearerClientId": {
+ "type": "string",
+ "description": "Public identifier for the application. Must be unique across all clients that the authorization server handles. Only used when `sasl.oauthbearer.method` is set to \"oidc\".\n\ndefault: ''\nimportance: low"
+ },
+ "SaslOauthbearerClientSecret": {
+ "type": "string",
+ "description": "Client secret only known to the application and the authorization server. This should be a sufficiently random string that is not guessable. Only used when `sasl.oauthbearer.method` is set to \"oidc\".\n\ndefault: ''\nimportance: low"
+ },
+ "SaslOauthbearerConfig": {
+ "type": "string",
+ "description": "SASL/OAUTHBEARER configuration. The format is implementation-dependent and must be parsed accordingly. The default unsecured token implementation (see https://tools.ietf.org/html/rfc7515#appendix-A.5) recognizes space-separated name=value pairs with valid names including principalClaimName, principal, scopeClaimName, scope, and lifeSeconds. The default value for principalClaimName is \"sub\", the default value for scopeClaimName is \"scope\", and the default value for lifeSeconds is 3600. The scope value is CSV format with the default value being no/empty scope. For example: `principalClaimName=azp principal=admin scopeClaimName=roles scope=role1,role2 lifeSeconds=600`. In addition, SASL extensions can be communicated to the broker via `extension_NAME=value`. For example: `principal=admin extension_traceId=123`\n\ndefault: ''\nimportance: low"
+ },
+ "SaslOauthbearerExtensions": {
+ "type": "string",
+ "description": "Allow additional information to be provided to the broker. Comma-separated list of key=value pairs. E.g., \"supportFeatureX=true,organizationId=sales-emea\".Only used when `sasl.oauthbearer.method` is set to \"oidc\".\n\ndefault: ''\nimportance: low"
+ },
+ "SaslOauthbearerMethod": {
+ "enum": [
+ "Default",
+ "Oidc"
+ ],
+ "description": "Set to \"default\" or \"oidc\" to control which login method to be used. If set to \"oidc\", the following properties must also be be specified: `sasl.oauthbearer.client.id`, `sasl.oauthbearer.client.secret`, and `sasl.oauthbearer.token.endpoint.url`.\n\ndefault: default\nimportance: low"
+ },
+ "SaslOauthbearerScope": {
+ "type": "string",
+ "description": "Client use this to specify the scope of the access request to the broker. Only used when `sasl.oauthbearer.method` is set to \"oidc\".\n\ndefault: ''\nimportance: low"
+ },
+ "SaslOauthbearerTokenEndpointUrl": {
+ "type": "string",
+ "description": "OAuth/OIDC issuer token endpoint HTTP(S) URI used to retrieve token. Only used when `sasl.oauthbearer.method` is set to \"oidc\".\n\ndefault: ''\nimportance: low"
+ },
+ "SaslPassword": {
+ "type": "string",
+ "description": "SASL password for use with the PLAIN and SASL-SCRAM-.. mechanism\n\ndefault: ''\nimportance: high"
+ },
+ "SaslUsername": {
+ "type": "string",
+ "description": "SASL username for use with the PLAIN and SASL-SCRAM-.. mechanisms\n\ndefault: ''\nimportance: high"
+ },
+ "SecurityProtocol": {
+ "enum": [
+ "Plaintext",
+ "Ssl",
+ "SaslPlaintext",
+ "SaslSsl"
+ ],
+ "description": "Protocol used to communicate with brokers.\n\ndefault: plaintext\nimportance: high"
+ },
+ "SessionTimeoutMs": {
+ "type": "integer",
+ "description": "Client group session and failure detection timeout. The consumer sends periodic heartbeats (heartbeat.interval.ms) to indicate its liveness to the broker. If no hearts are received by the broker for a group member within the session timeout, the broker will remove the consumer from the group and trigger a rebalance. The allowed range is configured with the **broker** configuration properties `group.min.session.timeout.ms` and `group.max.session.timeout.ms`. Also see `max.poll.interval.ms`.\n\ndefault: 45000\nimportance: high"
+ },
+ "SocketConnectionSetupTimeoutMs": {
+ "type": "integer",
+ "description": "Maximum time allowed for broker connection setup (TCP connection setup as well SSL and SASL handshake). If the connection to the broker is not fully functional after this the connection will be closed and retried.\n\ndefault: 30000\nimportance: medium"
+ },
+ "SocketKeepaliveEnable": {
+ "type": "boolean",
+ "description": "Enable TCP keep-alives (SO_KEEPALIVE) on broker sockets\n\ndefault: false\nimportance: low"
+ },
+ "SocketMaxFails": {
+ "type": "integer",
+ "description": "Disconnect from broker when this number of send failures (e.g., timed out requests) is reached. Disable with 0. WARNING: It is highly recommended to leave this setting at its default value of 1 to avoid the client and broker to become desynchronized in case of request timeouts. NOTE: The connection is automatically re-established.\n\ndefault: 1\nimportance: low"
+ },
+ "SocketNagleDisable": {
+ "type": "boolean",
+ "description": "Disable the Nagle algorithm (TCP_NODELAY) on broker sockets.\n\ndefault: false\nimportance: low"
+ },
+ "SocketReceiveBufferBytes": {
+ "type": "integer",
+ "description": "Broker socket receive buffer size. System default is used if 0.\n\ndefault: 0\nimportance: low"
+ },
+ "SocketSendBufferBytes": {
+ "type": "integer",
+ "description": "Broker socket send buffer size. System default is used if 0.\n\ndefault: 0\nimportance: low"
+ },
+ "SocketTimeoutMs": {
+ "type": "integer",
+ "description": "Default timeout for network requests. Producer: ProduceRequests will use the lesser value of `socket.timeout.ms` and remaining `message.timeout.ms` for the first message in the batch. Consumer: FetchRequests will use `fetch.wait.max.ms` + `socket.timeout.ms`. Admin: Admin requests will use `socket.timeout.ms` or explicitly set `rd_kafka_AdminOptions_set_operation_timeout()` value.\n\ndefault: 60000\nimportance: low"
+ },
+ "SslCaCertificateStores": {
+ "type": "string",
+ "description": "Comma-separated list of Windows Certificate stores to load CA certificates from. Certificates will be loaded in the same order as stores are specified. If no certificates can be loaded from any of the specified stores an error is logged and the OpenSSL library's default CA location is used instead. Store names are typically one or more of: MY, Root, Trust, CA.\n\ndefault: Root\nimportance: low"
+ },
+ "SslCaLocation": {
+ "type": "string",
+ "description": "File or directory path to CA certificate(s) for verifying the broker's key. Defaults: On Windows the system's CA certificates are automatically looked up in the Windows Root certificate store. On Mac OSX this configuration defaults to `probe`. It is recommended to install openssl using Homebrew, to provide CA certificates. On Linux install the distribution's ca-certificates package. If OpenSSL is statically linked or `ssl.ca.location` is set to `probe` a list of standard paths will be probed and the first one found will be used as the default CA certificate location path. If OpenSSL is dynamically linked the OpenSSL library's default path will be used (see `OPENSSLDIR` in `openssl version -a`).\n\ndefault: ''\nimportance: low"
+ },
+ "SslCaPem": {
+ "type": "string",
+ "description": "CA certificate string (PEM format) for verifying the broker's key.\n\ndefault: ''\nimportance: low"
+ },
+ "SslCertificateLocation": {
+ "type": "string",
+ "description": "Path to client's public key (PEM) used for authentication.\n\ndefault: ''\nimportance: low"
+ },
+ "SslCertificatePem": {
+ "type": "string",
+ "description": "Client's public key string (PEM format) used for authentication.\n\ndefault: ''\nimportance: low"
+ },
+ "SslCipherSuites": {
+ "type": "string",
+ "description": "A cipher suite is a named combination of authentication, encryption, MAC and key exchange algorithm used to negotiate the security settings for a network connection using TLS or SSL network protocol. See manual page for `ciphers(1)` and `SSL_CTX_set_cipher_list(3).\n\ndefault: ''\nimportance: low"
+ },
+ "SslCrlLocation": {
+ "type": "string",
+ "description": "Path to CRL for verifying broker's certificate validity.\n\ndefault: ''\nimportance: low"
+ },
+ "SslCurvesList": {
+ "type": "string",
+ "description": "The supported-curves extension in the TLS ClientHello message specifies the curves (standard/named, or 'explicit' GF(2^k) or GF(p)) the client is willing to have the server use. See manual page for `SSL_CTX_set1_curves_list(3)`. OpenSSL >= 1.0.2 required.\n\ndefault: ''\nimportance: low"
+ },
+ "SslEndpointIdentificationAlgorithm": {
+ "enum": [
+ "None",
+ "Https"
+ ],
+ "description": "Endpoint identification algorithm to validate broker hostname using broker certificate. https - Server (broker) hostname verification as specified in RFC2818. none - No endpoint verification. OpenSSL >= 1.0.2 required.\n\ndefault: https\nimportance: low"
+ },
+ "SslEngineId": {
+ "type": "string",
+ "description": "OpenSSL engine id is the name used for loading engine.\n\ndefault: dynamic\nimportance: low"
+ },
+ "SslEngineLocation": {
+ "type": "string",
+ "description": "**DEPRECATED** Path to OpenSSL engine library. OpenSSL >= 1.1.x required. DEPRECATED: OpenSSL engine support is deprecated and should be replaced by OpenSSL 3 providers.\n\ndefault: ''\nimportance: low"
+ },
+ "SslKeyLocation": {
+ "type": "string",
+ "description": "Path to client's private key (PEM) used for authentication.\n\ndefault: ''\nimportance: low"
+ },
+ "SslKeyPassword": {
+ "type": "string",
+ "description": "Private key passphrase (for use with `ssl.key.location` and `set_ssl_cert()`)\n\ndefault: ''\nimportance: low"
+ },
+ "SslKeyPem": {
+ "type": "string",
+ "description": "Client's private key string (PEM format) used for authentication.\n\ndefault: ''\nimportance: low"
+ },
+ "SslKeystoreLocation": {
+ "type": "string",
+ "description": "Path to client's keystore (PKCS#12) used for authentication.\n\ndefault: ''\nimportance: low"
+ },
+ "SslKeystorePassword": {
+ "type": "string",
+ "description": "Client's keystore (PKCS#12) password.\n\ndefault: ''\nimportance: low"
+ },
+ "SslProviders": {
+ "type": "string",
+ "description": "Comma-separated list of OpenSSL 3.0.x implementation providers. E.g., \"default,legacy\".\n\ndefault: ''\nimportance: low"
+ },
+ "SslSigalgsList": {
+ "type": "string",
+ "description": "The client uses the TLS ClientHello signature_algorithms extension to indicate to the server which signature/hash algorithm pairs may be used in digital signatures. See manual page for `SSL_CTX_set1_sigalgs_list(3)`. OpenSSL >= 1.0.2 required.\n\ndefault: ''\nimportance: low"
+ },
+ "StatisticsIntervalMs": {
+ "type": "integer",
+ "description": "librdkafka statistics emit interval. The application also needs to register a stats callback using `rd_kafka_conf_set_stats_cb()`. The granularity is 1000ms. A value of 0 disables statistics.\n\ndefault: 0\nimportance: high"
+ },
+ "TopicBlacklist": {
+ "type": "string",
+ "description": "Topic blacklist, a comma-separated list of regular expressions for matching topic names that should be ignored in broker metadata information as if the topics did not exist.\n\ndefault: ''\nimportance: low"
+ },
+ "TopicMetadataPropagationMaxMs": {
+ "type": "integer",
+ "description": "Apache Kafka topic creation is asynchronous and it takes some time for a new topic to propagate throughout the cluster to all brokers. If a client requests topic metadata after manual topic creation but before the topic has been fully propagated to the broker the client is requesting metadata from, the topic will seem to be non-existent and the client will mark the topic as such, failing queued produced messages with `ERR__UNKNOWN_TOPIC`. This setting delays marking a topic as non-existent until the configured propagation max time has passed. The maximum propagation time is calculated from the time the topic is first referenced in the client, e.g., on produce().\n\ndefault: 30000\nimportance: low"
+ },
+ "TopicMetadataRefreshFastIntervalMs": {
+ "type": "integer",
+ "description": "When a topic loses its leader a new metadata request will be enqueued immediately and then with this initial interval, exponentially increasing upto `retry.backoff.max.ms`, until the topic metadata has been refreshed. If not set explicitly, it will be defaulted to `retry.backoff.ms`. This is used to recover quickly from transitioning leader brokers.\n\ndefault: 100\nimportance: low"
+ },
+ "TopicMetadataRefreshIntervalMs": {
+ "type": "integer",
+ "description": "Period of time in milliseconds at which topic and broker metadata is refreshed in order to proactively discover any new brokers, topics, partitions or partition leader changes. Use -1 to disable the intervalled refresh (not recommended). If there are no locally referenced topics (no topic objects created, no messages produced, no subscription or no assignment) then only the broker list will be refreshed every interval but no more often than every 10s.\n\ndefault: 300000\nimportance: low"
+ },
+ "TopicMetadataRefreshSparse": {
+ "type": "boolean",
+ "description": "Sparse metadata requests (consumes less network bandwidth)\n\ndefault: true\nimportance: low"
+ }
+ },
+ "description": "Gets or sets the configuration settings for the Kafka consumer."
+ },
+ "ConnectionString": {
+ "type": "string",
+ "description": "Gets or sets the connection string of the Kafka server to connect to."
+ },
+ "HealthChecks": {
+ "type": "boolean",
+ "description": "Gets or sets a boolean value that indicates whether the Kafka health check is enabled or not."
+ },
+ "Metrics": {
+ "type": "boolean",
+ "description": "Gets or sets a boolean value that indicates whether collecting metrics is enabled or not."
+ }
+ },
+ "description": "Provides the client configuration settings for connecting to a Kafka message broker to consume messages."
+ },
+ "Producer": {
+ "type": "object",
+ "properties": {
+ "Config": {
+ "type": "object",
+ "properties": {
+ "Acks": {
+ "enum": [
+ "None",
+ "Leader",
+ "All"
+ ],
+ "description": "This field indicates the number of acknowledgements the leader broker must receive from ISR brokers\nbefore responding to the request: Zero=Broker does not send any response/ack to client, One=The\nleader will write the record to its local log but will respond without awaiting full acknowledgement\nfrom all followers. All=Broker will block until message is committed by all in sync replicas (ISRs).\nIf there are less than min.insync.replicas (broker configuration) in the ISR set the produce request\nwill fail."
+ },
+ "AllowAutoCreateTopics": {
+ "type": "boolean",
+ "description": "Allow automatic topic creation on the broker when subscribing to or assigning non-existent topics. The broker must also be configured with `auto.create.topics.enable=true` for this configuration to take effect. Note: the default value (true) for the producer is different from the default value (false) for the consumer. Further, the consumer default value is different from the Java consumer (true), and this property is not supported by the Java producer. Requires broker version >= 0.11.0.0, for older broker versions only the broker configuration applies.\n\ndefault: false\nimportance: low"
+ },
+ "ApiVersionFallbackMs": {
+ "type": "integer",
+ "description": "Dictates how long the `broker.version.fallback` fallback is used in the case the ApiVersionRequest fails. **NOTE**: The ApiVersionRequest is only issued when a new connection to the broker is made (such as after an upgrade).\n\ndefault: 0\nimportance: medium"
+ },
+ "ApiVersionRequest": {
+ "type": "boolean",
+ "description": "Request broker's supported API versions to adjust functionality to available protocol features. If set to false, or the ApiVersionRequest fails, the fallback version `broker.version.fallback` will be used. **NOTE**: Depends on broker version >=0.10.0. If the request is not supported by (an older) broker the `broker.version.fallback` fallback is used.\n\ndefault: true\nimportance: high"
+ },
+ "ApiVersionRequestTimeoutMs": {
+ "type": "integer",
+ "description": "Timeout for broker API version requests.\n\ndefault: 10000\nimportance: low"
+ },
+ "BatchNumMessages": {
+ "type": "integer",
+ "description": "Maximum number of messages batched in one MessageSet. The total MessageSet size is also limited by batch.size and message.max.bytes.\n\ndefault: 10000\nimportance: medium"
+ },
+ "BatchSize": {
+ "type": "integer",
+ "description": "Maximum size (in bytes) of all messages batched in one MessageSet, including protocol framing overhead. This limit is applied after the first message has been added to the batch, regardless of the first message's size, this is to ensure that messages that exceed batch.size are produced. The total MessageSet size is also limited by batch.num.messages and message.max.bytes.\n\ndefault: 1000000\nimportance: medium"
+ },
+ "BootstrapServers": {
+ "type": "string",
+ "description": "Initial list of brokers as a CSV list of broker host or host:port. The application may also use `rd_kafka_brokers_add()` to add brokers during runtime.\n\ndefault: ''\nimportance: high"
+ },
+ "BrokerAddressFamily": {
+ "enum": [
+ "Any",
+ "V4",
+ "V6"
+ ],
+ "description": "Allowed broker IP address families: any, v4, v6\n\ndefault: any\nimportance: low"
+ },
+ "BrokerAddressTtl": {
+ "type": "integer",
+ "description": "How long to cache the broker address resolving results (milliseconds).\n\ndefault: 1000\nimportance: low"
+ },
+ "BrokerVersionFallback": {
+ "type": "string",
+ "description": "Older broker versions (before 0.10.0) provide no way for a client to query for supported protocol features (ApiVersionRequest, see `api.version.request`) making it impossible for the client to know what features it may use. As a workaround a user may set this property to the expected broker version and the client will automatically adjust its feature set accordingly if the ApiVersionRequest fails (or is disabled). The fallback broker version will be used for `api.version.fallback.ms`. Valid values are: 0.9.0, 0.8.2, 0.8.1, 0.8.0. Any other value >= 0.10, such as 0.10.2.1, enables ApiVersionRequests.\n\ndefault: 0.10.0\nimportance: medium"
+ },
+ "CancellationDelayMaxMs": {
+ "type": "integer",
+ "description": "The maximum length of time (in milliseconds) before a cancellation request\nis acted on. Low values may result in measurably higher CPU usage.\n\ndefault: 100\nrange: 1 <= dotnet.cancellation.delay.max.ms <= 10000\nimportance: low"
+ },
+ "ClientDnsLookup": {
+ "enum": [
+ "UseAllDnsIps",
+ "ResolveCanonicalBootstrapServersOnly"
+ ],
+ "description": "Controls how the client uses DNS lookups. By default, when the lookup returns multiple IP addresses for a hostname, they will all be attempted for connection before the connection is considered failed. This applies to both bootstrap and advertised servers. If the value is set to `resolve_canonical_bootstrap_servers_only`, each entry will be resolved and expanded into a list of canonical names. NOTE: Default here is different from the Java client's default behavior, which connects only to the first IP address returned for a hostname.\n\ndefault: use_all_dns_ips\nimportance: low"
+ },
+ "ClientId": {
+ "type": "string",
+ "description": "Client identifier.\n\ndefault: rdkafka\nimportance: low"
+ },
+ "ClientRack": {
+ "type": "string",
+ "description": "A rack identifier for this client. This can be any string value which indicates where this client is physically located. It corresponds with the broker config `broker.rack`.\n\ndefault: ''\nimportance: low"
+ },
+ "CompressionLevel": {
+ "type": "integer",
+ "description": "Compression level parameter for algorithm selected by configuration property `compression.codec`. Higher values will result in better compression at the cost of more CPU usage. Usable range is algorithm-dependent: [0-9] for gzip; [0-12] for lz4; only 0 for snappy; -1 = codec-dependent default compression level.\n\ndefault: -1\nimportance: medium"
+ },
+ "CompressionType": {
+ "enum": [
+ "None",
+ "Gzip",
+ "Snappy",
+ "Lz4",
+ "Zstd"
+ ],
+ "description": "compression codec to use for compressing message sets. This is the default value for all topics, may be overridden by the topic configuration property `compression.codec`.\n\ndefault: none\nimportance: medium"
+ },
+ "ConnectionsMaxIdleMs": {
+ "type": "integer",
+ "description": "Close broker connections after the specified time of inactivity. Disable with 0. If this property is left at its default value some heuristics are performed to determine a suitable default value, this is currently limited to identifying brokers on Azure (see librdkafka issue #3109 for more info).\n\ndefault: 0\nimportance: medium"
+ },
+ "Debug": {
+ "type": "string",
+ "description": "A comma-separated list of debug contexts to enable. Detailed Producer debugging: broker,topic,msg. Consumer: consumer,cgrp,topic,fetch\n\ndefault: ''\nimportance: medium"
+ },
+ "DeliveryReportFields": {
+ "type": "string",
+ "description": "A comma separated list of fields that may be optionally set in delivery\nreports. Disabling delivery report fields that you do not require will\nimprove maximum throughput and reduce memory usage. Allowed values:\nkey, value, timestamp, headers, status, all, none.\n\ndefault: all\nimportance: low"
+ },
+ "EnableBackgroundPoll": {
+ "type": "boolean",
+ "description": "Specifies whether or not the producer should start a background poll\nthread to receive delivery reports and event notifications. Generally,\nthis should be set to true. If set to false, you will need to call\nthe Poll function manually.\n\ndefault: true\nimportance: low"
+ },
+ "EnableDeliveryReports": {
+ "type": "boolean",
+ "description": "Specifies whether to enable notification of delivery reports. Typically\nyou should set this parameter to true. Set it to false for \"fire and\nforget\" semantics and a small boost in performance.\n\ndefault: true\nimportance: low"
+ },
+ "EnableGaplessGuarantee": {
+ "type": "boolean",
+ "description": "**EXPERIMENTAL**: subject to change or removal. When set to `true`, any error that could result in a gap in the produced message series when a batch of messages fails, will raise a fatal error (ERR__GAPLESS_GUARANTEE) and stop the producer. Messages failing due to `message.timeout.ms` are not covered by this guarantee. Requires `enable.idempotence=true`.\n\ndefault: false\nimportance: low"
+ },
+ "EnableIdempotence": {
+ "type": "boolean",
+ "description": "When set to `true`, the producer will ensure that messages are successfully produced exactly once and in the original produce order. The following configuration properties are adjusted automatically (if not modified by the user) when idempotence is enabled: `max.in.flight.requests.per.connection=5` (must be less than or equal to 5), `retries=INT32_MAX` (must be greater than 0), `acks=all`, `queuing.strategy=fifo`. Producer instantation will fail if user-supplied configuration is incompatible.\n\ndefault: false\nimportance: high"
+ },
+ "EnableRandomSeed": {
+ "type": "boolean",
+ "description": "If enabled librdkafka will initialize the PRNG with srand(current_time.milliseconds) on the first invocation of rd_kafka_new() (required only if rand_r() is not available on your platform). If disabled the application must call srand() prior to calling rd_kafka_new().\n\ndefault: true\nimportance: low"
+ },
+ "EnableSaslOauthbearerUnsecureJwt": {
+ "type": "boolean",
+ "description": "Enable the builtin unsecure JWT OAUTHBEARER token handler if no oauthbearer_refresh_cb has been set. This builtin handler should only be used for development or testing, and not in production.\n\ndefault: false\nimportance: low"
+ },
+ "EnableSslCertificateVerification": {
+ "type": "boolean",
+ "description": "Enable OpenSSL's builtin broker (server) certificate verification. This verification can be extended by the application by implementing a certificate_verify_cb.\n\ndefault: true\nimportance: low"
+ },
+ "InternalTerminationSignal": {
+ "type": "integer",
+ "description": "Signal that librdkafka will use to quickly terminate on rd_kafka_destroy(). If this signal is not set then there will be a delay before rd_kafka_wait_destroyed() returns true as internal threads are timing out their system calls. If this signal is set however the delay will be minimal. The application should mask this signal as an internal signal handler is installed.\n\ndefault: 0\nimportance: low"
+ },
+ "LingerMs": {
+ "type": [
+ "number",
+ "string"
+ ],
+ "description": "Delay in milliseconds to wait for messages in the producer queue to accumulate before constructing message batches (MessageSets) to transmit to brokers. A higher value allows larger and more effective (less overhead, improved compression) batches of messages to accumulate at the expense of increased message delivery latency.\n\ndefault: 5\nimportance: high"
+ },
+ "LogConnectionClose": {
+ "type": "boolean",
+ "description": "Log broker disconnects. It might be useful to turn this off when interacting with 0.9 brokers with an aggressive `connections.max.idle.ms` value.\n\ndefault: true\nimportance: low"
+ },
+ "LogQueue": {
+ "type": "boolean",
+ "description": "Disable spontaneous log_cb from internal librdkafka threads, instead enqueue log messages on queue set with `rd_kafka_set_log_queue()` and serve log callbacks or events through the standard poll APIs. **NOTE**: Log messages will linger in a temporary queue until the log queue has been set.\n\ndefault: false\nimportance: low"
+ },
+ "LogThreadName": {
+ "type": "boolean",
+ "description": "Print internal thread name in log messages (useful for debugging librdkafka internals)\n\ndefault: true\nimportance: low"
+ },
+ "MaxInFlight": {
+ "type": "integer",
+ "description": "Maximum number of in-flight requests per broker connection. This is a generic property applied to all broker communication, however it is primarily relevant to produce requests. In particular, note that other mechanisms limit the number of outstanding consumer fetch request per broker to one.\n\ndefault: 1000000\nimportance: low"
+ },
+ "MessageCopyMaxBytes": {
+ "type": "integer",
+ "description": "Maximum size for message to be copied to buffer. Messages larger than this will be passed by reference (zero-copy) at the expense of larger iovecs.\n\ndefault: 65535\nimportance: low"
+ },
+ "MessageMaxBytes": {
+ "type": "integer",
+ "description": "Maximum Kafka protocol request message size. Due to differing framing overhead between protocol versions the producer is unable to reliably enforce a strict max message limit at produce time and may exceed the maximum size by one message in protocol ProduceRequests, the broker will enforce the the topic's `max.message.bytes` limit (see Apache Kafka documentation).\n\ndefault: 1000000\nimportance: medium"
+ },
+ "MessageSendMaxRetries": {
+ "type": "integer",
+ "description": "How many times to retry sending a failing Message. **Note:** retrying may cause reordering unless `enable.idempotence` is set to true.\n\ndefault: 2147483647\nimportance: high"
+ },
+ "MessageTimeoutMs": {
+ "type": "integer",
+ "description": "Local message timeout. This value is only enforced locally and limits the time a produced message waits for successful delivery. A time of 0 is infinite. This is the maximum time librdkafka may use to deliver a message (including retries). Delivery error occurs when either the retry count or the message timeout are exceeded. The message timeout is automatically adjusted to `transaction.timeout.ms` if `transactional.id` is configured.\n\ndefault: 300000\nimportance: high"
+ },
+ "MetadataMaxAgeMs": {
+ "type": "integer",
+ "description": "Metadata cache max age. Defaults to topic.metadata.refresh.interval.ms * 3\n\ndefault: 900000\nimportance: low"
+ },
+ "Partitioner": {
+ "enum": [
+ "Random",
+ "Consistent",
+ "ConsistentRandom",
+ "Murmur2",
+ "Murmur2Random"
+ ],
+ "description": "Partitioner: `random` - random distribution, `consistent` - CRC32 hash of key (Empty and NULL keys are mapped to single partition), `consistent_random` - CRC32 hash of key (Empty and NULL keys are randomly partitioned), `murmur2` - Java Producer compatible Murmur2 hash of key (NULL keys are mapped to single partition), `murmur2_random` - Java Producer compatible Murmur2 hash of key (NULL keys are randomly partitioned. This is functionally equivalent to the default partitioner in the Java Producer.), `fnv1a` - FNV-1a hash of key (NULL keys are mapped to single partition), `fnv1a_random` - FNV-1a hash of key (NULL keys are randomly partitioned).\n\ndefault: consistent_random\nimportance: high"
+ },
+ "PluginLibraryPaths": {
+ "type": "string",
+ "description": "List of plugin libraries to load (; separated). The library search path is platform dependent (see dlopen(3) for Unix and LoadLibrary() for Windows). If no filename extension is specified the platform-specific extension (such as .dll or .so) will be appended automatically.\n\ndefault: ''\nimportance: low"
+ },
+ "QueueBufferingBackpressureThreshold": {
+ "type": "integer",
+ "description": "The threshold of outstanding not yet transmitted broker requests needed to backpressure the producer's message accumulator. If the number of not yet transmitted requests equals or exceeds this number, produce request creation that would have otherwise been triggered (for example, in accordance with linger.ms) will be delayed. A lower number yields larger and more effective batches. A higher value can improve latency when using compression on slow machines.\n\ndefault: 1\nimportance: low"
+ },
+ "QueueBufferingMaxKbytes": {
+ "type": "integer",
+ "description": "Maximum total message size sum allowed on the producer queue. This queue is shared by all topics and partitions. This property has higher priority than queue.buffering.max.messages.\n\ndefault: 1048576\nimportance: high"
+ },
+ "QueueBufferingMaxMessages": {
+ "type": "integer",
+ "description": "Maximum number of messages allowed on the producer queue. This queue is shared by all topics and partitions. A value of 0 disables this limit.\n\ndefault: 100000\nimportance: high"
+ },
+ "ReceiveMessageMaxBytes": {
+ "type": "integer",
+ "description": "Maximum Kafka protocol response message size. This serves as a safety precaution to avoid memory exhaustion in case of protocol hickups. This value must be at least `fetch.max.bytes` + 512 to allow for protocol overhead; the value is adjusted automatically unless the configuration property is explicitly set.\n\ndefault: 100000000\nimportance: medium"
+ },
+ "ReconnectBackoffMaxMs": {
+ "type": "integer",
+ "description": "The maximum time to wait before reconnecting to a broker after the connection has been closed.\n\ndefault: 10000\nimportance: medium"
+ },
+ "ReconnectBackoffMs": {
+ "type": "integer",
+ "description": "The initial time to wait before reconnecting to a broker after the connection has been closed. The time is increased exponentially until `reconnect.backoff.max.ms` is reached. -25% to +50% jitter is applied to each reconnect backoff. A value of 0 disables the backoff and reconnects immediately.\n\ndefault: 100\nimportance: medium"
+ },
+ "RequestTimeoutMs": {
+ "type": "integer",
+ "description": "The ack timeout of the producer request in milliseconds. This value is only enforced by the broker and relies on `request.required.acks` being != 0.\n\ndefault: 30000\nimportance: medium"
+ },
+ "RetryBackoffMaxMs": {
+ "type": "integer",
+ "description": "The max backoff time in milliseconds before retrying a protocol request, this is the atmost backoff allowed for exponentially backed off requests.\n\ndefault: 1000\nimportance: medium"
+ },
+ "RetryBackoffMs": {
+ "type": "integer",
+ "description": "The backoff time in milliseconds before retrying a protocol request, this is the first backoff time, and will be backed off exponentially until number of retries is exhausted, and it's capped by retry.backoff.max.ms.\n\ndefault: 100\nimportance: medium"
+ },
+ "SaslKerberosKeytab": {
+ "type": "string",
+ "description": "Path to Kerberos keytab file. This configuration property is only used as a variable in `sasl.kerberos.kinit.cmd` as ` ... -t \"%{sasl.kerberos.keytab}\"`.\n\ndefault: ''\nimportance: low"
+ },
+ "SaslKerberosKinitCmd": {
+ "type": "string",
+ "description": "Shell command to refresh or acquire the client's Kerberos ticket. This command is executed on client creation and every sasl.kerberos.min.time.before.relogin (0=disable). %{config.prop.name} is replaced by corresponding config object value.\n\ndefault: kinit -R -t \"%{sasl.kerberos.keytab}\" -k %{sasl.kerberos.principal} || kinit -t \"%{sasl.kerberos.keytab}\" -k %{sasl.kerberos.principal}\nimportance: low"
+ },
+ "SaslKerberosMinTimeBeforeRelogin": {
+ "type": "integer",
+ "description": "Minimum time in milliseconds between key refresh attempts. Disable automatic key refresh by setting this property to 0.\n\ndefault: 60000\nimportance: low"
+ },
+ "SaslKerberosPrincipal": {
+ "type": "string",
+ "description": "This client's Kerberos principal name. (Not supported on Windows, will use the logon user's principal).\n\ndefault: kafkaclient\nimportance: low"
+ },
+ "SaslKerberosServiceName": {
+ "type": "string",
+ "description": "Kerberos principal name that Kafka runs as, not including /hostname@REALM\n\ndefault: kafka\nimportance: low"
+ },
+ "SaslMechanism": {
+ "enum": [
+ "Gssapi",
+ "Plain",
+ "ScramSha256",
+ "ScramSha512",
+ "OAuthBearer"
+ ],
+ "description": "SASL mechanism to use for authentication. Supported: GSSAPI, PLAIN, SCRAM-SHA-256, SCRAM-SHA-512. **NOTE**: Despite the name, you may not configure more than one mechanism."
+ },
+ "SaslOauthbearerClientId": {
+ "type": "string",
+ "description": "Public identifier for the application. Must be unique across all clients that the authorization server handles. Only used when `sasl.oauthbearer.method` is set to \"oidc\".\n\ndefault: ''\nimportance: low"
+ },
+ "SaslOauthbearerClientSecret": {
+ "type": "string",
+ "description": "Client secret only known to the application and the authorization server. This should be a sufficiently random string that is not guessable. Only used when `sasl.oauthbearer.method` is set to \"oidc\".\n\ndefault: ''\nimportance: low"
+ },
+ "SaslOauthbearerConfig": {
+ "type": "string",
+ "description": "SASL/OAUTHBEARER configuration. The format is implementation-dependent and must be parsed accordingly. The default unsecured token implementation (see https://tools.ietf.org/html/rfc7515#appendix-A.5) recognizes space-separated name=value pairs with valid names including principalClaimName, principal, scopeClaimName, scope, and lifeSeconds. The default value for principalClaimName is \"sub\", the default value for scopeClaimName is \"scope\", and the default value for lifeSeconds is 3600. The scope value is CSV format with the default value being no/empty scope. For example: `principalClaimName=azp principal=admin scopeClaimName=roles scope=role1,role2 lifeSeconds=600`. In addition, SASL extensions can be communicated to the broker via `extension_NAME=value`. For example: `principal=admin extension_traceId=123`\n\ndefault: ''\nimportance: low"
+ },
+ "SaslOauthbearerExtensions": {
+ "type": "string",
+ "description": "Allow additional information to be provided to the broker. Comma-separated list of key=value pairs. E.g., \"supportFeatureX=true,organizationId=sales-emea\".Only used when `sasl.oauthbearer.method` is set to \"oidc\".\n\ndefault: ''\nimportance: low"
+ },
+ "SaslOauthbearerMethod": {
+ "enum": [
+ "Default",
+ "Oidc"
+ ],
+ "description": "Set to \"default\" or \"oidc\" to control which login method to be used. If set to \"oidc\", the following properties must also be be specified: `sasl.oauthbearer.client.id`, `sasl.oauthbearer.client.secret`, and `sasl.oauthbearer.token.endpoint.url`.\n\ndefault: default\nimportance: low"
+ },
+ "SaslOauthbearerScope": {
+ "type": "string",
+ "description": "Client use this to specify the scope of the access request to the broker. Only used when `sasl.oauthbearer.method` is set to \"oidc\".\n\ndefault: ''\nimportance: low"
+ },
+ "SaslOauthbearerTokenEndpointUrl": {
+ "type": "string",
+ "description": "OAuth/OIDC issuer token endpoint HTTP(S) URI used to retrieve token. Only used when `sasl.oauthbearer.method` is set to \"oidc\".\n\ndefault: ''\nimportance: low"
+ },
+ "SaslPassword": {
+ "type": "string",
+ "description": "SASL password for use with the PLAIN and SASL-SCRAM-.. mechanism\n\ndefault: ''\nimportance: high"
+ },
+ "SaslUsername": {
+ "type": "string",
+ "description": "SASL username for use with the PLAIN and SASL-SCRAM-.. mechanisms\n\ndefault: ''\nimportance: high"
+ },
+ "SecurityProtocol": {
+ "enum": [
+ "Plaintext",
+ "Ssl",
+ "SaslPlaintext",
+ "SaslSsl"
+ ],
+ "description": "Protocol used to communicate with brokers.\n\ndefault: plaintext\nimportance: high"
+ },
+ "SocketConnectionSetupTimeoutMs": {
+ "type": "integer",
+ "description": "Maximum time allowed for broker connection setup (TCP connection setup as well SSL and SASL handshake). If the connection to the broker is not fully functional after this the connection will be closed and retried.\n\ndefault: 30000\nimportance: medium"
+ },
+ "SocketKeepaliveEnable": {
+ "type": "boolean",
+ "description": "Enable TCP keep-alives (SO_KEEPALIVE) on broker sockets\n\ndefault: false\nimportance: low"
+ },
+ "SocketMaxFails": {
+ "type": "integer",
+ "description": "Disconnect from broker when this number of send failures (e.g., timed out requests) is reached. Disable with 0. WARNING: It is highly recommended to leave this setting at its default value of 1 to avoid the client and broker to become desynchronized in case of request timeouts. NOTE: The connection is automatically re-established.\n\ndefault: 1\nimportance: low"
+ },
+ "SocketNagleDisable": {
+ "type": "boolean",
+ "description": "Disable the Nagle algorithm (TCP_NODELAY) on broker sockets.\n\ndefault: false\nimportance: low"
+ },
+ "SocketReceiveBufferBytes": {
+ "type": "integer",
+ "description": "Broker socket receive buffer size. System default is used if 0.\n\ndefault: 0\nimportance: low"
+ },
+ "SocketSendBufferBytes": {
+ "type": "integer",
+ "description": "Broker socket send buffer size. System default is used if 0.\n\ndefault: 0\nimportance: low"
+ },
+ "SocketTimeoutMs": {
+ "type": "integer",
+ "description": "Default timeout for network requests. Producer: ProduceRequests will use the lesser value of `socket.timeout.ms` and remaining `message.timeout.ms` for the first message in the batch. Consumer: FetchRequests will use `fetch.wait.max.ms` + `socket.timeout.ms`. Admin: Admin requests will use `socket.timeout.ms` or explicitly set `rd_kafka_AdminOptions_set_operation_timeout()` value.\n\ndefault: 60000\nimportance: low"
+ },
+ "SslCaCertificateStores": {
+ "type": "string",
+ "description": "Comma-separated list of Windows Certificate stores to load CA certificates from. Certificates will be loaded in the same order as stores are specified. If no certificates can be loaded from any of the specified stores an error is logged and the OpenSSL library's default CA location is used instead. Store names are typically one or more of: MY, Root, Trust, CA.\n\ndefault: Root\nimportance: low"
+ },
+ "SslCaLocation": {
+ "type": "string",
+ "description": "File or directory path to CA certificate(s) for verifying the broker's key. Defaults: On Windows the system's CA certificates are automatically looked up in the Windows Root certificate store. On Mac OSX this configuration defaults to `probe`. It is recommended to install openssl using Homebrew, to provide CA certificates. On Linux install the distribution's ca-certificates package. If OpenSSL is statically linked or `ssl.ca.location` is set to `probe` a list of standard paths will be probed and the first one found will be used as the default CA certificate location path. If OpenSSL is dynamically linked the OpenSSL library's default path will be used (see `OPENSSLDIR` in `openssl version -a`).\n\ndefault: ''\nimportance: low"
+ },
+ "SslCaPem": {
+ "type": "string",
+ "description": "CA certificate string (PEM format) for verifying the broker's key.\n\ndefault: ''\nimportance: low"
+ },
+ "SslCertificateLocation": {
+ "type": "string",
+ "description": "Path to client's public key (PEM) used for authentication.\n\ndefault: ''\nimportance: low"
+ },
+ "SslCertificatePem": {
+ "type": "string",
+ "description": "Client's public key string (PEM format) used for authentication.\n\ndefault: ''\nimportance: low"
+ },
+ "SslCipherSuites": {
+ "type": "string",
+ "description": "A cipher suite is a named combination of authentication, encryption, MAC and key exchange algorithm used to negotiate the security settings for a network connection using TLS or SSL network protocol. See manual page for `ciphers(1)` and `SSL_CTX_set_cipher_list(3).\n\ndefault: ''\nimportance: low"
+ },
+ "SslCrlLocation": {
+ "type": "string",
+ "description": "Path to CRL for verifying broker's certificate validity.\n\ndefault: ''\nimportance: low"
+ },
+ "SslCurvesList": {
+ "type": "string",
+ "description": "The supported-curves extension in the TLS ClientHello message specifies the curves (standard/named, or 'explicit' GF(2^k) or GF(p)) the client is willing to have the server use. See manual page for `SSL_CTX_set1_curves_list(3)`. OpenSSL >= 1.0.2 required.\n\ndefault: ''\nimportance: low"
+ },
+ "SslEndpointIdentificationAlgorithm": {
+ "enum": [
+ "None",
+ "Https"
+ ],
+ "description": "Endpoint identification algorithm to validate broker hostname using broker certificate. https - Server (broker) hostname verification as specified in RFC2818. none - No endpoint verification. OpenSSL >= 1.0.2 required.\n\ndefault: https\nimportance: low"
+ },
+ "SslEngineId": {
+ "type": "string",
+ "description": "OpenSSL engine id is the name used for loading engine.\n\ndefault: dynamic\nimportance: low"
+ },
+ "SslEngineLocation": {
+ "type": "string",
+ "description": "**DEPRECATED** Path to OpenSSL engine library. OpenSSL >= 1.1.x required. DEPRECATED: OpenSSL engine support is deprecated and should be replaced by OpenSSL 3 providers.\n\ndefault: ''\nimportance: low"
+ },
+ "SslKeyLocation": {
+ "type": "string",
+ "description": "Path to client's private key (PEM) used for authentication.\n\ndefault: ''\nimportance: low"
+ },
+ "SslKeyPassword": {
+ "type": "string",
+ "description": "Private key passphrase (for use with `ssl.key.location` and `set_ssl_cert()`)\n\ndefault: ''\nimportance: low"
+ },
+ "SslKeyPem": {
+ "type": "string",
+ "description": "Client's private key string (PEM format) used for authentication.\n\ndefault: ''\nimportance: low"
+ },
+ "SslKeystoreLocation": {
+ "type": "string",
+ "description": "Path to client's keystore (PKCS#12) used for authentication.\n\ndefault: ''\nimportance: low"
+ },
+ "SslKeystorePassword": {
+ "type": "string",
+ "description": "Client's keystore (PKCS#12) password.\n\ndefault: ''\nimportance: low"
+ },
+ "SslProviders": {
+ "type": "string",
+ "description": "Comma-separated list of OpenSSL 3.0.x implementation providers. E.g., \"default,legacy\".\n\ndefault: ''\nimportance: low"
+ },
+ "SslSigalgsList": {
+ "type": "string",
+ "description": "The client uses the TLS ClientHello signature_algorithms extension to indicate to the server which signature/hash algorithm pairs may be used in digital signatures. See manual page for `SSL_CTX_set1_sigalgs_list(3)`. OpenSSL >= 1.0.2 required.\n\ndefault: ''\nimportance: low"
+ },
+ "StatisticsIntervalMs": {
+ "type": "integer",
+ "description": "librdkafka statistics emit interval. The application also needs to register a stats callback using `rd_kafka_conf_set_stats_cb()`. The granularity is 1000ms. A value of 0 disables statistics.\n\ndefault: 0\nimportance: high"
+ },
+ "StickyPartitioningLingerMs": {
+ "type": "integer",
+ "description": "Delay in milliseconds to wait to assign new sticky partitions for each topic. By default, set to double the time of linger.ms. To disable sticky behavior, set to 0. This behavior affects messages with the key NULL in all cases, and messages with key lengths of zero when the consistent_random partitioner is in use. These messages would otherwise be assigned randomly. A higher value allows for more effective batching of these messages.\n\ndefault: 10\nimportance: low"
+ },
+ "TopicBlacklist": {
+ "type": "string",
+ "description": "Topic blacklist, a comma-separated list of regular expressions for matching topic names that should be ignored in broker metadata information as if the topics did not exist.\n\ndefault: ''\nimportance: low"
+ },
+ "TopicMetadataPropagationMaxMs": {
+ "type": "integer",
+ "description": "Apache Kafka topic creation is asynchronous and it takes some time for a new topic to propagate throughout the cluster to all brokers. If a client requests topic metadata after manual topic creation but before the topic has been fully propagated to the broker the client is requesting metadata from, the topic will seem to be non-existent and the client will mark the topic as such, failing queued produced messages with `ERR__UNKNOWN_TOPIC`. This setting delays marking a topic as non-existent until the configured propagation max time has passed. The maximum propagation time is calculated from the time the topic is first referenced in the client, e.g., on produce().\n\ndefault: 30000\nimportance: low"
+ },
+ "TopicMetadataRefreshFastIntervalMs": {
+ "type": "integer",
+ "description": "When a topic loses its leader a new metadata request will be enqueued immediately and then with this initial interval, exponentially increasing upto `retry.backoff.max.ms`, until the topic metadata has been refreshed. If not set explicitly, it will be defaulted to `retry.backoff.ms`. This is used to recover quickly from transitioning leader brokers.\n\ndefault: 100\nimportance: low"
+ },
+ "TopicMetadataRefreshIntervalMs": {
+ "type": "integer",
+ "description": "Period of time in milliseconds at which topic and broker metadata is refreshed in order to proactively discover any new brokers, topics, partitions or partition leader changes. Use -1 to disable the intervalled refresh (not recommended). If there are no locally referenced topics (no topic objects created, no messages produced, no subscription or no assignment) then only the broker list will be refreshed every interval but no more often than every 10s.\n\ndefault: 300000\nimportance: low"
+ },
+ "TopicMetadataRefreshSparse": {
+ "type": "boolean",
+ "description": "Sparse metadata requests (consumes less network bandwidth)\n\ndefault: true\nimportance: low"
+ },
+ "TransactionTimeoutMs": {
+ "type": "integer",
+ "description": "The maximum amount of time in milliseconds that the transaction coordinator will wait for a transaction status update from the producer before proactively aborting the ongoing transaction. If this value is larger than the `transaction.max.timeout.ms` setting in the broker, the init_transactions() call will fail with ERR_INVALID_TRANSACTION_TIMEOUT. The transaction timeout automatically adjusts `message.timeout.ms` and `socket.timeout.ms`, unless explicitly configured in which case they must not exceed the transaction timeout (`socket.timeout.ms` must be at least 100ms lower than `transaction.timeout.ms`). This is also the default timeout value if no timeout (-1) is supplied to the transactional API methods.\n\ndefault: 60000\nimportance: medium"
+ },
+ "TransactionalId": {
+ "type": "string",
+ "description": "Enables the transactional producer. The transactional.id is used to identify the same transactional producer instance across process restarts. It allows the producer to guarantee that transactions corresponding to earlier instances of the same producer have been finalized prior to starting any new transactions, and that any zombie instances are fenced off. If no transactional.id is provided, then the producer is limited to idempotent delivery (if enable.idempotence is set). Requires broker version >= 0.11.0.\n\ndefault: ''\nimportance: high"
+ }
+ },
+ "description": "Gets or sets the configuration settings for the Kafka producer."
+ },
+ "ConnectionString": {
+ "type": "string",
+ "description": "Gets or sets the connection string of the Kafka server to connect to."
+ },
+ "HealthChecks": {
+ "type": "boolean",
+ "description": "Gets or sets a boolean value that indicates whether the Kafka health check is enabled or not.",
+ "default": true
+ },
+ "Metrics": {
+ "type": "boolean",
+ "description": "Gets or sets a boolean value that indicates whether collecting metrics is enabled or not.",
+ "default": true
+ }
+ },
+ "description": "Provides the client configuration settings for connecting to a Kafka message broker to produce messages."
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "type": "object"
+}
diff --git a/src/Components/Aspire.Confluent.Kafka/ConfluentKafkaCommon.cs b/src/Components/Aspire.Confluent.Kafka/ConfluentKafkaCommon.cs
new file mode 100644
index 0000000000..e4939ad207
--- /dev/null
+++ b/src/Components/Aspire.Confluent.Kafka/ConfluentKafkaCommon.cs
@@ -0,0 +1,16 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+
+namespace Aspire.Confluent.Kafka;
+
+internal sealed class ConfluentKafkaCommon
+{
+ public const string MeterName = "Aspire.Confluent.Kafka";
+
+ public const string ProducerHealthCheckName = "Aspire.Confluent.Kafka.Producer";
+ public const string ConsumerHealthCheckName = "Aspire.Confluent.Kafka.Consumer";
+ public const string KeyedProducerHealthCheckName = "Aspire.Confluent.Kafka.Producer_";
+ public const string KeyedConsumerHealthCheckName = "Aspire.Confluent.Kafka.Consumer_";
+
+ public const string LogCategoryName = "Aspire.Confluent.Kafka";
+}
diff --git a/src/Components/Aspire.Confluent.Kafka/ConfluentKafkaMetrics.cs b/src/Components/Aspire.Confluent.Kafka/ConfluentKafkaMetrics.cs
new file mode 100644
index 0000000000..bcffec2c15
--- /dev/null
+++ b/src/Components/Aspire.Confluent.Kafka/ConfluentKafkaMetrics.cs
@@ -0,0 +1,108 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+
+using System.Collections.Concurrent;
+using System.Diagnostics.Metrics;
+
+namespace Aspire.Confluent.Kafka;
+
+internal sealed class ConfluentKafkaMetrics
+{
+ private readonly Meter _meter;
+
+ public Counter<long> Tx { get; }
+ public Counter<long> TxBytes { get; }
+ public Counter<long> TxMessages { get; }
+ public Counter<long> TxMessageBytes { get; }
+ public Counter<long> Rx { get; }
+ public Counter<long> RxBytes { get; }
+ public Counter<long> RxMessages { get; }
+ public Counter<long> RxMessageBytes { get; }
+
+ public ConcurrentQueue<Measurement<long>> ReplyQueueMeasurements { get; } = new ConcurrentQueue<Measurement<long>>();
+ public ConcurrentQueue<Measurement<long>> MessageCountMeasurements { get; } = new ConcurrentQueue<Measurement<long>>();
+ public ConcurrentQueue<Measurement<long>> MessageSizeMeasurements { get; } = new ConcurrentQueue<Measurement<long>>();
+
+ public ConfluentKafkaMetrics(IMeterFactory meterFactory)
+ {
+ _meter = meterFactory.Create(ConfluentKafkaCommon.MeterName);
+
+ _meter.CreateObservableGauge(Gauges.ReplyQueue, GetReplyQMeasurements, description: Descriptions.ReplyQueue);
+ _meter.CreateObservableGauge(Gauges.MessageCount, GetMessageCountMeasurements, description: Descriptions.MessageCount);
+ _meter.CreateObservableGauge(Gauges.MessageSize, GetMessageSizeMeasurements, description: Descriptions.MessageSize);
+
+ Tx = _meter.CreateCounter<long>(Counters.Tx, description: Descriptions.Tx);
+ TxBytes = _meter.CreateCounter<long>(Counters.TxBytes, description: Descriptions.TxBytes);
+ TxMessages = _meter.CreateCounter<long>(Counters.TxMessages, description: Descriptions.TxMessages);
+ TxMessageBytes = _meter.CreateCounter<long>(Counters.TxMessageBytes, description: Descriptions.TxMessageBytes);
+ Rx = _meter.CreateCounter<long>(Counters.Rx, description: Descriptions.Rx);
+ RxBytes = _meter.CreateCounter<long>(Counters.RxBytes, description: Descriptions.RxBytes);
+ RxMessages = _meter.CreateCounter<long>(Counters.RxMessages, description: Descriptions.RxMessages);
+ RxMessageBytes = _meter.CreateCounter<long>(Counters.RxMessageBytes, description: Descriptions.RxMessageBytes);
+ }
+
+ public static class Gauges
+ {
+ public const string ReplyQueue = "messaging.kafka.consumer.queue.message_count";
+ public const string MessageCount = "messaging.kafka.producer.queue.message_count";
+ public const string MessageSize = "messaging.kafka.producer.queue.size";
+ }
+
+ public static class Counters
+ {
+ public const string Tx = "messaging.kafka.network.tx";
+ public const string TxBytes = "messaging.kafka.network.transmitted";
+ public const string Rx = "messaging.kafka.network.rx";
+ public const string RxBytes = "messaging.kafka.network.received";
+ public const string TxMessages = "messaging.publish.messages";
+ public const string TxMessageBytes = "messaging.kafka.message.transmitted";
+ public const string RxMessages = "messaging.receive.messages";
+ public const string RxMessageBytes = "messaging.kafka.message.received";
+ }
+
+ public static class Tags
+ {
+ public const string ClientId = "messaging.client_id";
+ public const string Type = "type";
+ public const string Name = "name";
+ }
+
+ private static class Descriptions
+ {
+ public const string ReplyQueue = "Number of ops (callbacks, events, etc) waiting in queue for application to serve with rd_kafka_poll()";
+ public const string MessageCount = "Current number of messages in producer queues";
+ public const string MessageSize = "Current total size of messages in producer queues";
+ public const string Tx = "Total number of requests sent to Kafka brokers";
+ public const string TxBytes = "Total number of bytes transmitted to Kafka brokers";
+ public const string Rx = "Total number of responses received from Kafka brokers";
+ public const string RxBytes = "Total number of bytes received from Kafka brokers";
+ public const string TxMessages = "Total number of messages transmitted (produced) to Kafka brokers";
+ public const string TxMessageBytes = "Total number of message bytes (including framing, such as per-Message framing and MessageSet/batch framing) transmitted to Kafka brokers";
+ public const string RxMessages = "Total number of messages consumed, not including ignored messages (due to offset, etc), from Kafka brokers";
+ public const string RxMessageBytes = "Total number of message bytes (including framing) received from Kafka brokers";
+ }
+
+ private IEnumerable<Measurement<long>> GetReplyQMeasurements()
+ {
+ while (ReplyQueueMeasurements.TryDequeue(out var measurement))
+ {
+ yield return measurement;
+ }
+ }
+
+ private IEnumerable<Measurement<long>> GetMessageCountMeasurements()
+ {
+ while (MessageCountMeasurements.TryDequeue(out var measurement))
+ {
+ yield return measurement;
+ }
+ }
+
+ private IEnumerable<Measurement<long>> GetMessageSizeMeasurements()
+ {
+ while (MessageSizeMeasurements.TryDequeue(out var measurement))
+ {
+ yield return measurement;
+ }
+ }
+}
diff --git a/src/Components/Aspire.Confluent.Kafka/ConsumerConnectionFactory.cs b/src/Components/Aspire.Confluent.Kafka/ConsumerConnectionFactory.cs
new file mode 100644
index 0000000000..37dc7fd8b8
--- /dev/null
+++ b/src/Components/Aspire.Confluent.Kafka/ConsumerConnectionFactory.cs
@@ -0,0 +1,27 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+
+using Confluent.Kafka;
+
+namespace Aspire.Confluent.Kafka;
+
+internal sealed class ConsumerConnectionFactory<TKey, TValue>
+{
+ private readonly ConsumerBuilder<TKey, TValue> _consumerBuilder;
+ private readonly ConsumerConfig _consumerConfig;
+
+ public ConsumerConnectionFactory(ConsumerBuilder<TKey, TValue> consumerBuilder, ConsumerConfig consumerConfig)
+ {
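+ // Copy the supplied config into a local snapshot that is exposed through the Config property.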
+ _consumerConfig = new ConsumerConfig();
+ foreach (var property in consumerConfig)
+ {
+ _consumerConfig.Set(property.Key, property.Value);
+ }
+ _consumerBuilder = consumerBuilder;
+ }
+
+ public ConsumerConfig Config => _consumerConfig;
+
+ public IConsumer<TKey, TValue> Create() => _consumerBuilder.Build();
+}
+
diff --git a/src/Components/Aspire.Confluent.Kafka/KafkaConsumerSettings.cs b/src/Components/Aspire.Confluent.Kafka/KafkaConsumerSettings.cs
new file mode 100644
index 0000000000..098b3c7ba7
--- /dev/null
+++ b/src/Components/Aspire.Confluent.Kafka/KafkaConsumerSettings.cs
@@ -0,0 +1,61 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+
+using System.Diagnostics;
+using Confluent.Kafka;
+
+namespace Aspire.Confluent.Kafka;
+
+/// <summary>
+/// Provides the client configuration settings for connecting to a Kafka message broker to consume messages.
+/// </summary>
+public sealed class KafkaConsumerSettings
+{
+ /// <summary>
+ /// Gets or sets the connection string of the Kafka server to connect to.
+ /// </summary>
+ public string? ConnectionString { get; set; }
+
+ /// <summary>
+ /// Gets the configuration settings for the Kafka consumer.
+ /// </summary>
+ public ConsumerConfig Config { get; } = new ConsumerConfig();
+
+ /// <summary>
+ /// Gets or sets a boolean value that indicates whether collecting metrics is enabled or not.
+ /// </summary>
+ public bool Metrics { get; set; } = true;
+
+ /// <summary>
+ /// Gets or sets a boolean value that indicates whether the Kafka health check is enabled or not.
+ /// </summary>
+ public bool HealthChecks { get; set; } = true;
+
+ internal void Consolidate()
+ {
+ Debug.Assert(Config is not null);
+
+ if (ConnectionString is not null)
+ {
+ Config.BootstrapServers = ConnectionString;
+ }
+
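+ // Metrics rely on librdkafka statistics; default the emit interval to 1 second unless the user has already configured one.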
+ if (Metrics)
+ {
+ Config.StatisticsIntervalMs ??= 1000;
+ }
+ }
+
+ internal void Validate()
+ {
+ if (Config.BootstrapServers is null)
+ {
+ throw new InvalidOperationException("No bootstrap servers configured.");
+ }
+
+ if (Config.GroupId is null)
+ {
+ throw new InvalidOperationException("No group id configured.");
+ }
+ }
+}
diff --git a/src/Components/Aspire.Confluent.Kafka/KafkaProducerSettings.cs b/src/Components/Aspire.Confluent.Kafka/KafkaProducerSettings.cs
new file mode 100644
index 0000000000..f79e42955f
--- /dev/null
+++ b/src/Components/Aspire.Confluent.Kafka/KafkaProducerSettings.cs
@@ -0,0 +1,62 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+
+using System.Diagnostics;
+using Confluent.Kafka;
+
+namespace Aspire.Confluent.Kafka;
+
+/// <summary>
+/// Provides the client configuration settings for connecting to a Kafka message broker to produce messages.
+/// </summary>
+public sealed class KafkaProducerSettings
+{
+ /// <summary>
+ /// Gets or sets the connection string of the Kafka server to connect to.
+ /// </summary>
+ public string? ConnectionString { get; set; }
+
+ /// <summary>
+ /// Gets the configuration settings for the Kafka producer.
+ /// </summary>
+ public ProducerConfig Config { get; } = new ProducerConfig();
+
+ /// <summary>
+ /// Gets or sets a boolean value that indicates whether collecting metrics is enabled or not.
+ /// </summary>
+ /// <value>
+ /// The default value is <see langword="true"/>.
+ /// </value>
+ public bool Metrics { get; set; } = true;
+
+ /// <summary>
+ /// Gets or sets a boolean value that indicates whether the Kafka health check is enabled or not.
+ /// </summary>
+ /// <value>
+ /// The default value is <see langword="true"/>.
+ /// </value>
+ public bool HealthChecks { get; set; } = true;
+
+ internal void Consolidate()
+ {
+ Debug.Assert(Config is not null);
+
+ if (ConnectionString is not null)
+ {
+ Config.BootstrapServers = ConnectionString;
+ }
+
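+ // Metrics rely on librdkafka statistics; default the emit interval to 1 second unless the user has already configured one.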
+ if (Metrics)
+ {
+ Config.StatisticsIntervalMs ??= 1000;
+ }
+ }
+
+ internal void Validate()
+ {
+ if (string.IsNullOrEmpty(Config.BootstrapServers))
+ {
+ throw new InvalidOperationException("No bootstrap servers configured.");
+ }
+ }
+}
diff --git a/src/Components/Aspire.Confluent.Kafka/MetricsChannel.cs b/src/Components/Aspire.Confluent.Kafka/MetricsChannel.cs
new file mode 100644
index 0000000000..30f409bd81
--- /dev/null
+++ b/src/Components/Aspire.Confluent.Kafka/MetricsChannel.cs
@@ -0,0 +1,25 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+
+using System.Threading.Channels;
+using Confluent.Kafka;
+
+namespace Aspire.Confluent.Kafka;
+
+/// <summary>
+/// Metrics are emitted using json data published by librdkafka StatisticsHandler (see https://docs.confluent.io/platform/current/clients/confluent-kafka-dotnet/_site/api/Confluent.Kafka.ConsumerBuilder-2.html#Confluent_Kafka_ConsumerBuilder_2_StatisticsHandler)
+/// The <see cref="MetricsChannel"/> is written by the StatisticsHandler of both <see cref="IProducer{TKey, TValue}"/> and <see cref="IConsumer{TKey, TValue}"/> and aims
+/// to avoid slowing down the consumer's consume thread and the producer's poll thread by offloading the processing of the json.
+/// The json processing is performed by <see cref="MetricsService"/>.
+/// </summary>
+internal sealed class MetricsChannel
+{
+ private readonly Channel<string> _channel = Channel.CreateBounded<string>(new BoundedChannelOptions(10_000)
+ {
+ SingleReader = true,
+ SingleWriter = false
+ });
+
+ public ChannelReader<string> Reader => _channel.Reader;
+ public ChannelWriter<string> Writer => _channel.Writer;
+}
diff --git a/src/Components/Aspire.Confluent.Kafka/MetricsService.cs b/src/Components/Aspire.Confluent.Kafka/MetricsService.cs
new file mode 100644
index 0000000000..5d5d0106ee
--- /dev/null
+++ b/src/Components/Aspire.Confluent.Kafka/MetricsService.cs
@@ -0,0 +1,82 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+
+using System.Diagnostics;
+using System.Diagnostics.Metrics;
+using System.Text.Json;
+using Microsoft.Extensions.Hosting;
+using Microsoft.Extensions.Logging;
+using static Aspire.Confluent.Kafka.ConfluentKafkaMetrics;
+
+namespace Aspire.Confluent.Kafka;
+
+internal sealed partial class MetricsService(MetricsChannel channel, ConfluentKafkaMetrics metrics, ILogger logger) : BackgroundService
+{
+ private readonly Dictionary<string, Statistics> _state = new();
+
+ protected override async Task ExecuteAsync(CancellationToken stoppingToken)
+ {
+ while (await channel.Reader.WaitToReadAsync(stoppingToken).ConfigureAwait(false))
+ {
+ while (channel.Reader.TryRead(out var json))
+ {
+ Statistics? statistics;
+ try
+ {
+ statistics = JsonSerializer.Deserialize(json, StatisticsJsonSerializerContext.Default.Statistics);
+ }
+ catch
+ {
+ LogDeserializationWarning(logger, json);
+ continue;
+ }
+
+ if (statistics == null || statistics.Name == null)
+ {
+ LogDeserializationWarning(logger, json);
+ continue;
+ }
+
+ TagList tags = new()
+ {
+ { Tags.ClientId, statistics.ClientId },
+ { Tags.Name, statistics.Name }
+ };
+
+ metrics.ReplyQueueMeasurements.Enqueue(new Measurement<long>(statistics.ReplyQueue, tags));
+ metrics.MessageCountMeasurements.Enqueue(new Measurement<long>(statistics.MessageCount, tags));
+ metrics.MessageSizeMeasurements.Enqueue(new Measurement<long>(statistics.MessageSize, tags));
+
+ tags.Add(new KeyValuePair<string, object?>(Tags.Type, statistics.Type));
+
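+ // librdkafka statistics report cumulative totals per client, so only the delta since the
+ // previous snapshot for this client name is added to the counters.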
+ if (_state.TryGetValue(statistics.Name, out var previous))
+ {
+ metrics.Tx.Add(statistics.Tx - previous.Tx, tags);
+ metrics.TxBytes.Add(statistics.TxBytes - previous.TxBytes, tags);
+ metrics.TxMessages.Add(statistics.TxMessages - previous.TxMessages, tags);
+ metrics.TxMessageBytes.Add(statistics.TxMessageBytes - previous.TxMessageBytes, tags);
+ metrics.Rx.Add(statistics.Rx - previous.Rx, tags);
+ metrics.RxBytes.Add(statistics.RxBytes - previous.RxBytes, tags);
+ metrics.RxMessages.Add(statistics.RxMessages - previous.RxMessages, tags);
+ metrics.RxMessageBytes.Add(statistics.RxMessageBytes - previous.RxMessageBytes, tags);
+ }
+ else
+ {
+ metrics.Tx.Add(statistics.Tx, tags);
+ metrics.TxBytes.Add(statistics.TxBytes, tags);
+ metrics.TxMessages.Add(statistics.TxMessages, tags);
+ metrics.TxMessageBytes.Add(statistics.TxMessageBytes, tags);
+ metrics.Rx.Add(statistics.Rx, tags);
+ metrics.RxBytes.Add(statistics.RxBytes, tags);
+ metrics.RxMessages.Add(statistics.RxMessages, tags);
+ metrics.RxMessageBytes.Add(statistics.RxMessageBytes, tags);
+ }
+
+ _state[statistics.Name] = statistics;
+ }
+ }
+ }
+
+ [LoggerMessage(EventId = 1, Level = LogLevel.Warning, Message = "Invalid statistics json payload received: `{json}`")]
+ private static partial void LogDeserializationWarning(ILogger logger, string json);
+}
diff --git a/src/Components/Aspire.Confluent.Kafka/ProducerConnectionFactory.cs b/src/Components/Aspire.Confluent.Kafka/ProducerConnectionFactory.cs
new file mode 100644
index 0000000000..f08e5e96bd
--- /dev/null
+++ b/src/Components/Aspire.Confluent.Kafka/ProducerConnectionFactory.cs
@@ -0,0 +1,26 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+
+using Confluent.Kafka;
+
+namespace Aspire.Confluent.Kafka;
+
+internal sealed class ProducerConnectionFactory<TKey, TValue>
+{
+ private readonly ProducerBuilder<TKey, TValue> _producerBuilder;
+ private readonly ProducerConfig _producerConfig;
+
+ public ProducerConnectionFactory(ProducerBuilder<TKey, TValue> producerBuilder, ProducerConfig producerConfig)
+ {
+ _producerConfig = new ProducerConfig();
+ foreach (var property in producerConfig)
+ {
+ _producerConfig.Set(property.Key, property.Value);
+ }
+ _producerBuilder = producerBuilder;
+ }
+
+ public ProducerConfig Config => _producerConfig;
+
+ public IProducer<TKey, TValue> Create() => _producerBuilder.Build();
+}
diff --git a/src/Components/Aspire.Confluent.Kafka/README.md b/src/Components/Aspire.Confluent.Kafka/README.md
new file mode 100644
index 0000000000..126e896fa5
--- /dev/null
+++ b/src/Components/Aspire.Confluent.Kafka/README.md
@@ -0,0 +1,157 @@
+# Aspire.Confluent.Kafka library
+
+Registers an [IProducer](https://docs.confluent.io/platform/current/clients/confluent-kafka-dotnet/_site/api/Confluent.Kafka.IProducer-2.html) and an [IConsumer](https://docs.confluent.io/platform/current/clients/confluent-kafka-dotnet/_site/api/Confluent.Kafka.IConsumer-2.html) in the DI container for producing and consuming messages to an Apache Kafka broker. Enables the corresponding health checks, logging and metrics.
+This library wraps the Confluent.Kafka binaries.
+
+## Getting started
+
+### Prerequisites
+
+- An Apache Kafka broker.
+
+### Install the package
+
+Install the .NET Aspire Confluent Kafka library with [NuGet](https://www.nuget.org):
+
+```dotnetcli
+dotnet add package Aspire.Confluent.Kafka
+```
+
+## Usage example
+
+In the _Program.cs_ file of your project, call the `AddKafkaProducer` extension method to register an `IProducer<TKey, TValue>` for use via the dependency injection container. The method takes two generic parameters corresponding to the type of the key and the type of the message to send to the broker. These generic parameters are used to create an instance of `ProducerBuilder<TKey, TValue>`. The method also takes the connection name as a parameter.
+
+```csharp
+builder.AddKafkaProducer("messaging");
+```
+
+You can then retrieve the `IProducer<string, string>` instance using dependency injection. For example, to retrieve the producer from an `IHostedService`:
+
+```csharp
+internal sealed class MyWorker(IProducer<string, string> producer, ILogger<MyWorker> logger) : BackgroundService
+{
+    protected override async Task ExecuteAsync(CancellationToken stoppingToken)
+    {
+        using var timer = new PeriodicTimer(TimeSpan.FromMilliseconds(10));
+        long i = 0;
+        while (await timer.WaitForNextTickAsync(stoppingToken))
+        {
+            var message = new Message<string, string>
+            {
+                Key = Guid.NewGuid().ToString(),
+                Value = $"Hello, World! {i}"
+            };
+            producer.Produce("topic", message);
+            logger.LogInformation($"{producer.Name} sent message '{message.Value}'");
+            i++;
+        }
+    }
+}
+```
+
+You can refer to [Confluent's Apache Kafka .NET Client documentation](https://docs.confluent.io/kafka-clients/dotnet/current/overview.html) for more information about how to use the `IProducer<TKey, TValue>` efficiently.
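+
+A consumer can be retrieved the same way once it has been registered with `AddKafkaConsumer<string, string>("messaging")`. The following is a minimal sketch of a consuming worker (it assumes a consumer group id has been configured, and `"topic"` is a placeholder topic name):
+
+```csharp
+internal sealed class MyConsumerWorker(IConsumer<string, string> consumer, ILogger<MyConsumerWorker> logger) : BackgroundService
+{
+    protected override Task ExecuteAsync(CancellationToken stoppingToken) => Task.Run(() =>
+    {
+        consumer.Subscribe("topic");
+        while (!stoppingToken.IsCancellationRequested)
+        {
+            // Consume blocks until a message is available or the token is cancelled.
+            var result = consumer.Consume(stoppingToken);
+            logger.LogInformation($"Received message with key '{result.Message.Key}': '{result.Message.Value}'");
+        }
+    }, stoppingToken);
+}
+```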
+
+## Configuration
+
+The .NET Aspire Confluent Kafka component provides multiple options to configure the connection based on the requirements and conventions of your project.
+
+### Use a connection string
+
+When using a connection string from the `ConnectionStrings` configuration section, you can provide the name of the connection string when calling `builder.AddKafkaProducer()` or `builder.AddKafkaConsumer()`:
+
+```csharp
+builder.AddKafkaProducer("myConnection");
+```
+
+And then the connection string will be retrieved from the `ConnectionStrings` configuration section:
+
+```json
+{
+ "ConnectionStrings": {
+ "myConnection": "broker:9092"
+ }
+}
+```
+
+The value provided as connection string will be set to the `BootstrapServers` property of the produced `IProducer` or `IConsumer` instance. Refer to [BootstrapServers](https://docs.confluent.io/platform/current/clients/confluent-kafka-dotnet/_site/api/Confluent.Kafka.ClientConfig.html#Confluent_Kafka_ClientConfig_BootstrapServers) for more information.
+
+### Use configuration providers
+
+The .NET Aspire Confluent Kafka component supports [Microsoft.Extensions.Configuration](https://learn.microsoft.com/dotnet/api/microsoft.extensions.configuration). It loads the `KafkaProducerSettings` or `KafkaConsumerSettings` from configuration by respectively using the `Aspire:Confluent:Kafka:Producer` and `Aspire:Confluent:Kafka:Consumer` keys. Example `appsettings.json` that configures some of the options:
+
+```json
+{
+ "Aspire": {
+ "Confluent": {
+ "Kafka": {
+ "Producer": {
+ "HealthChecks": true,
+ "Config": {
+ "Acks": "All"
+ }
+ }
+ }
+ }
+ }
+}
+```
+
+The `Config` properties of the `Aspire:Confluent:Kafka:Producer` and `Aspire:Confluent:Kafka:Consumer` configuration sections bind to instances of [`ProducerConfig`](https://docs.confluent.io/platform/current/clients/confluent-kafka-dotnet/_site/api/Confluent.Kafka.ProducerConfig.html) and [`ConsumerConfig`](https://docs.confluent.io/platform/current/clients/confluent-kafka-dotnet/_site/api/Confluent.Kafka.ConsumerConfig.html), respectively.
+
+`Confluent.Kafka.Consumer<TKey, TValue>` requires the `GroupId` property to be set so that the broker can track consumed message offsets for the consumer group.
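+
+For example, the following `appsettings.json` configures a consumer group (the group id value is illustrative):
+
+```json
+{
+  "Aspire": {
+    "Confluent": {
+      "Kafka": {
+        "Consumer": {
+          "Config": {
+            "GroupId": "my-consumer-group",
+            "AutoOffsetReset": "Earliest"
+          }
+        }
+      }
+    }
+  }
+}
+```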
+
+### Use inline delegates to configure `KafkaProducerSettings` and `KafkaConsumerSettings`
+
+You can also pass an `Action<KafkaProducerSettings>` (or `Action<KafkaConsumerSettings>`) `configureSettings` delegate to set up some or all of the options inline, for example to disable health checks from code:
+
+```csharp
+ builder.AddKafkaProducer("messaging", settings => settings.HealthChecks = false);
+```
+
+Similarly, you can configure a consumer inline from code:
+
+```csharp
+builder.AddKafkaConsumer<string, string>("messaging", settings => settings.HealthChecks = false);
+```
+
+### Use inline delegates to configure `ProducerBuilder` and `ConsumerBuilder`
+
+To configure the `Confluent.Kafka` builders (for example, to set up custom serializers/deserializers for the message key and value), you can pass an `Action<ProducerBuilder<TKey, TValue>>` (or `Action<ConsumerBuilder<TKey, TValue>>`) from code:
+```csharp
+builder.AddKafkaProducer<string, MyMessage>("messaging", producerBuilder =>
+{
+    // MyMessageSerializer stands in for your own serializer implementation.
+    producerBuilder.SetValueSerializer(new MyMessageSerializer());
+});
+```
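+
+A consumer builder can be configured in the same way; `MyMessage` and `MyMessageDeserializer` below are placeholders for your own types:
+
+```csharp
+builder.AddKafkaConsumer<string, MyMessage>("messaging", consumerBuilder =>
+{
+    // Use a custom deserializer for the message value.
+    consumerBuilder.SetValueDeserializer(new MyMessageDeserializer());
+});
+```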
+
+You can refer to the [`ProducerBuilder<TKey, TValue>`](https://docs.confluent.io/platform/current/clients/confluent-kafka-dotnet/_site/api/Confluent.Kafka.ProducerBuilder-2.html) and [`ConsumerBuilder<TKey, TValue>`](https://docs.confluent.io/platform/current/clients/confluent-kafka-dotnet/_site/api/Confluent.Kafka.ConsumerBuilder-2.html) API documentation for more information.
+
+## AppHost extensions
+
+In your AppHost project, register an Apache Kafka container and consume the connection using the following methods:
+
+```csharp
+var messaging = builder.AddKafka("messaging");
+
+var myService = builder.AddProject<Projects.MyService>()
+    .WithReference(messaging);
+```
+
+The `WithReference` method configures a connection in the `MyService` project named `messaging`. In the _Program.cs_ file of `MyService`, the Apache Kafka broker connection can be consumed using:
+
+```csharp
+builder.AddKafkaProducer("messaging");
+```
+
+or
+
+```csharp
+builder.AddKafkaConsumer("messaging");
+```
+
+## Additional documentation
+
+* https://docs.confluent.io/kafka-clients/dotnet/current/overview.html
+* https://github.com/dotnet/aspire/tree/main/src/Components/README.md
+
+## Feedback & contributing
+
+https://github.com/dotnet/aspire
diff --git a/src/Components/Aspire.Confluent.Kafka/Statistics.cs b/src/Components/Aspire.Confluent.Kafka/Statistics.cs
new file mode 100644
index 0000000000..d72632e7b5
--- /dev/null
+++ b/src/Components/Aspire.Confluent.Kafka/Statistics.cs
@@ -0,0 +1,62 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+
+using System.Text.Json.Serialization;
+
+namespace Aspire.Confluent.Kafka;
+
+/// <summary>
+/// Maps to the JSON output returned by the librdkafka statistics API.
+/// </summary>
+internal sealed class Statistics
+{
+ [JsonPropertyName("name")]
+ public string? Name { get; set; }
+ [JsonPropertyName("client_id")]
+ public string? ClientId { get; set; }
+ [JsonPropertyName("type")]
+ public string? Type { get; set; }
+ [JsonPropertyName("ts")]
+ public long Timestamp { get; set; }
+ [JsonPropertyName("time")]
+ public long Time { get; set; }
+ [JsonPropertyName("age")]
+ public long Age { get; set; }
+ [JsonPropertyName("replyq")]
+ public long ReplyQueue { get; set; }
+ [JsonPropertyName("msg_cnt")]
+ public long MessageCount { get; set; }
+ [JsonPropertyName("msg_size")]
+ public long MessageSize { get; set; }
+ [JsonPropertyName("msg_max")]
+ public long MessageMax { get; set; }
+ [JsonPropertyName("msg_size_max")]
+ public long MessageSizeMax { get; set; }
+ [JsonPropertyName("tx")]
+ public long Tx { get; set; }
+ [JsonPropertyName("tx_bytes")]
+ public long TxBytes { get; set; }
+ [JsonPropertyName("rx")]
+ public long Rx { get; set; }
+ [JsonPropertyName("rx_bytes")]
+ public long RxBytes { get; set; }
+ [JsonPropertyName("txmsgs")]
+ public long TxMessages { get; set; }
+ [JsonPropertyName("txmsg_bytes")]
+ public long TxMessageBytes { get; set; }
+ [JsonPropertyName("rxmsgs")]
+ public long RxMessages { get; set; }
+ [JsonPropertyName("rxmsg_bytes")]
+ public long RxMessageBytes { get; set; }
+ [JsonPropertyName("simple_cnt")]
+ public long SimpleCount { get; set; }
+ [JsonPropertyName("metadata_cache_cnt")]
+ public long MetadataCacheCount { get; set; }
+}
+
+[JsonSerializable(typeof(Statistics))]
+[JsonSourceGenerationOptions]
+internal sealed partial class StatisticsJsonSerializerContext : JsonSerializerContext
+{
+
+}
diff --git a/src/Components/Aspire_Components_Progress.md b/src/Components/Aspire_Components_Progress.md
index beec376e8f..44e2925aac 100644
--- a/src/Components/Aspire_Components_Progress.md
+++ b/src/Components/Aspire_Components_Progress.md
@@ -23,6 +23,7 @@ As part of the .NET Aspire November preview, we want to include a set of .NET As
| RabbitMQ | ✅ | ✅ | ✅ | ✅ | | | ❌ | ✅ |
| MySqlConnector | ✅ | | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
| Oracle.EntityFrameworkCore | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
+| Confluent.Kafka | ✅ | | ✅ | ✅ | ✅ | ❌ | ✅ | ✅ |
Nomenclature used in the table above:
diff --git a/src/Components/Telemetry.md b/src/Components/Telemetry.md
index 08e7a44faa..645ea12f69 100644
--- a/src/Components/Telemetry.md
+++ b/src/Components/Telemetry.md
@@ -47,6 +47,25 @@ Aspire.Azure.Storage.Queues:
- Metric names:
- none (currently not supported by the Azure SDK)
+Aspire.Confluent.Kafka:
+- Log categories:
+ - "Aspire.Confluent.Kafka"
+- Activity source names:
+ - N/A
+- Metric names:
+ - "Aspire.Confluent.Kafka"
+ - "messaging.kafka.consumer.queue.message_count"
+ - "messaging.kafka.producer.queue.message_count"
+ - "messaging.kafka.producer.queue.size"
+ - "messaging.kafka.network.tx"
+ - "messaging.kafka.network.transmitted"
+ - "messaging.kafka.network.rx"
+ - "messaging.kafka.network.received"
+ - "messaging.kafka.message.tx"
+ - "messaging.kafka.message.transmitted"
+ - "messaging.kafka.message.rx"
+ - "messaging.kafka.message.received"
+
Aspire.Microsoft.Azure.Cosmos:
- Log categories:
- "Azure-Cosmos-Operation-Request-Diagnostics"
diff --git a/tests/Aspire.Confluent.Kafka.Tests/Aspire.Confluent.Kafka.Tests.csproj b/tests/Aspire.Confluent.Kafka.Tests/Aspire.Confluent.Kafka.Tests.csproj
new file mode 100644
index 0000000000..42d3bd04fc
--- /dev/null
+++ b/tests/Aspire.Confluent.Kafka.Tests/Aspire.Confluent.Kafka.Tests.csproj
@@ -0,0 +1,13 @@
+
+
+
+ $(NetCurrent)
+
+
+
+
+
+
+
+
+
diff --git a/tests/Aspire.Confluent.Kafka.Tests/CommonHelpers.cs b/tests/Aspire.Confluent.Kafka.Tests/CommonHelpers.cs
new file mode 100644
index 0000000000..73db28077b
--- /dev/null
+++ b/tests/Aspire.Confluent.Kafka.Tests/CommonHelpers.cs
@@ -0,0 +1,9 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+
+namespace Aspire.Confluent.Kafka.Tests;
+
+internal sealed class CommonHelpers
+{
+ public const string TestingEndpoint = "localhost:9092";
+}
diff --git a/tests/Aspire.Confluent.Kafka.Tests/ConsumerConfigurationTests.cs b/tests/Aspire.Confluent.Kafka.Tests/ConsumerConfigurationTests.cs
new file mode 100644
index 0000000000..3d84545dea
--- /dev/null
+++ b/tests/Aspire.Confluent.Kafka.Tests/ConsumerConfigurationTests.cs
@@ -0,0 +1,160 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+
+using System.Text;
+using Confluent.Kafka;
+using Microsoft.Extensions.Configuration;
+using Microsoft.Extensions.DependencyInjection;
+using Microsoft.Extensions.Hosting;
+using Xunit;
+
+namespace Aspire.Confluent.Kafka.Tests;
+
+public class ConsumerConfigurationTests
+{
+ [ConditionalTheory]
+ [InlineData(true)]
+ [InlineData(false)]
+ public void ReadsFromConnectionStringsCorrectly(bool useKeyed)
+ {
+ var builder = Host.CreateEmptyApplicationBuilder(null);
+
+ var key = useKeyed ? "messaging" : null;
+ builder.Configuration.AddInMemoryCollection([
+ new KeyValuePair("ConnectionStrings:messaging", CommonHelpers.TestingEndpoint),
+ new KeyValuePair(ProducerConformanceTests.CreateConfigKey("Aspire:Confluent:Kafka:Consumer", key, "Config:GroupId"), "unused")
+ ]);
+
+ if (useKeyed)
+ {
+ builder.AddKeyedKafkaConsumer("messaging");
+ }
+ else
+ {
+ builder.AddKafkaConsumer("messaging");
+ }
+
+ var host = builder.Build();
+ var connectionFactory = useKeyed
+ ? host.Services.GetRequiredKeyedService(ReflectionHelpers.ConsumerConnectionFactoryStringKeyStringValueType.Value!, "messaging")
+ : host.Services.GetRequiredService(ReflectionHelpers.ConsumerConnectionFactoryStringKeyStringValueType.Value!);
+
+ ConsumerConfig config = GetConsumerConfig(connectionFactory)!;
+
+ Assert.Equal(CommonHelpers.TestingEndpoint, config.BootstrapServers);
+ }
+
+ [ConditionalTheory]
+ [InlineData(true)]
+ [InlineData(false)]
+ public void ConnectionStringCanBeSetInCode(bool useKeyed)
+ {
+ var builder = Host.CreateEmptyApplicationBuilder(null);
+
+ var key = useKeyed ? "messaging" : null;
+ builder.Configuration.AddInMemoryCollection([
+ new KeyValuePair("ConnectionStrings:messaging", "unused"),
+ new KeyValuePair(ProducerConformanceTests.CreateConfigKey("Aspire:Confluent:Kafka:Consumer", key, "Config:GroupId"), "unused")
+ ]);
+
+ static void SetConnectionString(KafkaConsumerSettings settings) => settings.ConnectionString = CommonHelpers.TestingEndpoint;
+ if (useKeyed)
+ {
+ builder.AddKeyedKafkaConsumer("messaging", configureSettings: SetConnectionString);
+ }
+ else
+ {
+ builder.AddKafkaConsumer("messaging", configureSettings: SetConnectionString);
+ }
+
+ var host = builder.Build();
+ var connectionFactory = useKeyed
+ ? host.Services.GetRequiredKeyedService(ReflectionHelpers.ConsumerConnectionFactoryStringKeyStringValueType.Value!, "messaging")
+ : host.Services.GetRequiredService(ReflectionHelpers.ConsumerConnectionFactoryStringKeyStringValueType.Value!);
+
+ ConsumerConfig config = GetConsumerConfig(connectionFactory)!;
+
+ Assert.Equal(CommonHelpers.TestingEndpoint, config.BootstrapServers);
+ }
+
+ [ConditionalTheory]
+ [InlineData(true)]
+ [InlineData(false)]
+ public void ConnectionNameWinsOverConfigSection(bool useKeyed)
+ {
+ var builder = Host.CreateEmptyApplicationBuilder(null);
+
+ var key = useKeyed ? "messaging" : null;
+ builder.Configuration.AddInMemoryCollection([
+ new KeyValuePair(ProducerConformanceTests.CreateConfigKey("Aspire:Confluent:Kafka:Consumer", key, "ConnectionString"), "unused"),
+ new KeyValuePair("ConnectionStrings:messaging", CommonHelpers.TestingEndpoint),
+ new KeyValuePair(ProducerConformanceTests.CreateConfigKey("Aspire:Confluent:Kafka:Consumer", key, "Config:GroupId"), "unused")
+ ]);
+
+ if (useKeyed)
+ {
+ builder.AddKeyedKafkaConsumer("messaging");
+ }
+ else
+ {
+ builder.AddKafkaConsumer("messaging");
+ }
+
+ var host = builder.Build();
+ var connectionFactory = useKeyed
+ ? host.Services.GetRequiredKeyedService(ReflectionHelpers.ConsumerConnectionFactoryStringKeyStringValueType.Value!, "messaging")
+ : host.Services.GetRequiredService(ReflectionHelpers.ConsumerConnectionFactoryStringKeyStringValueType.Value!);
+
+ ConsumerConfig config = GetConsumerConfig(connectionFactory)!;
+
+ Assert.Equal(CommonHelpers.TestingEndpoint, config.BootstrapServers);
+ }
+
+ [Fact]
+ public void ConsumerConfigOptionsFromConfig()
+ {
+ static Stream CreateStreamFromString(string data) => new MemoryStream(Encoding.UTF8.GetBytes(data));
+
+ using var jsonStream = CreateStreamFromString("""
+ {
+ "Aspire": {
+ "Confluent": {
+ "Kafka": {
+ "Consumer": {
+ "Config": {
+ "BootstrapServers": "localhost:9092",
+ "AutoOffsetReset": "Earliest",
+ "GroupId": "consumer-group",
+ "SaslUsername": "user",
+ "SaslPassword": "password",
+ "SaslMechanism": "Plain",
+ "SecurityProtocol": "Plaintext"
+ }
+ }
+ }
+ }
+ }
+ }
+ """);
+
+ var builder = Host.CreateEmptyApplicationBuilder(null);
+
+ builder.Configuration.AddJsonStream(jsonStream);
+
+ builder.AddKafkaConsumer("messaging");
+
+ var host = builder.Build();
+ var connectionFactory = host.Services.GetRequiredService(ReflectionHelpers.ConsumerConnectionFactoryStringKeyStringValueType.Value!);
+
+ ConsumerConfig config = GetConsumerConfig(connectionFactory)!;
+
+ Assert.Equal(AutoOffsetReset.Earliest, config.AutoOffsetReset);
+ Assert.Equal("consumer-group", config.GroupId);
+ Assert.Equal("user", config.SaslUsername);
+ Assert.Equal("password", config.SaslPassword);
+ Assert.Equal(SaslMechanism.Plain, config.SaslMechanism);
+ Assert.Equal(SecurityProtocol.Plaintext, config.SecurityProtocol);
+ }
+
+ private static ConsumerConfig? GetConsumerConfig(object o) => ReflectionHelpers.ConsumerConnectionFactoryStringKeyStringValueType.Value!.GetProperty("Config")!.GetValue(o) as ConsumerConfig;
+}
diff --git a/tests/Aspire.Confluent.Kafka.Tests/ConsumerConformanceTests.cs b/tests/Aspire.Confluent.Kafka.Tests/ConsumerConformanceTests.cs
new file mode 100644
index 0000000000..af7c102998
--- /dev/null
+++ b/tests/Aspire.Confluent.Kafka.Tests/ConsumerConformanceTests.cs
@@ -0,0 +1,86 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+
+using Aspire.Components.ConformanceTests;
+using Confluent.Kafka;
+using Microsoft.Extensions.Configuration;
+using Microsoft.Extensions.DependencyInjection;
+using Microsoft.Extensions.Hosting;
+
+namespace Aspire.Confluent.Kafka.Tests;
+
+public class ConsumerConformanceTests : ConformanceTests<IConsumer<string, string>, KafkaConsumerSettings>
+{
+ protected override ServiceLifetime ServiceLifetime => ServiceLifetime.Singleton;
+
+ protected override string ActivitySourceName => throw new NotImplementedException();
+
+ protected override string JsonSchemaPath => "src/Components/Aspire.Confluent.Kafka/ConfigurationSchema.json";
+
+ protected override string[] RequiredLogCategories => [
+ "Aspire.Confluent.Kafka"
+ ];
+
+ protected override void PopulateConfiguration(ConfigurationManager configuration, string? key = null)
+ {
+ configuration.AddInMemoryCollection(new KeyValuePair[]
+ {
+ new KeyValuePair(CreateConfigKey("Aspire:Confluent:Kafka:Consumer", key, "ConnectionString"),
+ "localhost:9092"),
+ new KeyValuePair(CreateConfigKey("Aspire:Confluent:Kafka:Consumer", key, "Config:GroupId"),
+ "test")
+ });
+ }
+
+ protected override void RegisterComponent(HostApplicationBuilder builder, Action? configure = null, string? key = null)
+ {
+ if (key is null)
+ {
+ builder.AddKafkaConsumer("messaging", configure);
+ }
+ else
+ {
+ builder.AddKeyedKafkaConsumer(key, configure);
+ }
+ }
+
+ protected override void SetHealthCheck(KafkaConsumerSettings options, bool enabled) => options.HealthChecks = enabled;
+
+ protected override void SetMetrics(KafkaConsumerSettings options, bool enabled) => options.Metrics = enabled;
+
+ protected override void SetTracing(KafkaConsumerSettings options, bool enabled)
+ {
+ throw new NotImplementedException();
+ }
+
+ protected override void TriggerActivity(IConsumer<string, string> service)
+ {
+ service.Subscribe("test");
+ }
+
+ protected override bool SupportsKeyedRegistrations => true;
+
+ protected override string ValidJsonConfig => """
+ {
+ "Aspire": {
+ "Confluent": {
+ "Kafka": {
+ "Consumer": {
+ "ConnectionString": "localhost:9092",
+ "HealthChecks": true,
+ "Metrics": true,
+ "Config": {
+ "GroupId": "test"
+ }
+ }
+ }
+ }
+ }
+ }
+ """;
+
+ protected override (string json, string error)[] InvalidJsonToErrorMessage => new[]
+ {
+ ("""{"Aspire": { "Confluent":{ "Kafka": { "Consumer": { "Metrics": 0}}}}}""", "Value is \"integer\" but should be \"boolean\""),
+ ("""{"Aspire": { "Confluent":{ "Kafka": { "Consumer": { "HealthChecks": 0}}}}}""", "Value is \"integer\" but should be \"boolean\"")
+ };
+}
diff --git a/tests/Aspire.Confluent.Kafka.Tests/MetricsTests.cs b/tests/Aspire.Confluent.Kafka.Tests/MetricsTests.cs
new file mode 100644
index 0000000000..829b84275a
--- /dev/null
+++ b/tests/Aspire.Confluent.Kafka.Tests/MetricsTests.cs
@@ -0,0 +1,345 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+
+using System.Diagnostics.Metrics;
+using System.Threading.Channels;
+using Microsoft.Extensions.Configuration;
+using Microsoft.Extensions.DependencyInjection;
+using Microsoft.Extensions.Diagnostics.Metrics.Testing;
+using Microsoft.Extensions.Hosting;
+using Xunit;
+
+namespace Aspire.Confluent.Kafka.Tests;
+
+public class MetricsTests
+{
+ [Theory]
+ [ClassData(typeof(Get_ExposesStatisticsAsCountersAndGauge_InitializeCounters_TestVariations))]
+ public async Task ExposesStatisticsAsCountersAndGauge_InitializeCounters(TestVariationData variation)
+ {
+ bool useKeyed = variation.UseKeyed;
+ List statistics = variation.Statistics;
+
+ var builder = Host.CreateEmptyApplicationBuilder(null);
+
+ var key = useKeyed ? "messaging" : null;
+ builder.Configuration.AddInMemoryCollection([
+ new KeyValuePair("ConnectionStrings:messaging", CommonHelpers.TestingEndpoint),
+ new KeyValuePair(ProducerConformanceTests.CreateConfigKey("Aspire:Confluent:Kafka:Consumer", key, "Config:GroupId"), "unused")
+ ]);
+
+ if (useKeyed)
+ {
+ builder.AddKeyedKafkaConsumer("messaging");
+ }
+ else
+ {
+ builder.AddKafkaConsumer("messaging");
+ }
+
+ using (var host = builder.Build())
+ {
+ await host.StartAsync();
+
+ object metricsChannel = host.Services.GetRequiredService(ReflectionHelpers.MetricsChannelType.Value!);
+ ChannelWriter writer = GetMetricsChannelWriter(metricsChannel)!;
+ IMeterFactory meterFactory = host.Services.GetRequiredService();
+ MetricCollector collectorConsumerQueueMessageCount = new MetricCollector(meterFactory, "Aspire.Confluent.Kafka", "messaging.kafka.consumer.queue.message_count");
+ MetricCollector collectorProducerQueueMessageCount = new MetricCollector(meterFactory, "Aspire.Confluent.Kafka", "messaging.kafka.producer.queue.message_count");
+ MetricCollector collectorProducerQueueSize = new MetricCollector(meterFactory, "Aspire.Confluent.Kafka", "messaging.kafka.producer.queue.size");
+ MetricCollector collectorNetworkTx = new MetricCollector(meterFactory, "Aspire.Confluent.Kafka", "messaging.kafka.network.tx");
+ MetricCollector collectorNetworkTransmitted = new MetricCollector(meterFactory, "Aspire.Confluent.Kafka", "messaging.kafka.network.transmitted");
+ MetricCollector collectorNetworkRx = new MetricCollector(meterFactory, "Aspire.Confluent.Kafka", "messaging.kafka.network.rx");
+ MetricCollector collectorNetworkReceived = new MetricCollector(meterFactory, "Aspire.Confluent.Kafka", "messaging.kafka.network.received");
+ MetricCollector collectorMessageTx = new MetricCollector(meterFactory, "Aspire.Confluent.Kafka", "messaging.publish.messages");
+ MetricCollector collectorMessageTransmitted = new MetricCollector(meterFactory, "Aspire.Confluent.Kafka", "messaging.kafka.message.transmitted");
+ MetricCollector collectorMessageRx = new MetricCollector(meterFactory, "Aspire.Confluent.Kafka", "messaging.receive.messages");
+ MetricCollector collectorMessageReceived = new MetricCollector(meterFactory, "Aspire.Confluent.Kafka", "messaging.kafka.message.received");
+
+ foreach (var statistic in statistics)
+ {
+ writer.TryWrite(statistic);
+ }
+
+ await Task.WhenAll(
+ collectorNetworkTx.WaitForMeasurementsAsync(statistics.Count),
+ collectorNetworkTransmitted.WaitForMeasurementsAsync(statistics.Count),
+ collectorNetworkRx.WaitForMeasurementsAsync(statistics.Count),
+ collectorNetworkReceived.WaitForMeasurementsAsync(statistics.Count),
+ collectorMessageTx.WaitForMeasurementsAsync(statistics.Count),
+ collectorMessageTransmitted.WaitForMeasurementsAsync(statistics.Count),
+ collectorMessageRx.WaitForMeasurementsAsync(statistics.Count),
+ collectorMessageReceived.WaitForMeasurementsAsync(statistics.Count)
+ );
+
+ collectorConsumerQueueMessageCount.RecordObservableInstruments();
+ collectorProducerQueueMessageCount.RecordObservableInstruments();
+ collectorProducerQueueSize.RecordObservableInstruments();
+
+ Assert.Equal(100, collectorProducerQueueMessageCount.LastMeasurement!.Value);
+ Assert.Contains(collectorProducerQueueMessageCount.LastMeasurement!.Tags, t => t.Key == "messaging.client_id" && t.Value!.ToString() == "rdkafka");
+ Assert.Contains(collectorProducerQueueMessageCount.LastMeasurement!.Tags, t => t.Key == "name" && t.Value!.ToString() == "rdkafka#producer-1");
+
+ Assert.Equal(100, collectorConsumerQueueMessageCount.LastMeasurement!.Value);
+ Assert.Contains(collectorConsumerQueueMessageCount.LastMeasurement!.Tags, t => t.Key == "messaging.client_id" && t.Value!.ToString() == "rdkafka");
+ Assert.Contains(collectorConsumerQueueMessageCount.LastMeasurement!.Tags, t => t.Key == "name" && t.Value!.ToString() == "rdkafka#producer-1");
+
+ Assert.Equal(1638400, collectorProducerQueueSize.LastMeasurement!.Value);
+ Assert.Contains(collectorProducerQueueSize.LastMeasurement!.Tags, t => t.Key == "messaging.client_id" && t.Value!.ToString() == "rdkafka");
+ Assert.Contains(collectorProducerQueueSize.LastMeasurement!.Tags, t => t.Key == "name" && t.Value!.ToString() == "rdkafka#producer-1");
+
+ Assert.Equal(5, collectorNetworkTx.LastMeasurement!.Value);
+ Assert.Contains(collectorNetworkTx.LastMeasurement!.Tags, t => t.Key == "messaging.client_id" && t.Value!.ToString() == "rdkafka");
+ Assert.Contains(collectorNetworkTx.LastMeasurement!.Tags, t => t.Key == "name" && t.Value!.ToString() == "rdkafka#producer-1");
+ Assert.Contains(collectorNetworkTx.LastMeasurement!.Tags, t => t.Key == "type" && t.Value!.ToString() == "producer");
+
+ Assert.Equal(1638400, collectorNetworkTransmitted.LastMeasurement!.Value);
+ Assert.Contains(collectorNetworkTransmitted.LastMeasurement!.Tags, t => t.Key == "messaging.client_id" && t.Value!.ToString() == "rdkafka");
+ Assert.Contains(collectorNetworkTransmitted.LastMeasurement!.Tags, t => t.Key == "name" && t.Value!.ToString() == "rdkafka#producer-1");
+ Assert.Contains(collectorNetworkTransmitted.LastMeasurement!.Tags, t => t.Key == "type" && t.Value!.ToString() == "producer");
+
+ Assert.Equal(5, collectorNetworkRx.LastMeasurement!.Value);
+ Assert.Contains(collectorNetworkRx.LastMeasurement!.Tags, t => t.Key == "messaging.client_id" && t.Value!.ToString() == "rdkafka");
+ Assert.Contains(collectorNetworkRx.LastMeasurement!.Tags, t => t.Key == "name" && t.Value!.ToString() == "rdkafka#producer-1");
+ Assert.Contains(collectorNetworkRx.LastMeasurement!.Tags, t => t.Key == "type" && t.Value!.ToString() == "producer");
+
+ Assert.Equal(1638400, collectorNetworkReceived.LastMeasurement!.Value);
+ Assert.Contains(collectorNetworkReceived.LastMeasurement!.Tags, t => t.Key == "messaging.client_id" && t.Value!.ToString() == "rdkafka");
+ Assert.Contains(collectorNetworkReceived.LastMeasurement!.Tags, t => t.Key == "name" && t.Value!.ToString() == "rdkafka#producer-1");
+ Assert.Contains(collectorNetworkReceived.LastMeasurement!.Tags, t => t.Key == "type" && t.Value!.ToString() == "producer");
+
+ Assert.Equal(5, collectorMessageTx.LastMeasurement!.Value);
+ Assert.Contains(collectorMessageTx.LastMeasurement!.Tags, t => t.Key == "messaging.client_id" && t.Value!.ToString() == "rdkafka");
+ Assert.Contains(collectorMessageTx.LastMeasurement!.Tags, t => t.Key == "name" && t.Value!.ToString() == "rdkafka#producer-1");
+ Assert.Contains(collectorMessageTx.LastMeasurement!.Tags, t => t.Key == "type" && t.Value!.ToString() == "producer");
+
+ Assert.Equal(1638400, collectorMessageTransmitted.LastMeasurement!.Value);
+ Assert.Contains(collectorMessageTransmitted.LastMeasurement!.Tags, t => t.Key == "messaging.client_id" && t.Value!.ToString() == "rdkafka");
+ Assert.Contains(collectorMessageTransmitted.LastMeasurement!.Tags, t => t.Key == "name" && t.Value!.ToString() == "rdkafka#producer-1");
+ Assert.Contains(collectorMessageTransmitted.LastMeasurement!.Tags, t => t.Key == "type" && t.Value!.ToString() == "producer");
+
+ Assert.Equal(5, collectorMessageRx.LastMeasurement!.Value);
+ Assert.Contains(collectorMessageRx.LastMeasurement!.Tags, t => t.Key == "messaging.client_id" && t.Value!.ToString() == "rdkafka");
+ Assert.Contains(collectorMessageRx.LastMeasurement!.Tags, t => t.Key == "name" && t.Value!.ToString() == "rdkafka#producer-1");
+ Assert.Contains(collectorMessageRx.LastMeasurement!.Tags, t => t.Key == "type" && t.Value!.ToString() == "producer");
+
+ Assert.Equal(1638400, collectorMessageReceived.LastMeasurement!.Value);
+ Assert.Contains(collectorMessageReceived.LastMeasurement!.Tags, t => t.Key == "messaging.client_id" && t.Value!.ToString() == "rdkafka");
+ Assert.Contains(collectorMessageReceived.LastMeasurement!.Tags, t => t.Key == "name" && t.Value!.ToString() == "rdkafka#producer-1");
+ Assert.Contains(collectorMessageReceived.LastMeasurement!.Tags, t => t.Key == "type" && t.Value!.ToString() == "producer");
+ }
+ }
+
+ [Theory]
+ [ClassData(typeof(Get_ExposesStatisticsAsCountersAndGauge_AggregateCountersByName_TestVariations))]
+ public async Task ExposesStatisticsAsCountersAndGauge_AggregateCountersByName(TestVariationData variation)
+ {
+ bool useKeyed = variation.UseKeyed;
+ List statistics = variation.Statistics!;
+
+ var builder = Host.CreateEmptyApplicationBuilder(null);
+
+ var key = useKeyed ? "messaging" : null;
+ builder.Configuration.AddInMemoryCollection([
+ new KeyValuePair("ConnectionStrings:messaging", CommonHelpers.TestingEndpoint),
+ new KeyValuePair(ProducerConformanceTests.CreateConfigKey("Aspire:Confluent:Kafka:Consumer", key, "Config:GroupId"), "unused")
+ ]);
+
+ if (useKeyed)
+ {
+ builder.AddKeyedKafkaConsumer("messaging");
+ }
+ else
+ {
+ builder.AddKafkaConsumer("messaging");
+ }
+
+ using (var host = builder.Build())
+ {
+ await host.StartAsync();
+
+ object metricsChannel = host.Services.GetRequiredService(ReflectionHelpers.MetricsChannelType.Value!);
+ ChannelWriter writer = GetMetricsChannelWriter(metricsChannel)!;
+ IMeterFactory meterFactory = host.Services.GetRequiredService();
+ MetricCollector collectorConsumerQueueMessageCount = new MetricCollector(meterFactory, "Aspire.Confluent.Kafka", "messaging.kafka.consumer.queue.message_count");
+ MetricCollector collectorProducerQueueMessageCount = new MetricCollector(meterFactory, "Aspire.Confluent.Kafka", "messaging.kafka.producer.queue.message_count");
+ MetricCollector collectorProducerQueueSize = new MetricCollector(meterFactory, "Aspire.Confluent.Kafka", "messaging.kafka.producer.queue.size");
+ MetricCollector collectorNetworkTx = new MetricCollector(meterFactory, "Aspire.Confluent.Kafka", "messaging.kafka.network.tx");
+ MetricCollector collectorNetworkTransmitted = new MetricCollector(meterFactory, "Aspire.Confluent.Kafka", "messaging.kafka.network.transmitted");
+ MetricCollector collectorNetworkRx = new MetricCollector(meterFactory, "Aspire.Confluent.Kafka", "messaging.kafka.network.rx");
+ MetricCollector collectorNetworkReceived = new MetricCollector(meterFactory, "Aspire.Confluent.Kafka", "messaging.kafka.network.received");
+ MetricCollector collectorMessageTx = new MetricCollector(meterFactory, "Aspire.Confluent.Kafka", "messaging.publish.messages");
+ MetricCollector collectorMessageTransmitted = new MetricCollector(meterFactory, "Aspire.Confluent.Kafka", "messaging.kafka.message.transmitted");
+ MetricCollector collectorMessageRx = new MetricCollector(meterFactory, "Aspire.Confluent.Kafka", "messaging.receive.messages");
+ MetricCollector collectorMessageReceived = new MetricCollector(meterFactory, "Aspire.Confluent.Kafka", "messaging.kafka.message.received");
+
+ foreach (var statistic in statistics)
+ {
+ writer.TryWrite(statistic);
+ }
+
+ await Task.WhenAll(
+ collectorNetworkTx.WaitForMeasurementsAsync(statistics.Count),
+ collectorNetworkTransmitted.WaitForMeasurementsAsync(statistics.Count),
+ collectorNetworkRx.WaitForMeasurementsAsync(statistics.Count),
+ collectorNetworkReceived.WaitForMeasurementsAsync(statistics.Count),
+ collectorMessageTx.WaitForMeasurementsAsync(statistics.Count),
+ collectorMessageTransmitted.WaitForMeasurementsAsync(statistics.Count),
+ collectorMessageRx.WaitForMeasurementsAsync(statistics.Count),
+ collectorMessageReceived.WaitForMeasurementsAsync(statistics.Count)
+ );
+
+ collectorConsumerQueueMessageCount.RecordObservableInstruments();
+ collectorProducerQueueMessageCount.RecordObservableInstruments();
+ collectorProducerQueueSize.RecordObservableInstruments();
+
+ Assert.Equal(200, collectorProducerQueueMessageCount.LastMeasurement!.Value);
+ Assert.Contains(collectorProducerQueueMessageCount.LastMeasurement!.Tags, t => t.Key == "messaging.client_id" && t.Value!.ToString() == "rdkafka");
+ Assert.Contains(collectorProducerQueueMessageCount.LastMeasurement!.Tags, t => t.Key == "name" && t.Value!.ToString() == "rdkafka#producer-1");
+
+ Assert.Equal(200, collectorConsumerQueueMessageCount.LastMeasurement!.Value);
+ Assert.Contains(collectorConsumerQueueMessageCount.LastMeasurement!.Tags, t => t.Key == "messaging.client_id" && t.Value!.ToString() == "rdkafka");
+ Assert.Contains(collectorConsumerQueueMessageCount.LastMeasurement!.Tags, t => t.Key == "name" && t.Value!.ToString() == "rdkafka#producer-1");
+
+ Assert.Equal(3276800, collectorProducerQueueSize.LastMeasurement!.Value);
+ Assert.Contains(collectorProducerQueueSize.LastMeasurement!.Tags, t => t.Key == "messaging.client_id" && t.Value!.ToString() == "rdkafka");
+ Assert.Contains(collectorProducerQueueSize.LastMeasurement!.Tags, t => t.Key == "name" && t.Value!.ToString() == "rdkafka#producer-1");
+
+ Assert.Equal(5, collectorNetworkTx.LastMeasurement!.Value);
+ Assert.Contains(collectorNetworkTx.LastMeasurement!.Tags, t => t.Key == "messaging.client_id" && t.Value!.ToString() == "rdkafka");
+ Assert.Contains(collectorNetworkTx.LastMeasurement!.Tags, t => t.Key == "name" && t.Value!.ToString() == "rdkafka#producer-1");
+ Assert.Contains(collectorNetworkTx.LastMeasurement!.Tags, t => t.Key == "type" && t.Value!.ToString() == "producer");
+
+ Assert.Equal(1638400, collectorNetworkTransmitted.LastMeasurement!.Value);
+ Assert.Contains(collectorNetworkTransmitted.LastMeasurement!.Tags, t => t.Key == "messaging.client_id" && t.Value!.ToString() == "rdkafka");
+ Assert.Contains(collectorNetworkTransmitted.LastMeasurement!.Tags, t => t.Key == "name" && t.Value!.ToString() == "rdkafka#producer-1");
+ Assert.Contains(collectorNetworkTransmitted.LastMeasurement!.Tags, t => t.Key == "type" && t.Value!.ToString() == "producer");
+
+ Assert.Equal(5, collectorNetworkRx.LastMeasurement!.Value);
+ Assert.Contains(collectorNetworkRx.LastMeasurement!.Tags, t => t.Key == "messaging.client_id" && t.Value!.ToString() == "rdkafka");
+ Assert.Contains(collectorNetworkRx.LastMeasurement!.Tags, t => t.Key == "name" && t.Value!.ToString() == "rdkafka#producer-1");
+ Assert.Contains(collectorNetworkRx.LastMeasurement!.Tags, t => t.Key == "type" && t.Value!.ToString() == "producer");
+
+ Assert.Equal(1638400, collectorNetworkReceived.LastMeasurement!.Value);
+ Assert.Contains(collectorNetworkReceived.LastMeasurement!.Tags, t => t.Key == "messaging.client_id" && t.Value!.ToString() == "rdkafka");
+ Assert.Contains(collectorNetworkReceived.LastMeasurement!.Tags, t => t.Key == "name" && t.Value!.ToString() == "rdkafka#producer-1");
+ Assert.Contains(collectorNetworkReceived.LastMeasurement!.Tags, t => t.Key == "type" && t.Value!.ToString() == "producer");
+
+ Assert.Equal(5, collectorMessageTx.LastMeasurement!.Value);
+ Assert.Contains(collectorMessageTx.LastMeasurement!.Tags, t => t.Key == "messaging.client_id" && t.Value!.ToString() == "rdkafka");
+ Assert.Contains(collectorMessageTx.LastMeasurement!.Tags, t => t.Key == "name" && t.Value!.ToString() == "rdkafka#producer-1");
+ Assert.Contains(collectorMessageTx.LastMeasurement!.Tags, t => t.Key == "type" && t.Value!.ToString() == "producer");
+
+ Assert.Equal(1638400, collectorMessageTransmitted.LastMeasurement!.Value);
+ Assert.Contains(collectorMessageTransmitted.LastMeasurement!.Tags, t => t.Key == "messaging.client_id" && t.Value!.ToString() == "rdkafka");
+ Assert.Contains(collectorMessageTransmitted.LastMeasurement!.Tags, t => t.Key == "name" && t.Value!.ToString() == "rdkafka#producer-1");
+ Assert.Contains(collectorMessageTransmitted.LastMeasurement!.Tags, t => t.Key == "type" && t.Value!.ToString() == "producer");
+
+ Assert.Equal(5, collectorMessageRx.LastMeasurement!.Value);
+ Assert.Contains(collectorMessageRx.LastMeasurement!.Tags, t => t.Key == "messaging.client_id" && t.Value!.ToString() == "rdkafka");
+ Assert.Contains(collectorMessageRx.LastMeasurement!.Tags, t => t.Key == "name" && t.Value!.ToString() == "rdkafka#producer-1");
+ Assert.Contains(collectorMessageRx.LastMeasurement!.Tags, t => t.Key == "type" && t.Value!.ToString() == "producer");
+
+ Assert.Equal(1638400, collectorMessageReceived.LastMeasurement!.Value);
+ Assert.Contains(collectorMessageReceived.LastMeasurement!.Tags, t => t.Key == "messaging.client_id" && t.Value!.ToString() == "rdkafka");
+ Assert.Contains(collectorMessageReceived.LastMeasurement!.Tags, t => t.Key == "name" && t.Value!.ToString() == "rdkafka#producer-1");
+ Assert.Contains(collectorMessageReceived.LastMeasurement!.Tags, t => t.Key == "type" && t.Value!.ToString() == "producer");
+ }
+ }
+
+ private static ChannelWriter? GetMetricsChannelWriter(object o) => ReflectionHelpers.MetricsChannelType.Value!.GetProperty("Writer")!.GetValue(o) as ChannelWriter;
+
+ public class Get_ExposesStatisticsAsCountersAndGauge_InitializeCounters_TestVariations : TheoryData
+ {
+ public Get_ExposesStatisticsAsCountersAndGauge_InitializeCounters_TestVariations()
+ {
+ string s1 = """
+ {
+ "client_id": "rdkafka",
+ "type": "producer",
+ "name": "rdkafka#producer-1",
+ "replyq": 100,
+ "msg_cnt": 100,
+ "msg_size": 1638400,
+ "tx": 5,
+ "tx_bytes": 1638400,
+ "txmsgs": 5,
+ "txmsg_bytes": 1638400,
+ "rx": 5,
+ "rx_bytes": 1638400,
+ "rxmsgs": 5,
+ "rxmsg_bytes": 1638400
+ }
+ """;
+ Add(new TestVariationData()
+ {
+ UseKeyed = true,
+ Statistics = [s1]
+ });
+ Add(new TestVariationData()
+ {
+ UseKeyed = false,
+ Statistics = [s1]
+ });
+ }
+ }
+
+ public class Get_ExposesStatisticsAsCountersAndGauge_AggregateCountersByName_TestVariations : TheoryData
+ {
+ public Get_ExposesStatisticsAsCountersAndGauge_AggregateCountersByName_TestVariations()
+ {
+ string s1 = """
+ {
+ "client_id": "rdkafka",
+ "type": "producer",
+ "name": "rdkafka#producer-1",
+ "replyq": 100,
+ "msg_cnt": 100,
+ "msg_size": 1638400,
+ "tx": 5,
+ "tx_bytes": 1638400,
+ "txmsgs": 5,
+ "txmsg_bytes": 1638400,
+ "rx": 5,
+ "rx_bytes": 1638400,
+ "rxmsgs": 5,
+ "rxmsg_bytes": 1638400
+ }
+ """;
+ string s2 = """
+ {
+ "client_id": "rdkafka",
+ "type": "producer",
+ "name": "rdkafka#producer-1",
+ "replyq": 200,
+ "msg_cnt": 200,
+ "msg_size": 3276800,
+ "tx": 10,
+ "tx_bytes": 3276800,
+ "txmsgs": 10,
+ "txmsg_bytes": 3276800,
+ "rx": 10,
+ "rx_bytes": 3276800,
+ "rxmsgs": 10,
+ "rxmsg_bytes": 3276800
+ }
+ """;
+ Add(new TestVariationData()
+ {
+ UseKeyed = true,
+ Statistics = [s1, s2]
+ });
+ Add(new TestVariationData()
+ {
+ UseKeyed = false,
+ Statistics = [s1, s2]
+ });
+ }
+ }
+
+ public record TestVariationData
+ {
+ public bool UseKeyed { get; set; }
+ public required List Statistics { get; set; }
+ }
+}
diff --git a/tests/Aspire.Confluent.Kafka.Tests/ProducerConfigurationTests.cs b/tests/Aspire.Confluent.Kafka.Tests/ProducerConfigurationTests.cs
new file mode 100644
index 0000000000..65e30e8e05
--- /dev/null
+++ b/tests/Aspire.Confluent.Kafka.Tests/ProducerConfigurationTests.cs
@@ -0,0 +1,151 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+
+using System.Text;
+using Confluent.Kafka;
+using Microsoft.Extensions.Configuration;
+using Microsoft.Extensions.DependencyInjection;
+using Microsoft.Extensions.Hosting;
+using Xunit;
+
+namespace Aspire.Confluent.Kafka.Tests;
+
+public class ProducerConfigurationTests
+{
+ [Theory]
+ [InlineData(true)]
+ [InlineData(false)]
+ public void ReadsFromConnectionStringsCorrectly(bool useKeyed)
+ {
+ var builder = Host.CreateEmptyApplicationBuilder(null);
+ builder.Configuration.AddInMemoryCollection([
+ new KeyValuePair("ConnectionStrings:messaging", CommonHelpers.TestingEndpoint)
+ ]);
+
+ if (useKeyed)
+ {
+ builder.AddKeyedKafkaProducer("messaging");
+ }
+ else
+ {
+ builder.AddKafkaProducer("messaging");
+ }
+
+ var host = builder.Build();
+ var connectionFactory = useKeyed ?
+ host.Services.GetRequiredKeyedService(ReflectionHelpers.ProducerConnectionFactoryStringKeyStringValueType.Value!, "messaging") :
+ host.Services.GetRequiredService(ReflectionHelpers.ProducerConnectionFactoryStringKeyStringValueType.Value!);
+
+ ProducerConfig config = GetProducerConfig(connectionFactory)!;
+
+ Assert.Equal(CommonHelpers.TestingEndpoint, config.BootstrapServers);
+ }
+
+ [Theory]
+ [InlineData(true)]
+ [InlineData(false)]
+ public void ConnectionStringCanBeSetInCode(bool useKeyed)
+ {
+ var builder = Host.CreateEmptyApplicationBuilder(null);
+ builder.Configuration.AddInMemoryCollection([
+ new KeyValuePair("ConnectionStrings:messaging", "unused")
+ ]);
+
+ static void SetConnectionString(KafkaProducerSettings settings) => settings.ConnectionString = CommonHelpers.TestingEndpoint;
+ if (useKeyed)
+ {
+ builder.AddKeyedKafkaProducer("messaging", configureSettings: SetConnectionString);
+ }
+ else
+ {
+ builder.AddKafkaProducer("messaging", configureSettings: SetConnectionString);
+ }
+
+ var host = builder.Build();
+ var connectionFactory = useKeyed ?
+ host.Services.GetRequiredKeyedService(ReflectionHelpers.ProducerConnectionFactoryStringKeyStringValueType.Value!, "messaging") :
+ host.Services.GetRequiredService(ReflectionHelpers.ProducerConnectionFactoryStringKeyStringValueType.Value!);
+
+ ProducerConfig config = GetProducerConfig(connectionFactory)!;
+
+ Assert.Equal(CommonHelpers.TestingEndpoint, config.BootstrapServers);
+ }
+
+ [Theory]
+ [InlineData(true)]
+ [InlineData(false)]
+ public void ConnectionNameWinsOverConfigSection(bool useKeyed)
+ {
+ var builder = Host.CreateEmptyApplicationBuilder(null);
+
+ var key = useKeyed ? "messaging" : null;
+ builder.Configuration.AddInMemoryCollection([
+ new KeyValuePair(ProducerConformanceTests.CreateConfigKey("Aspire:Confluent:Kafka:Producer", key, "ConnectionString"), "unused"),
+ new KeyValuePair("ConnectionStrings:messaging", CommonHelpers.TestingEndpoint)
+ ]);
+
+ if (useKeyed)
+ {
+ builder.AddKeyedKafkaProducer("messaging");
+ }
+ else
+ {
+ builder.AddKafkaProducer("messaging");
+ }
+
+ var host = builder.Build();
+ var connectionFactory = useKeyed ?
+ host.Services.GetRequiredKeyedService(ReflectionHelpers.ProducerConnectionFactoryStringKeyStringValueType.Value!, "messaging") :
+ host.Services.GetRequiredService(ReflectionHelpers.ProducerConnectionFactoryStringKeyStringValueType.Value!);
+
+ ProducerConfig config = GetProducerConfig(connectionFactory)!;
+
+ Assert.Equal(CommonHelpers.TestingEndpoint, config.BootstrapServers);
+ }
+
+ [Fact]
+ public void ProducerConfigOptionsFromConfig()
+ {
+ static Stream CreateStreamFromString(string data) => new MemoryStream(Encoding.UTF8.GetBytes(data));
+
+ using var jsonStream = CreateStreamFromString("""
+ {
+ "Aspire": {
+ "Confluent": {
+ "Kafka": {
+ "Producer": {
+ "Config": {
+ "BootstrapServers": "localhost:9092",
+ "Acks": "All",
+ "SaslUsername": "user",
+ "SaslPassword": "password",
+ "SaslMechanism": "Plain",
+ "SecurityProtocol": "Plaintext"
+ }
+ }
+ }
+ }
+ }
+ }
+ """);
+
+ var builder = Host.CreateEmptyApplicationBuilder(null);
+
+ builder.Configuration.AddJsonStream(jsonStream);
+
+ builder.AddKafkaProducer("messaging");
+
+ var host = builder.Build();
+ var connectionFactory = host.Services.GetRequiredService(ReflectionHelpers.ProducerConnectionFactoryStringKeyStringValueType.Value!);
+
+ ProducerConfig config = GetProducerConfig(connectionFactory)!;
+
+ Assert.Equal(Acks.All, config.Acks);
+ Assert.Equal("user", config.SaslUsername);
+ Assert.Equal("password", config.SaslPassword);
+ Assert.Equal(SaslMechanism.Plain, config.SaslMechanism);
+ Assert.Equal(SecurityProtocol.Plaintext, config.SecurityProtocol);
+ }
+
+ private static ProducerConfig? GetProducerConfig(object o) => ReflectionHelpers.ProducerConnectionFactoryStringKeyStringValueType.Value!.GetProperty("Config")!.GetValue(o) as ProducerConfig;
+}
diff --git a/tests/Aspire.Confluent.Kafka.Tests/ProducerConformanceTests.cs b/tests/Aspire.Confluent.Kafka.Tests/ProducerConformanceTests.cs
new file mode 100644
index 0000000000..eff8bbca14
--- /dev/null
+++ b/tests/Aspire.Confluent.Kafka.Tests/ProducerConformanceTests.cs
@@ -0,0 +1,82 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+
+using Aspire.Components.ConformanceTests;
+using Confluent.Kafka;
+using Microsoft.Extensions.Configuration;
+using Microsoft.Extensions.DependencyInjection;
+using Microsoft.Extensions.Hosting;
+
+namespace Aspire.Confluent.Kafka.Tests;
+
+public class ProducerConformanceTests : ConformanceTests<IProducer<string, string>, KafkaProducerSettings>
+{
+ protected override ServiceLifetime ServiceLifetime => ServiceLifetime.Singleton;
+
+ protected override string ActivitySourceName => throw new NotImplementedException();
+
+ protected override string JsonSchemaPath => "src/Components/Aspire.Confluent.Kafka/ConfigurationSchema.json";
+
+ protected override string[] RequiredLogCategories => [
+ "Aspire.Confluent.Kafka"
+ ];
+
+ protected override void PopulateConfiguration(ConfigurationManager configuration, string? key = null)
+ {
+ configuration.AddInMemoryCollection(new KeyValuePair[1]
+ {
+ new KeyValuePair(CreateConfigKey("Aspire:Confluent:Kafka:Producer", key, "ConnectionString"),
+ "localhost:9092")
+ });
+ }
+
+ protected override void RegisterComponent(HostApplicationBuilder builder, Action? configure = null, string? key = null)
+ {
+ if (key is null)
+ {
+ builder.AddKafkaProducer("messaging", configure);
+ }
+ else
+ {
+ builder.AddKeyedKafkaProducer(key, configure);
+ }
+ }
+
+ protected override void SetHealthCheck(KafkaProducerSettings options, bool enabled) => options.HealthChecks = enabled;
+
+ protected override void SetMetrics(KafkaProducerSettings options, bool enabled) => options.Metrics = enabled;
+
+ protected override void SetTracing(KafkaProducerSettings options, bool enabled)
+ {
+ throw new NotImplementedException();
+ }
+
+ protected override void TriggerActivity(IProducer<string, string> service)
+ {
+ service.Produce("test", new Message<string, string> { Key = "test", Value = "test" });
+ service.Flush(TimeSpan.FromMilliseconds(1000));
+ }
+
+ protected override bool SupportsKeyedRegistrations => true;
+
+ protected override string ValidJsonConfig => """
+ {
+ "Aspire": {
+ "Confluent": {
+ "Kafka": {
+ "Producer": {
+ "ConnectionString": "localhost:9092",
+ "HealthChecks": true,
+ "Metrics": true
+ }
+ }
+ }
+ }
+ }
+ """;
+ protected override (string json, string error)[] InvalidJsonToErrorMessage => new[]
+ {
+ ("""{"Aspire": { "Confluent":{ "Kafka": { "Producer": { "Metrics": 0}}}}}""", "Value is \"integer\" but should be \"boolean\""),
+ ("""{"Aspire": { "Confluent":{ "Kafka": { "Producer": { "HealthChecks": 0}}}}}""", "Value is \"integer\" but should be \"boolean\"")
+ };
+}
diff --git a/tests/Aspire.Confluent.Kafka.Tests/ReflectionHelpers.cs b/tests/Aspire.Confluent.Kafka.Tests/ReflectionHelpers.cs
new file mode 100644
index 0000000000..ba453053e4
--- /dev/null
+++ b/tests/Aspire.Confluent.Kafka.Tests/ReflectionHelpers.cs
@@ -0,0 +1,17 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+
+using System.Reflection;
+using Microsoft.Extensions.Hosting;
+
+namespace Aspire.Confluent.Kafka.Tests;
+
+internal static class ReflectionHelpers
+{
+ public static readonly Assembly ComponentAssembly = typeof(AspireKafkaConsumerExtensions).Assembly;
+ public static readonly Lazy<Type> MetricsChannelType = new Lazy<Type>(() => ComponentAssembly.GetType("Aspire.Confluent.Kafka.MetricsChannel")!);
+ public static readonly Lazy<Type> ProducerConnectionFactoryType = new Lazy<Type>(() => ComponentAssembly.GetType("Aspire.Confluent.Kafka.ProducerConnectionFactory`2")!);
+ public static readonly Lazy<Type> ProducerConnectionFactoryStringKeyStringValueType = new Lazy<Type>(() => ProducerConnectionFactoryType.Value.MakeGenericType(typeof(string), typeof(string)));
+ public static readonly Lazy<Type> ConsumerConnectionFactoryType = new Lazy<Type>(() => ComponentAssembly.GetType("Aspire.Confluent.Kafka.ConsumerConnectionFactory`2")!);
+ public static readonly Lazy<Type> ConsumerConnectionFactoryStringKeyStringValueType = new Lazy<Type>(() => ConsumerConnectionFactoryType.Value.MakeGenericType(typeof(string), typeof(string)));
+}
diff --git a/tests/Aspire.Hosting.Tests/Kafka/KafkaFunctionalTests.cs b/tests/Aspire.Hosting.Tests/Kafka/KafkaFunctionalTests.cs
new file mode 100644
index 0000000000..95247ea1df
--- /dev/null
+++ b/tests/Aspire.Hosting.Tests/Kafka/KafkaFunctionalTests.cs
@@ -0,0 +1,31 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+
+using Aspire.Hosting.Tests.Helpers;
+using Xunit;
+
+namespace Aspire.Hosting.Tests.Kafka;
+
+[Collection("IntegrationServices")]
+public class KafkaFunctionalTests(IntegrationServicesFixture integrationServicesFixture)
+{
+ [LocalOnlyFact]
+ public async Task KafkaComponentCanProduceAndConsume()
+ {
+ var testProgram = integrationServicesFixture.TestProgram;
+
+ var client = integrationServicesFixture.HttpClient;
+
+ using var cts = new CancellationTokenSource(TimeSpan.FromMinutes(1));
+
+ string topic = $"topic-{Guid.NewGuid()}";
+
+ var response = await testProgram.IntegrationServiceABuilder!.HttpGetAsync(client, "http", $"/kafka/produce/{topic}", cts.Token);
+ var responseContent = await response.Content.ReadAsStringAsync();
+ Assert.True(response.IsSuccessStatusCode, responseContent);
+
+ response = await testProgram.IntegrationServiceABuilder!.HttpGetAsync(client, "http", $"/kafka/consume/{topic}", cts.Token);
+ responseContent = await response.Content.ReadAsStringAsync();
+ Assert.True(response.IsSuccessStatusCode, responseContent);
+ }
+}
diff --git a/tests/Aspire.Hosting.Tests/ManifestGenerationTests.cs b/tests/Aspire.Hosting.Tests/ManifestGenerationTests.cs
index 900dbe09b4..16d828cadd 100644
--- a/tests/Aspire.Hosting.Tests/ManifestGenerationTests.cs
+++ b/tests/Aspire.Hosting.Tests/ManifestGenerationTests.cs
@@ -309,7 +309,7 @@ public void EnsureAllAzureStorageManifestTypesHaveVersion0Suffix()
}
[Fact]
- public void EnsureAllRabitMQManifestTypesHaveVersion0Suffix()
+ public void EnsureAllRabbitMQManifestTypesHaveVersion0Suffix()
{
var program = CreateTestProgramJsonDocumentManifestPublisher();
@@ -331,6 +331,29 @@ public void EnsureAllRabitMQManifestTypesHaveVersion0Suffix()
Assert.Equal("container.v0", server.GetProperty("type").GetString());
}
+ [Fact]
+ public void EnsureAllKafkaManifestTypesHaveVersion0Suffix()
+ {
+ var program = CreateTestProgramJsonDocumentManifestPublisher();
+
+ program.AppBuilder.AddKafka("kafkaabstract");
+ program.AppBuilder.AddKafkaContainer("kafkacontainer");
+
+ // Build AppHost so that publisher can be resolved.
+ program.Build();
+ var publisher = program.GetManifestPublisher();
+
+ program.Run();
+
+ var resources = publisher.ManifestDocument.RootElement.GetProperty("resources");
+
+ var connection = resources.GetProperty("kafkaabstract");
+ Assert.Equal("kafka.server.v0", connection.GetProperty("type").GetString());
+
+ var server = resources.GetProperty("kafkacontainer");
+ Assert.Equal("container.v0", server.GetProperty("type").GetString());
+ }
+
[Fact]
public void EnsureAllKeyVaultManifestTypesHaveVersion0Suffix()
{
diff --git a/tests/testproject/TestProject.AppHost/TestProgram.cs b/tests/testproject/TestProject.AppHost/TestProgram.cs
index 21264e729e..e9f9f2646f 100644
--- a/tests/testproject/TestProject.AppHost/TestProgram.cs
+++ b/tests/testproject/TestProject.AppHost/TestProgram.cs
@@ -52,6 +52,7 @@ private TestProgram(string[] args, Assembly assembly, bool includeIntegrationSer
.AddDatabase(mongoDbName);
var oracleDatabaseContainer = AppBuilder.AddOracleDatabaseContainer("oracledatabasecontainer")
.AddDatabase(oracleDbName);
+ var kafkaContainer = AppBuilder.AddKafkaContainer("kafkacontainer");
var sqlserverAbstract = AppBuilder.AddSqlServer("sqlserverabstract");
var mysqlAbstract = AppBuilder.AddMySql("mysqlabstract");
@@ -60,6 +61,7 @@ private TestProgram(string[] args, Assembly assembly, bool includeIntegrationSer
var rabbitmqAbstract = AppBuilder.AddRabbitMQ("rabbitmqabstract");
var mongodbAbstract = AppBuilder.AddMongoDB("mongodbabstract");
var oracleDatabaseAbstract = AppBuilder.AddOracleDatabaseContainer("oracledatabaseabstract");
+ var kafkaAbstract = AppBuilder.AddKafka("kafkaabstract");
IntegrationServiceABuilder = AppBuilder.AddProject("integrationservicea")
.WithReference(sqlserverContainer)
@@ -69,13 +71,15 @@ private TestProgram(string[] args, Assembly assembly, bool includeIntegrationSer
.WithReference(rabbitmqContainer)
.WithReference(mongodbContainer)
.WithReference(oracleDatabaseContainer)
+ .WithReference(kafkaContainer)
.WithReference(sqlserverAbstract)
.WithReference(mysqlAbstract)
.WithReference(redisAbstract)
.WithReference(postgresAbstract)
.WithReference(rabbitmqAbstract)
.WithReference(mongodbAbstract)
- .WithReference(oracleDatabaseAbstract);
+ .WithReference(oracleDatabaseAbstract)
+ .WithReference(kafkaAbstract);
}
}
diff --git a/tests/testproject/TestProject.AppHost/aspire-manifest.json b/tests/testproject/TestProject.AppHost/aspire-manifest.json
index 65eb5e08af..6f5b9bafb6 100644
--- a/tests/testproject/TestProject.AppHost/aspire-manifest.json
+++ b/tests/testproject/TestProject.AppHost/aspire-manifest.json
@@ -238,6 +238,22 @@
}
}
},
+ "kafkacontainer": {
+ "type": "container.v0",
+ "image": "confluentinc/confluent-local",
+ "env": {
+ "KAFKA_ADVERTISED_LISTENERS": "PLAINTEXT://localhost:29092,PLAINTEXT_HOST://localhost:{kafkacontainer.bindings.tcp.port}"
+ },
+ "bindings": {
+ "tcp": {
+ "scheme": "tcp",
+ "protocol": "tcp",
+ "transport": "tcp",
+ "containerPort": 9092
+ }
+ },
+ "connectionString": "{kafkacontainer.bindings.tcp.host}:{kafkacontainer.bindings.tcp.port}"
+ },
"mongodbcontainer": {
"type": "container.v0",
"image": "mongo:latest",
@@ -382,6 +398,22 @@
}
}
},
+ "kafkaabstract": {
+ "type": "container.v0",
+ "image": "confluentinc/confluent-local",
+ "env": {
+ "KAFKA_ADVERTISED_LISTENERS": "PLAINTEXT://localhost:29092,PLAINTEXT_HOST://localhost:{kafkaabstract.bindings.tcp.port}"
+ },
+ "bindings": {
+ "tcp": {
+ "scheme": "tcp",
+ "protocol": "tcp",
+ "transport": "tcp",
+ "containerPort": 9092
+ }
+ },
+ "connectionString": "{kafkaabstract.bindings.tcp.host}:{kafkaabstract.bindings.tcp.port}"
+ },
"mongodbabstract": {
"type": "mongodb.server.v0"
},
@@ -418,4 +450,4 @@
}
}
}
-}
\ No newline at end of file
+}
diff --git a/tests/testproject/TestProject.IntegrationServiceA/Kafka/KafkaExtensions.cs b/tests/testproject/TestProject.IntegrationServiceA/Kafka/KafkaExtensions.cs
new file mode 100644
index 0000000000..1f1b095f84
--- /dev/null
+++ b/tests/testproject/TestProject.IntegrationServiceA/Kafka/KafkaExtensions.cs
@@ -0,0 +1,45 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+
+public static class KafkaExtensions
+{
+ public static void MapKafkaApi(this WebApplication app)
+ {
+ app.MapGet("/kafka/produce/{topic}", async (IProducer