From 33c26f4dd1aaebadd54e7eb50765a33bd7c3ad2f Mon Sep 17 00:00:00 2001 From: benclive Date: Fri, 30 Aug 2024 09:54:44 +0100 Subject: [PATCH] feat: Add initial support for a kafka-based ingest path (#13992) Co-authored-by: Cyril Tovena --- docs/sources/shared/configuration.md | 402 +- go.mod | 5 +- go.sum | 10 +- pkg/ingester-kafka/ingester.go | 499 + pkg/ingester-kafka/kafka/kafka_tee.go | 209 + pkg/ingester-kafka/metrics.go | 20 + .../partitionring/partition_ring.go | 47 + pkg/ingester-rf1/ingester.go | 7 - pkg/ingester-rf1/objstore/storage.go | 6 + pkg/loki/config_wrapper.go | 15 + pkg/loki/loki.go | 26 +- pkg/loki/modules.go | 70 + vendor/github.com/pierrec/lz4/v4/README.md | 2 +- .../pierrec/lz4/v4/compressing_reader.go | 222 + .../lz4/v4/internal/lz4block/blocks.go | 5 +- .../lz4/v4/internal/lz4stream/block.go | 4 +- vendor/github.com/pierrec/lz4/v4/options.go | 28 + vendor/github.com/pierrec/lz4/v4/writer.go | 4 + vendor/github.com/twmb/franz-go/LICENSE | 24 + .../twmb/franz-go/pkg/kbin/primitives.go | 856 + .../github.com/twmb/franz-go/pkg/kerr/kerr.go | 315 + .../franz-go/pkg/kgo/atomic_maybe_work.go | 76 + .../twmb/franz-go/pkg/kgo/broker.go | 1507 + .../twmb/franz-go/pkg/kgo/client.go | 4553 ++ .../twmb/franz-go/pkg/kgo/compression.go | 346 + .../twmb/franz-go/pkg/kgo/config.go | 1758 + .../twmb/franz-go/pkg/kgo/consumer.go | 2347 + .../twmb/franz-go/pkg/kgo/consumer_direct.go | 159 + .../twmb/franz-go/pkg/kgo/consumer_group.go | 2908 + .../twmb/franz-go/pkg/kgo/errors.go | 321 + .../github.com/twmb/franz-go/pkg/kgo/go118.go | 57 + .../github.com/twmb/franz-go/pkg/kgo/go119.go | 14 + .../twmb/franz-go/pkg/kgo/group_balancer.go | 959 + .../github.com/twmb/franz-go/pkg/kgo/hooks.go | 420 + .../franz-go/pkg/kgo/internal/sticky/go121.go | 28 + .../franz-go/pkg/kgo/internal/sticky/goold.go | 22 + .../franz-go/pkg/kgo/internal/sticky/graph.go | 226 + .../pkg/kgo/internal/sticky/rbtree.go | 392 + .../pkg/kgo/internal/sticky/sticky.go | 733 + .../twmb/franz-go/pkg/kgo/logger.go | 124 + .../twmb/franz-go/pkg/kgo/metadata.go | 966 + .../twmb/franz-go/pkg/kgo/partitioner.go | 614 + .../twmb/franz-go/pkg/kgo/producer.go | 1226 + .../twmb/franz-go/pkg/kgo/record_and_fetch.go | 628 + .../twmb/franz-go/pkg/kgo/record_formatter.go | 2246 + .../github.com/twmb/franz-go/pkg/kgo/ring.go | 269 + .../github.com/twmb/franz-go/pkg/kgo/sink.go | 2380 + .../twmb/franz-go/pkg/kgo/source.go | 2326 + .../twmb/franz-go/pkg/kgo/strftime.go | 205 + .../franz-go/pkg/kgo/topics_and_partitions.go | 922 + .../github.com/twmb/franz-go/pkg/kgo/txn.go | 1257 + .../github.com/twmb/franz-go/pkg/kmsg/LICENSE | 24 + .../github.com/twmb/franz-go/pkg/kmsg/api.go | 423 + .../twmb/franz-go/pkg/kmsg/generated.go | 46895 ++++++++++++++++ .../pkg/kmsg/internal/kbin/primitives.go | 850 + .../twmb/franz-go/pkg/kmsg/record.go | 174 + .../twmb/franz-go/pkg/kversion/kversion.go | 1166 + .../github.com/twmb/franz-go/pkg/sasl/sasl.go | 41 + .../twmb/franz-go/plugin/kprom/LICENSE | 24 + .../twmb/franz-go/plugin/kprom/README.md | 42 + .../twmb/franz-go/plugin/kprom/config.go | 233 + .../twmb/franz-go/plugin/kprom/kprom.go | 510 + vendor/modules.txt | 17 +- 63 files changed, 83083 insertions(+), 81 deletions(-) create mode 100644 pkg/ingester-kafka/ingester.go create mode 100644 pkg/ingester-kafka/kafka/kafka_tee.go create mode 100644 pkg/ingester-kafka/metrics.go create mode 100644 pkg/ingester-kafka/partitionring/partition_ring.go create mode 100644 vendor/github.com/pierrec/lz4/v4/compressing_reader.go create mode 100644 
vendor/github.com/twmb/franz-go/LICENSE create mode 100644 vendor/github.com/twmb/franz-go/pkg/kbin/primitives.go create mode 100644 vendor/github.com/twmb/franz-go/pkg/kerr/kerr.go create mode 100644 vendor/github.com/twmb/franz-go/pkg/kgo/atomic_maybe_work.go create mode 100644 vendor/github.com/twmb/franz-go/pkg/kgo/broker.go create mode 100644 vendor/github.com/twmb/franz-go/pkg/kgo/client.go create mode 100644 vendor/github.com/twmb/franz-go/pkg/kgo/compression.go create mode 100644 vendor/github.com/twmb/franz-go/pkg/kgo/config.go create mode 100644 vendor/github.com/twmb/franz-go/pkg/kgo/consumer.go create mode 100644 vendor/github.com/twmb/franz-go/pkg/kgo/consumer_direct.go create mode 100644 vendor/github.com/twmb/franz-go/pkg/kgo/consumer_group.go create mode 100644 vendor/github.com/twmb/franz-go/pkg/kgo/errors.go create mode 100644 vendor/github.com/twmb/franz-go/pkg/kgo/go118.go create mode 100644 vendor/github.com/twmb/franz-go/pkg/kgo/go119.go create mode 100644 vendor/github.com/twmb/franz-go/pkg/kgo/group_balancer.go create mode 100644 vendor/github.com/twmb/franz-go/pkg/kgo/hooks.go create mode 100644 vendor/github.com/twmb/franz-go/pkg/kgo/internal/sticky/go121.go create mode 100644 vendor/github.com/twmb/franz-go/pkg/kgo/internal/sticky/goold.go create mode 100644 vendor/github.com/twmb/franz-go/pkg/kgo/internal/sticky/graph.go create mode 100644 vendor/github.com/twmb/franz-go/pkg/kgo/internal/sticky/rbtree.go create mode 100644 vendor/github.com/twmb/franz-go/pkg/kgo/internal/sticky/sticky.go create mode 100644 vendor/github.com/twmb/franz-go/pkg/kgo/logger.go create mode 100644 vendor/github.com/twmb/franz-go/pkg/kgo/metadata.go create mode 100644 vendor/github.com/twmb/franz-go/pkg/kgo/partitioner.go create mode 100644 vendor/github.com/twmb/franz-go/pkg/kgo/producer.go create mode 100644 vendor/github.com/twmb/franz-go/pkg/kgo/record_and_fetch.go create mode 100644 vendor/github.com/twmb/franz-go/pkg/kgo/record_formatter.go create mode 100644 vendor/github.com/twmb/franz-go/pkg/kgo/ring.go create mode 100644 vendor/github.com/twmb/franz-go/pkg/kgo/sink.go create mode 100644 vendor/github.com/twmb/franz-go/pkg/kgo/source.go create mode 100644 vendor/github.com/twmb/franz-go/pkg/kgo/strftime.go create mode 100644 vendor/github.com/twmb/franz-go/pkg/kgo/topics_and_partitions.go create mode 100644 vendor/github.com/twmb/franz-go/pkg/kgo/txn.go create mode 100644 vendor/github.com/twmb/franz-go/pkg/kmsg/LICENSE create mode 100644 vendor/github.com/twmb/franz-go/pkg/kmsg/api.go create mode 100644 vendor/github.com/twmb/franz-go/pkg/kmsg/generated.go create mode 100644 vendor/github.com/twmb/franz-go/pkg/kmsg/internal/kbin/primitives.go create mode 100644 vendor/github.com/twmb/franz-go/pkg/kmsg/record.go create mode 100644 vendor/github.com/twmb/franz-go/pkg/kversion/kversion.go create mode 100644 vendor/github.com/twmb/franz-go/pkg/sasl/sasl.go create mode 100644 vendor/github.com/twmb/franz-go/plugin/kprom/LICENSE create mode 100644 vendor/github.com/twmb/franz-go/plugin/kprom/README.md create mode 100644 vendor/github.com/twmb/franz-go/plugin/kprom/config.go create mode 100644 vendor/github.com/twmb/franz-go/plugin/kprom/kprom.go diff --git a/docs/sources/shared/configuration.md b/docs/sources/shared/configuration.md index 9811088ca4a7..8081c058f89e 100644 --- a/docs/sources/shared/configuration.md +++ b/docs/sources/shared/configuration.md @@ -188,12 +188,14 @@ ingester_rf1: # Configuration for a Consul client. Only applies if the selected # kvstore is consul. 
- # The CLI flags prefix for this block configuration is: ingester-rf1 + # The CLI flags prefix for this block configuration is: + # ingester-rf1.consul [consul: ] # Configuration for an ETCD v3 client. Only applies if the selected # kvstore is etcd. - # The CLI flags prefix for this block configuration is: ingester-rf1 + # The CLI flags prefix for this block configuration is: + # ingester-rf1.etcd [etcd: ] multi: @@ -444,12 +446,14 @@ pattern_ingester: # Configuration for a Consul client. Only applies if the selected # kvstore is consul. - # The CLI flags prefix for this block configuration is: pattern-ingester + # The CLI flags prefix for this block configuration is: + # pattern-ingester.consul [consul: ] # Configuration for an ETCD v3 client. Only applies if the selected # kvstore is etcd. - # The CLI flags prefix for this block configuration is: pattern-ingester + # The CLI flags prefix for this block configuration is: + # pattern-ingester.etcd [etcd: ] multi: @@ -1023,6 +1027,267 @@ metastore_client: # Configures the gRPC client used to communicate with the metastore. [grpc_client_config: ] +partition_ring: + # The key-value store used to share the hash ring across multiple instances. + # This option needs be set on ingesters, distributors, queriers, and rulers + # when running in microservices mode. + kvstore: + # Backend storage to use for the ring. Supported values are: consul, etcd, + # inmemory, memberlist, multi. + # CLI flag: -ingester.partition-ring.store + [store: | default = "memberlist"] + + # The prefix for the keys in the store. Should end with a /. + # CLI flag: -ingester.partition-ring.prefix + [prefix: | default = "collectors/"] + + # Configuration for a Consul client. Only applies if the selected kvstore is + # consul. + # The CLI flags prefix for this block configuration is: + # ingester.partition-ring.consul + [consul: ] + + # Configuration for an ETCD v3 client. Only applies if the selected kvstore + # is etcd. + # The CLI flags prefix for this block configuration is: + # ingester.partition-ring.etcd + [etcd: ] + + multi: + # Primary backend storage used by multi-client. + # CLI flag: -ingester.partition-ring.multi.primary + [primary: | default = ""] + + # Secondary backend storage used by multi-client. + # CLI flag: -ingester.partition-ring.multi.secondary + [secondary: | default = ""] + + # Mirror writes to secondary store. + # CLI flag: -ingester.partition-ring.multi.mirror-enabled + [mirror_enabled: | default = false] + + # Timeout for storing value to secondary store. + # CLI flag: -ingester.partition-ring.multi.mirror-timeout + [mirror_timeout: | default = 2s] + + # Minimum number of owners to wait before a PENDING partition gets switched to + # ACTIVE. + # CLI flag: -ingester.partition-ring.min-partition-owners-count + [min_partition_owners_count: | default = 1] + + # How long the minimum number of owners are enforced before a PENDING + # partition gets switched to ACTIVE. + # CLI flag: -ingester.partition-ring.min-partition-owners-duration + [min_partition_owners_duration: | default = 10s] + + # How long to wait before an INACTIVE partition is eligible for deletion. The + # partition is deleted only if it has been in INACTIVE state for at least the + # configured duration and it has no owners registered. A value of 0 disables + # partitions deletion. 
+ # CLI flag: -ingester.partition-ring.delete-inactive-partition-after + [delete_inactive_partition_after: | default = 13h] + +kafka_config: + # the kafka endpoint to connect to + # CLI flag: -address + [address: | default = "localhost:9092"] + + # The Kafka topic name. + # CLI flag: -.topic + [topic: | default = "loki.push"] + +kafka_ingester: + # Whether the kafka ingester is enabled. + # CLI flag: -kafka-ingester.enabled + [enabled: | default = false] + + # Configures how the lifecycle of the ingester will operate and where it will + # register for discovery. + lifecycler: + ring: + kvstore: + # Backend storage to use for the ring. Supported values are: consul, + # etcd, inmemory, memberlist, multi. + # CLI flag: -kafka-ingesterstore + [store: | default = "consul"] + + # The prefix for the keys in the store. Should end with a /. + # CLI flag: -kafka-ingesterprefix + [prefix: | default = "collectors/"] + + # Configuration for a Consul client. Only applies if the selected + # kvstore is consul. + # The CLI flags prefix for this block configuration is: + # kafka-ingesterconsul + [consul: ] + + # Configuration for an ETCD v3 client. Only applies if the selected + # kvstore is etcd. + # The CLI flags prefix for this block configuration is: + # kafka-ingesteretcd + [etcd: ] + + multi: + # Primary backend storage used by multi-client. + # CLI flag: -kafka-ingestermulti.primary + [primary: | default = ""] + + # Secondary backend storage used by multi-client. + # CLI flag: -kafka-ingestermulti.secondary + [secondary: | default = ""] + + # Mirror writes to secondary store. + # CLI flag: -kafka-ingestermulti.mirror-enabled + [mirror_enabled: | default = false] + + # Timeout for storing value to secondary store. + # CLI flag: -kafka-ingestermulti.mirror-timeout + [mirror_timeout: | default = 2s] + + # The heartbeat timeout after which ingesters are skipped for + # reads/writes. 0 = never (timeout disabled). + # CLI flag: -kafka-ingesterring.heartbeat-timeout + [heartbeat_timeout: | default = 1m] + + # The number of ingesters to write to and read from. + # CLI flag: -kafka-ingesterdistributor.replication-factor + [replication_factor: | default = 3] + + # True to enable the zone-awareness and replicate ingested samples across + # different availability zones. + # CLI flag: -kafka-ingesterdistributor.zone-awareness-enabled + [zone_awareness_enabled: | default = false] + + # Comma-separated list of zones to exclude from the ring. Instances in + # excluded zones will be filtered out from the ring. + # CLI flag: -kafka-ingesterdistributor.excluded-zones + [excluded_zones: | default = ""] + + # Number of tokens for each ingester. + # CLI flag: -kafka-ingesternum-tokens + [num_tokens: | default = 128] + + # Period at which to heartbeat to consul. 0 = disabled. + # CLI flag: -kafka-ingesterheartbeat-period + [heartbeat_period: | default = 5s] + + # Heartbeat timeout after which instance is assumed to be unhealthy. 0 = + # disabled. + # CLI flag: -kafka-ingesterheartbeat-timeout + [heartbeat_timeout: | default = 1m] + + # Observe tokens after generating to resolve collisions. Useful when using + # gossiping ring. + # CLI flag: -kafka-ingesterobserve-period + [observe_period: | default = 0s] + + # Period to wait for a claim from another member; will join automatically + # after this. + # CLI flag: -kafka-ingesterjoin-after + [join_after: | default = 0s] + + # Minimum duration to wait after the internal readiness checks have passed + # but before succeeding the readiness endpoint. 
This is used to slowdown + # deployment controllers (eg. Kubernetes) after an instance is ready and + # before they proceed with a rolling update, to give the rest of the cluster + # instances enough time to receive ring updates. + # CLI flag: -kafka-ingestermin-ready-duration + [min_ready_duration: | default = 15s] + + # Name of network interface to read address from. + # CLI flag: -kafka-ingesterlifecycler.interface + [interface_names: | default = []] + + # Enable IPv6 support. Required to make use of IP addresses from IPv6 + # interfaces. + # CLI flag: -kafka-ingesterenable-inet6 + [enable_inet6: | default = false] + + # Duration to sleep for before exiting, to ensure metrics are scraped. + # CLI flag: -kafka-ingesterfinal-sleep + [final_sleep: | default = 0s] + + # File path where tokens are stored. If empty, tokens are not stored at + # shutdown and restored at startup. + # CLI flag: -kafka-ingestertokens-file-path + [tokens_file_path: | default = ""] + + # The availability zone where this instance is running. + # CLI flag: -kafka-ingesteravailability-zone + [availability_zone: | default = ""] + + # Unregister from the ring upon clean shutdown. It can be useful to disable + # for rolling restarts with consistent naming in conjunction with + # -distributor.extend-writes=false. + # CLI flag: -kafka-ingesterunregister-on-shutdown + [unregister_on_shutdown: | default = true] + + # When enabled the readiness probe succeeds only after all instances are + # ACTIVE and healthy in the ring, otherwise only the instance itself is + # checked. This option should be disabled if in your cluster multiple + # instances can be rolled out simultaneously, otherwise rolling updates may + # be slowed down. + # CLI flag: -kafka-ingesterreadiness-check-ring-health + [readiness_check_ring_health: | default = true] + + # IP address to advertise in the ring. + # CLI flag: -kafka-ingesterlifecycler.addr + [address: | default = ""] + + # port to advertise in consul (defaults to server.grpc-listen-port). + # CLI flag: -kafka-ingesterlifecycler.port + [port: | default = 0] + + # ID to register in the ring. + # CLI flag: -kafka-ingesterlifecycler.ID + [id: | default = ""] + + # Path where the shutdown marker file is stored. If not set and + # common.path_prefix is set then common.path_prefix will be used. + # CLI flag: -kafka-ingester.shutdown-marker-path + [shutdown_marker_path: | default = ""] + + partition_ring: + # The key-value store used to share the hash ring across multiple instances. + # This option needs be set on ingesters, distributors, queriers, and rulers + # when running in microservices mode. + kvstore: + [store: | default = ""] + + [prefix: | default = ""] + + # Configuration for a Consul client. Only applies if the selected kvstore + # is consul. + # The CLI flags prefix for this block configuration is: + # common.storage.ring.consul + [consul: ] + + # Configuration for an ETCD v3 client. Only applies if the selected + # kvstore is etcd. + # The CLI flags prefix for this block configuration is: + # common.storage.ring.etcd + [etcd: ] + + multi: + [primary: | default = ""] + + [secondary: | default = ""] + + [mirror_enabled: ] + + [mirror_timeout: ] + + [min_partition_owners_count: ] + + [min_partition_owners_duration: ] + + [delete_inactive_partition_after: ] + + kafkaconfig: + [address: | default = ""] + + [topic: | default = ""] + # Configuration for 'runtime config' module, responsible for reloading runtime # configuration file. 
[runtime_config: ] @@ -1975,12 +2240,10 @@ ring: # Configuration for a Consul client. Only applies if the selected kvstore is # consul. - # The CLI flags prefix for this block configuration is: common.storage.ring [consul: ] # Configuration for an ETCD v3 client. Only applies if the selected kvstore # is etcd. - # The CLI flags prefix for this block configuration is: common.storage.ring [etcd: ] multi: @@ -2154,12 +2417,13 @@ compactor_ring: # Configuration for a Consul client. Only applies if the selected kvstore is # consul. - # The CLI flags prefix for this block configuration is: compactor.ring + # The CLI flags prefix for this block configuration is: + # compactor.ring.consul [consul: ] # Configuration for an ETCD v3 client. Only applies if the selected kvstore # is etcd. - # The CLI flags prefix for this block configuration is: compactor.ring + # The CLI flags prefix for this block configuration is: compactor.ring.etcd [etcd: ] multi: @@ -2238,45 +2502,48 @@ compactor_ring: Configuration for a Consul client. Only applies if the selected kvstore is `consul`. The supported CLI flags `` used to reference this configuration block are: -- `common.storage.ring` -- `compactor.ring` -- `distributor.ring` -- `index-gateway.ring` -- `ingester-rf1` -- `pattern-ingester` -- `query-scheduler.ring` -- `ruler.ring` +- `common.storage.ring.consul` +- `compactor.ring.consul` +- `consul` +- `distributor.ring.consul` +- `index-gateway.ring.consul` +- `ingester-rf1.consul` +- `ingester.partition-ring.consul` +- `kafka-ingesterconsul` +- `pattern-ingester.consul` +- `query-scheduler.ring.consul` +- `ruler.ring.consul`   ```yaml # Hostname and port of Consul. -# CLI flag: -.consul.hostname +# CLI flag: -.hostname [host: | default = "localhost:8500"] # ACL Token used to interact with Consul. -# CLI flag: -.consul.acl-token +# CLI flag: -.acl-token [acl_token: | default = ""] # HTTP timeout when talking to Consul -# CLI flag: -.consul.client-timeout +# CLI flag: -.client-timeout [http_client_timeout: | default = 20s] # Enable consistent reads to Consul. -# CLI flag: -.consul.consistent-reads +# CLI flag: -.consistent-reads [consistent_reads: | default = false] # Rate limit when watching key or prefix in Consul, in requests per second. 0 # disables the rate limit. -# CLI flag: -.consul.watch-rate-limit +# CLI flag: -.watch-rate-limit [watch_rate_limit: | default = 1] # Burst size used in rate limit. Values less than 1 are treated as 1. -# CLI flag: -.consul.watch-burst-size +# CLI flag: -.watch-burst-size [watch_burst_size: | default = 1] # Maximum duration to wait before retrying a Compare And Swap (CAS) operation. -# CLI flag: -.consul.cas-retry-delay +# CLI flag: -.cas-retry-delay [cas_retry_delay: | default = 1s] ``` @@ -2381,12 +2648,14 @@ ring: # Configuration for a Consul client. Only applies if the selected kvstore is # consul. - # The CLI flags prefix for this block configuration is: distributor.ring + # The CLI flags prefix for this block configuration is: + # distributor.ring.consul [consul: ] # Configuration for an ETCD v3 client. Only applies if the selected kvstore # is etcd. - # The CLI flags prefix for this block configuration is: distributor.ring + # The CLI flags prefix for this block configuration is: + # distributor.ring.etcd [etcd: ] multi: @@ -2458,55 +2727,58 @@ otlp_config: Configuration for an ETCD v3 client. Only applies if the selected kvstore is `etcd`. 
The supported CLI flags `` used to reference this configuration block are: -- `common.storage.ring` -- `compactor.ring` -- `distributor.ring` -- `index-gateway.ring` -- `ingester-rf1` -- `pattern-ingester` -- `query-scheduler.ring` -- `ruler.ring` +- `common.storage.ring.etcd` +- `compactor.ring.etcd` +- `distributor.ring.etcd` +- `etcd` +- `index-gateway.ring.etcd` +- `ingester-rf1.etcd` +- `ingester.partition-ring.etcd` +- `kafka-ingesteretcd` +- `pattern-ingester.etcd` +- `query-scheduler.ring.etcd` +- `ruler.ring.etcd`   ```yaml # The etcd endpoints to connect to. -# CLI flag: -.etcd.endpoints +# CLI flag: -.endpoints [endpoints: | default = []] # The dial timeout for the etcd connection. -# CLI flag: -.etcd.dial-timeout +# CLI flag: -.dial-timeout [dial_timeout: | default = 10s] # The maximum number of retries to do for failed ops. -# CLI flag: -.etcd.max-retries +# CLI flag: -.max-retries [max_retries: | default = 10] # Enable TLS. -# CLI flag: -.etcd.tls-enabled +# CLI flag: -.tls-enabled [tls_enabled: | default = false] # Path to the client certificate, which will be used for authenticating with the # server. Also requires the key path to be configured. -# CLI flag: -.etcd.tls-cert-path +# CLI flag: -.tls-cert-path [tls_cert_path: | default = ""] # Path to the key for the client certificate. Also requires the client # certificate to be configured. -# CLI flag: -.etcd.tls-key-path +# CLI flag: -.tls-key-path [tls_key_path: | default = ""] # Path to the CA certificates to validate server certificate against. If not # set, the host's root CA certificates are used. -# CLI flag: -.etcd.tls-ca-path +# CLI flag: -.tls-ca-path [tls_ca_path: | default = ""] # Override the expected name on the server certificate. -# CLI flag: -.etcd.tls-server-name +# CLI flag: -.tls-server-name [tls_server_name: | default = ""] # Skip validating server certificate. -# CLI flag: -.etcd.tls-insecure-skip-verify +# CLI flag: -.tls-insecure-skip-verify [tls_insecure_skip_verify: | default = false] # Override the default cipher suite list (separated by commas). Allowed values: @@ -2539,20 +2811,20 @@ Configuration for an ETCD v3 client. Only applies if the selected kvstore is `et # - TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA # - TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256 # - TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 -# CLI flag: -.etcd.tls-cipher-suites +# CLI flag: -.tls-cipher-suites [tls_cipher_suites: | default = ""] # Override the default minimum TLS version. Allowed values: VersionTLS10, # VersionTLS11, VersionTLS12, VersionTLS13 -# CLI flag: -.etcd.tls-min-version +# CLI flag: -.tls-min-version [tls_min_version: | default = ""] # Etcd username. -# CLI flag: -.etcd.username +# CLI flag: -.username [username: | default = ""] # Etcd password. -# CLI flag: -.etcd.password +# CLI flag: -.password [password: | default = ""] ``` @@ -2929,12 +3201,14 @@ ring: # Configuration for a Consul client. Only applies if the selected kvstore is # consul. - # The CLI flags prefix for this block configuration is: index-gateway.ring + # The CLI flags prefix for this block configuration is: + # index-gateway.ring.consul [consul: ] # Configuration for an ETCD v3 client. Only applies if the selected kvstore # is etcd. - # The CLI flags prefix for this block configuration is: index-gateway.ring + # The CLI flags prefix for this block configuration is: + # index-gateway.ring.etcd [etcd: ] multi: @@ -3026,10 +3300,12 @@ lifecycler: # Configuration for a Consul client. Only applies if the selected kvstore # is consul. 
+ # The CLI flags prefix for this block configuration is: consul [consul: ] # Configuration for an ETCD v3 client. Only applies if the selected # kvstore is etcd. + # The CLI flags prefix for this block configuration is: etcd [etcd: ] multi: @@ -3298,16 +3574,26 @@ The `ingester_client` block configures how the distributor will connect to inges ```yaml # Configures how connections are pooled. pool_config: - [client_cleanup_period: ] + # How frequently to clean up clients for ingesters that have gone away. + # CLI flag: -distributor.client-cleanup-period + [client_cleanup_period: | default = 15s] - [health_check_ingesters: ] + # Run a health check on each ingester client during periodic cleanup. + # CLI flag: -distributor.health-check-ingesters + [health_check_ingesters: | default = true] - [remote_timeout: ] + # How quickly a dead client will be removed after it has been detected to + # disappear. Set this to a value to allow time for a secondary health check to + # recover the missing client. + # CLI flag: -ingester.client.healthcheck-timeout + [remote_timeout: | default = 1s] -[remote_timeout: ] +# The remote request timeout on the client side. +# CLI flag: -ingester.client.timeout +[remote_timeout: | default = 5s] # Configures how the gRPC connection to ingesters work as a client. -# The CLI flags prefix for this block configuration is: ingester-rf1.client +# The CLI flags prefix for this block configuration is: ingester.client [grpc_client_config: ] ``` @@ -4504,12 +4790,14 @@ scheduler_ring: # Configuration for a Consul client. Only applies if the selected kvstore is # consul. - # The CLI flags prefix for this block configuration is: query-scheduler.ring + # The CLI flags prefix for this block configuration is: + # query-scheduler.ring.consul [consul: ] # Configuration for an ETCD v3 client. Only applies if the selected kvstore # is etcd. - # The CLI flags prefix for this block configuration is: query-scheduler.ring + # The CLI flags prefix for this block configuration is: + # query-scheduler.ring.etcd [etcd: ] multi: @@ -4810,12 +5098,12 @@ ring: # Configuration for a Consul client. Only applies if the selected kvstore is # consul. - # The CLI flags prefix for this block configuration is: ruler.ring + # The CLI flags prefix for this block configuration is: ruler.ring.consul [consul: ] # Configuration for an ETCD v3 client. Only applies if the selected kvstore # is etcd. 
- # The CLI flags prefix for this block configuration is: ruler.ring + # The CLI flags prefix for this block configuration is: ruler.ring.etcd [etcd: ] multi: diff --git a/go.mod b/go.mod index 8bf9a7296693..af3178535a3d 100644 --- a/go.mod +++ b/go.mod @@ -79,7 +79,7 @@ require ( github.com/opentracing/opentracing-go v1.2.0 github.com/oschwald/geoip2-golang v1.11.0 // github.com/pierrec/lz4 v2.0.5+incompatible - github.com/pierrec/lz4/v4 v4.1.18 + github.com/pierrec/lz4/v4 v4.1.21 github.com/pkg/errors v0.9.1 github.com/prometheus/client_golang v1.19.1 github.com/prometheus/client_model v0.6.1 @@ -138,6 +138,8 @@ require ( github.com/schollz/progressbar/v3 v3.14.6 github.com/shirou/gopsutil/v4 v4.24.0-alpha.1 github.com/thanos-io/objstore v0.0.0-20240828153123-de861b433240 + github.com/twmb/franz-go v1.17.1 + github.com/twmb/franz-go/plugin/kprom v1.1.0 github.com/willf/bloom v2.0.3+incompatible go.opentelemetry.io/collector/pdata v1.12.0 go4.org/netipx v0.0.0-20230125063823-8449b0a6169f @@ -178,6 +180,7 @@ require ( github.com/shoenig/go-m1cpu v0.1.6 // indirect github.com/tklauser/go-sysconf v0.3.12 // indirect github.com/tklauser/numcpus v0.6.1 // indirect + github.com/twmb/franz-go/pkg/kmsg v1.8.0 // indirect github.com/yusufpapurcu/wmi v1.2.4 // indirect go.opentelemetry.io/otel/sdk v1.28.0 // indirect go.opentelemetry.io/otel/sdk/metric v1.28.0 // indirect diff --git a/go.sum b/go.sum index 5977d2611ef7..799c3d424ccb 100644 --- a/go.sum +++ b/go.sum @@ -1585,8 +1585,8 @@ github.com/philhofer/fwd v1.1.1/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pierrec/lz4 v2.5.2+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= -github.com/pierrec/lz4/v4 v4.1.18 h1:xaKrnTkyoqfh1YItXl56+6KJNVYWlEEPuAQW9xsplYQ= -github.com/pierrec/lz4/v4 v4.1.18/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= +github.com/pierrec/lz4/v4 v4.1.21 h1:yOVMLb6qSIDP67pl/5F7RepeKYu/VmTyEXvuMI5d9mQ= +github.com/pierrec/lz4/v4 v4.1.21/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= github.com/pires/go-proxyproto v0.7.0 h1:IukmRewDQFWC7kfnb66CSomk2q/seBuilHBYFwyq0Hs= github.com/pires/go-proxyproto v0.7.0/go.mod h1:Vz/1JPY/OACxWGQNIRY2BeyDmpoaWmEP40O9LbuiFR4= github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ= @@ -1824,6 +1824,12 @@ github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1 github.com/transip/gotransip v0.0.0-20190812104329-6d8d9179b66f/go.mod h1:i0f4R4o2HM0m3DZYQWsj6/MEowD57VzoH0v3d7igeFY= github.com/ttacon/chalk v0.0.0-20160626202418-22c06c80ed31/go.mod h1:onvgF043R+lC5RZ8IT9rBXDaEDnpnw/Cl+HFiw+v/7Q= github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= +github.com/twmb/franz-go v1.17.1 h1:0LwPsbbJeJ9R91DPUHSEd4su82WJWcTY1Zzbgbg4CeQ= +github.com/twmb/franz-go v1.17.1/go.mod h1:NreRdJ2F7dziDY/m6VyspWd6sNxHKXdMZI42UfQ3GXM= +github.com/twmb/franz-go/pkg/kmsg v1.8.0 h1:lAQB9Z3aMrIP9qF9288XcFf/ccaSxEitNA1CDTEIeTA= +github.com/twmb/franz-go/pkg/kmsg v1.8.0/go.mod h1:HzYEb8G3uu5XevZbtU0dVbkphaKTHk0X68N5ka4q6mU= +github.com/twmb/franz-go/plugin/kprom v1.1.0 h1:grGeIJbm4llUBF8jkDjTb/b8rKllWSXjMwIqeCCcNYQ= +github.com/twmb/franz-go/plugin/kprom v1.1.0/go.mod h1:cTDrPMSkyrO99LyGx3AtiwF9W6+THHjZrkDE2+TEBIU= 
github.com/uber-go/atomic v1.3.2/go.mod h1:/Ct5t2lcmbJ4OSe/waGBoaVvVqtO0bmtfVNex1PFV8g= github.com/uber/jaeger-client-go v2.30.0+incompatible h1:D6wyKGCecFaSRUpo8lCVbaOOb6ThwMmTEbhRwtKR97o= github.com/uber/jaeger-client-go v2.30.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= diff --git a/pkg/ingester-kafka/ingester.go b/pkg/ingester-kafka/ingester.go new file mode 100644 index 000000000000..1609eb819780 --- /dev/null +++ b/pkg/ingester-kafka/ingester.go @@ -0,0 +1,499 @@ +package ingesterkafka + +import ( + "context" + "errors" + "flag" + "fmt" + "io" + "net/http" + "os" + "path" + "regexp" + "strconv" + "strings" + "sync" + "time" + + "github.com/go-kit/log" + "github.com/go-kit/log/level" + "github.com/grafana/dskit/kv" + "github.com/grafana/dskit/modules" + "github.com/grafana/dskit/multierror" + "github.com/grafana/dskit/ring" + "github.com/grafana/dskit/services" + "github.com/prometheus/client_golang/prometheus" + "google.golang.org/grpc/health/grpc_health_v1" + + "github.com/grafana/loki/v3/pkg/ingester-kafka/kafka" + "github.com/grafana/loki/v3/pkg/ingester-kafka/partitionring" + util_log "github.com/grafana/loki/v3/pkg/util/log" + + "github.com/grafana/loki/v3/pkg/analytics" + "github.com/grafana/loki/v3/pkg/logproto" + "github.com/grafana/loki/v3/pkg/util" +) + +const ( + RingName = "kafka-ingester" + PartitionRingName = "kafka-partition" + + shutdownMarkerFilename = "shutdown-requested.txt" +) + +// ErrReadOnly is returned when the ingester is shutting down and a push was +// attempted. +var ( + ErrReadOnly = errors.New("Ingester is shutting down") + + activeTenantsStats = analytics.NewInt("ingester_active_tenants") + ingesterIDRegexp = regexp.MustCompile("ingester(-rf1)-([0-9]+)") +) + +// Config for an ingester. +type Config struct { + Enabled bool `yaml:"enabled" doc:"description=Whether the kafka ingester is enabled."` + + LifecyclerConfig ring.LifecyclerConfig `yaml:"lifecycler,omitempty" doc:"description=Configures how the lifecycle of the ingester will operate and where it will register for discovery."` + ShutdownMarkerPath string `yaml:"shutdown_marker_path"` + + // Used for the kafka ingestion path + PartitionRingConfig partitionring.Config `yaml:"partition_ring" category:"experimental"` + KafkaConfig kafka.Config +} + +// RegisterFlags registers the flags. +func (cfg *Config) RegisterFlags(f *flag.FlagSet) { + cfg.LifecyclerConfig.RegisterFlagsWithPrefix("kafka-ingester", f, util_log.Logger) + + f.BoolVar(&cfg.Enabled, "kafka-ingester.enabled", false, "Whether the Kafka-based ingester path is enabled") + f.StringVar(&cfg.ShutdownMarkerPath, "kafka-ingester.shutdown-marker-path", "", "Path where the shutdown marker file is stored. If not set and common.path_prefix is set then common.path_prefix will be used.") +} + +type Wrapper interface { + Wrap(wrapped Interface) Interface +} + +// Storage is the store interface we need on the ingester. +type Storage interface { + PutObject(ctx context.Context, objectKey string, object io.Reader) error + Stop() +} + +// Interface is an interface for the Ingester +type Interface interface { + services.Service + http.Handler + + logproto.PusherServer + + CheckReady(ctx context.Context) error + FlushHandler(w http.ResponseWriter, _ *http.Request) + ShutdownHandler(w http.ResponseWriter, r *http.Request) + PrepareShutdown(w http.ResponseWriter, r *http.Request) +} + +// Ingester builds chunks for incoming log streams. 
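+// In this initial version the service only registers with the ingester and partition rings;
+// Push is a no-op stub and Flush does nothing yet.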
+type Ingester struct { + services.Service + + cfg Config + logger log.Logger + + metrics *ingesterMetrics + + // Flag for whether stopping the ingester service should also terminate the + // loki process. + // This is set when calling the shutdown handler. + terminateOnShutdown bool + readonly bool + shutdownMtx sync.Mutex // Allows processes to grab a lock and prevent a shutdown + + lifecycler *ring.Lifecycler + lifecyclerWatcher *services.FailureWatcher + ingesterPartitionID int32 + partitionRingLifecycler *ring.PartitionInstanceLifecycler +} + +// New makes a new Ingester. +func New(cfg Config, + registerer prometheus.Registerer, + metricsNamespace string, + logger log.Logger, +) (*Ingester, error) { + metrics := newIngesterMetrics(registerer) + + ingesterPartitionID, err := extractIngesterPartitionID(cfg.LifecyclerConfig.ID) + if err != nil { + return nil, fmt.Errorf("calculating ingester partition ID: %w", err) + } + + partitionRingKV := cfg.LifecyclerConfig.RingConfig.KVStore.Mock + if partitionRingKV == nil { + partitionRingKV, err = kv.NewClient(cfg.LifecyclerConfig.RingConfig.KVStore, ring.GetPartitionRingCodec(), kv.RegistererWithKVName(registerer, PartitionRingName+"-lifecycler"), logger) + if err != nil { + return nil, fmt.Errorf("creating KV store for ingester partition ring: %w", err) + } + } + + partitionRingLifecycler := ring.NewPartitionInstanceLifecycler( + cfg.PartitionRingConfig.ToLifecyclerConfig(ingesterPartitionID, cfg.LifecyclerConfig.ID), + PartitionRingName, + PartitionRingName+"-key", + partitionRingKV, + logger, + prometheus.WrapRegistererWithPrefix("loki_", registerer)) + + i := &Ingester{ + cfg: cfg, + logger: logger, + ingesterPartitionID: ingesterPartitionID, + partitionRingLifecycler: partitionRingLifecycler, + metrics: metrics, + } + + i.lifecycler, err = ring.NewLifecycler(cfg.LifecyclerConfig, i, RingName, RingName+"-ring", true, logger, prometheus.WrapRegistererWithPrefix(metricsNamespace+"_", registerer)) + if err != nil { + return nil, err + } + + i.lifecyclerWatcher = services.NewFailureWatcher() + i.lifecyclerWatcher.WatchService(i.lifecycler) + i.lifecyclerWatcher.WatchService(i.partitionRingLifecycler) + + i.Service = services.NewBasicService(i.starting, i.running, i.stopping) + + return i, nil +} + +// ingesterPartitionID returns the partition ID owner the the given ingester. +func extractIngesterPartitionID(ingesterID string) (int32, error) { + if strings.Contains(ingesterID, "local") { + return 0, nil + } + + match := ingesterIDRegexp.FindStringSubmatch(ingesterID) + if len(match) == 0 { + return 0, fmt.Errorf("ingester ID %s doesn't match regular expression %q", ingesterID, ingesterIDRegexp.String()) + } + // Parse the ingester sequence number. + ingesterSeq, err := strconv.Atoi(match[1]) + if err != nil { + return 0, fmt.Errorf("no ingester sequence number in ingester ID %s", ingesterID) + } + + return int32(ingesterSeq), nil +} + +// ServeHTTP implements the pattern ring status page. 
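+// It delegates to the lifecycler, so the rendered page is the kafka-ingester ring status.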
+func (i *Ingester) ServeHTTP(w http.ResponseWriter, r *http.Request) { + i.lifecycler.ServeHTTP(w, r) +} + +func (i *Ingester) starting(ctx context.Context) error { + // pass new context to lifecycler, so that it doesn't stop automatically when Ingester's service context is done + err := i.lifecycler.StartAsync(context.Background()) + if err != nil { + return err + } + + err = i.lifecycler.AwaitRunning(ctx) + if err != nil { + return err + } + + err = i.partitionRingLifecycler.StartAsync(context.Background()) + if err != nil { + return err + } + err = i.partitionRingLifecycler.AwaitRunning(ctx) + if err != nil { + return err + } + + shutdownMarkerPath := path.Join(i.cfg.ShutdownMarkerPath, shutdownMarkerFilename) + shutdownMarker, err := shutdownMarkerExists(shutdownMarkerPath) + if err != nil { + return fmt.Errorf("failed to check ingester shutdown marker: %w", err) + } + + if shutdownMarker { + level.Info(i.logger).Log("msg", "detected existing shutdown marker, setting unregister and flush on shutdown", "path", shutdownMarkerPath) + i.setPrepareShutdown() + } + + return nil +} + +func (i *Ingester) running(ctx context.Context) error { + var serviceError error + select { + // wait until service is asked to stop + case <-ctx.Done(): + // stop + case err := <-i.lifecyclerWatcher.Chan(): + serviceError = fmt.Errorf("lifecycler failed: %w", err) + } + + return serviceError +} + +// stopping is called when Ingester transitions to Stopping state. +// +// At this point, loop no longer runs, but flushers are still running. +func (i *Ingester) stopping(_ error) error { + i.stopIncomingRequests() + var errs util.MultiError + + //if i.flushOnShutdownSwitch.Get() { + // i.lifecycler.SetFlushOnShutdown(true) + //} + errs.Add(services.StopAndAwaitTerminated(context.Background(), i.lifecycler)) + + // i.streamRateCalculator.Stop() + + // In case the flag to terminate on shutdown is set or this instance is marked to release its resources, + // we need to mark the ingester service as "failed", so Loki will shut down entirely. + // The module manager logs the failure `modules.ErrStopProcess` in a special way. + if i.terminateOnShutdown && errs.Err() == nil { + i.removeShutdownMarkerFile() + return modules.ErrStopProcess + } + return errs.Err() +} + +// stopIncomingRequests is called when ingester is stopping +func (i *Ingester) stopIncomingRequests() { + i.shutdownMtx.Lock() + defer i.shutdownMtx.Unlock() + + i.readonly = true +} + +// removeShutdownMarkerFile removes the shutdown marker if it exists. Any errors are logged. +func (i *Ingester) removeShutdownMarkerFile() { + shutdownMarkerPath := path.Join(i.cfg.ShutdownMarkerPath, shutdownMarkerFilename) + exists, err := shutdownMarkerExists(shutdownMarkerPath) + if err != nil { + level.Error(i.logger).Log("msg", "error checking shutdown marker file exists", "err", err) + } + if exists { + err = removeShutdownMarker(shutdownMarkerPath) + if err != nil { + level.Error(i.logger).Log("msg", "error removing shutdown marker file", "err", err) + } + } +} + +// PrepareShutdown will handle the /ingester/prepare_shutdown endpoint. +// +// Internally, when triggered, this handler will configure the ingester service to release their resources whenever a SIGTERM is received. +// Releasing resources meaning flushing data, deleting tokens, and removing itself from the ring. +// +// It also creates a file on disk which is used to re-apply the configuration if the +// ingester crashes and restarts before being permanently shutdown. 
+// +// * `GET` shows the status of this configuration +// * `POST` enables this configuration +// * `DELETE` disables this configuration +func (i *Ingester) PrepareShutdown(w http.ResponseWriter, r *http.Request) { + if i.cfg.ShutdownMarkerPath == "" { + w.WriteHeader(http.StatusInternalServerError) + return + } + shutdownMarkerPath := path.Join(i.cfg.ShutdownMarkerPath, shutdownMarkerFilename) + + switch r.Method { + case http.MethodGet: + exists, err := shutdownMarkerExists(shutdownMarkerPath) + if err != nil { + level.Error(i.logger).Log("msg", "unable to check for prepare-shutdown marker file", "path", shutdownMarkerPath, "err", err) + w.WriteHeader(http.StatusInternalServerError) + return + } + + if exists { + util.WriteTextResponse(w, "set") + } else { + util.WriteTextResponse(w, "unset") + } + case http.MethodPost: + if err := createShutdownMarker(shutdownMarkerPath); err != nil { + level.Error(i.logger).Log("msg", "unable to create prepare-shutdown marker file", "path", shutdownMarkerPath, "err", err) + w.WriteHeader(http.StatusInternalServerError) + return + } + + i.setPrepareShutdown() + level.Info(i.logger).Log("msg", "created prepare-shutdown marker file", "path", shutdownMarkerPath) + + w.WriteHeader(http.StatusNoContent) + case http.MethodDelete: + if err := removeShutdownMarker(shutdownMarkerPath); err != nil { + level.Error(i.logger).Log("msg", "unable to remove prepare-shutdown marker file", "path", shutdownMarkerPath, "err", err) + w.WriteHeader(http.StatusInternalServerError) + return + } + + i.unsetPrepareShutdown() + level.Info(i.logger).Log("msg", "removed prepare-shutdown marker file", "path", shutdownMarkerPath) + + w.WriteHeader(http.StatusNoContent) + default: + w.WriteHeader(http.StatusMethodNotAllowed) + } +} + +// setPrepareShutdown toggles ingester lifecycler config to prepare for shutdown +func (i *Ingester) setPrepareShutdown() { + level.Info(i.logger).Log("msg", "preparing full ingester shutdown, resources will be released on SIGTERM") + i.lifecycler.SetFlushOnShutdown(true) + i.lifecycler.SetUnregisterOnShutdown(true) + i.terminateOnShutdown = true + i.metrics.shutdownMarker.Set(1) +} + +func (i *Ingester) unsetPrepareShutdown() { + level.Info(i.logger).Log("msg", "undoing preparation for full ingester shutdown") + i.lifecycler.SetFlushOnShutdown(true) + i.lifecycler.SetUnregisterOnShutdown(i.cfg.LifecyclerConfig.UnregisterOnShutdown) + i.terminateOnShutdown = false + i.metrics.shutdownMarker.Set(0) +} + +// createShutdownMarker writes a marker file to disk to indicate that an ingester is +// going to be scaled down in the future. The presence of this file means that an ingester +// should flush and upload all data when stopping. +func createShutdownMarker(p string) error { + // Write the file, fsync it, then fsync the containing directory in order to guarantee + // it is persisted to disk. From https://man7.org/linux/man-pages/man2/fsync.2.html + // + // > Calling fsync() does not necessarily ensure that the entry in the + // > directory containing the file has also reached disk. For that an + // > explicit fsync() on a file descriptor for the directory is also + // > needed. 
+ file, err := os.Create(p) + if err != nil { + return err + } + + merr := multierror.New() + _, err = file.WriteString(time.Now().UTC().Format(time.RFC3339)) + merr.Add(err) + merr.Add(file.Sync()) + merr.Add(file.Close()) + + if err := merr.Err(); err != nil { + return err + } + + dir, err := os.OpenFile(path.Dir(p), os.O_RDONLY, 0o777) + if err != nil { + return err + } + + merr.Add(dir.Sync()) + merr.Add(dir.Close()) + return merr.Err() +} + +// removeShutdownMarker removes the shutdown marker file if it exists. +func removeShutdownMarker(p string) error { + err := os.Remove(p) + if err != nil && !os.IsNotExist(err) { + return err + } + + dir, err := os.OpenFile(path.Dir(p), os.O_RDONLY, 0o777) + if err != nil { + return err + } + + merr := multierror.New() + merr.Add(dir.Sync()) + merr.Add(dir.Close()) + return merr.Err() +} + +// shutdownMarkerExists returns true if the shutdown marker file exists, false otherwise +func shutdownMarkerExists(p string) (bool, error) { + s, err := os.Stat(p) + if err != nil && os.IsNotExist(err) { + return false, nil + } + + if err != nil { + return false, err + } + + return s.Mode().IsRegular(), nil +} + +// ShutdownHandler handles a graceful shutdown of the ingester service and +// termination of the Loki process. +func (i *Ingester) ShutdownHandler(w http.ResponseWriter, r *http.Request) { + // Don't allow calling the shutdown handler multiple times + if i.State() != services.Running { + w.WriteHeader(http.StatusServiceUnavailable) + _, _ = w.Write([]byte("Ingester is stopping or already stopped.")) + return + } + params := r.URL.Query() + doFlush := util.FlagFromValues(params, "flush", true) + doDeleteRingTokens := util.FlagFromValues(params, "delete_ring_tokens", false) + doTerminate := util.FlagFromValues(params, "terminate", true) + err := i.handleShutdown(doTerminate, doFlush, doDeleteRingTokens) + + // Stopping the module will return the modules.ErrStopProcess error. This is + // needed so the Loki process is shut down completely. + if err == nil || err == modules.ErrStopProcess { + w.WriteHeader(http.StatusNoContent) + } else { + w.WriteHeader(http.StatusInternalServerError) + _, _ = w.Write([]byte(err.Error())) + } +} + +// handleShutdown triggers the following operations: +// - Change the state of ring to stop accepting writes. +// - optional: Flush all the chunks. +// - optional: Delete ring tokens file +// - Unregister from KV store +// - optional: Terminate process (handled by service manager in loki.go) +func (i *Ingester) handleShutdown(terminate, flush, del bool) error { + i.lifecycler.SetFlushOnShutdown(flush) + i.lifecycler.SetClearTokensOnShutdown(del) + i.lifecycler.SetUnregisterOnShutdown(true) + i.terminateOnShutdown = terminate + return services.StopAndAwaitTerminated(context.Background(), i) +} + +// Push implements logproto.Pusher. +func (i *Ingester) Push(_ context.Context, _ *logproto.PushRequest) (*logproto.PushResponse, error) { + return &logproto.PushResponse{}, nil +} + +// Watch implements grpc_health_v1.HealthCheck. +func (*Ingester) Watch(*grpc_health_v1.HealthCheckRequest, grpc_health_v1.Health_WatchServer) error { + return nil +} + +// ReadinessHandler is used to indicate to k8s when the ingesters are ready for +// the addition removal of another ingester. Returns 204 when the ingester is +// ready, 500 otherwise. 
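+// CheckReady returns nil only once the service is Running (or Stopping) and the lifecycler reports ready.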
+func (i *Ingester) CheckReady(ctx context.Context) error { + if s := i.State(); s != services.Running && s != services.Stopping { + return fmt.Errorf("ingester not ready: %v", s) + } + return i.lifecycler.CheckReady(ctx) +} + +// Flush implements ring.FlushTransferer +// Flush triggers a flush of all the chunks and closes the flush queues. +// Called from the Lifecycler as part of the ingester shutdown. +func (i *Ingester) Flush() { +} + +func (i *Ingester) TransferOut(_ context.Context) error { + return nil +} diff --git a/pkg/ingester-kafka/kafka/kafka_tee.go b/pkg/ingester-kafka/kafka/kafka_tee.go new file mode 100644 index 000000000000..e21f2ff5a77a --- /dev/null +++ b/pkg/ingester-kafka/kafka/kafka_tee.go @@ -0,0 +1,209 @@ +package kafka + +import ( + "context" + "errors" + "flag" + "fmt" + "math" + "time" + + "github.com/go-kit/log" + "github.com/go-kit/log/level" + "github.com/grafana/dskit/ring" + "github.com/grafana/dskit/user" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + "github.com/twmb/franz-go/pkg/kgo" + + "github.com/twmb/franz-go/plugin/kprom" + + "github.com/grafana/loki/v3/pkg/distributor" + "github.com/grafana/loki/v3/pkg/logproto" +) + +const writeTimeout = time.Minute + +type Config struct { + Address string `yaml:"address" docs:"the kafka endpoint to connect to"` + Topic string `yaml:"topic" docs:"the kafka topic to write to"` +} + +func (cfg *Config) RegisterFlags(f *flag.FlagSet) { + cfg.RegisterFlagsWithPrefix("", f) +} + +func (cfg *Config) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) { + f.StringVar(&cfg.Address, prefix+"address", "localhost:9092", "the kafka endpoint to connect to") + f.StringVar(&cfg.Topic, prefix+".topic", "loki.push", "The Kafka topic name.") +} + +type Tee struct { + logger log.Logger + kafkaClient *kgo.Client + partitionRing *ring.PartitionInstanceRing + + ingesterAppends *prometheus.CounterVec +} + +func NewTee( + cfg Config, + metricsNamespace string, + registerer prometheus.Registerer, + logger log.Logger, + partitionRing *ring.PartitionInstanceRing, +) (*Tee, error) { + registerer = prometheus.WrapRegistererWithPrefix(metricsNamespace+"_", registerer) + + metrics := kprom.NewMetrics( + "", // No prefix. We expect the input prometheus.Registered to be wrapped with a prefix. + kprom.Registerer(registerer), + kprom.FetchAndProduceDetail(kprom.Batches, kprom.Records, kprom.CompressedBytes, kprom.UncompressedBytes)) + + opts := append([]kgo.Opt{}, + kgo.SeedBrokers(cfg.Address), + + kgo.WithHooks(metrics), + // commonKafkaClientOptions(kafkaCfg, metrics, logger), + kgo.RequiredAcks(kgo.AllISRAcks()), + kgo.DefaultProduceTopic(cfg.Topic), + + kgo.AllowAutoTopicCreation(), + // We set the partition field in each record. + kgo.RecordPartitioner(kgo.ManualPartitioner()), + + // Set the upper bounds the size of a record batch. + kgo.ProducerBatchMaxBytes(1024*1024*1), + + // By default, the Kafka client allows 1 Produce in-flight request per broker. Disabling write idempotency + // (which we don't need), we can increase the max number of in-flight Produce requests per broker. A higher + // number of in-flight requests, in addition to short buffering ("linger") in client side before firing the + // next Produce request allows us to reduce the end-to-end latency. + // + // The result of the multiplication of producer linger and max in-flight requests should match the maximum + // Produce latency expected by the Kafka backend in a steady state. 
For example, 50ms * 20 requests = 1s, + // which means the Kafka client will keep issuing a Produce request every 50ms as far as the Kafka backend + // doesn't take longer than 1s to process them (if it takes longer, the client will buffer data and stop + // issuing new Produce requests until some previous ones complete). + kgo.DisableIdempotentWrite(), + kgo.ProducerLinger(50*time.Millisecond), + kgo.MaxProduceRequestsInflightPerBroker(20), + + // Unlimited number of Produce retries but a deadline on the max time a record can take to be delivered. + // With the default config it would retry infinitely. + // + // Details of the involved timeouts: + // - RecordDeliveryTimeout: how long a Kafka client Produce() call can take for a given record. The overhead + // timeout is NOT applied. + // - ProduceRequestTimeout: how long to wait for the response to the Produce request (the Kafka protocol message) + // after being sent on the network. The actual timeout is increased by the configured overhead. + // + // When a Produce request to Kafka fail, the client will retry up until the RecordDeliveryTimeout is reached. + // Once the timeout is reached, the Produce request will fail and all other buffered requests in the client + // (for the same partition) will fail too. See kgo.RecordDeliveryTimeout() documentation for more info. + kgo.RecordRetries(math.MaxInt64), + kgo.RecordDeliveryTimeout(time.Minute), + kgo.ProduceRequestTimeout(time.Minute), + kgo.RequestTimeoutOverhead(time.Minute), + + // Unlimited number of buffered records because we limit on bytes in Writer. The reason why we don't use + // kgo.MaxBufferedBytes() is because it suffers a deadlock issue: + // https://github.com/twmb/franz-go/issues/777 + kgo.MaxBufferedRecords(math.MaxInt), // Use a high value to set it as unlimited, because the client doesn't support "0 as unlimited". + kgo.MaxBufferedBytes(0), + ) + + kafkaClient, err := kgo.NewClient(opts...) + if err != nil { + panic("failed to start kafka client") + } + + t := &Tee{ + logger: log.With(logger, "component", "kafka-tee"), + ingesterAppends: promauto.With(registerer).NewCounterVec(prometheus.CounterOpts{ + Name: "kafka_ingester_appends_total", + Help: "The total number of appends sent to kafka ingest path.", + }, []string{"partition", "status"}), + kafkaClient: kafkaClient, + partitionRing: partitionRing, + } + + return t, nil +} + +// Duplicate Implements distributor.Tee which is used to tee distributor requests to pattern ingesters. +func (t *Tee) Duplicate(tenant string, streams []distributor.KeyedStream) { + for idx := range streams { + go func(stream distributor.KeyedStream) { + if err := t.sendStream(tenant, stream); err != nil { + level.Error(t.logger).Log("msg", "failed to send stream to kafka", "err", err) + } + }(streams[idx]) + } +} + +func (t *Tee) sendStream(tenant string, stream distributor.KeyedStream) error { + partitionID, err := t.partitionRing.PartitionRing().ActivePartitionForKey(stream.HashKey) + if err != nil { + t.ingesterAppends.WithLabelValues("partition_unknown", "fail").Inc() + return fmt.Errorf("failed to find active partition for stream: %w", err) + } + records, err := marshalWriteRequestToRecords(partitionID, tenant, stream.Stream, 1024*1024) + + ctx, cancel := context.WithTimeout(user.InjectOrgID(context.Background(), tenant), writeTimeout) + defer cancel() + produceResults := t.kafkaClient.ProduceSync(ctx, records...) 
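+ // ProduceSync blocks until every record has been acknowledged or has failed, so each result below carries a final per-record error.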
+ + var finalErr error + for _, result := range produceResults { + if result.Err != nil { + t.ingesterAppends.WithLabelValues(fmt.Sprintf("partition_%d", partitionID), "fail").Inc() + finalErr = err + } else { + t.ingesterAppends.WithLabelValues(fmt.Sprintf("partition_%d", partitionID), "success").Inc() + } + } + + return finalErr +} + +// marshalWriteRequestToRecords marshals a mimirpb.WriteRequest to one or more Kafka records. +// The request may be split to multiple records to get that each single Kafka record +// data size is not bigger than maxSize. +// +// This function is a best-effort. The returned Kafka records are not strictly guaranteed to +// have their data size limited to maxSize. The reason is that the WriteRequest is split +// by each individual Timeseries and Metadata: if a single Timeseries or Metadata is bigger than +// maxSize, than the resulting record will be bigger than the limit as well. +func marshalWriteRequestToRecords(partitionID int32, tenantID string, stream logproto.Stream, maxSize int) ([]*kgo.Record, error) { + reqSize := stream.Size() + + if reqSize <= maxSize { + // No need to split the request. We can take a fast path. + rec, err := marshalWriteRequestToRecord(partitionID, tenantID, stream, reqSize) + if err != nil { + return nil, err + } + + return []*kgo.Record{rec}, nil + } + return nil, errors.New("large write requests are not supported yet") + + // return marshalWriteRequestsToRecords(partitionID, tenantID, mimirpb.SplitWriteRequestByMaxMarshalSize(req, reqSize, maxSize)) +} + +func marshalWriteRequestToRecord(partitionID int32, tenantID string, stream logproto.Stream, reqSize int) (*kgo.Record, error) { + // Marshal the request. + data := make([]byte, reqSize) + n, err := stream.MarshalToSizedBuffer(data) + if err != nil { + return nil, fmt.Errorf("failed to serialise write request: %w", err) + } + data = data[:n] + + return &kgo.Record{ + Key: []byte(tenantID), // We don't partition based on the key, so the value here doesn't make any difference. + Value: data, + Partition: partitionID, + }, nil +} diff --git a/pkg/ingester-kafka/metrics.go b/pkg/ingester-kafka/metrics.go new file mode 100644 index 000000000000..dce284f7c254 --- /dev/null +++ b/pkg/ingester-kafka/metrics.go @@ -0,0 +1,20 @@ +package ingesterkafka + +import ( + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" +) + +type ingesterMetrics struct { + // Shutdown marker for ingester scale down. + shutdownMarker prometheus.Gauge +} + +func newIngesterMetrics(r prometheus.Registerer) *ingesterMetrics { + return &ingesterMetrics{ + shutdownMarker: promauto.With(r).NewGauge(prometheus.GaugeOpts{ + Name: "loki_ingester_kafka_shutdown_marker", + Help: "1 if prepare shutdown has been called, 0 otherwise.", + }), + } +} diff --git a/pkg/ingester-kafka/partitionring/partition_ring.go b/pkg/ingester-kafka/partitionring/partition_ring.go new file mode 100644 index 000000000000..dedfb8ac33bb --- /dev/null +++ b/pkg/ingester-kafka/partitionring/partition_ring.go @@ -0,0 +1,47 @@ +package partitionring + +import ( + "flag" + "time" + + "github.com/grafana/dskit/kv" + "github.com/grafana/dskit/ring" +) + +type Config struct { + KVStore kv.Config `yaml:"kvstore" doc:"description=The key-value store used to share the hash ring across multiple instances. 
This option needs be set on ingesters, distributors, queriers, and rulers when running in microservices mode."` + + // MinOwnersCount maps to ring.PartitionInstanceLifecyclerConfig's WaitOwnersCountOnPending. + MinOwnersCount int `yaml:"min_partition_owners_count"` + + // MinOwnersDuration maps to ring.PartitionInstanceLifecyclerConfig's WaitOwnersDurationOnPending. + MinOwnersDuration time.Duration `yaml:"min_partition_owners_duration"` + + // DeleteInactivePartitionAfter maps to ring.PartitionInstanceLifecyclerConfig's DeleteInactivePartitionAfterDuration. + DeleteInactivePartitionAfter time.Duration `yaml:"delete_inactive_partition_after"` + + // lifecyclerPollingInterval is the lifecycler polling interval. This setting is used to lower it in tests. + lifecyclerPollingInterval time.Duration +} + +// RegisterFlags adds the flags required to config this to the given FlagSet +func (cfg *Config) RegisterFlags(f *flag.FlagSet) { + // Ring flags + cfg.KVStore.Store = "memberlist" // Override default value. + cfg.KVStore.RegisterFlagsWithPrefix("ingester.partition-ring.", "collectors/", f) + + f.IntVar(&cfg.MinOwnersCount, "ingester.partition-ring.min-partition-owners-count", 1, "Minimum number of owners to wait before a PENDING partition gets switched to ACTIVE.") + f.DurationVar(&cfg.MinOwnersDuration, "ingester.partition-ring.min-partition-owners-duration", 10*time.Second, "How long the minimum number of owners are enforced before a PENDING partition gets switched to ACTIVE.") + f.DurationVar(&cfg.DeleteInactivePartitionAfter, "ingester.partition-ring.delete-inactive-partition-after", 13*time.Hour, "How long to wait before an INACTIVE partition is eligible for deletion. The partition is deleted only if it has been in INACTIVE state for at least the configured duration and it has no owners registered. A value of 0 disables partitions deletion.") +} + +func (cfg *Config) ToLifecyclerConfig(partitionID int32, instanceID string) ring.PartitionInstanceLifecyclerConfig { + return ring.PartitionInstanceLifecyclerConfig{ + PartitionID: partitionID, + InstanceID: instanceID, + WaitOwnersCountOnPending: cfg.MinOwnersCount, + WaitOwnersDurationOnPending: cfg.MinOwnersDuration, + DeleteInactivePartitionAfterDuration: cfg.DeleteInactivePartitionAfter, + PollingInterval: cfg.lifecyclerPollingInterval, + } +} diff --git a/pkg/ingester-rf1/ingester.go b/pkg/ingester-rf1/ingester.go index 8ee0d0e8928b..583aa6494e77 100644 --- a/pkg/ingester-rf1/ingester.go +++ b/pkg/ingester-rf1/ingester.go @@ -213,9 +213,7 @@ type Ingester struct { customStreamsTracker push.UsageTracker - // recalculateOwnedStreams periodically checks the ring for changes and recalculates owned streams for each instance. readRing ring.ReadRing - // recalculateOwnedStreams *recalculateOwnedStreams } // New makes a new Ingester. 
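The marshalWriteRequestToRecords helper above documents a best-effort size limit but currently rejects streams larger than maxSize. As a purely illustrative sketch (not part of this patch), the snippet below shows one way such a stream could be chunked entry by entry so that each chunk marshals to roughly maxSize or less; the splitStream name and its placement in the ingesterkafka package are assumptions made only for this example.

```go
package ingesterkafka

import "github.com/grafana/loki/v3/pkg/logproto"

// splitStream is a hypothetical helper (not part of this patch): it chunks a
// stream entry by entry and cuts a new chunk as soon as the current one reaches
// maxSize. Like the documented contract of marshalWriteRequestToRecords, this is
// best-effort: a single entry larger than maxSize still yields an oversized chunk.
func splitStream(stream logproto.Stream, maxSize int) []logproto.Stream {
	newChunk := func() logproto.Stream {
		return logproto.Stream{Labels: stream.Labels, Hash: stream.Hash}
	}

	var (
		out     []logproto.Stream
		current = newChunk()
	)
	for _, entry := range stream.Entries {
		current.Entries = append(current.Entries, entry)
		if current.Size() >= maxSize {
			out = append(out, current)
			current = newChunk()
		}
	}
	if len(current.Entries) > 0 {
		out = append(out, current)
	}
	return out
}
```

Each chunk could then be passed through marshalWriteRequestToRecord to build one Kafka record per chunk. Calling Size() on every append is fine for a sketch, but real code would likely track the running size incrementally instead.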
@@ -399,11 +397,6 @@ func (i *Ingester) starting(ctx context.Context) error { // return fmt.Errorf("can not start recalculate owned streams service: %w", err) //} - err = i.lifecycler.AwaitRunning(ctx) - if err != nil { - return fmt.Errorf("can not ensure recalculate owned streams service is running: %w", err) - } - go i.periodicStreamMaintenance() return nil } diff --git a/pkg/ingester-rf1/objstore/storage.go b/pkg/ingester-rf1/objstore/storage.go index beb544e1980a..889c7e0bb87a 100644 --- a/pkg/ingester-rf1/objstore/storage.go +++ b/pkg/ingester-rf1/objstore/storage.go @@ -6,6 +6,7 @@ import ( "io" "sort" + "github.com/opentracing/opentracing-go" "github.com/prometheus/common/model" "github.com/grafana/loki/v3/pkg/storage" @@ -94,6 +95,11 @@ func (m *Multi) GetObject(ctx context.Context, objectKey string) (io.ReadCloser, } func (m *Multi) GetObjectRange(ctx context.Context, objectKey string, off, length int64) (io.ReadCloser, error) { + sp, _ := opentracing.StartSpanFromContext(ctx, "GetObjectRange") + if sp != nil { + sp.LogKV("objectKey", objectKey, "off", off, "length", length) + } + defer sp.Finish() s, err := m.GetStoreFor(model.Now()) if err != nil { return nil, err diff --git a/pkg/loki/config_wrapper.go b/pkg/loki/config_wrapper.go index 91b4c329a5c6..3885dffe6263 100644 --- a/pkg/loki/config_wrapper.go +++ b/pkg/loki/config_wrapper.go @@ -276,6 +276,21 @@ func applyConfigToRings(r, defaults *ConfigWrapper, rc lokiring.RingConfig, merg r.Pattern.LifecyclerConfig.ObservePeriod = rc.ObservePeriod } + if mergeWithExisting { + r.KafkaIngester.LifecyclerConfig.RingConfig.KVStore = rc.KVStore + r.KafkaIngester.LifecyclerConfig.HeartbeatPeriod = rc.HeartbeatPeriod + r.KafkaIngester.LifecyclerConfig.RingConfig.HeartbeatTimeout = rc.HeartbeatTimeout + r.KafkaIngester.LifecyclerConfig.TokensFilePath = rc.TokensFilePath + r.KafkaIngester.LifecyclerConfig.RingConfig.ZoneAwarenessEnabled = rc.ZoneAwarenessEnabled + r.KafkaIngester.LifecyclerConfig.ID = rc.InstanceID + r.KafkaIngester.LifecyclerConfig.InfNames = rc.InstanceInterfaceNames + r.KafkaIngester.LifecyclerConfig.Port = rc.InstancePort + r.KafkaIngester.LifecyclerConfig.Addr = rc.InstanceAddr + r.KafkaIngester.LifecyclerConfig.Zone = rc.InstanceZone + r.KafkaIngester.LifecyclerConfig.ListenPort = rc.ListenPort + r.KafkaIngester.LifecyclerConfig.ObservePeriod = rc.ObservePeriod + } + // Distributor if mergeWithExisting || reflect.DeepEqual(r.Distributor.DistributorRing, defaults.Distributor.DistributorRing) { r.Distributor.DistributorRing.HeartbeatTimeout = rc.HeartbeatTimeout diff --git a/pkg/loki/loki.go b/pkg/loki/loki.go index 724676e5987a..84282974e733 100644 --- a/pkg/loki/loki.go +++ b/pkg/loki/loki.go @@ -39,6 +39,9 @@ import ( "github.com/grafana/loki/v3/pkg/distributor" "github.com/grafana/loki/v3/pkg/indexgateway" "github.com/grafana/loki/v3/pkg/ingester" + ingesterkafka "github.com/grafana/loki/v3/pkg/ingester-kafka" + "github.com/grafana/loki/v3/pkg/ingester-kafka/kafka" + "github.com/grafana/loki/v3/pkg/ingester-kafka/partitionring" ingester_rf1 "github.com/grafana/loki/v3/pkg/ingester-rf1" "github.com/grafana/loki/v3/pkg/ingester-rf1/metastore" metastoreclient "github.com/grafana/loki/v3/pkg/ingester-rf1/metastore/client" @@ -94,7 +97,7 @@ type Config struct { IngesterClient ingester_client.Config `yaml:"ingester_client,omitempty"` IngesterRF1Client ingester_client.Config `yaml:"ingester_rf1_client,omitempty"` Ingester ingester.Config `yaml:"ingester,omitempty"` - IngesterRF1 ingester_rf1.Config 
`yaml:"ingester_rf1,omitempty"` + IngesterRF1 ingester_rf1.Config `yaml:"ingester_rf1,omitempty" category:"experimental"` Pattern pattern.Config `yaml:"pattern_ingester,omitempty"` IndexGateway indexgateway.Config `yaml:"index_gateway"` BloomBuild bloombuild.Config `yaml:"bloom_build,omitempty" category:"experimental"` @@ -111,6 +114,9 @@ type Config struct { MemberlistKV memberlist.KVConfig `yaml:"memberlist"` Metastore metastore.Config `yaml:"metastore,omitempty"` MetastoreClient metastoreclient.Config `yaml:"metastore_client"` + PartitionRingConfig partitionring.Config `yaml:"partition_ring,omitempty" category:"experimental"` + KafkaConfig kafka.Config `yaml:"kafka_config,omitempty" category:"experimental"` + KafkaIngester ingesterkafka.Config `yaml:"kafka_ingester,omitempty" category:"experimental"` RuntimeConfig runtimeconfig.Config `yaml:"runtime_config,omitempty"` OperationalConfig runtime.Config `yaml:"operational_config,omitempty"` @@ -193,6 +199,9 @@ func (c *Config) RegisterFlags(f *flag.FlagSet) { c.Profiling.RegisterFlags(f) c.Metastore.RegisterFlags(f) c.MetastoreClient.RegisterFlags(f) + c.PartitionRingConfig.RegisterFlags(f) + c.KafkaConfig.RegisterFlags(f) + c.KafkaIngester.RegisterFlags(f) } func (c *Config) registerServerFlagsWithChangedDefaultValues(fs *flag.FlagSet) { @@ -373,6 +382,9 @@ type Loki struct { usageReport *analytics.Reporter indexGatewayRingManager *lokiring.RingManager MetastoreClient *metastoreclient.Client + partitionRingWatcher *ring.PartitionRingWatcher + partitionRing *ring.PartitionInstanceRing + kafkaIngester *ingesterkafka.Ingester ClientMetrics storage.ClientMetrics deleteClientMetrics *deletion.DeleteRequestClientMetrics @@ -676,6 +688,7 @@ func (t *Loki) setupModuleManager() error { mm.RegisterModule(Querier, t.initQuerier) mm.RegisterModule(Ingester, t.initIngester) mm.RegisterModule(IngesterRF1, t.initIngesterRF1) + mm.RegisterModule(IngesterKafka, t.initKafkaIngester) mm.RegisterModule(IngesterRF1RingClient, t.initIngesterRF1RingClient, modules.UserInvisibleModule) mm.RegisterModule(IngesterQuerier, t.initIngesterQuerier) mm.RegisterModule(IngesterGRPCInterceptors, t.initIngesterGRPCInterceptors, modules.UserInvisibleModule) @@ -702,6 +715,7 @@ func (t *Loki) setupModuleManager() error { mm.RegisterModule(PatternIngester, t.initPatternIngester) mm.RegisterModule(Metastore, t.initMetastore) mm.RegisterModule(MetastoreClient, t.initMetastoreClient, modules.UserInvisibleModule) + mm.RegisterModule(PartitionRing, t.initPartitionRing, modules.UserInvisibleModule) mm.RegisterModule(All, nil) mm.RegisterModule(Read, nil) @@ -715,9 +729,10 @@ func (t *Loki) setupModuleManager() error { Overrides: {RuntimeConfig}, OverridesExporter: {Overrides, Server}, TenantConfigs: {RuntimeConfig}, - Distributor: {Ring, Server, Overrides, TenantConfigs, PatternRingClient, PatternIngesterTee, IngesterRF1RingClient, Analytics}, + Distributor: {Ring, Server, Overrides, TenantConfigs, PatternRingClient, PatternIngesterTee, IngesterRF1RingClient, Analytics, PartitionRing}, Store: {Overrides, IndexGatewayRing}, - IngesterRF1: {Store, Server, MemberlistKV, TenantConfigs, MetastoreClient, Analytics}, + IngesterRF1: {Store, Server, MemberlistKV, TenantConfigs, MetastoreClient, Analytics, PartitionRing}, + IngesterKafka: {PartitionRing}, Ingester: {Store, Server, MemberlistKV, TenantConfigs, Analytics}, Querier: {Store, Ring, Server, IngesterQuerier, PatternRingClient, MetastoreClient, Overrides, Analytics, CacheGenerationLoader, QuerySchedulerRing}, 
QueryFrontendTripperware: {Server, Overrides, TenantConfigs}, @@ -740,13 +755,14 @@ func (t *Loki) setupModuleManager() error { IngesterQuerier: {Ring}, QuerySchedulerRing: {Overrides, MemberlistKV}, IndexGatewayRing: {Overrides, MemberlistKV}, + PartitionRing: {MemberlistKV, Server, Ring}, MemberlistKV: {Server}, Read: {QueryFrontend, Querier}, - Write: {Ingester, IngesterRF1, Distributor, PatternIngester}, + Write: {Ingester, IngesterRF1, Distributor, PatternIngester, IngesterKafka}, Backend: {QueryScheduler, Ruler, Compactor, IndexGateway, BloomGateway}, - All: {QueryScheduler, QueryFrontend, Querier, Ingester, IngesterRF1, PatternIngester, Distributor, Ruler, Compactor, Metastore}, + All: {QueryScheduler, QueryFrontend, Querier, Ingester, IngesterRF1, PatternIngester, Distributor, Ruler, Compactor, Metastore, IngesterKafka}, } if t.Cfg.Querier.PerRequestLimitsEnabled { diff --git a/pkg/loki/modules.go b/pkg/loki/modules.go index 8099e812d1f5..4410525b377a 100644 --- a/pkg/loki/modules.go +++ b/pkg/loki/modules.go @@ -19,6 +19,7 @@ import ( "github.com/go-kit/log" "github.com/go-kit/log/level" "github.com/grafana/dskit/dns" + "github.com/grafana/dskit/kv" "github.com/grafana/dskit/kv/codec" "github.com/grafana/dskit/kv/memberlist" "github.com/grafana/dskit/middleware" @@ -46,6 +47,8 @@ import ( "github.com/grafana/loki/v3/pkg/distributor" "github.com/grafana/loki/v3/pkg/indexgateway" "github.com/grafana/loki/v3/pkg/ingester" + ingesterkafka "github.com/grafana/loki/v3/pkg/ingester-kafka" + "github.com/grafana/loki/v3/pkg/ingester-kafka/kafka" ingester_rf1 "github.com/grafana/loki/v3/pkg/ingester-rf1" "github.com/grafana/loki/v3/pkg/ingester-rf1/metastore" metastoreclient "github.com/grafana/loki/v3/pkg/ingester-rf1/metastore/client" @@ -108,6 +111,7 @@ const ( CacheGenerationLoader string = "cache-generation-loader" Ingester string = "ingester" IngesterRF1 string = "ingester-rf1" + IngesterKafka string = "ingester-kafka" IngesterRF1RingClient string = "ingester-rf1-ring-client" PatternIngester string = "pattern-ingester" PatternIngesterTee string = "pattern-ingester-tee" @@ -143,6 +147,7 @@ const ( InitCodec string = "init-codec" Metastore string = "metastore" MetastoreClient string = "metastore-client" + PartitionRing string = "partition-ring" ) const ( @@ -336,6 +341,13 @@ func (t *Loki) initDistributor() (services.Service, error) { } t.Tee = distributor.WrapTee(t.Tee, rf1Tee) } + if t.Cfg.KafkaIngester.Enabled { + kafkaTee, err := kafka.NewTee(t.Cfg.KafkaConfig, t.Cfg.MetricsNamespace, prometheus.DefaultRegisterer, util_log.Logger, t.partitionRing) + if err != nil { + return nil, err + } + t.Tee = distributor.WrapTee(t.Tee, kafkaTee) + } var err error logger := log.With(util_log.Logger, "component", "distributor") @@ -637,6 +649,43 @@ func (t *Loki) initIngester() (_ services.Service, err error) { return t.Ingester, nil } +func (t *Loki) initKafkaIngester() (_ services.Service, err error) { + if !t.Cfg.KafkaIngester.Enabled { + return nil, nil + } + + logger := log.With(util_log.Logger, "component", "ingester-kafka") + t.Cfg.KafkaIngester.LifecyclerConfig.ListenPort = t.Cfg.Server.GRPCListenPort + + if t.Cfg.KafkaIngester.ShutdownMarkerPath == "" && t.Cfg.Common.PathPrefix != "" { + t.Cfg.KafkaIngester.ShutdownMarkerPath = t.Cfg.Common.PathPrefix + } + if t.Cfg.KafkaIngester.ShutdownMarkerPath == "" { + level.Warn(util_log.Logger).Log("msg", "The config setting shutdown marker path is not set. 
The /ingester/prepare_shutdown endpoint won't work") + } + + t.kafkaIngester, err = ingesterkafka.New(t.Cfg.KafkaIngester, prometheus.DefaultRegisterer, t.Cfg.MetricsNamespace, logger) + if err != nil { + fmt.Println("Error initializing ingester rf1", err) + return + } + + // Not enabled for kafka ingester yet + // fmt.Println("registered GRPC") + // logproto.RegisterPusherRF1Server(t.Server.GRPC, t.IngesterRF1) + + httpMiddleware := middleware.Merge( + serverutil.RecoveryHTTPMiddleware, + ) + t.Server.HTTP.Methods("POST", "GET", "DELETE").Path("/ingester-rf1/prepare_shutdown").Handler( + httpMiddleware.Wrap(http.HandlerFunc(t.kafkaIngester.PrepareShutdown)), + ) + t.Server.HTTP.Methods("POST", "GET").Path("/ingester-rf1/shutdown").Handler( + httpMiddleware.Wrap(http.HandlerFunc(t.kafkaIngester.ShutdownHandler)), + ) + return t.kafkaIngester, nil +} + func (t *Loki) initIngesterRF1() (_ services.Service, err error) { if !t.Cfg.IngesterRF1.Enabled { return nil, nil @@ -1828,6 +1877,27 @@ func (t *Loki) initMetastoreClient() (services.Service, error) { return mc.Service(), nil } +// The Ingest Partition Ring is responsible for watching the available ingesters and assigning partitions to incoming requests. +func (t *Loki) initPartitionRing() (services.Service, error) { + if !t.Cfg.KafkaIngester.Enabled { // TODO: New config flag + return nil, nil + } + + // TODO: New config? + kvClient, err := kv.NewClient(t.Cfg.KafkaIngester.LifecyclerConfig.RingConfig.KVStore, ring.GetPartitionRingCodec(), kv.RegistererWithKVName(prometheus.DefaultRegisterer, ingesterkafka.PartitionRingName+"-watcher"), util_log.Logger) + if err != nil { + return nil, fmt.Errorf("creating KV store for partitions ring watcher: %w", err) + } + + t.partitionRingWatcher = ring.NewPartitionRingWatcher(ingesterkafka.PartitionRingName, ingesterkafka.PartitionRingName+"-key", kvClient, util_log.Logger, prometheus.WrapRegistererWithPrefix("loki_", prometheus.DefaultRegisterer)) + t.partitionRing = ring.NewPartitionInstanceRing(t.partitionRingWatcher, t.ring, t.Cfg.Ingester.LifecyclerConfig.RingConfig.HeartbeatTimeout) + + // Expose a web page to view the partitions ring state. + t.Server.HTTP.Path("/partition-ring").Methods("GET", "POST").Handler(ring.NewPartitionRingPageHandler(t.partitionRingWatcher, ring.NewPartitionRingEditor(ingesterkafka.PartitionRingName+"-key", kvClient))) + + return t.partitionRingWatcher, nil +} + func (t *Loki) deleteRequestsClient(clientType string, limits limiter.CombinedLimits) (deletion.DeleteRequestsClient, error) { if !t.supportIndexDeleteRequest() || !t.Cfg.CompactorConfig.RetentionEnabled { return deletion.NewNoOpDeleteRequestsStore(), nil diff --git a/vendor/github.com/pierrec/lz4/v4/README.md b/vendor/github.com/pierrec/lz4/v4/README.md index 4629c9d0e03d..dee77545b0c2 100644 --- a/vendor/github.com/pierrec/lz4/v4/README.md +++ b/vendor/github.com/pierrec/lz4/v4/README.md @@ -21,7 +21,7 @@ go get github.com/pierrec/lz4/v4 There is a command line interface tool to compress and decompress LZ4 files. 
``` -go install github.com/pierrec/lz4/v4/cmd/lz4c +go install github.com/pierrec/lz4/v4/cmd/lz4c@latest ``` Usage diff --git a/vendor/github.com/pierrec/lz4/v4/compressing_reader.go b/vendor/github.com/pierrec/lz4/v4/compressing_reader.go new file mode 100644 index 000000000000..8df0dc76d007 --- /dev/null +++ b/vendor/github.com/pierrec/lz4/v4/compressing_reader.go @@ -0,0 +1,222 @@ +package lz4 + +import ( + "errors" + "io" + + "github.com/pierrec/lz4/v4/internal/lz4block" + "github.com/pierrec/lz4/v4/internal/lz4errors" + "github.com/pierrec/lz4/v4/internal/lz4stream" +) + +type crState int + +const ( + crStateInitial crState = iota + crStateReading + crStateFlushing + crStateDone +) + +type CompressingReader struct { + state crState + src io.ReadCloser // source reader + level lz4block.CompressionLevel // how hard to try + frame *lz4stream.Frame // frame being built + in []byte + out ovWriter + handler func(int) +} + +// NewCompressingReader creates a reader which reads compressed data from +// raw stream. This makes it a logical opposite of a normal lz4.Reader. +// We require an io.ReadCloser as an underlying source for compatibility +// with Go's http.Request. +func NewCompressingReader(src io.ReadCloser) *CompressingReader { + zrd := &CompressingReader { + frame: lz4stream.NewFrame(), + } + + _ = zrd.Apply(DefaultBlockSizeOption, DefaultChecksumOption, defaultOnBlockDone) + zrd.Reset(src) + + return zrd +} + +// Source exposes the underlying source stream for introspection and control. +func (zrd *CompressingReader) Source() io.ReadCloser { + return zrd.src +} + +// Close simply invokes the underlying stream Close method. This method is +// provided for the benefit of Go http client/server, which relies on Close +// for goroutine termination. +func (zrd *CompressingReader) Close() error { + return zrd.src.Close() +} + +// Apply applies useful options to the lz4 encoder. 
+func (zrd *CompressingReader) Apply(options ...Option) (err error) { + if zrd.state != crStateInitial { + return lz4errors.ErrOptionClosedOrError + } + + zrd.Reset(zrd.src) + + for _, o := range options { + if err = o(zrd); err != nil { + return + } + } + return +} + +func (*CompressingReader) private() {} + +func (zrd *CompressingReader) init() error { + zrd.frame.InitW(&zrd.out, 1, false) + size := zrd.frame.Descriptor.Flags.BlockSizeIndex() + zrd.in = size.Get() + return zrd.frame.Descriptor.Write(zrd.frame, &zrd.out) +} + +// Read allows reading of lz4 compressed data +func (zrd *CompressingReader) Read(p []byte) (n int, err error) { + defer func() { + if err != nil { + zrd.state = crStateDone + } + }() + + if !zrd.out.reset(p) { + return len(p), nil + } + + switch zrd.state { + case crStateInitial: + err = zrd.init() + if err != nil { + return + } + zrd.state = crStateReading + case crStateDone: + return 0, errors.New("This reader is done") + case crStateFlushing: + if zrd.out.dataPos > 0 { + n = zrd.out.dataPos + zrd.out.data = nil + zrd.out.dataPos = 0 + return + } else { + zrd.state = crStateDone + return 0, io.EOF + } + } + + for zrd.state == crStateReading { + block := zrd.frame.Blocks.Block + + var rCount int + rCount, err = io.ReadFull(zrd.src, zrd.in) + switch err { + case nil: + err = block.Compress( + zrd.frame, zrd.in[ : rCount], zrd.level, + ).Write(zrd.frame, &zrd.out) + zrd.handler(len(block.Data)) + if err != nil { + return + } + + if zrd.out.dataPos == len(zrd.out.data) { + n = zrd.out.dataPos + zrd.out.dataPos = 0 + zrd.out.data = nil + return + } + case io.EOF, io.ErrUnexpectedEOF: // read may be partial + if rCount > 0 { + err = block.Compress( + zrd.frame, zrd.in[ : rCount], zrd.level, + ).Write(zrd.frame, &zrd.out) + zrd.handler(len(block.Data)) + if err != nil { + return + } + } + + err = zrd.frame.CloseW(&zrd.out, 1) + if err != nil { + return + } + zrd.state = crStateFlushing + + n = zrd.out.dataPos + zrd.out.dataPos = 0 + zrd.out.data = nil + return + default: + return + } + } + + err = lz4errors.ErrInternalUnhandledState + return +} + +// Reset makes the stream usable again; mostly handy to reuse lz4 encoder +// instances. +func (zrd *CompressingReader) Reset(src io.ReadCloser) { + zrd.frame.Reset(1) + zrd.state = crStateInitial + zrd.src = src + zrd.out.clear() +} + +type ovWriter struct { + data []byte + ov []byte + dataPos int + ovPos int +} + +func (wr *ovWriter) Write(p []byte) (n int, err error) { + count := copy(wr.data[wr.dataPos : ], p) + wr.dataPos += count + + if count < len(p) { + wr.ov = append(wr.ov, p[count : ]...) 
+ } + + return len(p), nil +} + +func (wr *ovWriter) reset(out []byte) bool { + ovRem := len(wr.ov) - wr.ovPos + + if ovRem >= len(out) { + wr.ovPos += copy(out, wr.ov[wr.ovPos : ]) + return false + } + + if ovRem > 0 { + copy(out, wr.ov[wr.ovPos : ]) + wr.ov = wr.ov[ : 0] + wr.ovPos = 0 + wr.dataPos = ovRem + } else if wr.ovPos > 0 { + wr.ov = wr.ov[ : 0] + wr.ovPos = 0 + wr.dataPos = 0 + } + + wr.data = out + return true +} + +func (wr *ovWriter) clear() { + wr.data = nil + wr.dataPos = 0 + wr.ov = wr.ov[ : 0] + wr.ovPos = 0 +} diff --git a/vendor/github.com/pierrec/lz4/v4/internal/lz4block/blocks.go b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/blocks.go index a1bfa99e4b45..138083d9479c 100644 --- a/vendor/github.com/pierrec/lz4/v4/internal/lz4block/blocks.go +++ b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/blocks.go @@ -8,12 +8,9 @@ const ( Block256Kb Block1Mb Block4Mb + Block8Mb = 2 * Block4Mb ) -// In legacy mode all blocks are compressed regardless -// of the compressed size: use the bound size. -var Block8Mb = uint32(CompressBlockBound(8 << 20)) - var ( BlockPool64K = sync.Pool{New: func() interface{} { return make([]byte, Block64Kb) }} BlockPool256K = sync.Pool{New: func() interface{} { return make([]byte, Block256Kb) }} diff --git a/vendor/github.com/pierrec/lz4/v4/internal/lz4stream/block.go b/vendor/github.com/pierrec/lz4/v4/internal/lz4stream/block.go index 459086f09b29..e96465460c5d 100644 --- a/vendor/github.com/pierrec/lz4/v4/internal/lz4stream/block.go +++ b/vendor/github.com/pierrec/lz4/v4/internal/lz4stream/block.go @@ -224,9 +224,7 @@ func (b *FrameDataBlock) Close(f *Frame) { func (b *FrameDataBlock) Compress(f *Frame, src []byte, level lz4block.CompressionLevel) *FrameDataBlock { data := b.data if f.isLegacy() { - // In legacy mode, the buffer is sized according to CompressBlockBound, - // but only 8Mb is buffered for compression. 
- src = src[:8<<20] + data = data[:cap(data)] } else { data = data[:len(src)] // trigger the incompressible flag in CompressBlock } diff --git a/vendor/github.com/pierrec/lz4/v4/options.go b/vendor/github.com/pierrec/lz4/v4/options.go index 46a87380313f..57a44e767dc6 100644 --- a/vendor/github.com/pierrec/lz4/v4/options.go +++ b/vendor/github.com/pierrec/lz4/v4/options.go @@ -57,6 +57,13 @@ func BlockSizeOption(size BlockSize) Option { } w.frame.Descriptor.Flags.BlockSizeIndexSet(lz4block.Index(size)) return nil + case *CompressingReader: + size := uint32(size) + if !lz4block.IsValid(size) { + return fmt.Errorf("%w: %d", lz4errors.ErrOptionInvalidBlockSize, size) + } + w.frame.Descriptor.Flags.BlockSizeIndexSet(lz4block.Index(size)) + return nil } return lz4errors.ErrOptionNotApplicable } @@ -72,6 +79,9 @@ func BlockChecksumOption(flag bool) Option { case *Writer: w.frame.Descriptor.Flags.BlockChecksumSet(flag) return nil + case *CompressingReader: + w.frame.Descriptor.Flags.BlockChecksumSet(flag) + return nil } return lz4errors.ErrOptionNotApplicable } @@ -87,6 +97,9 @@ func ChecksumOption(flag bool) Option { case *Writer: w.frame.Descriptor.Flags.ContentChecksumSet(flag) return nil + case *CompressingReader: + w.frame.Descriptor.Flags.ContentChecksumSet(flag) + return nil } return lz4errors.ErrOptionNotApplicable } @@ -104,6 +117,10 @@ func SizeOption(size uint64) Option { w.frame.Descriptor.Flags.SizeSet(size > 0) w.frame.Descriptor.ContentSize = size return nil + case *CompressingReader: + w.frame.Descriptor.Flags.SizeSet(size > 0) + w.frame.Descriptor.ContentSize = size + return nil } return lz4errors.ErrOptionNotApplicable } @@ -162,6 +179,14 @@ func CompressionLevelOption(level CompressionLevel) Option { } w.level = lz4block.CompressionLevel(level) return nil + case *CompressingReader: + switch level { + case Fast, Level1, Level2, Level3, Level4, Level5, Level6, Level7, Level8, Level9: + default: + return fmt.Errorf("%w: %d", lz4errors.ErrOptionInvalidCompressionLevel, level) + } + w.level = lz4block.CompressionLevel(level) + return nil } return lz4errors.ErrOptionNotApplicable } @@ -186,6 +211,9 @@ func OnBlockDoneOption(handler func(size int)) Option { case *Reader: rw.handler = handler return nil + case *CompressingReader: + rw.handler = handler + return nil } return lz4errors.ErrOptionNotApplicable } diff --git a/vendor/github.com/pierrec/lz4/v4/writer.go b/vendor/github.com/pierrec/lz4/v4/writer.go index 77699f2b54aa..4358adee1093 100644 --- a/vendor/github.com/pierrec/lz4/v4/writer.go +++ b/vendor/github.com/pierrec/lz4/v4/writer.go @@ -150,6 +150,10 @@ func (w *Writer) Flush() (err error) { case writeState: case errorState: return w.state.err + case newState: + if err = w.init(); w.state.next(err) { + return + } default: return nil } diff --git a/vendor/github.com/twmb/franz-go/LICENSE b/vendor/github.com/twmb/franz-go/LICENSE new file mode 100644 index 000000000000..36e18034325d --- /dev/null +++ b/vendor/github.com/twmb/franz-go/LICENSE @@ -0,0 +1,24 @@ +Copyright 2020, Travis Bischel. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of the library nor the + names of its contributors may be used to endorse or promote products + derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/twmb/franz-go/pkg/kbin/primitives.go b/vendor/github.com/twmb/franz-go/pkg/kbin/primitives.go new file mode 100644 index 000000000000..487e7f6c2a3b --- /dev/null +++ b/vendor/github.com/twmb/franz-go/pkg/kbin/primitives.go @@ -0,0 +1,856 @@ +// Package kbin contains Kafka primitive reading and writing functions. +package kbin + +import ( + "encoding/binary" + "errors" + "math" + "math/bits" + "reflect" + "unsafe" +) + +// This file contains primitive type encoding and decoding. +// +// The Reader helper can be used even when content runs out +// or an error is hit; all other number requests will return +// zero so a decode will basically no-op. + +// ErrNotEnoughData is returned when a type could not fully decode +// from a slice because the slice did not have enough data. +var ErrNotEnoughData = errors.New("response did not contain enough data to be valid") + +// AppendBool appends 1 for true or 0 for false to dst. +func AppendBool(dst []byte, v bool) []byte { + if v { + return append(dst, 1) + } + return append(dst, 0) +} + +// AppendInt8 appends an int8 to dst. +func AppendInt8(dst []byte, i int8) []byte { + return append(dst, byte(i)) +} + +// AppendInt16 appends a big endian int16 to dst. +func AppendInt16(dst []byte, i int16) []byte { + return AppendUint16(dst, uint16(i)) +} + +// AppendUint16 appends a big endian uint16 to dst. +func AppendUint16(dst []byte, u uint16) []byte { + return append(dst, byte(u>>8), byte(u)) +} + +// AppendInt32 appends a big endian int32 to dst. +func AppendInt32(dst []byte, i int32) []byte { + return AppendUint32(dst, uint32(i)) +} + +// AppendInt64 appends a big endian int64 to dst. +func AppendInt64(dst []byte, i int64) []byte { + return appendUint64(dst, uint64(i)) +} + +// AppendFloat64 appends a big endian float64 to dst. +func AppendFloat64(dst []byte, f float64) []byte { + return appendUint64(dst, math.Float64bits(f)) +} + +// AppendUuid appends the 16 uuid bytes to dst. +func AppendUuid(dst []byte, uuid [16]byte) []byte { + return append(dst, uuid[:]...) +} + +func appendUint64(dst []byte, u uint64) []byte { + return append(dst, byte(u>>56), byte(u>>48), byte(u>>40), byte(u>>32), + byte(u>>24), byte(u>>16), byte(u>>8), byte(u)) +} + +// AppendUint32 appends a big endian uint32 to dst. 
+func AppendUint32(dst []byte, u uint32) []byte { + return append(dst, byte(u>>24), byte(u>>16), byte(u>>8), byte(u)) +} + +// uvarintLens could only be length 65, but using 256 allows bounds check +// elimination on lookup. +const uvarintLens = "\x01\x01\x01\x01\x01\x01\x01\x01\x02\x02\x02\x02\x02\x02\x02\x03\x03\x03\x03\x03\x03\x03\x04\x04\x04\x04\x04\x04\x04\x05\x05\x05\x05\x05\x05\x05\x06\x06\x06\x06\x06\x06\x06\x07\x07\x07\x07\x07\x07\x07\x08\x08\x08\x08\x08\x08\x08\x09\x09\x09\x09\x09\x09\x09\x0a\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" + +// VarintLen returns how long i would be if it were varint encoded. +func VarintLen(i int32) int { + u := uint32(i)<<1 ^ uint32(i>>31) + return UvarintLen(u) +} + +// UvarintLen returns how long u would be if it were uvarint encoded. +func UvarintLen(u uint32) int { + return int(uvarintLens[byte(bits.Len32(u))]) +} + +// VarlongLen returns how long i would be if it were varlong encoded. +func VarlongLen(i int64) int { + u := uint64(i)<<1 ^ uint64(i>>63) + return uvarlongLen(u) +} + +func uvarlongLen(u uint64) int { + return int(uvarintLens[byte(bits.Len64(u))]) +} + +// Varint is a loop unrolled 32 bit varint decoder. The return semantics +// are the same as binary.Varint, with the added benefit that overflows +// in 5 byte encodings are handled rather than left to the user. +func Varint(in []byte) (int32, int) { + x, n := Uvarint(in) + return int32((x >> 1) ^ -(x & 1)), n +} + +// Uvarint is a loop unrolled 32 bit uvarint decoder. The return semantics +// are the same as binary.Uvarint, with the added benefit that overflows +// in 5 byte encodings are handled rather than left to the user. +func Uvarint(in []byte) (uint32, int) { + var x uint32 + var overflow int + + if len(in) < 1 { + goto fail + } + + x = uint32(in[0] & 0x7f) + if in[0]&0x80 == 0 { + return x, 1 + } else if len(in) < 2 { + goto fail + } + + x |= uint32(in[1]&0x7f) << 7 + if in[1]&0x80 == 0 { + return x, 2 + } else if len(in) < 3 { + goto fail + } + + x |= uint32(in[2]&0x7f) << 14 + if in[2]&0x80 == 0 { + return x, 3 + } else if len(in) < 4 { + goto fail + } + + x |= uint32(in[3]&0x7f) << 21 + if in[3]&0x80 == 0 { + return x, 4 + } else if len(in) < 5 { + goto fail + } + + x |= uint32(in[4]) << 28 + if in[4] <= 0x0f { + return x, 5 + } + + overflow = -5 + +fail: + return 0, overflow +} + +// Varlong is a loop unrolled 64 bit varint decoder. The return semantics +// are the same as binary.Varint, with the added benefit that overflows +// in 10 byte encodings are handled rather than left to the user. 
+func Varlong(in []byte) (int64, int) { + x, n := uvarlong(in) + return int64((x >> 1) ^ -(x & 1)), n +} + +func uvarlong(in []byte) (uint64, int) { + var x uint64 + var overflow int + + if len(in) < 1 { + goto fail + } + + x = uint64(in[0] & 0x7f) + if in[0]&0x80 == 0 { + return x, 1 + } else if len(in) < 2 { + goto fail + } + + x |= uint64(in[1]&0x7f) << 7 + if in[1]&0x80 == 0 { + return x, 2 + } else if len(in) < 3 { + goto fail + } + + x |= uint64(in[2]&0x7f) << 14 + if in[2]&0x80 == 0 { + return x, 3 + } else if len(in) < 4 { + goto fail + } + + x |= uint64(in[3]&0x7f) << 21 + if in[3]&0x80 == 0 { + return x, 4 + } else if len(in) < 5 { + goto fail + } + + x |= uint64(in[4]&0x7f) << 28 + if in[4]&0x80 == 0 { + return x, 5 + } else if len(in) < 6 { + goto fail + } + + x |= uint64(in[5]&0x7f) << 35 + if in[5]&0x80 == 0 { + return x, 6 + } else if len(in) < 7 { + goto fail + } + + x |= uint64(in[6]&0x7f) << 42 + if in[6]&0x80 == 0 { + return x, 7 + } else if len(in) < 8 { + goto fail + } + + x |= uint64(in[7]&0x7f) << 49 + if in[7]&0x80 == 0 { + return x, 8 + } else if len(in) < 9 { + goto fail + } + + x |= uint64(in[8]&0x7f) << 56 + if in[8]&0x80 == 0 { + return x, 9 + } else if len(in) < 10 { + goto fail + } + + x |= uint64(in[9]) << 63 + if in[9] <= 0x01 { + return x, 10 + } + + overflow = -10 + +fail: + return 0, overflow +} + +// AppendVarint appends a varint encoded i to dst. +func AppendVarint(dst []byte, i int32) []byte { + return AppendUvarint(dst, uint32(i)<<1^uint32(i>>31)) +} + +// AppendUvarint appends a uvarint encoded u to dst. +func AppendUvarint(dst []byte, u uint32) []byte { + switch UvarintLen(u) { + case 5: + return append(dst, + byte(u&0x7f|0x80), + byte((u>>7)&0x7f|0x80), + byte((u>>14)&0x7f|0x80), + byte((u>>21)&0x7f|0x80), + byte(u>>28)) + case 4: + return append(dst, + byte(u&0x7f|0x80), + byte((u>>7)&0x7f|0x80), + byte((u>>14)&0x7f|0x80), + byte(u>>21)) + case 3: + return append(dst, + byte(u&0x7f|0x80), + byte((u>>7)&0x7f|0x80), + byte(u>>14)) + case 2: + return append(dst, + byte(u&0x7f|0x80), + byte(u>>7)) + case 1: + return append(dst, byte(u)) + } + return dst +} + +// AppendVarlong appends a varint encoded i to dst. 
+func AppendVarlong(dst []byte, i int64) []byte { + return appendUvarlong(dst, uint64(i)<<1^uint64(i>>63)) +} + +func appendUvarlong(dst []byte, u uint64) []byte { + switch uvarlongLen(u) { + case 10: + return append(dst, + byte(u&0x7f|0x80), + byte((u>>7)&0x7f|0x80), + byte((u>>14)&0x7f|0x80), + byte((u>>21)&0x7f|0x80), + byte((u>>28)&0x7f|0x80), + byte((u>>35)&0x7f|0x80), + byte((u>>42)&0x7f|0x80), + byte((u>>49)&0x7f|0x80), + byte((u>>56)&0x7f|0x80), + byte(u>>63)) + case 9: + return append(dst, + byte(u&0x7f|0x80), + byte((u>>7)&0x7f|0x80), + byte((u>>14)&0x7f|0x80), + byte((u>>21)&0x7f|0x80), + byte((u>>28)&0x7f|0x80), + byte((u>>35)&0x7f|0x80), + byte((u>>42)&0x7f|0x80), + byte((u>>49)&0x7f|0x80), + byte(u>>56)) + case 8: + return append(dst, + byte(u&0x7f|0x80), + byte((u>>7)&0x7f|0x80), + byte((u>>14)&0x7f|0x80), + byte((u>>21)&0x7f|0x80), + byte((u>>28)&0x7f|0x80), + byte((u>>35)&0x7f|0x80), + byte((u>>42)&0x7f|0x80), + byte(u>>49)) + case 7: + return append(dst, + byte(u&0x7f|0x80), + byte((u>>7)&0x7f|0x80), + byte((u>>14)&0x7f|0x80), + byte((u>>21)&0x7f|0x80), + byte((u>>28)&0x7f|0x80), + byte((u>>35)&0x7f|0x80), + byte(u>>42)) + case 6: + return append(dst, + byte(u&0x7f|0x80), + byte((u>>7)&0x7f|0x80), + byte((u>>14)&0x7f|0x80), + byte((u>>21)&0x7f|0x80), + byte((u>>28)&0x7f|0x80), + byte(u>>35)) + case 5: + return append(dst, + byte(u&0x7f|0x80), + byte((u>>7)&0x7f|0x80), + byte((u>>14)&0x7f|0x80), + byte((u>>21)&0x7f|0x80), + byte(u>>28)) + case 4: + return append(dst, + byte(u&0x7f|0x80), + byte((u>>7)&0x7f|0x80), + byte((u>>14)&0x7f|0x80), + byte(u>>21)) + case 3: + return append(dst, + byte(u&0x7f|0x80), + byte((u>>7)&0x7f|0x80), + byte(u>>14)) + case 2: + return append(dst, + byte(u&0x7f|0x80), + byte(u>>7)) + case 1: + return append(dst, byte(u)) + } + return dst +} + +// AppendString appends a string to dst prefixed with its int16 length. +func AppendString(dst []byte, s string) []byte { + dst = AppendInt16(dst, int16(len(s))) + return append(dst, s...) +} + +// AppendCompactString appends a string to dst prefixed with its uvarint length +// starting at 1; 0 is reserved for null, which compact strings are not +// (nullable compact ones are!). Thus, the length is the decoded uvarint - 1. +// +// For KIP-482. +func AppendCompactString(dst []byte, s string) []byte { + dst = AppendUvarint(dst, 1+uint32(len(s))) + return append(dst, s...) +} + +// AppendNullableString appends potentially nil string to dst prefixed with its +// int16 length or int16(-1) if nil. +func AppendNullableString(dst []byte, s *string) []byte { + if s == nil { + return AppendInt16(dst, -1) + } + return AppendString(dst, *s) +} + +// AppendCompactNullableString appends a potentially nil string to dst with its +// uvarint length starting at 1, with 0 indicating null. Thus, the length is +// the decoded uvarint - 1. +// +// For KIP-482. +func AppendCompactNullableString(dst []byte, s *string) []byte { + if s == nil { + return AppendUvarint(dst, 0) + } + return AppendCompactString(dst, *s) +} + +// AppendBytes appends bytes to dst prefixed with its int32 length. +func AppendBytes(dst, b []byte) []byte { + dst = AppendInt32(dst, int32(len(b))) + return append(dst, b...) +} + +// AppendCompactBytes appends bytes to dst prefixed with a its uvarint length +// starting at 1; 0 is reserved for null, which compact bytes are not (nullable +// compact ones are!). Thus, the length is the decoded uvarint - 1. +// +// For KIP-482. 
+func AppendCompactBytes(dst, b []byte) []byte { + dst = AppendUvarint(dst, 1+uint32(len(b))) + return append(dst, b...) +} + +// AppendNullableBytes appends a potentially nil slice to dst prefixed with its +// int32 length or int32(-1) if nil. +func AppendNullableBytes(dst, b []byte) []byte { + if b == nil { + return AppendInt32(dst, -1) + } + return AppendBytes(dst, b) +} + +// AppendCompactNullableBytes appends a potentially nil slice to dst with its +// uvarint length starting at 1, with 0 indicating null. Thus, the length is +// the decoded uvarint - 1. +// +// For KIP-482. +func AppendCompactNullableBytes(dst, b []byte) []byte { + if b == nil { + return AppendUvarint(dst, 0) + } + return AppendCompactBytes(dst, b) +} + +// AppendVarintString appends a string to dst prefixed with its length encoded +// as a varint. +func AppendVarintString(dst []byte, s string) []byte { + dst = AppendVarint(dst, int32(len(s))) + return append(dst, s...) +} + +// AppendVarintBytes appends a slice to dst prefixed with its length encoded as +// a varint. +func AppendVarintBytes(dst, b []byte) []byte { + if b == nil { + return AppendVarint(dst, -1) + } + dst = AppendVarint(dst, int32(len(b))) + return append(dst, b...) +} + +// AppendArrayLen appends the length of an array as an int32 to dst. +func AppendArrayLen(dst []byte, l int) []byte { + return AppendInt32(dst, int32(l)) +} + +// AppendCompactArrayLen appends the length of an array as a uvarint to dst +// as the length + 1. +// +// For KIP-482. +func AppendCompactArrayLen(dst []byte, l int) []byte { + return AppendUvarint(dst, 1+uint32(l)) +} + +// AppendNullableArrayLen appends the length of an array as an int32 to dst, +// or -1 if isNil is true. +func AppendNullableArrayLen(dst []byte, l int, isNil bool) []byte { + if isNil { + return AppendInt32(dst, -1) + } + return AppendInt32(dst, int32(l)) +} + +// AppendCompactNullableArrayLen appends the length of an array as a uvarint to +// dst as the length + 1; if isNil is true, this appends 0 as a uvarint. +// +// For KIP-482. +func AppendCompactNullableArrayLen(dst []byte, l int, isNil bool) []byte { + if isNil { + return AppendUvarint(dst, 0) + } + return AppendUvarint(dst, 1+uint32(l)) +} + +// Reader is used to decode Kafka messages. +// +// For all functions on Reader, if the reader has been invalidated, functions +// return defaults (false, 0, nil, ""). Use Complete to detect if the reader +// was invalidated or if the reader has remaining data. +type Reader struct { + Src []byte + bad bool +} + +// Bool returns a bool from the reader. +func (b *Reader) Bool() bool { + if len(b.Src) < 1 { + b.bad = true + b.Src = nil + return false + } + t := b.Src[0] != 0 // if '0', false + b.Src = b.Src[1:] + return t +} + +// Int8 returns an int8 from the reader. +func (b *Reader) Int8() int8 { + if len(b.Src) < 1 { + b.bad = true + b.Src = nil + return 0 + } + r := b.Src[0] + b.Src = b.Src[1:] + return int8(r) +} + +// Int16 returns an int16 from the reader. +func (b *Reader) Int16() int16 { + if len(b.Src) < 2 { + b.bad = true + b.Src = nil + return 0 + } + r := int16(binary.BigEndian.Uint16(b.Src)) + b.Src = b.Src[2:] + return r +} + +// Uint16 returns an uint16 from the reader. +func (b *Reader) Uint16() uint16 { + if len(b.Src) < 2 { + b.bad = true + b.Src = nil + return 0 + } + r := binary.BigEndian.Uint16(b.Src) + b.Src = b.Src[2:] + return r +} + +// Int32 returns an int32 from the reader. 
+func (b *Reader) Int32() int32 { + if len(b.Src) < 4 { + b.bad = true + b.Src = nil + return 0 + } + r := int32(binary.BigEndian.Uint32(b.Src)) + b.Src = b.Src[4:] + return r +} + +// Int64 returns an int64 from the reader. +func (b *Reader) Int64() int64 { + return int64(b.readUint64()) +} + +// Uuid returns a uuid from the reader. +func (b *Reader) Uuid() [16]byte { + var r [16]byte + copy(r[:], b.Span(16)) + return r +} + +// Float64 returns a float64 from the reader. +func (b *Reader) Float64() float64 { + return math.Float64frombits(b.readUint64()) +} + +func (b *Reader) readUint64() uint64 { + if len(b.Src) < 8 { + b.bad = true + b.Src = nil + return 0 + } + r := binary.BigEndian.Uint64(b.Src) + b.Src = b.Src[8:] + return r +} + +// Uint32 returns a uint32 from the reader. +func (b *Reader) Uint32() uint32 { + if len(b.Src) < 4 { + b.bad = true + b.Src = nil + return 0 + } + r := binary.BigEndian.Uint32(b.Src) + b.Src = b.Src[4:] + return r +} + +// Varint returns a varint int32 from the reader. +func (b *Reader) Varint() int32 { + val, n := Varint(b.Src) + if n <= 0 { + b.bad = true + b.Src = nil + return 0 + } + b.Src = b.Src[n:] + return val +} + +// Varlong returns a varlong int64 from the reader. +func (b *Reader) Varlong() int64 { + val, n := Varlong(b.Src) + if n <= 0 { + b.bad = true + b.Src = nil + return 0 + } + b.Src = b.Src[n:] + return val +} + +// Uvarint returns a uvarint encoded uint32 from the reader. +func (b *Reader) Uvarint() uint32 { + val, n := Uvarint(b.Src) + if n <= 0 { + b.bad = true + b.Src = nil + return 0 + } + b.Src = b.Src[n:] + return val +} + +// Span returns l bytes from the reader. +func (b *Reader) Span(l int) []byte { + if len(b.Src) < l || l < 0 { + b.bad = true + b.Src = nil + return nil + } + r := b.Src[:l:l] + b.Src = b.Src[l:] + return r +} + +// UnsafeString returns a Kafka string from the reader without allocating using +// the unsafe package. This must be used with care; note the string holds a +// reference to the original slice. +func (b *Reader) UnsafeString() string { + l := b.Int16() + return UnsafeString(b.Span(int(l))) +} + +// String returns a Kafka string from the reader. +func (b *Reader) String() string { + l := b.Int16() + return string(b.Span(int(l))) +} + +// UnsafeCompactString returns a Kafka compact string from the reader without +// allocating using the unsafe package. This must be used with care; note the +// string holds a reference to the original slice. +func (b *Reader) UnsafeCompactString() string { + l := int(b.Uvarint()) - 1 + return UnsafeString(b.Span(l)) +} + +// CompactString returns a Kafka compact string from the reader. +func (b *Reader) CompactString() string { + l := int(b.Uvarint()) - 1 + return string(b.Span(l)) +} + +// UnsafeNullableString returns a Kafka nullable string from the reader without +// allocating using the unsafe package. This must be used with care; note the +// string holds a reference to the original slice. +func (b *Reader) UnsafeNullableString() *string { + l := b.Int16() + if l < 0 { + return nil + } + s := UnsafeString(b.Span(int(l))) + return &s +} + +// NullableString returns a Kafka nullable string from the reader. +func (b *Reader) NullableString() *string { + l := b.Int16() + if l < 0 { + return nil + } + s := string(b.Span(int(l))) + return &s +} + +// UnsafeCompactNullableString returns a Kafka compact nullable string from the +// reader without allocating using the unsafe package. This must be used with +// care; note the string holds a reference to the original slice. 
+func (b *Reader) UnsafeCompactNullableString() *string { + l := int(b.Uvarint()) - 1 + if l < 0 { + return nil + } + s := UnsafeString(b.Span(l)) + return &s +} + +// CompactNullableString returns a Kafka compact nullable string from the +// reader. +func (b *Reader) CompactNullableString() *string { + l := int(b.Uvarint()) - 1 + if l < 0 { + return nil + } + s := string(b.Span(l)) + return &s +} + +// Bytes returns a Kafka byte array from the reader. +// +// This never returns nil. +func (b *Reader) Bytes() []byte { + l := b.Int32() + // This is not to spec, but it is not clearly documented and Microsoft + // EventHubs fails here. -1 means null, which should throw an + // exception. EventHubs uses -1 to mean "does not exist" on some + // non-nullable fields. + // + // Until EventHubs is fixed, we return an empty byte slice for null. + if l == -1 { + return []byte{} + } + return b.Span(int(l)) +} + +// CompactBytes returns a Kafka compact byte array from the reader. +// +// This never returns nil. +func (b *Reader) CompactBytes() []byte { + l := int(b.Uvarint()) - 1 + if l == -1 { // same as above: -1 should not be allowed here + return []byte{} + } + return b.Span(l) +} + +// NullableBytes returns a Kafka nullable byte array from the reader, returning +// nil as appropriate. +func (b *Reader) NullableBytes() []byte { + l := b.Int32() + if l < 0 { + return nil + } + r := b.Span(int(l)) + return r +} + +// CompactNullableBytes returns a Kafka compact nullable byte array from the +// reader, returning nil as appropriate. +func (b *Reader) CompactNullableBytes() []byte { + l := int(b.Uvarint()) - 1 + if l < 0 { + return nil + } + r := b.Span(l) + return r +} + +// ArrayLen returns a Kafka array length from the reader. +func (b *Reader) ArrayLen() int32 { + r := b.Int32() + // The min size of a Kafka type is a byte, so if we do not have + // at least the array length of bytes left, it is bad. + if len(b.Src) < int(r) { + b.bad = true + b.Src = nil + return 0 + } + return r +} + +// VarintArrayLen returns a Kafka array length from the reader. +func (b *Reader) VarintArrayLen() int32 { + r := b.Varint() + // The min size of a Kafka type is a byte, so if we do not have + // at least the array length of bytes left, it is bad. + if len(b.Src) < int(r) { + b.bad = true + b.Src = nil + return 0 + } + return r +} + +// CompactArrayLen returns a Kafka compact array length from the reader. +func (b *Reader) CompactArrayLen() int32 { + r := int32(b.Uvarint()) - 1 + // The min size of a Kafka type is a byte, so if we do not have + // at least the array length of bytes left, it is bad. + if len(b.Src) < int(r) { + b.bad = true + b.Src = nil + return 0 + } + return r +} + +// VarintBytes returns a Kafka encoded varint array from the reader, returning +// nil as appropriate. +func (b *Reader) VarintBytes() []byte { + l := b.Varint() + if l < 0 { + return nil + } + return b.Span(int(l)) +} + +// UnsafeVarintString returns a Kafka encoded varint string from the reader +// without allocating using the unsafe package. This must be used with care; +// note the string holds a reference to the original slice. +func (b *Reader) UnsafeVarintString() string { + return UnsafeString(b.VarintBytes()) +} + +// VarintString returns a Kafka encoded varint string from the reader. +func (b *Reader) VarintString() string { + return string(b.VarintBytes()) +} + +// Complete returns ErrNotEnoughData if the source ran out while decoding. 
+func (b *Reader) Complete() error { + if b.bad { + return ErrNotEnoughData + } + return nil +} + +// Ok returns true if the reader is still ok. +func (b *Reader) Ok() bool { + return !b.bad +} + +// UnsafeString returns the slice as a string using unsafe rule (6). +func UnsafeString(slice []byte) string { + var str string + strhdr := (*reflect.StringHeader)(unsafe.Pointer(&str)) //nolint:gosec // known way to convert slice to string + strhdr.Data = ((*reflect.SliceHeader)(unsafe.Pointer(&slice))).Data //nolint:gosec // known way to convert slice to string + strhdr.Len = len(slice) + return str +} diff --git a/vendor/github.com/twmb/franz-go/pkg/kerr/kerr.go b/vendor/github.com/twmb/franz-go/pkg/kerr/kerr.go new file mode 100644 index 000000000000..731a23a1975a --- /dev/null +++ b/vendor/github.com/twmb/franz-go/pkg/kerr/kerr.go @@ -0,0 +1,315 @@ +// Package kerr contains Kafka errors. +// +// The errors are undocumented to avoid duplicating the official descriptions +// that can be found at https://kafka.apache.org/protocol.html#protocol_error_codes (although, +// this code does duplicate the descriptions into the errors themselves, so the +// descriptions can be seen as the documentation). +// +// Since this package is dedicated to errors and the package is named "kerr", +// all errors elide the standard "Err" prefix. +package kerr + +import ( + "errors" + "fmt" +) + +// Error is a Kafka error. +type Error struct { + // Message is the string form of a Kafka error code + // (UNKNOWN_SERVER_ERROR, etc). + Message string + // Code is a Kafka error code. + Code int16 + // Retriable is whether the error is considered retriable by Kafka. + Retriable bool + // Description is a succinct description of what this error means. + Description string +} + +func (e *Error) Error() string { + return fmt.Sprintf("%s: %s", e.Message, e.Description) +} + +// ErrorForCode returns the error corresponding to the given error code. +// +// If the code is unknown, this returns UnknownServerError. +// If the code is 0, this returns nil. +func ErrorForCode(code int16) error { + err, exists := code2err[code] + if !exists { + return UnknownServerError + } + return err +} + +// TypedErrorForCode returns the kerr.Error corresponding to the given error +// code. +// +// If the code is unknown, this returns UnknownServerError. +// If the code is 0, this returns nil. +// +// Note that this function is provided as a simplicity function for code that +// needs to work with the *Error only, but this function comes with caveats. +// Because this can return a typed nil, passing the return of this to a +// function that accepts an error (the Go error interface), the return from +// this will never be considered a nil error. Instead, it will be an error with +// a nil internal value. +func TypedErrorForCode(code int16) *Error { + err, exists := code2err[code] + if !exists { + return UnknownServerError + } + if err == nil { + return nil + } + return err.(*Error) +} + +// IsRetriable returns whether a Kafka error is considered retriable. 
+func IsRetriable(err error) bool { + var kerr *Error + return errors.As(err, &kerr) && kerr.Retriable +} + +var ( + UnknownServerError = &Error{"UNKNOWN_SERVER_ERROR", -1, false, "The server experienced an unexpected error when processing the request."} + OffsetOutOfRange = &Error{"OFFSET_OUT_OF_RANGE", 1, false, "The requested offset is not within the range of offsets maintained by the server."} + CorruptMessage = &Error{"CORRUPT_MESSAGE", 2, true, "This message has failed its CRC checksum, exceeds the valid size, has a null key for a compacted topic, or is otherwise corrupt."} + UnknownTopicOrPartition = &Error{"UNKNOWN_TOPIC_OR_PARTITION", 3, true, "This server does not host this topic-partition."} + InvalidFetchSize = &Error{"INVALID_FETCH_SIZE", 4, false, "The requested fetch size is invalid."} + LeaderNotAvailable = &Error{"LEADER_NOT_AVAILABLE", 5, true, "There is no leader for this topic-partition as we are in the middle of a leadership election."} + NotLeaderForPartition = &Error{"NOT_LEADER_FOR_PARTITION", 6, true, "This server is not the leader for that topic-partition."} + RequestTimedOut = &Error{"REQUEST_TIMED_OUT", 7, true, "The request timed out."} + BrokerNotAvailable = &Error{"BROKER_NOT_AVAILABLE", 8, true, "The broker is not available."} + ReplicaNotAvailable = &Error{"REPLICA_NOT_AVAILABLE", 9, true, "The replica is not available for the requested topic-partition."} + MessageTooLarge = &Error{"MESSAGE_TOO_LARGE", 10, false, "The request included a message larger than the max message size the server will accept."} + StaleControllerEpoch = &Error{"STALE_CONTROLLER_EPOCH", 11, false, "The controller moved to another broker."} + OffsetMetadataTooLarge = &Error{"OFFSET_METADATA_TOO_LARGE", 12, false, "The metadata field of the offset request was too large."} + NetworkException = &Error{"NETWORK_EXCEPTION", 13, true, "The server disconnected before a response was received."} + CoordinatorLoadInProgress = &Error{"COORDINATOR_LOAD_IN_PROGRESS", 14, true, "The coordinator is loading and hence can't process requests."} + CoordinatorNotAvailable = &Error{"COORDINATOR_NOT_AVAILABLE", 15, true, "The coordinator is not available."} + NotCoordinator = &Error{"NOT_COORDINATOR", 16, true, "This is not the correct coordinator."} + InvalidTopicException = &Error{"INVALID_TOPIC_EXCEPTION", 17, false, "The request attempted to perform an operation on an invalid topic."} + RecordListTooLarge = &Error{"RECORD_LIST_TOO_LARGE", 18, false, "The request included message batch larger than the configured segment size on the server."} + NotEnoughReplicas = &Error{"NOT_ENOUGH_REPLICAS", 19, true, "Messages are rejected since there are fewer in-sync replicas than required."} + NotEnoughReplicasAfterAppend = &Error{"NOT_ENOUGH_REPLICAS_AFTER_APPEND", 20, true, "Messages are written to the log, but to fewer in-sync replicas than required."} + InvalidRequiredAcks = &Error{"INVALID_REQUIRED_ACKS", 21, false, "Produce request specified an invalid value for required acks."} + IllegalGeneration = &Error{"ILLEGAL_GENERATION", 22, false, "Specified group generation id is not valid."} + InconsistentGroupProtocol = &Error{"INCONSISTENT_GROUP_PROTOCOL", 23, false, "The group member's supported protocols are incompatible with those of existing members or first group member tried to join with empty protocol type or empty protocol list."} + InvalidGroupID = &Error{"INVALID_GROUP_ID", 24, false, "The configured groupID is invalid."} + UnknownMemberID = &Error{"UNKNOWN_MEMBER_ID", 25, false, "The coordinator is not 
aware of this member."} + InvalidSessionTimeout = &Error{"INVALID_SESSION_TIMEOUT", 26, false, "The session timeout is not within the range allowed by the broker (as configured by group.min.session.timeout.ms and group.max.session.timeout.ms)."} + RebalanceInProgress = &Error{"REBALANCE_IN_PROGRESS", 27, false, "The group is rebalancing, so a rejoin is needed."} + InvalidCommitOffsetSize = &Error{"INVALID_COMMIT_OFFSET_SIZE", 28, false, "The committing offset data size is not valid."} + TopicAuthorizationFailed = &Error{"TOPIC_AUTHORIZATION_FAILED", 29, false, "Not authorized to access topics: [Topic authorization failed.]"} + GroupAuthorizationFailed = &Error{"GROUP_AUTHORIZATION_FAILED", 30, false, "Not authorized to access group: Group authorization failed."} + ClusterAuthorizationFailed = &Error{"CLUSTER_AUTHORIZATION_FAILED", 31, false, "Cluster authorization failed."} + InvalidTimestamp = &Error{"INVALID_TIMESTAMP", 32, false, "The timestamp of the message is out of acceptable range."} + UnsupportedSaslMechanism = &Error{"UNSUPPORTED_SASL_MECHANISM", 33, false, "The broker does not support the requested SASL mechanism."} + IllegalSaslState = &Error{"ILLEGAL_SASL_STATE", 34, false, "Request is not valid given the current SASL state."} + UnsupportedVersion = &Error{"UNSUPPORTED_VERSION", 35, false, "The version of API is not supported."} + TopicAlreadyExists = &Error{"TOPIC_ALREADY_EXISTS", 36, false, "Topic with this name already exists."} + InvalidPartitions = &Error{"INVALID_PARTITIONS", 37, false, "Number of partitions is below 1."} + InvalidReplicationFactor = &Error{"INVALID_REPLICATION_FACTOR", 38, false, "Replication factor is below 1 or larger than the number of available brokers."} + InvalidReplicaAssignment = &Error{"INVALID_REPLICA_ASSIGNMENT", 39, false, "Replica assignment is invalid."} + InvalidConfig = &Error{"INVALID_CONFIG", 40, false, "Configuration is invalid."} + NotController = &Error{"NOT_CONTROLLER", 41, true, "This is not the correct controller for this cluster."} + InvalidRequest = &Error{"INVALID_REQUEST", 42, false, "This most likely occurs because of a request being malformed by the client library or the message was sent to an incompatible broker. 
See the broker logs for more details."} + UnsupportedForMessageFormat = &Error{"UNSUPPORTED_FOR_MESSAGE_FORMAT", 43, false, "The message format version on the broker does not support the request."} + PolicyViolation = &Error{"POLICY_VIOLATION", 44, false, "Request parameters do not satisfy the configured policy."} + OutOfOrderSequenceNumber = &Error{"OUT_OF_ORDER_SEQUENCE_NUMBER", 45, false, "The broker received an out of order sequence number."} + DuplicateSequenceNumber = &Error{"DUPLICATE_SEQUENCE_NUMBER", 46, false, "The broker received a duplicate sequence number."} + InvalidProducerEpoch = &Error{"INVALID_PRODUCER_EPOCH", 47, false, "Producer attempted an operation with an old epoch."} + InvalidTxnState = &Error{"INVALID_TXN_STATE", 48, false, "The producer attempted a transactional operation in an invalid state."} + InvalidProducerIDMapping = &Error{"INVALID_PRODUCER_ID_MAPPING", 49, false, "The producer attempted to use a producer id which is not currently assigned to its transactional id."} + InvalidTransactionTimeout = &Error{"INVALID_TRANSACTION_TIMEOUT", 50, false, "The transaction timeout is larger than the maximum value allowed by the broker (as configured by transaction.max.timeout.ms)."} + ConcurrentTransactions = &Error{"CONCURRENT_TRANSACTIONS", 51, false, "The producer attempted to update a transaction while another concurrent operation on the same transaction was ongoing."} + TransactionCoordinatorFenced = &Error{"TRANSACTION_COORDINATOR_FENCED", 52, false, "Indicates that the transaction coordinator sending a WriteTxnMarker is no longer the current coordinator for a given producer."} + TransactionalIDAuthorizationFailed = &Error{"TRANSACTIONAL_ID_AUTHORIZATION_FAILED", 53, false, "Transactional ID authorization failed."} + SecurityDisabled = &Error{"SECURITY_DISABLED", 54, false, "Security features are disabled."} + OperationNotAttempted = &Error{"OPERATION_NOT_ATTEMPTED", 55, false, "The broker did not attempt to execute this operation. This may happen for batched RPCs where some operations in the batch failed, causing the broker to respond without trying the rest."} + KafkaStorageError = &Error{"KAFKA_STORAGE_ERROR", 56, true, "Disk error when trying to access log file on the disk."} + LogDirNotFound = &Error{"LOG_DIR_NOT_FOUND", 57, false, "The user-specified log directory is not found in the broker config."} + SaslAuthenticationFailed = &Error{"SASL_AUTHENTICATION_FAILED", 58, false, "SASL Authentication failed."} + UnknownProducerID = &Error{"UNKNOWN_PRODUCER_ID", 59, false, "This exception is raised by the broker if it could not locate the producer metadata associated with the producerID in question. This could happen if, for instance, the producer's records were deleted because their retention time had elapsed. 
Once the last records of the producerID are removed, the producer's metadata is removed from the broker, and future appends by the producer will return this exception."} + ReassignmentInProgress = &Error{"REASSIGNMENT_IN_PROGRESS", 60, false, "A partition reassignment is in progress."} + DelegationTokenAuthDisabled = &Error{"DELEGATION_TOKEN_AUTH_DISABLED", 61, false, "Delegation Token feature is not enabled."} + DelegationTokenNotFound = &Error{"DELEGATION_TOKEN_NOT_FOUND", 62, false, "Delegation Token is not found on server."} + DelegationTokenOwnerMismatch = &Error{"DELEGATION_TOKEN_OWNER_MISMATCH", 63, false, "Specified Principal is not valid Owner/Renewer."} + DelegationTokenRequestNotAllowed = &Error{"DELEGATION_TOKEN_REQUEST_NOT_ALLOWED", 64, false, "Delegation Token requests are not allowed on PLAINTEXT/1-way SSL channels and on delegation token authenticated channels."} + DelegationTokenAuthorizationFailed = &Error{"DELEGATION_TOKEN_AUTHORIZATION_FAILED", 65, false, "Delegation Token authorization failed."} + DelegationTokenExpired = &Error{"DELEGATION_TOKEN_EXPIRED", 66, false, "Delegation Token is expired."} + InvalidPrincipalType = &Error{"INVALID_PRINCIPAL_TYPE", 67, false, "Supplied principalType is not supported."} + NonEmptyGroup = &Error{"NON_EMPTY_GROUP", 68, false, "The group is not empty."} + GroupIDNotFound = &Error{"GROUP_ID_NOT_FOUND", 69, false, "The group id does not exist."} + FetchSessionIDNotFound = &Error{"FETCH_SESSION_ID_NOT_FOUND", 70, true, "The fetch session ID was not found."} + InvalidFetchSessionEpoch = &Error{"INVALID_FETCH_SESSION_EPOCH", 71, true, "The fetch session epoch is invalid."} + ListenerNotFound = &Error{"LISTENER_NOT_FOUND", 72, true, "There is no listener on the leader broker that matches the listener on which metadata request was processed."} + TopicDeletionDisabled = &Error{"TOPIC_DELETION_DISABLED", 73, false, "Topic deletion is disabled."} + FencedLeaderEpoch = &Error{"FENCED_LEADER_EPOCH", 74, true, "The leader epoch in the request is older than the epoch on the broker"} + UnknownLeaderEpoch = &Error{"UNKNOWN_LEADER_EPOCH", 75, true, "The leader epoch in the request is newer than the epoch on the broker"} + UnsupportedCompressionType = &Error{"UNSUPPORTED_COMPRESSION_TYPE", 76, false, "The requesting client does not support the compression type of given partition."} + StaleBrokerEpoch = &Error{"STALE_BROKER_EPOCH", 77, false, "Broker epoch has changed"} + OffsetNotAvailable = &Error{"OFFSET_NOT_AVAILABLE", 78, true, "The leader high watermark has not caught up from a recent leader election so the offsets cannot be guaranteed to be monotonically increasing"} + MemberIDRequired = &Error{"MEMBER_ID_REQUIRED", 79, false, "The group member needs to have a valid member id before actually entering a consumer group"} + PreferredLeaderNotAvailable = &Error{"PREFERRED_LEADER_NOT_AVAILABLE", 80, true, "The preferred leader was not available"} + GroupMaxSizeReached = &Error{"GROUP_MAX_SIZE_REACHED", 81, false, "The consumer group has reached its max size"} + FencedInstanceID = &Error{"FENCED_INSTANCE_ID", 82, false, "The broker rejected this static consumer since another consumer with the same group.instance.id has registered with a different member.id."} + EligibleLeadersNotAvailable = &Error{"ELIGIBLE_LEADERS_NOT_AVAILABLE", 83, true, "Eligible topic partition leaders are not available"} + ElectionNotNeeded = &Error{"ELECTION_NOT_NEEDED", 84, true, "Leader election not needed for topic partition"} + NoReassignmentInProgress = 
&Error{"NO_REASSIGNMENT_IN_PROGRESS", 85, false, "No partition reassignment is in progress."} + GroupSubscribedToTopic = &Error{"GROUP_SUBSCRIBED_TO_TOPIC", 86, false, "Deleting offsets of a topic is forbidden while the consumer group is actively subscribed to it."} + InvalidRecord = &Error{"INVALID_RECORD", 87, false, "This record has failed the validation on broker and hence be rejected."} + UnstableOffsetCommit = &Error{"UNSTABLE_OFFSET_COMMIT", 88, true, "There are unstable offsets that need to be cleared."} + ThrottlingQuotaExceeded = &Error{"THROTTLING_QUOTA_EXCEEDED", 89, true, "The throttling quota has been exceeded."} + ProducerFenced = &Error{"PRODUCER_FENCED", 90, false, "There is a newer producer with the same transactionalId which fences the current one."} + ResourceNotFound = &Error{"RESOURCE_NOT_FOUND", 91, false, "A request illegally referred to a resource that does not exist."} + DuplicateResource = &Error{"DUPLICATE_RESOURCE", 92, false, "A request illegally referred to the same resource twice."} + UnacceptableCredential = &Error{"UNACCEPTABLE_CREDENTIAL", 93, false, "Requested credential would not meet criteria for acceptability."} + InconsistentVoterSet = &Error{"INCONSISTENT_VOTER_SET", 94, false, "Indicates that either the sender or recipient of a voter-only request is not one of the expected voters."} + InvalidUpdateVersion = &Error{"INVALID_UPDATE_VERSION", 95, false, "The given update version was invalid."} + FeatureUpdateFailed = &Error{"FEATURE_UPDATE_FAILED", 96, false, "Unable to update finalized features due to an unexpected server error."} + PrincipalDeserializationFailure = &Error{"PRINCIPAL_DESERIALIZATION_FAILURE", 97, false, "Request principal deserialization failed during forwarding. This indicates an internal error on the broker cluster security setup."} + SnapshotNotFound = &Error{"SNAPSHOT_NOT_FOUND", 98, false, "Requested snapshot was not found."} + PositionOutOfRange = &Error{"POSITION_OUT_OF_RANGE", 99, false, "Requested position is not greater than or equal to zero, and less than the size of the snapshot."} + UnknownTopicID = &Error{"UNKNOWN_TOPIC_ID", 100, true, "This server does not host this topic ID."} + DuplicateBrokerRegistration = &Error{"DUPLICATE_BROKER_REGISTRATION", 101, false, "This broker ID is already in use."} + BrokerIDNotRegistered = &Error{"BROKER_ID_NOT_REGISTERED", 102, false, "The given broker ID was not registered."} + InconsistentTopicID = &Error{"INCONSISTENT_TOPIC_ID", 103, true, "The log's topic ID did not match the topic ID in the request."} + InconsistentClusterID = &Error{"INCONSISTENT_CLUSTER_ID", 104, false, "The clusterId in the request does not match that found on the server."} + TransactionalIDNotFound = &Error{"TRANSACTIONAL_ID_NOT_FOUND", 105, false, "The transactionalId could not be found."} + FetchSessionTopicIDError = &Error{"FETCH_SESSION_TOPIC_ID_ERROR", 106, true, "The fetch session encountered inconsistent topic ID usage."} + IneligibleReplica = &Error{"INELIGIBLE_REPLICA", 107, false, "The new ISR contains at least one ineligible replica."} + NewLeaderElected = &Error{"NEW_LEADER_ELECTED", 108, false, "The AlterPartition request successfully updated the partition state but the leader has changed."} + OffsetMovedToTieredStorage = &Error{"OFFSET_MOVED_TO_TIERED_STORAGE", 109, false, "The requested offset is moved to tiered storage."} + FencedMemberEpoch = &Error{"FENCED_MEMBER_EPOCH", 110, false, "The member epoch is fenced by the group coordinator. 
The member must abandon all its partitions and rejoin."} + UnreleasedInstanceID = &Error{"UNRELEASED_INSTANCE_ID", 111, false, "The instance ID is still used by another member in the consumer group. That member must leave first."} + UnsupportedAssignor = &Error{"UNSUPPORTED_ASSIGNOR", 112, false, "The assignor or its version range is not supported by the consumer group."} + StaleMemberEpoch = &Error{"STALE_MEMBER_EPOCH", 113, false, "The member epoch is stale. The member must retry after receiving its updated member epoch via the ConsumerGroupHeartbeat API."} + MismatchedEndpointType = &Error{"MISMATCHED_ENDPOINT_TYPE", 114, false, "The request was sent to an endpoint of the wrong type."} + UnsupportedEndpointType = &Error{"UNSUPPORTED_ENDPOINT_TYPE", 115, false, "This endpoint type is not supported yet."} + UnknownControllerID = &Error{"UNKNOWN_CONTROLLER_ID", 116, false, "This controller ID is not known"} +) + +var code2err = map[int16]error{ + -1: UnknownServerError, + 0: nil, + 1: OffsetOutOfRange, + 2: CorruptMessage, + 3: UnknownTopicOrPartition, + 4: InvalidFetchSize, + 5: LeaderNotAvailable, + 6: NotLeaderForPartition, + 7: RequestTimedOut, + 8: BrokerNotAvailable, + 9: ReplicaNotAvailable, + 10: MessageTooLarge, + 11: StaleControllerEpoch, + 12: OffsetMetadataTooLarge, + 13: NetworkException, + 14: CoordinatorLoadInProgress, + 15: CoordinatorNotAvailable, + 16: NotCoordinator, + 17: InvalidTopicException, + 18: RecordListTooLarge, + 19: NotEnoughReplicas, + 20: NotEnoughReplicasAfterAppend, + 21: InvalidRequiredAcks, + 22: IllegalGeneration, + 23: InconsistentGroupProtocol, + 24: InvalidGroupID, + 25: UnknownMemberID, + 26: InvalidSessionTimeout, + 27: RebalanceInProgress, + 28: InvalidCommitOffsetSize, + 29: TopicAuthorizationFailed, + 30: GroupAuthorizationFailed, + 31: ClusterAuthorizationFailed, + 32: InvalidTimestamp, + 33: UnsupportedSaslMechanism, + 34: IllegalSaslState, + 35: UnsupportedVersion, + 36: TopicAlreadyExists, + 37: InvalidPartitions, + 38: InvalidReplicationFactor, + 39: InvalidReplicaAssignment, + 40: InvalidConfig, + 41: NotController, + 42: InvalidRequest, + 43: UnsupportedForMessageFormat, + 44: PolicyViolation, + 45: OutOfOrderSequenceNumber, + 46: DuplicateSequenceNumber, + 47: InvalidProducerEpoch, + 48: InvalidTxnState, + 49: InvalidProducerIDMapping, + 50: InvalidTransactionTimeout, + 51: ConcurrentTransactions, + 52: TransactionCoordinatorFenced, + 53: TransactionalIDAuthorizationFailed, + 54: SecurityDisabled, + 55: OperationNotAttempted, + 56: KafkaStorageError, + 57: LogDirNotFound, + 58: SaslAuthenticationFailed, + 59: UnknownProducerID, + 60: ReassignmentInProgress, + 61: DelegationTokenAuthDisabled, + 62: DelegationTokenNotFound, + 63: DelegationTokenOwnerMismatch, + 64: DelegationTokenRequestNotAllowed, + 65: DelegationTokenAuthorizationFailed, + 66: DelegationTokenExpired, + 67: InvalidPrincipalType, + 68: NonEmptyGroup, + 69: GroupIDNotFound, + 70: FetchSessionIDNotFound, + 71: InvalidFetchSessionEpoch, + 72: ListenerNotFound, + 73: TopicDeletionDisabled, + 74: FencedLeaderEpoch, + 75: UnknownLeaderEpoch, + 76: UnsupportedCompressionType, + 77: StaleBrokerEpoch, + 78: OffsetNotAvailable, + 79: MemberIDRequired, + 80: PreferredLeaderNotAvailable, + 81: GroupMaxSizeReached, + 82: FencedInstanceID, + 83: EligibleLeadersNotAvailable, + 84: ElectionNotNeeded, + 85: NoReassignmentInProgress, + 86: GroupSubscribedToTopic, + 87: InvalidRecord, + 88: UnstableOffsetCommit, + 89: ThrottlingQuotaExceeded, + 90: ProducerFenced, + 91: ResourceNotFound, + 
92: DuplicateResource, + 93: UnacceptableCredential, + 94: InconsistentVoterSet, + 95: InvalidUpdateVersion, + 96: FeatureUpdateFailed, + 97: PrincipalDeserializationFailure, + 98: SnapshotNotFound, + 99: PositionOutOfRange, + 100: UnknownTopicID, + 101: DuplicateBrokerRegistration, + 102: BrokerIDNotRegistered, + 103: InconsistentTopicID, + 104: InconsistentClusterID, + 105: TransactionalIDNotFound, + 106: FetchSessionTopicIDError, + 107: IneligibleReplica, + 108: NewLeaderElected, + 109: OffsetMovedToTieredStorage, // KIP-405, v3.5 + 110: FencedMemberEpoch, // KIP-848, released unstable in v3.6, stable in 3.7 + 111: UnreleasedInstanceID, // "" + 112: UnsupportedAssignor, // "" + 113: StaleMemberEpoch, // "" + 114: MismatchedEndpointType, // KIP-919, v3.7 + 115: UnsupportedEndpointType, // "" + 116: UnknownControllerID, // "" + +} diff --git a/vendor/github.com/twmb/franz-go/pkg/kgo/atomic_maybe_work.go b/vendor/github.com/twmb/franz-go/pkg/kgo/atomic_maybe_work.go new file mode 100644 index 000000000000..bfdd3c1deb71 --- /dev/null +++ b/vendor/github.com/twmb/franz-go/pkg/kgo/atomic_maybe_work.go @@ -0,0 +1,76 @@ +package kgo + +import "sync/atomic" + +const ( + stateUnstarted = iota + stateWorking + stateContinueWorking +) + +type workLoop struct{ state atomicU32 } + +// maybeBegin returns whether a work loop should begin. +func (l *workLoop) maybeBegin() bool { + var state uint32 + var done bool + for !done { + switch state = l.state.Load(); state { + case stateUnstarted: + done = l.state.CompareAndSwap(state, stateWorking) + state = stateWorking + case stateWorking: + done = l.state.CompareAndSwap(state, stateContinueWorking) + state = stateContinueWorking + case stateContinueWorking: + done = true + } + } + + return state == stateWorking +} + +// maybeFinish demotes loop's internal state and returns whether work should +// keep going. This function should be called before looping to continue +// work. +// +// If again is true, this will avoid demoting from working to not +// working. Again would be true if the loop knows it should continue working; +// calling this function is necessary even in this case to update loop's +// internal state. +// +// This function is a no-op if the loop is already finished, but generally, +// since the loop itself calls MaybeFinish after it has been started, this +// should never be called if the loop is unstarted. +func (l *workLoop) maybeFinish(again bool) bool { + switch state := l.state.Load(); state { + // Working: + // If again, we know we should continue; keep our state. + // If not again, we try to downgrade state and stop. + // If we cannot, then something slipped in to say keep going. + case stateWorking: + if !again { + again = !l.state.CompareAndSwap(state, stateUnstarted) + } + // Continue: demote ourself and run again no matter what. + case stateContinueWorking: + l.state.Store(stateWorking) + again = true + } + + return again +} + +func (l *workLoop) hardFinish() { + l.state.Store(stateUnstarted) +} + +// lazyI32 is used in a few places where we want atomics _sometimes_. Some +// uses do not need to be atomic (notably, setup), and we do not want the +// noCopy guard. +// +// Specifically, this is used for a few int32 settings in the config. 
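+// A minimal usage sketch (the field name below is hypothetical, not from
+// this file): setup can assign a lazyI32 directly, e.g.
+// cfg.someI32Limit = lazyI32(1 << 20), while later concurrent readers and
+// writers go through load() / store(), which simply wrap
+// atomic.LoadInt32 / atomic.StoreInt32 on the underlying int32.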
+type lazyI32 int32 + +func (v *lazyI32) store(s int32) { atomic.StoreInt32((*int32)(v), s) } +func (v *lazyI32) load() int32 { return atomic.LoadInt32((*int32)(v)) } diff --git a/vendor/github.com/twmb/franz-go/pkg/kgo/broker.go b/vendor/github.com/twmb/franz-go/pkg/kgo/broker.go new file mode 100644 index 000000000000..c3d5a9a75085 --- /dev/null +++ b/vendor/github.com/twmb/franz-go/pkg/kgo/broker.go @@ -0,0 +1,1507 @@ +package kgo + +import ( + "context" + "crypto/tls" + "encoding/binary" + "errors" + "fmt" + "io" + "math" + "math/rand" + "net" + "os" + "strconv" + "strings" + "sync" + "sync/atomic" + "time" + + "github.com/twmb/franz-go/pkg/kbin" + "github.com/twmb/franz-go/pkg/kerr" + "github.com/twmb/franz-go/pkg/kmsg" + "github.com/twmb/franz-go/pkg/sasl" +) + +type pinReq struct { + kmsg.Request + min int16 + max int16 + pinMin bool + pinMax bool +} + +func (p *pinReq) SetVersion(v int16) { + if p.pinMin && v < p.min { + v = p.min + } + if p.pinMax && v > p.max { + v = p.max + } + p.Request.SetVersion(v) +} + +type promisedReq struct { + ctx context.Context + req kmsg.Request + promise func(kmsg.Response, error) + enqueue time.Time // used to calculate writeWait +} + +type promisedResp struct { + ctx context.Context + + corrID int32 + // With flexible headers, we skip tags at the end of the response + // header for now because they're currently unused. However, the + // ApiVersions response uses v0 response header (no tags) even if the + // response body has flexible versions. This is done in support of the + // v0 fallback logic that allows for indexing into an exact offset. + // Thus, for ApiVersions specifically, this is false even if the + // request is flexible. + // + // As a side note, this note was not mentioned in KIP-482 which + // introduced flexible versions, and was mentioned in passing in + // KIP-511 which made ApiVersion flexible, so discovering what was + // wrong was not too fun ("Note that ApiVersionsResponse is flexible + // version but the response header is not flexible" is *it* in the + // entire KIP.) + // + // To see the version pinning, look at the code generator function + // generateHeaderVersion in + // generator/src/main/java/org/apache/kafka/message/ApiMessageTypeGenerator.java + flexibleHeader bool + + resp kmsg.Response + promise func(kmsg.Response, error) + readTimeout time.Duration + + // The following block is used for the read / e2e hooks. + bytesWritten int + writeWait time.Duration + timeToWrite time.Duration + readEnqueue time.Time +} + +// NodeName returns the name of a node, given the kgo internal node ID. +// +// Internally, seed brokers are stored with very negative node IDs, and these +// node IDs are visible in the BrokerMetadata struct. You can use NodeName to +// convert the negative node ID into "seed_#". Brokers discovered through +// metadata responses have standard non-negative numbers and this function just +// returns the number as a string. +func NodeName(nodeID int32) string { + return logID(nodeID) +} + +func logID(id int32) string { + if id >= -10 { + return strconv.FormatInt(int64(id), 10) + } + return "seed_" + strconv.FormatInt(int64(id)-math.MinInt32, 10) +} + +// BrokerMetadata is metadata for a broker. +// +// This struct mirrors kmsg.MetadataResponseBroker. +type BrokerMetadata struct { + // NodeID is the broker node ID. + // + // Seed brokers will have very negative IDs; kgo does not try to map + // seed brokers to loaded brokers. You can use NodeName to convert + // the seed node ID into a formatted string. 
+ NodeID int32 + + // Port is the port of the broker. + Port int32 + + // Host is the hostname of the broker. + Host string + + // Rack is an optional rack of the broker. It is invalid to modify this + // field. + // + // Seed brokers will not have a rack. + Rack *string + + _ struct{} // allow us to add fields later +} + +func (me BrokerMetadata) equals(other kmsg.MetadataResponseBroker) bool { + return me.NodeID == other.NodeID && + me.Port == other.Port && + me.Host == other.Host && + (me.Rack == nil && other.Rack == nil || + me.Rack != nil && other.Rack != nil && *me.Rack == *other.Rack) +} + +// broker manages the concept how a client would interact with a broker. +type broker struct { + cl *Client + + addr string // net.JoinHostPort(meta.Host, meta.Port) + meta BrokerMetadata + + // versions tracks the first load of an ApiVersions. We store this + // after the first connect, which helps speed things up on future + // reconnects (across any of the three broker connections) because we + // will never look up API versions for this broker again. + versions atomic.Value // *brokerVersions + + // The cxn fields each manage a single tcp connection to one broker. + // Each field is managed serially in handleReqs. This means that only + // one write can happen at a time, regardless of which connection the + // write goes to, but the write is expected to be fast whereas the wait + // for the response is expected to be slow. + // + // Produce requests go to cxnProduce, fetch to cxnFetch, join/sync go + // to cxnGroup, anything with TimeoutMillis goes to cxnSlow, and + // everything else goes to cxnNormal. + cxnNormal *brokerCxn + cxnProduce *brokerCxn + cxnFetch *brokerCxn + cxnGroup *brokerCxn + cxnSlow *brokerCxn + + reapMu sync.Mutex // held when modifying a brokerCxn + + // reqs manages incoming message requests. + reqs ringReq + // dead is an atomic so a backed up reqs cannot block broker stoppage. + dead atomicBool +} + +// brokerVersions is loaded once (and potentially a few times concurrently if +// multiple connections are opening at once) and then forever stored for a +// broker. +type brokerVersions struct { + versions [kmsg.MaxKey + 1]int16 +} + +func newBrokerVersions() *brokerVersions { + var v brokerVersions + for i := range &v.versions { + v.versions[i] = -1 + } + return &v +} + +func (*brokerVersions) len() int { return kmsg.MaxKey + 1 } + +func (b *broker) loadVersions() *brokerVersions { + loaded := b.versions.Load() + if loaded == nil { + return nil + } + return loaded.(*brokerVersions) +} + +func (b *broker) storeVersions(v *brokerVersions) { b.versions.Store(v) } + +const unknownControllerID = -1 + +var unknownBrokerMetadata = BrokerMetadata{ + NodeID: -1, +} + +// broker IDs are all positive, but Kafka uses -1 to signify unknown +// controllers. To avoid issues where a client broker ID map knows of +// a -1 ID controller, we start unknown seeds at MinInt32. +func unknownSeedID(seedNum int) int32 { + return int32(math.MinInt32 + seedNum) +} + +func (cl *Client) newBroker(nodeID int32, host string, port int32, rack *string) *broker { + return &broker{ + cl: cl, + + addr: net.JoinHostPort(host, strconv.Itoa(int(port))), + meta: BrokerMetadata{ + NodeID: nodeID, + Host: host, + Port: port, + Rack: rack, + }, + } +} + +// stopForever permanently disables this broker. 
+func (b *broker) stopForever() {
+ if b.dead.Swap(true) {
+ return
+ }
+
+ b.reqs.die() // no more pushing
+
+ b.reapMu.Lock()
+ defer b.reapMu.Unlock()
+
+ b.cxnNormal.die()
+ b.cxnProduce.die()
+ b.cxnFetch.die()
+ b.cxnGroup.die()
+ b.cxnSlow.die()
+}
+
+// do issues a request to the broker, eventually calling the promise
+// once the request either fails or is responded to (with failure or not).
+//
+// The promise will block broker processing.
+func (b *broker) do(
+ ctx context.Context,
+ req kmsg.Request,
+ promise func(kmsg.Response, error),
+) {
+ pr := promisedReq{ctx, req, promise, time.Now()}
+
+ first, dead := b.reqs.push(pr)
+
+ if first {
+ go b.handleReqs(pr)
+ } else if dead {
+ promise(nil, errChosenBrokerDead)
+ }
+}
+
+// waitResp runs a req, waits for the resp and returns the resp and err.
+func (b *broker) waitResp(ctx context.Context, req kmsg.Request) (kmsg.Response, error) {
+ var resp kmsg.Response
+ var err error
+ done := make(chan struct{})
+ wait := func(kresp kmsg.Response, kerr error) {
+ resp, err = kresp, kerr
+ close(done)
+ }
+ b.do(ctx, req, wait)
+ <-done
+ return resp, err
+}
+
+func (b *broker) handleReqs(pr promisedReq) {
+ var more, dead bool
+start:
+ if dead {
+ pr.promise(nil, errChosenBrokerDead)
+ } else {
+ b.handleReq(pr)
+ }
+
+ pr, more, dead = b.reqs.dropPeek()
+ if more {
+ goto start
+ }
+}
+
+func (b *broker) handleReq(pr promisedReq) {
+ req := pr.req
+ var cxn *brokerCxn
+ var retriedOnNewConnection bool
+start:
+ {
+ var err error
+ if cxn, err = b.loadConnection(pr.ctx, req); err != nil {
+ // It is rare, but it is possible that the broker has
+ // an immediate issue on a new connection. We retry
+ // once.
+ if isRetryableBrokerErr(err) && !retriedOnNewConnection {
+ retriedOnNewConnection = true
+ goto start
+ }
+ pr.promise(nil, err)
+ return
+ }
+ }
+
+ v := b.loadVersions()
+
+ if int(req.Key()) > v.len() || b.cl.cfg.maxVersions != nil && !b.cl.cfg.maxVersions.HasKey(req.Key()) {
+ pr.promise(nil, errUnknownRequestKey)
+ return
+ }
+
+ // If v.versions[0] is non-negative, then we loaded API
+ // versions. If the version for this request is negative, we
+ // know the broker cannot handle this request.
+ if v.versions[0] >= 0 && v.versions[req.Key()] < 0 {
+ pr.promise(nil, errBrokerTooOld)
+ return
+ }
+
+ ourMax := req.MaxVersion()
+ if b.cl.cfg.maxVersions != nil {
+ userMax, _ := b.cl.cfg.maxVersions.LookupMaxKeyVersion(req.Key()) // we validated HasKey above
+ if userMax < ourMax {
+ ourMax = userMax
+ }
+ }
+
+ // If brokerMax is negative at this point, we have no api
+ // versions because the client is pinned pre 0.10.0 and we
+ // stick with our max.
+ version := ourMax
+ if brokerMax := v.versions[req.Key()]; brokerMax >= 0 && brokerMax < ourMax {
+ version = brokerMax
+ }
+
+ minVersion := int16(-1)
+
+ // If the version now (after potential broker downgrading) is
+ // lower than we desire, we fail the request because the broker
+ // is too old.
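+ // As a concrete walk-through of the negotiation above, with made-up
+ // numbers: if this client's max for a key is 12, the user's
+ // MaxVersions caps it at 9, and the broker advertises 7, version
+ // resolves to 7; if MinVersions then demands at least 8 for that key,
+ // the checks below fail the request with errBrokerTooOld rather than
+ // sending something the broker cannot parse.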
+ if b.cl.cfg.minVersions != nil { + minVersion, _ = b.cl.cfg.minVersions.LookupMaxKeyVersion(req.Key()) + if minVersion > -1 && version < minVersion { + pr.promise(nil, errBrokerTooOld) + return + } + } + + req.SetVersion(version) // always go for highest version + setVersion := req.GetVersion() + if minVersion > -1 && setVersion < minVersion { + pr.promise(nil, fmt.Errorf("request key %d version returned %d below the user defined min of %d", req.Key(), setVersion, minVersion)) + return + } + if version < setVersion { + // If we want to set an old version, but the request is pinned + // high, we need to fail with errBrokerTooOld. The broker wants + // an old version, we want a high version. We rely on this + // error in backcompat request sharding. + pr.promise(nil, errBrokerTooOld) + return + } + + if !cxn.expiry.IsZero() && time.Now().After(cxn.expiry) { + // If we are after the reauth time, try to reauth. We + // can only have an expiry if we went the authenticate + // flow, so we know we are authenticating again. + // + // Some implementations (AWS) occasionally fail for + // unclear reasons (principals change, somehow). If + // we receive SASL_AUTHENTICATION_FAILED, we retry + // once on a new connection. See #249. + // + // For KIP-368. + cxn.cl.cfg.logger.Log(LogLevelDebug, "sasl expiry limit reached, reauthenticating", "broker", logID(cxn.b.meta.NodeID)) + if err := cxn.sasl(); err != nil { + cxn.die() + if errors.Is(err, kerr.SaslAuthenticationFailed) && !retriedOnNewConnection { + cxn.cl.cfg.logger.Log(LogLevelDebug, "sasl reauth failed, retrying once on new connection", "broker", logID(cxn.b.meta.NodeID), "err", err) + retriedOnNewConnection = true + goto start + } + pr.promise(nil, err) + return + } + } + + // Juuuust before we issue the request, we check if it was + // canceled. We could have previously tried this request, which + // then failed and retried. + // + // Checking the context was canceled here ensures we do not + // loop. We could be more precise with error tracking, though. + select { + case <-pr.ctx.Done(): + pr.promise(nil, pr.ctx.Err()) + return + default: + } + + // Produce requests (and only produce requests) can be written + // without receiving a reply. If we see required acks is 0, + // then we immediately call the promise with no response. + // + // We provide a non-nil *kmsg.ProduceResponse for + // *kmsg.ProduceRequest just to ensure we do not return with no + // error and no kmsg.Response, per the client contract. + // + // As documented on the client's Request function, if this is a + // *kmsg.ProduceRequest, we rewrite the acks to match the + // client configured acks, and we rewrite the timeout millis if + // acks is 0. We do this to ensure that our discard goroutine + // is used correctly, and so that we do not write a request + // with 0 acks and then send it to handleResps where it will + // not get a response. 
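+ // For illustration: when the client is configured for zero acks
+ // (e.g. via the RequiredAcks option), a user-issued *kmsg.ProduceRequest
+ // is rewritten below to Acks=0 plus the configured produce timeout, and
+ // its promise is completed immediately with an empty, version-matched
+ // *kmsg.ProduceResponse, since a real broker will not reply to it.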
+ var isNoResp bool
+ var noResp *kmsg.ProduceResponse
+ switch r := req.(type) {
+ case *produceRequest:
+ isNoResp = r.acks == 0
+ case *kmsg.ProduceRequest:
+ r.Acks = b.cl.cfg.acks.val
+ if r.Acks == 0 {
+ isNoResp = true
+ r.TimeoutMillis = int32(b.cl.cfg.produceTimeout.Milliseconds())
+ }
+ noResp = kmsg.NewPtrProduceResponse()
+ noResp.Version = req.GetVersion()
+ }
+
+ corrID, bytesWritten, writeWait, timeToWrite, readEnqueue, writeErr := cxn.writeRequest(pr.ctx, pr.enqueue, req)
+
+ if writeErr != nil {
+ pr.promise(nil, writeErr)
+ cxn.die()
+ cxn.hookWriteE2E(req.Key(), bytesWritten, writeWait, timeToWrite, writeErr)
+ return
+ }
+
+ if isNoResp {
+ pr.promise(noResp, nil)
+ cxn.hookWriteE2E(req.Key(), bytesWritten, writeWait, timeToWrite, writeErr)
+ return
+ }
+
+ rt, _ := cxn.cl.connTimeouter.timeouts(req)
+
+ cxn.waitResp(promisedResp{
+ pr.ctx,
+ corrID,
+ req.IsFlexible() && req.Key() != 18, // response header not flexible if ApiVersions; see promisedResp doc
+ req.ResponseKind(),
+ pr.promise,
+ rt,
+ bytesWritten,
+ writeWait,
+ timeToWrite,
+ readEnqueue,
+ })
+}
+
+func (cxn *brokerCxn) hookWriteE2E(key int16, bytesWritten int, writeWait, timeToWrite time.Duration, writeErr error) {
+ cxn.cl.cfg.hooks.each(func(h Hook) {
+ if h, ok := h.(HookBrokerE2E); ok {
+ h.OnBrokerE2E(cxn.b.meta, key, BrokerE2E{
+ BytesWritten: bytesWritten,
+ WriteWait: writeWait,
+ TimeToWrite: timeToWrite,
+ WriteErr: writeErr,
+ })
+ }
+ })
+}
+
+// bufPool is used to reuse issued-request buffers across writes to brokers.
+type bufPool struct{ p *sync.Pool }
+
+func newBufPool() bufPool {
+ return bufPool{
+ p: &sync.Pool{New: func() any { r := make([]byte, 1<<10); return &r }},
+ }
+}
+
+func (p bufPool) get() []byte { return (*p.p.Get().(*[]byte))[:0] }
+func (p bufPool) put(b []byte) { p.p.Put(&b) }
+
+// loadConnection returns the broker's connection, creating it if necessary
+// and returning an error if that fails.
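+// For example, per the routing below: key 0 (Produce) uses cxnProduce,
+// key 1 (Fetch) uses cxnFetch, keys 11 and 14 (JoinGroup / SyncGroup) use
+// cxnGroup, any request carrying TimeoutMillis (kmsg.TimeoutRequest, e.g.
+// CreateTopics) uses cxnSlow, and everything else (e.g. Metadata) uses
+// cxnNormal.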
+func (b *broker) loadConnection(ctx context.Context, req kmsg.Request) (*brokerCxn, error) { + var ( + pcxn = &b.cxnNormal + isProduceCxn bool // see docs on brokerCxn.discard for why we do this + reqKey = req.Key() + _, isTimeout = req.(kmsg.TimeoutRequest) + ) + switch { + case reqKey == 0: + pcxn = &b.cxnProduce + isProduceCxn = true + case reqKey == 1: + pcxn = &b.cxnFetch + case reqKey == 11 || reqKey == 14: // join || sync + pcxn = &b.cxnGroup + case isTimeout: + pcxn = &b.cxnSlow + } + + if *pcxn != nil && !(*pcxn).dead.Load() { + return *pcxn, nil + } + + conn, err := b.connect(ctx) + if err != nil { + return nil, err + } + + cxn := &brokerCxn{ + cl: b.cl, + b: b, + + addr: b.addr, + conn: conn, + deadCh: make(chan struct{}), + } + if err = cxn.init(isProduceCxn); err != nil { + b.cl.cfg.logger.Log(LogLevelDebug, "connection initialization failed", "addr", b.addr, "broker", logID(b.meta.NodeID), "err", err) + cxn.closeConn() + return nil, err + } + b.cl.cfg.logger.Log(LogLevelDebug, "connection initialized successfully", "addr", b.addr, "broker", logID(b.meta.NodeID)) + + b.reapMu.Lock() + defer b.reapMu.Unlock() + *pcxn = cxn + return cxn, nil +} + +func (cl *Client) reapConnectionsLoop() { + idleTimeout := cl.cfg.connIdleTimeout + if idleTimeout < 0 { // impossible due to cfg.validate, but just in case + return + } + + ticker := time.NewTicker(idleTimeout) + defer ticker.Stop() + last := time.Now() + for { + select { + case <-cl.ctx.Done(): + return + case tick := <-ticker.C: + start := time.Now() + reaped := cl.reapConnections(idleTimeout) + dur := time.Since(start) + if reaped > 0 { + cl.cfg.logger.Log(LogLevelDebug, "reaped connections", "time_since_last_reap", tick.Sub(last), "reap_dur", dur, "num_reaped", reaped) + } + last = tick + } + } +} + +func (cl *Client) reapConnections(idleTimeout time.Duration) (total int) { + cl.brokersMu.Lock() + seeds := cl.loadSeeds() + brokers := make([]*broker, 0, len(cl.brokers)+len(seeds)) + brokers = append(brokers, cl.brokers...) + brokers = append(brokers, seeds...) + cl.brokersMu.Unlock() + + for _, broker := range brokers { + total += broker.reapConnections(idleTimeout) + } + return total +} + +func (b *broker) reapConnections(idleTimeout time.Duration) (total int) { + b.reapMu.Lock() + defer b.reapMu.Unlock() + + for _, cxn := range []*brokerCxn{ + b.cxnNormal, + b.cxnProduce, + b.cxnFetch, + b.cxnGroup, + b.cxnSlow, + } { + if cxn == nil || cxn.dead.Load() { + continue + } + + // If we have not written nor read in a long time, the + // connection can be reaped. If only one is idle, the other may + // be busy (or may not happen): + // + // - produce can write but never read + // - fetch can hang for a while reading (infrequent writes) + + lastWrite := time.Unix(0, cxn.lastWrite.Load()) + lastRead := time.Unix(0, cxn.lastRead.Load()) + + writeIdle := time.Since(lastWrite) > idleTimeout && !cxn.writing.Load() + readIdle := time.Since(lastRead) > idleTimeout && !cxn.reading.Load() + + if writeIdle && readIdle { + cxn.die() + total++ + } + } + return total +} + +// connect connects to the broker's addr, returning the new connection. 
+func (b *broker) connect(ctx context.Context) (net.Conn, error) {
+ b.cl.cfg.logger.Log(LogLevelDebug, "opening connection to broker", "addr", b.addr, "broker", logID(b.meta.NodeID))
+ start := time.Now()
+ conn, err := b.cl.cfg.dialFn(ctx, "tcp", b.addr)
+ since := time.Since(start)
+ b.cl.cfg.hooks.each(func(h Hook) {
+ if h, ok := h.(HookBrokerConnect); ok {
+ h.OnBrokerConnect(b.meta, since, conn, err)
+ }
+ })
+ if err != nil {
+ if !errors.Is(err, ErrClientClosed) && !errors.Is(err, context.Canceled) && !strings.Contains(err.Error(), "operation was canceled") {
+ if errors.Is(err, io.EOF) {
+ b.cl.cfg.logger.Log(LogLevelWarn, "unable to open connection to broker due to an immediate EOF, which often means the client is using TLS when the broker is not expecting it (is TLS misconfigured?)", "addr", b.addr, "broker", logID(b.meta.NodeID), "err", err)
+ return nil, &ErrFirstReadEOF{kind: firstReadTLS, err: err}
+ }
+ b.cl.cfg.logger.Log(LogLevelWarn, "unable to open connection to broker", "addr", b.addr, "broker", logID(b.meta.NodeID), "err", err)
+ }
+ return nil, fmt.Errorf("unable to dial: %w", err)
+ }
+ b.cl.cfg.logger.Log(LogLevelDebug, "connection opened to broker", "addr", b.addr, "broker", logID(b.meta.NodeID))
+ return conn, nil
+}
+
+// brokerCxn manages an actual connection to a Kafka broker. This is separate
+// from the broker struct to allow lazy connection (re)creation.
+type brokerCxn struct {
+ throttleUntil atomicI64 // atomic nanosec
+
+ conn net.Conn
+
+ cl *Client
+ b *broker
+
+ addr string
+
+ mechanism sasl.Mechanism
+ expiry time.Time
+
+ corrID int32
+
+ // The following four fields are used for connection reaping.
+ // Write is only updated in one location; read is updated in three
+ // due to readConn, readConnAsync, and discard.
+ lastWrite atomicI64
+ lastRead atomicI64
+ writing atomicBool
+ reading atomicBool
+
+ successes uint64
+
+ // resps manages reading kafka responses.
+ resps ringResp
+ // dead is an atomic so that a backed up resps cannot block cxn death.
+ dead atomicBool
+ // closed in closeConn; allows throttle waiting to quit
+ deadCh chan struct{}
+}
+
+func (cxn *brokerCxn) init(isProduceCxn bool) error {
+ hasVersions := cxn.b.loadVersions() != nil
+ if !hasVersions {
+ if cxn.b.cl.cfg.maxVersions == nil || cxn.b.cl.cfg.maxVersions.HasKey(18) {
+ if err := cxn.requestAPIVersions(); err != nil {
+ if !errors.Is(err, ErrClientClosed) && !isRetryableBrokerErr(err) {
+ cxn.cl.cfg.logger.Log(LogLevelError, "unable to request api versions", "broker", logID(cxn.b.meta.NodeID), "err", err)
+ }
+ return err
+ }
+ } else {
+ // We have a max versions, and it indicates no support
+ // for ApiVersions. We just store a default -1 set.
+ cxn.b.storeVersions(newBrokerVersions())
+ }
+ }
+
+ if err := cxn.sasl(); err != nil {
+ if !errors.Is(err, ErrClientClosed) && !isRetryableBrokerErr(err) {
+ cxn.cl.cfg.logger.Log(LogLevelError, "unable to initialize sasl", "broker", logID(cxn.b.meta.NodeID), "err", err)
+ }
+ return err
+ }
+
+ if isProduceCxn && cxn.cl.cfg.acks.val == 0 {
+ go cxn.discard() // see docs on discard for why we do this
+ }
+ return nil
+}
+
+func (cxn *brokerCxn) requestAPIVersions() error {
+ maxVersion := int16(3)
+
+ // If the user configured a max versions, we check that the key exists
+ // before entering this function. Thus, we expect exists to be true,
+ // but we still doubly check it for sanity (as well as userMax, which
+ // can only be non-negative based off of LookupMaxKeyVersion's API).
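+ // As a sketch of where such a cap can come from (option names assumed
+ // from the standard kgo/kversion API): a client built with
+ // kgo.MaxVersions(kversion.V2_4_0()) pins every key, including
+ // ApiVersions (key 18), to what that release supports, and the lookup
+ // below lowers maxVersion accordingly.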
+ if cxn.cl.cfg.maxVersions != nil { + userMax, exists := cxn.cl.cfg.maxVersions.LookupMaxKeyVersion(18) // 18 == api versions + if exists && userMax >= 0 { + maxVersion = userMax + } + } + +start: + req := kmsg.NewPtrApiVersionsRequest() + req.Version = maxVersion + req.ClientSoftwareName = cxn.cl.cfg.softwareName + req.ClientSoftwareVersion = cxn.cl.cfg.softwareVersion + cxn.cl.cfg.logger.Log(LogLevelDebug, "issuing api versions request", "broker", logID(cxn.b.meta.NodeID), "version", maxVersion) + corrID, bytesWritten, writeWait, timeToWrite, readEnqueue, writeErr := cxn.writeRequest(nil, time.Now(), req) + if writeErr != nil { + cxn.hookWriteE2E(req.Key(), bytesWritten, writeWait, timeToWrite, writeErr) + return writeErr + } + + rt, _ := cxn.cl.connTimeouter.timeouts(req) + // api versions does *not* use flexible response headers; see comment in promisedResp + rawResp, err := cxn.readResponse(nil, req.Key(), req.GetVersion(), corrID, false, rt, bytesWritten, writeWait, timeToWrite, readEnqueue) + if err != nil { + return err + } + if len(rawResp) < 2 { + return fmt.Errorf("invalid length %d short response from ApiVersions request", len(rawResp)) + } + + resp := req.ResponseKind().(*kmsg.ApiVersionsResponse) + + // If we used a version larger than Kafka supports, Kafka replies with + // Version 0 and an UNSUPPORTED_VERSION error. + // + // Pre Kafka 2.4, we have to retry the request with version 0. + // Post, Kafka replies with all versions. + if rawResp[1] == 35 { + if maxVersion == 0 { + return errors.New("broker replied with UNSUPPORTED_VERSION to an ApiVersions request of version 0") + } + srawResp := string(rawResp) + if srawResp == "\x00\x23\x00\x00\x00\x00" || + // EventHubs erroneously replies with v1, so we check + // for that as well. + srawResp == "\x00\x23\x00\x00\x00\x00\x00\x00\x00\x00" { + cxn.cl.cfg.logger.Log(LogLevelDebug, "broker does not know our ApiVersions version, downgrading to version 0 and retrying", "broker", logID(cxn.b.meta.NodeID)) + maxVersion = 0 + goto start + } + resp.Version = 0 + } + + if err = resp.ReadFrom(rawResp); err != nil { + return fmt.Errorf("unable to read ApiVersions response: %w", err) + } + if len(resp.ApiKeys) == 0 { + return errors.New("ApiVersions response invalidly contained no ApiKeys") + } + + v := newBrokerVersions() + for _, key := range resp.ApiKeys { + if key.ApiKey > kmsg.MaxKey || key.ApiKey < 0 { + continue + } + v.versions[key.ApiKey] = key.MaxVersion + } + cxn.b.storeVersions(v) + return nil +} + +func (cxn *brokerCxn) sasl() error { + if len(cxn.cl.cfg.sasls) == 0 { + return nil + } + mechanism := cxn.cl.cfg.sasls[0] + retried := false + authenticate := false + + v := cxn.b.loadVersions() + req := kmsg.NewPtrSASLHandshakeRequest() + +start: + if mechanism.Name() != "GSSAPI" && v.versions[req.Key()] >= 0 { + req.Mechanism = mechanism.Name() + req.Version = v.versions[req.Key()] + cxn.cl.cfg.logger.Log(LogLevelDebug, "issuing SASLHandshakeRequest", "broker", logID(cxn.b.meta.NodeID)) + corrID, bytesWritten, writeWait, timeToWrite, readEnqueue, writeErr := cxn.writeRequest(nil, time.Now(), req) + if writeErr != nil { + cxn.hookWriteE2E(req.Key(), bytesWritten, writeWait, timeToWrite, writeErr) + return writeErr + } + + rt, _ := cxn.cl.connTimeouter.timeouts(req) + rawResp, err := cxn.readResponse(nil, req.Key(), req.GetVersion(), corrID, req.IsFlexible(), rt, bytesWritten, writeWait, timeToWrite, readEnqueue) + if err != nil { + return err + } + resp := req.ResponseKind().(*kmsg.SASLHandshakeResponse) + if err = 
resp.ReadFrom(rawResp); err != nil { + return err + } + + err = kerr.ErrorForCode(resp.ErrorCode) + if err != nil { + if !retried && err == kerr.UnsupportedSaslMechanism { + for _, ours := range cxn.cl.cfg.sasls[1:] { + for _, supported := range resp.SupportedMechanisms { + if supported == ours.Name() { + mechanism = ours + retried = true + goto start + } + } + } + } + return err + } + authenticate = req.Version == 1 + } + cxn.cl.cfg.logger.Log(LogLevelDebug, "beginning sasl authentication", "broker", logID(cxn.b.meta.NodeID), "addr", cxn.addr, "mechanism", mechanism.Name(), "authenticate", authenticate) + cxn.mechanism = mechanism + return cxn.doSasl(authenticate) +} + +func (cxn *brokerCxn) doSasl(authenticate bool) error { + session, clientWrite, err := cxn.mechanism.Authenticate(cxn.cl.ctx, cxn.addr) + if err != nil { + return err + } + if len(clientWrite) == 0 { + return fmt.Errorf("unexpected server-write sasl with mechanism %s", cxn.mechanism.Name()) + } + + prereq := time.Now() // used below for sasl lifetime calculation + var lifetimeMillis int64 + + // Even if we do not wrap our reads/writes in SASLAuthenticate, we + // still use the SASLAuthenticate timeouts. + rt, wt := cxn.cl.connTimeouter.timeouts(kmsg.NewPtrSASLAuthenticateRequest()) + + // We continue writing until both the challenging is done AND the + // responses are done. We can have an additional response once we + // are done with challenges. + step := -1 + for done := false; !done || len(clientWrite) > 0; { + step++ + var challenge []byte + + if !authenticate { + buf := cxn.cl.bufPool.get() + + buf = append(buf[:0], 0, 0, 0, 0) + binary.BigEndian.PutUint32(buf, uint32(len(clientWrite))) + buf = append(buf, clientWrite...) + + cxn.cl.cfg.logger.Log(LogLevelDebug, "issuing raw sasl authenticate", "broker", logID(cxn.b.meta.NodeID), "addr", cxn.addr, "step", step) + _, _, _, _, err = cxn.writeConn(context.Background(), buf, wt, time.Now()) + + cxn.cl.bufPool.put(buf) + + if err != nil { + return err + } + if !done { + if _, challenge, _, _, err = cxn.readConn(context.Background(), rt, time.Now()); err != nil { + return err + } + } + } else { + req := kmsg.NewPtrSASLAuthenticateRequest() + req.SASLAuthBytes = clientWrite + req.Version = cxn.b.loadVersions().versions[req.Key()] + cxn.cl.cfg.logger.Log(LogLevelDebug, "issuing SASLAuthenticate", "broker", logID(cxn.b.meta.NodeID), "version", req.Version, "step", step) + + // Lifetime: we take the timestamp before we write our + // request; see usage below for why. + prereq = time.Now() + corrID, bytesWritten, writeWait, timeToWrite, readEnqueue, writeErr := cxn.writeRequest(nil, time.Now(), req) + + // As mentioned above, we could have one final write + // without reading a response back (kerberos). If this + // is the case, we need to e2e. 
+ if writeErr != nil || done { + cxn.hookWriteE2E(req.Key(), bytesWritten, writeWait, timeToWrite, writeErr) + if writeErr != nil { + return writeErr + } + } + if !done { + rawResp, err := cxn.readResponse(nil, req.Key(), req.GetVersion(), corrID, req.IsFlexible(), rt, bytesWritten, writeWait, timeToWrite, readEnqueue) + if err != nil { + return err + } + resp := req.ResponseKind().(*kmsg.SASLAuthenticateResponse) + if err = resp.ReadFrom(rawResp); err != nil { + return err + } + + if err = kerr.ErrorForCode(resp.ErrorCode); err != nil { + if resp.ErrorMessage != nil { + return fmt.Errorf("%s: %w", *resp.ErrorMessage, err) + } + return err + } + challenge = resp.SASLAuthBytes + lifetimeMillis = resp.SessionLifetimeMillis + } + } + + clientWrite = nil + + if !done { + if done, clientWrite, err = session.Challenge(challenge); err != nil { + return err + } + } + } + + if lifetimeMillis > 0 { + // Lifetime is problematic. We need to be a bit pessimistic. + // + // We want a lowerbound: we use 1s (arbitrary), but if 1.1x our + // e2e sasl latency is more than 1s, we use the latency. + // + // We do not want to reauthenticate too close to the lifetime + // especially for larger lifetimes due to clock issues (#205). + // We take 95% to 98% of the lifetime. + minPessimismMillis := float64(time.Second.Milliseconds()) + latencyMillis := 1.1 * float64(time.Since(prereq).Milliseconds()) + if latencyMillis > minPessimismMillis { + minPessimismMillis = latencyMillis + } + var random float64 + cxn.b.cl.rng(func(r *rand.Rand) { random = r.Float64() }) + maxPessimismMillis := float64(lifetimeMillis) * (0.05 - 0.03*random) // 95 to 98% of lifetime (pessimism 2% to 5%) + + // Our minimum lifetime is always 1s (or latency, if larger). + // When our max pessimism becomes more than min pessimism, + // every second after, we add between 0.05s or 0.08s to our + // backoff. At 12hr, we reauth ~24 to 28min before the + // lifetime. + usePessimismMillis := maxPessimismMillis + if minPessimismMillis > maxPessimismMillis { + usePessimismMillis = minPessimismMillis + } + useLifetimeMillis := lifetimeMillis - int64(usePessimismMillis) + + // Subtracting our min pessimism may result in our connection + // immediately expiring. We always accept this one reauth to + // issue our one request, and our next request will again + // reauth. Brokers should give us longer lifetimes, but that + // may not always happen (see #136, #249). + now := time.Now() + cxn.expiry = now.Add(time.Duration(useLifetimeMillis) * time.Millisecond) + cxn.cl.cfg.logger.Log(LogLevelDebug, "sasl has a limited lifetime", + "broker", logID(cxn.b.meta.NodeID), + "session_lifetime", time.Duration(lifetimeMillis)*time.Millisecond, + "lifetime_pessimism", time.Duration(usePessimismMillis)*time.Millisecond, + "reauthenticate_in", cxn.expiry.Sub(now), + ) + } + return nil +} + +// Some internal requests use the client context to issue requests, so if the +// client is closed, this select case can be selected. We want to return the +// proper error. +// +// This function is used in this file anywhere the client context can cause +// ErrClientClosed. +func maybeUpdateCtxErr(clientCtx, reqCtx context.Context, err *error) { + if clientCtx == reqCtx { + *err = ErrClientClosed + } +} + +// writeRequest writes a message request to the broker connection, bumping the +// connection's correlation ID as appropriate for the next write. 
+func (cxn *brokerCxn) writeRequest(ctx context.Context, enqueuedForWritingAt time.Time, req kmsg.Request) (corrID int32, bytesWritten int, writeWait, timeToWrite time.Duration, readEnqueue time.Time, writeErr error) { + // A nil ctx means we cannot be throttled. + if ctx != nil { + throttleUntil := time.Unix(0, cxn.throttleUntil.Load()) + if sleep := time.Until(throttleUntil); sleep > 0 { + after := time.NewTimer(sleep) + select { + case <-after.C: + case <-ctx.Done(): + writeErr = ctx.Err() + maybeUpdateCtxErr(cxn.cl.ctx, ctx, &writeErr) + case <-cxn.cl.ctx.Done(): + writeErr = ErrClientClosed + case <-cxn.deadCh: + writeErr = errChosenBrokerDead + } + if writeErr != nil { + after.Stop() + writeWait = time.Since(enqueuedForWritingAt) + return + } + } + } + + buf := cxn.cl.reqFormatter.AppendRequest( + cxn.cl.bufPool.get()[:0], + req, + cxn.corrID, + ) + + _, wt := cxn.cl.connTimeouter.timeouts(req) + bytesWritten, writeWait, timeToWrite, readEnqueue, writeErr = cxn.writeConn(ctx, buf, wt, enqueuedForWritingAt) + + cxn.cl.bufPool.put(buf) + + cxn.cl.cfg.hooks.each(func(h Hook) { + if h, ok := h.(HookBrokerWrite); ok { + h.OnBrokerWrite(cxn.b.meta, req.Key(), bytesWritten, writeWait, timeToWrite, writeErr) + } + }) + if logger := cxn.cl.cfg.logger; logger.Level() >= LogLevelDebug { + logger.Log(LogLevelDebug, fmt.Sprintf("wrote %s v%d", kmsg.NameForKey(req.Key()), req.GetVersion()), "broker", logID(cxn.b.meta.NodeID), "bytes_written", bytesWritten, "write_wait", writeWait, "time_to_write", timeToWrite, "err", writeErr) + } + + if writeErr != nil { + return + } + corrID = cxn.corrID + cxn.corrID++ + if cxn.corrID < 0 { + cxn.corrID = 0 + } + return +} + +func (cxn *brokerCxn) writeConn( + ctx context.Context, + buf []byte, + timeout time.Duration, + enqueuedForWritingAt time.Time, +) (bytesWritten int, writeWait, timeToWrite time.Duration, readEnqueue time.Time, writeErr error) { + cxn.writing.Store(true) + defer func() { + cxn.lastWrite.Store(time.Now().UnixNano()) + cxn.writing.Store(false) + }() + + if ctx == nil { + ctx = context.Background() + } + if timeout > 0 { + cxn.conn.SetWriteDeadline(time.Now().Add(timeout)) + } + defer cxn.conn.SetWriteDeadline(time.Time{}) + writeDone := make(chan struct{}) + go func() { + defer close(writeDone) + writeStart := time.Now() + bytesWritten, writeErr = cxn.conn.Write(buf) + // As soon as we are done writing, we track that we have now + // enqueued this request for reading. 
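+ // In other words (an illustrative timeline, not extra behavior):
+ // writeWait below is enqueue->writeStart, timeToWrite is
+ // writeStart->write completion, and readEnqueue marks the moment the
+ // response becomes pending, which later feeds readWait in the e2e hooks.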
+ readEnqueue = time.Now() + writeWait = writeStart.Sub(enqueuedForWritingAt) + timeToWrite = readEnqueue.Sub(writeStart) + }() + select { + case <-writeDone: + case <-cxn.cl.ctx.Done(): + cxn.conn.SetWriteDeadline(time.Now()) + <-writeDone + if writeErr != nil { + writeErr = ErrClientClosed + } + case <-ctx.Done(): + cxn.conn.SetWriteDeadline(time.Now()) + <-writeDone + if writeErr != nil && ctx.Err() != nil { + writeErr = ctx.Err() + maybeUpdateCtxErr(cxn.cl.ctx, ctx, &writeErr) + } + } + return +} + +func (cxn *brokerCxn) readConn( + ctx context.Context, + timeout time.Duration, + enqueuedForReadingAt time.Time, +) (nread int, buf []byte, readWait, timeToRead time.Duration, err error) { + cxn.reading.Store(true) + defer func() { + cxn.lastRead.Store(time.Now().UnixNano()) + cxn.reading.Store(false) + }() + + if ctx == nil { + ctx = context.Background() + } + if timeout > 0 { + cxn.conn.SetReadDeadline(time.Now().Add(timeout)) + } + defer cxn.conn.SetReadDeadline(time.Time{}) + readDone := make(chan struct{}) + go func() { + defer close(readDone) + sizeBuf := make([]byte, 4) + readStart := time.Now() + defer func() { + timeToRead = time.Since(readStart) + readWait = readStart.Sub(enqueuedForReadingAt) + }() + if nread, err = io.ReadFull(cxn.conn, sizeBuf); err != nil { + return + } + var size int32 + if size, err = cxn.parseReadSize(sizeBuf); err != nil { + return + } + buf = make([]byte, size) + var nread2 int + nread2, err = io.ReadFull(cxn.conn, buf) + nread += nread2 + buf = buf[:nread2] + if err != nil { + return + } + }() + select { + case <-readDone: + case <-cxn.cl.ctx.Done(): + cxn.conn.SetReadDeadline(time.Now()) + <-readDone + if err != nil { + err = ErrClientClosed + } + case <-ctx.Done(): + cxn.conn.SetReadDeadline(time.Now()) + <-readDone + if err != nil && ctx.Err() != nil { + err = ctx.Err() + maybeUpdateCtxErr(cxn.cl.ctx, ctx, &err) + } + } + return +} + +// Parses a length 4 slice and enforces the min / max read size based off the +// client configuration. +func (cxn *brokerCxn) parseReadSize(sizeBuf []byte) (int32, error) { + size := int32(binary.BigEndian.Uint32(sizeBuf)) + if size < 0 { + return 0, fmt.Errorf("invalid negative response size %d", size) + } + if maxSize := cxn.b.cl.cfg.maxBrokerReadBytes; size > maxSize { + if size == 0x48545450 { // "HTTP" + return 0, fmt.Errorf("invalid large response size %d > limit %d; the four size bytes are 'HTTP' in ascii, the beginning of an HTTP response; is your broker port correct?", size, maxSize) + } + // A TLS alert is 21, and a TLS alert has the version + // following, where all major versions are 03xx. We + // look for an alert and major version byte to suspect + // if this we received a TLS alert. 
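+ // Concretely (a worked example): a TLS 1.2 alert record begins with
+ // bytes 0x15 0x03 0x03, so the four "size" bytes decode to roughly
+ // 0x15030300, about 350 MB, far above any sane maxBrokerReadBytes,
+ // which is how we end up in this branch in the first place.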
+ tlsVersion := uint16(sizeBuf[1])<<8 | uint16(sizeBuf[2]) + if sizeBuf[0] == 21 && tlsVersion&0x0300 != 0 { + versionGuess := fmt.Sprintf("unknown TLS version (hex %x)", tlsVersion) + for _, guess := range []struct { + num uint16 + text string + }{ + {tls.VersionSSL30, "SSL v3"}, + {tls.VersionTLS10, "TLS v1.0"}, + {tls.VersionTLS11, "TLS v1.1"}, + {tls.VersionTLS12, "TLS v1.2"}, + {tls.VersionTLS13, "TLS v1.3"}, + } { + if tlsVersion == guess.num { + versionGuess = guess.text + } + } + return 0, fmt.Errorf("invalid large response size %d > limit %d; the first three bytes received appear to be a tls alert record for %s; is this a plaintext connection speaking to a tls endpoint?", size, maxSize, versionGuess) + } + return 0, fmt.Errorf("invalid large response size %d > limit %d", size, maxSize) + } + return size, nil +} + +// readResponse reads a response from conn, ensures the correlation ID is +// correct, and returns a newly allocated slice on success. +// +// This takes a bunch of extra arguments in support of HookBrokerE2E, overall +// this function takes 11 bytes in arguments. +func (cxn *brokerCxn) readResponse( + ctx context.Context, + key int16, + version int16, + corrID int32, + flexibleHeader bool, + timeout time.Duration, + bytesWritten int, + writeWait time.Duration, + timeToWrite time.Duration, + readEnqueue time.Time, +) ([]byte, error) { + bytesRead, buf, readWait, timeToRead, readErr := cxn.readConn(ctx, timeout, readEnqueue) + + cxn.cl.cfg.hooks.each(func(h Hook) { + if h, ok := h.(HookBrokerRead); ok { + h.OnBrokerRead(cxn.b.meta, key, bytesRead, readWait, timeToRead, readErr) + } + if h, ok := h.(HookBrokerE2E); ok { + h.OnBrokerE2E(cxn.b.meta, key, BrokerE2E{ + BytesWritten: bytesWritten, + BytesRead: bytesRead, + WriteWait: writeWait, + TimeToWrite: timeToWrite, + ReadWait: readWait, + TimeToRead: timeToRead, + ReadErr: readErr, + }) + } + }) + if logger := cxn.cl.cfg.logger; logger.Level() >= LogLevelDebug { + logger.Log(LogLevelDebug, fmt.Sprintf("read %s v%d", kmsg.NameForKey(key), version), "broker", logID(cxn.b.meta.NodeID), "bytes_read", bytesRead, "read_wait", readWait, "time_to_read", timeToRead, "err", readErr) + } + + if readErr != nil { + return nil, readErr + } + if len(buf) < 4 { + return nil, kbin.ErrNotEnoughData + } + gotID := int32(binary.BigEndian.Uint32(buf)) + if gotID != corrID { + return nil, errCorrelationIDMismatch + } + // If the response header is flexible, we skip the tags at the end of + // it. They are currently unused. + if flexibleHeader { + b := kbin.Reader{Src: buf[4:]} + kmsg.SkipTags(&b) + return b.Src, b.Complete() + } + return buf[4:], nil +} + +// closeConn is the one place we close broker connections. This is always done +// in either die, which is called when handleResps returns, or if init fails, +// which means we did not succeed enough to start handleResps. +func (cxn *brokerCxn) closeConn() { + cxn.cl.cfg.hooks.each(func(h Hook) { + if h, ok := h.(HookBrokerDisconnect); ok { + h.OnBrokerDisconnect(cxn.b.meta, cxn.conn) + } + }) + cxn.conn.Close() + close(cxn.deadCh) +} + +// die kills a broker connection (which could be dead already) and replies to +// all requests awaiting responses appropriately. +func (cxn *brokerCxn) die() { + if cxn == nil || cxn.dead.Swap(true) { + return + } + cxn.closeConn() + cxn.resps.die() +} + +// waitResp, called serially by a broker's handleReqs, manages handling a +// message requests's response. 
+func (cxn *brokerCxn) waitResp(pr promisedResp) { + first, dead := cxn.resps.push(pr) + if first { + go cxn.handleResps(pr) + } else if dead { + pr.promise(nil, errChosenBrokerDead) + cxn.hookWriteE2E(pr.resp.Key(), pr.bytesWritten, pr.writeWait, pr.timeToWrite, errChosenBrokerDead) + } +} + +// If acks are zero, then a real Kafka installation never replies to produce +// requests. Unfortunately, Microsoft EventHubs rolled their own implementation +// and _does_ reply to ack-0 produce requests. We need to process these +// responses, because otherwise kernel buffers will fill up, Microsoft will be +// unable to reply, and then they will stop taking our produce requests. +// +// Thus, we just simply discard everything. +// +// Since we still want to support hooks, we still read the size of a response +// and then read that entire size before calling a hook. There are a few +// differences: +// +// (1) we do not know what version we produced, so we cannot validate the read, +// we just have to trust that the size is valid (and the data follows +// correctly). +// +// (2) rather than creating a slice for the response, we discard the entire +// response into a reusable small slice. The small size is because produce +// responses are relatively small to begin with, so we expect only a few reads +// per response. +// +// (3) we have no time for when the read was enqueued, so we miss that in the +// hook. +// +// (4) we start the time-to-read duration *after* the size bytes are read, +// since we have no idea when a read actually should start, since we should not +// receive responses to begin with. +// +// (5) we set a read deadline *after* the size bytes are read, and only if the +// client has not yet closed. +func (cxn *brokerCxn) discard() { + var firstTimeout bool + defer func() { + if !firstTimeout { // see below + cxn.die() + } else { + cxn.b.cl.cfg.logger.Log(LogLevelDebug, "produce acks==0 discard goroutine exiting; this broker looks to correctly not reply to ack==0 produce requests", "addr", cxn.b.addr, "broker", logID(cxn.b.meta.NodeID)) + } + }() + + discardBuf := make([]byte, 256) + for i := 0; ; i++ { + var ( + nread int + err error + timeToRead time.Duration + + deadlineMu sync.Mutex + deadlineSet bool + + readDone = make(chan struct{}) + ) + + // On all but the first request, we use no deadline. We could + // be hanging reading while we wait for more produce requests. + // We know we are talking to azure when i > 0 and we should not + // quit this goroutine. + // + // However, on the *first* produce request, we know that we are + // writing *right now*. We can deadline our read side with + // ample overhead, and if this first read hits the deadline, + // then we can quit this discard / read goroutine with no + // problems. 
+ // + // We choose 3x our timeouts: + // - first we cover the write, connTimeoutOverhead + produceTimeout + // - then we cover the read, connTimeoutOverhead + // - then we throw in another connTimeoutOverhead just to be sure + // + deadline := time.Time{} + if i == 0 { + deadline = time.Now().Add(3*cxn.cl.cfg.requestTimeoutOverhead + cxn.cl.cfg.produceTimeout) + } + cxn.conn.SetReadDeadline(deadline) + + go func() { + defer close(readDone) + if nread, err = io.ReadFull(cxn.conn, discardBuf[:4]); err != nil { + if i == 0 && errors.Is(err, os.ErrDeadlineExceeded) { + firstTimeout = true + } + return + } + deadlineMu.Lock() + if !deadlineSet { + cxn.conn.SetReadDeadline(time.Now().Add(cxn.cl.cfg.produceTimeout)) + } + deadlineMu.Unlock() + + cxn.reading.Store(true) + defer func() { + cxn.lastRead.Store(time.Now().UnixNano()) + cxn.reading.Store(false) + }() + + readStart := time.Now() + defer func() { timeToRead = time.Since(readStart) }() + var size int32 + if size, err = cxn.parseReadSize(discardBuf[:4]); err != nil { + return + } + + var nread2 int + for size > 0 && err == nil { + discard := discardBuf + if int(size) < len(discard) { + discard = discard[:size] + } + nread2, err = cxn.conn.Read(discard) + nread += nread2 + size -= int32(nread2) // nread2 max is 128 + } + }() + + select { + case <-readDone: + case <-cxn.cl.ctx.Done(): + deadlineMu.Lock() + deadlineSet = true + deadlineMu.Unlock() + cxn.conn.SetReadDeadline(time.Now()) + <-readDone + return + } + + cxn.cl.cfg.hooks.each(func(h Hook) { + if h, ok := h.(HookBrokerRead); ok { + h.OnBrokerRead(cxn.b.meta, 0, nread, 0, timeToRead, err) + } + }) + if err != nil { + return + } + } +} + +// handleResps serially handles all broker responses for an single connection. +func (cxn *brokerCxn) handleResps(pr promisedResp) { + var more, dead bool +start: + if dead { + pr.promise(nil, errChosenBrokerDead) + cxn.hookWriteE2E(pr.resp.Key(), pr.bytesWritten, pr.writeWait, pr.timeToWrite, errChosenBrokerDead) + } else { + cxn.handleResp(pr) + } + + pr, more, dead = cxn.resps.dropPeek() + if more { + goto start + } +} + +func (cxn *brokerCxn) handleResp(pr promisedResp) { + rawResp, err := cxn.readResponse( + pr.ctx, + pr.resp.Key(), + pr.resp.GetVersion(), + pr.corrID, + pr.flexibleHeader, + pr.readTimeout, + pr.bytesWritten, + pr.writeWait, + pr.timeToWrite, + pr.readEnqueue, + ) + if err != nil { + if !errors.Is(err, ErrClientClosed) && !errors.Is(err, context.Canceled) { + if cxn.successes > 0 || len(cxn.b.cl.cfg.sasls) > 0 { + cxn.b.cl.cfg.logger.Log(LogLevelDebug, "read from broker errored, killing connection", "req", kmsg.Key(pr.resp.Key()).Name(), "addr", cxn.b.addr, "broker", logID(cxn.b.meta.NodeID), "successful_reads", cxn.successes, "err", err) + } else { + cxn.b.cl.cfg.logger.Log(LogLevelWarn, "read from broker errored, killing connection after 0 successful responses (is SASL missing?)", "req", kmsg.Key(pr.resp.Key()).Name(), "addr", cxn.b.addr, "broker", logID(cxn.b.meta.NodeID), "err", err) + if err == io.EOF { // specifically avoid checking errors.Is to ensure this is not already wrapped + err = &ErrFirstReadEOF{kind: firstReadSASL, err: err} + } + } + } + pr.promise(nil, err) + cxn.die() + return + } + + cxn.successes++ + readErr := pr.resp.ReadFrom(rawResp) + + // If we had no error, we read the response successfully. + // + // Any response that can cause throttling satisfies the + // kmsg.ThrottleResponse interface. We check that here. 
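Both the normal read path and the ack-0 discard path above report through HookBrokerRead, and the throttle handling just below reports through HookBrokerThrottle. A sketch of wiring such hooks into a client; the method signatures are inferred from the call sites above, and the compile-time assertions would catch any drift from the real kgo interfaces:

```go
package main

import (
	"log"
	"time"

	"github.com/twmb/franz-go/pkg/kgo"
)

// readLogger observes per-response reads and broker throttling.
// Signatures are inferred from the hook call sites in broker.go above.
type readLogger struct{}

func (readLogger) OnBrokerRead(meta kgo.BrokerMetadata, key int16, bytesRead int, readWait, timeToRead time.Duration, err error) {
	log.Printf("broker %d: key %d, read %d bytes in %s (err: %v)", meta.NodeID, key, bytesRead, timeToRead, err)
}

func (readLogger) OnBrokerThrottle(meta kgo.BrokerMetadata, throttle time.Duration, throttledAfterResponse bool) {
	log.Printf("broker %d throttling us for %s (after response: %v)", meta.NodeID, throttle, throttledAfterResponse)
}

var (
	_ kgo.HookBrokerRead     = readLogger{}
	_ kgo.HookBrokerThrottle = readLogger{}
)

func main() {
	cl, err := kgo.NewClient(
		kgo.SeedBrokers("localhost:9092"), // placeholder seed broker
		kgo.WithHooks(readLogger{}),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer cl.Close()
}
```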
+ if readErr == nil { + if throttleResponse, ok := pr.resp.(kmsg.ThrottleResponse); ok { + millis, throttlesAfterResp := throttleResponse.Throttle() + if millis > 0 { + cxn.b.cl.cfg.logger.Log(LogLevelInfo, "broker is throttling us in response", "broker", logID(cxn.b.meta.NodeID), "req", kmsg.Key(pr.resp.Key()).Name(), "throttle_millis", millis, "throttles_after_resp", throttlesAfterResp) + if throttlesAfterResp { + throttleUntil := time.Now().Add(time.Millisecond * time.Duration(millis)).UnixNano() + if throttleUntil > cxn.throttleUntil.Load() { + cxn.throttleUntil.Store(throttleUntil) + } + } + cxn.cl.cfg.hooks.each(func(h Hook) { + if h, ok := h.(HookBrokerThrottle); ok { + h.OnBrokerThrottle(cxn.b.meta, time.Duration(millis)*time.Millisecond, throttlesAfterResp) + } + }) + } + } + } + + pr.promise(pr.resp, readErr) +} diff --git a/vendor/github.com/twmb/franz-go/pkg/kgo/client.go b/vendor/github.com/twmb/franz-go/pkg/kgo/client.go new file mode 100644 index 000000000000..775a22e6ee21 --- /dev/null +++ b/vendor/github.com/twmb/franz-go/pkg/kgo/client.go @@ -0,0 +1,4553 @@ +// Package kgo provides a pure Go efficient Kafka client for Kafka 0.8+ with +// support for transactions, regex topic consuming, the latest partition +// strategies, and more. This client supports all client related KIPs. +// +// This client aims to be simple to use while still interacting with Kafka in a +// near ideal way. For more overview of the entire client itself, please see +// the README on the project's Github page. +package kgo + +import ( + "context" + "crypto/tls" + "errors" + "fmt" + "hash/crc32" + "math/rand" + "net" + "reflect" + "runtime" + "sort" + "strconv" + "strings" + "sync" + "sync/atomic" + "time" + + "github.com/twmb/franz-go/pkg/kerr" + "github.com/twmb/franz-go/pkg/kmsg" + "github.com/twmb/franz-go/pkg/sasl" +) + +var crc32c = crc32.MakeTable(crc32.Castagnoli) // record crc's use Castagnoli table; for consuming/producing + +// Client issues requests and handles responses to a Kafka cluster. +type Client struct { + cfg cfg + opts []Opt + + ctx context.Context + ctxCancel func() + + rng func(func(*rand.Rand)) + + brokersMu sync.RWMutex + brokers []*broker // ordered by broker ID + seeds atomic.Value // []*broker, seed brokers, also ordered by ID + anyBrokerOrd []int32 // shuffled brokers, for random ordering + anySeedIdx int32 + stopBrokers bool // set to true on close to stop updateBrokers + + // A sink and a source is created once per node ID and persists + // forever. We expect the list to be small. + // + // The mutex only exists to allow consumer session stopping to read + // sources to notify when starting a session; all writes happen in the + // metadata loop. + sinksAndSourcesMu sync.Mutex + sinksAndSources map[int32]sinkAndSource + + reqFormatter *kmsg.RequestFormatter + connTimeouter connTimeouter + + bufPool bufPool // for to brokers to share underlying reusable request buffers + prsPool prsPool // for sinks to reuse []promisedNumberedRecord + + controllerIDMu sync.Mutex + controllerID int32 + + // The following two ensure that we only have one fetchBrokerMetadata + // at once. This avoids unnecessary broker metadata requests and + // metadata trampling. 
+ fetchingBrokersMu sync.Mutex + fetchingBrokers *struct { + done chan struct{} + err error + } + + producer producer + consumer consumer + + compressor *compressor + decompressor *decompressor + + coordinatorsMu sync.Mutex + coordinators map[coordinatorKey]*coordinatorLoad + + updateMetadataCh chan string + updateMetadataNowCh chan string // like above, but with high priority + blockingMetadataFnCh chan func() + metawait metawait + metadone chan struct{} + + mappedMetaMu sync.Mutex + mappedMeta map[string]mappedMetadataTopic +} + +func (cl *Client) idempotent() bool { return !cl.cfg.disableIdempotency } + +type sinkAndSource struct { + sink *sink + source *source +} + +func (cl *Client) allSinksAndSources(fn func(sns sinkAndSource)) { + cl.sinksAndSourcesMu.Lock() + defer cl.sinksAndSourcesMu.Unlock() + + for _, sns := range cl.sinksAndSources { + fn(sns) + } +} + +type hostport struct { + host string + port int32 +} + +// ValidateOpts returns an error if the options are invalid. +func ValidateOpts(opts ...Opt) error { + _, _, _, err := validateCfg(opts...) + return err +} + +func parseSeeds(addrs []string) ([]hostport, error) { + seeds := make([]hostport, 0, len(addrs)) + for _, seedBroker := range addrs { + hp, err := parseBrokerAddr(seedBroker) + if err != nil { + return nil, err + } + seeds = append(seeds, hp) + } + return seeds, nil +} + +// This function validates the configuration and returns a few things that we +// initialize while validating. The difference between this and NewClient +// initialization is all NewClient initialization is infallible. +func validateCfg(opts ...Opt) (cfg, []hostport, *compressor, error) { + cfg := defaultCfg() + for _, opt := range opts { + opt.apply(&cfg) + } + if err := cfg.validate(); err != nil { + return cfg, nil, nil, err + } + seeds, err := parseSeeds(cfg.seedBrokers) + if err != nil { + return cfg, nil, nil, err + } + compressor, err := newCompressor(cfg.compression...) + if err != nil { + return cfg, nil, nil, err + } + return cfg, seeds, compressor, nil +} + +func namefn(fn any) string { + v := reflect.ValueOf(fn) + if v.Type().Kind() != reflect.Func { + return "" + } + name := runtime.FuncForPC(v.Pointer()).Name() + dot := strings.LastIndexByte(name, '.') + if dot >= 0 { + return name[dot+1:] + } + return name +} + +// OptValue returns the value for the given configuration option. If the +// given option does not exist, this returns nil. This function takes either a +// raw Opt, or an Opt function name. +// +// If a configuration option has multiple inputs, this function returns only +// the first input. If the function is a boolean function (such as +// BlockRebalanceOnPoll), this function returns the value of the internal bool. +// Variadic option inputs are returned as a single slice. Options that are +// internally stored as a pointer (ClientID, TransactionalID, and InstanceID) +// are returned as their string input; you can see if the option is internally +// nil by looking at the second value returned from OptValues. 
+// +// var ( +// cl, _ := NewClient( +// InstanceID("foo"), +// ConsumeTopics("foo", "bar"), +// ) +// iid = cl.OptValue(InstanceID) // iid is "foo" +// gid = cl.OptValue(ConsumerGroup) // gid is "" since groups are not used +// topics = cl.OptValue("ConsumeTopics") // topics is []string{"foo", "bar"}; string lookup for the option works +// bpoll = cl.OptValue(BlockRebalanceOnPoll) // bpoll is false +// t = cl.OptValue(SessionTimeout) // t is 45s, the internal default +// td = t.(time.Duration) // safe conversion since SessionTimeout's input is a time.Duration +// unk = cl.OptValue("Unknown"), // unk is nil +// ) +func (cl *Client) OptValue(opt any) any { + vs := cl.OptValues(opt) + if len(vs) > 0 { + return vs[0] + } + return nil +} + +// OptValues returns all values for options. This method is useful for +// options that have multiple inputs (notably, SoftwareNameAndVersion). This is +// also useful for options that are internally stored as a pointer (ClientID, +// TransactionalID, and InstanceID) -- this function will return the string +// value of the option but also whether the option is non-nil. Boolean options +// are returned as a single-element slice with the bool value. Variadic inputs +// are returned as a signle slice. If the input option does not exist, this +// returns nil. +// +// var ( +// cl, _ = NewClient( +// InstanceID("foo"), +// ConsumeTopics("foo", "bar"), +// ) +// idValues = cl.OptValues(InstanceID) // idValues is []any{"foo", true} +// tValues = cl.OptValues(SessionTimeout) // tValues is []any{45 * time.Second} +// topics = cl.OptValues(ConsumeTopics) // topics is []any{[]string{"foo", "bar"} +// bpoll = cl.OptValues(BlockRebalanceOnPoll) // bpoll is []any{false} +// unknown = cl.OptValues("Unknown") // unknown is nil +// ) +func (cl *Client) OptValues(opt any) []any { + name := namefn(opt) + if s, ok := opt.(string); ok { + name = s + } + cfg := &cl.cfg + + switch name { + case namefn(ClientID): + if cfg.id != nil { + return []any{*cfg.id, true} + } + return []any{"", false} + case namefn(SoftwareNameAndVersion): + return []any{cfg.softwareName, cfg.softwareVersion} + case namefn(WithLogger): + if _, wrapped := cfg.logger.(*wrappedLogger); wrapped { + return []any{cfg.logger.(*wrappedLogger).inner} + } + return []any{nil} + case namefn(RequestTimeoutOverhead): + return []any{cfg.requestTimeoutOverhead} + case namefn(ConnIdleTimeout): + return []any{cfg.connIdleTimeout} + case namefn(Dialer): + return []any{cfg.dialFn} + case namefn(DialTLSConfig): + return []any{cfg.dialTLS} + case namefn(DialTLS): + return []any{cfg.dialTLS != nil} + case namefn(SeedBrokers): + return []any{cfg.seedBrokers} + case namefn(MaxVersions): + return []any{cfg.maxVersions} + case namefn(MinVersions): + return []any{cfg.minVersions} + case namefn(RetryBackoffFn): + return []any{cfg.retryBackoff} + case namefn(RequestRetries): + return []any{cfg.retries} + case namefn(RetryTimeout): + return []any{cfg.retryTimeout(0)} + case namefn(RetryTimeoutFn): + return []any{cfg.retryTimeout} + case namefn(AllowAutoTopicCreation): + return []any{cfg.allowAutoTopicCreation} + case namefn(BrokerMaxWriteBytes): + return []any{cfg.maxBrokerWriteBytes} + case namefn(BrokerMaxReadBytes): + return []any{cfg.maxBrokerReadBytes} + case namefn(MetadataMaxAge): + return []any{cfg.metadataMaxAge} + case namefn(MetadataMinAge): + return []any{cfg.metadataMinAge} + case namefn(SASL): + return []any{cfg.sasls} + case namefn(WithHooks): + return []any{cfg.hooks} + case namefn(ConcurrentTransactionsBackoff): + 
return []any{cfg.txnBackoff} + case namefn(ConsiderMissingTopicDeletedAfter): + return []any{cfg.missingTopicDelete} + + case namefn(DefaultProduceTopic): + return []any{cfg.defaultProduceTopic} + case namefn(RequiredAcks): + return []any{cfg.acks} + case namefn(DisableIdempotentWrite): + return []any{cfg.disableIdempotency} + case namefn(MaxProduceRequestsInflightPerBroker): + return []any{cfg.maxProduceInflight} + case namefn(ProducerBatchCompression): + return []any{cfg.compression} + case namefn(ProducerBatchMaxBytes): + return []any{cfg.maxRecordBatchBytes} + case namefn(MaxBufferedRecords): + return []any{cfg.maxBufferedRecords} + case namefn(MaxBufferedBytes): + return []any{cfg.maxBufferedBytes} + case namefn(RecordPartitioner): + return []any{cfg.partitioner} + case namefn(ProduceRequestTimeout): + return []any{cfg.produceTimeout} + case namefn(RecordRetries): + return []any{cfg.recordRetries} + case namefn(UnknownTopicRetries): + return []any{cfg.maxUnknownFailures} + case namefn(StopProducerOnDataLossDetected): + return []any{cfg.stopOnDataLoss} + case namefn(ProducerOnDataLossDetected): + return []any{cfg.onDataLoss} + case namefn(ProducerLinger): + return []any{cfg.linger} + case namefn(ManualFlushing): + return []any{cfg.manualFlushing} + case namefn(RecordDeliveryTimeout): + return []any{cfg.recordTimeout} + case namefn(TransactionalID): + if cfg.txnID != nil { + return []any{cfg.txnID, true} + } + return []any{"", false} + case namefn(TransactionTimeout): + return []any{cfg.txnTimeout} + + case namefn(ConsumePartitions): + return []any{cfg.partitions} + case namefn(ConsumePreferringLagFn): + return []any{cfg.preferLagFn} + case namefn(ConsumeRegex): + return []any{cfg.regex} + case namefn(ConsumeResetOffset): + return []any{cfg.resetOffset} + case namefn(ConsumeTopics): + return []any{cfg.topics} + case namefn(DisableFetchSessions): + return []any{cfg.disableFetchSessions} + case namefn(FetchIsolationLevel): + return []any{cfg.isolationLevel} + case namefn(FetchMaxBytes): + return []any{int32(cfg.maxBytes)} + case namefn(FetchMaxPartitionBytes): + return []any{int32(cfg.maxPartBytes)} + case namefn(FetchMaxWait): + return []any{time.Duration(cfg.maxWait) * time.Millisecond} + case namefn(FetchMinBytes): + return []any{cfg.minBytes} + case namefn(KeepControlRecords): + return []any{cfg.keepControl} + case namefn(MaxConcurrentFetches): + return []any{cfg.maxConcurrentFetches} + case namefn(Rack): + return []any{cfg.rack} + case namefn(KeepRetryableFetchErrors): + return []any{cfg.keepRetryableFetchErrors} + + case namefn(AdjustFetchOffsetsFn): + return []any{cfg.adjustOffsetsBeforeAssign} + case namefn(AutoCommitCallback): + return []any{cfg.commitCallback} + case namefn(AutoCommitInterval): + return []any{cfg.autocommitInterval} + case namefn(AutoCommitMarks): + return []any{cfg.autocommitMarks} + case namefn(Balancers): + return []any{cfg.balancers} + case namefn(BlockRebalanceOnPoll): + return []any{cfg.blockRebalanceOnPoll} + case namefn(ConsumerGroup): + return []any{cfg.group} + case namefn(DisableAutoCommit): + return []any{cfg.autocommitDisable} + case namefn(GreedyAutoCommit): + return []any{cfg.autocommitGreedy} + case namefn(GroupProtocol): + return []any{cfg.protocol} + case namefn(HeartbeatInterval): + return []any{cfg.heartbeatInterval} + case namefn(InstanceID): + if cfg.instanceID != nil { + return []any{*cfg.instanceID, true} + } + return []any{"", false} + case namefn(OnOffsetsFetched): + return []any{cfg.onFetched} + case namefn(OnPartitionsAssigned): + 
return []any{cfg.onAssigned} + case namefn(OnPartitionsLost): + return []any{cfg.onLost} + case namefn(OnPartitionsRevoked): + return []any{cfg.onRevoked} + case namefn(RebalanceTimeout): + return []any{cfg.rebalanceTimeout} + case namefn(RequireStableFetchOffsets): + return []any{cfg.requireStable} + case namefn(SessionTimeout): + return []any{cfg.sessionTimeout} + default: + return nil + } +} + +// NewClient returns a new Kafka client with the given options or an error if +// the options are invalid. Connections to brokers are lazily created only when +// requests are written to them. +// +// By default, the client uses the latest stable request versions when talking +// to Kafka. If you use a broker older than 0.10.0, then you need to manually +// set a MaxVersions option. Otherwise, there is usually no harm in defaulting +// to the latest API versions, although occasionally Kafka introduces new +// required parameters that do not have zero value defaults. +// +// NewClient also launches a goroutine which periodically updates the cached +// topic metadata. +func NewClient(opts ...Opt) (*Client, error) { + cfg, seeds, compressor, err := validateCfg(opts...) + if err != nil { + return nil, err + } + + if cfg.retryTimeout == nil { + cfg.retryTimeout = func(key int16) time.Duration { + switch key { + case ((*kmsg.JoinGroupRequest)(nil)).Key(), + ((*kmsg.SyncGroupRequest)(nil)).Key(), + ((*kmsg.HeartbeatRequest)(nil)).Key(): + return cfg.sessionTimeout + } + return 30 * time.Second + } + } + + if cfg.dialFn == nil { + dialer := &net.Dialer{Timeout: cfg.dialTimeout} + cfg.dialFn = dialer.DialContext + if cfg.dialTLS != nil { + cfg.dialFn = func(ctx context.Context, network, host string) (net.Conn, error) { + c := cfg.dialTLS.Clone() + if c.ServerName == "" { + server, _, err := net.SplitHostPort(host) + if err != nil { + return nil, fmt.Errorf("unable to split host:port for dialing: %w", err) + } + c.ServerName = server + } + return (&tls.Dialer{ + NetDialer: dialer, + Config: c, + }).DialContext(ctx, network, host) + } + } + } + + ctx, cancel := context.WithCancel(context.Background()) + + cl := &Client{ + cfg: cfg, + opts: opts, + ctx: ctx, + ctxCancel: cancel, + + rng: func() func(func(*rand.Rand)) { + var mu sync.Mutex + rng := rand.New(rand.NewSource(time.Now().UnixNano())) + return func(fn func(*rand.Rand)) { + mu.Lock() + defer mu.Unlock() + fn(rng) + } + }(), + + controllerID: unknownControllerID, + + sinksAndSources: make(map[int32]sinkAndSource), + + reqFormatter: kmsg.NewRequestFormatter(), + connTimeouter: connTimeouter{def: cfg.requestTimeoutOverhead}, + + bufPool: newBufPool(), + prsPool: newPrsPool(), + + compressor: compressor, + decompressor: newDecompressor(), + + coordinators: make(map[coordinatorKey]*coordinatorLoad), + + updateMetadataCh: make(chan string, 1), + updateMetadataNowCh: make(chan string, 1), + blockingMetadataFnCh: make(chan func()), + metadone: make(chan struct{}), + } + + // Before we start any goroutines below, we must notify any interested + // hooks of our existence. 
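As the dialer setup in NewClient above shows, when DialTLSConfig is set the client clones the config for each dial and fills in ServerName from the broker host if it is empty, so a bare TLS config is usually enough. A minimal sketch; the address is a placeholder:

```go
package main

import (
	"crypto/tls"
	"log"

	"github.com/twmb/franz-go/pkg/kgo"
)

func main() {
	cl, err := kgo.NewClient(
		kgo.SeedBrokers("broker-1.example.com:9093"), // placeholder TLS listener
		// ServerName is left empty on purpose: the client clones this
		// config per dial and fills ServerName from the host.
		kgo.DialTLSConfig(&tls.Config{MinVersion: tls.VersionTLS12}),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer cl.Close()
}
```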
+ cl.cfg.hooks.each(func(h Hook) { + if h, ok := h.(HookNewClient); ok { + h.OnNewClient(cl) + } + }) + + cl.producer.init(cl) + cl.consumer.init(cl) + cl.metawait.init() + + if cfg.id != nil { + cl.reqFormatter = kmsg.NewRequestFormatter(kmsg.FormatterClientID(*cfg.id)) + } + + seedBrokers := make([]*broker, 0, len(seeds)) + for i, seed := range seeds { + b := cl.newBroker(unknownSeedID(i), seed.host, seed.port, nil) + seedBrokers = append(seedBrokers, b) + } + cl.seeds.Store(seedBrokers) + go cl.updateMetadataLoop() + go cl.reapConnectionsLoop() + + return cl, nil +} + +// Opts returns the options that were used to create this client. This can be +// as a base to generate a new client, where you can add override options to +// the end of the original input list. If you want to know a specific option +// value, you can use OptValue or OptValues. +func (cl *Client) Opts() []Opt { + return cl.opts +} + +func (cl *Client) loadSeeds() []*broker { + return cl.seeds.Load().([]*broker) +} + +// Ping returns whether any broker is reachable, iterating over any discovered +// broker or seed broker until one returns a successful response to an +// ApiVersions request. No discovered broker nor seed broker is attempted more +// than once. If all requests fail, this returns final error. +func (cl *Client) Ping(ctx context.Context) error { + req := kmsg.NewPtrApiVersionsRequest() + req.ClientSoftwareName = cl.cfg.softwareName + req.ClientSoftwareVersion = cl.cfg.softwareVersion + + cl.brokersMu.RLock() + brokers := append([]*broker(nil), cl.brokers...) + cl.brokersMu.RUnlock() + + var lastErr error + for _, brs := range [2][]*broker{ + brokers, + cl.loadSeeds(), + } { + for _, br := range brs { + _, err := br.waitResp(ctx, req) + if lastErr = err; lastErr == nil { + return nil + } + } + } + return lastErr +} + +// PurgeTopicsFromClient internally removes all internal information about the +// input topics. If you you want to purge information for only consuming or +// only producing, see the related functions [PurgeTopicsFromConsuming] and +// [PurgeTopicsFromProducing]. +// +// For producing, this clears all knowledge that these topics have ever been +// produced to. Producing to the topic again may result in out of order +// sequence number errors, or, if idempotency is disabled and the sequence +// numbers align, may result in invisibly discarded records at the broker. +// Purging a topic that was previously produced to may be useful to free up +// resources if you are producing to many disparate and short lived topic in +// the lifetime of this client and you do not plan to produce to the topic +// anymore. You may want to flush buffered records before purging if records +// for a topic you are purging are currently in flight. +// +// For consuming, this removes all concept of the topic from being consumed. +// This is different from PauseFetchTopics, which literally pauses the fetching +// of topics but keeps the topic information around for resuming fetching +// later. Purging a topic that was being consumed can be useful if you know the +// topic no longer exists, or if you are consuming via regex and know that some +// previously consumed topics no longer exist, or if you simply do not want to +// ever consume from a topic again. If you are group consuming, this function +// will likely cause a rebalance. +// +// For admin requests, this deletes the topic from the cached metadata map for +// sharded requests. 
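Ping, shown above, is a cheap liveness check: it walks discovered brokers and then seeds, issuing ApiVersions until one responds. A sketch of using it as a startup health check; the seed address and timeout are arbitrary placeholders:

```go
package main

import (
	"context"
	"log"
	"time"

	"github.com/twmb/franz-go/pkg/kgo"
)

func main() {
	cl, err := kgo.NewClient(kgo.SeedBrokers("localhost:9092")) // placeholder seed
	if err != nil {
		log.Fatal(err)
	}
	defer cl.Close()

	// Connections are created lazily, so NewClient succeeding says nothing
	// about reachability; Ping is the first real round trip.
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	if err := cl.Ping(ctx); err != nil {
		log.Fatalf("no broker reachable: %v", err)
	}
	log.Println("cluster reachable")
}
```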
Metadata for sharded admin requests is only cached for +// MetadataMinAge anyway, but the map is not cleaned up one the metadata +// expires. This function ensures the map is purged. +func (cl *Client) PurgeTopicsFromClient(topics ...string) { + if len(topics) == 0 { + return + } + sort.Strings(topics) // for logging in the functions + cl.blockingMetadataFn(func() { // make reasoning about concurrency easier + var wg sync.WaitGroup + wg.Add(2) + go func() { + defer wg.Done() + cl.producer.purgeTopics(topics) + }() + go func() { + defer wg.Done() + cl.consumer.purgeTopics(topics) + }() + wg.Wait() + }) + cl.mappedMetaMu.Lock() + for _, t := range topics { + delete(cl.mappedMeta, t) + } + cl.mappedMetaMu.Unlock() +} + +// PurgeTopicsFromProducing internally removes all internal information for +// producing about the input topics. This runs the producer bit of logic that +// is documented in [PurgeTopicsFromClient]; see that function for more +// details. +func (cl *Client) PurgeTopicsFromProducing(topics ...string) { + if len(topics) == 0 { + return + } + sort.Strings(topics) + cl.blockingMetadataFn(func() { + cl.producer.purgeTopics(topics) + }) +} + +// PurgeTopicsFromConsuming internally removes all internal information for +// consuming about the input topics. This runs the consumer bit of logic that +// is documented in [PurgeTopicsFromClient]; see that function for more +// details. +func (cl *Client) PurgeTopicsFromConsuming(topics ...string) { + if len(topics) == 0 { + return + } + sort.Strings(topics) + cl.blockingMetadataFn(func() { + cl.consumer.purgeTopics(topics) + }) +} + +// Parse broker IP/host and port from a string, using the default Kafka port if +// unspecified. Supported address formats: +// +// - IPv4 host/IP without port: "127.0.0.1", "localhost" +// - IPv4 host/IP with port: "127.0.0.1:1234", "localhost:1234" +// - IPv6 IP without port: "[2001:1000:2000::1]", "::1" +// - IPv6 IP with port: "[2001:1000:2000::1]:1234" +func parseBrokerAddr(addr string) (hostport, error) { + const defaultKafkaPort = 9092 + + // Bracketed IPv6 + if strings.IndexByte(addr, '[') == 0 { + parts := strings.Split(addr[1:], "]") + if len(parts) != 2 { + return hostport{}, fmt.Errorf("invalid addr: %s", addr) + } + // No port specified -> use default + if len(parts[1]) == 0 { + return hostport{parts[0], defaultKafkaPort}, nil + } + port, err := strconv.ParseInt(parts[1][1:], 10, 32) + if err != nil { + return hostport{}, fmt.Errorf("unable to parse port from addr: %w", err) + } + return hostport{parts[0], int32(port)}, nil + } + + // IPv4 with no port + if strings.IndexByte(addr, ':') == -1 { + return hostport{addr, defaultKafkaPort}, nil + } + + // Either a IPv6 literal ("::1"), IP:port or host:port + // Try to parse as IP:port or host:port + h, p, err := net.SplitHostPort(addr) + if err != nil { + return hostport{addr, defaultKafkaPort}, nil //nolint:nilerr // ipv6 literal -- use default kafka port + } + port, err := strconv.ParseInt(p, 10, 32) + if err != nil { + return hostport{}, fmt.Errorf("unable to parse port from addr: %w", err) + } + return hostport{h, int32(port)}, nil +} + +type connTimeouter struct { + def time.Duration + joinMu sync.Mutex + lastRebalanceTimeout time.Duration +} + +func (c *connTimeouter) timeouts(req kmsg.Request) (r, w time.Duration) { + def := c.def + millis := func(m int32) time.Duration { return time.Duration(m) * time.Millisecond } + switch t := req.(type) { + default: + if timeoutRequest, ok := req.(kmsg.TimeoutRequest); ok { + timeoutMillis := 
timeoutRequest.Timeout() + return def + millis(timeoutMillis), def + } + return def, def + + case *produceRequest: + return def + millis(t.timeout), def + case *fetchRequest: + return def + millis(t.maxWait), def + case *kmsg.FetchRequest: + return def + millis(t.MaxWaitMillis), def + + // Join and sync can take a long time. Sync has no notion of + // timeouts, but since the flow of requests should be first + // join, then sync, we can stash the timeout from the join. + + case *kmsg.JoinGroupRequest: + c.joinMu.Lock() + c.lastRebalanceTimeout = millis(t.RebalanceTimeoutMillis) + c.joinMu.Unlock() + + return def + millis(t.RebalanceTimeoutMillis), def + case *kmsg.SyncGroupRequest: + read := def + c.joinMu.Lock() + if c.lastRebalanceTimeout != 0 { + read = c.lastRebalanceTimeout + } + c.joinMu.Unlock() + + return read, def + } +} + +func (cl *Client) reinitAnyBrokerOrd() { + cl.anyBrokerOrd = append(cl.anyBrokerOrd[:0], make([]int32, len(cl.brokers))...) + for i := range cl.anyBrokerOrd { + cl.anyBrokerOrd[i] = int32(i) + } + cl.rng(func(r *rand.Rand) { + r.Shuffle(len(cl.anyBrokerOrd), func(i, j int) { + cl.anyBrokerOrd[i], cl.anyBrokerOrd[j] = cl.anyBrokerOrd[j], cl.anyBrokerOrd[i] + }) + }) +} + +// broker returns a random broker from all brokers ever known. +func (cl *Client) broker() *broker { + cl.brokersMu.Lock() + defer cl.brokersMu.Unlock() + + // Every time we loop through all discovered brokers, we issue one + // request to the next seed. This ensures that if all discovered + // brokers are down, we will *eventually* loop through seeds and + // hopefully have a reachable seed. + var b *broker + + if len(cl.anyBrokerOrd) > 0 { + b = cl.brokers[cl.anyBrokerOrd[0]] + cl.anyBrokerOrd = cl.anyBrokerOrd[1:] + return b + } + + seeds := cl.loadSeeds() + cl.anySeedIdx %= int32(len(seeds)) + b = seeds[cl.anySeedIdx] + cl.anySeedIdx++ + + // If we have brokers, we ranged past discovered brokers. + // We now reset the anyBrokerOrd to begin ranging through + // discovered brokers again. If there are still no brokers, + // this reinit will do nothing and we will keep looping seeds. + cl.reinitAnyBrokerOrd() + return b +} + +func (cl *Client) waitTries(ctx context.Context, backoff time.Duration) bool { + after := time.NewTimer(backoff) + defer after.Stop() + select { + case <-ctx.Done(): + return false + case <-cl.ctx.Done(): + return false + case <-after.C: + return true + } +} + +// A broker may sometimes indicate it supports offset for leader epoch v2+ when +// it does not. We need to catch that and avoid issuing offset for leader +// epoch, because we will just loop continuously failing. We do not catch every +// case, such as when a person explicitly assigns offsets with epochs, but we +// catch a few areas that would be returned from a broker itself. +// +// This function is always used *after* at least one request has been issued. +// +// NOTE: This is a weak check; we check if any broker in the cluster supports +// the request. We use this function in three locations: +// +// 1. When using the LeaderEpoch returned in a metadata response. This guards +// against buggy brokers that return 0 rather than -1 even if they do not +// support OffsetForLeaderEpoch. If any support, the cluster is in the +// middle of an upgrade and we can start using the epoch. +// 2. When deciding whether to keep LeaderEpoch from fetched offsets. +// Realistically, clients should only commit epochs if the cluster supports +// them. +// 3. 
When receiving OffsetOutOfRange when follower fetching and we fetched +// past the end. +// +// In any of these cases, if we OffsetForLeaderEpoch against a broker that does +// not support (even though one in the cluster does), we will loop fail until +// the rest of the cluster is upgraded and supports the request. +func (cl *Client) supportsOffsetForLeaderEpoch() bool { + return cl.supportsKeyVersion(int16(kmsg.OffsetForLeaderEpoch), 2) +} + +// A broker may not support some requests we want to make. This function checks +// support. This should only be used *after* at least one successful response. +func (cl *Client) supportsKeyVersion(key, version int16) bool { + cl.brokersMu.RLock() + defer cl.brokersMu.RUnlock() + + for _, brokers := range [][]*broker{ + cl.brokers, + cl.loadSeeds(), + } { + for _, b := range brokers { + if v := b.loadVersions(); v != nil && v.versions[key] >= version { + return true + } + } + } + return false +} + +// fetchBrokerMetadata issues a metadata request solely for broker information. +func (cl *Client) fetchBrokerMetadata(ctx context.Context) error { + cl.fetchingBrokersMu.Lock() + wait := cl.fetchingBrokers + if wait != nil { + cl.fetchingBrokersMu.Unlock() + <-wait.done + return wait.err + } + wait = &struct { + done chan struct{} + err error + }{done: make(chan struct{})} + cl.fetchingBrokers = wait + cl.fetchingBrokersMu.Unlock() + + defer func() { + cl.fetchingBrokersMu.Lock() + defer cl.fetchingBrokersMu.Unlock() + cl.fetchingBrokers = nil + close(wait.done) + }() + + _, _, wait.err = cl.fetchMetadata(ctx, kmsg.NewPtrMetadataRequest(), true) + return wait.err +} + +func (cl *Client) fetchMetadataForTopics(ctx context.Context, all bool, topics []string) (*broker, *kmsg.MetadataResponse, error) { + req := kmsg.NewPtrMetadataRequest() + req.AllowAutoTopicCreation = cl.cfg.allowAutoTopicCreation + if all { + req.Topics = nil + } else if len(topics) == 0 { + req.Topics = []kmsg.MetadataRequestTopic{} + } else { + for _, topic := range topics { + reqTopic := kmsg.NewMetadataRequestTopic() + reqTopic.Topic = kmsg.StringPtr(topic) + req.Topics = append(req.Topics, reqTopic) + } + } + return cl.fetchMetadata(ctx, req, true) +} + +func (cl *Client) fetchMetadata(ctx context.Context, req *kmsg.MetadataRequest, limitRetries bool) (*broker, *kmsg.MetadataResponse, error) { + r := cl.retryable() + + // We limit retries for internal metadata refreshes, because these do + // not need to retry forever and are usually blocking *other* requests. + // e.g., producing bumps load errors when metadata returns, so 3 + // failures here will correspond to 1 bumped error count. To make the + // number more accurate, we should *never* retry here, but this is + // pretty intolerant of immediately-temporary network issues. Rather, + // we use a small count of 3 retries, which with the default backoff, + // will be <2s of retrying. This is still intolerant of temporary + // failures, but it does allow recovery from a dns issue / bad path. + if limitRetries { + r.limitRetries = 3 + } + + meta, err := req.RequestWith(ctx, r) + if err == nil { + if meta.ControllerID >= 0 { + cl.controllerIDMu.Lock() + cl.controllerID = meta.ControllerID + cl.controllerIDMu.Unlock() + } + cl.updateBrokers(meta.Brokers) + } + return r.last, meta, err +} + +// updateBrokers is called with the broker portion of every metadata response. +// All metadata responses contain all known live brokers, so we can always +// use the response. 
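The metadata plumbing above (fetchMetadata, updateBrokers) is also reachable from outside the package: a raw MetadataRequest issued through Request is hijacked so that the client refreshes its own broker list and controller ID as a side effect. A sketch that lists brokers and the controller; the seed address is a placeholder:

```go
package main

import (
	"context"
	"log"

	"github.com/twmb/franz-go/pkg/kgo"
	"github.com/twmb/franz-go/pkg/kmsg"
)

func main() {
	cl, err := kgo.NewClient(kgo.SeedBrokers("localhost:9092")) // placeholder seed
	if err != nil {
		log.Fatal(err)
	}
	defer cl.Close()

	// A nil Topics field requests metadata for all topics; an empty,
	// non-nil slice requests none (brokers and controller only).
	req := kmsg.NewPtrMetadataRequest()
	req.Topics = []kmsg.MetadataRequestTopic{}

	resp, err := cl.Request(context.Background(), req)
	if err != nil {
		log.Fatal(err)
	}
	meta := resp.(*kmsg.MetadataResponse)
	for _, b := range meta.Brokers {
		log.Printf("broker %d at %s:%d", b.NodeID, b.Host, b.Port)
	}
	log.Printf("controller: %d", meta.ControllerID)
}
```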
+func (cl *Client) updateBrokers(brokers []kmsg.MetadataResponseBroker) { + sort.Slice(brokers, func(i, j int) bool { return brokers[i].NodeID < brokers[j].NodeID }) + newBrokers := make([]*broker, 0, len(brokers)) + + cl.brokersMu.Lock() + defer cl.brokersMu.Unlock() + + if cl.stopBrokers { + return + } + + for len(brokers) > 0 && len(cl.brokers) > 0 { + ob := cl.brokers[0] + nb := brokers[0] + + switch { + case ob.meta.NodeID < nb.NodeID: + ob.stopForever() + cl.brokers = cl.brokers[1:] + + case ob.meta.NodeID == nb.NodeID: + if !ob.meta.equals(nb) { + ob.stopForever() + ob = cl.newBroker(nb.NodeID, nb.Host, nb.Port, nb.Rack) + } + newBrokers = append(newBrokers, ob) + cl.brokers = cl.brokers[1:] + brokers = brokers[1:] + + case ob.meta.NodeID > nb.NodeID: + newBrokers = append(newBrokers, cl.newBroker(nb.NodeID, nb.Host, nb.Port, nb.Rack)) + brokers = brokers[1:] + } + } + + for len(cl.brokers) > 0 { + ob := cl.brokers[0] + ob.stopForever() + cl.brokers = cl.brokers[1:] + } + + for len(brokers) > 0 { + nb := brokers[0] + newBrokers = append(newBrokers, cl.newBroker(nb.NodeID, nb.Host, nb.Port, nb.Rack)) + brokers = brokers[1:] + } + + cl.brokers = newBrokers + cl.reinitAnyBrokerOrd() +} + +// CloseAllowingRebalance allows rebalances, leaves any group, and closes all +// connections and goroutines. This function is only useful if you are using +// the BlockRebalanceOnPoll option. Close itself does not allow rebalances and +// will hang if you polled, did not allow rebalances, and want to close. Close +// does not automatically allow rebalances because leaving a group causes a +// revoke, and the client does not assume that the final revoke is concurrency +// safe. The CloseAllowingRebalance function exists a a shortcut to opt into +// allowing rebalance while closing. +func (cl *Client) CloseAllowingRebalance() { + cl.AllowRebalance() + cl.Close() +} + +// Close leaves any group and closes all connections and goroutines. This +// function waits for the group to be left. If you want to force leave a group +// immediately and ensure a speedy shutdown you can use LeaveGroupContext first +// (and then Close will be immediate). +// +// If you are group consuming and have overridden the default +// OnPartitionsRevoked, you must manually commit offsets before closing the +// client. +// +// If you are using the BlockRebalanceOnPoll option and have polled, this +// function does not automatically allow rebalancing. You must AllowRebalance +// before calling this function. Internally, this function leaves the group, +// and leaving a group causes a rebalance so that you can get one final +// notification of revoked partitions. If you want to automatically allow +// rebalancing, use CloseAllowingRebalance. +func (cl *Client) Close() { + cl.close(cl.ctx) +} + +func (cl *Client) close(ctx context.Context) (rerr error) { + defer cl.cfg.hooks.each(func(h Hook) { + if h, ok := h.(HookClientClosed); ok { + h.OnClientClosed(cl) + } + }) + + c := &cl.consumer + c.kill.Store(true) + if c.g != nil { + rerr = cl.LeaveGroupContext(ctx) + } else if c.d != nil { + c.mu.Lock() // lock for assign + c.assignPartitions(nil, assignInvalidateAll, nil, "") // we do not use a log message when not in a group + c.mu.Unlock() + } + + // After the above, consumers cannot consume anymore. LeaveGroup + // internally assigns nil, which uses noConsumerSession, which prevents + // loopFetch from starting. Assigning also waits for the prior session + // to be complete, meaning loopFetch cannot be running. 
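The Close versus CloseAllowingRebalance distinction above only matters with BlockRebalanceOnPoll: after polling, the group cannot rebalance until the caller allows it, and since leaving the group triggers one final rebalance, a plain Close would hang. A sketch of the intended shutdown pattern; broker, group, and topic are placeholders, and the loop is bounded only to keep the sketch short:

```go
package main

import (
	"context"
	"log"

	"github.com/twmb/franz-go/pkg/kgo"
)

func main() {
	cl, err := kgo.NewClient(
		kgo.SeedBrokers("localhost:9092"), // placeholder seed
		kgo.ConsumerGroup("example-group"),
		kgo.ConsumeTopics("example-topic"),
		kgo.BlockRebalanceOnPoll(),
	)
	if err != nil {
		log.Fatal(err)
	}
	// Leaving the group causes one last rebalance, so closing must allow
	// rebalancing again; CloseAllowingRebalance bundles both steps.
	defer cl.CloseAllowingRebalance()

	ctx := context.Background()
	for i := 0; i < 3; i++ {
		fetches := cl.PollFetches(ctx)
		fetches.EachRecord(func(r *kgo.Record) {
			log.Printf("offset %d: %s", r.Offset, r.Value)
		})
		// Work on the polled records is done; let any pending rebalance
		// proceed before polling again.
		cl.AllowRebalance()
	}
}
```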
+ + sessCloseCtx, sessCloseCancel := context.WithTimeout(ctx, time.Second) + var wg sync.WaitGroup + cl.allSinksAndSources(func(sns sinkAndSource) { + if sns.source.session.id != 0 { + sns := sns + wg.Add(1) + go func() { + defer wg.Done() + sns.source.killSessionOnClose(sessCloseCtx) + }() + } + }) + wg.Wait() + sessCloseCancel() + + // Now we kill the client context and all brokers, ensuring all + // requests fail. This will finish all producer callbacks and + // stop the metadata loop. + cl.ctxCancel() + cl.brokersMu.Lock() + cl.stopBrokers = true + for _, broker := range cl.brokers { + broker.stopForever() + } + cl.brokersMu.Unlock() + for _, broker := range cl.loadSeeds() { + broker.stopForever() + } + + // Wait for metadata to quit so we know no more erroring topic + // partitions will be created. After metadata has quit, we can + // safely stop sinks and sources, as no more will be made. + <-cl.metadone + + for _, sns := range cl.sinksAndSources { + sns.sink.maybeDrain() // awaken anything in backoff + sns.source.maybeConsume() // same + } + + cl.failBufferedRecords(ErrClientClosed) + + // We need one final poll: if any sources buffered a fetch, then the + // manageFetchConcurrency loop only exits when all fetches have been + // drained, because draining a fetch is what decrements an "active" + // fetch. PollFetches with `nil` is instant. + cl.PollFetches(nil) + + for _, s := range cl.cfg.sasls { + if closing, ok := s.(sasl.ClosingMechanism); ok { + closing.Close() + } + } + + return rerr +} + +// Request issues a request to Kafka, waiting for and returning the response. +// If a retryable network error occurs, or if a retryable group / transaction +// coordinator error occurs, the request is retried. All other errors are +// returned. +// +// If the request is an admin request, this will issue it to the Kafka +// controller. If the controller ID is unknown, this will attempt to fetch it. +// If the fetch errors, this will return an unknown controller error. +// +// If the request is a group or transaction coordinator request, this will +// issue the request to the appropriate group or transaction coordinator. +// +// For transaction requests, the request is issued to the transaction +// coordinator. However, if the request is an init producer ID request and the +// request has no transactional ID, the request goes to any broker. +// +// Some requests need to be split and sent to many brokers. For these requests, +// it is *highly* recommended to use RequestSharded. Not all responses from +// many brokers can be cleanly merged. However, for the requests that are +// split, this does attempt to merge them in a sane way. +// +// The following requests are split: +// +// ListOffsets +// OffsetFetch (if using v8+ for Kafka 3.0+) +// FindCoordinator (if using v4+ for Kafka 3.0+) +// DescribeGroups +// ListGroups +// DeleteRecords +// OffsetForLeaderEpoch +// DescribeConfigs +// AlterConfigs +// AlterReplicaLogDirs +// DescribeLogDirs +// DeleteGroups +// IncrementalAlterConfigs +// DescribeProducers +// DescribeTransactions +// ListTransactions +// +// Kafka 3.0 introduced batch OffsetFetch and batch FindCoordinator requests. +// This function is forward and backward compatible: old requests will be +// batched as necessary, and batched requests will be split as necessary. It is +// recommended to always use batch requests for simplicity. +// +// In short, this method tries to do the correct thing depending on what type +// of request is being issued. 
+// +// The passed context can be used to cancel a request and return early. Note +// that if the request was written to Kafka but the context canceled before a +// response is received, Kafka may still operate on the received request. +// +// If using this function to issue kmsg.ProduceRequest's, you must configure +// the client with the same RequiredAcks option that you use in the request. +// If you are issuing produce requests with 0 acks, you must configure the +// client with the same timeout you use in the request. The client will +// internally rewrite the incoming request's acks to match the client's +// configuration, and it will rewrite the timeout millis if the acks is 0. It +// is strongly recommended to not issue raw kmsg.ProduceRequest's. +func (cl *Client) Request(ctx context.Context, req kmsg.Request) (kmsg.Response, error) { + resps, merge := cl.shardedRequest(ctx, req) + // If there is no merge function, only one request was issued directly + // to a broker. Return the resp and err directly. + if merge == nil { + return resps[0].Resp, resps[0].Err + } + return merge(resps) +} + +func (cl *Client) retryable() *retryable { + return cl.retryableBrokerFn(func() (*broker, error) { return cl.broker(), nil }) +} + +func (cl *Client) retryableBrokerFn(fn func() (*broker, error)) *retryable { + return &retryable{cl: cl, br: fn} +} + +func (cl *Client) shouldRetry(tries int, err error) bool { + return (kerr.IsRetriable(err) || isRetryableBrokerErr(err)) && int64(tries) < cl.cfg.retries +} + +func (cl *Client) shouldRetryNext(tries int, err error) bool { + return isSkippableBrokerErr(err) && int64(tries) < cl.cfg.retries +} + +type retryable struct { + cl *Client + br func() (*broker, error) + last *broker + + // If non-zero, limitRetries may specify a smaller # of retries than + // the client RequestRetries number. This is used for internal requests + // that can fail / do not need to retry forever. + limitRetries int + + // parseRetryErr, if non-nil, can delete stale cached brokers. We do + // *not* return the error from this function to the caller, but we do + // use it to potentially retry. It is not necessary, but also not + // harmful, to return the input error. + parseRetryErr func(kmsg.Response, error) error +} + +type failDial struct{ fails int8 } + +// The controller and group/txn coordinators are cached. If dialing the broker +// repeatedly fails, we need to forget our cache to force a re-load: the broker +// may have completely died. +func (d *failDial) isRepeatedDialFail(err error) bool { + if isAnyDialErr(err) { + d.fails++ + if d.fails == 3 { + d.fails = 0 + return true + } + } + return false +} + +func (r *retryable) Request(ctx context.Context, req kmsg.Request) (kmsg.Response, error) { + tries := 0 + tryStart := time.Now() + retryTimeout := r.cl.cfg.retryTimeout(req.Key()) + + next, nextErr := r.br() +start: + tries++ + br, err := next, nextErr + r.last = br + var resp kmsg.Response + var retryErr error + if err == nil { + resp, err = r.last.waitResp(ctx, req) + if r.parseRetryErr != nil { + retryErr = r.parseRetryErr(resp, err) + } + } + + if err != nil || retryErr != nil { + if r.limitRetries == 0 || tries < r.limitRetries { + backoff := r.cl.cfg.retryBackoff(tries) + if retryTimeout == 0 || time.Now().Add(backoff).Sub(tryStart) <= retryTimeout { + // If this broker / request had a retryable error, we can + // just retry now. 
If the error is *not* retryable but + // is a broker-specific network error, and the next + // broker is different than the current, we also retry. + if r.cl.shouldRetry(tries, err) || r.cl.shouldRetry(tries, retryErr) { + r.cl.cfg.logger.Log(LogLevelDebug, "retrying request", + "tries", tries, + "backoff", backoff, + "request_error", err, + "response_error", retryErr, + ) + if r.cl.waitTries(ctx, backoff) { + next, nextErr = r.br() + goto start + } + } else if r.cl.shouldRetryNext(tries, err) { + next, nextErr = r.br() + if next != br && r.cl.waitTries(ctx, backoff) { + goto start + } + } + } + } + } + return resp, err +} + +// ResponseShard ties together a request with either the response it received +// or an error that prevented a response from being received. +type ResponseShard struct { + // Meta contains the broker that this request was issued to, or an + // unknown (node ID -1) metadata if the request could not be issued. + // + // Requests can fail to even be issued if an appropriate broker cannot + // be loaded of if the client cannot understand the request. + Meta BrokerMetadata + + // Req is the request that was issued to this broker. + Req kmsg.Request + + // Resp is the response received from the broker, if any. + Resp kmsg.Response + + // Err, if non-nil, is the error that prevented a response from being + // received or the request from being issued. + Err error +} + +// RequestSharded performs the same logic as Request, but returns all responses +// from any broker that the request was split to. This always returns at least +// one shard. If the request does not need to be issued (describing no groups), +// this issues the request to a random broker just to ensure that one shard +// exists. +// +// There are only a few requests that are strongly recommended to explicitly +// use RequestSharded; the rest can by default use Request. These few requests +// are mentioned in the documentation for Request. +// +// If, in the process of splitting a request, some topics or partitions are +// found to not exist, or Kafka replies that a request should go to a broker +// that does not exist, all those non-existent pieces are grouped into one +// request to the first seed broker. This will show up as a seed broker node ID +// (min int32) and the response will likely contain purely errors. +// +// The response shards are ordered by broker metadata. +func (cl *Client) RequestSharded(ctx context.Context, req kmsg.Request) []ResponseShard { + resps, _ := cl.shardedRequest(ctx, req) + sort.Slice(resps, func(i, j int) bool { + l := &resps[i].Meta + r := &resps[j].Meta + + if l.NodeID < r.NodeID { + return true + } + if r.NodeID < l.NodeID { + return false + } + if l.Host < r.Host { + return true + } + if r.Host < l.Host { + return false + } + if l.Port < r.Port { + return true + } + if r.Port < l.Port { + return false + } + if l.Rack == nil { + return true + } + if r.Rack == nil { + return false + } + return *l.Rack < *r.Rack + }) + return resps +} + +type shardMerge func([]ResponseShard) (kmsg.Response, error) + +func (cl *Client) shardedRequest(ctx context.Context, req kmsg.Request) ([]ResponseShard, shardMerge) { + ctx, cancel := context.WithCancel(ctx) + done := make(chan struct{}) + defer close(done) + go func() { + defer cancel() + select { + case <-done: + case <-ctx.Done(): + case <-cl.ctx.Done(): + } + }() + + // First, handle any sharded request. 
This comes before the conditional + // below because this handles two group requests, which we do not want + // to fall into the handleCoordinatorReq logic. + switch t := req.(type) { + case *kmsg.ListOffsetsRequest, // key 2 + *kmsg.OffsetFetchRequest, // key 9 + *kmsg.FindCoordinatorRequest, // key 10 + *kmsg.DescribeGroupsRequest, // key 15 + *kmsg.ListGroupsRequest, // key 16 + *kmsg.DeleteRecordsRequest, // key 21 + *kmsg.OffsetForLeaderEpochRequest, // key 23 + *kmsg.AddPartitionsToTxnRequest, // key 24 + *kmsg.WriteTxnMarkersRequest, // key 27 + *kmsg.DescribeConfigsRequest, // key 32 + *kmsg.AlterConfigsRequest, // key 33 + *kmsg.AlterReplicaLogDirsRequest, // key 34 + *kmsg.DescribeLogDirsRequest, // key 35 + *kmsg.DeleteGroupsRequest, // key 42 + *kmsg.IncrementalAlterConfigsRequest, // key 44 + *kmsg.DescribeProducersRequest, // key 61 + *kmsg.DescribeTransactionsRequest, // key 65 + *kmsg.ListTransactionsRequest: // key 66 + return cl.handleShardedReq(ctx, req) + + case *kmsg.MetadataRequest: + // We hijack any metadata request so as to populate our + // own brokers and controller ID. + br, resp, err := cl.fetchMetadata(ctx, t, false) + return shards(shard(br, req, resp, err)), nil + + case kmsg.AdminRequest: + return shards(cl.handleAdminReq(ctx, t)), nil + + case kmsg.GroupCoordinatorRequest, + kmsg.TxnCoordinatorRequest: + return shards(cl.handleCoordinatorReq(ctx, t)), nil + + case *kmsg.ApiVersionsRequest: + // As of v3, software name and version are required. + // If they are missing, we use the config options. + if t.ClientSoftwareName == "" && t.ClientSoftwareVersion == "" { + dup := *t + dup.ClientSoftwareName = cl.cfg.softwareName + dup.ClientSoftwareVersion = cl.cfg.softwareVersion + req = &dup + } + } + + // All other requests not handled above can be issued to any broker + // with the default retryable logic. + r := cl.retryable() + resp, err := r.Request(ctx, req) + return shards(shard(r.last, req, resp, err)), nil +} + +func shard(br *broker, req kmsg.Request, resp kmsg.Response, err error) ResponseShard { + if br == nil { // the broker could be nil if loading the broker failed. + return ResponseShard{unknownBrokerMetadata, req, resp, err} + } + return ResponseShard{br.meta, req, resp, err} +} + +func shards(shard ...ResponseShard) []ResponseShard { + return shard +} + +func findBroker(candidates []*broker, node int32) *broker { + n := sort.Search(len(candidates), func(n int) bool { return candidates[n].meta.NodeID >= node }) + var b *broker + if n < len(candidates) { + c := candidates[n] + if c.meta.NodeID == node { + b = c + } + } + return b +} + +// brokerOrErr returns the broker for ID or the error if the broker does not +// exist. +// +// If tryLoad is true and the broker does not exist, this attempts a broker +// metadata load once before failing. If the metadata load fails, this returns +// that error. 
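The ApiVersionsRequest special case above fills in the client's configured software name and version when they are left empty, as required from v3 onward. A sketch of issuing the request directly; the ApiKeys field names are recalled from the kmsg package and worth double-checking, and the seed address is a placeholder:

```go
package main

import (
	"context"
	"log"

	"github.com/twmb/franz-go/pkg/kgo"
	"github.com/twmb/franz-go/pkg/kmsg"
)

func main() {
	cl, err := kgo.NewClient(kgo.SeedBrokers("localhost:9092")) // placeholder seed
	if err != nil {
		log.Fatal(err)
	}
	defer cl.Close()

	// Software name/version are left empty here; per the special case
	// above, the client substitutes its configured values before sending.
	req := kmsg.NewPtrApiVersionsRequest()
	resp, err := cl.Request(context.Background(), req)
	if err != nil {
		log.Fatal(err)
	}
	for _, k := range resp.(*kmsg.ApiVersionsResponse).ApiKeys {
		log.Printf("%s: v%d..v%d", kmsg.NameForKey(k.ApiKey), k.MinVersion, k.MaxVersion)
	}
}
```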
+func (cl *Client) brokerOrErr(ctx context.Context, id int32, err error) (*broker, error) { + if id < 0 { + return nil, err + } + + tryLoad := ctx != nil + tries := 0 +start: + var broker *broker + if id < 0 { + broker = findBroker(cl.loadSeeds(), id) + } else { + cl.brokersMu.RLock() + broker = findBroker(cl.brokers, id) + cl.brokersMu.RUnlock() + } + + if broker == nil { + if tryLoad { + if loadErr := cl.fetchBrokerMetadata(ctx); loadErr != nil { + return nil, loadErr + } + // We will retry loading up to two times, if we load broker + // metadata twice successfully but neither load has the broker + // we are looking for, then we say our broker does not exist. + tries++ + if tries < 2 { + goto start + } + } + return nil, err + } + return broker, nil +} + +// controller returns the controller broker, forcing a broker load if +// necessary. +func (cl *Client) controller(ctx context.Context) (b *broker, err error) { + get := func() int32 { + cl.controllerIDMu.Lock() + defer cl.controllerIDMu.Unlock() + return cl.controllerID + } + + defer func() { + if ec := (*errUnknownController)(nil); errors.As(err, &ec) { + cl.forgetControllerID(ec.id) + } + }() + + var id int32 + if id = get(); id < 0 { + if err := cl.fetchBrokerMetadata(ctx); err != nil { + return nil, err + } + if id = get(); id < 0 { + return nil, &errUnknownController{id} + } + } + + return cl.brokerOrErr(nil, id, &errUnknownController{id}) +} + +// forgetControllerID is called once an admin requests sees NOT_CONTROLLER. +func (cl *Client) forgetControllerID(id int32) { + cl.controllerIDMu.Lock() + defer cl.controllerIDMu.Unlock() + if cl.controllerID == id { + cl.controllerID = unknownControllerID + } +} + +const ( + coordinatorTypeGroup int8 = 0 + coordinatorTypeTxn int8 = 1 +) + +type coordinatorKey struct { + name string + typ int8 +} + +type coordinatorLoad struct { + loadWait chan struct{} + node int32 + err error +} + +func (cl *Client) loadCoordinator(ctx context.Context, typ int8, key string) (*broker, error) { + berr := cl.loadCoordinators(ctx, typ, key)[key] + return berr.b, berr.err +} + +func (cl *Client) loadCoordinators(ctx context.Context, typ int8, keys ...string) map[string]brokerOrErr { + mch := make(chan map[string]brokerOrErr, 1) + go func() { mch <- cl.doLoadCoordinators(ctx, typ, keys...) }() + select { + case m := <-mch: + return m + case <-ctx.Done(): + m := make(map[string]brokerOrErr, len(keys)) + for _, k := range keys { + m[k] = brokerOrErr{nil, ctx.Err()} + } + return m + } +} + +// doLoadCoordinators uses the caller context to cancel loading metadata +// (brokerOrErr), but we use the client context to actually issue the request. +// There should be only one direct call to doLoadCoordinators, just above in +// loadCoordinator. It is possible for two requests to be loading the same +// coordinator (in fact, that's the point of this function -- collapse these +// requests). We do not want the first request canceling it's context to cause +// errors for the second request. +// +// It is ok to leave FindCoordinator running even if the caller quits. Worst +// case, we just cache things for some time in the future; yay. 
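maybeDeleteStaleCoordinator, further below, keys off three kerr sentinel errors to decide that a cached coordinator should be forgotten. The same pattern works in application code: kerr.ErrorForCode turns a response error code into a typed error that errors.Is can match. A small sketch; the helper name is illustrative:

```go
package main

import (
	"errors"
	"fmt"

	"github.com/twmb/franz-go/pkg/kerr"
)

// staleCoordinator reports whether a response error code means a cached
// coordinator is stale, mirroring maybeDeleteStaleCoordinator below.
func staleCoordinator(code int16) bool {
	err := kerr.ErrorForCode(code) // nil for code 0
	return errors.Is(err, kerr.CoordinatorNotAvailable) ||
		errors.Is(err, kerr.CoordinatorLoadInProgress) ||
		errors.Is(err, kerr.NotCoordinator)
}

func main() {
	// 16 is NOT_COORDINATOR in the Kafka protocol.
	fmt.Println(staleCoordinator(16), staleCoordinator(0))
	// Retriability is a separate axis, used by the client's retry logic.
	fmt.Println(kerr.IsRetriable(kerr.NotCoordinator))
}
```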
+func (cl *Client) doLoadCoordinators(ctx context.Context, typ int8, keys ...string) map[string]brokerOrErr { + m := make(map[string]brokerOrErr, len(keys)) + if len(keys) == 0 { + return m + } + + toRequest := make(map[string]bool, len(keys)) // true == bypass the cache + for _, key := range keys { + toRequest[key] = false + } + + // For each of these keys, we have two cases: + // + // 1) The key is cached. It is either loading or loaded. We do not + // request the key ourselves; we wait for the load to finish. + // + // 2) The key is not cached, and we request it. + // + // If a key is cached but the coordinator no longer exists for us, we + // re-request to refresh the coordinator by setting toRequest[key] to + // true (bypass cache). + // + // If we ever request a key ourselves, we do not request it again. We + // ensure this by deleting from toRequest. We also delete if the key + // was cached with no error. + // + // We could have some keys cached and some that need to be requested. + // We issue a request but do not request what is cached. + // + // Lastly, we only ever trigger one metadata update, which happens if + // we have an unknown coordinator after we load coordinators. + var hasLoadedBrokers bool + for len(toRequest) > 0 { + var loadWait chan struct{} + load2key := make(map[*coordinatorLoad][]string) + + cl.coordinatorsMu.Lock() + for key, bypassCache := range toRequest { + c, ok := cl.coordinators[coordinatorKey{key, typ}] + if !ok || bypassCache { + if loadWait == nil { + loadWait = make(chan struct{}) + } + c = &coordinatorLoad{ + loadWait: loadWait, + err: errors.New("coordinator was not returned in broker response"), + } + cl.coordinators[coordinatorKey{key, typ}] = c + } + load2key[c] = append(load2key[c], key) + } + cl.coordinatorsMu.Unlock() + + if loadWait == nil { // all coordinators were cached + hasLoadedBrokers = cl.waitCoordinatorLoad(ctx, typ, load2key, !hasLoadedBrokers, toRequest, m) + continue + } + + key2load := make(map[string]*coordinatorLoad) + req := kmsg.NewPtrFindCoordinatorRequest() + req.CoordinatorType = typ + for c, keys := range load2key { + if c.loadWait == loadWait { // if this is our wait, this is ours to request + req.CoordinatorKeys = append(req.CoordinatorKeys, keys...) + for _, key := range keys { + key2load[key] = c + delete(toRequest, key) + } + } + } + + cl.cfg.logger.Log(LogLevelDebug, "prepared to issue find coordinator request", + "coordinator_type", typ, + "coordinator_keys", req.CoordinatorKeys, + ) + + shards := cl.RequestSharded(cl.ctx, req) + + for _, shard := range shards { + if shard.Err != nil { + req := shard.Req.(*kmsg.FindCoordinatorRequest) + for _, key := range req.CoordinatorKeys { + c, ok := key2load[key] + if ok { + c.err = shard.Err + } + } + } else { + resp := shard.Resp.(*kmsg.FindCoordinatorResponse) + for _, rc := range resp.Coordinators { + c, ok := key2load[rc.Key] + if ok { + c.err = kerr.ErrorForCode(rc.ErrorCode) + c.node = rc.NodeID + } + } + } + } + + // For anything we loaded, if it has a load failure (including + // not being replied to), we remove the key from the cache. We + // do not want to cache erroring values. + // + // We range key2load, which contains only coordinators we are + // responsible for loading. 
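doLoadCoordinators above builds a batch FindCoordinator request and issues it through RequestSharded, so it works against both old brokers (one key per request) and new brokers (batched keys). The same request can be issued directly; a sketch that looks up a group's coordinator, with the group name and seed address as placeholders:

```go
package main

import (
	"context"
	"log"

	"github.com/twmb/franz-go/pkg/kerr"
	"github.com/twmb/franz-go/pkg/kgo"
	"github.com/twmb/franz-go/pkg/kmsg"
)

func main() {
	cl, err := kgo.NewClient(kgo.SeedBrokers("localhost:9092")) // placeholder seed
	if err != nil {
		log.Fatal(err)
	}
	defer cl.Close()

	req := kmsg.NewPtrFindCoordinatorRequest()
	req.CoordinatorType = 0                           // 0 = group coordinator, 1 = txn coordinator
	req.CoordinatorKeys = []string{"example-group"}   // placeholder group

	// RequestSharded splits or batches the keys as the broker version
	// requires and returns one shard per broker contacted.
	for _, shard := range cl.RequestSharded(context.Background(), req) {
		if shard.Err != nil {
			log.Printf("broker %d: %v", shard.Meta.NodeID, shard.Err)
			continue
		}
		for _, c := range shard.Resp.(*kmsg.FindCoordinatorResponse).Coordinators {
			if err := kerr.ErrorForCode(c.ErrorCode); err != nil {
				log.Printf("%s: %v", c.Key, err)
				continue
			}
			log.Printf("coordinator for %s is node %d", c.Key, c.NodeID)
		}
	}
}
```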
+ cl.coordinatorsMu.Lock() + for key, c := range key2load { + if c.err != nil { + ck := coordinatorKey{key, typ} + if loading, ok := cl.coordinators[ck]; ok && loading == c { + delete(cl.coordinators, ck) + } + } + } + cl.coordinatorsMu.Unlock() + + close(loadWait) + hasLoadedBrokers = cl.waitCoordinatorLoad(ctx, typ, load2key, !hasLoadedBrokers, toRequest, m) + } + return m +} + +// After some prep work, we wait for coordinators to load. We update toRequest +// values with true if the caller should bypass cache and re-load these +// coordinators. +// +// This returns if we load brokers, and populates m with results. +func (cl *Client) waitCoordinatorLoad(ctx context.Context, typ int8, load2key map[*coordinatorLoad][]string, shouldLoadBrokers bool, toRequest map[string]bool, m map[string]brokerOrErr) bool { + var loadedBrokers bool + for c, keys := range load2key { + <-c.loadWait + for _, key := range keys { + if c.err != nil { + delete(toRequest, key) + m[key] = brokerOrErr{nil, c.err} + continue + } + + var brokerCtx context.Context + if shouldLoadBrokers && !loadedBrokers { + brokerCtx = ctx + loadedBrokers = true + } + + b, err := cl.brokerOrErr(brokerCtx, c.node, &errUnknownCoordinator{c.node, coordinatorKey{key, typ}}) + if err != nil { + if _, exists := toRequest[key]; exists { + toRequest[key] = true + continue + } + // If the key does not exist, we just loaded this + // coordinator and also the brokers. We do not + // re-request. + } + delete(toRequest, key) + m[key] = brokerOrErr{b, err} + } + } + return loadedBrokers +} + +func (cl *Client) maybeDeleteStaleCoordinator(name string, typ int8, err error) bool { + switch { + case errors.Is(err, kerr.CoordinatorNotAvailable), + errors.Is(err, kerr.CoordinatorLoadInProgress), + errors.Is(err, kerr.NotCoordinator): + cl.deleteStaleCoordinator(name, typ) + return true + } + return false +} + +func (cl *Client) deleteStaleCoordinator(name string, typ int8) { + cl.coordinatorsMu.Lock() + defer cl.coordinatorsMu.Unlock() + k := coordinatorKey{name, typ} + v := cl.coordinators[k] + if v == nil { + return + } + select { + case <-v.loadWait: + delete(cl.coordinators, k) + default: + // We are actively reloading this coordinator. + } +} + +type brokerOrErr struct { + b *broker + err error +} + +func (cl *Client) handleAdminReq(ctx context.Context, req kmsg.Request) ResponseShard { + // Loading a controller can perform some wait; we accept that and do + // not account for the retries or the time to load the controller as + // part of the retries / time to issue the req. + r := cl.retryableBrokerFn(func() (*broker, error) { + return cl.controller(ctx) + }) + + // The only request that can break mapped metadata is CreatePartitions, + // because our mapping will still be "valid" but behind the scenes, + // more partitions exist. If CreatePartitions is going through this + // client, we preemptively delete any mapping for these topics. + if t, ok := req.(*kmsg.CreatePartitionsRequest); ok { + var topics []string + for i := range t.Topics { + topics = append(topics, t.Topics[i].Topic) + } + cl.maybeDeleteMappedMetadata(false, topics...) 
+ } + + var d failDial + r.parseRetryErr = func(resp kmsg.Response, err error) error { + if err != nil { + if d.isRepeatedDialFail(err) { + cl.forgetControllerID(r.last.meta.NodeID) + } + return err + } + var code int16 + switch t := resp.(type) { + case *kmsg.CreateTopicsResponse: + if len(t.Topics) > 0 { + code = t.Topics[0].ErrorCode + } + case *kmsg.DeleteTopicsResponse: + if len(t.Topics) > 0 { + code = t.Topics[0].ErrorCode + } + case *kmsg.CreatePartitionsResponse: + if len(t.Topics) > 0 { + code = t.Topics[0].ErrorCode + } + case *kmsg.ElectLeadersResponse: + if len(t.Topics) > 0 && len(t.Topics[0].Partitions) > 0 { + code = t.Topics[0].Partitions[0].ErrorCode + } + case *kmsg.AlterPartitionAssignmentsResponse: + code = t.ErrorCode + case *kmsg.ListPartitionReassignmentsResponse: + code = t.ErrorCode + case *kmsg.AlterUserSCRAMCredentialsResponse: + if len(t.Results) > 0 { + code = t.Results[0].ErrorCode + } + case *kmsg.VoteResponse: + code = t.ErrorCode + case *kmsg.BeginQuorumEpochResponse: + code = t.ErrorCode + case *kmsg.EndQuorumEpochResponse: + code = t.ErrorCode + case *kmsg.DescribeQuorumResponse: + code = t.ErrorCode + case *kmsg.AlterPartitionResponse: + code = t.ErrorCode + case *kmsg.UpdateFeaturesResponse: + code = t.ErrorCode + case *kmsg.EnvelopeResponse: + code = t.ErrorCode + } + if err := kerr.ErrorForCode(code); errors.Is(err, kerr.NotController) { + // There must be a last broker if we were able to issue + // the request and get a response. + cl.forgetControllerID(r.last.meta.NodeID) + return err + } + return nil + } + + resp, err := r.Request(ctx, req) + return shard(r.last, req, resp, err) +} + +// handleCoordinatorReq issues simple (non-shardable) group or txn requests. +func (cl *Client) handleCoordinatorReq(ctx context.Context, req kmsg.Request) ResponseShard { + switch t := req.(type) { + default: + // All group requests should be listed below, so if it isn't, + // then we do not know what this request is. + return shard(nil, req, nil, errors.New("client is too old; this client does not know what to do with this request")) + + ///////// + // TXN // -- all txn reqs are simple + ///////// + + case *kmsg.InitProducerIDRequest: + if t.TransactionalID != nil { + return cl.handleCoordinatorReqSimple(ctx, coordinatorTypeTxn, *t.TransactionalID, req) + } + // InitProducerID can go to any broker if the transactional ID + // is nil. By using handleReqWithCoordinator, we get the + // retryable-error parsing, even though we are not actually + // using a defined txn coordinator. This is fine; by passing no + // names, we delete no coordinator. 
+ coordinator, resp, err := cl.handleReqWithCoordinator(ctx, func() (*broker, error) { return cl.broker(), nil }, coordinatorTypeTxn, "", req) + return shard(coordinator, req, resp, err) + case *kmsg.AddOffsetsToTxnRequest: + return cl.handleCoordinatorReqSimple(ctx, coordinatorTypeTxn, t.TransactionalID, req) + case *kmsg.EndTxnRequest: + return cl.handleCoordinatorReqSimple(ctx, coordinatorTypeTxn, t.TransactionalID, req) + + /////////// + // GROUP // -- most group reqs are simple + /////////// + + case *kmsg.OffsetCommitRequest: + return cl.handleCoordinatorReqSimple(ctx, coordinatorTypeGroup, t.Group, req) + case *kmsg.TxnOffsetCommitRequest: + return cl.handleCoordinatorReqSimple(ctx, coordinatorTypeGroup, t.Group, req) + case *kmsg.JoinGroupRequest: + return cl.handleCoordinatorReqSimple(ctx, coordinatorTypeGroup, t.Group, req) + case *kmsg.HeartbeatRequest: + return cl.handleCoordinatorReqSimple(ctx, coordinatorTypeGroup, t.Group, req) + case *kmsg.LeaveGroupRequest: + return cl.handleCoordinatorReqSimple(ctx, coordinatorTypeGroup, t.Group, req) + case *kmsg.SyncGroupRequest: + return cl.handleCoordinatorReqSimple(ctx, coordinatorTypeGroup, t.Group, req) + case *kmsg.OffsetDeleteRequest: + return cl.handleCoordinatorReqSimple(ctx, coordinatorTypeGroup, t.Group, req) + } +} + +// handleCoordinatorReqSimple issues a request that contains a single group or +// txn to its coordinator. +// +// The error is inspected to see if it is a retryable error and, if so, the +// coordinator is deleted. +func (cl *Client) handleCoordinatorReqSimple(ctx context.Context, typ int8, name string, req kmsg.Request) ResponseShard { + coordinator, resp, err := cl.handleReqWithCoordinator(ctx, func() (*broker, error) { + return cl.loadCoordinator(ctx, typ, name) + }, typ, name, req) + return shard(coordinator, req, resp, err) +} + +// handleReqWithCoordinator actually issues a request to a coordinator and +// does retry handling. +// +// This avoids retries on the two group requests that need to be sharded. +func (cl *Client) handleReqWithCoordinator( + ctx context.Context, + coordinator func() (*broker, error), + typ int8, + name string, // group ID or the transactional id + req kmsg.Request, +) (*broker, kmsg.Response, error) { + r := cl.retryableBrokerFn(coordinator) + var d failDial + r.parseRetryErr = func(resp kmsg.Response, err error) error { + if err != nil { + if d.isRepeatedDialFail(err) { + cl.deleteStaleCoordinator(name, typ) + } + return err + } + var code int16 + switch t := resp.(type) { + // TXN + case *kmsg.InitProducerIDResponse: + code = t.ErrorCode + case *kmsg.AddOffsetsToTxnResponse: + code = t.ErrorCode + case *kmsg.EndTxnResponse: + code = t.ErrorCode + + // GROUP + case *kmsg.OffsetCommitResponse: + if len(t.Topics) > 0 && len(t.Topics[0].Partitions) > 0 { + code = t.Topics[0].Partitions[0].ErrorCode + } + case *kmsg.TxnOffsetCommitResponse: + if len(t.Topics) > 0 && len(t.Topics[0].Partitions) > 0 { + code = t.Topics[0].Partitions[0].ErrorCode + } + case *kmsg.JoinGroupResponse: + code = t.ErrorCode + case *kmsg.HeartbeatResponse: + code = t.ErrorCode + case *kmsg.LeaveGroupResponse: + code = t.ErrorCode + case *kmsg.SyncGroupResponse: + code = t.ErrorCode + } + + // ListGroups, OffsetFetch, DeleteGroups, DescribeGroups, and + // DescribeTransactions handled in sharding. 
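+		//
+		// If the code maps to a retryable coordinator error
+		// (NOT_COORDINATOR, COORDINATOR_NOT_AVAILABLE, or
+		// COORDINATOR_LOAD_IN_PROGRESS), the check below also evicts the
+		// cached coordinator so that the retry re-discovers it.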
+
+		if err := kerr.ErrorForCode(code); cl.maybeDeleteStaleCoordinator(name, typ, err) {
+			return err
+		}
+		return nil
+	}
+
+	resp, err := r.Request(ctx, req)
+	return r.last, resp, err
+}
+
+// Broker returns a handle to a specific broker to directly issue requests to.
+// Note that there is no guarantee that this broker exists; if it does not,
+// requests will fail with an unknown broker error.
+func (cl *Client) Broker(id int) *Broker {
+	return &Broker{
+		id: int32(id),
+		cl: cl,
+	}
+}
+
+// DiscoveredBrokers returns all brokers that were discovered from prior
+// metadata responses. This does not actually issue a metadata request to load
+// brokers; if you wish to ensure this returns all brokers, be sure to manually
+// issue a metadata request before this. This also does not include seed
+// brokers, which are internally saved under special internal broker IDs (but,
+// it does include those brokers under their normal IDs as returned from a
+// metadata response).
+func (cl *Client) DiscoveredBrokers() []*Broker {
+	cl.brokersMu.RLock()
+	defer cl.brokersMu.RUnlock()
+
+	var bs []*Broker
+	for _, broker := range cl.brokers {
+		bs = append(bs, &Broker{id: broker.meta.NodeID, cl: cl})
+	}
+	return bs
+}
+
+// SeedBrokers returns all seed brokers.
+func (cl *Client) SeedBrokers() []*Broker {
+	var bs []*Broker
+	for _, broker := range cl.loadSeeds() {
+		bs = append(bs, &Broker{id: broker.meta.NodeID, cl: cl})
+	}
+	return bs
+}
+
+// UpdateSeedBrokers updates the client's list of seed brokers. Over the course
+// of a long period of time, you might replace all brokers that you originally
+// specified as seeds. This command allows you to replace the client's list of
+// seeds.
+//
+// This returns an error if any of the input addrs is not a host:port. If the
+// input list is empty, the function returns without replacing the seeds.
+func (cl *Client) UpdateSeedBrokers(addrs ...string) error {
+	if len(addrs) == 0 {
+		return nil
+	}
+	seeds, err := parseSeeds(addrs)
+	if err != nil {
+		return err
+	}
+
+	seedBrokers := make([]*broker, 0, len(seeds))
+	for i, seed := range seeds {
+		b := cl.newBroker(unknownSeedID(i), seed.host, seed.port, nil)
+		seedBrokers = append(seedBrokers, b)
+	}
+
+	// We lock to guard against concurrently updating seeds; we do not need
+	// the lock for what this usually guards.
+	cl.brokersMu.Lock()
+	old := cl.loadSeeds()
+	cl.seeds.Store(seedBrokers)
+	cl.brokersMu.Unlock()
+
+	for _, b := range old {
+		b.stopForever()
+	}
+
+	return nil
+}
+
+// Broker pairs a broker ID with a client to directly issue requests to a
+// specific broker.
+type Broker struct {
+	id int32
+	cl *Client
+}
+
+// Request issues a request to a broker. If the broker does not exist in the
+// client, this returns an unknown broker error. Requests are not retried.
+//
+// The passed context can be used to cancel a request and return early.
+// Note that if the request is not canceled before it is written to Kafka,
+// you may just end up canceling and not receiving the response to what Kafka
+// inevitably does.
+//
+// It is more beneficial to always use RetriableRequest.
+func (b *Broker) Request(ctx context.Context, req kmsg.Request) (kmsg.Response, error) {
+	return b.request(ctx, false, req)
+}
+
+// RetriableRequest issues a request to a broker the same as Request, but
+// retries in the face of retryable broker connection errors. This does not
+// retry on response internal errors.
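+//
+// A hedged usage sketch (the request type here is illustrative; any kmsg
+// request works):
+//
+//	req := kmsg.NewPtrApiVersionsRequest()
+//	resp, err := cl.Broker(1).RetriableRequest(ctx, req)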
+func (b *Broker) RetriableRequest(ctx context.Context, req kmsg.Request) (kmsg.Response, error) { + return b.request(ctx, true, req) +} + +func (b *Broker) request(ctx context.Context, retry bool, req kmsg.Request) (kmsg.Response, error) { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + var resp kmsg.Response + var err error + done := make(chan struct{}) + + go func() { + defer close(done) + + if !retry { + var br *broker + br, err = b.cl.brokerOrErr(ctx, b.id, errUnknownBroker) + if err == nil { + resp, err = br.waitResp(ctx, req) + } + } else { + resp, err = b.cl.retryableBrokerFn(func() (*broker, error) { + return b.cl.brokerOrErr(ctx, b.id, errUnknownBroker) + }).Request(ctx, req) + } + }() + + select { + case <-done: + return resp, err + case <-ctx.Done(): + return nil, ctx.Err() + case <-b.cl.ctx.Done(): + return nil, b.cl.ctx.Err() + } +} + +////////////////////// +// REQUEST SHARDING // +////////////////////// + +// Below here lies all logic to handle requests that need to be split and sent +// to many brokers. A lot of the logic for each sharding function is very +// similar, but each sharding function uses slightly different types. + +// issueShard is a request that has been split and is ready to be sent to the +// given broker ID. +type issueShard struct { + req kmsg.Request + broker int32 + any bool + + // if non-nil, we could not map this request shard to any broker, and + // this error is the reason. + err error +} + +// sharder splits a request. +type sharder interface { + // shard splits a request and returns the requests to issue tied to the + // brokers to issue the requests to. This can return an error if there + // is some pre-loading that needs to happen. If an error is returned, + // the request that was intended for splitting is failed wholesale. + // + // Due to sharded requests not being retryable if a response is + // received, to avoid stale coordinator errors, this function should + // not use any previously cached metadata. + // + // This takes the last error if the request is being retried, which is + // currently only useful for errBrokerTooOld. + shard(context.Context, kmsg.Request, error) ([]issueShard, bool, error) + + // onResp is called on a successful response to investigate the + // response and potentially perform cleanup, and potentially returns an + // error signifying to retry. See onShardRespErr below for more + // details. + onResp(kmsg.Request, kmsg.Response) error + + // merge is a function that can be used to merge sharded responses into + // one response. This is used by the client.Request method. + merge([]ResponseShard) (kmsg.Response, error) +} + +// handleShardedReq splits and issues requests to brokers, recursively +// splitting as necessary if requests fail and need remapping. +func (cl *Client) handleShardedReq(ctx context.Context, req kmsg.Request) ([]ResponseShard, shardMerge) { + // First, determine our sharder. 
+ var sharder sharder + switch req.(type) { + case *kmsg.ListOffsetsRequest: + sharder = &listOffsetsSharder{cl} + case *kmsg.OffsetFetchRequest: + sharder = &offsetFetchSharder{cl} + case *kmsg.FindCoordinatorRequest: + sharder = &findCoordinatorSharder{cl} + case *kmsg.DescribeGroupsRequest: + sharder = &describeGroupsSharder{cl} + case *kmsg.ListGroupsRequest: + sharder = &listGroupsSharder{cl} + case *kmsg.DeleteRecordsRequest: + sharder = &deleteRecordsSharder{cl} + case *kmsg.OffsetForLeaderEpochRequest: + sharder = &offsetForLeaderEpochSharder{cl} + case *kmsg.AddPartitionsToTxnRequest: + sharder = &addPartitionsToTxnSharder{cl} + case *kmsg.WriteTxnMarkersRequest: + sharder = &writeTxnMarkersSharder{cl} + case *kmsg.DescribeConfigsRequest: + sharder = &describeConfigsSharder{cl} + case *kmsg.AlterConfigsRequest: + sharder = &alterConfigsSharder{cl} + case *kmsg.AlterReplicaLogDirsRequest: + sharder = &alterReplicaLogDirsSharder{cl} + case *kmsg.DescribeLogDirsRequest: + sharder = &describeLogDirsSharder{cl} + case *kmsg.DeleteGroupsRequest: + sharder = &deleteGroupsSharder{cl} + case *kmsg.IncrementalAlterConfigsRequest: + sharder = &incrementalAlterConfigsSharder{cl} + case *kmsg.DescribeProducersRequest: + sharder = &describeProducersSharder{cl} + case *kmsg.DescribeTransactionsRequest: + sharder = &describeTransactionsSharder{cl} + case *kmsg.ListTransactionsRequest: + sharder = &listTransactionsSharder{cl} + } + + // If a request fails, we re-shard it (in case it needs to be split + // again). reqTry tracks how many total tries a request piece has had; + // we quit at either the max configured tries or max configured time. + type reqTry struct { + tries int + req kmsg.Request + lastErr error + } + + var ( + shardsMu sync.Mutex + shards []ResponseShard + + addShard = func(shard ResponseShard) { + shardsMu.Lock() + defer shardsMu.Unlock() + shards = append(shards, shard) + } + + start = time.Now() + retryTimeout = cl.cfg.retryTimeout(req.Key()) + + wg sync.WaitGroup + issue func(reqTry) + ) + + l := cl.cfg.logger + debug := l.Level() >= LogLevelDebug + + // issue is called to progressively split and issue requests. + // + // This recursively calls itself if a request fails and can be retried. + // We avoid stack problems because this calls itself in a goroutine. + issue = func(try reqTry) { + issues, reshardable, err := sharder.shard(ctx, try.req, try.lastErr) + if err != nil { + l.Log(LogLevelDebug, "unable to shard request", "req", kmsg.Key(try.req.Key()).Name(), "previous_tries", try.tries, "err", err) + addShard(shard(nil, try.req, nil, err)) // failure to shard means data loading failed; this request is failed + return + } + + // If the request actually does not need to be issued, we issue + // it to a random broker. There is no benefit to this, but at + // least we will return one shard. 
+ if len(issues) == 0 { + issues = []issueShard{{ + req: try.req, + any: true, + }} + reshardable = true + } + + if debug { + var key int16 + var brokerAnys []string + for _, issue := range issues { + key = issue.req.Key() + if issue.err != nil { + brokerAnys = append(brokerAnys, "err") + } else if issue.any { + brokerAnys = append(brokerAnys, "any") + } else { + brokerAnys = append(brokerAnys, fmt.Sprintf("%d", issue.broker)) + } + } + l.Log(LogLevelDebug, "sharded request", "req", kmsg.Key(key).Name(), "destinations", brokerAnys) + } + + for i := range issues { + myIssue := issues[i] + myUnderlyingReq := myIssue.req + var isPinned bool + if pinned, ok := myIssue.req.(*pinReq); ok { + myUnderlyingReq = pinned.Request + isPinned = true + } + + if myIssue.err != nil { + addShard(shard(nil, myUnderlyingReq, nil, myIssue.err)) + continue + } + + tries := try.tries + wg.Add(1) + go func() { + defer wg.Done() + start: + tries++ + + broker := cl.broker() + var err error + if !myIssue.any { + broker, err = cl.brokerOrErr(ctx, myIssue.broker, errUnknownBroker) + } + if err != nil { + addShard(shard(nil, myUnderlyingReq, nil, err)) // failure to load a broker is a failure to issue a request + return + } + + resp, err := broker.waitResp(ctx, myIssue.req) + var errIsFromResp bool + if err == nil { + err = sharder.onResp(myUnderlyingReq, resp) // perform some potential cleanup, and potentially receive an error to retry + if ke := (*kerr.Error)(nil); errors.As(err, &ke) { + errIsFromResp = true + } + } + + // If we failed to issue the request, we *maybe* will retry. + // We could have failed to even issue the request or receive + // a response, which is retryable. + // + // If a pinned req fails with errBrokerTooOld, we always retry + // immediately. The request was not even issued. However, as a + // safety, we only do this 3 times to avoid some super weird + // pathological spin loop. + backoff := cl.cfg.retryBackoff(tries) + if err != nil && + (reshardable && isPinned && errors.Is(err, errBrokerTooOld) && tries <= 3) || + (retryTimeout == 0 || time.Now().Add(backoff).Sub(start) <= retryTimeout) && cl.shouldRetry(tries, err) && cl.waitTries(ctx, backoff) { + // Non-reshardable re-requests just jump back to the + // top where the broker is loaded. This is the case on + // requests where the original request is split to + // dedicated brokers; we do not want to re-shard that. + if !reshardable { + l.Log(LogLevelDebug, "sharded request failed, reissuing without resharding", "req", kmsg.Key(myIssue.req.Key()).Name(), "time_since_start", time.Since(start), "tries", try.tries, "err", err) + goto start + } + l.Log(LogLevelDebug, "sharded request failed, resharding and reissuing", "req", kmsg.Key(myIssue.req.Key()).Name(), "time_since_start", time.Since(start), "tries", try.tries, "err", err) + issue(reqTry{tries, myUnderlyingReq, err}) + return + } + + // If we pulled an error out of the response body in an attempt + // to possibly retry, the request was NOT an error that we want + // to bubble as a shard error. The request was successful, we + // have a response. Before we add the shard, strip the error. + // The end user can parse the response ErrorCode. + if errIsFromResp { + err = nil + } + addShard(shard(broker, myUnderlyingReq, resp, err)) // the error was not retryable + }() + } + } + + issue(reqTry{0, req, nil}) + wg.Wait() + + return shards, sharder.merge +} + +// For sharded errors, we prefer to keep retryable errors rather than +// non-retryable errors. 
We keep the non-retryable if everything is
+// non-retryable.
+//
+// We favor retryable because retryable means we used a stale cache value; we
+// clear the stale entries on failure and the retry uses fresh data. The
+// request will be split and remapped, and the non-retryable errors will be
+// encountered again.
+func onRespShardErr(err *error, newKerr error) {
+	if newKerr == nil || *err != nil && kerr.IsRetriable(*err) {
+		return
+	}
+	*err = newKerr
+}
+
+// a convenience function for when a request needs to be issued identically to
+// all brokers.
+func (cl *Client) allBrokersShardedReq(ctx context.Context, fn func() kmsg.Request) ([]issueShard, bool, error) {
+	if err := cl.fetchBrokerMetadata(ctx); err != nil {
+		return nil, false, err
+	}
+
+	var issues []issueShard
+	cl.brokersMu.RLock()
+	for _, broker := range cl.brokers {
+		issues = append(issues, issueShard{
+			req:    fn(),
+			broker: broker.meta.NodeID,
+		})
+	}
+	cl.brokersMu.RUnlock()
+
+	return issues, false, nil // we do NOT re-shard these requests
+}
+
+// a convenience function for saving the first ResponseShard error.
+func firstErrMerger(sresps []ResponseShard, merge func(kresp kmsg.Response)) error {
+	var firstErr error
+	for _, sresp := range sresps {
+		if sresp.Err != nil {
+			if firstErr == nil {
+				firstErr = sresp.Err
+			}
+			continue
+		}
+		merge(sresp.Resp)
+	}
+	return firstErr
+}
+
+type mappedMetadataTopic struct {
+	t    kmsg.MetadataResponseTopic
+	ps   map[int32]kmsg.MetadataResponseTopicPartition
+	when time.Time
+}
+
+// For NOT_LEADER_FOR_PARTITION:
+// We always delete stale metadata. It's possible that a leader rebalance
+// happened immediately after we requested metadata; we should not pin to
+// the stale metadata for 1s.
+//
+// For UNKNOWN_TOPIC_OR_PARTITION:
+// We only delete stale metadata if it is older than the min age or 1s,
+// whichever is smaller. We use 1s even if min age is larger, because we want
+// to encourage larger min age for caching purposes. More obvious would be to
+// *always* evict the cache here, but if we *just* requested metadata, then
+// evicting the cache would cause churn for a topic that genuinely does not
+// exist.
+func (cl *Client) maybeDeleteMappedMetadata(unknownTopic bool, ts ...string) (shouldRetry bool) {
+	if len(ts) == 0 {
+		return
+	}
+
+	var min time.Duration
+	if unknownTopic {
+		min = time.Second
+		if cl.cfg.metadataMinAge < min {
+			min = cl.cfg.metadataMinAge
+		}
+	}
+
+	cl.mappedMetaMu.Lock()
+	defer cl.mappedMetaMu.Unlock()
+	for _, t := range ts {
+		tcached, exists := cl.mappedMeta[t]
+		if exists && (min == 0 || time.Since(tcached.when) > min) {
+			shouldRetry = true
+			delete(cl.mappedMeta, t)
+		}
+	}
+	return shouldRetry
+}
+
+// We only cache for metadata min age. We could theoretically cache forever,
+// but an out of band CreatePartitions can result in our metadata being stale
+// and us never knowing. So, we choose metadata min age. There are only a few
+// requests that are sharded and use metadata, and the one this benefits most
+// is ListOffsets. Likely, ListOffsets for the same topic will be issued back
+// to back, so not caching for so long is ok.
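+//
+// A hedged illustration (the durations are examples, not configuration
+// defaults): with a metadata min age of 10s, two ListOffsets calls for the
+// same topic one second apart share a single cached mapping, while a call
+// made after the min age re-fetches and re-caches it.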
+func (cl *Client) fetchCachedMappedMetadata(ts ...string) (map[string]mappedMetadataTopic, []string) { + cl.mappedMetaMu.Lock() + defer cl.mappedMetaMu.Unlock() + if cl.mappedMeta == nil { + return nil, ts + } + cached := make(map[string]mappedMetadataTopic) + needed := ts[:0] + + for _, t := range ts { + tcached, exists := cl.mappedMeta[t] + if exists && time.Since(tcached.when) < cl.cfg.metadataMinAge { + cached[t] = tcached + } else { + needed = append(needed, t) + delete(cl.mappedMeta, t) + } + } + return cached, needed +} + +// fetchMappedMetadata provides a convenience type of working with metadata; +// this is garbage heavy, so it is only used in one off requests in this +// package. +func (cl *Client) fetchMappedMetadata(ctx context.Context, topics []string, useCache bool) (map[string]mappedMetadataTopic, error) { + var r map[string]mappedMetadataTopic + needed := topics + if useCache { + r, needed = cl.fetchCachedMappedMetadata(topics...) + if len(needed) == 0 { + return r, nil + } + } + if r == nil { + r = make(map[string]mappedMetadataTopic) + } + + _, meta, err := cl.fetchMetadataForTopics(ctx, false, needed) + if err != nil { + return nil, err + } + + // Cache the mapped metadata, and also store each topic in the results. + cl.storeCachedMappedMetadata(meta, func(entry mappedMetadataTopic) { + r[*entry.t.Topic] = entry + }) + + return r, nil +} + +// storeCachedMappedMetadata caches the fetched metadata in the Client, and calls the onEachTopic callback +// function for each topic in the MetadataResponse. +func (cl *Client) storeCachedMappedMetadata(meta *kmsg.MetadataResponse, onEachTopic func(_ mappedMetadataTopic)) { + cl.mappedMetaMu.Lock() + defer cl.mappedMetaMu.Unlock() + if cl.mappedMeta == nil { + cl.mappedMeta = make(map[string]mappedMetadataTopic) + } + when := time.Now() + for _, topic := range meta.Topics { + if topic.Topic == nil { + // We do not request with topic IDs, so we should not + // receive topic IDs in the response. + continue + } + t := mappedMetadataTopic{ + t: topic, + ps: make(map[int32]kmsg.MetadataResponseTopicPartition), + when: when, + } + cl.mappedMeta[*topic.Topic] = t + for _, partition := range topic.Partitions { + t.ps[partition.Partition] = partition + } + + if onEachTopic != nil { + onEachTopic(t) + } + } + if len(meta.Topics) != len(cl.mappedMeta) { + for topic, mapped := range cl.mappedMeta { + if mapped.when.Equal(when) { + continue + } + if time.Since(mapped.when) > cl.cfg.metadataMinAge { + delete(cl.mappedMeta, topic) + } + } + } +} + +func unknownOrCode(exists bool, code int16) error { + if !exists { + return kerr.UnknownTopicOrPartition + } + return kerr.ErrorForCode(code) +} + +func noLeader(l int32) error { + if l < 0 { + return kerr.LeaderNotAvailable + } + return nil +} + +// This is a helper for the sharded requests below; if mapping metadata fails +// to load topics or partitions, we group the failures by error. +// +// We use a lot of reflect magic to make the actual usage much nicer. +type unknownErrShards struct { + // load err => topic => mystery slice type + // + // The mystery type is basically just []Partition, where Partition can + // be any kmsg type. + mapped map[error]map[string]reflect.Value +} + +// err stores a new failing partition with its failing error. +// +// partition's type is equal to the arg1 type of l.fn. 
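+//
+// For example, the ListOffsets sharder below stores
+// kmsg.ListOffsetsRequestTopicPartition values here, grouped per error and
+// then per topic.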
+func (l *unknownErrShards) err(err error, topic string, partition any) {
+	if l.mapped == nil {
+		l.mapped = make(map[error]map[string]reflect.Value)
+	}
+	t := l.mapped[err]
+	if t == nil {
+		t = make(map[string]reflect.Value)
+		l.mapped[err] = t
+	}
+	slice, ok := t[topic]
+	if !ok {
+		// We make a slice of the input partition type.
+		slice = reflect.MakeSlice(reflect.SliceOf(reflect.TypeOf(partition)), 0, 1)
+	}
+
+	t[topic] = reflect.Append(slice, reflect.ValueOf(partition))
+}
+
+// errs takes an input slice of partitions and stores each with its failing
+// error.
+//
+// partitions is a slice where each element has type of arg1 of l.fn.
+func (l *unknownErrShards) errs(err error, topic string, partitions any) {
+	v := reflect.ValueOf(partitions)
+	for i := 0; i < v.Len(); i++ {
+		l.err(err, topic, v.Index(i).Interface())
+	}
+}
+
+// Returns issueShards for each error stored in l.
+//
+// This takes a factory function: the first return is a new kmsg.Request, the
+// second is a function that adds a topic and its partitions to that request.
+//
+// Thus, fn is of type func() (kmsg.Request, func(string, []P))
+func (l *unknownErrShards) collect(mkreq, mergeParts any) []issueShard {
+	if len(l.mapped) == 0 {
+		return nil
+	}
+
+	var shards []issueShard
+
+	factory := reflect.ValueOf(mkreq)
+	perTopic := reflect.ValueOf(mergeParts)
+	for err, topics := range l.mapped {
+		req := factory.Call(nil)[0]
+
+		var ntopics, npartitions int
+		for topic, partitions := range topics {
+			ntopics++
+			npartitions += partitions.Len()
+			perTopic.Call([]reflect.Value{req, reflect.ValueOf(topic), partitions})
+		}
+
+		shards = append(shards, issueShard{
+			req: req.Interface().(kmsg.Request),
+			err: err,
+		})
+	}
+
+	return shards
+}
+
+// handles sharding ListOffsetsRequest
+type listOffsetsSharder struct{ *Client }
+
+func (cl *listOffsetsSharder) shard(ctx context.Context, kreq kmsg.Request, _ error) ([]issueShard, bool, error) {
+	req := kreq.(*kmsg.ListOffsetsRequest)
+
+	// For listing offsets, we need the broker leader for each partition we
+	// are listing. Thus, we first load metadata for the topics.
+	//
+	// Metadata loading performs retries; if we fail here, then we do not
+	// issue sharded requests.
+	var need []string
+	for _, topic := range req.Topics {
+		need = append(need, topic.Topic)
+	}
+	mapping, err := cl.fetchMappedMetadata(ctx, need, true)
+	if err != nil {
+		return nil, false, err
+	}
+
+	brokerReqs := make(map[int32]map[string][]kmsg.ListOffsetsRequestTopicPartition)
+	var unknowns unknownErrShards
+
+	// For any topic or partition that had a load error, we blindly issue
+	// a load to the first seed broker. We expect the list to fail, but it
+	// is the best we could do.
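+	//
+	// Everything that maps cleanly is grouped by its leader broker in
+	// brokerReqs; anything with a load error or no leader is collected in
+	// unknowns and attached to its own shard carrying that error.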
+ for _, topic := range req.Topics { + t := topic.Topic + tmapping, exists := mapping[t] + if err := unknownOrCode(exists, tmapping.t.ErrorCode); err != nil { + unknowns.errs(err, t, topic.Partitions) + continue + } + for _, partition := range topic.Partitions { + p, exists := tmapping.ps[partition.Partition] + if err := unknownOrCode(exists, p.ErrorCode); err != nil { + unknowns.err(err, t, partition) + continue + } + if err := noLeader(p.Leader); err != nil { + unknowns.err(err, t, partition) + continue + } + + brokerReq := brokerReqs[p.Leader] + if brokerReq == nil { + brokerReq = make(map[string][]kmsg.ListOffsetsRequestTopicPartition) + brokerReqs[p.Leader] = brokerReq + } + brokerReq[t] = append(brokerReq[t], partition) + } + } + + mkreq := func() *kmsg.ListOffsetsRequest { + r := kmsg.NewPtrListOffsetsRequest() + r.ReplicaID = req.ReplicaID + r.IsolationLevel = req.IsolationLevel + return r + } + + var issues []issueShard + for brokerID, brokerReq := range brokerReqs { + req := mkreq() + for topic, parts := range brokerReq { + reqTopic := kmsg.NewListOffsetsRequestTopic() + reqTopic.Topic = topic + reqTopic.Partitions = parts + req.Topics = append(req.Topics, reqTopic) + } + issues = append(issues, issueShard{ + req: req, + broker: brokerID, + }) + } + + return append(issues, unknowns.collect(mkreq, func(r *kmsg.ListOffsetsRequest, topic string, parts []kmsg.ListOffsetsRequestTopicPartition) { + reqTopic := kmsg.NewListOffsetsRequestTopic() + reqTopic.Topic = topic + reqTopic.Partitions = parts + r.Topics = append(r.Topics, reqTopic) + })...), true, nil // this is reshardable +} + +func (cl *listOffsetsSharder) onResp(_ kmsg.Request, kresp kmsg.Response) error { + var ( + resp = kresp.(*kmsg.ListOffsetsResponse) + del []string + retErr error + unknownTopic bool + ) + + for i := range resp.Topics { + t := &resp.Topics[i] + for j := range t.Partitions { + p := &t.Partitions[j] + err := kerr.ErrorForCode(p.ErrorCode) + if err == kerr.UnknownTopicOrPartition || err == kerr.NotLeaderForPartition { + del = append(del, t.Topic) + unknownTopic = unknownTopic || err == kerr.UnknownTopicOrPartition + } + onRespShardErr(&retErr, err) + } + } + if cl.maybeDeleteMappedMetadata(unknownTopic, del...) { + return retErr + } + return nil +} + +func (*listOffsetsSharder) merge(sresps []ResponseShard) (kmsg.Response, error) { + merged := kmsg.NewPtrListOffsetsResponse() + topics := make(map[string][]kmsg.ListOffsetsResponseTopicPartition) + + firstErr := firstErrMerger(sresps, func(kresp kmsg.Response) { + resp := kresp.(*kmsg.ListOffsetsResponse) + merged.Version = resp.Version + merged.ThrottleMillis = resp.ThrottleMillis + + for _, topic := range resp.Topics { + topics[topic.Topic] = append(topics[topic.Topic], topic.Partitions...) 
+ } + }) + for topic, partitions := range topics { + respTopic := kmsg.NewListOffsetsResponseTopic() + respTopic.Topic = topic + respTopic.Partitions = partitions + merged.Topics = append(merged.Topics, respTopic) + } + return merged, firstErr +} + +// handles sharding OffsetFetchRequest +type offsetFetchSharder struct{ *Client } + +func offsetFetchReqToGroup(req *kmsg.OffsetFetchRequest) kmsg.OffsetFetchRequestGroup { + g := kmsg.NewOffsetFetchRequestGroup() + g.Group = req.Group + for _, topic := range req.Topics { + reqTopic := kmsg.NewOffsetFetchRequestGroupTopic() + reqTopic.Topic = topic.Topic + reqTopic.Partitions = topic.Partitions + g.Topics = append(g.Topics, reqTopic) + } + return g +} + +func offsetFetchGroupToReq(requireStable bool, group kmsg.OffsetFetchRequestGroup) *kmsg.OffsetFetchRequest { + req := kmsg.NewPtrOffsetFetchRequest() + req.RequireStable = requireStable + req.Group = group.Group + for _, topic := range group.Topics { + reqTopic := kmsg.NewOffsetFetchRequestTopic() + reqTopic.Topic = topic.Topic + reqTopic.Partitions = topic.Partitions + req.Topics = append(req.Topics, reqTopic) + } + return req +} + +func offsetFetchRespToGroup(req *kmsg.OffsetFetchRequest, resp *kmsg.OffsetFetchResponse) kmsg.OffsetFetchResponseGroup { + g := kmsg.NewOffsetFetchResponseGroup() + g.Group = req.Group + g.ErrorCode = resp.ErrorCode + for _, topic := range resp.Topics { + t := kmsg.NewOffsetFetchResponseGroupTopic() + t.Topic = topic.Topic + for _, partition := range topic.Partitions { + p := kmsg.NewOffsetFetchResponseGroupTopicPartition() + p.Partition = partition.Partition + p.Offset = partition.Offset + p.LeaderEpoch = partition.LeaderEpoch + p.Metadata = partition.Metadata + p.ErrorCode = partition.ErrorCode + t.Partitions = append(t.Partitions, p) + } + g.Topics = append(g.Topics, t) + } + return g +} + +func offsetFetchRespGroupIntoResp(g kmsg.OffsetFetchResponseGroup, into *kmsg.OffsetFetchResponse) { + into.ErrorCode = g.ErrorCode + into.Topics = into.Topics[:0] + for _, topic := range g.Topics { + t := kmsg.NewOffsetFetchResponseTopic() + t.Topic = topic.Topic + for _, partition := range topic.Partitions { + p := kmsg.NewOffsetFetchResponseTopicPartition() + p.Partition = partition.Partition + p.Offset = partition.Offset + p.LeaderEpoch = partition.LeaderEpoch + p.Metadata = partition.Metadata + p.ErrorCode = partition.ErrorCode + t.Partitions = append(t.Partitions, p) + } + into.Topics = append(into.Topics, t) + } +} + +func (cl *offsetFetchSharder) shard(ctx context.Context, kreq kmsg.Request, lastErr error) ([]issueShard, bool, error) { + req := kreq.(*kmsg.OffsetFetchRequest) + + // We always try batching and only split at the end if lastErr + // indicates too old. We convert to batching immediately. + dup := *req + req = &dup + + if len(req.Groups) == 0 { + req.Groups = append(req.Groups, offsetFetchReqToGroup(req)) + } + groups := make([]string, 0, len(req.Groups)) + for i := range req.Groups { + groups = append(groups, req.Groups[i].Group) + } + + coordinators := cl.loadCoordinators(ctx, coordinatorTypeGroup, groups...) + + // Loading coordinators can have each group fail with its unique error, + // or with a kerr.Error that can be merged. Unique errors get their own + // failure shard, while kerr.Error's get merged. 
+ type unkerr struct { + err error + group kmsg.OffsetFetchRequestGroup + } + var ( + brokerReqs = make(map[int32]*kmsg.OffsetFetchRequest) + kerrs = make(map[*kerr.Error][]kmsg.OffsetFetchRequestGroup) + unkerrs []unkerr + ) + + newReq := func(groups ...kmsg.OffsetFetchRequestGroup) *kmsg.OffsetFetchRequest { + newReq := kmsg.NewPtrOffsetFetchRequest() + newReq.RequireStable = req.RequireStable + newReq.Groups = groups + return newReq + } + + for _, group := range req.Groups { + berr := coordinators[group.Group] + var ke *kerr.Error + switch { + case berr.err == nil: + brokerReq := brokerReqs[berr.b.meta.NodeID] + if brokerReq == nil { + brokerReq = newReq() + brokerReqs[berr.b.meta.NodeID] = brokerReq + } + brokerReq.Groups = append(brokerReq.Groups, group) + case errors.As(berr.err, &ke): + kerrs[ke] = append(kerrs[ke], group) + default: + unkerrs = append(unkerrs, unkerr{berr.err, group}) + } + } + + splitReq := errors.Is(lastErr, errBrokerTooOld) + + var issues []issueShard + for id, req := range brokerReqs { + if splitReq { + for _, group := range req.Groups { + req := offsetFetchGroupToReq(req.RequireStable, group) + issues = append(issues, issueShard{ + req: &pinReq{Request: req, pinMax: true, max: 7}, + broker: id, + }) + } + } else if len(req.Groups) == 1 { + single := offsetFetchGroupToReq(req.RequireStable, req.Groups[0]) + single.Groups = req.Groups + issues = append(issues, issueShard{ + req: single, + broker: id, + }) + } else { + issues = append(issues, issueShard{ + req: &pinReq{Request: req, pinMin: len(req.Groups) > 1, min: 8}, + broker: id, + }) + } + } + for _, unkerr := range unkerrs { + issues = append(issues, issueShard{ + req: newReq(unkerr.group), + err: unkerr.err, + }) + } + for kerr, groups := range kerrs { + issues = append(issues, issueShard{ + req: newReq(groups...), + err: kerr, + }) + } + + return issues, true, nil // reshardable to load correct coordinators +} + +func (cl *offsetFetchSharder) onResp(kreq kmsg.Request, kresp kmsg.Response) error { + req := kreq.(*kmsg.OffsetFetchRequest) + resp := kresp.(*kmsg.OffsetFetchResponse) + + switch len(resp.Groups) { + case 0: + // Requested no groups: move top level into batch for v0-v7 to + // v8 forward compat. + resp.Groups = append(resp.Groups, offsetFetchRespToGroup(req, resp)) + case 1: + // Requested 1 group v8+: set top level for v0-v7 back-compat. + offsetFetchRespGroupIntoResp(resp.Groups[0], resp) + default: + } + + var retErr error + for i := range resp.Groups { + group := &resp.Groups[i] + err := kerr.ErrorForCode(group.ErrorCode) + cl.maybeDeleteStaleCoordinator(group.Group, coordinatorTypeGroup, err) + onRespShardErr(&retErr, err) + } + + // For a final bit of extra fun, v0 and v1 do not have a top level + // error code but instead a per-partition error code. If the + // coordinator is loading &c, then all per-partition error codes are + // the same so we only need to look at the first partition. 
+ if resp.Version < 2 && len(resp.Topics) > 0 && len(resp.Topics[0].Partitions) > 0 { + code := resp.Topics[0].Partitions[0].ErrorCode + err := kerr.ErrorForCode(code) + cl.maybeDeleteStaleCoordinator(req.Group, coordinatorTypeGroup, err) + onRespShardErr(&retErr, err) + } + + return retErr +} + +func (*offsetFetchSharder) merge(sresps []ResponseShard) (kmsg.Response, error) { + merged := kmsg.NewPtrOffsetFetchResponse() + return merged, firstErrMerger(sresps, func(kresp kmsg.Response) { + resp := kresp.(*kmsg.OffsetFetchResponse) + merged.Version = resp.Version + merged.ThrottleMillis = resp.ThrottleMillis + merged.Groups = append(merged.Groups, resp.Groups...) + + // Old requests only support one group; *either* the commit + // used multiple groups and they are expecting the batch + // response, *or* the commit used one group and we always merge + // that one group into the old format. + if len(resp.Groups) == 1 { + offsetFetchRespGroupIntoResp(resp.Groups[0], merged) + } + }) +} + +// handles sharding FindCoordinatorRequest +type findCoordinatorSharder struct{ *Client } + +func findCoordinatorRespCoordinatorIntoResp(c kmsg.FindCoordinatorResponseCoordinator, into *kmsg.FindCoordinatorResponse) { + into.NodeID = c.NodeID + into.Host = c.Host + into.Port = c.Port + into.ErrorCode = c.ErrorCode + into.ErrorMessage = c.ErrorMessage +} + +func (*findCoordinatorSharder) shard(_ context.Context, kreq kmsg.Request, lastErr error) ([]issueShard, bool, error) { + req := kreq.(*kmsg.FindCoordinatorRequest) + + // We always try batching and only split at the end if lastErr + // indicates too old. We convert to batching immediately. + dup := *req + req = &dup + + uniq := make(map[string]struct{}, len(req.CoordinatorKeys)) + if len(req.CoordinatorKeys) == 0 { + uniq[req.CoordinatorKey] = struct{}{} + } else { + for _, key := range req.CoordinatorKeys { + uniq[key] = struct{}{} + } + } + req.CoordinatorKeys = req.CoordinatorKeys[:0] + for key := range uniq { + req.CoordinatorKeys = append(req.CoordinatorKeys, key) + } + if len(req.CoordinatorKeys) == 1 { + req.CoordinatorKey = req.CoordinatorKeys[0] + } + + splitReq := errors.Is(lastErr, errBrokerTooOld) + if !splitReq { + // With only one key, we do not need to split nor pin this. 
+ if len(req.CoordinatorKeys) <= 1 { + return []issueShard{{req: req, any: true}}, false, nil + } + return []issueShard{{ + req: &pinReq{Request: req, pinMin: true, min: 4}, + any: true, + }}, true, nil // this is "reshardable", in that we will split the request next + } + + var issues []issueShard + for _, key := range req.CoordinatorKeys { + sreq := kmsg.NewPtrFindCoordinatorRequest() + sreq.CoordinatorType = req.CoordinatorType + sreq.CoordinatorKey = key + issues = append(issues, issueShard{ + req: &pinReq{Request: sreq, pinMax: true, max: 3}, + any: true, + }) + } + return issues, false, nil // not reshardable +} + +func (*findCoordinatorSharder) onResp(kreq kmsg.Request, kresp kmsg.Response) error { + req := kreq.(*kmsg.FindCoordinatorRequest) + resp := kresp.(*kmsg.FindCoordinatorResponse) + + switch len(resp.Coordinators) { + case 0: + // Convert v3 and prior to v4+ + rc := kmsg.NewFindCoordinatorResponseCoordinator() + rc.Key = req.CoordinatorKey + rc.NodeID = resp.NodeID + rc.Host = resp.Host + rc.Port = resp.Port + rc.ErrorCode = resp.ErrorCode + rc.ErrorMessage = resp.ErrorMessage + resp.Coordinators = append(resp.Coordinators, rc) + case 1: + // Convert v4 to v3 and prior + findCoordinatorRespCoordinatorIntoResp(resp.Coordinators[0], resp) + } + + return nil +} + +func (*findCoordinatorSharder) merge(sresps []ResponseShard) (kmsg.Response, error) { + merged := kmsg.NewPtrFindCoordinatorResponse() + return merged, firstErrMerger(sresps, func(kresp kmsg.Response) { + resp := kresp.(*kmsg.FindCoordinatorResponse) + merged.Version = resp.Version + merged.ThrottleMillis = resp.ThrottleMillis + merged.Coordinators = append(merged.Coordinators, resp.Coordinators...) + + if len(resp.Coordinators) == 1 { + findCoordinatorRespCoordinatorIntoResp(resp.Coordinators[0], merged) + } + }) +} + +// handles sharding DescribeGroupsRequest +type describeGroupsSharder struct{ *Client } + +func (cl *describeGroupsSharder) shard(ctx context.Context, kreq kmsg.Request, _ error) ([]issueShard, bool, error) { + req := kreq.(*kmsg.DescribeGroupsRequest) + + coordinators := cl.loadCoordinators(ctx, coordinatorTypeGroup, req.Groups...) 
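+
+	// Bucket each group by its coordinator's node ID; groups whose
+	// coordinator lookup failed are kept aside and returned as dedicated
+	// error shards rather than being issued.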
+ type unkerr struct { + err error + group string + } + var ( + brokerReqs = make(map[int32]*kmsg.DescribeGroupsRequest) + kerrs = make(map[*kerr.Error][]string) + unkerrs []unkerr + ) + + newReq := func(groups ...string) *kmsg.DescribeGroupsRequest { + newReq := kmsg.NewPtrDescribeGroupsRequest() + newReq.IncludeAuthorizedOperations = req.IncludeAuthorizedOperations + newReq.Groups = groups + return newReq + } + + for _, group := range req.Groups { + berr := coordinators[group] + var ke *kerr.Error + switch { + case berr.err == nil: + brokerReq := brokerReqs[berr.b.meta.NodeID] + if brokerReq == nil { + brokerReq = newReq() + brokerReqs[berr.b.meta.NodeID] = brokerReq + } + brokerReq.Groups = append(brokerReq.Groups, group) + case errors.As(berr.err, &ke): + kerrs[ke] = append(kerrs[ke], group) + default: + unkerrs = append(unkerrs, unkerr{berr.err, group}) + } + } + + var issues []issueShard + for id, req := range brokerReqs { + issues = append(issues, issueShard{ + req: req, + broker: id, + }) + } + for _, unkerr := range unkerrs { + issues = append(issues, issueShard{ + req: newReq(unkerr.group), + err: unkerr.err, + }) + } + for kerr, groups := range kerrs { + issues = append(issues, issueShard{ + req: newReq(groups...), + err: kerr, + }) + } + + return issues, true, nil // reshardable to load correct coordinators +} + +func (cl *describeGroupsSharder) onResp(_ kmsg.Request, kresp kmsg.Response) error { // cleanup any stale groups + resp := kresp.(*kmsg.DescribeGroupsResponse) + var retErr error + for i := range resp.Groups { + group := &resp.Groups[i] + err := kerr.ErrorForCode(group.ErrorCode) + cl.maybeDeleteStaleCoordinator(group.Group, coordinatorTypeGroup, err) + onRespShardErr(&retErr, err) + } + return retErr +} + +func (*describeGroupsSharder) merge(sresps []ResponseShard) (kmsg.Response, error) { + merged := kmsg.NewPtrDescribeGroupsResponse() + return merged, firstErrMerger(sresps, func(kresp kmsg.Response) { + resp := kresp.(*kmsg.DescribeGroupsResponse) + merged.Version = resp.Version + merged.ThrottleMillis = resp.ThrottleMillis + merged.Groups = append(merged.Groups, resp.Groups...) + }) +} + +// handles sharding ListGroupsRequest +type listGroupsSharder struct{ *Client } + +func (cl *listGroupsSharder) shard(ctx context.Context, kreq kmsg.Request, _ error) ([]issueShard, bool, error) { + req := kreq.(*kmsg.ListGroupsRequest) + return cl.allBrokersShardedReq(ctx, func() kmsg.Request { + dup := *req + return &dup + }) +} + +func (*listGroupsSharder) onResp(_ kmsg.Request, kresp kmsg.Response) error { + resp := kresp.(*kmsg.ListGroupsResponse) + return kerr.ErrorForCode(resp.ErrorCode) +} + +func (*listGroupsSharder) merge(sresps []ResponseShard) (kmsg.Response, error) { + merged := kmsg.NewPtrListGroupsResponse() + return merged, firstErrMerger(sresps, func(kresp kmsg.Response) { + resp := kresp.(*kmsg.ListGroupsResponse) + merged.Version = resp.Version + merged.ThrottleMillis = resp.ThrottleMillis + if merged.ErrorCode == 0 { + merged.ErrorCode = resp.ErrorCode + } + merged.Groups = append(merged.Groups, resp.Groups...) 
+ }) +} + +// handle sharding DeleteRecordsRequest +type deleteRecordsSharder struct{ *Client } + +func (cl *deleteRecordsSharder) shard(ctx context.Context, kreq kmsg.Request, _ error) ([]issueShard, bool, error) { + req := kreq.(*kmsg.DeleteRecordsRequest) + + var need []string + for _, topic := range req.Topics { + need = append(need, topic.Topic) + } + mapping, err := cl.fetchMappedMetadata(ctx, need, true) + if err != nil { + return nil, false, err + } + + brokerReqs := make(map[int32]map[string][]kmsg.DeleteRecordsRequestTopicPartition) + var unknowns unknownErrShards + + for _, topic := range req.Topics { + t := topic.Topic + tmapping, exists := mapping[t] + if err := unknownOrCode(exists, tmapping.t.ErrorCode); err != nil { + unknowns.errs(err, t, topic.Partitions) + continue + } + for _, partition := range topic.Partitions { + p, exists := tmapping.ps[partition.Partition] + if err := unknownOrCode(exists, p.ErrorCode); err != nil { + unknowns.err(err, t, partition) + continue + } + if err := noLeader(p.Leader); err != nil { + unknowns.err(err, t, partition) + continue + } + + brokerReq := brokerReqs[p.Leader] + if brokerReq == nil { + brokerReq = make(map[string][]kmsg.DeleteRecordsRequestTopicPartition) + brokerReqs[p.Leader] = brokerReq + } + brokerReq[t] = append(brokerReq[t], partition) + } + } + + mkreq := func() *kmsg.DeleteRecordsRequest { + r := kmsg.NewPtrDeleteRecordsRequest() + r.TimeoutMillis = req.TimeoutMillis + return r + } + + var issues []issueShard + for brokerID, brokerReq := range brokerReqs { + req := mkreq() + for topic, parts := range brokerReq { + reqTopic := kmsg.NewDeleteRecordsRequestTopic() + reqTopic.Topic = topic + reqTopic.Partitions = parts + req.Topics = append(req.Topics, reqTopic) + } + issues = append(issues, issueShard{ + req: req, + broker: brokerID, + }) + } + + return append(issues, unknowns.collect(mkreq, func(r *kmsg.DeleteRecordsRequest, topic string, parts []kmsg.DeleteRecordsRequestTopicPartition) { + reqTopic := kmsg.NewDeleteRecordsRequestTopic() + reqTopic.Topic = topic + reqTopic.Partitions = parts + r.Topics = append(r.Topics, reqTopic) + })...), true, nil // this is reshardable +} + +func (cl *deleteRecordsSharder) onResp(_ kmsg.Request, kresp kmsg.Response) error { + var ( + resp = kresp.(*kmsg.DeleteRecordsResponse) + del []string + retErr error + unknownTopic bool + ) + for i := range resp.Topics { + t := &resp.Topics[i] + for j := range t.Partitions { + p := &t.Partitions[j] + err := kerr.ErrorForCode(p.ErrorCode) + if err == kerr.UnknownTopicOrPartition || err == kerr.NotLeaderForPartition { + del = append(del, t.Topic) + unknownTopic = unknownTopic || err == kerr.UnknownTopicOrPartition + } + onRespShardErr(&retErr, err) + } + } + if cl.maybeDeleteMappedMetadata(unknownTopic, del...) { + return retErr + } + return nil +} + +func (*deleteRecordsSharder) merge(sresps []ResponseShard) (kmsg.Response, error) { + merged := kmsg.NewPtrDeleteRecordsResponse() + topics := make(map[string][]kmsg.DeleteRecordsResponseTopicPartition) + + firstErr := firstErrMerger(sresps, func(kresp kmsg.Response) { + resp := kresp.(*kmsg.DeleteRecordsResponse) + merged.Version = resp.Version + merged.ThrottleMillis = resp.ThrottleMillis + + for _, topic := range resp.Topics { + topics[topic.Topic] = append(topics[topic.Topic], topic.Partitions...) 
+ } + }) + for topic, partitions := range topics { + respTopic := kmsg.NewDeleteRecordsResponseTopic() + respTopic.Topic = topic + respTopic.Partitions = partitions + merged.Topics = append(merged.Topics, respTopic) + } + return merged, firstErr +} + +// handle sharding OffsetForLeaderEpochRequest +type offsetForLeaderEpochSharder struct{ *Client } + +func (cl *offsetForLeaderEpochSharder) shard(ctx context.Context, kreq kmsg.Request, _ error) ([]issueShard, bool, error) { + req := kreq.(*kmsg.OffsetForLeaderEpochRequest) + + var need []string + for _, topic := range req.Topics { + need = append(need, topic.Topic) + } + mapping, err := cl.fetchMappedMetadata(ctx, need, true) + if err != nil { + return nil, false, err + } + + brokerReqs := make(map[int32]map[string][]kmsg.OffsetForLeaderEpochRequestTopicPartition) + var unknowns unknownErrShards + + for _, topic := range req.Topics { + t := topic.Topic + tmapping, exists := mapping[t] + if err := unknownOrCode(exists, tmapping.t.ErrorCode); err != nil { + unknowns.errs(err, t, topic.Partitions) + continue + } + for _, partition := range topic.Partitions { + p, exists := tmapping.ps[partition.Partition] + if err := unknownOrCode(exists, p.ErrorCode); err != nil { + unknowns.err(err, t, partition) + continue + } + if err := noLeader(p.Leader); err != nil { + unknowns.err(err, t, partition) + continue + } + + brokerReq := brokerReqs[p.Leader] + if brokerReq == nil { + brokerReq = make(map[string][]kmsg.OffsetForLeaderEpochRequestTopicPartition) + brokerReqs[p.Leader] = brokerReq + } + brokerReq[topic.Topic] = append(brokerReq[topic.Topic], partition) + } + } + + mkreq := func() *kmsg.OffsetForLeaderEpochRequest { + r := kmsg.NewPtrOffsetForLeaderEpochRequest() + r.ReplicaID = req.ReplicaID + return r + } + + var issues []issueShard + for brokerID, brokerReq := range brokerReqs { + req := mkreq() + for topic, parts := range brokerReq { + reqTopic := kmsg.NewOffsetForLeaderEpochRequestTopic() + reqTopic.Topic = topic + reqTopic.Partitions = parts + req.Topics = append(req.Topics, reqTopic) + } + issues = append(issues, issueShard{ + req: req, + broker: brokerID, + }) + } + + return append(issues, unknowns.collect(mkreq, func(r *kmsg.OffsetForLeaderEpochRequest, topic string, parts []kmsg.OffsetForLeaderEpochRequestTopicPartition) { + reqTopic := kmsg.NewOffsetForLeaderEpochRequestTopic() + reqTopic.Topic = topic + reqTopic.Partitions = parts + r.Topics = append(r.Topics, reqTopic) + })...), true, nil // this is reshardable +} + +func (cl *offsetForLeaderEpochSharder) onResp(_ kmsg.Request, kresp kmsg.Response) error { + var ( + resp = kresp.(*kmsg.OffsetForLeaderEpochResponse) + del []string + retErr error + unknownTopic bool + ) + for i := range resp.Topics { + t := &resp.Topics[i] + for j := range t.Partitions { + p := &t.Partitions[j] + err := kerr.ErrorForCode(p.ErrorCode) + if err == kerr.UnknownTopicOrPartition || err == kerr.NotLeaderForPartition { + del = append(del, t.Topic) + unknownTopic = unknownTopic || err == kerr.UnknownTopicOrPartition + } + onRespShardErr(&retErr, err) + } + } + if cl.maybeDeleteMappedMetadata(unknownTopic, del...) 
{ + return retErr + } + return nil +} + +func (*offsetForLeaderEpochSharder) merge(sresps []ResponseShard) (kmsg.Response, error) { + merged := kmsg.NewPtrOffsetForLeaderEpochResponse() + topics := make(map[string][]kmsg.OffsetForLeaderEpochResponseTopicPartition) + + firstErr := firstErrMerger(sresps, func(kresp kmsg.Response) { + resp := kresp.(*kmsg.OffsetForLeaderEpochResponse) + merged.Version = resp.Version + merged.ThrottleMillis = resp.ThrottleMillis + + for _, topic := range resp.Topics { + topics[topic.Topic] = append(topics[topic.Topic], topic.Partitions...) + } + }) + for topic, partitions := range topics { + respTopic := kmsg.NewOffsetForLeaderEpochResponseTopic() + respTopic.Topic = topic + respTopic.Partitions = partitions + merged.Topics = append(merged.Topics, respTopic) + } + return merged, firstErr +} + +// handle sharding AddPartitionsToTXn, where v4+ switched to batch requests +type addPartitionsToTxnSharder struct{ *Client } + +func addPartitionsReqToTxn(req *kmsg.AddPartitionsToTxnRequest) { + t := kmsg.NewAddPartitionsToTxnRequestTransaction() + t.TransactionalID = req.TransactionalID + t.ProducerID = req.ProducerID + t.ProducerEpoch = req.ProducerEpoch + for i := range req.Topics { + rt := &req.Topics[i] + tt := kmsg.NewAddPartitionsToTxnRequestTransactionTopic() + tt.Topic = rt.Topic + tt.Partitions = rt.Partitions + t.Topics = append(t.Topics, tt) + } + req.Transactions = append(req.Transactions, t) +} + +func addPartitionsTxnToReq(req *kmsg.AddPartitionsToTxnRequest) { + if len(req.Transactions) != 1 { + return + } + t0 := &req.Transactions[0] + req.TransactionalID = t0.TransactionalID + req.ProducerID = t0.ProducerID + req.ProducerEpoch = t0.ProducerEpoch + for _, tt := range t0.Topics { + rt := kmsg.NewAddPartitionsToTxnRequestTopic() + rt.Topic = tt.Topic + rt.Partitions = tt.Partitions + req.Topics = append(req.Topics, rt) + } +} + +func addPartitionsTxnToResp(resp *kmsg.AddPartitionsToTxnResponse) { + if len(resp.Transactions) == 0 { + return + } + t0 := &resp.Transactions[0] + for _, tt := range t0.Topics { + rt := kmsg.NewAddPartitionsToTxnResponseTopic() + rt.Topic = tt.Topic + for _, tp := range tt.Partitions { + rp := kmsg.NewAddPartitionsToTxnResponseTopicPartition() + rp.Partition = tp.Partition + rp.ErrorCode = tp.ErrorCode + rt.Partitions = append(rt.Partitions, rp) + } + resp.Topics = append(resp.Topics, rt) + } +} + +func (cl *addPartitionsToTxnSharder) shard(ctx context.Context, kreq kmsg.Request, _ error) ([]issueShard, bool, error) { + req := kreq.(*kmsg.AddPartitionsToTxnRequest) + + if len(req.Transactions) == 0 { + addPartitionsReqToTxn(req) + } + txnIDs := make([]string, 0, len(req.Transactions)) + for i := range req.Transactions { + txnIDs = append(txnIDs, req.Transactions[i].TransactionalID) + } + coordinators := cl.loadCoordinators(ctx, coordinatorTypeTxn, txnIDs...) 
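+	// As with the group sharders above, bucket each transaction by its
+	// transaction coordinator; failed coordinator lookups become dedicated
+	// error shards.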
+ + type unkerr struct { + err error + txn kmsg.AddPartitionsToTxnRequestTransaction + } + var ( + brokerReqs = make(map[int32]*kmsg.AddPartitionsToTxnRequest) + kerrs = make(map[*kerr.Error][]kmsg.AddPartitionsToTxnRequestTransaction) + unkerrs []unkerr + ) + + newReq := func(txns ...kmsg.AddPartitionsToTxnRequestTransaction) *kmsg.AddPartitionsToTxnRequest { + req := kmsg.NewPtrAddPartitionsToTxnRequest() + req.Transactions = txns + addPartitionsTxnToReq(req) + return req + } + + for _, txn := range req.Transactions { + berr := coordinators[txn.TransactionalID] + var ke *kerr.Error + switch { + case berr.err == nil: + brokerReq := brokerReqs[berr.b.meta.NodeID] + if brokerReq == nil { + brokerReq = newReq(txn) + brokerReqs[berr.b.meta.NodeID] = brokerReq + } else { + brokerReq.Transactions = append(brokerReq.Transactions, txn) + } + case errors.As(berr.err, &ke): + kerrs[ke] = append(kerrs[ke], txn) + default: + unkerrs = append(unkerrs, unkerr{berr.err, txn}) + } + } + + var issues []issueShard + for id, req := range brokerReqs { + if len(req.Transactions) <= 1 || len(req.Transactions) == 1 && !req.Transactions[0].VerifyOnly { + issues = append(issues, issueShard{ + req: &pinReq{Request: req, pinMax: true, max: 3}, + broker: id, + }) + } else { + issues = append(issues, issueShard{ + req: req, + broker: id, + }) + } + } + for _, unkerr := range unkerrs { + issues = append(issues, issueShard{ + req: newReq(unkerr.txn), + err: unkerr.err, + }) + } + for kerr, txns := range kerrs { + issues = append(issues, issueShard{ + req: newReq(txns...), + err: kerr, + }) + } + + return issues, true, nil // reshardable to load correct coordinators +} + +func (cl *addPartitionsToTxnSharder) onResp(kreq kmsg.Request, kresp kmsg.Response) error { + req := kreq.(*kmsg.AddPartitionsToTxnRequest) + resp := kresp.(*kmsg.AddPartitionsToTxnResponse) + + // We default to the top level error, which is used in v4+. For v3 + // (case 0), we use the per-partition error, which is the same for + // every partition on not_coordinator errors. + code := resp.ErrorCode + if code == 0 && len(resp.Transactions) == 0 { + // Convert v3 and prior to v4+ + resptxn := kmsg.NewAddPartitionsToTxnResponseTransaction() + resptxn.TransactionalID = req.TransactionalID + for _, rt := range resp.Topics { + respt := kmsg.NewAddPartitionsToTxnResponseTransactionTopic() + respt.Topic = rt.Topic + for _, rp := range rt.Partitions { + respp := kmsg.NewAddPartitionsToTxnResponseTransactionTopicPartition() + respp.Partition = rp.Partition + respp.ErrorCode = rp.ErrorCode + code = rp.ErrorCode // v3 and prior has per-partition errors, not top level + respt.Partitions = append(respt.Partitions, respp) + } + resptxn.Topics = append(resptxn.Topics, respt) + } + resp.Transactions = append(resp.Transactions, resptxn) + } else { + // Convert v4 to v3 and prior: either we have a top level error + // code or we have at least one transaction. + // + // If the code is non-zero, we convert it to per-partition error + // codes; v3 does not have a top level err. 
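+ // For example, a top level NOT_COORDINATOR returned by a v4+ broker is
+ // copied onto every requested partition of the v3-shaped response below,
+ // so callers that only inspect per-partition errors still observe it.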
+ addPartitionsTxnToResp(resp) + if code != 0 { + for _, reqt := range req.Topics { + respt := kmsg.NewAddPartitionsToTxnResponseTopic() + respt.Topic = reqt.Topic + for _, reqp := range reqt.Partitions { + respp := kmsg.NewAddPartitionsToTxnResponseTopicPartition() + respp.Partition = reqp + respp.ErrorCode = resp.ErrorCode + respt.Partitions = append(respt.Partitions, respp) + } + resp.Topics = append(resp.Topics, respt) + } + } + } + if err := kerr.ErrorForCode(code); cl.maybeDeleteStaleCoordinator(req.TransactionalID, coordinatorTypeTxn, err) { + return err + } + return nil +} + +func (*addPartitionsToTxnSharder) merge(sresps []ResponseShard) (kmsg.Response, error) { + merged := kmsg.NewPtrAddPartitionsToTxnResponse() + + firstErr := firstErrMerger(sresps, func(kresp kmsg.Response) { + resp := kresp.(*kmsg.AddPartitionsToTxnResponse) + merged.Version = resp.Version + merged.ThrottleMillis = resp.ThrottleMillis + merged.ErrorCode = resp.ErrorCode + merged.Transactions = append(merged.Transactions, resp.Transactions...) + }) + addPartitionsTxnToResp(merged) + return merged, firstErr +} + +// handle sharding WriteTxnMarkersRequest +type writeTxnMarkersSharder struct{ *Client } + +func (cl *writeTxnMarkersSharder) shard(ctx context.Context, kreq kmsg.Request, _ error) ([]issueShard, bool, error) { + req := kreq.(*kmsg.WriteTxnMarkersRequest) + + var need []string + for _, marker := range req.Markers { + for _, topic := range marker.Topics { + need = append(need, topic.Topic) + } + } + mapping, err := cl.fetchMappedMetadata(ctx, need, true) + if err != nil { + return nil, false, err + } + + type pidEpochCommit struct { + pid int64 + epoch int16 + commit bool + } + + brokerReqs := make(map[int32]map[pidEpochCommit]map[string][]int32) + unknown := make(map[error]map[pidEpochCommit]map[string][]int32) // err => pec => topic => partitions + + addreq := func(b int32, pec pidEpochCommit, t string, p int32) { + pecs := brokerReqs[b] + if pecs == nil { + pecs = make(map[pidEpochCommit]map[string][]int32) + brokerReqs[b] = pecs + } + ts := pecs[pec] + if ts == nil { + ts = make(map[string][]int32) + pecs[pec] = ts + } + ts[t] = append(ts[t], p) + } + addunk := func(err error, pec pidEpochCommit, t string, p int32) { + pecs := unknown[err] + if pecs == nil { + pecs = make(map[pidEpochCommit]map[string][]int32) + unknown[err] = pecs + } + ts := pecs[pec] + if ts == nil { + ts = make(map[string][]int32) + pecs[pec] = ts + } + ts[t] = append(ts[t], p) + } + + for _, marker := range req.Markers { + pec := pidEpochCommit{ + marker.ProducerID, + marker.ProducerEpoch, + marker.Committed, + } + for _, topic := range marker.Topics { + t := topic.Topic + tmapping, exists := mapping[t] + if err := unknownOrCode(exists, tmapping.t.ErrorCode); err != nil { + for _, partition := range topic.Partitions { + addunk(err, pec, t, partition) + } + continue + } + for _, partition := range topic.Partitions { + p, exists := tmapping.ps[partition] + if err := unknownOrCode(exists, p.ErrorCode); err != nil { + addunk(err, pec, t, partition) + continue + } + if err := noLeader(p.Leader); err != nil { + addunk(err, pec, t, partition) + continue + } + addreq(p.Leader, pec, t, partition) + } + } + } + + mkreq := kmsg.NewPtrWriteTxnMarkersRequest + + var issues []issueShard + for brokerID, brokerReq := range brokerReqs { + req := mkreq() + for pec, topics := range brokerReq { + rm := kmsg.NewWriteTxnMarkersRequestMarker() + rm.ProducerID = pec.pid + rm.ProducerEpoch = pec.epoch + rm.Committed = pec.commit + for topic, parts := 
range topics { + rt := kmsg.NewWriteTxnMarkersRequestMarkerTopic() + rt.Topic = topic + rt.Partitions = parts + rm.Topics = append(rm.Topics, rt) + } + req.Markers = append(req.Markers, rm) + } + issues = append(issues, issueShard{ + req: req, + broker: brokerID, + }) + } + + for err, errReq := range unknown { + req := mkreq() + for pec, topics := range errReq { + rm := kmsg.NewWriteTxnMarkersRequestMarker() + rm.ProducerID = pec.pid + rm.ProducerEpoch = pec.epoch + rm.Committed = pec.commit + for topic, parts := range topics { + rt := kmsg.NewWriteTxnMarkersRequestMarkerTopic() + rt.Topic = topic + rt.Partitions = parts + rm.Topics = append(rm.Topics, rt) + } + req.Markers = append(req.Markers, rm) + } + issues = append(issues, issueShard{ + req: req, + err: err, + }) + } + return issues, true, nil // this is reshardable +} + +func (cl *writeTxnMarkersSharder) onResp(_ kmsg.Request, kresp kmsg.Response) error { + var ( + resp = kresp.(*kmsg.WriteTxnMarkersResponse) + del []string + retErr error + unknownTopic bool + ) + for i := range resp.Markers { + m := &resp.Markers[i] + for j := range m.Topics { + t := &m.Topics[j] + for k := range t.Partitions { + p := &t.Partitions[k] + err := kerr.ErrorForCode(p.ErrorCode) + if err == kerr.UnknownTopicOrPartition || err == kerr.NotLeaderForPartition { + del = append(del, t.Topic) + unknownTopic = unknownTopic || err == kerr.UnknownTopicOrPartition + } + onRespShardErr(&retErr, err) + } + } + } + if cl.maybeDeleteMappedMetadata(unknownTopic, del...) { + return retErr + } + return nil +} + +func (*writeTxnMarkersSharder) merge(sresps []ResponseShard) (kmsg.Response, error) { + merged := kmsg.NewPtrWriteTxnMarkersResponse() + markers := make(map[int64]map[string][]kmsg.WriteTxnMarkersResponseMarkerTopicPartition) + + firstErr := firstErrMerger(sresps, func(kresp kmsg.Response) { + resp := kresp.(*kmsg.WriteTxnMarkersResponse) + merged.Version = resp.Version + for _, marker := range resp.Markers { + topics := markers[marker.ProducerID] + if topics == nil { + topics = make(map[string][]kmsg.WriteTxnMarkersResponseMarkerTopicPartition) + markers[marker.ProducerID] = topics + } + for _, topic := range marker.Topics { + topics[topic.Topic] = append(topics[topic.Topic], topic.Partitions...) + } + } + }) + for pid, topics := range markers { + respMarker := kmsg.NewWriteTxnMarkersResponseMarker() + respMarker.ProducerID = pid + for topic, partitions := range topics { + respTopic := kmsg.NewWriteTxnMarkersResponseMarkerTopic() + respTopic.Topic = topic + respTopic.Partitions = append(respTopic.Partitions, partitions...) 
+ respMarker.Topics = append(respMarker.Topics, respTopic) + } + merged.Markers = append(merged.Markers, respMarker) + } + return merged, firstErr +} + +// handle sharding DescribeConfigsRequest +type describeConfigsSharder struct{ *Client } + +func (*describeConfigsSharder) shard(_ context.Context, kreq kmsg.Request, _ error) ([]issueShard, bool, error) { + req := kreq.(*kmsg.DescribeConfigsRequest) + + brokerReqs := make(map[int32][]kmsg.DescribeConfigsRequestResource) + var any []kmsg.DescribeConfigsRequestResource + + for i := range req.Resources { + resource := req.Resources[i] + switch resource.ResourceType { + case kmsg.ConfigResourceTypeBroker: + case kmsg.ConfigResourceTypeBrokerLogger: + default: + any = append(any, resource) + continue + } + id, err := strconv.ParseInt(resource.ResourceName, 10, 32) + if err != nil || id < 0 { + any = append(any, resource) + continue + } + brokerReqs[int32(id)] = append(brokerReqs[int32(id)], resource) + } + + var issues []issueShard + for brokerID, brokerReq := range brokerReqs { + newReq := kmsg.NewPtrDescribeConfigsRequest() + newReq.Resources = brokerReq + newReq.IncludeSynonyms = req.IncludeSynonyms + newReq.IncludeDocumentation = req.IncludeDocumentation + + issues = append(issues, issueShard{ + req: newReq, + broker: brokerID, + }) + } + + if len(any) > 0 { + newReq := kmsg.NewPtrDescribeConfigsRequest() + newReq.Resources = any + newReq.IncludeSynonyms = req.IncludeSynonyms + newReq.IncludeDocumentation = req.IncludeDocumentation + issues = append(issues, issueShard{ + req: newReq, + any: true, + }) + } + + return issues, false, nil // this is not reshardable, but the any block can go anywhere +} + +func (*describeConfigsSharder) onResp(kmsg.Request, kmsg.Response) error { return nil } // configs: topics not mapped, nothing retryable + +func (*describeConfigsSharder) merge(sresps []ResponseShard) (kmsg.Response, error) { + merged := kmsg.NewPtrDescribeConfigsResponse() + return merged, firstErrMerger(sresps, func(kresp kmsg.Response) { + resp := kresp.(*kmsg.DescribeConfigsResponse) + merged.Version = resp.Version + merged.ThrottleMillis = resp.ThrottleMillis + merged.Resources = append(merged.Resources, resp.Resources...) 
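+ // Resources from every shard are concatenated as-is; no ordering across
+ // brokers is implied.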
+ }) +} + +// handle sharding AlterConfigsRequest +type alterConfigsSharder struct{ *Client } + +func (*alterConfigsSharder) shard(_ context.Context, kreq kmsg.Request, _ error) ([]issueShard, bool, error) { + req := kreq.(*kmsg.AlterConfigsRequest) + + brokerReqs := make(map[int32][]kmsg.AlterConfigsRequestResource) + var any []kmsg.AlterConfigsRequestResource + + for i := range req.Resources { + resource := req.Resources[i] + switch resource.ResourceType { + case kmsg.ConfigResourceTypeBroker: + case kmsg.ConfigResourceTypeBrokerLogger: + default: + any = append(any, resource) + continue + } + id, err := strconv.ParseInt(resource.ResourceName, 10, 32) + if err != nil || id < 0 { + any = append(any, resource) + continue + } + brokerReqs[int32(id)] = append(brokerReqs[int32(id)], resource) + } + + var issues []issueShard + for brokerID, brokerReq := range brokerReqs { + newReq := kmsg.NewPtrAlterConfigsRequest() + newReq.Resources = brokerReq + newReq.ValidateOnly = req.ValidateOnly + + issues = append(issues, issueShard{ + req: newReq, + broker: brokerID, + }) + } + + if len(any) > 0 { + newReq := kmsg.NewPtrAlterConfigsRequest() + newReq.Resources = any + newReq.ValidateOnly = req.ValidateOnly + issues = append(issues, issueShard{ + req: newReq, + any: true, + }) + } + + return issues, false, nil // this is not reshardable, but the any block can go anywhere +} + +func (*alterConfigsSharder) onResp(kmsg.Request, kmsg.Response) error { return nil } // configs: topics not mapped, nothing retryable + +func (*alterConfigsSharder) merge(sresps []ResponseShard) (kmsg.Response, error) { + merged := kmsg.NewPtrAlterConfigsResponse() + return merged, firstErrMerger(sresps, func(kresp kmsg.Response) { + resp := kresp.(*kmsg.AlterConfigsResponse) + merged.Version = resp.Version + merged.ThrottleMillis = resp.ThrottleMillis + merged.Resources = append(merged.Resources, resp.Resources...) 
+ }) +} + +// handles sharding AlterReplicaLogDirsRequest +type alterReplicaLogDirsSharder struct{ *Client } + +func (cl *alterReplicaLogDirsSharder) shard(ctx context.Context, kreq kmsg.Request, _ error) ([]issueShard, bool, error) { + req := kreq.(*kmsg.AlterReplicaLogDirsRequest) + + needMap := make(map[string]struct{}) + for _, dir := range req.Dirs { + for _, topic := range dir.Topics { + needMap[topic.Topic] = struct{}{} + } + } + var need []string + for topic := range needMap { + need = append(need, topic) + } + mapping, err := cl.fetchMappedMetadata(ctx, need, false) // bypass cache, tricky to manage response + if err != nil { + return nil, false, err + } + + brokerReqs := make(map[int32]map[string]map[string][]int32) // broker => dir => topic => partitions + unknowns := make(map[error]map[string]map[string][]int32) // err => dir => topic => partitions + + addBroker := func(broker int32, dir, topic string, partition int32) { + brokerDirs := brokerReqs[broker] + if brokerDirs == nil { + brokerDirs = make(map[string]map[string][]int32) + brokerReqs[broker] = brokerDirs + } + dirTopics := brokerDirs[dir] + if dirTopics == nil { + dirTopics = make(map[string][]int32) + brokerDirs[dir] = dirTopics + } + dirTopics[topic] = append(dirTopics[topic], partition) + } + + addUnknown := func(err error, dir, topic string, partition int32) { + dirs := unknowns[err] + if dirs == nil { + dirs = make(map[string]map[string][]int32) + unknowns[err] = dirs + } + dirTopics := dirs[dir] + if dirTopics == nil { + dirTopics = make(map[string][]int32) + dirs[dir] = dirTopics + } + dirTopics[topic] = append(dirTopics[topic], partition) + } + + for _, dir := range req.Dirs { + for _, topic := range dir.Topics { + t := topic.Topic + tmapping, exists := mapping[t] + if err := unknownOrCode(exists, tmapping.t.ErrorCode); err != nil { + for _, partition := range topic.Partitions { + addUnknown(err, dir.Dir, t, partition) + } + continue + } + for _, partition := range topic.Partitions { + p, exists := tmapping.ps[partition] + if err := unknownOrCode(exists, p.ErrorCode); err != nil { + addUnknown(err, dir.Dir, t, partition) + continue + } + + for _, replica := range p.Replicas { + addBroker(replica, dir.Dir, t, partition) + } + } + } + } + + var issues []issueShard + for brokerID, brokerReq := range brokerReqs { + req := kmsg.NewPtrAlterReplicaLogDirsRequest() + for dir, topics := range brokerReq { + rd := kmsg.NewAlterReplicaLogDirsRequestDir() + rd.Dir = dir + for topic, partitions := range topics { + rdTopic := kmsg.NewAlterReplicaLogDirsRequestDirTopic() + rdTopic.Topic = topic + rdTopic.Partitions = partitions + rd.Topics = append(rd.Topics, rdTopic) + } + req.Dirs = append(req.Dirs, rd) + } + + issues = append(issues, issueShard{ + req: req, + broker: brokerID, + }) + } + + for err, dirs := range unknowns { + req := kmsg.NewPtrAlterReplicaLogDirsRequest() + for dir, topics := range dirs { + rd := kmsg.NewAlterReplicaLogDirsRequestDir() + rd.Dir = dir + for topic, partitions := range topics { + rdTopic := kmsg.NewAlterReplicaLogDirsRequestDirTopic() + rdTopic.Topic = topic + rdTopic.Partitions = partitions + rd.Topics = append(rd.Topics, rdTopic) + } + req.Dirs = append(req.Dirs, rd) + } + + issues = append(issues, issueShard{ + req: req, + err: err, + }) + } + + return issues, true, nil // this is reshardable +} + +func (*alterReplicaLogDirsSharder) onResp(kmsg.Request, kmsg.Response) error { return nil } // topic / partitions: not retried + +// merge does not make sense for this function, but we provide a 
one anyway. +func (*alterReplicaLogDirsSharder) merge(sresps []ResponseShard) (kmsg.Response, error) { + merged := kmsg.NewPtrAlterReplicaLogDirsResponse() + topics := make(map[string][]kmsg.AlterReplicaLogDirsResponseTopicPartition) + + firstErr := firstErrMerger(sresps, func(kresp kmsg.Response) { + resp := kresp.(*kmsg.AlterReplicaLogDirsResponse) + merged.Version = resp.Version + merged.ThrottleMillis = resp.ThrottleMillis + + for _, topic := range resp.Topics { + topics[topic.Topic] = append(topics[topic.Topic], topic.Partitions...) + } + }) + for topic, partitions := range topics { + respTopic := kmsg.NewAlterReplicaLogDirsResponseTopic() + respTopic.Topic = topic + respTopic.Partitions = partitions + merged.Topics = append(merged.Topics, respTopic) + } + return merged, firstErr +} + +// handles sharding DescribeLogDirsRequest +type describeLogDirsSharder struct{ *Client } + +func (cl *describeLogDirsSharder) shard(ctx context.Context, kreq kmsg.Request, _ error) ([]issueShard, bool, error) { + req := kreq.(*kmsg.DescribeLogDirsRequest) + + // If req.Topics is nil, the request is to describe all logdirs. Thus, + // we will issue the request to all brokers (similar to ListGroups). + if req.Topics == nil { + return cl.allBrokersShardedReq(ctx, func() kmsg.Request { + dup := *req + return &dup + }) + } + + var need []string + for _, topic := range req.Topics { + need = append(need, topic.Topic) + } + mapping, err := cl.fetchMappedMetadata(ctx, need, false) // bypass cache, tricky to manage response + if err != nil { + return nil, false, err + } + + brokerReqs := make(map[int32]map[string][]int32) + var unknowns unknownErrShards + + for _, topic := range req.Topics { + t := topic.Topic + tmapping, exists := mapping[t] + if err := unknownOrCode(exists, tmapping.t.ErrorCode); err != nil { + unknowns.errs(err, t, topic.Partitions) + continue + } + for _, partition := range topic.Partitions { + p, exists := tmapping.ps[partition] + if err := unknownOrCode(exists, p.ErrorCode); err != nil { + unknowns.err(err, t, partition) + continue + } + + for _, replica := range p.Replicas { + brokerReq := brokerReqs[replica] + if brokerReq == nil { + brokerReq = make(map[string][]int32) + brokerReqs[replica] = brokerReq + } + brokerReq[topic.Topic] = append(brokerReq[topic.Topic], partition) + } + } + } + + mkreq := kmsg.NewPtrDescribeLogDirsRequest + + var issues []issueShard + for brokerID, brokerReq := range brokerReqs { + req := mkreq() + for topic, parts := range brokerReq { + reqTopic := kmsg.NewDescribeLogDirsRequestTopic() + reqTopic.Topic = topic + reqTopic.Partitions = parts + req.Topics = append(req.Topics, reqTopic) + } + issues = append(issues, issueShard{ + req: req, + broker: brokerID, + }) + } + + return append(issues, unknowns.collect(mkreq, func(r *kmsg.DescribeLogDirsRequest, topic string, parts []int32) { + reqTopic := kmsg.NewDescribeLogDirsRequestTopic() + reqTopic.Topic = topic + reqTopic.Partitions = parts + r.Topics = append(r.Topics, reqTopic) + })...), true, nil // this is reshardable +} + +func (*describeLogDirsSharder) onResp(kmsg.Request, kmsg.Response) error { return nil } // topic / configs: not retried + +// merge does not make sense for this function, but we provide one anyway. +// We lose the error code for directories. 
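+// Directories that share a path across brokers are combined into a single
+// entry, which is why the per-directory error code cannot be preserved.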
+func (*describeLogDirsSharder) merge(sresps []ResponseShard) (kmsg.Response, error) { + merged := kmsg.NewPtrDescribeLogDirsResponse() + dirs := make(map[string]map[string][]kmsg.DescribeLogDirsResponseDirTopicPartition) + + firstErr := firstErrMerger(sresps, func(kresp kmsg.Response) { + resp := kresp.(*kmsg.DescribeLogDirsResponse) + merged.Version = resp.Version + merged.ThrottleMillis = resp.ThrottleMillis + + for _, dir := range resp.Dirs { + mergeDir := dirs[dir.Dir] + if mergeDir == nil { + mergeDir = make(map[string][]kmsg.DescribeLogDirsResponseDirTopicPartition) + dirs[dir.Dir] = mergeDir + } + for _, topic := range dir.Topics { + mergeDir[topic.Topic] = append(mergeDir[topic.Topic], topic.Partitions...) + } + } + }) + for dir, topics := range dirs { + md := kmsg.NewDescribeLogDirsResponseDir() + md.Dir = dir + for topic, partitions := range topics { + mdTopic := kmsg.NewDescribeLogDirsResponseDirTopic() + mdTopic.Topic = topic + mdTopic.Partitions = partitions + md.Topics = append(md.Topics, mdTopic) + } + merged.Dirs = append(merged.Dirs, md) + } + return merged, firstErr +} + +// handles sharding DeleteGroupsRequest +type deleteGroupsSharder struct{ *Client } + +func (cl *deleteGroupsSharder) shard(ctx context.Context, kreq kmsg.Request, _ error) ([]issueShard, bool, error) { + req := kreq.(*kmsg.DeleteGroupsRequest) + + coordinators := cl.loadCoordinators(ctx, coordinatorTypeGroup, req.Groups...) + type unkerr struct { + err error + group string + } + var ( + brokerReqs = make(map[int32]*kmsg.DeleteGroupsRequest) + kerrs = make(map[*kerr.Error][]string) + unkerrs []unkerr + ) + + newReq := func(groups ...string) *kmsg.DeleteGroupsRequest { + newReq := kmsg.NewPtrDeleteGroupsRequest() + newReq.Groups = groups + return newReq + } + + for _, group := range req.Groups { + berr := coordinators[group] + var ke *kerr.Error + switch { + case berr.err == nil: + brokerReq := brokerReqs[berr.b.meta.NodeID] + if brokerReq == nil { + brokerReq = newReq() + brokerReqs[berr.b.meta.NodeID] = brokerReq + } + brokerReq.Groups = append(brokerReq.Groups, group) + case errors.As(berr.err, &ke): + kerrs[ke] = append(kerrs[ke], group) + default: + unkerrs = append(unkerrs, unkerr{berr.err, group}) + } + } + + var issues []issueShard + for id, req := range brokerReqs { + issues = append(issues, issueShard{ + req: req, + broker: id, + }) + } + for _, unkerr := range unkerrs { + issues = append(issues, issueShard{ + req: newReq(unkerr.group), + err: unkerr.err, + }) + } + for kerr, groups := range kerrs { + issues = append(issues, issueShard{ + req: newReq(groups...), + err: kerr, + }) + } + + return issues, true, nil // reshardable to load correct coordinators +} + +func (cl *deleteGroupsSharder) onResp(_ kmsg.Request, kresp kmsg.Response) error { + resp := kresp.(*kmsg.DeleteGroupsResponse) + var retErr error + for i := range resp.Groups { + group := &resp.Groups[i] + err := kerr.ErrorForCode(group.ErrorCode) + cl.maybeDeleteStaleCoordinator(group.Group, coordinatorTypeGroup, err) + onRespShardErr(&retErr, err) + } + return retErr +} + +func (*deleteGroupsSharder) merge(sresps []ResponseShard) (kmsg.Response, error) { + merged := kmsg.NewPtrDeleteGroupsResponse() + return merged, firstErrMerger(sresps, func(kresp kmsg.Response) { + resp := kresp.(*kmsg.DeleteGroupsResponse) + merged.Version = resp.Version + merged.ThrottleMillis = resp.ThrottleMillis + merged.Groups = append(merged.Groups, resp.Groups...) 
+ }) +} + +// handle sharding IncrementalAlterConfigsRequest +type incrementalAlterConfigsSharder struct{ *Client } + +func (*incrementalAlterConfigsSharder) shard(_ context.Context, kreq kmsg.Request, _ error) ([]issueShard, bool, error) { + req := kreq.(*kmsg.IncrementalAlterConfigsRequest) + + brokerReqs := make(map[int32][]kmsg.IncrementalAlterConfigsRequestResource) + var any []kmsg.IncrementalAlterConfigsRequestResource + + for i := range req.Resources { + resource := req.Resources[i] + switch resource.ResourceType { + case kmsg.ConfigResourceTypeBroker: + case kmsg.ConfigResourceTypeBrokerLogger: + default: + any = append(any, resource) + continue + } + id, err := strconv.ParseInt(resource.ResourceName, 10, 32) + if err != nil || id < 0 { + any = append(any, resource) + continue + } + brokerReqs[int32(id)] = append(brokerReqs[int32(id)], resource) + } + + var issues []issueShard + for brokerID, brokerReq := range brokerReqs { + newReq := kmsg.NewPtrIncrementalAlterConfigsRequest() + newReq.Resources = brokerReq + newReq.ValidateOnly = req.ValidateOnly + + issues = append(issues, issueShard{ + req: newReq, + broker: brokerID, + }) + } + + if len(any) > 0 { + newReq := kmsg.NewPtrIncrementalAlterConfigsRequest() + newReq.Resources = any + newReq.ValidateOnly = req.ValidateOnly + issues = append(issues, issueShard{ + req: newReq, + any: true, + }) + } + + return issues, false, nil // this is not reshardable, but the any block can go anywhere +} + +func (*incrementalAlterConfigsSharder) onResp(kmsg.Request, kmsg.Response) error { return nil } // configs: topics not mapped, nothing retryable + +func (*incrementalAlterConfigsSharder) merge(sresps []ResponseShard) (kmsg.Response, error) { + merged := kmsg.NewPtrIncrementalAlterConfigsResponse() + return merged, firstErrMerger(sresps, func(kresp kmsg.Response) { + resp := kresp.(*kmsg.IncrementalAlterConfigsResponse) + merged.Version = resp.Version + merged.ThrottleMillis = resp.ThrottleMillis + merged.Resources = append(merged.Resources, resp.Resources...) 
+ }) +} + +// handle sharding DescribeProducersRequest +type describeProducersSharder struct{ *Client } + +func (cl *describeProducersSharder) shard(ctx context.Context, kreq kmsg.Request, _ error) ([]issueShard, bool, error) { + req := kreq.(*kmsg.DescribeProducersRequest) + + var need []string + for _, topic := range req.Topics { + need = append(need, topic.Topic) + } + mapping, err := cl.fetchMappedMetadata(ctx, need, true) + if err != nil { + return nil, false, err + } + + brokerReqs := make(map[int32]map[string][]int32) // broker => topic => partitions + var unknowns unknownErrShards + + for _, topic := range req.Topics { + t := topic.Topic + tmapping, exists := mapping[t] + if err := unknownOrCode(exists, tmapping.t.ErrorCode); err != nil { + unknowns.errs(err, t, topic.Partitions) + continue + } + for _, partition := range topic.Partitions { + p, exists := tmapping.ps[partition] + if err := unknownOrCode(exists, p.ErrorCode); err != nil { + unknowns.err(err, t, partition) + continue + } + + brokerReq := brokerReqs[p.Leader] + if brokerReq == nil { + brokerReq = make(map[string][]int32) + brokerReqs[p.Leader] = brokerReq + } + brokerReq[topic.Topic] = append(brokerReq[topic.Topic], partition) + } + } + + mkreq := kmsg.NewPtrDescribeProducersRequest + + var issues []issueShard + for brokerID, brokerReq := range brokerReqs { + req := mkreq() + for topic, parts := range brokerReq { + reqTopic := kmsg.NewDescribeProducersRequestTopic() + reqTopic.Topic = topic + reqTopic.Partitions = parts + req.Topics = append(req.Topics, reqTopic) + } + issues = append(issues, issueShard{ + req: req, + broker: brokerID, + }) + } + + return append(issues, unknowns.collect(mkreq, func(r *kmsg.DescribeProducersRequest, topic string, parts []int32) { + reqTopic := kmsg.NewDescribeProducersRequestTopic() + reqTopic.Topic = topic + reqTopic.Partitions = parts + r.Topics = append(r.Topics, reqTopic) + })...), true, nil // this is reshardable +} + +func (cl *describeProducersSharder) onResp(_ kmsg.Request, kresp kmsg.Response) error { + var ( + resp = kresp.(*kmsg.DescribeProducersResponse) + del []string + retErr error + unknownTopic bool + ) + for i := range resp.Topics { + t := &resp.Topics[i] + for j := range t.Partitions { + p := &t.Partitions[j] + err := kerr.ErrorForCode(p.ErrorCode) + if err == kerr.UnknownTopicOrPartition || err == kerr.NotLeaderForPartition { + del = append(del, t.Topic) + unknownTopic = unknownTopic || err == kerr.UnknownTopicOrPartition + } + onRespShardErr(&retErr, err) + } + } + if cl.maybeDeleteMappedMetadata(unknownTopic, del...) { + return retErr + } + return nil +} + +func (*describeProducersSharder) merge(sresps []ResponseShard) (kmsg.Response, error) { + merged := kmsg.NewPtrDescribeProducersResponse() + topics := make(map[string][]kmsg.DescribeProducersResponseTopicPartition) + firstErr := firstErrMerger(sresps, func(kresp kmsg.Response) { + resp := kresp.(*kmsg.DescribeProducersResponse) + merged.Version = resp.Version + merged.ThrottleMillis = resp.ThrottleMillis + + for _, topic := range resp.Topics { + topics[topic.Topic] = append(topics[topic.Topic], topic.Partitions...) 
+ } + }) + for topic, partitions := range topics { + respTopic := kmsg.NewDescribeProducersResponseTopic() + respTopic.Topic = topic + respTopic.Partitions = partitions + merged.Topics = append(merged.Topics, respTopic) + } + return merged, firstErr +} + +// handles sharding DescribeTransactionsRequest +type describeTransactionsSharder struct{ *Client } + +func (cl *describeTransactionsSharder) shard(ctx context.Context, kreq kmsg.Request, _ error) ([]issueShard, bool, error) { + req := kreq.(*kmsg.DescribeTransactionsRequest) + + coordinators := cl.loadCoordinators(ctx, coordinatorTypeTxn, req.TransactionalIDs...) + type unkerr struct { + err error + txnID string + } + var ( + brokerReqs = make(map[int32]*kmsg.DescribeTransactionsRequest) + kerrs = make(map[*kerr.Error][]string) + unkerrs []unkerr + ) + + newReq := func(txnIDs ...string) *kmsg.DescribeTransactionsRequest { + r := kmsg.NewPtrDescribeTransactionsRequest() + r.TransactionalIDs = txnIDs + return r + } + + for _, txnID := range req.TransactionalIDs { + berr := coordinators[txnID] + var ke *kerr.Error + switch { + case berr.err == nil: + brokerReq := brokerReqs[berr.b.meta.NodeID] + if brokerReq == nil { + brokerReq = newReq() + brokerReqs[berr.b.meta.NodeID] = brokerReq + } + brokerReq.TransactionalIDs = append(brokerReq.TransactionalIDs, txnID) + case errors.As(berr.err, &ke): + kerrs[ke] = append(kerrs[ke], txnID) + default: + unkerrs = append(unkerrs, unkerr{berr.err, txnID}) + } + } + + var issues []issueShard + for id, req := range brokerReqs { + issues = append(issues, issueShard{ + req: req, + broker: id, + }) + } + for _, unkerr := range unkerrs { + issues = append(issues, issueShard{ + req: newReq(unkerr.txnID), + err: unkerr.err, + }) + } + for kerr, txnIDs := range kerrs { + issues = append(issues, issueShard{ + req: newReq(txnIDs...), + err: kerr, + }) + } + + return issues, true, nil // reshardable to load correct coordinators +} + +func (cl *describeTransactionsSharder) onResp(_ kmsg.Request, kresp kmsg.Response) error { // cleanup any stale coordinators + resp := kresp.(*kmsg.DescribeTransactionsResponse) + var retErr error + for i := range resp.TransactionStates { + txnState := &resp.TransactionStates[i] + err := kerr.ErrorForCode(txnState.ErrorCode) + cl.maybeDeleteStaleCoordinator(txnState.TransactionalID, coordinatorTypeTxn, err) + onRespShardErr(&retErr, err) + } + return retErr +} + +func (*describeTransactionsSharder) merge(sresps []ResponseShard) (kmsg.Response, error) { + merged := kmsg.NewPtrDescribeTransactionsResponse() + return merged, firstErrMerger(sresps, func(kresp kmsg.Response) { + resp := kresp.(*kmsg.DescribeTransactionsResponse) + merged.Version = resp.Version + merged.ThrottleMillis = resp.ThrottleMillis + merged.TransactionStates = append(merged.TransactionStates, resp.TransactionStates...) 
+ }) +} + +// handles sharding ListTransactionsRequest +type listTransactionsSharder struct{ *Client } + +func (cl *listTransactionsSharder) shard(ctx context.Context, kreq kmsg.Request, _ error) ([]issueShard, bool, error) { + req := kreq.(*kmsg.ListTransactionsRequest) + return cl.allBrokersShardedReq(ctx, func() kmsg.Request { + dup := *req + return &dup + }) +} + +func (*listTransactionsSharder) onResp(_ kmsg.Request, kresp kmsg.Response) error { + resp := kresp.(*kmsg.ListTransactionsResponse) + return kerr.ErrorForCode(resp.ErrorCode) +} + +func (*listTransactionsSharder) merge(sresps []ResponseShard) (kmsg.Response, error) { + merged := kmsg.NewPtrListTransactionsResponse() + + unknownStates := make(map[string]struct{}) + + firstErr := firstErrMerger(sresps, func(kresp kmsg.Response) { + resp := kresp.(*kmsg.ListTransactionsResponse) + merged.Version = resp.Version + merged.ThrottleMillis = resp.ThrottleMillis + if merged.ErrorCode == 0 { + merged.ErrorCode = resp.ErrorCode + } + for _, state := range resp.UnknownStateFilters { + unknownStates[state] = struct{}{} + } + merged.TransactionStates = append(merged.TransactionStates, resp.TransactionStates...) + }) + for unknownState := range unknownStates { + merged.UnknownStateFilters = append(merged.UnknownStateFilters, unknownState) + } + + return merged, firstErr +} diff --git a/vendor/github.com/twmb/franz-go/pkg/kgo/compression.go b/vendor/github.com/twmb/franz-go/pkg/kgo/compression.go new file mode 100644 index 000000000000..fe8ad645bbda --- /dev/null +++ b/vendor/github.com/twmb/franz-go/pkg/kgo/compression.go @@ -0,0 +1,346 @@ +package kgo + +import ( + "bytes" + "compress/gzip" + "encoding/binary" + "errors" + "io" + "runtime" + "sync" + + "github.com/klauspost/compress/s2" + "github.com/klauspost/compress/zstd" + "github.com/pierrec/lz4/v4" +) + +var byteBuffers = sync.Pool{New: func() any { return bytes.NewBuffer(make([]byte, 8<<10)) }} + +type codecType int8 + +const ( + codecNone codecType = iota + codecGzip + codecSnappy + codecLZ4 + codecZstd +) + +// CompressionCodec configures how records are compressed before being sent. +// +// Records are compressed within individual topics and partitions, inside of a +// RecordBatch. All records in a RecordBatch are compressed into one record +// for that batch. +type CompressionCodec struct { + codec codecType + level int +} + +// NoCompression is a compression option that avoids compression. This can +// always be used as a fallback compression. +func NoCompression() CompressionCodec { return CompressionCodec{codecNone, 0} } + +// GzipCompression enables gzip compression with the default compression level. +func GzipCompression() CompressionCodec { return CompressionCodec{codecGzip, gzip.DefaultCompression} } + +// SnappyCompression enables snappy compression. +func SnappyCompression() CompressionCodec { return CompressionCodec{codecSnappy, 0} } + +// Lz4Compression enables lz4 compression with the fastest compression level. +func Lz4Compression() CompressionCodec { return CompressionCodec{codecLZ4, 0} } + +// ZstdCompression enables zstd compression with the default compression level. +func ZstdCompression() CompressionCodec { return CompressionCodec{codecZstd, 0} } + +// WithLevel changes the compression codec's "level", effectively allowing for +// higher or lower compression ratios at the expense of CPU speed. +// +// For the zstd package, the level is a typed int; simply convert the type back +// to an int for this function. 
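+//
+// As a sketch, zstd's typed levels can be passed like so:
+//
+//	codec := kgo.ZstdCompression().WithLevel(int(zstd.SpeedBestCompression))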
+// +// If the level is invalid, compressors just use a default level. +func (c CompressionCodec) WithLevel(level int) CompressionCodec { + c.level = level + return c +} + +type compressor struct { + options []codecType + gzPool sync.Pool + lz4Pool sync.Pool + zstdPool sync.Pool +} + +func newCompressor(codecs ...CompressionCodec) (*compressor, error) { + if len(codecs) == 0 { + return nil, nil + } + + used := make(map[codecType]bool) // we keep one type of codec per CompressionCodec + var keepIdx int + for _, codec := range codecs { + if _, exists := used[codec.codec]; exists { + continue + } + used[codec.codec] = true + codecs[keepIdx] = codec + keepIdx++ + } + codecs = codecs[:keepIdx] + + for _, codec := range codecs { + if codec.codec < 0 || codec.codec > 4 { + return nil, errors.New("unknown compression codec") + } + } + + c := new(compressor) + +out: + for _, codec := range codecs { + c.options = append(c.options, codec.codec) + switch codec.codec { + case codecNone: + break out + case codecGzip: + level := gzip.DefaultCompression + if codec.level != 0 { + if _, err := gzip.NewWriterLevel(nil, codec.level); err != nil { + level = codec.level + } + } + c.gzPool = sync.Pool{New: func() any { c, _ := gzip.NewWriterLevel(nil, level); return c }} + case codecSnappy: // (no pool needed for snappy) + case codecLZ4: + level := codec.level + if level < 0 { + level = 0 // 0 == lz4.Fast + } + fn := func() any { return lz4.NewWriter(new(bytes.Buffer)) } + w := lz4.NewWriter(new(bytes.Buffer)) + if err := w.Apply(lz4.CompressionLevelOption(lz4.CompressionLevel(level))); err == nil { + fn = func() any { + w := lz4.NewWriter(new(bytes.Buffer)) + w.Apply(lz4.CompressionLevelOption(lz4.CompressionLevel(level))) + return w + } + } + w.Close() + c.lz4Pool = sync.Pool{New: fn} + case codecZstd: + opts := []zstd.EOption{ + zstd.WithWindowSize(64 << 10), + zstd.WithEncoderConcurrency(1), + zstd.WithZeroFrames(true), + } + fn := func() any { + zstdEnc, _ := zstd.NewWriter(nil, opts...) + r := &zstdEncoder{zstdEnc} + runtime.SetFinalizer(r, func(r *zstdEncoder) { r.inner.Close() }) + return r + } + zstdEnc, err := zstd.NewWriter(nil, append(opts, zstd.WithEncoderLevel(zstd.EncoderLevel(codec.level)))...) + if err == nil { + zstdEnc.Close() + opts = append(opts, zstd.WithEncoderLevel(zstd.EncoderLevel(codec.level))) + } + c.zstdPool = sync.Pool{New: fn} + } + } + + if c.options[0] == codecNone { + return nil, nil // first codec was passthrough + } + + return c, nil +} + +type zstdEncoder struct { + inner *zstd.Encoder +} + +// Compress compresses src to buf, returning buf's inner slice once done or nil +// if an error is encountered. +// +// The writer should be put back to its pool after the returned slice is done +// being used. 
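+//
+// The codec actually used is returned alongside the bytes so the caller can
+// record it in the batch attributes; zstd is skipped for produce requests
+// older than v7, which predate broker support for it.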
+func (c *compressor) compress(dst *bytes.Buffer, src []byte, produceRequestVersion int16) ([]byte, codecType) { + var use codecType + for _, option := range c.options { + if option == codecZstd && produceRequestVersion < 7 { + continue + } + use = option + break + } + + var out []byte + switch use { + case codecNone: + return src, 0 + case codecGzip: + gz := c.gzPool.Get().(*gzip.Writer) + defer c.gzPool.Put(gz) + gz.Reset(dst) + if _, err := gz.Write(src); err != nil { + return nil, -1 + } + if err := gz.Close(); err != nil { + return nil, -1 + } + out = dst.Bytes() + case codecLZ4: + lz := c.lz4Pool.Get().(*lz4.Writer) + defer c.lz4Pool.Put(lz) + lz.Reset(dst) + if _, err := lz.Write(src); err != nil { + return nil, -1 + } + if err := lz.Close(); err != nil { + return nil, -1 + } + out = dst.Bytes() + case codecSnappy: + // Because the Snappy and Zstd codecs do not accept an io.Writer interface + // and directly take a []byte slice, here, the underlying []byte slice (`dst`) + // obtained from the bytes.Buffer{} from the pool is passed. + // As the `Write()` method on the buffer isn't used, its internal + // book-keeping goes out of sync, making the buffer unusable for further + // reading and writing via it's (eg: accessing via `Byte()`). For subsequent + // reads, the underlying slice has to be used directly. + // + // In this particular context, it is acceptable as there there are no subsequent + // operations performed on the buffer and it is immediately returned to the + // pool and `Reset()` the next time it is obtained and used where `compress()` + // is called. + if l := s2.MaxEncodedLen(len(src)); l > dst.Cap() { + dst.Grow(l) + } + out = s2.EncodeSnappy(dst.Bytes(), src) + case codecZstd: + zstdEnc := c.zstdPool.Get().(*zstdEncoder) + defer c.zstdPool.Put(zstdEnc) + if l := zstdEnc.inner.MaxEncodedSize(len(src)); l > dst.Cap() { + dst.Grow(l) + } + out = zstdEnc.inner.EncodeAll(src, dst.Bytes()) + } + + return out, use +} + +type decompressor struct { + ungzPool sync.Pool + unlz4Pool sync.Pool + unzstdPool sync.Pool +} + +func newDecompressor() *decompressor { + d := &decompressor{ + ungzPool: sync.Pool{ + New: func() any { return new(gzip.Reader) }, + }, + unlz4Pool: sync.Pool{ + New: func() any { return lz4.NewReader(nil) }, + }, + unzstdPool: sync.Pool{ + New: func() any { + zstdDec, _ := zstd.NewReader(nil, + zstd.WithDecoderLowmem(true), + zstd.WithDecoderConcurrency(1), + ) + r := &zstdDecoder{zstdDec} + runtime.SetFinalizer(r, func(r *zstdDecoder) { + r.inner.Close() + }) + return r + }, + }, + } + return d +} + +type zstdDecoder struct { + inner *zstd.Decoder +} + +func (d *decompressor) decompress(src []byte, codec byte) ([]byte, error) { + // Early return in case there is no compression + compCodec := codecType(codec) + if compCodec == codecNone { + return src, nil + } + out := byteBuffers.Get().(*bytes.Buffer) + out.Reset() + defer byteBuffers.Put(out) + + switch compCodec { + case codecGzip: + ungz := d.ungzPool.Get().(*gzip.Reader) + defer d.ungzPool.Put(ungz) + if err := ungz.Reset(bytes.NewReader(src)); err != nil { + return nil, err + } + if _, err := io.Copy(out, ungz); err != nil { + return nil, err + } + return append([]byte(nil), out.Bytes()...), nil + case codecSnappy: + if len(src) > 16 && bytes.HasPrefix(src, xerialPfx) { + return xerialDecode(src) + } + decoded, err := s2.Decode(out.Bytes(), src) + if err != nil { + return nil, err + } + return append([]byte(nil), decoded...), nil + case codecLZ4: + unlz4 := d.unlz4Pool.Get().(*lz4.Reader) + defer 
d.unlz4Pool.Put(unlz4) + unlz4.Reset(bytes.NewReader(src)) + if _, err := io.Copy(out, unlz4); err != nil { + return nil, err + } + return append([]byte(nil), out.Bytes()...), nil + case codecZstd: + unzstd := d.unzstdPool.Get().(*zstdDecoder) + defer d.unzstdPool.Put(unzstd) + decoded, err := unzstd.inner.DecodeAll(src, out.Bytes()) + if err != nil { + return nil, err + } + return append([]byte(nil), decoded...), nil + default: + return nil, errors.New("unknown compression codec") + } +} + +var xerialPfx = []byte{130, 83, 78, 65, 80, 80, 89, 0} + +var errMalformedXerial = errors.New("malformed xerial framing") + +func xerialDecode(src []byte) ([]byte, error) { + // bytes 0-8: xerial header + // bytes 8-16: xerial version + // everything after: uint32 chunk size, snappy chunk + // we come into this function knowing src is at least 16 + src = src[16:] + var dst, chunk []byte + var err error + for len(src) > 0 { + if len(src) < 4 { + return nil, errMalformedXerial + } + size := int32(binary.BigEndian.Uint32(src)) + src = src[4:] + if size < 0 || len(src) < int(size) { + return nil, errMalformedXerial + } + if chunk, err = s2.Decode(chunk[:cap(chunk)], src[:size]); err != nil { + return nil, err + } + src = src[size:] + dst = append(dst, chunk...) + } + return dst, nil +} diff --git a/vendor/github.com/twmb/franz-go/pkg/kgo/config.go b/vendor/github.com/twmb/franz-go/pkg/kgo/config.go new file mode 100644 index 000000000000..92ebeaa39055 --- /dev/null +++ b/vendor/github.com/twmb/franz-go/pkg/kgo/config.go @@ -0,0 +1,1758 @@ +package kgo + +import ( + "context" + "crypto/tls" + "errors" + "fmt" + "math" + "math/rand" + "net" + "regexp" + "runtime/debug" + "sync" + "time" + + "github.com/twmb/franz-go/pkg/kmsg" + "github.com/twmb/franz-go/pkg/kversion" + "github.com/twmb/franz-go/pkg/sasl" +) + +// Opt is an option to configure a client. +type Opt interface { + apply(*cfg) +} + +// ProducerOpt is a producer specific option to configure a client. +// This is simply a namespaced Opt. +type ProducerOpt interface { + Opt + producerOpt() +} + +// ConsumerOpt is a consumer specific option to configure a client. +// This is simply a namespaced Opt. +type ConsumerOpt interface { + Opt + consumerOpt() +} + +// GroupOpt is a consumer group specific option to configure a client. +// This is simply a namespaced Opt. +type GroupOpt interface { + Opt + groupOpt() +} + +type ( + clientOpt struct{ fn func(*cfg) } + producerOpt struct{ fn func(*cfg) } + consumerOpt struct{ fn func(*cfg) } + groupOpt struct{ fn func(*cfg) } +) + +func (opt clientOpt) apply(cfg *cfg) { opt.fn(cfg) } +func (opt producerOpt) apply(cfg *cfg) { opt.fn(cfg) } +func (opt consumerOpt) apply(cfg *cfg) { opt.fn(cfg) } +func (opt groupOpt) apply(cfg *cfg) { opt.fn(cfg) } +func (producerOpt) producerOpt() {} +func (consumerOpt) consumerOpt() {} +func (groupOpt) groupOpt() {} + +// A cfg can be written to while initializing a client, and after that it is +// (mostly) only ever read from. Some areas can continue to be modified -- +// particularly reconfiguring what to consume from -- but most areas are +// static. 
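+//
+// Fields are grouped into general, producer, consumer, and consumer group
+// sections, and the validate method below bounds-checks them when a client
+// is created.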
+type cfg struct { + ///////////////////// + // GENERAL SECTION // + ///////////////////// + + id *string // client ID + dialFn func(context.Context, string, string) (net.Conn, error) + dialTimeout time.Duration + dialTLS *tls.Config + requestTimeoutOverhead time.Duration + connIdleTimeout time.Duration + + softwareName string // KIP-511 + softwareVersion string // KIP-511 + + logger Logger + + seedBrokers []string + maxVersions *kversion.Versions + minVersions *kversion.Versions + + retryBackoff func(int) time.Duration + retries int64 + retryTimeout func(int16) time.Duration + + maxBrokerWriteBytes int32 + maxBrokerReadBytes int32 + + allowAutoTopicCreation bool + + metadataMaxAge time.Duration + metadataMinAge time.Duration + + sasls []sasl.Mechanism + + hooks hooks + + ////////////////////// + // PRODUCER SECTION // + ////////////////////// + + txnID *string + txnTimeout time.Duration + acks Acks + disableIdempotency bool + maxProduceInflight int // if idempotency is disabled, we allow a configurable max inflight + compression []CompressionCodec // order of preference + + defaultProduceTopic string + maxRecordBatchBytes int32 + maxBufferedRecords int64 + maxBufferedBytes int64 + produceTimeout time.Duration + recordRetries int64 + maxUnknownFailures int64 + linger time.Duration + recordTimeout time.Duration + manualFlushing bool + txnBackoff time.Duration + missingTopicDelete time.Duration + + partitioner Partitioner + + stopOnDataLoss bool + onDataLoss func(string, int32) + + ////////////////////// + // CONSUMER SECTION // + ////////////////////// + + maxWait int32 + minBytes int32 + maxBytes lazyI32 + maxPartBytes lazyI32 + resetOffset Offset + isolationLevel int8 + keepControl bool + rack string + preferLagFn PreferLagFn + + maxConcurrentFetches int + disableFetchSessions bool + keepRetryableFetchErrors bool + + topics map[string]*regexp.Regexp // topics to consume; if regex is true, values are compiled regular expressions + partitions map[string]map[int32]Offset // partitions to directly consume from + regex bool + + //////////////////////////// + // CONSUMER GROUP SECTION // + //////////////////////////// + + group string // group we are in + instanceID *string // optional group instance ID + balancers []GroupBalancer // balancers we can use + protocol string // "consumer" by default, expected to never be overridden + + sessionTimeout time.Duration + rebalanceTimeout time.Duration + heartbeatInterval time.Duration + requireStable bool + + onAssigned func(context.Context, *Client, map[string][]int32) + onRevoked func(context.Context, *Client, map[string][]int32) + onLost func(context.Context, *Client, map[string][]int32) + onFetched func(context.Context, *Client, *kmsg.OffsetFetchResponse) error + + adjustOffsetsBeforeAssign func(ctx context.Context, offsets map[string]map[int32]Offset) (map[string]map[int32]Offset, error) + + blockRebalanceOnPoll bool + + setAssigned bool + setRevoked bool + setLost bool + setCommitCallback bool + + autocommitDisable bool // true if autocommit was disabled or we are transactional + autocommitGreedy bool + autocommitMarks bool + autocommitInterval time.Duration + commitCallback func(*Client, *kmsg.OffsetCommitRequest, *kmsg.OffsetCommitResponse, error) +} + +func (cfg *cfg) validate() error { + if len(cfg.seedBrokers) == 0 { + return errors.New("config erroneously has no seed brokers") + } + + // We clamp maxPartBytes to maxBytes because some fake Kafka endpoints + // (Oracle) cannot handle the mismatch correctly. 
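+ // For example, a FetchMaxPartitionBytes of 100MiB combined with the
+ // default 50MiB FetchMaxBytes is clamped down to 50MiB here.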
+ if cfg.maxPartBytes > cfg.maxBytes { + cfg.maxPartBytes = cfg.maxBytes + } + + if cfg.disableIdempotency { + if cfg.txnID != nil { + return errors.New("cannot both disable idempotent writes and use transactional IDs") + } + if cfg.maxProduceInflight <= 0 { + return fmt.Errorf("invalid max produce inflight %d with idempotency disabled", cfg.maxProduceInflight) + } + } else { + if cfg.acks.val != -1 { + return errors.New("idempotency requires acks=all") + } + if cfg.maxProduceInflight != 1 { + return fmt.Errorf("invalid usage of MaxProduceRequestsInflightPerBroker with idempotency enabled") + } + } + + for _, limit := range []struct { + name string + sp **string // if field is a *string, we take addr to it + s string + allowed int + }{ + // A 256 byte ID / software name & version is good enough and + // fits with our max broker write byte min of 1K. + {name: "client id", sp: &cfg.id, allowed: 256}, + {name: "software name", s: cfg.softwareName, allowed: 256}, + {name: "software version", s: cfg.softwareVersion, allowed: 256}, + + // The following is the limit transitioning from two byte + // prefix for flexible stuff to three bytes; as with above, it + // is more than reasonable. + {name: "transactional id", sp: &cfg.txnID, allowed: 16382}, + + {name: "rack", s: cfg.rack, allowed: 512}, + } { + s := limit.s + if limit.sp != nil && *limit.sp != nil { + s = **limit.sp + } + if len(s) > limit.allowed { + return fmt.Errorf("%s length %d is larger than max allowed %d", limit.name, len(s), limit.allowed) + } + } + + i64lt := func(l, r int64) (bool, string) { return l < r, "less" } + i64gt := func(l, r int64) (bool, string) { return l > r, "larger" } + for _, limit := range []struct { + name string + v int64 + allowed int64 + badcmp func(int64, int64) (bool, string) + + fmt string + durs bool + }{ + // Min write of 1K and max of 1G is reasonable. + {name: "max broker write bytes", v: int64(cfg.maxBrokerWriteBytes), allowed: 1 << 10, badcmp: i64lt}, + {name: "max broker write bytes", v: int64(cfg.maxBrokerWriteBytes), allowed: 1 << 30, badcmp: i64gt}, + + // Same for read bytes. + {name: "max broker read bytes", v: int64(cfg.maxBrokerReadBytes), allowed: 1 << 10, badcmp: i64lt}, + {name: "max broker read bytes", v: int64(cfg.maxBrokerReadBytes), allowed: 1 << 30, badcmp: i64gt}, + + // For batches, we want at least 512 (reasonable), and the + // upper limit is the max num when a uvarint transitions from 4 + // to 5 bytes. The upper limit is also more than reasonable + // (256MiB). + {name: "max record batch bytes", v: int64(cfg.maxRecordBatchBytes), allowed: 512, badcmp: i64lt}, + {name: "max record batch bytes", v: int64(cfg.maxRecordBatchBytes), allowed: 256 << 20, badcmp: i64gt}, + + // We do not want the broker write bytes to be less than the + // record batch bytes, nor the read bytes to be less than what + // we indicate to fetch. + // + // We cannot enforce if a single batch is larger than the max + // fetch bytes limit, but hopefully we do not run into that. 
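+ // With the defaults, the 100MiB broker write/read limits comfortably
+ // exceed the ~1MB record batch limit and the 50MiB fetch limit.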
+ {v: int64(cfg.maxBrokerWriteBytes), allowed: int64(cfg.maxRecordBatchBytes), badcmp: i64lt, fmt: "max broker write bytes %v is erroneously less than max record batch bytes %v"}, + {v: int64(cfg.maxBrokerReadBytes), allowed: int64(cfg.maxBytes), badcmp: i64lt, fmt: "max broker read bytes %v is erroneously less than max fetch bytes %v"}, + + // 0 <= allowed concurrency + {name: "max concurrent fetches", v: int64(cfg.maxConcurrentFetches), allowed: 0, badcmp: i64lt}, + + // 1s <= request timeout overhead <= 15m + {name: "request timeout max overhead", v: int64(cfg.requestTimeoutOverhead), allowed: int64(15 * time.Minute), badcmp: i64gt, durs: true}, + {name: "request timeout min overhead", v: int64(cfg.requestTimeoutOverhead), allowed: int64(time.Second), badcmp: i64lt, durs: true}, + + // 1s <= conn idle <= 15m + {name: "conn min idle timeout", v: int64(cfg.connIdleTimeout), allowed: int64(time.Second), badcmp: i64lt, durs: true}, + {name: "conn max idle timeout", v: int64(cfg.connIdleTimeout), allowed: int64(15 * time.Minute), badcmp: i64gt, durs: true}, + + // 10ms <= metadata <= 1hr + {name: "metadata max age", v: int64(cfg.metadataMaxAge), allowed: int64(time.Hour), badcmp: i64gt, durs: true}, + {name: "metadata min age", v: int64(cfg.metadataMinAge), allowed: int64(10 * time.Millisecond), badcmp: i64lt, durs: true}, + {v: int64(cfg.metadataMaxAge), allowed: int64(cfg.metadataMinAge), badcmp: i64lt, fmt: "metadata max age %v is erroneously less than metadata min age %v", durs: true}, + + // Some random producer settings. + {name: "max buffered records", v: cfg.maxBufferedRecords, allowed: 1, badcmp: i64lt}, + {name: "max buffered bytes", v: cfg.maxBufferedBytes, allowed: 0, badcmp: i64lt}, + {name: "linger", v: int64(cfg.linger), allowed: int64(time.Minute), badcmp: i64gt, durs: true}, + {name: "produce timeout", v: int64(cfg.produceTimeout), allowed: int64(100 * time.Millisecond), badcmp: i64lt, durs: true}, + {name: "record timeout", v: int64(cfg.recordTimeout), allowed: int64(time.Second), badcmp: func(l, r int64) (bool, string) { + if l == 0 { + return false, "" // we print nothing when things are good + } + return l < r, "less" + }, durs: true}, + + // Consumer settings. maxWait is stored as int32 milliseconds, + // but we want the error message to be in the nice + // time.Duration string format. + {name: "max fetch wait", v: int64(cfg.maxWait) * int64(time.Millisecond), allowed: int64(10 * time.Millisecond), badcmp: i64lt, durs: true}, + + // Group settings. 
+ {name: "number of balancers", v: int64(len(cfg.balancers)), allowed: 1, badcmp: i64lt}, + {name: "consumer protocol length", v: int64(len(cfg.protocol)), allowed: 1, badcmp: i64lt}, + + {name: "session timeout", v: int64(cfg.sessionTimeout), allowed: int64(100 * time.Millisecond), badcmp: i64lt, durs: true}, + {name: "rebalance timeout", v: int64(cfg.rebalanceTimeout), allowed: int64(100 * time.Millisecond), badcmp: i64lt, durs: true}, + {name: "autocommit interval", v: int64(cfg.autocommitInterval), allowed: int64(100 * time.Millisecond), badcmp: i64lt, durs: true}, + + {v: int64(cfg.heartbeatInterval), allowed: int64(cfg.rebalanceTimeout) * int64(time.Millisecond), badcmp: i64gt, durs: true, fmt: "heartbeat interval %v is erroneously larger than the session timeout %v"}, + } { + bad, cmp := limit.badcmp(limit.v, limit.allowed) + if bad { + if limit.fmt != "" { + if limit.durs { + return fmt.Errorf(limit.fmt, time.Duration(limit.v), time.Duration(limit.allowed)) + } + return fmt.Errorf(limit.fmt, limit.v, limit.allowed) + } + if limit.durs { + return fmt.Errorf("%s %v is %s than allowed %v", limit.name, time.Duration(limit.v), cmp, time.Duration(limit.allowed)) + } + return fmt.Errorf("%s %v is %s than allowed %v", limit.name, limit.v, cmp, limit.allowed) + } + } + + if cfg.dialFn != nil { + if cfg.dialTLS != nil { + return errors.New("cannot set both Dialer and DialTLSConfig") + } + } + + if len(cfg.group) > 0 { + if len(cfg.partitions) != 0 { + return errors.New("invalid direct-partition consuming option when consuming as a group") + } + } + + if cfg.regex { + if len(cfg.partitions) != 0 { + return errors.New("invalid direct-partition consuming option when consuming as regex") + } + for re := range cfg.topics { + compiled, err := regexp.Compile(re) + if err != nil { + return fmt.Errorf("invalid regular expression %q", re) + } + cfg.topics[re] = compiled + } + } + + if cfg.topics != nil && cfg.partitions != nil { + for topic := range cfg.partitions { + if _, exists := cfg.topics[topic]; exists { + return fmt.Errorf("topic %q seen in both ConsumePartitions and ConsumeTopics; these options are a union, it is invalid to specify specific partitions for a topic while also consuming the entire topic", topic) + } + } + } + + if cfg.autocommitDisable && cfg.autocommitGreedy { + return errors.New("cannot both disable autocommitting and enable greedy autocommitting") + } + if cfg.autocommitDisable && cfg.autocommitMarks { + return errors.New("cannot both disable autocommitting and enable marked autocommitting") + } + if cfg.autocommitGreedy && cfg.autocommitMarks { + return errors.New("cannot enable both greedy autocommitting and marked autocommitting") + } + if (cfg.autocommitGreedy || cfg.autocommitDisable || cfg.autocommitMarks || cfg.setCommitCallback) && len(cfg.group) == 0 { + return errors.New("invalid autocommit options specified when a group was not specified") + } + if (cfg.setLost || cfg.setRevoked || cfg.setAssigned) && len(cfg.group) == 0 { + return errors.New("invalid group partition assigned/revoked/lost functions set when a group was not specified") + } + + processedHooks, err := processHooks(cfg.hooks) + if err != nil { + return err + } + cfg.hooks = processedHooks + + return nil +} + +// processHooks will inspect and recursively unpack slices of hooks stopping +// if the instance implements any hook interface. 
It will return an error on +// the first instance that implements no hook interface +func processHooks(hooks []Hook) ([]Hook, error) { + var processedHooks []Hook + for _, hook := range hooks { + if implementsAnyHook(hook) { + processedHooks = append(processedHooks, hook) + } else if moreHooks, ok := hook.([]Hook); ok { + more, err := processHooks(moreHooks) + if err != nil { + return nil, err + } + processedHooks = append(processedHooks, more...) + } else { + return nil, errors.New("found an argument that implements no hook interfaces") + } + } + return processedHooks, nil +} + +var reVersion = regexp.MustCompile(`^[a-zA-Z0-9](?:[a-zA-Z0-9.-]*[a-zA-Z0-9])?$`) + +func softwareVersion() string { + info, ok := debug.ReadBuildInfo() + if ok { + for _, dep := range info.Deps { + if dep.Path == "github.com/twmb/franz-go" { + if reVersion.MatchString(dep.Version) { + return dep.Version + } + } + } + } + return "unknown" +} + +func defaultCfg() cfg { + defaultID := "kgo" + return cfg{ + ///////////// + // general // + ///////////// + id: &defaultID, + + dialTimeout: 10 * time.Second, + requestTimeoutOverhead: 10 * time.Second, + connIdleTimeout: 20 * time.Second, + + softwareName: "kgo", + softwareVersion: softwareVersion(), + + logger: new(nopLogger), + + seedBrokers: []string{"127.0.0.1"}, + maxVersions: kversion.Stable(), + + retryBackoff: func() func(int) time.Duration { + var rngMu sync.Mutex + rng := rand.New(rand.NewSource(time.Now().UnixNano())) + return func(fails int) time.Duration { + const ( + min = 250 * time.Millisecond + max = 5 * time.Second / 2 + ) + if fails <= 0 { + return min + } + if fails > 10 { + return max + } + + backoff := min * time.Duration(1<<(fails-1)) + + rngMu.Lock() + jitter := 0.8 + 0.4*rng.Float64() + rngMu.Unlock() + + backoff = time.Duration(float64(backoff) * jitter) + + if backoff > max { + return max + } + return backoff + } + }(), + retries: 20, + + maxBrokerWriteBytes: 100 << 20, // Kafka socket.request.max.bytes default is 100<<20 + maxBrokerReadBytes: 100 << 20, + + metadataMaxAge: 5 * time.Minute, + metadataMinAge: 5 * time.Second, + missingTopicDelete: 15 * time.Second, + + ////////////// + // producer // + ////////////// + + txnTimeout: 40 * time.Second, + acks: AllISRAcks(), + maxProduceInflight: 1, + compression: []CompressionCodec{SnappyCompression(), NoCompression()}, + maxRecordBatchBytes: 1000012, // Kafka max.message.bytes default is 1000012 + maxBufferedRecords: 10000, + produceTimeout: 10 * time.Second, + recordRetries: math.MaxInt64, // effectively unbounded + maxUnknownFailures: 4, + partitioner: UniformBytesPartitioner(64<<10, true, true, nil), + txnBackoff: 20 * time.Millisecond, + + ////////////// + // consumer // + ////////////// + + maxWait: 5000, + minBytes: 1, + maxBytes: 50 << 20, + maxPartBytes: 1 << 20, + resetOffset: NewOffset().AtStart(), + isolationLevel: 0, + + maxConcurrentFetches: 0, // unbounded default + + /////////// + // group // + /////////// + + balancers: []GroupBalancer{ + CooperativeStickyBalancer(), + }, + protocol: "consumer", + + sessionTimeout: 45000 * time.Millisecond, + rebalanceTimeout: 60000 * time.Millisecond, + heartbeatInterval: 3000 * time.Millisecond, + + autocommitInterval: 5 * time.Second, + } +} + +////////////////////////// +// CLIENT CONFIGURATION // +////////////////////////// + +// ClientID uses id for all requests sent to Kafka brokers, overriding the +// default "kgo". 
+func ClientID(id string) Opt { + return clientOpt{func(cfg *cfg) { cfg.id = &id }} +} + +// SoftwareNameAndVersion sets the client software name and version that will +// be sent to Kafka as part of the ApiVersions request as of Kafka 2.4, +// overriding the default "kgo" and internal version number. +// +// Kafka exposes this through metrics to help operators understand the impact +// of clients. +// +// It is generally not recommended to set this. As well, if you do, the name +// and version must match the following regular expression: +// +// [a-zA-Z0-9](?:[a-zA-Z0-9\.-]*[a-zA-Z0-9])? +// +// Note this means neither the name nor version can be empty. +func SoftwareNameAndVersion(name, version string) Opt { + return clientOpt{func(cfg *cfg) { cfg.softwareName = name; cfg.softwareVersion = version }} +} + +// WithLogger sets the client to use the given logger, overriding the default +// to not use a logger. +// +// It is invalid to use a nil logger; doing so will cause panics. +func WithLogger(l Logger) Opt { + return clientOpt{func(cfg *cfg) { cfg.logger = &wrappedLogger{l} }} +} + +// RequestTimeoutOverhead uses the given time as overhead while deadlining +// requests, overriding the default overhead of 10s. +// +// For most requests, the timeout is set to the overhead. However, for +// any request with a TimeoutMillis field, the overhead is added on top of the +// request's TimeoutMillis. This ensures that we give Kafka enough time to +// actually process the request given the timeout, while still having a +// deadline on the connection as a whole to ensure it does not hang. +// +// For writes, the timeout is always the overhead. We buffer writes in our +// client before one quick flush, so we always expect the write to be fast. +// +// Note that hitting the timeout kills a connection, which will fail any other +// active writes or reads on the connection. +// +// This option is roughly equivalent to request.timeout.ms, but grants +// additional time to requests that have timeout fields. +func RequestTimeoutOverhead(overhead time.Duration) Opt { + return clientOpt{func(cfg *cfg) { cfg.requestTimeoutOverhead = overhead }} +} + +// ConnIdleTimeout is a rough amount of time to allow connections to idle +// before they are closed, overriding the default 20. +// +// In the worst case, a connection can be allowed to idle for up to 2x this +// time, while the average is expected to be 1.5x (essentially, a uniform +// distribution from this interval to 2x the interval). +// +// It is possible that a connection can be reaped just as it is about to be +// written to, but the client internally retries in these cases. +// +// Connections are not reaped if they are actively being written to or read +// from; thus, a request can take a really long time itself and not be reaped +// (however, this may lead to the RequestTimeoutOverhead). +func ConnIdleTimeout(timeout time.Duration) Opt { + return clientOpt{func(cfg *cfg) { cfg.connIdleTimeout = timeout }} +} + +// Dialer uses fn to dial addresses, overriding the default dialer that uses a +// 10s dial timeout and no TLS. +// +// The context passed to the dial function is the context used in the request +// that caused the dial. If the request is a client-internal request, the +// context is the context on the client itself (which is canceled when the +// client is closed). 
+// +// This function has the same signature as net.Dialer's DialContext and +// tls.Dialer's DialContext, meaning you can use this function like so: +// +// kgo.Dialer((&net.Dialer{Timeout: 10*time.Second}).DialContext) +// +// or +// +// kgo.Dialer((&tls.Dialer{...}).DialContext) +func Dialer(fn func(ctx context.Context, network, host string) (net.Conn, error)) Opt { + return clientOpt{func(cfg *cfg) { cfg.dialFn = fn }} +} + +// DialTimeout sets the dial timeout, overriding the default of 10s. This +// option is useful if you do not want to set a custom dialer, and is useful in +// tandem with DialTLSConfig. +func DialTimeout(timeout time.Duration) Opt { + return clientOpt{func(cfg *cfg) { cfg.dialTimeout = timeout }} +} + +// DialTLSConfig opts into dialing brokers with the given TLS config with a +// 10s dial timeout. This is a shortcut for manually specifying a tls dialer +// using the Dialer option. You can also change the default 10s timeout with +// DialTimeout. +// +// Every dial, the input config is cloned. If the config's ServerName is not +// specified, this function uses net.SplitHostPort to extract the host from the +// broker being dialed and sets the ServerName. In short, it is not necessary +// to set the ServerName. +func DialTLSConfig(c *tls.Config) Opt { + return clientOpt{func(cfg *cfg) { cfg.dialTLS = c }} +} + +// DialTLS opts into dialing brokers with TLS. This is a shortcut for +// DialTLSConfig with an empty config. See DialTLSConfig for more details. +func DialTLS() Opt { + return DialTLSConfig(new(tls.Config)) +} + +// SeedBrokers sets the seed brokers for the client to use, overriding the +// default 127.0.0.1:9092. +// +// Any seeds that are missing a port use the default Kafka port 9092. +func SeedBrokers(seeds ...string) Opt { + return clientOpt{func(cfg *cfg) { cfg.seedBrokers = append(cfg.seedBrokers[:0], seeds...) }} +} + +// MaxVersions sets the maximum Kafka version to try, overriding the +// internal unbounded (latest stable) versions. +// +// Note that specific max version pinning is required if trying to interact +// with versions pre 0.10.0. Otherwise, unless using more complicated requests +// that this client itself does not natively use, it is generally safe to opt +// for the latest version. If using the kmsg package directly to issue +// requests, it is recommended to pin versions so that new fields on requests +// do not get invalid default zero values before you update your usage. +func MaxVersions(versions *kversion.Versions) Opt { + return clientOpt{func(cfg *cfg) { cfg.maxVersions = versions }} +} + +// MinVersions sets the minimum Kafka version a request can be downgraded to, +// overriding the default of the lowest version. +// +// This option is useful if you are issuing requests that you absolutely do not +// want to be downgraded; that is, if you are relying on features in newer +// requests, and you are not sure if your brokers can handle those features. +// By setting a min version, if the client detects it needs to downgrade past +// the version, it will instead avoid issuing the request. +// +// Unlike MaxVersions, if a request is issued that is unknown to the min +// versions, the request is allowed. It is assumed that there is no lower bound +// for that request. 
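// Editor's note: illustrative usage sketch, not part of this patch or the
// vendored source. It shows how the dial and seed options documented above
// are typically combined when constructing a client. The broker addresses,
// client ID, and timeout value are hypothetical.
package main

import (
	"crypto/tls"
	"log"
	"time"

	"github.com/twmb/franz-go/pkg/kgo"
)

func main() {
	cl, err := kgo.NewClient(
		kgo.SeedBrokers("broker-1:9092", "broker-2:9092"), // hypothetical addresses
		kgo.ClientID("example-client"),                    // hypothetical ID
		kgo.DialTLSConfig(new(tls.Config)),                // ServerName is filled in per dial
		kgo.DialTimeout(5*time.Second),
	)
	if err != nil {
		log.Fatalf("unable to create client: %v", err)
	}
	defer cl.Close()
}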
+func MinVersions(versions *kversion.Versions) Opt {
+	return clientOpt{func(cfg *cfg) { cfg.minVersions = versions }}
+}
+
+// RetryBackoffFn sets the backoff strategy for how long to backoff for a given
+// amount of retries, overriding the default jittery exponential backoff that
+// ranges from 250ms min to 2.5s max.
+//
+// This (roughly) corresponds to Kafka's retry.backoff.ms setting and
+// retry.backoff.max.ms (which is being introduced with KIP-500).
+func RetryBackoffFn(backoff func(int) time.Duration) Opt {
+	return clientOpt{func(cfg *cfg) { cfg.retryBackoff = backoff }}
+}
+
+// RequestRetries sets the number of tries that retryable requests are allowed,
+// overriding the default of 20.
+//
+// This option does not apply to produce requests; to limit produce request
+// retries / record retries, see RecordRetries.
+func RequestRetries(n int) Opt {
+	return clientOpt{func(cfg *cfg) { cfg.retries = int64(n) }}
+}
+
+// RetryTimeout sets the upper limit on how long we allow a request to be
+// issued and then reissued on failure. That is, this controls the total
+// end-to-end maximum time we allow for trying a request. This overrides the
+// default of:
+//
+//	JoinGroup: cfg.SessionTimeout (default 45s)
+//	SyncGroup: cfg.SessionTimeout (default 45s)
+//	Heartbeat: cfg.SessionTimeout (default 45s)
+//	others: 30s
+//
+// This timeout applies to any request issued through a client's Request
+// function. It does not apply to fetches nor produces.
+//
+// A value of zero indicates no request timeout.
+//
+// The timeout is evaluated after a request errors. If the time since the start
+// of the first request plus any backoff for the latest failure is less than
+// the retry timeout, the request will be issued again.
+func RetryTimeout(t time.Duration) Opt {
+	return RetryTimeoutFn(func(int16) time.Duration { return t })
+}
+
+// RetryTimeoutFn sets the upper limit on how long we allow a request to be
+// issued and then reissued on failure. That is, this controls the total
+// end-to-end maximum time we allow for trying a request. This overrides the
+// default of:
+//
+//	JoinGroup: cfg.SessionTimeout (default 45s)
+//	SyncGroup: cfg.SessionTimeout (default 45s)
+//	Heartbeat: cfg.SessionTimeout (default 45s)
+//	others: 30s
+//
+// This timeout applies to any request issued through a client's Request
+// function. It does not apply to fetches nor produces.
+//
+// The function is called with the request key that is being retried. While it
+// is not expected that the request key will be used, including it gives users
+// the opportunity to have different retry timeouts for different keys.
+//
+// If the function returns zero, there is no retry timeout.
+//
+// The timeout is evaluated after a request errors. If the time since the start
+// of the first request plus any backoff for the latest failure is less than
+// the retry timeout, the request will be issued again.
+func RetryTimeoutFn(t func(int16) time.Duration) Opt {
+	return clientOpt{func(cfg *cfg) { cfg.retryTimeout = t }}
+}
+
+// AllowAutoTopicCreation enables topics to be auto created if they do
+// not exist when fetching their metadata.
+func AllowAutoTopicCreation() Opt {
+	return clientOpt{func(cfg *cfg) { cfg.allowAutoTopicCreation = true }}
+}
+
+// BrokerMaxWriteBytes upper bounds the number of bytes written to a broker
+// connection in a single write, overriding the default 100MiB.
+//
+// This number corresponds to a broker's socket.request.max.bytes, which
+// defaults to 100MiB.
+//
+// The only Kafka request that could come reasonably close to hitting this
+// limit should be produce requests, and thus this limit is only enforced for
+// produce requests.
+func BrokerMaxWriteBytes(v int32) Opt {
+	return clientOpt{func(cfg *cfg) { cfg.maxBrokerWriteBytes = v }}
+}
+
+// BrokerMaxReadBytes sets the maximum response size that can be read from
+// Kafka, overriding the default 100MiB.
+//
+// This is a safety measure to avoid OOMing on invalid responses. This is
+// roughly double FetchMaxBytes; if bumping that, consider bumping this. No
+// other response should run the risk of hitting this limit.
+func BrokerMaxReadBytes(v int32) Opt {
+	return clientOpt{func(cfg *cfg) { cfg.maxBrokerReadBytes = v }}
+}
+
+// MetadataMaxAge sets the maximum age for the client's cached metadata,
+// overriding the default 5m, to allow detection of new topics, partitions,
+// etc.
+//
+// This corresponds to Kafka's metadata.max.age.ms.
+func MetadataMaxAge(age time.Duration) Opt {
+	return clientOpt{func(cfg *cfg) { cfg.metadataMaxAge = age }}
+}
+
+// MetadataMinAge sets the minimum time between metadata queries, overriding
+// the default 5s. You may want to raise or lower this to reduce the number of
+// metadata queries the client will make. Notably, if metadata detects an error
+// in any topic or partition, it triggers itself to update as soon as allowed.
+func MetadataMinAge(age time.Duration) Opt {
+	return clientOpt{func(cfg *cfg) { cfg.metadataMinAge = age }}
+}
+
+// SASL appends sasl authentication options to use for all connections.
+//
+// SASL is tried in order; if the broker supports the first mechanism, all
+// connections will use that mechanism. If the first mechanism fails, the
+// client will pick the first supported mechanism. If the broker does not
+// support any client mechanisms, connections will fail.
+func SASL(sasls ...sasl.Mechanism) Opt {
+	return clientOpt{func(cfg *cfg) { cfg.sasls = append(cfg.sasls, sasls...) }}
+}
+
+// WithHooks sets hooks to call whenever relevant.
+//
+// Hooks can be used to layer in metrics (such as Prometheus hooks) or anything
+// else. The client will call all hooks in order. See the Hooks interface for
+// more information, as well as any interface that contains "Hook" in the name
+// to know the available hooks. A single hook can implement zero or all hook
+// interfaces, and only the hooks that it implements will be called.
+func WithHooks(hooks ...Hook) Opt {
+	return clientOpt{func(cfg *cfg) { cfg.hooks = append(cfg.hooks, hooks...) }}
+}
+
+// ConcurrentTransactionsBackoff sets the backoff interval to use during
+// transactional requests in case we encounter CONCURRENT_TRANSACTIONS error,
+// overriding the default 20ms.
+//
+// Sometimes, when a client begins a transaction quickly enough after finishing
+// a previous one, Kafka will return a CONCURRENT_TRANSACTIONS error. Clients
+// are expected to backoff slightly and retry the operation. Lower backoffs may
+// increase load on the brokers, while higher backoffs may increase transaction
+// latency in clients.
+//
+// Note that if brokers are hanging in this concurrent transactions state for
+// too long, the client progressively increases the backoff.
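// Editor's note: illustrative sketch, not part of this patch or the vendored
// source. It shows the WithHooks option documented above wired to the kprom
// plugin that this patch also vendors; the metric namespace and broker
// address are hypothetical.
package main

import (
	"log"

	"github.com/twmb/franz-go/pkg/kgo"
	"github.com/twmb/franz-go/plugin/kprom"
)

func main() {
	// kprom's *Metrics type implements several of the client's hook
	// interfaces; passing it via WithHooks is enough to start collecting
	// broker and produce/fetch metrics.
	metrics := kprom.NewMetrics("example_namespace") // hypothetical namespace
	cl, err := kgo.NewClient(
		kgo.SeedBrokers("localhost:9092"),
		kgo.WithHooks(metrics),
	)
	if err != nil {
		log.Fatalf("unable to create client: %v", err)
	}
	defer cl.Close()
	// Exposing the Prometheus registry over HTTP is left to however the
	// surrounding service already serves metrics; that wiring is omitted here.
}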
+func ConcurrentTransactionsBackoff(backoff time.Duration) Opt { + return clientOpt{func(cfg *cfg) { cfg.txnBackoff = backoff }} +} + +// ConsiderMissingTopicDeletedAfter sets the amount of time a topic can be +// missing from metadata responses _after_ loading it at least once before it +// is considered deleted, overriding the default of 15s. Note that for newer +// versions of Kafka, it may take a bit of time (~15s) for the cluster to fully +// recognize a newly created topic. If this option is set too low, there is +// some risk that the client will internally purge and re-see a topic a few +// times until the cluster fully broadcasts the topic creation. +func ConsiderMissingTopicDeletedAfter(t time.Duration) Opt { + return clientOpt{func(cfg *cfg) { cfg.missingTopicDelete = t }} +} + +//////////////////////////// +// PRODUCER CONFIGURATION // +//////////////////////////// + +// DefaultProduceTopic sets the default topic to produce to if the topic field +// is empty in a Record. +// +// If this option is not used, if a record has an empty topic, the record +// cannot be produced and will be failed immediately. +func DefaultProduceTopic(t string) ProducerOpt { + return producerOpt{func(cfg *cfg) { cfg.defaultProduceTopic = t }} +} + +// Acks represents the number of acks a broker leader must have before +// a produce request is considered complete. +// +// This controls the durability of written records and corresponds to "acks" in +// Kafka's Producer Configuration documentation. +// +// The default is LeaderAck. +type Acks struct { + val int16 +} + +// NoAck considers records sent as soon as they are written on the wire. +// The leader does not reply to records. +func NoAck() Acks { return Acks{0} } + +// LeaderAck causes Kafka to reply that a record is written after only +// the leader has written a message. The leader does not wait for in-sync +// replica replies. +func LeaderAck() Acks { return Acks{1} } + +// AllISRAcks ensures that all in-sync replicas have acknowledged they +// wrote a record before the leader replies success. +func AllISRAcks() Acks { return Acks{-1} } + +// RequiredAcks sets the required acks for produced records, +// overriding the default RequireAllISRAcks. +func RequiredAcks(acks Acks) ProducerOpt { + return producerOpt{func(cfg *cfg) { cfg.acks = acks }} +} + +// DisableIdempotentWrite disables idempotent produce requests, opting out of +// Kafka server-side deduplication in the face of reissued requests due to +// transient network problems. Disabling idempotent write by default +// upper-bounds the number of in-flight produce requests per broker to 1, vs. +// the default of 5 when using idempotency. +// +// Idempotent production is strictly a win, but does require the +// IDEMPOTENT_WRITE permission on CLUSTER (pre Kafka 3.0), and not all clients +// can have that permission. +// +// This option is incompatible with specifying a transactional id. +func DisableIdempotentWrite() ProducerOpt { + return producerOpt{func(cfg *cfg) { cfg.disableIdempotency = true }} +} + +// MaxProduceRequestsInflightPerBroker changes the number of allowed produce +// requests in flight per broker if you disable idempotency, overriding the +// default of 1. If using idempotency, this option has no effect: the maximum +// in flight for Kafka v0.11 is 1, and from v1 onward is 5. +// +// Using more than 1 may result in out of order records and may result in +// duplicates if there are connection issues. 
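// Editor's note: illustrative sketch, not part of this patch or the vendored
// source. It combines the producer options documented above (default topic,
// acks, compression preference) with a synchronous produce; the topic name
// and payload are hypothetical.
package main

import (
	"context"
	"log"

	"github.com/twmb/franz-go/pkg/kgo"
)

func main() {
	cl, err := kgo.NewClient(
		kgo.SeedBrokers("localhost:9092"),
		kgo.DefaultProduceTopic("example-topic"), // hypothetical topic
		kgo.RequiredAcks(kgo.AllISRAcks()),       // the default, shown for clarity
		kgo.ProducerBatchCompression(kgo.SnappyCompression(), kgo.NoCompression()),
	)
	if err != nil {
		log.Fatalf("unable to create client: %v", err)
	}
	defer cl.Close()

	// ProduceSync blocks until the record is acknowledged or fails.
	rec := &kgo.Record{Value: []byte("example payload")}
	if err := cl.ProduceSync(context.Background(), rec).FirstErr(); err != nil {
		log.Fatalf("produce failed: %v", err)
	}
}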
+func MaxProduceRequestsInflightPerBroker(n int) ProducerOpt {
+	return producerOpt{func(cfg *cfg) { cfg.maxProduceInflight = n }}
+}
+
+// ProducerBatchCompression sets the compression codec to use for producing
+// records.
+//
+// Compression is chosen in the order preferred based on broker support. For
+// example, zstd compression was introduced in Kafka 2.1, so the preference
+// can be first zstd, fallback snappy, fallback none.
+//
+// The default preference is [snappy, none], which should be fine for all old
+// consumers since snappy compression has existed since Kafka 0.8.0. To use
+// zstd, your brokers must be at least 2.1 and all consumers must be upgraded
+// to support decoding zstd records.
+func ProducerBatchCompression(preference ...CompressionCodec) ProducerOpt {
+	return producerOpt{func(cfg *cfg) { cfg.compression = preference }}
+}
+
+// ProducerBatchMaxBytes upper bounds the size of a record batch, overriding
+// the default 1,000,012 bytes. This mirrors Kafka's max.message.bytes.
+//
+// Record batches are independent of a ProduceRequest: a record batch is
+// specific to a topic and partition, whereas the produce request can contain
+// many record batches for many topics.
+//
+// If a single record encodes larger than this number (before compression), it
+// will not be written and a callback will have the appropriate error.
+//
+// Note that this is the maximum size of a record batch before compression. If
+// a batch compresses poorly and actually grows the batch, the uncompressed
+// form will be used.
+func ProducerBatchMaxBytes(v int32) ProducerOpt {
+	return producerOpt{func(cfg *cfg) { cfg.maxRecordBatchBytes = v }}
+}
+
+// MaxBufferedRecords sets the max amount of records the client will buffer,
+// blocking produces until records are finished if this limit is reached.
+// This overrides the default of 10,000.
+func MaxBufferedRecords(n int) ProducerOpt {
+	return producerOpt{func(cfg *cfg) { cfg.maxBufferedRecords = int64(n) }}
+}
+
+// MaxBufferedBytes sets the max amount of bytes that the client will buffer
+// while producing, blocking produces until records are finished if this limit
+// is reached. This overrides the unlimited default.
+//
+// Note that this option does _not_ apply for consuming: the client cannot
+// limit bytes buffered for consuming because of decompression. You can roughly
+// control consuming memory by using [MaxConcurrentFetches], [FetchMaxBytes],
+// and [FetchMaxPartitionBytes].
+//
+// If you produce a record that is larger than n, the record is immediately
+// failed with kerr.MessageTooLarge.
+//
+// Note that this limit applies after [MaxBufferedRecords].
+func MaxBufferedBytes(n int) ProducerOpt {
+	return producerOpt{func(cfg *cfg) { cfg.maxBufferedBytes = int64(n) }}
+}
+
+// RecordPartitioner uses the given partitioner to partition records, overriding
+// the default UniformBytesPartitioner(64KiB, true, true, nil).
+func RecordPartitioner(partitioner Partitioner) ProducerOpt {
+	return producerOpt{func(cfg *cfg) { cfg.partitioner = partitioner }}
+}
+
+// ProduceRequestTimeout sets how long Kafka brokers are allowed to respond to
+// produce requests, overriding the default 10s. If a broker exceeds this
+// duration, it will reply with a request timeout error.
+//
+// This somewhat corresponds to Kafka's request.timeout.ms setting, but only
+// applies to produce requests. This setting sets the TimeoutMillis field in
+// the produce request itself.
The RequestTimeoutOverhead is applied as a write +// limit and read limit in addition to this. +func ProduceRequestTimeout(limit time.Duration) ProducerOpt { + return producerOpt{func(cfg *cfg) { cfg.produceTimeout = limit }} +} + +// RecordRetries sets the number of tries for producing records, overriding the +// unlimited default. +// +// If idempotency is enabled (as it is by default), this option is only +// enforced if it is safe to do so without creating invalid sequence numbers. +// It is safe to enforce if a record was never issued in a request to Kafka, or +// if it was requested and received a response. +// +// If a record fails due to retries, all records buffered in the same partition +// are failed as well. This ensures gapless ordering: the client will not fail +// one record only to produce a later one successfully. This also allows for +// easier sequence number ordering internally. +// +// If a topic repeatedly fails to load with UNKNOWN_TOPIC_OR_PARTITION, it has +// a different limit (the UnknownTopicRetries option). All records for a topic +// that repeatedly cannot be loaded are failed when that limit is hit. +// +// This option is different from RequestRetries to allow finer grained control +// of when to fail when producing records. +func RecordRetries(n int) ProducerOpt { + return producerOpt{func(cfg *cfg) { cfg.recordRetries = int64(n) }} +} + +// UnknownTopicRetries sets the number of times a record can fail with +// UNKNOWN_TOPIC_OR_PARTITION, overriding the default 4. +// +// This is a separate limit from RecordRetries because unknown topic or +// partition errors should only happen if the topic does not exist. It is +// pointless for the client to continue producing to a topic that does not +// exist, and if we repeatedly see that the topic does not exist across +// multiple metadata queries (which are going to different brokers), then we +// may as well stop trying and fail the records. +// +// If this is -1, the client never fails records with this error. +func UnknownTopicRetries(n int) ProducerOpt { + return producerOpt{func(cfg *cfg) { cfg.maxUnknownFailures = int64(n) }} +} + +// StopProducerOnDataLossDetected sets the client to stop producing if data +// loss is detected, overriding the default false. +// +// Note that if using this option, it is strongly recommended to not have a +// retry limit. Doing so may lead to errors where the client fails a batch on a +// recoverable error, which internally bumps the idempotent sequence number +// used for producing, which may then later cause an inadvertent out of order +// sequence number and false "data loss" detection. +func StopProducerOnDataLossDetected() ProducerOpt { + return producerOpt{func(cfg *cfg) { cfg.stopOnDataLoss = true }} +} + +// ProducerOnDataLossDetected sets a function to call if data loss is detected +// when producing records if the client is configured to continue on data loss. +// Thus, this option is mutually exclusive with StopProducerOnDataLossDetected. +// +// The passed function will be called with the topic and partition that data +// loss was detected on. +func ProducerOnDataLossDetected(fn func(string, int32)) ProducerOpt { + return producerOpt{func(cfg *cfg) { cfg.onDataLoss = fn }} +} + +// ProducerLinger sets how long individual topic partitions will linger waiting +// for more records before triggering a request to be built. +// +// Note that this option should only be used in low volume producers. 
The only +// benefit of lingering is to potentially build a larger batch to reduce cpu +// usage on the brokers if you have many producers all producing small amounts. +// +// If a produce request is triggered by any topic partition, all partitions +// with a possible batch to be sent are used and all lingers are reset. +// +// As mentioned, the linger is specific to topic partition. A high volume +// producer will likely be producing to many partitions; it is both unnecessary +// to linger in this case and inefficient because the client will have many +// timers running (and stopping and restarting) unnecessarily. +func ProducerLinger(linger time.Duration) ProducerOpt { + return producerOpt{func(cfg *cfg) { cfg.linger = linger }} +} + +// ManualFlushing disables auto-flushing when producing. While you can still +// set lingering, it would be useless to do so. +// +// With manual flushing, producing while MaxBufferedRecords or MaxBufferedBytes +// have already been produced and not flushed will return ErrMaxBuffered. +func ManualFlushing() ProducerOpt { + return producerOpt{func(cfg *cfg) { cfg.manualFlushing = true }} +} + +// RecordDeliveryTimeout sets a rough time of how long a record can sit around +// in a batch before timing out, overriding the unlimited default. +// +// If idempotency is enabled (as it is by default), this option is only +// enforced if it is safe to do so without creating invalid sequence numbers. +// It is safe to enforce if a record was never issued in a request to Kafka, or +// if it was requested and received a response. +// +// The timeout for all records in a batch inherit the timeout of the first +// record in that batch. That is, once the first record's timeout expires, all +// records in the batch are expired. This generally is a non-issue unless using +// this option with lingering. In that case, simply add the linger to the +// record timeout to avoid problems. +// +// If a record times out, all records buffered in the same partition are failed +// as well. This ensures gapless ordering: the client will not fail one record +// only to produce a later one successfully. This also allows for easier +// sequence number ordering internally. +// +// The timeout is only evaluated evaluated before writing a request or after a +// produce response. Thus, a sink backoff may delay record timeout slightly. +// +// This option is roughly equivalent to delivery.timeout.ms. +func RecordDeliveryTimeout(timeout time.Duration) ProducerOpt { + return producerOpt{func(cfg *cfg) { cfg.recordTimeout = timeout }} +} + +// TransactionalID sets a transactional ID for the client, ensuring that +// records are produced transactionally under this ID (exactly once semantics). +// +// For Kafka-to-Kafka transactions, the transactional ID is only one half of +// the equation. You must also assign a group to consume from. +// +// To produce transactionally, you first BeginTransaction, then produce records +// consumed from a group, then you EndTransaction. All records produced outside +// of a transaction will fail immediately with an error. +// +// After producing a batch, you must commit what you consumed. Auto committing +// offsets is disabled during transactional consuming / producing. +// +// Note that unless using Kafka 2.5, a consumer group rebalance may be +// problematic. Production should finish and be committed before the client +// rejoins the group. It may be safer to use an eager group balancer and just +// abort the transaction. 
Alternatively, any time a partition is revoked, you
+// could abort the transaction and reset offsets being consumed.
+//
+// If the client detects an unrecoverable error, all records produced
+// thereafter will fail.
+//
+// Lastly, the default read level is READ_UNCOMMITTED. Be sure to use the
+// FetchIsolationLevel option if you want to only read committed.
+func TransactionalID(id string) ProducerOpt {
+	return producerOpt{func(cfg *cfg) { cfg.txnID = &id }}
+}
+
+// TransactionTimeout sets the allowed time for a transaction, overriding the
+// default 40s. It is a good idea to keep this less than a group's session
+// timeout, so that a group member will always be alive for the duration of a
+// transaction even if connectivity dies. This helps prevent a transaction
+// finishing after a rebalance, which is problematic pre-Kafka 2.5. If you
+// are on Kafka 2.5+, then you can use the RequireStableFetchOffsets option
+// when assigning the group, and you can set this to whatever you would like.
+//
+// Transaction timeouts begin when the first record is produced within a
+// transaction, not when a transaction begins.
+func TransactionTimeout(timeout time.Duration) ProducerOpt {
+	return producerOpt{func(cfg *cfg) { cfg.txnTimeout = timeout }}
+}
+
+////////////////////////////
+// CONSUMER CONFIGURATION //
+////////////////////////////
+
+// FetchMaxWait sets the maximum amount of time a broker will wait for a
+// fetch response to hit the minimum number of required bytes before returning,
+// overriding the default 5s.
+//
+// This corresponds to the Java fetch.max.wait.ms setting.
+func FetchMaxWait(wait time.Duration) ConsumerOpt {
+	return consumerOpt{func(cfg *cfg) { cfg.maxWait = int32(wait.Milliseconds()) }}
+}
+
+// FetchMaxBytes sets the maximum amount of bytes a broker will try to send
+// during a fetch, overriding the default 50MiB. Note that brokers may not obey
+// this limit if they have records larger than this limit. Also note that this
+// client sends a fetch to each broker concurrently, meaning the client will
+// buffer up to (brokers * max bytes) worth of memory.
+//
+// This corresponds to the Java fetch.max.bytes setting.
+//
+// If bumping this, consider bumping BrokerMaxReadBytes.
+//
+// If what you are consuming is compressed, and compressed well, it is strongly
+// recommended to set this option so that decompression does not eat all of
+// your RAM.
+func FetchMaxBytes(b int32) ConsumerOpt {
+	return consumerOpt{func(cfg *cfg) { cfg.maxBytes = lazyI32(b) }}
+}
+
+// FetchMinBytes sets the minimum amount of bytes a broker will try to send
+// during a fetch, overriding the default 1 byte.
+//
+// With the default of 1, data is sent as soon as it is available. By bumping
+// this, the broker will try to wait for more data, which may improve server
+// throughput at the expense of added latency.
+//
+// This corresponds to the Java fetch.min.bytes setting.
+func FetchMinBytes(b int32) ConsumerOpt {
+	return consumerOpt{func(cfg *cfg) { cfg.minBytes = b }}
+}
+
+// FetchMaxPartitionBytes sets the maximum amount of bytes that will be
+// consumed for a single partition in a fetch request, overriding the default
+// 1MiB. Note that if a single batch is larger than this number, that batch
+// will still be returned so the client can make progress.
+//
+// This corresponds to the Java max.partition.fetch.bytes setting.
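// Editor's note: illustrative sketch, not part of this patch or the vendored
// source. It combines the fetch-sizing options documented above to put a
// rough upper bound on consumer memory; the broker address, topic, and sizes
// are hypothetical, and the real bound also scales with broker count as
// described in FetchMaxBytes.
package main

import (
	"context"
	"log"

	"github.com/twmb/franz-go/pkg/kgo"
)

func main() {
	cl, err := kgo.NewClient(
		kgo.SeedBrokers("localhost:9092"),
		kgo.ConsumeTopics("example-topic"), // hypothetical topic
		kgo.FetchMaxBytes(16<<20),          // at most ~16 MiB per in-flight fetch
		kgo.FetchMaxPartitionBytes(1<<20),  // at most ~1 MiB per partition per fetch
		kgo.MaxConcurrentFetches(3),        // at most 3 fetches in flight or buffered
	)
	if err != nil {
		log.Fatalf("unable to create client: %v", err)
	}
	defer cl.Close()

	// Rough ceiling: MaxConcurrentFetches * FetchMaxBytes of buffered fetch data.
	fetches := cl.PollFetches(context.Background())
	fetches.EachRecord(func(r *kgo.Record) {
		_ = r // process the record
	})
}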
+func FetchMaxPartitionBytes(b int32) ConsumerOpt { + return consumerOpt{func(cfg *cfg) { cfg.maxPartBytes = lazyI32(b) }} +} + +// MaxConcurrentFetches sets the maximum number of fetch requests to allow in +// flight or buffered at once, overriding the unbounded (i.e. number of +// brokers) default. +// +// This setting, paired with FetchMaxBytes, can upper bound the maximum amount +// of memory that the client can use for consuming. +// +// Requests are issued to brokers in a FIFO order: once the client is ready to +// issue a request to a broker, it registers that request and issues it in +// order with other registrations. +// +// If Kafka replies with any data, the client does not track the fetch as +// completed until the user has polled the buffered fetch. Thus, a concurrent +// fetch is not considered complete until all data from it is done being +// processed and out of the client itself. +// +// Note that brokers are allowed to hang for up to FetchMaxWait before replying +// to a request, so if this option is too constrained and you are consuming a +// low throughput topic, the client may take a long time before requesting a +// broker that has new data. For high throughput topics, or if the allowed +// concurrent fetches is large enough, this should not be a concern. +// +// A value of 0 implies the allowed concurrency is unbounded and will be +// limited only by the number of brokers in the cluster. +func MaxConcurrentFetches(n int) ConsumerOpt { + return consumerOpt{func(cfg *cfg) { cfg.maxConcurrentFetches = n }} +} + +// ConsumeResetOffset sets the offset to start consuming from, or if +// OffsetOutOfRange is seen while fetching, to restart consuming from. The +// default is NewOffset().AtStart(), i.e., the earliest offset. +// +// For direct consumers, this is the offset that partitions begin to consume +// from. For group consumers, this is the offset that partitions begin to +// consume from if a partition has no commits. If partitions have commits, the +// commit offset is used. While fetching, if OffsetOutOfRange is encountered, +// the partition resets to ConsumeResetOffset. Using [NoResetOffset] stops +// consuming a partition if the client encounters OffsetOutOfRange. Using +// [Offset.AtCommitted] prevents consuming a partition in a group if the +// partition has no prior commits. +// +// If you use an exact offset or relative offsets and the offset ends up out of +// range, the client chooses the nearest of either the log start offset or the +// high watermark: using At(3) when the partition starts at 8 results in the +// partition being consumed from offset 8. +// +// In short form, the following determines the offset for when a partition is +// seen for the first time, or reset while fetching: +// +// reset at start? => log start offset +// reset at end? => high watermark +// reset at exact? => this exact offset (3 means offset 3) +// reset relative? => the above, + / - the relative amount +// reset exact or relative out of bounds? => nearest boundary (start or end) +// reset after millisec? => high watermark, or first offset after millisec if one exists +// +// To match Kafka's auto.offset.reset, +// +// NewOffset().AtStart() == auto.offset.reset "earliest" +// NewOffset().AtEnd() == auto.offset.reset "latest" +// NewOffset().AtCommitted() == auto.offset.reset "none" +// +// With the above, make sure to use NoResetOffset() if you want to stop +// consuming when you encounter OffsetOutOfRange. 
It is highly recommended +// to read the docs for all Offset methods to see a few other alternatives. +func ConsumeResetOffset(offset Offset) ConsumerOpt { + return consumerOpt{func(cfg *cfg) { cfg.resetOffset = offset }} +} + +// Rack specifies where the client is physically located and changes fetch +// requests to consume from the closest replica as opposed to the leader +// replica. +// +// Consuming from a preferred replica can increase latency but can decrease +// cross datacenter costs. See KIP-392 for more information. +func Rack(rack string) ConsumerOpt { + return consumerOpt{func(cfg *cfg) { cfg.rack = rack }} +} + +// IsolationLevel controls whether uncommitted or only committed records are +// returned from fetch requests. +type IsolationLevel struct { + level int8 +} + +// ReadUncommitted (the default) is an isolation level that returns the latest +// produced records, be they committed or not. +func ReadUncommitted() IsolationLevel { return IsolationLevel{0} } + +// ReadCommitted is an isolation level to only fetch committed records. +func ReadCommitted() IsolationLevel { return IsolationLevel{1} } + +// FetchIsolationLevel sets the "isolation level" used for fetching +// records, overriding the default ReadUncommitted. +func FetchIsolationLevel(level IsolationLevel) ConsumerOpt { + return consumerOpt{func(cfg *cfg) { cfg.isolationLevel = level.level }} +} + +// KeepControlRecords sets the client to keep control messages and return +// them with fetches, overriding the default that discards them. +// +// Generally, control messages are not useful. +func KeepControlRecords() ConsumerOpt { + return consumerOpt{func(cfg *cfg) { cfg.keepControl = true }} +} + +// ConsumeTopics adds topics to use for consuming. +// +// By default, consuming will start at the beginning of partitions. To change +// this, use the ConsumeResetOffset option. +func ConsumeTopics(topics ...string) ConsumerOpt { + return consumerOpt{func(cfg *cfg) { + cfg.topics = make(map[string]*regexp.Regexp, len(topics)) + for _, topic := range topics { + cfg.topics[topic] = nil + } + }} +} + +// ConsumePartitions sets partitions to consume from directly and the offsets +// to start consuming those partitions from. +// +// This option is basically a way to explicitly consume from subsets of +// partitions in topics, or to consume at exact offsets. Offsets from this +// option have higher precedence than the ConsumeResetOffset. +// +// This option is not compatible with group consuming and regex consuming. If +// you want to assign partitions directly, but still use Kafka to commit +// offsets, check out the kadm package's FetchOffsets and CommitOffsets +// methods. These will allow you to commit as a group outside the context of a +// Kafka group. +func ConsumePartitions(partitions map[string]map[int32]Offset) ConsumerOpt { + return consumerOpt{func(cfg *cfg) { cfg.partitions = partitions }} +} + +// ConsumeRegex sets the client to parse all topics passed to ConsumeTopics as +// regular expressions. +// +// When consuming via regex, every metadata request loads *all* topics, so that +// all topics can be passed to any regular expressions. Every topic is +// evaluated only once ever across all regular expressions; either it +// permanently is known to match, or is permanently known to not match. +func ConsumeRegex() ConsumerOpt { + return consumerOpt{func(cfg *cfg) { cfg.regex = true }} +} + +// DisableFetchSessions sets the client to not use fetch sessions (Kafka 1.0+). 
+// +// A "fetch session" is is a way to reduce bandwidth for fetch requests & +// responses, and to potentially reduce the amount of work that brokers have to +// do to handle fetch requests. A fetch session opts into the broker tracking +// some state of what the client is interested in. For example, say that you +// are interested in thousands of topics, and most of these topics are +// receiving data only rarely. A fetch session allows the client to register +// that it is interested in those thousands of topics on the first request. On +// future requests, if the offsets for these topics have not changed, those +// topics will be elided from the request. The broker knows to reply with the +// extra topics if any new data is available, otherwise the topics are also +// elided from the response. This massively reduces the amount of information +// that needs to be included in requests or responses. +// +// Using fetch sessions means more state is stored on brokers. Maintaining this +// state eats some memory. If you have thousands of consumers, you may not want +// fetch sessions to be used for everything. Brokers intelligently handle this +// by not creating sessions if they are at their configured limit, but you may +// consider disabling sessions if they are generally not useful to you. Brokers +// have metrics for the number of fetch sessions active, so you can monitor +// that to determine whether enabling or disabling sessions is beneficial or +// not. +// +// For more details on fetch sessions, see KIP-227. +func DisableFetchSessions() ConsumerOpt { + return consumerOpt{func(cfg *cfg) { cfg.disableFetchSessions = true }} +} + +// ConsumePreferringLagFn allows you to re-order partitions before they are +// fetched, given each partition's current lag. +// +// By default, the client rotates partitions fetched by one after every fetch +// request. Kafka answers fetch requests in the order that partitions are +// requested, filling the fetch response until FetchMaxBytes and +// FetchMaxPartitionBytes are hit. All partitions eventually rotate to the +// front, ensuring no partition is starved. +// +// With this option, you can return topic order and per-topic partition +// ordering. These orders will sort to the front (first by topic, then by +// partition). Any topic or partitions that you do not return are added to the +// end, preserving their original ordering. +// +// For a simple lag preference that sorts the laggiest topics and partitions +// first, use `kgo.ConsumePreferringLagFn(kgo.PreferLagAt(50))` (or some other +// similar lag number). +func ConsumePreferringLagFn(fn PreferLagFn) ConsumerOpt { + return consumerOpt{func(cfg *cfg) { cfg.preferLagFn = fn }} +} + +// KeepRetryableFetchErrors switches the client to always return any retryable +// broker error when fetching, rather than stripping them. By default, the +// client strips retryable errors from fetch responses; these are usually +// signals that a client needs to update its metadata to learn of where a +// partition has moved to (from one broker to another), or they are signals +// that one broker is temporarily unhealthy (broker not available). You can opt +// into keeping these errors if you want to specifically react to certain +// events. For example, if you want to react to you yourself deleting a topic, +// you can watch for either UNKNOWN_TOPIC_OR_PARTITION or UNKNOWN_TOPIC_ID +// errors being returned in fetches (and ignore the other errors). 
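// Editor's note: illustrative sketch, not part of this patch or the vendored
// source. It shows a minimal group consumer built from the options documented
// in this file, ending with one synchronous commit of what was polled, as the
// ConsumerGroup documentation below recommends. The group name, topic, and
// broker address are hypothetical.
package main

import (
	"context"
	"log"

	"github.com/twmb/franz-go/pkg/kgo"
)

func main() {
	cl, err := kgo.NewClient(
		kgo.SeedBrokers("localhost:9092"),
		kgo.ConsumerGroup("example-group"), // hypothetical group
		kgo.ConsumeTopics("example-topic"), // hypothetical topic
		kgo.ConsumeResetOffset(kgo.NewOffset().AtStart()),
	)
	if err != nil {
		log.Fatalf("unable to create client: %v", err)
	}
	defer cl.Close()

	ctx := context.Background()
	for i := 0; i < 3; i++ { // bounded loop for the sketch; normally run until shutdown
		fetches := cl.PollFetches(ctx)
		fetches.EachError(func(topic string, partition int32, err error) {
			log.Printf("fetch error on %s/%d: %v", topic, partition, err)
		})
		fetches.EachRecord(func(r *kgo.Record) {
			_ = r // process the record
		})
	}

	// One final blocking commit of previously polled offsets before leaving.
	if err := cl.CommitUncommittedOffsets(ctx); err != nil {
		log.Printf("final commit failed: %v", err)
	}
}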
+func KeepRetryableFetchErrors() ConsumerOpt { + return consumerOpt{func(cfg *cfg) { cfg.keepRetryableFetchErrors = true }} +} + +////////////////////////////////// +// CONSUMER GROUP CONFIGURATION // +////////////////////////////////// + +// ConsumerGroup sets the consumer group for the client to join and consume in. +// This option is required if using any other group options. +// +// Note that when group consuming, the default is to autocommit every 5s. To be +// safe, autocommitting only commits what is *previously* polled. If you poll +// once, nothing will be committed. If you poll again, the first poll is +// available to be committed. This ensures at-least-once processing, but does +// mean there is likely some duplicate processing during rebalances. When your +// client shuts down, you should issue one final synchronous commit before +// leaving the group (because you will not be polling again, and you are not +// waiting for an autocommit). +func ConsumerGroup(group string) GroupOpt { + return groupOpt{func(cfg *cfg) { cfg.group = group }} +} + +// Balancers sets the group balancers to use for dividing topic partitions +// among group members, overriding the current default [cooperative-sticky]. +// This option is equivalent to Kafka's partition.assignment.strategies option. +// +// For balancing, Kafka chooses the first protocol that all group members agree +// to support. +// +// Note that if you opt into cooperative-sticky rebalancing, cooperative group +// balancing is incompatible with eager (classical) rebalancing and requires a +// careful rollout strategy (see KIP-429). +func Balancers(balancers ...GroupBalancer) GroupOpt { + return groupOpt{func(cfg *cfg) { cfg.balancers = balancers }} +} + +// SessionTimeout sets how long a member in the group can go between +// heartbeats, overriding the default 45,000ms. If a member does not heartbeat +// in this timeout, the broker will remove the member from the group and +// initiate a rebalance. +// +// If you are using a GroupTransactSession for EOS, wish to lower this, and are +// talking to a Kafka cluster pre 2.5, consider lowering the +// TransactionTimeout. If you do not, you risk a transaction finishing after a +// group has rebalanced, which could lead to duplicate processing. If you are +// talking to a Kafka 2.5+ cluster, you can safely use the +// RequireStableFetchOffsets group option and prevent any problems. +// +// This option corresponds to Kafka's session.timeout.ms setting and must be +// within the broker's group.min.session.timeout.ms and +// group.max.session.timeout.ms. +func SessionTimeout(timeout time.Duration) GroupOpt { + return groupOpt{func(cfg *cfg) { cfg.sessionTimeout = timeout }} +} + +// RebalanceTimeout sets how long group members are allowed to take when a a +// rebalance has begun, overriding the default 60,000ms. This timeout is how +// long all members are allowed to complete work and commit offsets, minus the +// time it took to detect the rebalance (from a heartbeat). +// +// Kafka uses the largest rebalance timeout of all members in the group. If a +// member does not rejoin within this timeout, Kafka will kick that member from +// the group. +// +// This corresponds to Kafka's rebalance.timeout.ms. +func RebalanceTimeout(timeout time.Duration) GroupOpt { + return groupOpt{func(cfg *cfg) { cfg.rebalanceTimeout = timeout }} +} + +// HeartbeatInterval sets how long a group member goes between heartbeats to +// Kafka, overriding the default 3,000ms. 
+// +// Kafka uses heartbeats to ensure that a group member's session stays active. +// This value can be any value lower than the session timeout, but should be no +// higher than 1/3rd the session timeout. +// +// This corresponds to Kafka's heartbeat.interval.ms. +func HeartbeatInterval(interval time.Duration) GroupOpt { + return groupOpt{func(cfg *cfg) { cfg.heartbeatInterval = interval }} +} + +// RequireStableFetchOffsets sets the group consumer to require "stable" fetch +// offsets before consuming from the group. Proposed in KIP-447 and introduced +// in Kafka 2.5, stable offsets are important when consuming from partitions +// that a transactional producer could be committing to. +// +// With this option, Kafka will block group consumers from fetching offsets for +// partitions that are in an active transaction. This option is **strongly** +// recommended to help prevent duplication problems. See this repo's KIP-447 +// doc to learn more. +// +// Because this can block consumption, it is strongly recommended to set +// transactional timeouts to a small value (10s) rather than the default 60s. +// Lowering the transactional timeout will reduce the chance that consumers are +// entirely blocked. +func RequireStableFetchOffsets() GroupOpt { + return groupOpt{func(cfg *cfg) { cfg.requireStable = true }} +} + +// BlockRebalanceOnPoll switches the client to block rebalances whenever you +// poll until you explicitly call AllowRebalance. This option also ensures that +// any OnPartitions{Assigned,Revoked,Lost} callbacks are only called when you +// allow rebalances; they cannot be called if you have polled and are +// processing records. +// +// By default, a consumer group is managed completely independently of +// consuming. A rebalance may occur at any moment. If you poll records, and +// then a rebalance happens, and then you commit, you may be committing to +// partitions you no longer own. This will result in duplicates. In the worst +// case, you could rewind commits that a different member has already made +// (risking duplicates if another rebalance were to happen before that other +// member commits again). +// +// By blocking rebalancing after you poll until you call AllowRebalances, you +// can be sure that you commit records that your member currently owns. +// However, the big tradeoff is that by blocking rebalances, you put your group +// member at risk of waiting so long that the group member is kicked from the +// group because it exceeded the rebalance timeout. To compare clients, Sarama +// takes the default choice of blocking rebalancing; this option makes kgo more +// similar to Sarama. +// +// If you use this option, you should ensure that you always process records +// quickly, and that your OnPartitions{Assigned,Revoked,Lost} callbacks are +// fast. It is recommended you also use PollRecords rather than PollFetches so +// that you can bound how many records you process at once. You must always +// AllowRebalances when you are done processing the records you received. Only +// rebalances that lose partitions are blocked; rebalances that are strictly +// net additions or non-modifications do not block (the On callbacks are always +// blocked so that you can ensure their serialization). +// +// This function can largely replace any commit logic you may want to do in +// OnPartitionsRevoked. +// +// Lastly, note that this actually blocks any rebalance from calling +// OnPartitions{Assigned,Revoked,Lost}. 
If you are using a cooperative +// rebalancer such as CooperativeSticky, a rebalance can begin right before you +// poll, and you will still receive records because no partitions are lost yet. +// The in-progress rebalance only blocks if you are assigned new partitions or +// if any of your partitions are revoked. +func BlockRebalanceOnPoll() GroupOpt { + return groupOpt{func(cfg *cfg) { cfg.blockRebalanceOnPoll = true }} +} + +// AdjustFetchOffsetsFn sets the function to be called when a group is joined +// after offsets are fetched so that a user can adjust offsets before +// consumption begins. +// +// This function should not exceed the rebalance interval. It is possible +// for the group, immediately after finishing a balance, to re-enter a new balancing +// session. This function is passed a context that is canceled if the current group +// session finishes (i.e., after revoking). +// +// If you are resetting the position of the offset, you may want to clear any existing +// "epoch" with WithEpoch(-1). If the epoch is non-negative, the client performs +// data loss detection, which may result in errors and unexpected behavior. +// +// This function is called after OnPartitionsAssigned and may be called before +// or after OnPartitionsRevoked. +func AdjustFetchOffsetsFn(adjustOffsetsBeforeAssign func(context.Context, map[string]map[int32]Offset) (map[string]map[int32]Offset, error)) GroupOpt { + return groupOpt{func(cfg *cfg) { cfg.adjustOffsetsBeforeAssign = adjustOffsetsBeforeAssign }} +} + +// OnPartitionsAssigned sets the function to be called when a group is joined +// after partitions are assigned before fetches for those partitions begin. +// +// This function, combined with OnPartitionsRevoked, should not exceed the +// rebalance interval. It is possible for the group to re-enter a new balancing +// session immediately after finishing a balance. +// +// This function is passed the client's context, which is only canceled if the +// client is closed. +// +// This function is not called concurrent with any other OnPartitions callback, +// and this function is given a new map that the user is free to modify. +// +// This function can be called at any time you are polling or processing +// records. If you want to ensure this function is called serially with +// processing, consider the BlockRebalanceOnPoll option. +func OnPartitionsAssigned(onAssigned func(context.Context, *Client, map[string][]int32)) GroupOpt { + return groupOpt{func(cfg *cfg) { cfg.onAssigned, cfg.setAssigned = onAssigned, true }} +} + +// OnPartitionsRevoked sets the function to be called once this group member +// has partitions revoked. +// +// This function, combined with [OnPartitionsAssigned], should not exceed the +// rebalance interval. It is possible for the group to re-enter a new balancing +// session immediately after finishing a balance. +// +// If autocommit is enabled, the default OnPartitionsRevoked is a blocking +// commit of all non-dirty offsets (where "dirty" is the most recent poll). +// +// The OnPartitionsRevoked function is passed the client's context, which is +// only canceled if the client is closed. OnPartitionsRevoked function is +// called at the end of a group session even if there are no partitions being +// revoked. If you are committing offsets manually (have disabled +// autocommitting), it is highly recommended to do a proper blocking commit in +// OnPartitionsRevoked. 
+//
+// This function is not called concurrent with any other OnPartitions callback,
+// and this function is given a new map that the user is free to modify.
+//
+// This function can be called at any time you are polling or processing
+// records. If you want to ensure this function is called serially with
+// processing, consider the BlockRebalanceOnPoll option.
+//
+// This function is called if a "fatal" group error is encountered and you have
+// not set [OnPartitionsLost]. See OnPartitionsLost for more details.
+func OnPartitionsRevoked(onRevoked func(context.Context, *Client, map[string][]int32)) GroupOpt {
+	return groupOpt{func(cfg *cfg) { cfg.onRevoked, cfg.setRevoked = onRevoked, true }}
+}
+
+// OnPartitionsLost sets the function to be called on "fatal" group errors,
+// such as IllegalGeneration, UnknownMemberID, and authentication failures.
+// This function differs from [OnPartitionsRevoked] in that it is unlikely that
+// commits will succeed when partitions are outright lost, whereas commits
+// likely will succeed when revoking partitions.
+//
+// Because this function is called on any fatal group error, it is possible for
+// this function to be called without the group ever being joined.
+//
+// This function is not called concurrent with any other OnPartitions callback,
+// and this function is given a new map that the user is free to modify.
+//
+// This function can be called at any time you are polling or processing
+// records. If you want to ensure this function is called serially with
+// processing, consider the BlockRebalanceOnPoll option.
+func OnPartitionsLost(onLost func(context.Context, *Client, map[string][]int32)) GroupOpt {
+	return groupOpt{func(cfg *cfg) { cfg.onLost, cfg.setLost = onLost, true }}
+}
+
+// OnOffsetsFetched sets a function to be called after offsets have been
+// fetched after a group has been balanced. This function is meant to allow
+// users to inspect offset commit metadata. An error can be returned to exit
+// this group session and exit back to join group.
+//
+// This function should not exceed the rebalance interval. It is possible for
+// the group, immediately after finishing a balance, to re-enter a new
+// balancing session. This function is passed a context that is canceled if the
+// current group session finishes (i.e., after revoking).
+//
+// This function is called after OnPartitionsAssigned and may be called before
+// or after OnPartitionsRevoked.
+func OnOffsetsFetched(onFetched func(context.Context, *Client, *kmsg.OffsetFetchResponse) error) GroupOpt {
+	return groupOpt{func(cfg *cfg) { cfg.onFetched = onFetched }}
+}
+
+// DisableAutoCommit disables auto committing.
+//
+// If you disable autocommitting, you may want to use a custom
+// OnPartitionsRevoked, otherwise you may end up doubly processing records
+// (which is fine, just leads to duplicate processing). Consider the scenario:
+// you, member A, are processing partition 0, and previously committed offset 4
+// and have now locally processed through offset 30. A rebalance happens, and
+// partition 0 moves to member B. If you use OnPartitionsRevoked, you can
+// detect that you are losing this partition and commit your work through
+// offset 30, so that member B can start processing at offset 30. If you do not
+// commit (i.e. you do not use a custom OnPartitionsRevoked), the other member
+// will start processing at offset 4. It may process through offset 50, leading
+// to double processing of offsets 4 through 29. Worse, you, member A, can
+// rewind member B's commit, because member B may commit offset 50 and you may
+// eventually commit offset 30. If a rebalance happens, then even more
+// duplicate processing will occur of offsets 30 through 49.
+//
+// Again, OnPartitionsRevoked is not necessary, and not using it just means
+// double processing, which for most workloads is fine since a simple group
+// consumer is not EOS / transactional, only at-least-once. But, this is
+// something to be aware of.
+func DisableAutoCommit() GroupOpt {
+	return groupOpt{func(cfg *cfg) { cfg.autocommitDisable = true }}
+}
+
+// GreedyAutoCommit opts into committing everything that has been polled when
+// autocommitting (the dirty offsets), rather than committing what has
+// previously been polled. This option may result in message loss if your
+// application crashes.
+func GreedyAutoCommit() GroupOpt {
+	return groupOpt{func(cfg *cfg) { cfg.autocommitGreedy = true }}
+}
+
+// AutoCommitInterval sets how long to go between autocommits, overriding the
+// default 5s.
+func AutoCommitInterval(interval time.Duration) GroupOpt {
+	return groupOpt{func(cfg *cfg) { cfg.autocommitInterval = interval }}
+}
+
+// AutoCommitMarks switches the autocommitting behavior to only commit "marked"
+// records, which can be done with the MarkCommitRecords method.
+//
+// This option is basically a halfway point between autocommitting and manually
+// committing. If you have slow batch processing of polls, then you can
+// manually mark records to be autocommitted before you poll again. This way,
+// if you usually take a long time between polls, your partial work can still
+// be automatically checkpointed through autocommitting.
+func AutoCommitMarks() GroupOpt {
+	return groupOpt{func(cfg *cfg) { cfg.autocommitMarks = true }}
+}
+
+// InstanceID sets the group consumer's instance ID, switching the group member
+// from "dynamic" to "static".
+//
+// Prior to Kafka 2.3, joining a group gave a group member a new member ID.
+// The group leader could not tell if this was a rejoining member. Thus, any
+// join caused the group to rebalance.
+//
+// Kafka 2.3 introduced the concept of an instance ID, which can persist across
+// restarts. This allows for avoiding many costly rebalances and allows for
+// stickier rebalancing for rejoining members (since the ID for balancing stays
+// the same). The main downsides are that you, the user of a client, have to
+// manage instance IDs properly, and that it may take longer to rebalance in
+// the event that a client legitimately dies.
+//
+// When using an instance ID, the client does NOT send a leave group request
+// when closing. This allows for the client to restart with the same instance
+// ID and rejoin the group to avoid a rebalance. It is strongly recommended to
+// increase the session timeout enough to allow time for the restart (remember
+// that the default session timeout is 45s).
+//
+// To actually leave the group, you must use an external admin command that
+// issues a leave group request on behalf of this instance ID (see kcl), or you
+// can manually use the kmsg package with a proper LeaveGroupRequest.
+//
+// NOTE: Leaving a group with an instance ID is only supported in Kafka 2.4+.
+//
+// NOTE: If you restart a consumer group leader that is using an instance ID,
+// it will not cause a rebalance even if you change which topics the leader is
+// consuming.
If your cluster is 3.2+, this client internally works around this +// limitation and you do not need to trigger a rebalance manually. +func InstanceID(id string) GroupOpt { + return groupOpt{func(cfg *cfg) { cfg.instanceID = &id }} +} + +// GroupProtocol sets the group's join protocol, overriding the default value +// "consumer". The only reason to override this is if you are implementing +// custom join and sync group logic. +func GroupProtocol(protocol string) GroupOpt { + return groupOpt{func(cfg *cfg) { cfg.protocol = protocol }} +} + +// AutoCommitCallback sets the callback to use if autocommitting is enabled. +// This overrides the default callback that logs errors and continues. +func AutoCommitCallback(fn func(*Client, *kmsg.OffsetCommitRequest, *kmsg.OffsetCommitResponse, error)) GroupOpt { + return groupOpt{func(cfg *cfg) { + if fn != nil { + cfg.commitCallback, cfg.setCommitCallback = fn, true + } + }} +} diff --git a/vendor/github.com/twmb/franz-go/pkg/kgo/consumer.go b/vendor/github.com/twmb/franz-go/pkg/kgo/consumer.go new file mode 100644 index 000000000000..01f98da486a6 --- /dev/null +++ b/vendor/github.com/twmb/franz-go/pkg/kgo/consumer.go @@ -0,0 +1,2347 @@ +package kgo + +import ( + "context" + "errors" + "fmt" + "math" + "sort" + "strings" + "sync" + "sync/atomic" + "time" + + "github.com/twmb/franz-go/pkg/kerr" + "github.com/twmb/franz-go/pkg/kmsg" +) + +// Offset is a message offset in a partition. +type Offset struct { + at int64 + relative int64 + epoch int32 + + currentEpoch int32 // set by us when mapping offsets to brokers + + noReset bool + afterMilli bool +} + +// Random negative, only significant within this package. +const atCommitted = -999 + +// MarshalJSON implements json.Marshaler. +func (o Offset) MarshalJSON() ([]byte, error) { + if o.relative == 0 { + return []byte(fmt.Sprintf(`{"At":%d,"Epoch":%d,"CurrentEpoch":%d}`, o.at, o.epoch, o.currentEpoch)), nil + } + return []byte(fmt.Sprintf(`{"At":%d,"Relative":%d,"Epoch":%d,"CurrentEpoch":%d}`, o.at, o.relative, o.epoch, o.currentEpoch)), nil +} + +// String returns the offset as a string; the purpose of this is for logs. +func (o Offset) String() string { + if o.relative == 0 { + return fmt.Sprintf("{%d e%d ce%d}", o.at, o.epoch, o.currentEpoch) + } else if o.relative > 0 { + return fmt.Sprintf("{%d+%d e%d ce%d}", o.at, o.relative, o.epoch, o.currentEpoch) + } + return fmt.Sprintf("{%d-%d e%d ce%d}", o.at, -o.relative, o.epoch, o.currentEpoch) +} + +// EpochOffset returns this offset as an EpochOffset, allowing visibility into +// what this offset actually currently is. +func (o Offset) EpochOffset() EpochOffset { + return EpochOffset{ + Epoch: o.epoch, + Offset: o.at, + } +} + +// NewOffset creates and returns an offset to use in [ConsumePartitions] or +// [ConsumeResetOffset]. +// +// The default offset begins at the end. +func NewOffset() Offset { + return Offset{ + at: -1, + epoch: -1, + } +} + +// NoResetOffset returns an offset that can be used as a "none" option for the +// [ConsumeResetOffset] option. By default, NoResetOffset starts consuming from +// the beginning of partitions (similar to NewOffset().AtStart()). This can be +// changed with AtEnd, Relative, etc. +// +// Using this offset will make it such that if OffsetOutOfRange is ever +// encountered while consuming, rather than trying to recover, the client will +// return the error to the user and enter a fatal state (for the affected +// partition). 
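+//
+// As an editorial illustration (not upstream documentation): NoResetOffset is
+// typically combined with a position modifier and passed to the
+// ConsumeResetOffset option mentioned above, for example
+//
+//	// Start at the end of each partition, and fail the partition (rather
+//	// than resetting) if OffsetOutOfRange is ever encountered.
+//	kgo.ConsumeResetOffset(kgo.NoResetOffset().AtEnd())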
+func NoResetOffset() Offset {
+	return Offset{
+		at:      -1,
+		epoch:   -1,
+		noReset: true,
+	}
+}
+
+// AfterMilli returns an offset that consumes from the first offset after a
+// given timestamp. This option is *not* compatible with any At options (nor
+// Relative nor WithEpoch); using any of those will clear the special
+// millisecond state.
+//
+// This option can be used to consume at the end of existing partitions, but at
+// the start of any new partitions that are created later:
+//
+//	AfterMilli(time.Now().UnixMilli())
+//
+// By default when using this offset, if consuming encounters an
+// OffsetOutOfRange error, consuming will reset to the first offset after this
+// timestamp. You can use NoResetOffset().AfterMilli(...) to instead switch the
+// client to a fatal state (for the affected partition).
+func (o Offset) AfterMilli(millisec int64) Offset {
+	o.at = millisec
+	o.relative = 0
+	o.epoch = -1
+	o.afterMilli = true
+	return o
+}
+
+// AtStart copies 'o' and returns an offset starting at the beginning of a
+// partition.
+func (o Offset) AtStart() Offset {
+	o.afterMilli = false
+	o.at = -2
+	return o
+}
+
+// AtEnd copies 'o' and returns an offset starting at the end of a partition.
+// If you want to consume at the end of the topic as it exists right now, but
+// at the beginning of new partitions as they are added to the topic later,
+// check out AfterMilli.
+func (o Offset) AtEnd() Offset {
+	o.afterMilli = false
+	o.at = -1
+	return o
+}
+
+// AtCommitted copies 'o' and returns an offset that is used *only if*
+// there is an existing commit. This is only useful for group consumers.
+// If a partition being consumed does not have a commit, the partition will
+// enter a fatal state and return an error from PollFetches.
+//
+// Using this function automatically opts into [NoResetOffset].
+func (o Offset) AtCommitted() Offset {
+	o.noReset = true
+	o.afterMilli = false
+	o.at = atCommitted
+	return o
+}
+
+// Relative copies 'o' and returns an offset that starts 'n' relative to what
+// 'o' currently is. If 'o' is at the end (from [AtEnd]), Relative(-100) will
+// begin 100 before the end.
+func (o Offset) Relative(n int64) Offset {
+	o.afterMilli = false
+	o.relative = n
+	return o
+}
+
+// WithEpoch copies 'o' and returns an offset that uses the given epoch. This
+// epoch is used for truncation detection; the default of -1 implies no
+// truncation detection.
+func (o Offset) WithEpoch(e int32) Offset {
+	o.afterMilli = false
+	if e < 0 {
+		e = -1
+	}
+	o.epoch = e
+	return o
+}
+
+// At returns a copy of the calling offset, changing the returned offset to
+// begin at exactly the requested offset.
+//
+// There are two potential special offsets to use: -2 allows for consuming at
+// the start, and -1 allows for consuming at the end. These two offsets are
+// equivalent to calling AtStart or AtEnd.
+//
+// If the offset is less than -2, the client bounds it to -2 to consume at the
+// start.
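+//
+// Editorial illustration (not upstream documentation): an exact starting
+// offset is usually supplied through ConsumePartitions, referenced in the
+// NewOffset docs above; the topic name and offset here are hypothetical.
+//
+//	kgo.ConsumePartitions(map[string]map[int32]kgo.Offset{
+//		"events": {0: kgo.NewOffset().At(123)},
+//	})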
+func (o Offset) At(at int64) Offset { + o.afterMilli = false + if at < -2 { + at = -2 + } + o.at = at + return o +} + +type consumer struct { + bufferedRecords atomicI64 + bufferedBytes atomicI64 + + cl *Client + + pausedMu sync.Mutex // grabbed when updating paused + paused atomic.Value // loaded when issuing fetches + + // mu is grabbed when + // - polling fetches, for quickly draining sources / updating group uncommitted + // - calling assignPartitions (group / direct updates) + mu sync.Mutex + d *directConsumer // if non-nil, we are consuming partitions directly + g *groupConsumer // if non-nil, we are consuming as a group member + + // On metadata update, if the consumer is set (direct or group), the + // client begins a goroutine that updates the consumer kind's + // assignments. + // + // This is done in a goroutine to not block the metadata loop, because + // the update **could** wait on a group consumer leaving if a + // concurrent LeaveGroup is called, or if restarting a session takes + // just a little bit of time. + // + // The update realistically should be instantaneous, but if it is slow, + // some metadata updates could pile up. We loop with our atomic work + // loop, which collapses repeated updates into one extra update, so we + // loop as little as necessary. + outstandingMetadataUpdates workLoop + + // sessionChangeMu is grabbed when a session is stopped and held through + // when a session can be started again. The sole purpose is to block an + // assignment change running concurrently with a metadata update. + sessionChangeMu sync.Mutex + + session atomic.Value // *consumerSession + kill atomic.Bool + + usingCursors usedCursors + + sourcesReadyMu sync.Mutex + sourcesReadyCond *sync.Cond + sourcesReadyForDraining []*source + fakeReadyForDraining []Fetch + + pollWaitMu sync.Mutex + pollWaitC *sync.Cond + pollWaitState uint64 // 0 == nothing, low 32 bits: # pollers, high 32: # waiting rebalances +} + +func (c *consumer) loadPaused() pausedTopics { return c.paused.Load().(pausedTopics) } +func (c *consumer) clonePaused() pausedTopics { return c.paused.Load().(pausedTopics).clone() } +func (c *consumer) storePaused(p pausedTopics) { c.paused.Store(p) } + +func (c *consumer) waitAndAddPoller() { + if !c.cl.cfg.blockRebalanceOnPoll { + return + } + c.pollWaitMu.Lock() + defer c.pollWaitMu.Unlock() + for c.pollWaitState>>32 != 0 { + c.pollWaitC.Wait() + } + // Rebalance always takes priority, but if there are no active + // rebalances, our poll blocks rebalances. + c.pollWaitState++ +} + +func (c *consumer) unaddPoller() { + if !c.cl.cfg.blockRebalanceOnPoll { + return + } + c.pollWaitMu.Lock() + defer c.pollWaitMu.Unlock() + c.pollWaitState-- + c.pollWaitC.Broadcast() +} + +func (c *consumer) allowRebalance() { + if !c.cl.cfg.blockRebalanceOnPoll { + return + } + c.pollWaitMu.Lock() + defer c.pollWaitMu.Unlock() + // When allowing rebalances, the user is explicitly saying all pollers + // are done. We mask them out. 
+ c.pollWaitState &= math.MaxUint32 << 32 + c.pollWaitC.Broadcast() +} + +func (c *consumer) waitAndAddRebalance() { + if !c.cl.cfg.blockRebalanceOnPoll { + return + } + c.pollWaitMu.Lock() + defer c.pollWaitMu.Unlock() + c.pollWaitState += 1 << 32 + for c.pollWaitState&math.MaxUint32 != 0 { + c.pollWaitC.Wait() + } +} + +func (c *consumer) unaddRebalance() { + if !c.cl.cfg.blockRebalanceOnPoll { + return + } + c.pollWaitMu.Lock() + defer c.pollWaitMu.Unlock() + c.pollWaitState -= 1 << 32 + c.pollWaitC.Broadcast() +} + +// BufferedFetchRecords returns the number of records currently buffered from +// fetching within the client. +// +// This can be used as a gauge to determine how behind your application is for +// processing records the client has fetched. Note that it is perfectly normal +// to see a spike of buffered records, which would correspond to a fetch +// response being processed just before a call to this function. It is only +// problematic if for you if this function is consistently returning large +// values. +func (cl *Client) BufferedFetchRecords() int64 { + return cl.consumer.bufferedRecords.Load() +} + +// BufferedFetchBytes returns the number of bytes currently buffered from +// fetching within the client. This is the sum of all keys, values, and header +// keys/values. See the related [BufferedFetchRecords] for more information. +func (cl *Client) BufferedFetchBytes() int64 { + return cl.consumer.bufferedBytes.Load() +} + +type usedCursors map[*cursor]struct{} + +func (u *usedCursors) use(c *cursor) { + if *u == nil { + *u = make(map[*cursor]struct{}) + } + (*u)[c] = struct{}{} +} + +func (c *consumer) init(cl *Client) { + c.cl = cl + c.paused.Store(make(pausedTopics)) + c.sourcesReadyCond = sync.NewCond(&c.sourcesReadyMu) + c.pollWaitC = sync.NewCond(&c.pollWaitMu) + + if len(cl.cfg.topics) > 0 || len(cl.cfg.partitions) > 0 { + defer cl.triggerUpdateMetadataNow("querying metadata for consumer initialization") // we definitely want to trigger a metadata update + } + + if len(cl.cfg.group) == 0 { + c.initDirect() + } else { + c.initGroup() + } +} + +func (c *consumer) consuming() bool { + return c.g != nil || c.d != nil +} + +// addSourceReadyForDraining tracks that a source needs its buffered fetch +// consumed. +func (c *consumer) addSourceReadyForDraining(source *source) { + c.sourcesReadyMu.Lock() + c.sourcesReadyForDraining = append(c.sourcesReadyForDraining, source) + c.sourcesReadyMu.Unlock() + c.sourcesReadyCond.Broadcast() +} + +// addFakeReadyForDraining saves a fake fetch that has important partition +// errors--data loss or auth failures. +func (c *consumer) addFakeReadyForDraining(topic string, partition int32, err error, why string) { + c.cl.cfg.logger.Log(LogLevelInfo, "injecting fake fetch with an error", "err", err, "why", why) + c.sourcesReadyMu.Lock() + c.fakeReadyForDraining = append(c.fakeReadyForDraining, Fetch{Topics: []FetchTopic{{ + Topic: topic, + Partitions: []FetchPartition{{ + Partition: partition, + Err: err, + }}, + }}}) + c.sourcesReadyMu.Unlock() + c.sourcesReadyCond.Broadcast() +} + +// NewErrFetch returns a fake fetch containing a single empty topic with a +// single zero partition with the given error. +func NewErrFetch(err error) Fetches { + return []Fetch{{ + Topics: []FetchTopic{{ + Topic: "", + Partitions: []FetchPartition{{ + Partition: -1, + Err: err, + }}, + }}, + }} +} + +// PollFetches waits for fetches to be available, returning as soon as any +// broker returns a fetch. 
If the context is nil, this function will return +// immediately with any currently buffered records. +// +// If the client is closed, a fake fetch will be injected that has no topic, a +// partition of 0, and a partition error of ErrClientClosed. If the context is +// canceled, a fake fetch will be injected with ctx.Err. These injected errors +// can be used to break out of a poll loop. +// +// It is important to check all partition errors in the returned fetches. If +// any partition has a fatal error and actually had no records, fake fetch will +// be injected with the error. +// +// If you are group consuming, a rebalance can happen under the hood while you +// process the returned fetches. This can result in duplicate work, and you may +// accidentally commit to partitions that you no longer own. You can prevent +// this by using BlockRebalanceOnPoll, but this comes with different tradeoffs. +// See the documentation on BlockRebalanceOnPoll for more information. +func (cl *Client) PollFetches(ctx context.Context) Fetches { + return cl.PollRecords(ctx, 0) +} + +// PollRecords waits for records to be available, returning as soon as any +// broker returns records in a fetch. If the context is nil, this function will +// return immediately with any currently buffered records. +// +// If the client is closed, a fake fetch will be injected that has no topic, a +// partition of -1, and a partition error of ErrClientClosed. If the context is +// canceled, a fake fetch will be injected with ctx.Err. These injected errors +// can be used to break out of a poll loop. +// +// This returns a maximum of maxPollRecords total across all fetches, or +// returns all buffered records if maxPollRecords is <= 0. +// +// It is important to check all partition errors in the returned fetches. If +// any partition has a fatal error and actually had no records, fake fetch will +// be injected with the error. +// +// If you are group consuming, a rebalance can happen under the hood while you +// process the returned fetches. This can result in duplicate work, and you may +// accidentally commit to partitions that you no longer own. You can prevent +// this by using BlockRebalanceOnPoll, but this comes with different tradeoffs. +// See the documentation on BlockRebalanceOnPoll for more information. +func (cl *Client) PollRecords(ctx context.Context, maxPollRecords int) Fetches { + if maxPollRecords == 0 { + maxPollRecords = -1 + } + c := &cl.consumer + + c.g.undirtyUncommitted() + + // If the user gave us a canceled context, we bail immediately after + // un-dirty-ing marked records. + if ctx != nil { + select { + case <-ctx.Done(): + return NewErrFetch(ctx.Err()) + default: + } + } + + var fetches Fetches + fill := func() { + if c.cl.cfg.blockRebalanceOnPoll { + c.waitAndAddPoller() + defer func() { + if len(fetches) == 0 { + c.unaddPoller() + } + }() + } + + paused := c.loadPaused() + + // A group can grab the consumer lock then the group mu and + // assign partitions. The group mu is grabbed to update its + // uncommitted map. Assigning partitions clears sources ready + // for draining. + // + // We need to grab the consumer mu to ensure proper lock + // ordering and prevent lock inversion. 
Polling fetches also + // updates the group's uncommitted map; if we do not grab the + // consumer mu at the top, we have a problem: without the lock, + // we could have grabbed some sources, then a group assigned, + // and after the assign, we update uncommitted with fetches + // from the old assignment + c.mu.Lock() + defer c.mu.Unlock() + + c.sourcesReadyMu.Lock() + if maxPollRecords < 0 { + for _, ready := range c.sourcesReadyForDraining { + fetches = append(fetches, ready.takeBuffered(paused)) + } + c.sourcesReadyForDraining = nil + } else { + for len(c.sourcesReadyForDraining) > 0 && maxPollRecords > 0 { + source := c.sourcesReadyForDraining[0] + fetch, taken, drained := source.takeNBuffered(paused, maxPollRecords) + if drained { + c.sourcesReadyForDraining = c.sourcesReadyForDraining[1:] + } + maxPollRecords -= taken + fetches = append(fetches, fetch) + } + } + + realFetches := fetches + + fetches = append(fetches, c.fakeReadyForDraining...) + c.fakeReadyForDraining = nil + + c.sourcesReadyMu.Unlock() + + if len(realFetches) == 0 { + return + } + + // Before returning, we want to update our uncommitted. If we + // updated after, then we could end up with weird interactions + // with group invalidations where we return a stale fetch after + // committing in onRevoke. + // + // A blocking onRevoke commit, on finish, allows a new group + // session to start. If we returned stale fetches that did not + // have their uncommitted offset tracked, then we would allow + // duplicates. + if c.g != nil { + c.g.updateUncommitted(realFetches) + } + } + + // We try filling fetches once before waiting. If we have no context, + // we guarantee that we just drain anything available and return. + fill() + if len(fetches) > 0 || ctx == nil { + return fetches + } + + done := make(chan struct{}) + quit := false + go func() { + c.sourcesReadyMu.Lock() + defer c.sourcesReadyMu.Unlock() + defer close(done) + + for !quit && len(c.sourcesReadyForDraining) == 0 && len(c.fakeReadyForDraining) == 0 { + c.sourcesReadyCond.Wait() + } + }() + + exit := func() { + c.sourcesReadyMu.Lock() + quit = true + c.sourcesReadyMu.Unlock() + c.sourcesReadyCond.Broadcast() + } + + select { + case <-cl.ctx.Done(): + exit() + return NewErrFetch(ErrClientClosed) + case <-ctx.Done(): + exit() + return NewErrFetch(ctx.Err()) + case <-done: + } + + fill() + return fetches +} + +// AllowRebalance allows a consumer group to rebalance if it was blocked by you +// polling records in tandem with the BlockRebalanceOnPoll option. +// +// You can poll many times before calling this function; this function +// internally resets the poll count and allows any blocked rebalances to +// continue. Rebalances take priority: if a rebalance is blocked, and you allow +// rebalances and then immediately poll, your poll will be blocked until the +// rebalance completes. Internally, this function simply waits for lost +// partitions to stop being fetched before allowing you to poll again. +func (cl *Client) AllowRebalance() { + cl.consumer.allowRebalance() +} + +// UpdateFetchMaxBytes updates the max bytes that a fetch request will ask for +// and the max partition bytes that a fetch request will ask for each +// partition. +func (cl *Client) UpdateFetchMaxBytes(maxBytes, maxPartBytes int32) { + cl.cfg.maxBytes.store(maxBytes) + cl.cfg.maxPartBytes.store(maxPartBytes) +} + +// PauseFetchTopics sets the client to no longer fetch the given topics and +// returns all currently paused topics. Paused topics persist until resumed. 
+// You can call this function with no topics to simply receive the list of
+// currently paused topics.
+//
+// Pausing topics is independent from pausing individual partitions with the
+// PauseFetchPartitions method. If you pause partitions for a topic with
+// PauseFetchPartitions, and then pause that same topic with PauseFetchTopics,
+// the individually paused partitions will not be unpaused if you only call
+// ResumeFetchTopics.
+func (cl *Client) PauseFetchTopics(topics ...string) []string {
+	c := &cl.consumer
+	if len(topics) == 0 {
+		return c.loadPaused().pausedTopics()
+	}
+	c.pausedMu.Lock()
+	defer c.pausedMu.Unlock()
+	paused := c.clonePaused()
+	paused.addTopics(topics...)
+	c.storePaused(paused)
+	return paused.pausedTopics()
+}
+
+// PauseFetchPartitions sets the client to no longer fetch the given partitions
+// and returns all currently paused partitions. Paused partitions persist until
+// resumed. You can call this function with no partitions to simply receive the
+// list of currently paused partitions.
+//
+// Pausing individual partitions is independent from pausing topics with the
+// PauseFetchTopics method. If you pause partitions for a topic with
+// PauseFetchPartitions, and then pause that same topic with PauseFetchTopics,
+// the individually paused partitions will not be unpaused if you only call
+// ResumeFetchTopics.
+func (cl *Client) PauseFetchPartitions(topicPartitions map[string][]int32) map[string][]int32 {
+	c := &cl.consumer
+	if len(topicPartitions) == 0 {
+		return c.loadPaused().pausedPartitions()
+	}
+	c.pausedMu.Lock()
+	defer c.pausedMu.Unlock()
+	paused := c.clonePaused()
+	paused.addPartitions(topicPartitions)
+	c.storePaused(paused)
+	return paused.pausedPartitions()
+}
+
+// ResumeFetchTopics resumes fetching the input topics if they were previously
+// paused. Resuming topics that are not currently paused is a per-topic no-op.
+// See the documentation on PauseFetchTopics for more details.
+func (cl *Client) ResumeFetchTopics(topics ...string) {
+	defer cl.allSinksAndSources(func(sns sinkAndSource) {
+		sns.source.maybeConsume()
+	})
+
+	c := &cl.consumer
+	c.pausedMu.Lock()
+	defer c.pausedMu.Unlock()
+
+	paused := c.clonePaused()
+	paused.delTopics(topics...)
+	c.storePaused(paused)
+}
+
+// ResumeFetchPartitions resumes fetching the input partitions if they were
+// previously paused. Resuming partitions that are not currently paused is a
+// per-topic no-op. See the documentation on PauseFetchPartitions for more
+// details.
+func (cl *Client) ResumeFetchPartitions(topicPartitions map[string][]int32) {
+	defer cl.allSinksAndSources(func(sns sinkAndSource) {
+		sns.source.maybeConsume()
+	})
+
+	c := &cl.consumer
+	c.pausedMu.Lock()
+	defer c.pausedMu.Unlock()
+
+	paused := c.clonePaused()
+	paused.delPartitions(topicPartitions)
+	c.storePaused(paused)
+}
+
+// SetOffsets sets any matching offsets in setOffsets to the given
+// epoch/offset. Partitions that are not specified are not set. It is invalid
+// to set topics that were not yet returned from a PollFetches: this function
+// sets only partitions that were previously consumed, any extra partitions are
+// skipped.
+//
+// If directly consuming, this function operates as expected given the caveats
+// of the prior paragraph.
+//
+// If using transactions, it is advised to just use a GroupTransactSession and
+// avoid this function entirely.
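+//
+// Editorial illustration (not upstream documentation): rewinding one directly
+// consumed partition to an exact offset; the topic name and offset are
+// hypothetical, and Epoch -1 skips truncation detection as described in
+// WithEpoch.
+//
+//	cl.SetOffsets(map[string]map[int32]kgo.EpochOffset{
+//		"events": {0: {Epoch: -1, Offset: 100}},
+//	})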
+// +// If using group consuming, It is strongly recommended to use this function +// outside of the context of a PollFetches loop and only when you know the +// group is not revoked (i.e., block any concurrent revoke while issuing this +// call) and to not use this concurrent with committing. Any other usage is +// prone to odd interactions. +func (cl *Client) SetOffsets(setOffsets map[string]map[int32]EpochOffset) { + cl.setOffsets(setOffsets, true) +} + +func (cl *Client) setOffsets(setOffsets map[string]map[int32]EpochOffset, log bool) { + if len(setOffsets) == 0 { + return + } + + // We assignPartitions before returning, so we grab the consumer lock + // first to preserve consumer mu => group mu ordering, or to ensure + // no concurrent metadata assign for direct consuming. + c := &cl.consumer + c.mu.Lock() + defer c.mu.Unlock() + + var assigns map[string]map[int32]Offset + var tps *topicsPartitions + switch { + case c.d != nil: + assigns = c.d.getSetAssigns(setOffsets) + tps = c.d.tps + case c.g != nil: + assigns = c.g.getSetAssigns(setOffsets) + tps = c.g.tps + } + if len(assigns) == 0 { + return + } + if log { + c.assignPartitions(assigns, assignSetMatching, tps, "from manual SetOffsets") + } else { + c.assignPartitions(assigns, assignSetMatching, tps, "") + } +} + +// This is guaranteed to be called in a blocking metadata fn, which ensures +// that metadata does not load the tps we are changing. Basically, we ensure +// everything w.r.t. consuming is at a stand still. +func (c *consumer) purgeTopics(topics []string) { + if c.g == nil && c.d == nil { + return + } + + purgeAssignments := make(map[string]map[int32]Offset, len(topics)) + for _, topic := range topics { + purgeAssignments[topic] = nil + } + + c.waitAndAddRebalance() + defer c.unaddRebalance() + + c.mu.Lock() + defer c.mu.Unlock() + + // The difference for groups is we need to lock the group and there is + // a slight type difference in g.using vs d.using. + if c.g != nil { + c.g.mu.Lock() + defer c.g.mu.Unlock() + c.assignPartitions(purgeAssignments, assignPurgeMatching, c.g.tps, fmt.Sprintf("purge of %v requested", topics)) + for _, topic := range topics { + delete(c.g.using, topic) + delete(c.g.reSeen, topic) + } + c.g.rejoin("rejoin from PurgeFetchTopics") + } else { + c.assignPartitions(purgeAssignments, assignPurgeMatching, c.d.tps, fmt.Sprintf("purge of %v requested", topics)) + for _, topic := range topics { + delete(c.d.using, topic) + delete(c.d.reSeen, topic) + delete(c.d.m, topic) + } + } +} + +// AddConsumeTopics adds new topics to be consumed. This function is a no-op if +// the client is configured to consume via regex. +// +// Note that if you are directly consuming and specified ConsumePartitions, +// this function will not add the rest of the partitions for a topic unless the +// topic has been previously purged. That is, if you directly consumed only one +// of five partitions originally, this will not add the other four until the +// entire topic is purged. +func (cl *Client) AddConsumeTopics(topics ...string) { + c := &cl.consumer + if len(topics) == 0 || c.g == nil && c.d == nil || cl.cfg.regex { + return + } + + // We can do this outside of the metadata loop because we are strictly + // adding new topics and forbid regex consuming. 
+ c.mu.Lock() + defer c.mu.Unlock() + + if c.g != nil { + c.g.tps.storeTopics(topics) + } else { + c.d.tps.storeTopics(topics) + for _, topic := range topics { + c.d.m.addt(topic) + } + } + cl.triggerUpdateMetadataNow("from AddConsumeTopics") +} + +// GetConsumeTopics retrives a list of current topics being consumed. +func (cl *Client) GetConsumeTopics() []string { + c := &cl.consumer + if c.g == nil && c.d == nil { + return nil + } + var m map[string]*topicPartitions + var ok bool + if c.g != nil { + m, ok = c.g.tps.v.Load().(topicsPartitionsData) + } else { + m, ok = c.d.tps.v.Load().(topicsPartitionsData) + } + if !ok { + return nil + } + topics := make([]string, 0, len(m)) + for k := range m { + topics = append(topics, k) + } + return topics +} + +// AddConsumePartitions adds new partitions to be consumed at the given +// offsets. This function works only for direct, non-regex consumers. +func (cl *Client) AddConsumePartitions(partitions map[string]map[int32]Offset) { + c := &cl.consumer + if c.d == nil || cl.cfg.regex { + return + } + var topics []string + for t, ps := range partitions { + if len(ps) == 0 { + delete(partitions, t) + continue + } + topics = append(topics, t) + } + if len(partitions) == 0 { + return + } + + c.mu.Lock() + defer c.mu.Unlock() + + c.d.tps.storeTopics(topics) + for t, ps := range partitions { + if c.d.ps[t] == nil { + c.d.ps[t] = make(map[int32]Offset) + } + for p, o := range ps { + c.d.m.add(t, p) + c.d.ps[t][p] = o + } + } + cl.triggerUpdateMetadataNow("from AddConsumePartitions") +} + +// RemoveConsumePartitions removes partitions from being consumed. This +// function works only for direct, non-regex consumers. +// +// This method does not purge the concept of any topics from the client -- if +// you remove all partitions from a topic that was being consumed, metadata +// fetches will still occur for the topic. If you want to remove the topic +// entirely, use PurgeTopicsFromClient. +// +// If you specified ConsumeTopics and this function removes all partitions for +// a topic, the topic will no longer be consumed. +func (cl *Client) RemoveConsumePartitions(partitions map[string][]int32) { + c := &cl.consumer + if c.d == nil || cl.cfg.regex { + return + } + for t, ps := range partitions { + if len(ps) == 0 { + delete(partitions, t) + continue + } + } + if len(partitions) == 0 { + return + } + + c.mu.Lock() + defer c.mu.Unlock() + + removeOffsets := make(map[string]map[int32]Offset, len(partitions)) + for t, ps := range partitions { + removePartitionOffsets := make(map[int32]Offset, len(ps)) + for _, p := range ps { + removePartitionOffsets[p] = Offset{} + } + removeOffsets[t] = removePartitionOffsets + } + + c.assignPartitions(removeOffsets, assignInvalidateMatching, c.d.tps, fmt.Sprintf("remove of %v requested", partitions)) + for t, ps := range partitions { + for _, p := range ps { + c.d.using.remove(t, p) + c.d.m.remove(t, p) + delete(c.d.ps[t], p) + } + if len(c.d.ps[t]) == 0 { + delete(c.d.ps, t) + } + } +} + +// assignHow controls how assignPartitions operates. +type assignHow int8 + +const ( + // This option simply assigns new offsets, doing nothing with existing + // offsets / active fetches / buffered fetches. + assignWithoutInvalidating assignHow = iota + + // This option invalidates active fetches so they will not buffer and + // drops all buffered fetches, and then continues to assign the new + // assignments. 
+ assignInvalidateAll + + // This option does not assign, but instead invalidates any active + // fetches for "assigned" (actually lost) partitions. This additionally + // drops all buffered fetches, because they could contain partitions we + // lost. Thus, with this option, the actual offset in the map is + // meaningless / a dummy offset. + assignInvalidateMatching + + assignPurgeMatching + + // The counterpart to assignInvalidateMatching, assignSetMatching + // resets all matching partitions to the specified offset / epoch. + assignSetMatching +) + +func (h assignHow) String() string { + switch h { + case assignWithoutInvalidating: + return "assigning everything new, keeping current assignment" + case assignInvalidateAll: + return "unassigning everything" + case assignInvalidateMatching: + return "unassigning any currently assigned matching partition that is in the input" + case assignPurgeMatching: + return "unassigning and purging any partition matching the input topics" + case assignSetMatching: + return "reassigning any currently assigned matching partition to the input" + } + return "" +} + +type fmtAssignment map[string]map[int32]Offset + +func (f fmtAssignment) String() string { + var sb strings.Builder + + var topicsWritten int + for topic, partitions := range f { + topicsWritten++ + sb.WriteString(topic) + sb.WriteString("[") + + var partitionsWritten int + for partition, offset := range partitions { + fmt.Fprintf(&sb, "%d%s", partition, offset) + partitionsWritten++ + if partitionsWritten < len(partitions) { + sb.WriteString(" ") + } + } + + sb.WriteString("]") + if topicsWritten < len(f) { + sb.WriteString(", ") + } + } + + return sb.String() +} + +// assignPartitions, called under the consumer's mu, is used to set new cursors +// or add to the existing cursors. +// +// We do not need to pass tps when we are bumping the session or when we are +// invalidating all. All other cases, we want the tps -- the logic below does +// not fully differentiate needing to start a new session vs. just reusing the +// old (third if case below) +func (c *consumer) assignPartitions(assignments map[string]map[int32]Offset, how assignHow, tps *topicsPartitions, why string) { + if c.mu.TryLock() { + c.mu.Unlock() + panic("assignPartitions called without holding the consumer lock, this is a bug in franz-go, please open an issue at github.com/twmb/franz-go") + } + + // The internal code can avoid giving an assign reason in cases where + // the caller logs itself immediately before assigning. We only log if + // there is a reason. + if len(why) > 0 { + c.cl.cfg.logger.Log(LogLevelInfo, "assigning partitions", + "why", why, + "how", how, + "input", fmtAssignment(assignments), + ) + } + var session *consumerSession + var loadOffsets listOrEpochLoads + + defer func() { + if session == nil { // if nil, we stopped the session + session = c.startNewSession(tps) + } else { // else we guarded it + c.unguardSessionChange(session) + } + loadOffsets.loadWithSession(session, "loading offsets in new session from assign") // odds are this assign came from a metadata update, so no reason to force a refresh with loadWithSessionNow + + // If we started a new session or if we unguarded, we have one + // worker. This one worker allowed us to safely add our load + // offsets before the session could be concurrently stopped + // again. Now that we have added the load offsets, we allow the + // session to be stopped. 
+ session.decWorker() + }() + + if how == assignWithoutInvalidating { + // Guarding a session change can actually create a new session + // if we had no session before, which is why we need to pass in + // our topicPartitions. + session = c.guardSessionChange(tps) + } else { + loadOffsets, _ = c.stopSession() + + // First, over all cursors currently in use, we unset them or set them + // directly as appropriate. Anything we do not unset, we keep. + + var keep usedCursors + for usedCursor := range c.usingCursors { + shouldKeep := true + if how == assignInvalidateAll { + usedCursor.unset() + shouldKeep = false + } else { // invalidateMatching or setMatching + if assignTopic, ok := assignments[usedCursor.topic]; ok { + if how == assignPurgeMatching { // topic level + usedCursor.source.removeCursor(usedCursor) + shouldKeep = false + } else if assignPart, ok := assignTopic[usedCursor.partition]; ok { + if how == assignInvalidateMatching { + usedCursor.unset() + shouldKeep = false + } else { // how == assignSetMatching + usedCursor.setOffset(cursorOffset{ + offset: assignPart.at, + lastConsumedEpoch: assignPart.epoch, + }) + } + } + } + } + if shouldKeep { + keep.use(usedCursor) + } + } + c.usingCursors = keep + + // For any partition that was listing offsets or loading + // epochs, we want to ensure that if we are keeping those + // partitions, we re-start the list/load. + // + // Note that we do not need to unset cursors here; anything + // that actually resulted in a cursor is forever tracked in + // usedCursors. We only do not have used cursors if an + // assignment went straight to listing / epoch loading, and + // that list/epoch never finished. + switch how { + case assignWithoutInvalidating: + // Nothing to do -- this is handled above. + case assignInvalidateAll: + loadOffsets = listOrEpochLoads{} + case assignSetMatching: + // We had not yet loaded this partition, so there is + // nothing to set, and we keep everything. + case assignInvalidateMatching: + loadOffsets.keepFilter(func(t string, p int32) bool { + if assignTopic, ok := assignments[t]; ok { + if _, ok := assignTopic[p]; ok { + return false + } + } + return true + }) + case assignPurgeMatching: + // This is slightly different than invalidate in that + // we invalidate whole topics. + loadOffsets.keepFilter(func(t string, _ int32) bool { + _, ok := assignments[t] + return !ok // assignments are topics to purge -- do NOT keep the topic if it is being purged + }) + // We have to purge from tps _after_ the session is + // stopped. If we purge early while the session is + // ongoing, then another goroutine could be loading and + // using tps and expecting topics not yet removed from + // assignPartitions to still be there. Specifically, + // mapLoadsToBrokers could be expecting topic foo to be + // there (from the session!), so if we purge foo before + // stopping the session, we will panic. + topics := make([]string, 0, len(assignments)) + for t := range assignments { + topics = append(topics, t) + } + tps.purgeTopics(topics) + } + } + + // This assignment could contain nothing (for the purposes of + // invalidating active fetches), so we only do this if needed. + if len(assignments) == 0 || how != assignWithoutInvalidating { + return + } + + c.cl.cfg.logger.Log(LogLevelDebug, "assign requires loading offsets") + + topics := tps.load() + for topic, partitions := range assignments { + topicPartitions := topics.loadTopic(topic) // should be non-nil + if topicPartitions == nil { + c.cl.cfg.logger.Log(LogLevelError, "BUG! 
consumer was assigned topic that we did not ask for in ConsumeTopics nor ConsumePartitions, skipping!", "topic", topic) + continue + } + + for partition, offset := range partitions { + // If we are loading the first record after a millisec, + // we go directly to listing offsets. Epoch validation + // does not ever set afterMilli. + if offset.afterMilli { + loadOffsets.addLoad(topic, partition, loadTypeList, offsetLoad{ + replica: -1, + Offset: offset, + }) + continue + } + + // First, if the request is exact, get rid of the relative + // portion. We are modifying a copy of the offset, i.e. we + // are appropriately not modfying 'assignments' itself. + if offset.at >= 0 { + offset.at += offset.relative + if offset.at < 0 { + offset.at = 0 + } + offset.relative = 0 + } + + // If we are requesting an exact offset with an epoch, + // we do truncation detection and then use the offset. + // + // Otherwise, an epoch is specified without an exact + // request which is useless for us, or a request is + // specified without a known epoch. + // + // The client ensures the epoch is non-negative from + // fetch offsets only if the broker supports KIP-320, + // but we do not override the user manually specifying + // an epoch. + if offset.at >= 0 && offset.epoch >= 0 { + loadOffsets.addLoad(topic, partition, loadTypeEpoch, offsetLoad{ + replica: -1, + Offset: offset, + }) + continue + } + + // If an exact offset is specified and we have loaded + // the partition, we use it. We have to use epoch -1 + // rather than the latest loaded epoch on the partition + // because the offset being requested to use could be + // from an epoch after OUR loaded epoch. Otherwise, we + // could update the metadata, see the later epoch, + // request the end offset for our prior epoch, and then + // think data loss occurred. + // + // If an offset is unspecified or we have not loaded + // the partition, we list offsets to find out what to + // use. + if offset.at >= 0 && partition >= 0 && partition < int32(len(topicPartitions.partitions)) { + part := topicPartitions.partitions[partition] + cursor := part.cursor + cursor.setOffset(cursorOffset{ + offset: offset.at, + lastConsumedEpoch: -1, + }) + cursor.allowUsable() + c.usingCursors.use(cursor) + continue + } + + // If the offset is atCommitted, then no offset was + // loaded from FetchOffsets. We inject an error and + // avoid using this partition. + if offset.at == atCommitted { + c.addFakeReadyForDraining(topic, partition, errNoCommittedOffset, "notification of uncommitted partition") + continue + } + + loadOffsets.addLoad(topic, partition, loadTypeList, offsetLoad{ + replica: -1, + Offset: offset, + }) + } + } +} + +func (c *consumer) doOnMetadataUpdate() { + if !c.consuming() { + return + } + + // See the comment on the outstandingMetadataUpdates field for why this + // block below. + if c.outstandingMetadataUpdates.maybeBegin() { + doUpdate := func() { + // We forbid reassignments while we do a quick check for + // new assignments--for the direct consumer particularly, + // this prevents TOCTOU, and guards against a concurrent + // assignment from SetOffsets. 
+ c.mu.Lock() + defer c.mu.Unlock() + + switch { + case c.d != nil: + if new := c.d.findNewAssignments(); len(new) > 0 { + c.assignPartitions(new, assignWithoutInvalidating, c.d.tps, "new assignments from direct consumer") + } + case c.g != nil: + c.g.findNewAssignments() + } + + go c.loadSession().doOnMetadataUpdate() + } + + go func() { + again := true + for again { + doUpdate() + again = c.outstandingMetadataUpdates.maybeFinish(false) + } + }() + } +} + +func (s *consumerSession) doOnMetadataUpdate() { + if s == nil || s == noConsumerSession { // no session started yet + return + } + + s.listOrEpochMu.Lock() + defer s.listOrEpochMu.Unlock() + + if s.listOrEpochMetaCh == nil { + return // nothing waiting to load epochs / offsets + } + select { + case s.listOrEpochMetaCh <- struct{}{}: + default: + } +} + +type offsetLoadMap map[string]map[int32]offsetLoad + +// offsetLoad is effectively an Offset, but also includes a potential replica +// to directly use if a cursor had a preferred replica. +type offsetLoad struct { + replica int32 // -1 means leader + Offset +} + +func (o offsetLoad) MarshalJSON() ([]byte, error) { + if o.replica == -1 { + return o.Offset.MarshalJSON() + } + if o.relative == 0 { + return []byte(fmt.Sprintf(`{"Replica":%d,"At":%d,"Epoch":%d,"CurrentEpoch":%d}`, o.replica, o.at, o.epoch, o.currentEpoch)), nil + } + return []byte(fmt.Sprintf(`{"Replica":%d,"At":%d,"Relative":%d,"Epoch":%d,"CurrentEpoch":%d}`, o.replica, o.at, o.relative, o.epoch, o.currentEpoch)), nil +} + +func (o offsetLoadMap) errToLoaded(err error) []loadedOffset { + var loaded []loadedOffset + for t, ps := range o { + for p, o := range ps { + loaded = append(loaded, loadedOffset{ + topic: t, + partition: p, + err: err, + request: o, + }) + } + } + return loaded +} + +// Combines list and epoch loads into one type for simplicity. +type listOrEpochLoads struct { + // List and Epoch are public so that anything marshaling through + // reflect (i.e. json) can see the fields. + List offsetLoadMap + Epoch offsetLoadMap +} + +type listOrEpochLoadType uint8 + +const ( + loadTypeList listOrEpochLoadType = iota + loadTypeEpoch +) + +func (l listOrEpochLoadType) String() string { + switch l { + case loadTypeList: + return "list" + default: + return "epoch" + } +} + +// adds an offset to be loaded, ensuring it exists only in the final loadType. 
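+// For example (editorial note, mirroring the addLoad calls in assignPartitions
+// above): re-adding the same topic/partition with a different load type moves
+// it between the List and Epoch maps rather than duplicating it.
+//
+//	var loads listOrEpochLoads
+//	loads.addLoad("t", 0, loadTypeList, offsetLoad{replica: -1})
+//	loads.addLoad("t", 0, loadTypeEpoch, offsetLoad{replica: -1}) // now present only in loads.Epoch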
+func (l *listOrEpochLoads) addLoad(t string, p int32, loadType listOrEpochLoadType, load offsetLoad) { + l.removeLoad(t, p) + dst := &l.List + if loadType == loadTypeEpoch { + dst = &l.Epoch + } + + if *dst == nil { + *dst = make(offsetLoadMap) + } + ps := (*dst)[t] + if ps == nil { + ps = make(map[int32]offsetLoad) + (*dst)[t] = ps + } + ps[p] = load +} + +func (l *listOrEpochLoads) removeLoad(t string, p int32) { + for _, m := range []offsetLoadMap{ + l.List, + l.Epoch, + } { + if m == nil { + continue + } + ps := m[t] + if ps == nil { + continue + } + delete(ps, p) + if len(ps) == 0 { + delete(m, t) + } + } +} + +func (l listOrEpochLoads) each(fn func(string, int32)) { + for _, m := range []offsetLoadMap{ + l.List, + l.Epoch, + } { + for topic, partitions := range m { + for partition := range partitions { + fn(topic, partition) + } + } + } +} + +func (l *listOrEpochLoads) keepFilter(keep func(string, int32) bool) { + for _, m := range []offsetLoadMap{ + l.List, + l.Epoch, + } { + for t, ps := range m { + for p := range ps { + if !keep(t, p) { + delete(ps, p) + if len(ps) == 0 { + delete(m, t) + } + } + } + } + } +} + +// Merges loads into the caller; used to coalesce loads while a metadata update +// is happening (see the only use below). +func (l *listOrEpochLoads) mergeFrom(src listOrEpochLoads) { + for _, srcs := range []struct { + m offsetLoadMap + loadType listOrEpochLoadType + }{ + {src.List, loadTypeList}, + {src.Epoch, loadTypeEpoch}, + } { + for t, ps := range srcs.m { + for p, load := range ps { + l.addLoad(t, p, srcs.loadType, load) + } + } + } +} + +func (l listOrEpochLoads) isEmpty() bool { return len(l.List) == 0 && len(l.Epoch) == 0 } + +func (l listOrEpochLoads) loadWithSession(s *consumerSession, why string) { + if !l.isEmpty() { + s.incWorker() + go s.listOrEpoch(l, false, why) + } +} + +func (l listOrEpochLoads) loadWithSessionNow(s *consumerSession, why string) bool { + if !l.isEmpty() { + s.incWorker() + go s.listOrEpoch(l, true, why) + return true + } + return false +} + +// A consumer session is responsible for an era of fetching records for a set +// of cursors. The set can be added to without killing an active session, but +// it cannot be removed from. Removing any cursor from being consumed kills the +// current consumer session and begins a new one. +type consumerSession struct { + c *consumer + + ctx context.Context + cancel func() + + // tps tracks the topics that were assigned in this session. We use + // this field to build and handle list offset / load epoch requests. + tps *topicsPartitions + + // desireFetchCh is sized to the number of concurrent fetches we are + // configured to be able to send. + // + // We receive desires from sources, we reply when they can fetch, and + // they send back when they are done. Thus, three level chan. + desireFetchCh chan chan chan struct{} + cancelFetchCh chan chan chan struct{} + allowedFetches int + fetchManagerStarted atomicBool // atomic, once true, we start the fetch manager + + // Workers signify the number of fetch and list / epoch goroutines that + // are currently running within the context of this consumer session. + // Stopping a session only returns once workers hits zero. 
+ workersMu sync.Mutex + workersCond *sync.Cond + workers int + + listOrEpochMu sync.Mutex + listOrEpochLoadsWaiting listOrEpochLoads + listOrEpochMetaCh chan struct{} // non-nil if Loads is non-nil, signalled on meta update + listOrEpochLoadsLoading listOrEpochLoads +} + +func (c *consumer) newConsumerSession(tps *topicsPartitions) *consumerSession { + if tps == nil || len(tps.load()) == 0 { + return noConsumerSession + } + ctx, cancel := context.WithCancel(c.cl.ctx) + session := &consumerSession{ + c: c, + + ctx: ctx, + cancel: cancel, + + tps: tps, + + // NOTE: This channel must be unbuffered. If it is buffered, + // then we can exit manageFetchConcurrency when we should not + // and have a deadlock: + // + // * source sends to desireFetchCh, is buffered + // * source seeds context canceled, tries sending to cancelFetchCh + // * session concurrently sees context canceled + // * session has not drained desireFetchCh, sees activeFetches is 0 + // * session exits + // * source permanently hangs sending to desireFetchCh + // + // By having desireFetchCh unbuffered, we *ensure* that if the + // source indicates it wants a fetch, the session knows it and + // tracks it in wantFetch. + // + // See #198. + desireFetchCh: make(chan chan chan struct{}), + + cancelFetchCh: make(chan chan chan struct{}, 4), + allowedFetches: c.cl.cfg.maxConcurrentFetches, + } + session.workersCond = sync.NewCond(&session.workersMu) + return session +} + +func (s *consumerSession) desireFetch() chan chan chan struct{} { + if !s.fetchManagerStarted.Swap(true) { + go s.manageFetchConcurrency() + } + return s.desireFetchCh +} + +func (s *consumerSession) manageFetchConcurrency() { + var ( + activeFetches int + doneFetch = make(chan struct{}, 20) + wantFetch []chan chan struct{} + + ctxCh = s.ctx.Done() + wantQuit bool + ) + for { + select { + case register := <-s.desireFetchCh: + wantFetch = append(wantFetch, register) + case cancel := <-s.cancelFetchCh: + var found bool + for i, want := range wantFetch { + if want == cancel { + _ = append(wantFetch[i:], wantFetch[i+1:]...) + wantFetch = wantFetch[:len(wantFetch)-1] + found = true + } + } + // If we did not find the channel, then we have already + // sent to it, removed it from our wantFetch list, and + // bumped activeFetches. + if !found { + activeFetches-- + } + + case <-doneFetch: + activeFetches-- + case <-ctxCh: + wantQuit = true + ctxCh = nil + } + + if len(wantFetch) > 0 && (activeFetches < s.allowedFetches || s.allowedFetches == 0) { // 0 means unbounded + wantFetch[0] <- doneFetch + wantFetch = wantFetch[1:] + activeFetches++ + continue + } + + if wantQuit && activeFetches == 0 { + return + } + } +} + +func (s *consumerSession) incWorker() { + if s == noConsumerSession { // from startNewSession + return + } + s.workersMu.Lock() + defer s.workersMu.Unlock() + s.workers++ +} + +func (s *consumerSession) decWorker() { + if s == noConsumerSession { // from followup to startNewSession + return + } + s.workersMu.Lock() + defer s.workersMu.Unlock() + s.workers-- + if s.workers == 0 { + s.workersCond.Broadcast() + } +} + +// noConsumerSession exists because we cannot store nil into an atomic.Value. +var noConsumerSession = new(consumerSession) + +func (c *consumer) loadSession() *consumerSession { + if session := c.session.Load(); session != nil { + return session.(*consumerSession) + } + return noConsumerSession +} + +// Guards against a session being stopped, and must be paired with an unguard. +// This returns a new session if there was no session. 
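+//
+// Editorial sketch of the expected pairing, as used by assignPartitions above:
+//
+//	session := c.guardSessionChange(tps)
+//	// ... add assignments / load offsets into the guarded session ...
+//	c.unguardSessionChange(session)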
+// +// The purpose of this function is when performing additive-only changes to an +// existing session, because additive-only changes can avoid killing a running +// session. +func (c *consumer) guardSessionChange(tps *topicsPartitions) *consumerSession { + c.sessionChangeMu.Lock() + + session := c.loadSession() + if session == noConsumerSession { + // If there is no session, we simply store one. This is fine; + // sources will be able to begin a fetch loop, but they will + // have no cursors to consume yet. + session = c.newConsumerSession(tps) + c.session.Store(session) + } + + return session +} + +// For the same reason below as in startNewSession, we inc a worker before +// unguarding. This allows the unguarding to execute a bit of logic if +// necessary before the session can be stopped. +func (c *consumer) unguardSessionChange(session *consumerSession) { + session.incWorker() + c.sessionChangeMu.Unlock() +} + +// Stops an active consumer session if there is one, and does not return until +// all fetching, listing, offset for leader epoching is complete. This +// invalidates any buffered fetches for the previous session and returns any +// partitions that were listing offsets or loading epochs. +func (c *consumer) stopSession() (listOrEpochLoads, *topicsPartitions) { + c.sessionChangeMu.Lock() + + session := c.loadSession() + + if session == noConsumerSession { + return listOrEpochLoads{}, nil // we had no session + } + + // Before storing noConsumerSession, cancel our old. This pairs + // with the reverse ordering in source, which checks noConsumerSession + // then checks the session context. + session.cancel() + + // At this point, any in progress fetches, offset lists, or epoch loads + // will quickly die. + + c.session.Store(noConsumerSession) + + // At this point, no source can be started, because the session is + // noConsumerSession. + + session.workersMu.Lock() + for session.workers > 0 { + session.workersCond.Wait() + } + session.workersMu.Unlock() + + // At this point, all fetches, lists, and loads are dead. We can close + // our num-fetches manager without worrying about a source trying to + // register itself. + + c.cl.allSinksAndSources(func(sns sinkAndSource) { + sns.source.session.reset() + }) + + // At this point, if we begin fetching anew, then the sources will not + // be using stale fetch sessions. + + c.sourcesReadyMu.Lock() + defer c.sourcesReadyMu.Unlock() + for _, ready := range c.sourcesReadyForDraining { + ready.discardBuffered() + } + c.sourcesReadyForDraining = nil + + // At this point, we have invalidated any buffered data from the prior + // session. We leave any fake things that were ready so that the user + // can act on errors. The session is dead. + + session.listOrEpochLoadsWaiting.mergeFrom(session.listOrEpochLoadsLoading) + return session.listOrEpochLoadsWaiting, session.tps +} + +// Starts a new consumer session, allowing fetches to happen. +// +// If there are no topic partitions to start with, this returns noConsumerSession. +// +// This is returned with 1 worker; decWorker must be called after return. The +// 1 worker allows for initialization work to prevent the session from being +// immediately stopped. +func (c *consumer) startNewSession(tps *topicsPartitions) *consumerSession { + if c.kill.Load() { + tps = nil + } + session := c.newConsumerSession(tps) + c.session.Store(session) + + // Ensure that this session is usable before being stopped immediately. + // The caller must dec workers. 
+ session.incWorker() + + // At this point, sources can start consuming. + + c.sessionChangeMu.Unlock() + + c.cl.allSinksAndSources(func(sns sinkAndSource) { + sns.source.maybeConsume() + }) + + // At this point, any source that was not consuming becauase it saw the + // session was stopped has been notified to potentially start consuming + // again. The session is alive. + + return session +} + +// This function is responsible for issuing ListOffsets or +// OffsetForLeaderEpoch. These requests's responses are only handled within +// the context of a consumer session. +func (s *consumerSession) listOrEpoch(waiting listOrEpochLoads, immediate bool, why string) { + defer s.decWorker() + + // It is possible for a metadata update to try to migrate partition + // loads if the update moves partitions between brokers. If we are + // closing the client, the consumer session could already be stopped, + // but this stops before the metadata goroutine is killed. So, if we + // are in this function but actually have no session, we return. + if s == noConsumerSession { + return + } + + wait := true + if immediate { + s.c.cl.triggerUpdateMetadataNow(why) + } else { + wait = s.c.cl.triggerUpdateMetadata(false, why) // avoid trigger if within refresh interval + } + + s.listOrEpochMu.Lock() // collapse any listOrEpochs that occur during meta update into one + if !s.listOrEpochLoadsWaiting.isEmpty() { + s.listOrEpochLoadsWaiting.mergeFrom(waiting) + s.listOrEpochMu.Unlock() + return + } + s.listOrEpochLoadsWaiting = waiting + s.listOrEpochMetaCh = make(chan struct{}, 1) + s.listOrEpochMu.Unlock() + + if wait { + select { + case <-s.ctx.Done(): + return + case <-s.listOrEpochMetaCh: + } + } + + s.listOrEpochMu.Lock() + loading := s.listOrEpochLoadsWaiting + s.listOrEpochLoadsLoading.mergeFrom(loading) + s.listOrEpochLoadsWaiting = listOrEpochLoads{} + s.listOrEpochMetaCh = nil + s.listOrEpochMu.Unlock() + + brokerLoads := s.mapLoadsToBrokers(loading) + + results := make(chan loadedOffsets, 2*len(brokerLoads)) // each broker can receive up to two requests + + var issued, received int + for broker, brokerLoad := range brokerLoads { + s.c.cl.cfg.logger.Log(LogLevelDebug, "offsets to load broker", "broker", broker.meta.NodeID, "load", brokerLoad) + if len(brokerLoad.List) > 0 { + issued++ + go s.c.cl.listOffsetsForBrokerLoad(s.ctx, broker, brokerLoad.List, s.tps, results) + } + if len(brokerLoad.Epoch) > 0 { + issued++ + go s.c.cl.loadEpochsForBrokerLoad(s.ctx, broker, brokerLoad.Epoch, s.tps, results) + } + } + + var reloads listOrEpochLoads + defer func() { + if !reloads.isEmpty() { + s.incWorker() + go func() { + // Before we dec our worker, we must add the + // reloads back into the session's waiting loads. + // Doing so allows a concurrent stopSession to + // track the waiting loads, whereas if we did not + // add things back to the session, we could abandon + // loading these offsets and have a stuck cursor. + defer s.decWorker() + defer reloads.loadWithSession(s, "reload offsets from load failure") + after := time.NewTimer(time.Second) + defer after.Stop() + select { + case <-after.C: + case <-s.ctx.Done(): + return + } + }() + } + }() + + for received != issued { + select { + case <-s.ctx.Done(): + // If we return early, our session was canceled. We do + // not move loading list or epoch loads back to + // waiting; the session stopping manages that. 
+ return + case loaded := <-results: + received++ + reloads.mergeFrom(s.handleListOrEpochResults(loaded)) + } + } +} + +// Called within a consumer session, this function handles results from list +// offsets or epoch loads and returns any loads that should be retried. +// +// To us, all errors are reloadable. We either have request level retryable +// errors (unknown partition, etc) or non-retryable errors (auth), or we have +// request issuing errors (no dial, connection cut repeatedly). +// +// For retryable request errors, we may as well back off a little bit to allow +// Kafka to harmonize if the topic exists / etc. +// +// For non-retryable request errors, we may as well retry to both (a) allow the +// user more signals about a problem that they can maybe fix within Kafka (i.e. +// the auth), and (b) force the user to notice errors. +// +// For request issuing errors, we may as well continue to retry because there +// is not much else we can do. RequestWith already retries, but returns when +// the retry limit is hit. We will backoff 1s and then allow RequestWith to +// continue requesting and backing off. +func (s *consumerSession) handleListOrEpochResults(loaded loadedOffsets) (reloads listOrEpochLoads) { + // This function can be running twice concurrently, so we need to guard + // listOrEpochLoadsLoading and usingCursors. For simplicity, we just + // guard this entire function. + + debug := s.c.cl.cfg.logger.Level() >= LogLevelDebug + + var using map[string]map[int32]EpochOffset + type epochOffsetWhy struct { + EpochOffset + error + } + var reloading map[string]map[int32]epochOffsetWhy + if debug { + using = make(map[string]map[int32]EpochOffset) + reloading = make(map[string]map[int32]epochOffsetWhy) + defer func() { + t := "list" + if loaded.loadType == loadTypeEpoch { + t = "epoch" + } + s.c.cl.cfg.logger.Log(LogLevelDebug, fmt.Sprintf("handled %s results", t), "broker", logID(loaded.broker), "using", using, "reloading", reloading) + }() + } + + s.listOrEpochMu.Lock() + defer s.listOrEpochMu.Unlock() + + for _, load := range loaded.loaded { + s.listOrEpochLoadsLoading.removeLoad(load.topic, load.partition) // remove the tracking of this load from our session + + use := func() { + if debug { + tusing := using[load.topic] + if tusing == nil { + tusing = make(map[int32]EpochOffset) + using[load.topic] = tusing + } + tusing[load.partition] = EpochOffset{load.leaderEpoch, load.offset} + } + + load.cursor.setOffset(cursorOffset{ + offset: load.offset, + lastConsumedEpoch: load.leaderEpoch, + }) + load.cursor.allowUsable() + s.c.usingCursors.use(load.cursor) + } + + var edl *ErrDataLoss + switch { + case errors.As(load.err, &edl): + s.c.addFakeReadyForDraining(load.topic, load.partition, load.err, "notification of data loss") // signal we lost data, but set the cursor to what we can + use() + + case load.err == nil: + use() + + default: // from ErrorCode in a response, or broker request err, or request is canceled as our session is ending + reloads.addLoad(load.topic, load.partition, loaded.loadType, load.request) + if !kerr.IsRetriable(load.err) && !isRetryableBrokerErr(load.err) && !isDialNonTimeoutErr(load.err) && !isContextErr(load.err) { // non-retryable response error; signal such in a response + s.c.addFakeReadyForDraining(load.topic, load.partition, load.err, fmt.Sprintf("notification of non-retryable error from %s request", loaded.loadType)) + } + + if debug { + treloading := reloading[load.topic] + if treloading == nil { + treloading = make(map[int32]epochOffsetWhy) + 
reloading[load.topic] = treloading + } + treloading[load.partition] = epochOffsetWhy{EpochOffset{load.leaderEpoch, load.offset}, load.err} + } + } + } + + return reloads +} + +// Splits the loads into per-broker loads, mapping each partition to the broker +// that leads that partition. +func (s *consumerSession) mapLoadsToBrokers(loads listOrEpochLoads) map[*broker]listOrEpochLoads { + brokerLoads := make(map[*broker]listOrEpochLoads) + + s.c.cl.brokersMu.RLock() // hold mu so we can check if partition leaders exist + defer s.c.cl.brokersMu.RUnlock() + + brokers := s.c.cl.brokers + seed := s.c.cl.loadSeeds()[0] + + topics := s.tps.load() + for _, loads := range []struct { + m offsetLoadMap + loadType listOrEpochLoadType + }{ + {loads.List, loadTypeList}, + {loads.Epoch, loadTypeEpoch}, + } { + for topic, partitions := range loads.m { + topicPartitions := topics.loadTopic(topic) // this must exist, it not existing would be a bug + for partition, offset := range partitions { + // We default to the first seed broker if we have no loaded + // the broker leader for this partition (we should have). + // Worst case, we get an error for the partition and retry. + broker := seed + if partition >= 0 && partition < int32(len(topicPartitions.partitions)) { + topicPartition := topicPartitions.partitions[partition] + brokerID := topicPartition.leader + if offset.replica != -1 { + // If we are fetching from a follower, we can list + // offsets against the follower itself. The replica + // being non-negative signals that. + brokerID = offset.replica + } + if tryBroker := findBroker(brokers, brokerID); tryBroker != nil { + broker = tryBroker + } + offset.currentEpoch = topicPartition.leaderEpoch // ensure we set our latest epoch for the partition + } + + brokerLoad := brokerLoads[broker] + brokerLoad.addLoad(topic, partition, loads.loadType, offset) + brokerLoads[broker] = brokerLoad + } + } + } + + return brokerLoads +} + +// The result of ListOffsets or OffsetForLeaderEpoch for an individual +// partition. +type loadedOffset struct { + topic string + partition int32 + + // The following three are potentially unset if the error is non-nil + // and not ErrDataLoss; these are what we loaded. + cursor *cursor + offset int64 + leaderEpoch int32 + + // Any error encountered for loading this partition, or for epoch + // loading, potentially ErrDataLoss. If this error is not retryable, we + // avoid reloading the offset and instead inject a fake partition for + // PollFetches containing this error. + err error + + // The original request. + request offsetLoad +} + +// The results of ListOffsets or OffsetForLeaderEpoch for an individual broker. +type loadedOffsets struct { + broker int32 + loaded []loadedOffset + loadType listOrEpochLoadType +} + +func (l *loadedOffsets) add(a loadedOffset) { l.loaded = append(l.loaded, a) } +func (l *loadedOffsets) addAll(as []loadedOffset) loadedOffsets { + l.loaded = append(l.loaded, as...) 
+ return *l +} + +func (cl *Client) listOffsetsForBrokerLoad(ctx context.Context, broker *broker, load offsetLoadMap, tps *topicsPartitions, results chan<- loadedOffsets) { + loaded := loadedOffsets{broker: broker.meta.NodeID, loadType: loadTypeList} + + req1, req2 := load.buildListReq(cl.cfg.isolationLevel) + var ( + wg sync.WaitGroup + kresp2 kmsg.Response + err2 error + ) + if req2 != nil { + wg.Add(1) + go func() { + defer wg.Done() + kresp2, err2 = broker.waitResp(ctx, req2) + }() + } + kresp, err := broker.waitResp(ctx, req1) + wg.Wait() + if err != nil || err2 != nil { + results <- loaded.addAll(load.errToLoaded(err)) + return + } + + topics := tps.load() + resp := kresp.(*kmsg.ListOffsetsResponse) + + // If we issued a second req to check that an exact offset is in + // bounds, then regrettably for safety, we have to ensure that the + // shapes of both responses match, and the topic & partition at each + // index matches. Anything that does not match is skipped (and would be + // a bug from Kafka), and we at the end return UnknownTopicOrPartition. + var resp2 *kmsg.ListOffsetsResponse + if req2 != nil { + resp2 = kresp2.(*kmsg.ListOffsetsResponse) + for _, r := range []*kmsg.ListOffsetsResponse{ + resp, + resp2, + } { + ts := r.Topics + sort.Slice(ts, func(i, j int) bool { + return ts[i].Topic < ts[j].Topic + }) + for i := range ts { + ps := ts[i].Partitions + sort.Slice(ps, func(i, j int) bool { + return ps[i].Partition < ps[j].Partition + }) + } + } + + lt := resp.Topics + rt := resp2.Topics + lkeept := lt[:0] + rkeept := rt[:0] + // Over each response, we only keep the topic if the topics match. + for len(lt) > 0 && len(rt) > 0 { + if lt[0].Topic < rt[0].Topic { + lt = lt[1:] + continue + } + if rt[0].Topic < lt[0].Topic { + rt = rt[1:] + continue + } + // As well, for topics that match, we only keep + // partitions that match. In this case, we also want + // both partitions to be error free, otherwise we keep + // an error on both. If one has old style offsets, + // both must. + lp := lt[0].Partitions + rp := rt[0].Partitions + lkeepp := lp[:0] + rkeepp := rp[:0] + for len(lp) > 0 && len(rp) > 0 { + if lp[0].Partition < rp[0].Partition { + lp = lp[1:] + continue + } + if rp[0].Partition < lp[0].Partition { + rp = rp[1:] + continue + } + if len(lp[0].OldStyleOffsets) > 0 && len(rp[0].OldStyleOffsets) == 0 || + len(lp[0].OldStyleOffsets) == 0 && len(rp[0].OldStyleOffsets) > 0 { + lp = lp[1:] + rp = rp[1:] + continue + } + if lp[0].ErrorCode != 0 { + rp[0].ErrorCode = lp[0].ErrorCode + } else if rp[0].ErrorCode != 0 { + lp[0].ErrorCode = rp[0].ErrorCode + } + lkeepp = append(lkeepp, lp[0]) + rkeepp = append(rkeepp, rp[0]) + lp = lp[1:] + rp = rp[1:] + } + // Now we update the partitions in the topic we are + // keeping, and keep our topic. + lt[0].Partitions = lkeepp + rt[0].Partitions = rkeepp + lkeept = append(lkeept, lt[0]) + rkeept = append(rkeept, rt[0]) + lt = lt[1:] + rt = rt[1:] + } + // Finally, update each response with the topics we kept. The + // shapes and indices are the same. 
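// The reconciliation above is a two-pointer intersection over two sorted
// slices: advance whichever side is behind and keep only entries present in
// both, reusing each slice's backing array for the kept elements. The same
// idea over plain ints, as a standalone sketch (illustrative only):
//
//	func intersectSorted(a, b []int) (keptA, keptB []int) {
//		keptA, keptB = a[:0], b[:0] // safe: the write index never passes the read index
//		for len(a) > 0 && len(b) > 0 {
//			switch {
//			case a[0] < b[0]:
//				a = a[1:]
//			case b[0] < a[0]:
//				b = b[1:]
//			default: // both sides have this element: keep it on both
//				keptA = append(keptA, a[0])
//				keptB = append(keptB, b[0])
//				a, b = a[1:], b[1:]
//			}
//		}
//		return keptA, keptB
//	}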
+ resp.Topics = lkeept + resp2.Topics = rkeept + } + + poffset := func(p *kmsg.ListOffsetsResponseTopicPartition) int64 { + offset := p.Offset + if len(p.OldStyleOffsets) > 0 { + offset = p.OldStyleOffsets[0] // list offsets v0 + } + return offset + } + + for i, rTopic := range resp.Topics { + topic := rTopic.Topic + loadParts, ok := load[topic] + if !ok { + continue // should not happen: kafka replied with something we did not ask for + } + + topicPartitions := topics.loadTopic(topic) // must be non-nil at this point + for j, rPartition := range rTopic.Partitions { + partition := rPartition.Partition + loadPart, ok := loadParts[partition] + if !ok { + continue // should not happen: kafka replied with something we did not ask for + } + + if err := kerr.ErrorForCode(rPartition.ErrorCode); err != nil { + loaded.add(loadedOffset{ + topic: topic, + partition: partition, + err: err, + request: loadPart, + }) + continue // partition err: handled in results + } + + if partition < 0 || partition >= int32(len(topicPartitions.partitions)) { + continue // should not happen: we have not seen this partition from a metadata response + } + topicPartition := topicPartitions.partitions[partition] + + delete(loadParts, partition) + if len(loadParts) == 0 { + delete(load, topic) + } + + offset := poffset(&rPartition) + end := func() int64 { return poffset(&resp2.Topics[i].Partitions[j]) } + + // We ensured the resp2 shape is as we want and has no + // error, so resp2 lookups are safe. + if loadPart.afterMilli { + // If after a milli, if the milli is after the + // end of a partition, the offset is -1. We use + // our end offset request: anything after the + // end offset *now* is after our milli. + if offset == -1 { + offset = end() + } + } else if loadPart.at >= 0 { + // If an exact offset, we listed start and end. + // We validate the offset is within bounds. + end := end() + want := loadPart.at + loadPart.relative + if want >= offset { + offset = want + } + if want >= end { + offset = end + } + } else if loadPart.at == -2 && loadPart.relative > 0 { + // Relative to the start: both start & end were + // issued, and we bound to the end. + offset += loadPart.relative + if end := end(); offset >= end { + offset = end + } + } else if loadPart.at == -1 && loadPart.relative < 0 { + // Relative to the end: both start & end were + // issued, offset is currently the start, so we + // set to the end and then bound to the start. + start := offset + offset = end() + offset += loadPart.relative + if offset <= start { + offset = start + } + } + if offset < 0 { + offset = 0 // sanity + } + + loaded.add(loadedOffset{ + topic: topic, + partition: partition, + cursor: topicPartition.cursor, + offset: offset, + leaderEpoch: rPartition.LeaderEpoch, + request: loadPart, + }) + } + } + + results <- loaded.addAll(load.errToLoaded(kerr.UnknownTopicOrPartition)) +} + +func (*Client) loadEpochsForBrokerLoad(ctx context.Context, broker *broker, load offsetLoadMap, tps *topicsPartitions, results chan<- loadedOffsets) { + loaded := loadedOffsets{broker: broker.meta.NodeID, loadType: loadTypeEpoch} + + kresp, err := broker.waitResp(ctx, load.buildEpochReq()) + if err != nil { + results <- loaded.addAll(load.errToLoaded(err)) + return + } + + // If the version is < 2, we are speaking to an old broker. We should + // not have an old version, but we could have spoken to a new broker + // first then an old broker in the middle of a broker roll. For now, we + // will just loop retrying until the broker is upgraded. 
+ + topics := tps.load() + resp := kresp.(*kmsg.OffsetForLeaderEpochResponse) + for _, rTopic := range resp.Topics { + topic := rTopic.Topic + loadParts, ok := load[topic] + if !ok { + continue // should not happen: kafka replied with something we did not ask for + } + + topicPartitions := topics.loadTopic(topic) // must be non-nil at this point + for _, rPartition := range rTopic.Partitions { + partition := rPartition.Partition + loadPart, ok := loadParts[partition] + if !ok { + continue // should not happen: kafka replied with something we did not ask for + } + + if err := kerr.ErrorForCode(rPartition.ErrorCode); err != nil { + loaded.add(loadedOffset{ + topic: topic, + partition: partition, + err: err, + request: loadPart, + }) + continue // partition err: handled in results + } + + if partition < 0 || partition >= int32(len(topicPartitions.partitions)) { + continue // should not happen: we have not seen this partition from a metadata response + } + topicPartition := topicPartitions.partitions[partition] + + delete(loadParts, partition) + if len(loadParts) == 0 { + delete(load, topic) + } + + // Epoch loading never uses noReset nor afterMilli; + // this at is the offset we wanted to consume and are + // validating. + offset := loadPart.at + var err error + if rPartition.EndOffset < offset { + err = &ErrDataLoss{topic, partition, offset, rPartition.EndOffset} + offset = rPartition.EndOffset + } + + loaded.add(loadedOffset{ + topic: topic, + partition: partition, + cursor: topicPartition.cursor, + offset: offset, + leaderEpoch: rPartition.LeaderEpoch, + err: err, + request: loadPart, + }) + } + } + + results <- loaded.addAll(load.errToLoaded(kerr.UnknownTopicOrPartition)) +} + +// In general this returns one request, but if the user is using exact offsets +// rather than start/end, then we issue both the start and end requests to +// ensure the user's requested offset is within bounds. +func (o offsetLoadMap) buildListReq(isolationLevel int8) (r1, r2 *kmsg.ListOffsetsRequest) { + r1 = kmsg.NewPtrListOffsetsRequest() + r1.ReplicaID = -1 + r1.IsolationLevel = isolationLevel + r1.Topics = make([]kmsg.ListOffsetsRequestTopic, 0, len(o)) + var createEnd bool + for topic, partitions := range o { + parts := make([]kmsg.ListOffsetsRequestTopicPartition, 0, len(partitions)) + for partition, offset := range partitions { + // If this is a milli request, we issue two lists: if + // our milli is after the end of a partition, we get no + // offset back and we want to know the start offset + // (since it will be after our milli). + // + // If we are using an exact offset request, we issue + // the start and end so that we can bound the exact + // offset to being within that range. + // + // If we are using a relative offset, we potentially + // issue the end request because relative may shift us + // too far in the other direction. 
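// The timestamps assigned below follow the standard ListOffsets sentinels:
// -2 requests the log start offset, -1 requests the log end offset, and a
// non-negative value requests the first offset whose timestamp is at or after
// that many milliseconds. A hand-built single-partition request using the
// same kmsg types, as a sketch (the topic name, partition, and the ctx and cl
// variables holding a context and a *Client are illustrative assumptions):
//
//	req := kmsg.NewPtrListOffsetsRequest()
//	req.ReplicaID = -1
//	req.IsolationLevel = 0 // read_uncommitted
//	p := kmsg.NewListOffsetsRequestTopicPartition()
//	p.Partition = 0
//	p.CurrentLeaderEpoch = -1 // no epoch known; skip KIP-320 fencing
//	p.Timestamp = -1          // -1 = log end offset, -2 = log start offset
//	p.MaxNumOffsets = 1
//	t := kmsg.NewListOffsetsRequestTopic()
//	t.Topic = "logs"
//	t.Partitions = []kmsg.ListOffsetsRequestTopicPartition{p}
//	req.Topics = []kmsg.ListOffsetsRequestTopic{t}
//	resp, err := req.RequestWith(ctx, cl)
//	if err == nil {
//		err = kerr.ErrorForCode(resp.Topics[0].Partitions[0].ErrorCode)
//	}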
+ timestamp := offset.at + if offset.afterMilli { + createEnd = true + } else if timestamp >= 0 || timestamp == -2 && offset.relative > 0 || timestamp == -1 && offset.relative < 0 { + timestamp = -2 + createEnd = true + } + p := kmsg.NewListOffsetsRequestTopicPartition() + p.Partition = partition + p.CurrentLeaderEpoch = offset.currentEpoch // KIP-320 + p.Timestamp = timestamp + p.MaxNumOffsets = 1 + + parts = append(parts, p) + } + t := kmsg.NewListOffsetsRequestTopic() + t.Topic = topic + t.Partitions = parts + r1.Topics = append(r1.Topics, t) + } + + if createEnd { + r2 = kmsg.NewPtrListOffsetsRequest() + *r2 = *r1 + r2.Topics = append([]kmsg.ListOffsetsRequestTopic(nil), r1.Topics...) + for i := range r1.Topics { + l := &r2.Topics[i] + r := &r1.Topics[i] + *l = *r + l.Partitions = append([]kmsg.ListOffsetsRequestTopicPartition(nil), r.Partitions...) + for i := range l.Partitions { + l.Partitions[i].Timestamp = -1 + } + } + } + + return r1, r2 +} + +func (o offsetLoadMap) buildEpochReq() *kmsg.OffsetForLeaderEpochRequest { + req := kmsg.NewPtrOffsetForLeaderEpochRequest() + req.ReplicaID = -1 + req.Topics = make([]kmsg.OffsetForLeaderEpochRequestTopic, 0, len(o)) + for topic, partitions := range o { + parts := make([]kmsg.OffsetForLeaderEpochRequestTopicPartition, 0, len(partitions)) + for partition, offset := range partitions { + p := kmsg.NewOffsetForLeaderEpochRequestTopicPartition() + p.Partition = partition + p.CurrentLeaderEpoch = offset.currentEpoch + p.LeaderEpoch = offset.epoch + parts = append(parts, p) + } + t := kmsg.NewOffsetForLeaderEpochRequestTopic() + t.Topic = topic + t.Partitions = parts + req.Topics = append(req.Topics, t) + } + return req +} diff --git a/vendor/github.com/twmb/franz-go/pkg/kgo/consumer_direct.go b/vendor/github.com/twmb/franz-go/pkg/kgo/consumer_direct.go new file mode 100644 index 000000000000..bf42dbcae4e8 --- /dev/null +++ b/vendor/github.com/twmb/franz-go/pkg/kgo/consumer_direct.go @@ -0,0 +1,159 @@ +package kgo + +type directConsumer struct { + cfg *cfg + tps *topicsPartitions // data for topics that the user assigned + using mtmps // topics we are currently using + m mtmps // mirrors cfg.topics and cfg.partitions, but can change with Purge or Add + ps map[string]map[int32]Offset // mirrors cfg.partitions, changed in Purge or Add + reSeen map[string]bool // topics we evaluated against regex, and whether we want them or not +} + +func (c *consumer) initDirect() { + d := &directConsumer{ + cfg: &c.cl.cfg, + tps: newTopicsPartitions(), + reSeen: make(map[string]bool), + using: make(mtmps), + m: make(mtmps), + ps: make(map[string]map[int32]Offset), + } + c.d = d + + if d.cfg.regex { + return + } + + var topics []string + for topic, partitions := range d.cfg.partitions { + topics = append(topics, topic) + for partition := range partitions { + d.m.add(topic, partition) + } + p := make(map[int32]Offset, len(partitions)) + for partition, offset := range partitions { + p[partition] = offset + } + d.ps[topic] = p + } + for topic := range d.cfg.topics { + topics = append(topics, topic) + d.m.addt(topic) + } + d.tps.storeTopics(topics) // prime topics to load if non-regex (this is of no benefit if regex) +} + +// For SetOffsets, unlike the group consumer, we just blindly translate the +// input EpochOffsets into Offsets, and those will be set directly. 
+func (*directConsumer) getSetAssigns(setOffsets map[string]map[int32]EpochOffset) (assigns map[string]map[int32]Offset) { + assigns = make(map[string]map[int32]Offset) + for topic, partitions := range setOffsets { + set := make(map[int32]Offset) + for partition, eo := range partitions { + set[partition] = Offset{ + at: eo.Offset, + epoch: eo.Epoch, + } + } + assigns[topic] = set + } + return assigns +} + +// findNewAssignments returns new partitions to consume at given offsets +// based off the current topics. +func (d *directConsumer) findNewAssignments() map[string]map[int32]Offset { + topics := d.tps.load() + + var rns reNews + if d.cfg.regex { + defer rns.log(d.cfg) + } + + toUse := make(map[string]map[int32]Offset, 10) + for topic, topicPartitions := range topics { + var useTopic bool + if d.cfg.regex { + want, seen := d.reSeen[topic] + if !seen { + for rawRe, re := range d.cfg.topics { + if want = re.MatchString(topic); want { + rns.add(rawRe, topic) + break + } + } + if !want { + rns.skip(topic) + } + d.reSeen[topic] = want + } + useTopic = want + } else { + useTopic = d.m.onlyt(topic) + } + + // If the above detected that we want to keep this topic, we + // set all partitions as usable. + // + // For internal partitions, we only allow consuming them if + // the topic is explicitly specified. + if useTopic { + partitions := topicPartitions.load() + if d.cfg.regex && partitions.isInternal || len(partitions.partitions) == 0 { + continue + } + toUseTopic := make(map[int32]Offset, len(partitions.partitions)) + for partition := range partitions.partitions { + toUseTopic[int32(partition)] = d.cfg.resetOffset + } + toUse[topic] = toUseTopic + } + + // Lastly, if this topic has some specific partitions pinned, + // we set those. We only use partitions from topics that have + // not been purged. + for topic := range d.m { + for partition, offset := range d.ps[topic] { + toUseTopic, exists := toUse[topic] + if !exists { + toUseTopic = make(map[int32]Offset, 10) + toUse[topic] = toUseTopic + } + toUseTopic[partition] = offset + } + } + } + + // With everything we want to consume, remove what we are already. + for topic, partitions := range d.using { + toUseTopic, exists := toUse[topic] + if !exists { + continue // metadata update did not return this topic (regex or failing load) + } + for partition := range partitions { + delete(toUseTopic, partition) + } + if len(toUseTopic) == 0 { + delete(toUse, topic) + } + } + + if len(toUse) == 0 { + return nil + } + + // Finally, toUse contains new partitions that we must consume. + // Add them to our using map and assign them. 
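// Everything findNewAssignments works from was fixed at client creation. A
// minimal sketch of configuring this direct (non-group) consumer; the broker
// address, topic names, and offsets are illustrative assumptions:
//
//	cl, err := kgo.NewClient(
//		kgo.SeedBrokers("localhost:9092"),
//		// Consume whole topics, starting at the configured reset offset...
//		kgo.ConsumeTopics("logs"),
//		// ...and/or pin specific partitions of other topics to exact offsets.
//		kgo.ConsumePartitions(map[string]map[int32]kgo.Offset{
//			"audit": {0: kgo.NewOffset().At(42)},
//		}),
//		kgo.ConsumeResetOffset(kgo.NewOffset().AtStart()),
//	)
//	if err != nil {
//		panic(err) // handle properly in real code
//	}
//	defer cl.Close()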
+ for topic, partitions := range toUse { + topicUsing, exists := d.using[topic] + if !exists { + topicUsing = make(map[int32]struct{}) + d.using[topic] = topicUsing + } + for partition := range partitions { + topicUsing[partition] = struct{}{} + } + } + + return toUse +} diff --git a/vendor/github.com/twmb/franz-go/pkg/kgo/consumer_group.go b/vendor/github.com/twmb/franz-go/pkg/kgo/consumer_group.go new file mode 100644 index 000000000000..c1946eb40e72 --- /dev/null +++ b/vendor/github.com/twmb/franz-go/pkg/kgo/consumer_group.go @@ -0,0 +1,2908 @@ +package kgo + +import ( + "bytes" + "context" + "errors" + "fmt" + "sort" + "strings" + "sync" + "sync/atomic" + "time" + + "github.com/twmb/franz-go/pkg/kerr" + "github.com/twmb/franz-go/pkg/kmsg" +) + +type groupConsumer struct { + c *consumer // used to change consumer state; generally c.mu is grabbed on access + cl *Client // used for running requests / adding to topics map + cfg *cfg + + ctx context.Context + cancel func() + manageDone chan struct{} // closed once when the manage goroutine quits + + cooperative atomicBool // true if the group balancer chosen during Join is cooperative + + // The data for topics that the user assigned. Metadata updates the + // atomic.Value in each pointer atomically. If we are consuming via + // regex, metadata grabs the lock to add new topics. + tps *topicsPartitions + + reSeen map[string]bool // topics we evaluated against regex, and whether we want them or not + + // Full lock grabbed in CommitOffsetsSync, read lock grabbed in + // CommitOffsets, this lock ensures that only one sync commit can + // happen at once, and if it is happening, no other commit can be + // happening. + syncCommitMu sync.RWMutex + + rejoinCh chan string // cap 1; sent to if subscription changes (regex) + + // For EOS, before we commit, we force a heartbeat. If the client and + // group member are both configured properly, then the transactional + // timeout will be less than the session timeout. By forcing a + // heartbeat before the commit, if the heartbeat was successful, then + // we ensure that we will complete the transaction within the group + // session, meaning we will not commit after the group has rebalanced. + heartbeatForceCh chan func(error) + + // The following two are only updated in the manager / join&sync loop + // The nowAssigned map is read when commits fail: if the commit fails + // with ILLEGAL_GENERATION and it contains only partitions that are in + // nowAssigned, we re-issue. + lastAssigned map[string][]int32 + nowAssigned amtps + + // Fetching ensures we continue fetching offsets across cooperative + // rebalance if an offset fetch returns early due to an immediate + // rebalance. See the large comment on adjustCooperativeFetchOffsets + // for more details. + // + // This is modified only in that function, or in the manage loop on a + // hard error once the heartbeat/fetch has returned. + fetching map[string]map[int32]struct{} + + // onFetchedMu ensures we do not call onFetched nor adjustOffsets + // concurrent with onRevoked. + // + // The group session itself ensures that OnPartitions functions are + // serial, but offset fetching is concurrent with heartbeating and can + // finish before or after heartbeating has already detected a revoke. + // To make user lives easier, we guarantee that offset fetch callbacks + // cannot be concurrent with onRevoked with this mu. If fetch callbacks + // are present, we hook this mu into onRevoked, and we grab it in the + // locations fetch callbacks are called. 
We only have to worry about + // onRevoked because fetching offsets occurs after onAssigned, and + // onLost happens after fetching offsets is done. + onFetchedMu sync.Mutex + + // leader is whether we are the leader right now. This is set to false + // + // - set to false at the beginning of a join group session + // - set to true if join group response indicates we are leader + // - read on metadata updates in findNewAssignments + leader atomicBool + + // Set to true when ending a transaction committing transaction + // offsets, and then set to false immediately after before calling + // EndTransaction. + offsetsAddedToTxn bool + + // If we are leader, then other members may express interest to consume + // topics that we are not interested in consuming. We track the entire + // group's topics in external, and our fetchMetadata loop uses this. + // We store this as a pointer for address comparisons. + external atomic.Value // *groupExternal + + // See the big comment on `commit`. If we allow committing between + // join&sync, we occasionally see RebalanceInProgress or + // IllegalGeneration errors while cooperative consuming. + noCommitDuringJoinAndSync sync.RWMutex + + ////////////// + // mu block // + ////////////// + mu sync.Mutex + + // using is updated when finding new assignments, we always add to this + // if we want to consume a topic (or see there are more potential + // partitions). Only the leader can trigger a new group session if there + // are simply more partitions for existing topics. + // + // This is read when joining a group or leaving a group. + using map[string]int // topics *we* are currently using => # partitions known in that topic + + // uncommitted is read and updated all over: + // - updated before PollFetches returns + // - updated when directly setting offsets (to rewind, for transactions) + // - emptied when leaving a group + // - updated when revoking + // - updated after fetching offsets once we receive our group assignment + // - updated after we commit + // - read when getting uncommitted or committed + uncommitted uncommitted + + // memberID and generation are written to in the join and sync loop, + // and mostly read within that loop. This can be read during commits, + // which can happy any time. It is **recommended** to be done within + // the context of a group session, but (a) users may have some unique + // use cases, and (b) the onRevoke hook may take longer than a user + // expects, which would rotate a session. + memberGen groupMemberGen + + // commitCancel and commitDone are set under mu before firing off an + // async commit request. If another commit happens, it cancels the + // prior commit, waits for the prior to be done, and then starts its + // own. + commitCancel func() + commitDone chan struct{} + + // blockAuto is set and cleared in CommitOffsets{,Sync} to block + // autocommitting if autocommitting is active. This ensures that an + // autocommit does not cancel the user's manual commit. + blockAuto bool + + // We set this once to manage the group lifecycle once. 
+ managing bool + + dying bool // set when closing, read in findNewAssignments + left chan struct{} + leaveErr error // set before left is closed +} + +type groupMemberGen struct { + v atomic.Value // *groupMemberGenT +} + +type groupMemberGenT struct { + memberID string + generation int32 +} + +func (g *groupMemberGen) memberID() string { + memberID, _ := g.load() + return memberID +} + +func (g *groupMemberGen) generation() int32 { + _, generation := g.load() + return generation +} + +func (g *groupMemberGen) load() (memberID string, generation int32) { + v := g.v.Load() + if v == nil { + return "", -1 + } + t := v.(*groupMemberGenT) + return t.memberID, t.generation +} + +func (g *groupMemberGen) store(memberID string, generation int32) { + g.v.Store(&groupMemberGenT{memberID, generation}) +} + +func (g *groupMemberGen) storeMember(memberID string) { + g.store(memberID, g.generation()) +} + +// LeaveGroup leaves a group. Close automatically leaves the group, so this is +// only necessary to call if you plan to leave the group but continue to use +// the client. If a rebalance is in progress, this function waits for the +// rebalance to complete before the group can be left. This is necessary to +// allow you to safely issue one final offset commit in OnPartitionsRevoked. If +// you have overridden the default revoke, you must manually commit offsets +// before leaving the group. +// +// If you have configured the group with an InstanceID, this does not leave the +// group. With instance IDs, it is expected that clients will restart and +// re-use the same instance ID. To leave a group using an instance ID, you must +// manually issue a kmsg.LeaveGroupRequest or use an external tool (kafka +// scripts or kcl). +// +// It is recommended to use LeaveGroupContext to see if the leave was +// successful. +func (cl *Client) LeaveGroup() { + cl.LeaveGroupContext(cl.ctx) +} + +// LeaveGroup leaves a group. Close automatically leaves the group, so this is +// only necessary to call if you plan to leave the group but continue to use +// the client. If a rebalance is in progress, this function waits for the +// rebalance to complete before the group can be left. This is necessary to +// allow you to safely issue one final offset commit in OnPartitionsRevoked. If +// you have overridden the default revoke, you must manually commit offsets +// before leaving the group. +// +// The context can be used to avoid waiting for the client to leave the group. +// Not waiting may result in your client being stuck in the group and the +// partitions this client was consuming being stuck until the session timeout. +// This function returns any leave group error or context cancel error. If the +// context is nil, this immediately leaves the group and does not wait and does +// not return an error. +// +// If you have configured the group with an InstanceID, this does not leave the +// group. With instance IDs, it is expected that clients will restart and +// re-use the same instance ID. To leave a group using an instance ID, you must +// manually issue a kmsg.LeaveGroupRequest or use an external tool (kafka +// scripts or kcl). 
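// A typical shutdown bounds how long it is willing to wait for the final
// rebalance and commit, for example (the timeout and the surrounding shutdown
// flow are illustrative; requires the context and time packages):
//
//	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
//	defer cancel()
//	if err := cl.LeaveGroupContext(ctx); err != nil {
//		// The leave did not finish in time (or failed); the member will
//		// instead age out of the group after the session timeout.
//	}
//	cl.Close()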
+func (cl *Client) LeaveGroupContext(ctx context.Context) error { + c := &cl.consumer + if c.g == nil { + return nil + } + var immediate bool + if ctx == nil { + var cancel func() + ctx, cancel = context.WithCancel(context.Background()) + cancel() + immediate = true + } + + go func() { + c.waitAndAddRebalance() + c.mu.Lock() // lock for assign + c.assignPartitions(nil, assignInvalidateAll, nil, "invalidating all assignments in LeaveGroup") + c.g.leave(ctx) + c.mu.Unlock() + c.unaddRebalance() + }() + + select { + case <-ctx.Done(): + if immediate { + return nil + } + return ctx.Err() + case <-c.g.left: + return c.g.leaveErr + } +} + +// GroupMetadata returns the current group member ID and generation, or an +// empty string and -1 if not in the group. +func (cl *Client) GroupMetadata() (string, int32) { + g := cl.consumer.g + if g == nil { + return "", -1 + } + return g.memberGen.load() +} + +func (c *consumer) initGroup() { + ctx, cancel := context.WithCancel(c.cl.ctx) + g := &groupConsumer{ + c: c, + cl: c.cl, + cfg: &c.cl.cfg, + + ctx: ctx, + cancel: cancel, + + reSeen: make(map[string]bool), + + manageDone: make(chan struct{}), + tps: newTopicsPartitions(), + rejoinCh: make(chan string, 1), + heartbeatForceCh: make(chan func(error)), + using: make(map[string]int), + + left: make(chan struct{}), + } + c.g = g + if !g.cfg.setCommitCallback { + g.cfg.commitCallback = g.defaultCommitCallback + } + + if g.cfg.txnID == nil { + // We only override revoked / lost if they were not explicitly + // set by options. + if !g.cfg.setRevoked { + g.cfg.onRevoked = g.defaultRevoke + } + // For onLost, we do not want to commit in onLost, so we + // explicitly set onLost to an empty function to avoid the + // fallback to onRevoked. + if !g.cfg.setLost { + g.cfg.onLost = func(context.Context, *Client, map[string][]int32) {} + } + } else { + g.cfg.autocommitDisable = true + } + + for _, logOn := range []struct { + name string + set *func(context.Context, *Client, map[string][]int32) + }{ + {"OnPartitionsAssigned", &g.cfg.onAssigned}, + {"OnPartitionsRevoked", &g.cfg.onRevoked}, + {"OnPartitionsLost", &g.cfg.onLost}, + } { + user := *logOn.set + name := logOn.name + *logOn.set = func(ctx context.Context, cl *Client, m map[string][]int32) { + var ctxExpired bool + select { + case <-ctx.Done(): + ctxExpired = true + default: + } + if ctxExpired { + cl.cfg.logger.Log(LogLevelDebug, "entering "+name, "with", m, "context_expired", ctxExpired) + } else { + cl.cfg.logger.Log(LogLevelDebug, "entering "+name, "with", m) + } + if user != nil { + dup := make(map[string][]int32) + for k, vs := range m { + dup[k] = append([]int32(nil), vs...) + } + user(ctx, cl, dup) + } + } + } + + if g.cfg.onFetched != nil || g.cfg.adjustOffsetsBeforeAssign != nil { + revoked := g.cfg.onRevoked + g.cfg.onRevoked = func(ctx context.Context, cl *Client, m map[string][]int32) { + g.onFetchedMu.Lock() + defer g.onFetchedMu.Unlock() + revoked(ctx, cl, m) + } + } + + // For non-regex topics, we explicitly ensure they exist for loading + // metadata. This is of no impact if we are *also* consuming via regex, + // but that is no problem. + if len(g.cfg.topics) > 0 && !g.cfg.regex { + topics := make([]string, 0, len(g.cfg.topics)) + for topic := range g.cfg.topics { + topics = append(topics, topic) + } + g.tps.storeTopics(topics) + } +} + +// Manages the group consumer's join / sync / heartbeat / fetch offset flow. +// +// Once a group is assigned, we fire a metadata request for all topics the +// assignment specified interest in. 
Only after we finally have some topic +// metadata do we join the group, and once joined, this management runs in a +// dedicated goroutine until the group is left. +func (g *groupConsumer) manage() { + defer close(g.manageDone) + g.cfg.logger.Log(LogLevelInfo, "beginning to manage the group lifecycle", "group", g.cfg.group) + if !g.cfg.autocommitDisable && g.cfg.autocommitInterval > 0 { + g.cfg.logger.Log(LogLevelInfo, "beginning autocommit loop", "group", g.cfg.group) + go g.loopCommit() + } + + var consecutiveErrors int + joinWhy := "beginning to manage the group lifecycle" + for { + if joinWhy == "" { + joinWhy = "rejoining from normal rebalance" + } + err := g.joinAndSync(joinWhy) + if err == nil { + if joinWhy, err = g.setupAssignedAndHeartbeat(); err != nil { + if errors.Is(err, kerr.RebalanceInProgress) { + err = nil + } + } + } + if err == nil { + consecutiveErrors = 0 + continue + } + joinWhy = "rejoining after we previously errored and backed off" + + // If the user has BlockPollOnRebalance enabled, we have to + // block around the onLost and assigning. + g.c.waitAndAddRebalance() + + if errors.Is(err, context.Canceled) && g.cfg.onRevoked != nil { + // The cooperative consumer does not revoke everything + // while rebalancing, meaning if our context is + // canceled, we may have uncommitted data. Rather than + // diving into onLost, we should go into onRevoked, + // because for the most part, a context cancelation + // means we are leaving the group. Going into onRevoked + // gives us an opportunity to commit outstanding + // offsets. For the eager consumer, since we always + // revoke before exiting the heartbeat loop, we do not + // really care so much about *needing* to call + // onRevoked, but since we are handling this case for + // the cooperative consumer we may as well just also + // include the eager consumer. + g.cfg.onRevoked(g.cl.ctx, g.cl, g.nowAssigned.read()) + } else { + // Any other error is perceived as a fatal error, + // and we go into onLost as appropriate. + if g.cfg.onLost != nil { + g.cfg.onLost(g.cl.ctx, g.cl, g.nowAssigned.read()) + } + g.cfg.hooks.each(func(h Hook) { + if h, ok := h.(HookGroupManageError); ok { + h.OnGroupManageError(err) + } + }) + g.c.addFakeReadyForDraining("", 0, &ErrGroupSession{err}, "notification of group management loop error") + } + + // If we are eager, we should have invalidated everything + // before getting here, but we do so doubly just in case. + // + // If we are cooperative, the join and sync could have failed + // during the cooperative rebalance where we were still + // consuming. We need to invalidate everything. Waiting to + // resume from poll is necessary, but the user will likely be + // unable to commit. + { + g.c.mu.Lock() + g.c.assignPartitions(nil, assignInvalidateAll, nil, "clearing assignment at end of group management session") + g.mu.Lock() // before allowing poll to touch uncommitted, lock the group + g.c.mu.Unlock() // now part of poll can continue + g.uncommitted = nil + g.mu.Unlock() + + g.nowAssigned.store(nil) + g.lastAssigned = nil + g.fetching = nil + + g.leader.Store(false) + g.resetExternal() + } + + // Unblock bolling now that we have called onLost and + // re-assigned. + g.c.unaddRebalance() + + if errors.Is(err, context.Canceled) { // context was canceled, quit now + return + } + + // Waiting for the backoff is a good time to update our + // metadata; maybe the error is from stale metadata. 
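// The backoff used below comes from the client's configured retry backoff
// function, which the RetryBackoffFn option appears to supply (an assumption
// worth checking against the config). A sketch of a capped exponential
// backoff with jitter (constants are illustrative; requires math/rand and time):
//
//	kgo.RetryBackoffFn(func(tries int) time.Duration {
//		const base = 250 * time.Millisecond
//		const ceiling = 5 * time.Second
//		if tries < 1 {
//			tries = 1
//		}
//		backoff := base << uint(tries-1) // 250ms, 500ms, 1s, ...
//		if backoff <= 0 || backoff > ceiling {
//			backoff = ceiling
//		}
//		return backoff/2 + time.Duration(rand.Int63n(int64(backoff/2))) // add jitter
//	})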
+ consecutiveErrors++ + backoff := g.cfg.retryBackoff(consecutiveErrors) + g.cfg.logger.Log(LogLevelError, "join and sync loop errored", + "group", g.cfg.group, + "err", err, + "consecutive_errors", consecutiveErrors, + "backoff", backoff, + ) + deadline := time.Now().Add(backoff) + g.cl.waitmeta(g.ctx, backoff, "waitmeta during join & sync error backoff") + after := time.NewTimer(time.Until(deadline)) + select { + case <-g.ctx.Done(): + after.Stop() + return + case <-after.C: + } + } +} + +func (g *groupConsumer) leave(ctx context.Context) { + // If g.using is nonzero before this check, then a manage goroutine has + // started. If not, it will never start because we set dying. + g.mu.Lock() + wasDead := g.dying + g.dying = true + wasManaging := g.managing + g.cancel() + g.mu.Unlock() + + go func() { + if wasManaging { + // We want to wait for the manage goroutine to be done + // so that we call the user's on{Assign,RevokeLost}. + <-g.manageDone + } + if wasDead { + // If we already called leave(), then we just wait for + // the prior leave to finish and we avoid re-issuing a + // LeaveGroup request. + return + } + + defer close(g.left) + + if g.cfg.instanceID != nil { + return + } + + memberID := g.memberGen.memberID() + g.cfg.logger.Log(LogLevelInfo, "leaving group", + "group", g.cfg.group, + "member_id", memberID, + ) + // If we error when leaving, there is not much + // we can do. We may as well just return. + req := kmsg.NewPtrLeaveGroupRequest() + req.Group = g.cfg.group + req.MemberID = memberID + member := kmsg.NewLeaveGroupRequestMember() + member.MemberID = memberID + member.Reason = kmsg.StringPtr("client leaving group per normal operation") + req.Members = append(req.Members, member) + + resp, err := req.RequestWith(ctx, g.cl) + if err != nil { + g.leaveErr = err + return + } + g.leaveErr = kerr.ErrorForCode(resp.ErrorCode) + }() +} + +// returns the difference of g.nowAssigned and g.lastAssigned. +func (g *groupConsumer) diffAssigned() (added, lost map[string][]int32) { + nowAssigned := g.nowAssigned.clone() + if !g.cooperative.Load() { + return nowAssigned, nil + } + + added = make(map[string][]int32, len(nowAssigned)) + lost = make(map[string][]int32, len(nowAssigned)) + + // First, we diff lasts: any topic in last but not now is lost, + // otherwise, (1) new partitions are added, (2) common partitions are + // ignored, and (3) partitions no longer in now are lost. + lasts := make(map[int32]struct{}, 100) + for topic, lastPartitions := range g.lastAssigned { + nowPartitions, exists := nowAssigned[topic] + if !exists { + lost[topic] = lastPartitions + continue + } + + for _, lastPartition := range lastPartitions { + lasts[lastPartition] = struct{}{} + } + + // Anything now that does not exist in last is new, + // otherwise it is in common and we ignore it. + for _, nowPartition := range nowPartitions { + if _, exists := lasts[nowPartition]; !exists { + added[topic] = append(added[topic], nowPartition) + } else { + delete(lasts, nowPartition) + } + } + + // Anything remanining in last does not exist now + // and is thus lost. + for last := range lasts { + lost[topic] = append(lost[topic], last) + delete(lasts, last) // reuse lasts + } + } + + // Finally, any new topics in now assigned are strictly added. 
+ for topic, nowPartitions := range nowAssigned { + if _, exists := g.lastAssigned[topic]; !exists { + added[topic] = nowPartitions + } + } + + return added, lost +} + +type revokeStage int8 + +const ( + revokeLastSession = iota + revokeThisSession +) + +// revoke calls onRevoked for partitions that this group member is losing and +// updates the uncommitted map after the revoke. +// +// For eager consumers, this simply revokes g.assigned. This will only be +// called at the end of a group session. +// +// For cooperative consumers, this either +// +// (1) if revoking lost partitions from a prior session (i.e., after sync), +// this revokes the passed in lost +// (2) if revoking at the end of a session, this revokes topics that the +// consumer is no longer interested in consuming +// +// Lastly, for cooperative consumers, this must selectively delete what was +// lost from the uncommitted map. +func (g *groupConsumer) revoke(stage revokeStage, lost map[string][]int32, leaving bool) { + g.c.waitAndAddRebalance() + defer g.c.unaddRebalance() + + if !g.cooperative.Load() || leaving { // stage == revokeThisSession if not cooperative + // If we are an eager consumer, we stop fetching all of our + // current partitions as we will be revoking them. + g.c.mu.Lock() + if leaving { + g.c.assignPartitions(nil, assignInvalidateAll, nil, "revoking all assignments because we are leaving the group") + } else { + g.c.assignPartitions(nil, assignInvalidateAll, nil, "revoking all assignments because we are not cooperative") + } + g.c.mu.Unlock() + + if !g.cooperative.Load() { + g.cfg.logger.Log(LogLevelInfo, "eager consumer revoking prior assigned partitions", "group", g.cfg.group, "revoking", g.nowAssigned.read()) + } else { + g.cfg.logger.Log(LogLevelInfo, "cooperative consumer revoking prior assigned partitions because leaving group", "group", g.cfg.group, "revoking", g.nowAssigned.read()) + } + if g.cfg.onRevoked != nil { + g.cfg.onRevoked(g.cl.ctx, g.cl, g.nowAssigned.read()) + } + g.nowAssigned.store(nil) + g.lastAssigned = nil + + // After nilling uncommitted here, nothing should recreate + // uncommitted until a future fetch after the group is + // rejoined. This _can_ be broken with a manual SetOffsets or + // with CommitOffsets{,Sync} but we explicitly document not + // to do that outside the context of a live group session. + g.mu.Lock() + g.uncommitted = nil + g.mu.Unlock() + return + } + + switch stage { + case revokeLastSession: + // we use lost in this case + + case revokeThisSession: + // lost is nil for cooperative assigning. Instead, we determine + // lost by finding subscriptions we are no longer interested + // in. This would be from a user's PurgeConsumeTopics call. + // + // We just paused metadata, but purging triggers a rebalance + // which causes a new metadata request -- in short, this could + // be concurrent with a metadata findNewAssignments, so we + // lock. + g.nowAssigned.write(func(nowAssigned map[string][]int32) { + g.mu.Lock() + for topic, partitions := range nowAssigned { + if _, exists := g.using[topic]; !exists { + if lost == nil { + lost = make(map[string][]int32) + } + lost[topic] = partitions + delete(nowAssigned, topic) + } + } + g.mu.Unlock() + }) + } + + if len(lost) > 0 { + // We must now stop fetching anything we lost and invalidate + // any buffered fetches before falling into onRevoked. + // + // We want to invalidate buffered fetches since they may + // contain partitions that we lost, and we do not want a future + // poll to return those fetches. 
+ lostOffsets := make(map[string]map[int32]Offset, len(lost)) + + for lostTopic, lostPartitions := range lost { + lostPartitionOffsets := make(map[int32]Offset, len(lostPartitions)) + for _, lostPartition := range lostPartitions { + lostPartitionOffsets[lostPartition] = Offset{} + } + lostOffsets[lostTopic] = lostPartitionOffsets + } + + // We must invalidate before revoking and before updating + // uncommitted, because we want any commits in onRevoke to be + // for the final polled offsets. We do not want to allow the + // logical race of allowing fetches for revoked partitions + // after a revoke but before an invalidation. + g.c.mu.Lock() + g.c.assignPartitions(lostOffsets, assignInvalidateMatching, g.tps, "revoking assignments from cooperative consuming") + g.c.mu.Unlock() + } + + if len(lost) > 0 || stage == revokeThisSession { + if len(lost) == 0 { + g.cfg.logger.Log(LogLevelInfo, "cooperative consumer calling onRevoke at the end of a session even though no partitions were lost", "group", g.cfg.group) + } else { + g.cfg.logger.Log(LogLevelInfo, "cooperative consumer calling onRevoke", "group", g.cfg.group, "lost", lost, "stage", stage) + } + if g.cfg.onRevoked != nil { + g.cfg.onRevoked(g.cl.ctx, g.cl, lost) + } + } + + if len(lost) == 0 { // if we lost nothing, do nothing + return + } + + if stage != revokeThisSession { // cooperative consumers rejoin after they revoking what they lost + defer g.rejoin("cooperative rejoin after revoking what we lost from a rebalance") + } + + // The block below deletes everything lost from our uncommitted map. + // All commits should be **completed** by the time this runs. An async + // commit can undo what we do below. The default revoke runs a sync + // commit. + g.mu.Lock() + defer g.mu.Unlock() + if g.uncommitted == nil { + return + } + for lostTopic, lostPartitions := range lost { + uncommittedPartitions := g.uncommitted[lostTopic] + if uncommittedPartitions == nil { + continue + } + for _, lostPartition := range lostPartitions { + delete(uncommittedPartitions, lostPartition) + } + if len(uncommittedPartitions) == 0 { + delete(g.uncommitted, lostTopic) + } + } + if len(g.uncommitted) == 0 { + g.uncommitted = nil + } +} + +// assignRevokeSession aids in sequencing prerevoke/assign/revoke. +type assignRevokeSession struct { + prerevokeDone chan struct{} + assignDone chan struct{} + revokeDone chan struct{} +} + +func newAssignRevokeSession() *assignRevokeSession { + return &assignRevokeSession{ + prerevokeDone: make(chan struct{}), + assignDone: make(chan struct{}), + revokeDone: make(chan struct{}), + } +} + +// For cooperative consumers, the first thing a cooperative consumer does is to +// diff its last assignment and its new assignment and revoke anything lost. +// We call this a "prerevoke". +func (s *assignRevokeSession) prerevoke(g *groupConsumer, lost map[string][]int32) <-chan struct{} { + go func() { + defer close(s.prerevokeDone) + if g.cooperative.Load() && len(lost) > 0 { + g.revoke(revokeLastSession, lost, false) + } + }() + return s.prerevokeDone +} + +func (s *assignRevokeSession) assign(g *groupConsumer, newAssigned map[string][]int32) <-chan struct{} { + go func() { + defer close(s.assignDone) + <-s.prerevokeDone + if g.cfg.onAssigned != nil { + // We always call on assigned, even if nothing new is + // assigned. This allows consumers to know that + // assignment is done and do setup logic. + // + // If configured, we have to block polling. 
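// "If configured" refers to the option that blocks rebalances while the
// application is processing a poll (BlockRebalanceOnPoll). A sketch of the
// poll loop that goes with it, assuming cl was built with that option and
// process is a hypothetical record handler:
//
//	for {
//		fetches := cl.PollFetches(ctx)
//		if fetches.IsClientClosed() {
//			return
//		}
//		fetches.EachError(func(topic string, partition int32, err error) {
//			// log and decide whether to continue
//		})
//		fetches.EachRecord(func(r *kgo.Record) {
//			process(r)
//		})
//		cl.AllowRebalance() // let any rebalance blocked during processing proceed
//	}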
+ g.c.waitAndAddRebalance() + defer g.c.unaddRebalance() + g.cfg.onAssigned(g.cl.ctx, g.cl, newAssigned) + } + }() + return s.assignDone +} + +// At the end of a group session, before we leave the heartbeat loop, we call +// revoke. For non-cooperative consumers, this revokes everything in the +// current session, and before revoking, we invalidate all partitions. For the +// cooperative consumer, this does nothing but does notify the client that a +// revoke has begun / the group session is ending. +// +// This may not run before returning from the heartbeat loop: if we encounter a +// fatal error, we return before revoking so that we can instead call onLost in +// the manage loop. +func (s *assignRevokeSession) revoke(g *groupConsumer, leaving bool) <-chan struct{} { + go func() { + defer close(s.revokeDone) + <-s.assignDone + g.revoke(revokeThisSession, nil, leaving) + }() + return s.revokeDone +} + +// This chunk of code "pre" revokes lost partitions for the cooperative +// consumer and then begins heartbeating while fetching offsets. This returns +// when heartbeating errors (or if fetch offsets errors). +// +// Before returning, this function ensures that +// - onAssigned is complete +// - which ensures that pre revoking is complete +// - fetching is complete +// - heartbeating is complete +func (g *groupConsumer) setupAssignedAndHeartbeat() (string, error) { + type hbquit struct { + rejoinWhy string + err error + } + hbErrCh := make(chan hbquit, 1) + fetchErrCh := make(chan error, 1) + + s := newAssignRevokeSession() + added, lost := g.diffAssigned() + g.lastAssigned = g.nowAssigned.clone() // now that we are done with our last assignment, update it per the new assignment + + g.cfg.logger.Log(LogLevelInfo, "new group session begun", "group", g.cfg.group, "added", mtps(added), "lost", mtps(lost)) + s.prerevoke(g, lost) // for cooperative consumers + + // Since we have joined the group, we immediately begin heartbeating. + // This will continue until the heartbeat errors, the group is killed, + // or the fetch offsets below errors. + ctx, cancel := context.WithCancel(g.ctx) + go func() { + defer cancel() // potentially kill offset fetching + g.cfg.logger.Log(LogLevelInfo, "beginning heartbeat loop", "group", g.cfg.group) + rejoinWhy, err := g.heartbeat(fetchErrCh, s) + hbErrCh <- hbquit{rejoinWhy, err} + }() + + // We immediately begin fetching offsets. We want to wait until the + // fetch function returns, since it assumes within it that another + // assign cannot happen (it assigns partitions itself). Returning + // before the fetch completes would be not good. + // + // The difference between fetchDone and fetchErrCh is that fetchErrCh + // can kill heartbeating, or signal it to continue, while fetchDone + // is specifically used for this function's return. + fetchDone := make(chan struct{}) + defer func() { <-fetchDone }() + + // Before we fetch offsets, we wait for the user's onAssign callback to + // be done. This ensures a few things: + // + // * that we wait for for prerevoking to be done, which updates the + // uncommitted field. Waiting for that ensures that a rejoin and poll + // does not have weird concurrent interaction. + // + // * that our onLost will not be concurrent with onAssign + // + // * that the user can start up any per-partition processors necessary + // before we begin consuming that partition. + // + // We especially need to wait here because heartbeating may not + // necessarily run onRevoke before returning (because of a fatal + // error). 
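// As noted above, onAssigned is the natural place to start per-partition
// processors, and onRevoked the place to stop them. A compact sketch of
// wiring that up at client creation (the broker, group, topic, and the
// runPartitionProcessor worker are illustrative assumptions; the callbacks
// are never run concurrently with each other, so the plain map is safe here):
//
//	stops := make(map[string]map[int32]chan struct{})
//	cl, err := kgo.NewClient(
//		kgo.SeedBrokers("localhost:9092"),
//		kgo.ConsumerGroup("my-group"),
//		kgo.ConsumeTopics("logs"),
//		kgo.OnPartitionsAssigned(func(_ context.Context, _ *kgo.Client, assigned map[string][]int32) {
//			for topic, parts := range assigned {
//				if stops[topic] == nil {
//					stops[topic] = make(map[int32]chan struct{})
//				}
//				for _, p := range parts {
//					stop := make(chan struct{})
//					stops[topic][p] = stop
//					go runPartitionProcessor(topic, p, stop)
//				}
//			}
//		}),
//		kgo.OnPartitionsRevoked(func(_ context.Context, _ *kgo.Client, revoked map[string][]int32) {
//			for topic, parts := range revoked {
//				for _, p := range parts {
//					if stop, ok := stops[topic][p]; ok {
//						close(stop)
//						delete(stops[topic], p)
//					}
//				}
//			}
//		}),
//	)
//	if err != nil {
//		panic(err) // handle properly in real code
//	}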
+ s.assign(g, added) + + // If cooperative consuming, we may have to resume fetches. See the + // comment on adjustCooperativeFetchOffsets. + // + // We do this AFTER the user's callback. If we add more partitions + // to `added` that are from a previously canceled fetch, we do NOT + // want to pass those fetch-resumed partitions to the user callback + // again. See #705. + if g.cooperative.Load() { + added = g.adjustCooperativeFetchOffsets(added, lost) + } + + <-s.assignDone + + if len(added) > 0 { + go func() { + defer close(fetchDone) + defer close(fetchErrCh) + fetchErrCh <- g.fetchOffsets(ctx, added) + }() + } else { + close(fetchDone) + close(fetchErrCh) + } + + // Finally, we simply return whatever the heartbeat error is. This will + // be the fetch offset error if that function is what killed this. + + done := <-hbErrCh + return done.rejoinWhy, done.err +} + +// heartbeat issues heartbeat requests to Kafka for the duration of a group +// session. +// +// This function begins before fetching offsets to allow the consumer's +// onAssigned to be called before fetching. If the eventual offset fetch +// errors, we continue heartbeating until onRevoked finishes and our metadata +// is updated. If the error is not RebalanceInProgress, we return immediately. +// +// If the offset fetch is successful, then we basically sit in this function +// until a heartbeat errors or we, being the leader, decide to re-join. +func (g *groupConsumer) heartbeat(fetchErrCh <-chan error, s *assignRevokeSession) (string, error) { + ticker := time.NewTicker(g.cfg.heartbeatInterval) + defer ticker.Stop() + + // We issue one heartbeat quickly if we are cooperative because + // cooperative consumers rejoin the group immediately, and we want to + // detect that in 500ms rather than 3s. + var cooperativeFastCheck <-chan time.Time + if g.cooperative.Load() { + cooperativeFastCheck = time.After(500 * time.Millisecond) + } + + var metadone, revoked <-chan struct{} + var heartbeat, didMetadone, didRevoke bool + var rejoinWhy string + var lastErr error + + ctxCh := g.ctx.Done() + + for { + var err error + var force func(error) + heartbeat = false + select { + case <-cooperativeFastCheck: + heartbeat = true + case <-ticker.C: + heartbeat = true + case force = <-g.heartbeatForceCh: + heartbeat = true + case rejoinWhy = <-g.rejoinCh: + // If a metadata update changes our subscription, + // we just pretend we are rebalancing. + g.cfg.logger.Log(LogLevelInfo, "forced rejoin quitting heartbeat loop", "why", rejoinWhy) + err = kerr.RebalanceInProgress + case err = <-fetchErrCh: + fetchErrCh = nil + case <-metadone: + metadone = nil + didMetadone = true + case <-revoked: + revoked = nil + didRevoke = true + case <-ctxCh: + // Even if the group is left, we need to wait for our + // revoke to finish before returning, otherwise the + // manage goroutine will race with us setting + // nowAssigned. 
+ ctxCh = nil + err = context.Canceled + } + + if heartbeat { + g.cfg.logger.Log(LogLevelDebug, "heartbeating", "group", g.cfg.group) + req := kmsg.NewPtrHeartbeatRequest() + req.Group = g.cfg.group + memberID, generation := g.memberGen.load() + req.Generation = generation + req.MemberID = memberID + req.InstanceID = g.cfg.instanceID + var resp *kmsg.HeartbeatResponse + if resp, err = req.RequestWith(g.ctx, g.cl); err == nil { + err = kerr.ErrorForCode(resp.ErrorCode) + } + g.cfg.logger.Log(LogLevelDebug, "heartbeat complete", "group", g.cfg.group, "err", err) + if force != nil { + force(err) + } + } + + // The first error either triggers a clean revoke and metadata + // update or it returns immediately. If we triggered the + // revoke, we wait for it to complete regardless of any future + // error. + if didMetadone && didRevoke { + return rejoinWhy, lastErr + } + + if err == nil { + continue + } + + if lastErr == nil { + g.cfg.logger.Log(LogLevelInfo, "heartbeat errored", "group", g.cfg.group, "err", err) + } else { + g.cfg.logger.Log(LogLevelInfo, "heartbeat errored again while waiting for user revoke to finish", "group", g.cfg.group, "err", err) + } + + // Since we errored, we must revoke. + if !didRevoke && revoked == nil { + // If our error is not from rebalancing, then we + // encountered IllegalGeneration or UnknownMemberID or + // our context closed all of which are unexpected and + // unrecoverable. + // + // We return early rather than revoking and updating + // metadata; the groupConsumer's manage function will + // call onLost with all partitions. + // + // setupAssignedAndHeartbeat still waits for onAssigned + // to be done so that we avoid calling onLost + // concurrently. + if !errors.Is(err, kerr.RebalanceInProgress) && revoked == nil { + return "", err + } + + // Now we call the user provided revoke callback, even + // if cooperative: if cooperative, this only revokes + // partitions we no longer want to consume. + // + // If the err is context.Canceled, the group is being + // left and we revoke everything. + revoked = s.revoke(g, errors.Is(err, context.Canceled)) + } + // Since we errored, while waiting for the revoke to finish, we + // update our metadata. A leader may have re-joined with new + // metadata, and we want the update. + if !didMetadone && metadone == nil { + waited := make(chan struct{}) + metadone = waited + go func() { + g.cl.waitmeta(g.ctx, g.cfg.sessionTimeout, "waitmeta after heartbeat error") + close(waited) + }() + } + + // We always save the latest error; generally this should be + // REBALANCE_IN_PROGRESS, but if the revoke takes too long, + // Kafka may boot us and we will get a different error. + lastErr = err + } +} + +// ForceRebalance quits a group member's heartbeat loop so that the member +// rejoins with a JoinGroupRequest. +// +// This function is only useful if you either (a) know that the group member is +// a leader, and want to force a rebalance for any particular reason, or (b) +// are using a custom group balancer, and have changed the metadata that will +// be returned from its JoinGroupMetadata method. This function has no other +// use; see KIP-568 for more details around this function's motivation. +// +// If neither of the cases above are true (this member is not a leader, and the +// join group metadata has not changed), then Kafka will not actually trigger a +// rebalance and will instead reply to the member with its current assignment. 
+func (cl *Client) ForceRebalance() { + if g := cl.consumer.g; g != nil { + g.rejoin("rejoin from ForceRebalance") + } +} + +// rejoin is called after a cooperative member revokes what it lost at the +// beginning of a session, or if we are leader and detect new partitions to +// consume. +func (g *groupConsumer) rejoin(why string) { + select { + case g.rejoinCh <- why: + default: + } +} + +// Joins and then syncs, issuing the two slow requests in goroutines to allow +// for group cancelation to return early. +func (g *groupConsumer) joinAndSync(joinWhy string) error { + g.noCommitDuringJoinAndSync.Lock() + g.cfg.logger.Log(LogLevelDebug, "blocking commits from join&sync") + defer g.noCommitDuringJoinAndSync.Unlock() + defer g.cfg.logger.Log(LogLevelDebug, "unblocking commits from join&sync") + + g.cfg.logger.Log(LogLevelInfo, "joining group", "group", g.cfg.group) + g.leader.Store(false) + g.getAndResetExternalRejoin() + defer func() { + // If we are not leader, we clear any tracking of external + // topics from when we were previously leader, since tracking + // these is just a waste. + if !g.leader.Load() { + g.resetExternal() + } + }() + +start: + select { + case <-g.rejoinCh: // drain to avoid unnecessary rejoins + default: + } + + joinReq := kmsg.NewPtrJoinGroupRequest() + joinReq.Group = g.cfg.group + joinReq.SessionTimeoutMillis = int32(g.cfg.sessionTimeout.Milliseconds()) + joinReq.RebalanceTimeoutMillis = int32(g.cfg.rebalanceTimeout.Milliseconds()) + joinReq.ProtocolType = g.cfg.protocol + joinReq.MemberID = g.memberGen.memberID() + joinReq.InstanceID = g.cfg.instanceID + joinReq.Protocols = g.joinGroupProtocols() + if joinWhy != "" { + joinReq.Reason = kmsg.StringPtr(joinWhy) + } + var ( + joinResp *kmsg.JoinGroupResponse + err error + joined = make(chan struct{}) + ) + + // NOTE: For this function, we have to use the client context, not the + // group context. We want to allow people to issue one final commit in + // OnPartitionsRevoked before leaving a group, so we need to block + // commits during join&sync. If we used the group context, we would be + // cancled immediately when leaving while a join or sync is inflight, + // and then our final commit will receive either REBALANCE_IN_PROGRESS + // or ILLEGAL_GENERATION. 
+ + go func() { + defer close(joined) + joinResp, err = joinReq.RequestWith(g.cl.ctx, g.cl) + }() + + select { + case <-joined: + case <-g.cl.ctx.Done(): + return g.cl.ctx.Err() // client closed + } + if err != nil { + return err + } + + restart, protocol, plan, err := g.handleJoinResp(joinResp) + if restart { + goto start + } + if err != nil { + g.cfg.logger.Log(LogLevelWarn, "join group failed", "group", g.cfg.group, "err", err) + return err + } + + syncReq := kmsg.NewPtrSyncGroupRequest() + syncReq.Group = g.cfg.group + memberID, generation := g.memberGen.load() + syncReq.Generation = generation + syncReq.MemberID = memberID + syncReq.InstanceID = g.cfg.instanceID + syncReq.ProtocolType = &g.cfg.protocol + syncReq.Protocol = &protocol + if !joinResp.SkipAssignment { + syncReq.GroupAssignment = plan // nil unless we are the leader + } + var ( + syncResp *kmsg.SyncGroupResponse + synced = make(chan struct{}) + ) + + g.cfg.logger.Log(LogLevelInfo, "syncing", "group", g.cfg.group, "protocol_type", g.cfg.protocol, "protocol", protocol) + go func() { + defer close(synced) + syncResp, err = syncReq.RequestWith(g.cl.ctx, g.cl) + }() + + select { + case <-synced: + case <-g.cl.ctx.Done(): + return g.cl.ctx.Err() + } + if err != nil { + return err + } + + if err = g.handleSyncResp(protocol, syncResp); err != nil { + if errors.Is(err, kerr.RebalanceInProgress) { + g.cfg.logger.Log(LogLevelInfo, "sync failed with RebalanceInProgress, rejoining", "group", g.cfg.group) + goto start + } + g.cfg.logger.Log(LogLevelWarn, "sync group failed", "group", g.cfg.group, "err", err) + return err + } + + // KIP-814 fixes one limitation with KIP-345, but has another + // fundamental limitation. When an instance ID leader restarts, its + // first join always gets its old assignment *even if* the member's + // topic interests have changed. The broker tells us to skip doing + // assignment ourselves, but we ignore that for our well known + // balancers. Instead, we balance (but avoid sending it while syncing, + // as we are supposed to), and if our sync assignment differs from our + // own calculated assignment, We know we have a stale broker assignment + // and must trigger a rebalance. 
+ if plan != nil && joinResp.SkipAssignment { + for _, assign := range plan { + if assign.MemberID == memberID { + if !bytes.Equal(assign.MemberAssignment, syncResp.MemberAssignment) { + g.rejoin("instance group leader restarted and was reassigned old plan, our topic interests changed and we must rejoin to force a rebalance") + } + break + } + } + } + + return nil +} + +func (g *groupConsumer) handleJoinResp(resp *kmsg.JoinGroupResponse) (restart bool, protocol string, plan []kmsg.SyncGroupRequestGroupAssignment, err error) { + if err = kerr.ErrorForCode(resp.ErrorCode); err != nil { + switch err { + case kerr.MemberIDRequired: + g.memberGen.storeMember(resp.MemberID) // KIP-394 + g.cfg.logger.Log(LogLevelInfo, "join returned MemberIDRequired, rejoining with response's MemberID", "group", g.cfg.group, "member_id", resp.MemberID) + return true, "", nil, nil + case kerr.UnknownMemberID: + g.memberGen.storeMember("") + g.cfg.logger.Log(LogLevelInfo, "join returned UnknownMemberID, rejoining without a member id", "group", g.cfg.group) + return true, "", nil, nil + } + return // Request retries as necessary, so this must be a failure + } + g.memberGen.store(resp.MemberID, resp.Generation) + + if resp.Protocol != nil { + protocol = *resp.Protocol + } + + for _, balancer := range g.cfg.balancers { + if protocol == balancer.ProtocolName() { + cooperative := balancer.IsCooperative() + if !cooperative && g.cooperative.Load() { + g.cfg.logger.Log(LogLevelWarn, "downgrading from cooperative group to eager group, this is not supported per KIP-429!") + } + g.cooperative.Store(cooperative) + break + } + } + + // KIP-345 has a fundamental limitation that KIP-814 also does not + // solve. + // + // When using instance IDs, if a leader restarts, its first join + // receives its old assignment no matter what. KIP-345 resulted in + // leaderless consumer groups, KIP-814 fixes this by notifying the + // restarted leader that it is still leader but that it should not + // balance. + // + // If the join response is <= v8, we hackily work around the leaderless + // situation by checking if the LeaderID is prefixed with our + // InstanceID. This is how Kafka and Redpanda are both implemented. At + // worst, if we mis-predict the leader, then we may accidentally try to + // cause a rebalance later and it will do nothing. That's fine. At + // least we can cause rebalances now, rather than having a leaderless, + // not-ever-rebalancing client. + // + // KIP-814 does not solve our problem fully: if we restart and rejoin, + // we always get our old assignment even if we changed what topics we + // were interested in. Because we have our old assignment, we think + // that the plan is fine *even with* our new interests, and we wait for + // some external rebalance trigger. We work around this limitation + // above (see "KIP-814") only for well known balancers; we cannot work + // around this limitation for not well known balancers because they may + // do so weird things we cannot control nor reason about. 
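+ // As an illustration of the prefix check below: with an InstanceID of
+ // "pod-0", brokers generate member IDs such as "pod-0-<uuid>", so a
+ // LeaderID beginning with "pod-0-" in a v8-or-older join response is
+ // taken to mean we are (still) the leader, even though the response
+ // itself cannot tell us so.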
+ leader := resp.LeaderID == resp.MemberID + leaderNoPlan := !leader && resp.Version <= 8 && g.cfg.instanceID != nil && strings.HasPrefix(resp.LeaderID, *g.cfg.instanceID+"-") + if leader { + g.leader.Store(true) + g.cfg.logger.Log(LogLevelInfo, "joined, balancing group", + "group", g.cfg.group, + "member_id", resp.MemberID, + "instance_id", strptr{g.cfg.instanceID}, + "generation", resp.Generation, + "balance_protocol", protocol, + "leader", true, + ) + plan, err = g.balanceGroup(protocol, resp.Members, resp.SkipAssignment) + } else if leaderNoPlan { + g.leader.Store(true) + g.cfg.logger.Log(LogLevelInfo, "joined as leader but unable to balance group due to KIP-345 limitations", + "group", g.cfg.group, + "member_id", resp.MemberID, + "instance_id", strptr{g.cfg.instanceID}, + "generation", resp.Generation, + "balance_protocol", protocol, + "leader", true, + ) + } else { + g.cfg.logger.Log(LogLevelInfo, "joined", + "group", g.cfg.group, + "member_id", resp.MemberID, + "instance_id", strptr{g.cfg.instanceID}, + "generation", resp.Generation, + "leader", false, + ) + } + return +} + +type strptr struct { + s *string +} + +func (s strptr) String() string { + if s.s == nil { + return "" + } + return *s.s +} + +// If other group members consume topics we are not interested in, we track the +// entire group's topics in this groupExternal type. On metadata update, we see +// if any partitions for any of these topics have changed, and if so, we as +// leader rejoin the group. +// +// Our external topics are cleared whenever we join and are not leader. We keep +// our previous external topics if we are leader: on the first balance as +// leader, we request metadata for all topics, then on followup balances, we +// already have that metadata and do not need to reload it when balancing. +// +// Whenever metadata updates, we detect if a rejoin is needed and always reset +// the rejoin status. +type groupExternal struct { + tps atomic.Value // map[string]int32 + rejoin atomicBool +} + +func (g *groupConsumer) loadExternal() *groupExternal { + e := g.external.Load() + if e != nil { + return e.(*groupExternal) + } + return nil +} + +// We reset our external topics whenever join&sync loop errors, or when we join +// and are not leader. +func (g *groupConsumer) resetExternal() { + g.external.Store((*groupExternal)(nil)) +} + +// If this is our first join as leader, or if a new member joined with new +// topics we were not tracking, we re-initialize external with the all-topics +// metadata refresh. +func (g *groupConsumer) initExternal(current map[string]int32) { + var e groupExternal + e.tps.Store(dupmsi32(current)) + g.external.Store(&e) +} + +// Reset whenever we join, & potentially used to rejoin when finding new +// assignments (i.e., end of metadata). +func (g *groupConsumer) getAndResetExternalRejoin() bool { + e := g.loadExternal() + if e == nil { + return false + } + defer e.rejoin.Store(false) + return e.rejoin.Load() +} + +// Runs fn over a load, not copy, of our map. +func (g *groupExternal) fn(fn func(map[string]int32)) { + if g == nil { + return + } + v := g.tps.Load() + if v == nil { + return + } + tps := v.(map[string]int32) + fn(tps) +} + +// Runs fn over a clone of our external map and updates the map. 
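+// The copy-on-write below lets concurrent readers of the atomic tps value keep
+// using their snapshot while updateLatest swaps in a freshly cloned map.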
+func (g *groupExternal) cloned(fn func(map[string]int32)) { + g.fn(func(tps map[string]int32) { + dup := dupmsi32(tps) + fn(dup) + g.tps.Store(dup) + }) +} + +func (g *groupExternal) eachTopic(fn func(string)) { + g.fn(func(tps map[string]int32) { + for t := range tps { + fn(t) + } + }) +} + +func (g *groupExternal) updateLatest(meta map[string]*metadataTopic) { + g.cloned(func(tps map[string]int32) { + var rejoin bool + for t, ps := range tps { + latest, exists := meta[t] + if !exists || latest.loadErr != nil { + continue + } + if psLatest := int32(len(latest.partitions)); psLatest != ps { + rejoin = true + tps[t] = psLatest + } + } + if rejoin { + g.rejoin.Store(true) + } + }) +} + +func (g *groupConsumer) handleSyncResp(protocol string, resp *kmsg.SyncGroupResponse) error { + if err := kerr.ErrorForCode(resp.ErrorCode); err != nil { + return err + } + + b, err := g.findBalancer("sync assignment", protocol) + if err != nil { + return err + } + + assigned, err := b.ParseSyncAssignment(resp.MemberAssignment) + if err != nil { + g.cfg.logger.Log(LogLevelError, "sync assignment parse failed", "group", g.cfg.group, "err", err) + return err + } + + g.cfg.logger.Log(LogLevelInfo, "synced", "group", g.cfg.group, "assigned", mtps(assigned)) + + // Past this point, we will fall into the setupAssigned prerevoke code, + // meaning for cooperative, we will revoke what we need to. + g.nowAssigned.store(assigned) + return nil +} + +func (g *groupConsumer) joinGroupProtocols() []kmsg.JoinGroupRequestProtocol { + g.mu.Lock() + + topics := make([]string, 0, len(g.using)) + for topic := range g.using { + topics = append(topics, topic) + } + lastDup := make(map[string][]int32, len(g.lastAssigned)) + for t, ps := range g.lastAssigned { + lastDup[t] = append([]int32(nil), ps...) // deep copy to allow modifications + } + + g.mu.Unlock() + + sort.Strings(topics) // we guarantee to JoinGroupMetadata that the input strings are sorted + for _, partitions := range lastDup { + sort.Slice(partitions, func(i, j int) bool { return partitions[i] < partitions[j] }) // same for partitions + } + + gen := g.memberGen.generation() + var protos []kmsg.JoinGroupRequestProtocol + for _, balancer := range g.cfg.balancers { + proto := kmsg.NewJoinGroupRequestProtocol() + proto.Name = balancer.ProtocolName() + proto.Metadata = balancer.JoinGroupMetadata(topics, lastDup, gen) + protos = append(protos, proto) + } + return protos +} + +// If we are cooperatively consuming, we have a potential problem: if fetch +// offsets is canceled due to an immediate rebalance, when we resume, we will +// not re-fetch offsets for partitions we were previously assigned and are +// still assigned. We will only fetch offsets for new assignments. +// +// To work around that issue, we track everything we are fetching in g.fetching +// and only clear g.fetching if fetchOffsets returns with no error. +// +// Now, if fetching returns early due to an error, when we rejoin and re-fetch, +// we will resume fetching what we were previously: +// +// - first we remove what was lost +// - then we add anything new +// - then we translate our total set into the "added" list to be fetched on return +// +// Any time a group is completely lost, the manage loop clears fetching. When +// cooperative consuming, a hard error is basically losing the entire state and +// rejoining from scratch. 
+func (g *groupConsumer) adjustCooperativeFetchOffsets(added, lost map[string][]int32) map[string][]int32 { + if g.fetching != nil { + // We were fetching previously: remove anything lost. + for topic, partitions := range lost { + ft := g.fetching[topic] + if ft == nil { + continue // we were not fetching this topic + } + for _, partition := range partitions { + delete(ft, partition) + } + if len(ft) == 0 { + delete(g.fetching, topic) + } + } + } else { + // We were not fetching previously: start a new map for what we + // are adding. + g.fetching = make(map[string]map[int32]struct{}) + } + + // Merge everything we are newly fetching to our fetching map. + for topic, partitions := range added { + ft := g.fetching[topic] + if ft == nil { + ft = make(map[int32]struct{}, len(partitions)) + g.fetching[topic] = ft + } + for _, partition := range partitions { + ft[partition] = struct{}{} + } + } + + // Now translate our full set (previously fetching ++ newly fetching -- + // lost) into a new "added" map to be fetched. + added = make(map[string][]int32, len(g.fetching)) + for topic, partitions := range g.fetching { + ps := make([]int32, 0, len(partitions)) + for partition := range partitions { + ps = append(ps, partition) + } + added[topic] = ps + } + return added +} + +// fetchOffsets is issued once we join a group to see what the prior commits +// were for the partitions we were assigned. +func (g *groupConsumer) fetchOffsets(ctx context.Context, added map[string][]int32) (rerr error) { // we must use "rerr"! see introducing commit + // If we fetch successfully, we can clear the cross-group-cycle + // fetching tracking. + defer func() { + if rerr == nil { + g.fetching = nil + } + }() + + // Our client maps the v0 to v7 format to v8+ when sharding this + // request, if we are only requesting one group, as well as maps the + // response back, so we do not need to worry about v8+ here. +start: + req := kmsg.NewPtrOffsetFetchRequest() + req.Group = g.cfg.group + req.RequireStable = g.cfg.requireStable + for topic, partitions := range added { + reqTopic := kmsg.NewOffsetFetchRequestTopic() + reqTopic.Topic = topic + reqTopic.Partitions = partitions + req.Topics = append(req.Topics, reqTopic) + } + + var resp *kmsg.OffsetFetchResponse + var err error + + fetchDone := make(chan struct{}) + go func() { + defer close(fetchDone) + resp, err = req.RequestWith(ctx, g.cl) + }() + select { + case <-fetchDone: + case <-ctx.Done(): + g.cfg.logger.Log(LogLevelInfo, "fetch offsets failed due to context cancelation", "group", g.cfg.group) + return ctx.Err() + } + if err != nil { + g.cfg.logger.Log(LogLevelError, "fetch offsets failed with non-retryable error", "group", g.cfg.group, "err", err) + return err + } + + // Even if a leader epoch is returned, if brokers do not support + // OffsetForLeaderEpoch for some reason (odd set of supported reqs), we + // cannot use the returned leader epoch. + kip320 := g.cl.supportsOffsetForLeaderEpoch() + + offsets := make(map[string]map[int32]Offset) + for _, rTopic := range resp.Topics { + topicOffsets := make(map[int32]Offset) + offsets[rTopic.Topic] = topicOffsets + for _, rPartition := range rTopic.Partitions { + if err = kerr.ErrorForCode(rPartition.ErrorCode); err != nil { + // KIP-447: Unstable offset commit means there is a + // pending transaction that should be committing soon. + // We sleep for 1s and retry fetching offsets. 
+ if errors.Is(err, kerr.UnstableOffsetCommit) { + g.cfg.logger.Log(LogLevelInfo, "fetch offsets failed with UnstableOffsetCommit, waiting 1s and retrying", + "group", g.cfg.group, + "topic", rTopic.Topic, + "partition", rPartition.Partition, + ) + select { + case <-ctx.Done(): + case <-time.After(time.Second): + goto start + } + } + g.cfg.logger.Log(LogLevelError, "fetch offsets failed", + "group", g.cfg.group, + "topic", rTopic.Topic, + "partition", rPartition.Partition, + "err", err, + ) + return err + } + offset := Offset{ + at: rPartition.Offset, + epoch: -1, + } + if resp.Version >= 5 && kip320 { // KIP-320 + offset.epoch = rPartition.LeaderEpoch + } + if rPartition.Offset == -1 { + offset = g.cfg.resetOffset + } + topicOffsets[rPartition.Partition] = offset + } + } + + groupTopics := g.tps.load() + for fetchedTopic := range offsets { + if !groupTopics.hasTopic(fetchedTopic) { + delete(offsets, fetchedTopic) + g.cfg.logger.Log(LogLevelWarn, "member was assigned topic that we did not ask for in ConsumeTopics! skipping assigning this topic!", "group", g.cfg.group, "topic", fetchedTopic) + } + } + + if g.cfg.onFetched != nil { + g.onFetchedMu.Lock() + err = g.cfg.onFetched(ctx, g.cl, resp) + g.onFetchedMu.Unlock() + if err != nil { + return err + } + } + if g.cfg.adjustOffsetsBeforeAssign != nil { + g.onFetchedMu.Lock() + offsets, err = g.cfg.adjustOffsetsBeforeAssign(ctx, offsets) + g.onFetchedMu.Unlock() + if err != nil { + return err + } + } + + // Lock for assign and then updating uncommitted. + g.c.mu.Lock() + defer g.c.mu.Unlock() + g.mu.Lock() + defer g.mu.Unlock() + + // Eager: we already invalidated everything; nothing to re-invalidate. + // Cooperative: assign without invalidating what we are consuming. + g.c.assignPartitions(offsets, assignWithoutInvalidating, g.tps, fmt.Sprintf("newly fetched offsets for group %s", g.cfg.group)) + + // We need to update the uncommitted map so that SetOffsets(Committed) + // does not rewind before the committed offsets we just fetched. + if g.uncommitted == nil { + g.uncommitted = make(uncommitted, 10) + } + for topic, partitions := range offsets { + topicUncommitted := g.uncommitted[topic] + if topicUncommitted == nil { + topicUncommitted = make(map[int32]uncommit, 20) + g.uncommitted[topic] = topicUncommitted + } + for partition, offset := range partitions { + if offset.at < 0 { + continue // not yet committed + } + committed := EpochOffset{ + Epoch: offset.epoch, + Offset: offset.at, + } + topicUncommitted[partition] = uncommit{ + dirty: committed, + head: committed, + committed: committed, + } + } + } + return nil +} + +// findNewAssignments updates topics the group wants to use and other metadata. +// We only grab the group mu at the end if we need to. +// +// This joins the group if +// - the group has never been joined +// - new topics are found for consuming (changing this consumer's join metadata) +// +// Additionally, if the member is the leader, this rejoins the group if the +// leader notices new partitions in an existing topic. +// +// This does not rejoin if the leader notices a partition is lost, which is +// finicky. 
+func (g *groupConsumer) findNewAssignments() { + topics := g.tps.load() + + type change struct { + isNew bool + delta int + } + + var rns reNews + if g.cfg.regex { + defer rns.log(&g.cl.cfg) + } + + var numNewTopics int + toChange := make(map[string]change, len(topics)) + for topic, topicPartitions := range topics { + parts := topicPartitions.load() + numPartitions := len(parts.partitions) + // If we are already using this topic, add that it changed if + // there are more partitions than we were using prior. + if used, exists := g.using[topic]; exists { + if added := numPartitions - used; added > 0 { + toChange[topic] = change{delta: added} + } + continue + } + + // We are iterating over g.tps, which is initialized in the + // group.init from the config's topics, but can also be added + // to in AddConsumeTopics. By default, we use the topic. If + // this is regex based, the config's topics are regular + // expressions that we need to evaluate against (and we do not + // support adding new regex). + useTopic := true + if g.cfg.regex { + want, seen := g.reSeen[topic] + if !seen { + for rawRe, re := range g.cfg.topics { + if want = re.MatchString(topic); want { + rns.add(rawRe, topic) + break + } + } + if !want { + rns.skip(topic) + } + g.reSeen[topic] = want + } + useTopic = want + } + + // We only track using the topic if there are partitions for + // it; if there are none, then the topic was set by _us_ as "we + // want to load the metadata", but the topic was not returned + // in the metadata (or it was returned with an error). + if useTopic && numPartitions > 0 { + if g.cfg.regex && parts.isInternal { + continue + } + toChange[topic] = change{isNew: true, delta: numPartitions} + numNewTopics++ + } + } + + externalRejoin := g.leader.Load() && g.getAndResetExternalRejoin() + + if len(toChange) == 0 && !externalRejoin { + return + } + + g.mu.Lock() + defer g.mu.Unlock() + + if g.dying { + return + } + + for topic, change := range toChange { + g.using[topic] += change.delta + } + + if !g.managing { + g.managing = true + go g.manage() + return + } + + if numNewTopics > 0 { + g.rejoin("rejoining because there are more topics to consume, our interests have changed") + } else if g.leader.Load() { + if len(toChange) > 0 { + g.rejoin("rejoining because we are the leader and noticed some topics have new partitions") + } else if externalRejoin { + g.rejoin("leader detected that partitions on topics another member is consuming have changed, rejoining to trigger rebalance") + } + } +} + +// uncommit tracks the latest offset polled (+1) and the latest commit. +// The reason head is just past the latest offset is because we want +// to commit TO an offset, not BEFORE an offset. +type uncommit struct { + dirty EpochOffset // if autocommitting, what will move to head on next Poll + head EpochOffset // ready to commit + committed EpochOffset // what is committed +} + +// EpochOffset combines a record offset with the leader epoch the broker +// was at when the record was written. +type EpochOffset struct { + // Epoch is the leader epoch of the record being committed. Truncation + // detection relies on the epoch of the CURRENT record. For truncation + // detection, the client asks "what is the the end of this epoch?", + // which returns one after the end offset (see the next field, and + // check the docs on kmsg.OffsetForLeaderEpochRequest). + Epoch int32 + + // Offset is the offset of a record. If committing, this should be one + // AFTER a record's offset. 
Clients start consuming at the offset that + // is committed. + Offset int64 +} + +// Less returns whether the this EpochOffset is less than another. This is less +// than the other if this one's epoch is less, or the epoch's are equal and +// this one's offset is less. +func (e EpochOffset) Less(o EpochOffset) bool { + return e.Epoch < o.Epoch || e.Epoch == o.Epoch && e.Offset < o.Offset +} + +type uncommitted map[string]map[int32]uncommit + +// updateUncommitted sets the latest uncommitted offset. +func (g *groupConsumer) updateUncommitted(fetches Fetches) { + var b bytes.Buffer + debug := g.cfg.logger.Level() >= LogLevelDebug + + // We set the head offset if autocommitting is disabled (because we + // only use head / committed in that case), or if we are greedily + // autocommitting (so that the latest head is available to autocommit). + setHead := g.cfg.autocommitDisable || g.cfg.autocommitGreedy + + g.mu.Lock() + defer g.mu.Unlock() + + for _, fetch := range fetches { + for _, topic := range fetch.Topics { + if debug { + fmt.Fprintf(&b, "%s[", topic.Topic) + } + var topicOffsets map[int32]uncommit + for _, partition := range topic.Partitions { + if len(partition.Records) == 0 { + continue + } + final := partition.Records[len(partition.Records)-1] + + if topicOffsets == nil { + if g.uncommitted == nil { + g.uncommitted = make(uncommitted, 10) + } + topicOffsets = g.uncommitted[topic.Topic] + if topicOffsets == nil { + topicOffsets = make(map[int32]uncommit, 20) + g.uncommitted[topic.Topic] = topicOffsets + } + } + + // Our new head points just past the final consumed offset, + // that is, if we rejoin, this is the offset to begin at. + set := EpochOffset{ + final.LeaderEpoch, // -1 if old message / unknown + final.Offset + 1, + } + prior := topicOffsets[partition.Partition] + + if debug { + if setHead { + fmt.Fprintf(&b, "%d{%d=>%d r%d}, ", partition.Partition, prior.head.Offset, set.Offset, len(partition.Records)) + } else { + fmt.Fprintf(&b, "%d{%d=>%d=>%d r%d}, ", partition.Partition, prior.head.Offset, prior.dirty.Offset, set.Offset, len(partition.Records)) + } + } + + prior.dirty = set + if setHead { + prior.head = set + } + topicOffsets[partition.Partition] = prior + } + + if debug { + if bytes.HasSuffix(b.Bytes(), []byte(", ")) { + b.Truncate(b.Len() - 2) + } + b.WriteString("], ") + } + } + } + + if debug { + update := b.String() + update = strings.TrimSuffix(update, ", ") // trim trailing comma and space after final topic + g.cfg.logger.Log(LogLevelDebug, "updated uncommitted", "group", g.cfg.group, "to", update) + } +} + +// Called at the start of PollXyz only if autocommitting is enabled and we are +// not committing greedily, this ensures that when we enter poll, everything +// previously consumed is a candidate for autocommitting. +func (g *groupConsumer) undirtyUncommitted() { + if g == nil { + return + } + // Disabling autocommit means we do not use the dirty offset: we always + // update head, and then manual commits use that. + if g.cfg.autocommitDisable { + return + } + // Greedy autocommitting does not use dirty offsets, because we always + // just set head to the latest. + if g.cfg.autocommitGreedy { + return + } + // If we are autocommitting marked records only, then we do not + // automatically un-dirty our offsets. 
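+ // (When using marks, head advances only via MarkCommitRecords or
+ // MarkCommitOffsets, not on poll.)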
+ if g.cfg.autocommitMarks { + return + } + + g.mu.Lock() + defer g.mu.Unlock() + + for _, partitions := range g.uncommitted { + for partition, uncommit := range partitions { + if uncommit.dirty != uncommit.head { + uncommit.head = uncommit.dirty + partitions[partition] = uncommit + } + } + } +} + +// updateCommitted updates the group's uncommitted map. This function triply +// verifies that the resp matches the req as it should and that the req does +// not somehow contain more than what is in our uncommitted map. +func (g *groupConsumer) updateCommitted( + req *kmsg.OffsetCommitRequest, + resp *kmsg.OffsetCommitResponse, +) { + g.mu.Lock() + defer g.mu.Unlock() + + if req.Generation != g.memberGen.generation() { + return + } + if g.uncommitted == nil { + g.cfg.logger.Log(LogLevelWarn, "received an OffsetCommitResponse after our group session has ended, unable to handle this (were we kicked from the group?)") + return + } + if len(req.Topics) != len(resp.Topics) { // bad kafka + g.cfg.logger.Log(LogLevelError, fmt.Sprintf("broker replied to our OffsetCommitRequest incorrectly! Num topics in request: %d, in reply: %d, we cannot handle this!", len(req.Topics), len(resp.Topics)), "group", g.cfg.group) + return + } + + sort.Slice(req.Topics, func(i, j int) bool { + return req.Topics[i].Topic < req.Topics[j].Topic + }) + sort.Slice(resp.Topics, func(i, j int) bool { + return resp.Topics[i].Topic < resp.Topics[j].Topic + }) + + var b bytes.Buffer + debug := g.cfg.logger.Level() >= LogLevelDebug + + for i := range resp.Topics { + reqTopic := &req.Topics[i] + respTopic := &resp.Topics[i] + topic := g.uncommitted[respTopic.Topic] + if topic == nil || // just in case + reqTopic.Topic != respTopic.Topic || // bad kafka + len(reqTopic.Partitions) != len(respTopic.Partitions) { // same + g.cfg.logger.Log(LogLevelError, fmt.Sprintf("broker replied to our OffsetCommitRequest incorrectly! Topic at request index %d: %s, reply at index: %s; num partitions on request topic: %d, in reply: %d, we cannot handle this!", i, reqTopic.Topic, respTopic.Topic, len(reqTopic.Partitions), len(respTopic.Partitions)), "group", g.cfg.group) + continue + } + + sort.Slice(reqTopic.Partitions, func(i, j int) bool { + return reqTopic.Partitions[i].Partition < reqTopic.Partitions[j].Partition + }) + sort.Slice(respTopic.Partitions, func(i, j int) bool { + return respTopic.Partitions[i].Partition < respTopic.Partitions[j].Partition + }) + + if debug { + fmt.Fprintf(&b, "%s[", respTopic.Topic) + } + for i := range respTopic.Partitions { + reqPart := &reqTopic.Partitions[i] + respPart := &respTopic.Partitions[i] + uncommit, exists := topic[respPart.Partition] + if !exists { // just in case + continue + } + if reqPart.Partition != respPart.Partition { // bad kafka + g.cfg.logger.Log(LogLevelError, fmt.Sprintf("broker replied to our OffsetCommitRequest incorrectly! 
Topic %s partition %d != resp partition %d", reqTopic.Topic, reqPart.Partition, respPart.Partition), "group", g.cfg.group) + continue + } + if respPart.ErrorCode != 0 { + g.cfg.logger.Log(LogLevelWarn, "unable to commit offset for topic partition", + "group", g.cfg.group, + "topic", reqTopic.Topic, + "partition", reqPart.Partition, + "commit_from", uncommit.committed.Offset, + "commit_to", reqPart.Offset, + "commit_epoch", reqPart.LeaderEpoch, + "error_code", respPart.ErrorCode, + ) + continue + } + + if debug { + fmt.Fprintf(&b, "%d{%d=>%d}, ", reqPart.Partition, uncommit.committed.Offset, reqPart.Offset) + } + + set := EpochOffset{ + reqPart.LeaderEpoch, + reqPart.Offset, + } + uncommit.committed = set + + // head is set in four places: + // (1) if manually committing or greedily autocommitting, + // then head is bumped on poll + // (2) if autocommitting normally, then head is bumped + // to the prior poll on poll + // (3) if using marks, head is bumped on mark + // (4) here, and we can be here on autocommit or on + // manual commit (usually manual in an onRevoke) + // + // head is usually at or past the commit: usually, head + // is used to build the commit itself. However, in case 4 + // when the user manually commits in onRevoke, the user + // is likely committing with UncommittedOffsets, i.e., + // the dirty offsets that are past the current head. + // We want to ensure we forward the head so that using + // it later does not rewind the manual commit. + // + // This does not affect the first case, because dirty == head, + // and manually committing dirty changes nothing. + // + // This does not affect the second case, because effectively, + // this is just bumping head early (dirty == head, no change). + // + // This *could* affect the third case, because an + // autocommit could begin, followed by a mark rewind, + // followed by autocommit completion. We document that + // using marks to rewind is not recommended. + // + // The user could also muck the offsets with SetOffsets. + // We document that concurrent committing is not encouraged, + // we do not attempt to guard past that. + // + // w.r.t. leader epoch's, we document that modifying + // leader epoch's is not recommended. 
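+ // For example (illustrative offsets): if head is 100 and the user's
+ // onRevoked callback commits the dirty offset 150, we forward head to
+ // 150 here so that a later commit built from head cannot rewind the
+ // partition back to 100.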
+ if uncommit.head.Less(set) { + uncommit.head = set + } + + topic[respPart.Partition] = uncommit + } + + if debug { + if bytes.HasSuffix(b.Bytes(), []byte(", ")) { + b.Truncate(b.Len() - 2) + } + b.WriteString("], ") + } + } + + if debug { + update := b.String() + update = strings.TrimSuffix(update, ", ") // trim trailing comma and space after final topic + g.cfg.logger.Log(LogLevelDebug, "updated committed", "group", g.cfg.group, "to", update) + } +} + +func (g *groupConsumer) defaultCommitCallback(_ *Client, _ *kmsg.OffsetCommitRequest, resp *kmsg.OffsetCommitResponse, err error) { + if err != nil { + if !errors.Is(err, context.Canceled) { + g.cfg.logger.Log(LogLevelError, "default commit failed", "group", g.cfg.group, "err", err) + } else { + g.cfg.logger.Log(LogLevelDebug, "default commit canceled", "group", g.cfg.group) + } + return + } + for _, topic := range resp.Topics { + for _, partition := range topic.Partitions { + if err := kerr.ErrorForCode(partition.ErrorCode); err != nil { + g.cfg.logger.Log(LogLevelError, "in default commit: unable to commit offsets for topic partition", + "group", g.cfg.group, + "topic", topic.Topic, + "partition", partition.Partition, + "error", err) + } + } + } +} + +func (g *groupConsumer) loopCommit() { + ticker := time.NewTicker(g.cfg.autocommitInterval) + defer ticker.Stop() + + for { + select { + case <-ticker.C: + case <-g.ctx.Done(): + return + } + + // We use the group context for the default autocommit; revokes + // use the client context so that we can be sure we commit even + // after the group context is canceled (which is the first + // thing that happens so as to quit the manage loop before + // leaving a group). + // + // We always commit only the head. If we are autocommitting + // dirty, then updateUncommitted updates the head to dirty + // offsets. + g.noCommitDuringJoinAndSync.RLock() + g.mu.Lock() + if !g.blockAuto { + uncommitted := g.getUncommittedLocked(true, false) + if len(uncommitted) == 0 { + g.cfg.logger.Log(LogLevelDebug, "skipping autocommit due to no offsets to commit", "group", g.cfg.group) + g.noCommitDuringJoinAndSync.RUnlock() + } else { + g.cfg.logger.Log(LogLevelDebug, "autocommitting", "group", g.cfg.group) + g.commit(g.ctx, uncommitted, func(cl *Client, req *kmsg.OffsetCommitRequest, resp *kmsg.OffsetCommitResponse, err error) { + g.noCommitDuringJoinAndSync.RUnlock() + g.cfg.commitCallback(cl, req, resp, err) + }) + } + } else { + g.noCommitDuringJoinAndSync.RUnlock() + } + g.mu.Unlock() + } +} + +// For SetOffsets, the gist of what follows: +// +// We need to set uncommitted.committed; that is the guarantee of this +// function. However, if, for everything we are setting, the head equals the +// commit, then we do not need to actually invalidate our current assignments. +// This is a great optimization for transactions that are resetting their state +// on abort. +func (g *groupConsumer) getSetAssigns(setOffsets map[string]map[int32]EpochOffset) (assigns map[string]map[int32]Offset) { + g.mu.Lock() + defer g.mu.Unlock() + + groupTopics := g.tps.load() + + if g.uncommitted == nil { + g.uncommitted = make(uncommitted) + } + for topic, partitions := range setOffsets { + if !groupTopics.hasTopic(topic) { + continue // trying to set a topic that was not assigned... 
+ } + topicUncommitted := g.uncommitted[topic] + if topicUncommitted == nil { + topicUncommitted = make(map[int32]uncommit) + g.uncommitted[topic] = topicUncommitted + } + var topicAssigns map[int32]Offset + for partition, epochOffset := range partitions { + current, exists := topicUncommitted[partition] + topicUncommitted[partition] = uncommit{ + dirty: epochOffset, + head: epochOffset, + committed: epochOffset, + } + if exists && current.dirty == epochOffset { + continue + } else if topicAssigns == nil { + topicAssigns = make(map[int32]Offset, len(partitions)) + } + topicAssigns[partition] = Offset{ + at: epochOffset.Offset, + epoch: epochOffset.Epoch, + } + } + if len(topicAssigns) > 0 { + if assigns == nil { + assigns = make(map[string]map[int32]Offset, 10) + } + assigns[topic] = topicAssigns + } + } + + return assigns +} + +// UncommittedOffsets returns the latest uncommitted offsets. Uncommitted +// offsets are always updated on calls to PollFetches. +// +// If there are no uncommitted offsets, this returns nil. +func (cl *Client) UncommittedOffsets() map[string]map[int32]EpochOffset { + if g := cl.consumer.g; g != nil { + return g.getUncommitted(true) + } + return nil +} + +// MarkedOffsets returns the latest marked offsets. When autocommitting, a +// marked offset is an offset that can be committed, in comparison to a dirty +// offset that cannot yet be committed. MarkedOffsets returns nil if you are +// not using AutoCommitMarks. +func (cl *Client) MarkedOffsets() map[string]map[int32]EpochOffset { + g := cl.consumer.g + if g == nil || !cl.cfg.autocommitMarks { + return nil + } + return g.getUncommitted(false) +} + +// CommittedOffsets returns the latest committed offsets. Committed offsets are +// updated from commits or from joining a group and fetching offsets. +// +// If there are no committed offsets, this returns nil. +func (cl *Client) CommittedOffsets() map[string]map[int32]EpochOffset { + g := cl.consumer.g + if g == nil { + return nil + } + g.mu.Lock() + defer g.mu.Unlock() + + return g.getUncommittedLocked(false, false) +} + +func (g *groupConsumer) getUncommitted(dirty bool) map[string]map[int32]EpochOffset { + g.mu.Lock() + defer g.mu.Unlock() + return g.getUncommittedLocked(true, dirty) +} + +func (g *groupConsumer) getUncommittedLocked(head, dirty bool) map[string]map[int32]EpochOffset { + if g.uncommitted == nil { + return nil + } + + var uncommitted map[string]map[int32]EpochOffset + for topic, partitions := range g.uncommitted { + var topicUncommitted map[int32]EpochOffset + for partition, uncommit := range partitions { + if head && (dirty && uncommit.dirty == uncommit.committed || !dirty && uncommit.head == uncommit.committed) { + continue + } + if topicUncommitted == nil { + if uncommitted == nil { + uncommitted = make(map[string]map[int32]EpochOffset, len(g.uncommitted)) + } + topicUncommitted = uncommitted[topic] + if topicUncommitted == nil { + topicUncommitted = make(map[int32]EpochOffset, len(partitions)) + uncommitted[topic] = topicUncommitted + } + } + if head { + if dirty { + topicUncommitted[partition] = uncommit.dirty + } else { + topicUncommitted[partition] = uncommit.head + } + } else { + topicUncommitted[partition] = uncommit.committed + } + } + } + return uncommitted +} + +type commitContextFnT struct{} + +var commitContextFn commitContextFnT + +// PreCommitFnContext attaches fn to the context through WithValue. Using the +// context while committing allows fn to be called just before the commit is +// issued. 
This can be used to modify the actual commit, such as by associating +// metadata with partitions. If fn returns an error, the commit is not +// attempted. +func PreCommitFnContext(ctx context.Context, fn func(*kmsg.OffsetCommitRequest) error) context.Context { + return context.WithValue(ctx, commitContextFn, fn) +} + +type txnCommitContextFnT struct{} + +var txnCommitContextFn txnCommitContextFnT + +// PreTxnCommitFnContext attaches fn to the context through WithValue. Using +// the context while committing a transaction allows fn to be called just +// before the commit is issued. This can be used to modify the actual commit, +// such as by associating metadata with partitions (for transactions, the +// default internal metadata is the client's current member ID). If fn returns +// an error, the commit is not attempted. This context can be used in either +// GroupTransactSession.End or in Client.EndTransaction. +func PreTxnCommitFnContext(ctx context.Context, fn func(*kmsg.TxnOffsetCommitRequest) error) context.Context { + return context.WithValue(ctx, txnCommitContextFn, fn) +} + +// CommitRecords issues a synchronous offset commit for the offsets contained +// within rs. Retryable errors are retried up to the configured retry limit, +// and any unretryable error is returned. +// +// This function is useful as a simple way to commit offsets if you have +// disabled autocommitting. As an alternative if you always want to commit +// everything, see CommitUncommittedOffsets. +// +// Simple usage of this function may lead to duplicate records if a consumer +// group rebalance occurs before or while this function is being executed. You +// can avoid this scenario by calling CommitRecords in a custom +// OnPartitionsRevoked, but for most workloads, a small bit of potential +// duplicate processing is fine. See the documentation on DisableAutoCommit +// for more details. You can also avoid this problem by using +// BlockRebalanceOnPoll, but that option comes with its own tradeoffs (refer to +// its documentation). +// +// It is recommended to always commit records in order (per partition). If you +// call this function twice with record for partition 0 at offset 999 +// initially, and then with record for partition 0 at offset 4, you will rewind +// your commit. +// +// A use case for this function may be to partially process a batch of records, +// commit, and then continue to process the rest of the records. It is not +// recommended to call this for every record processed in a high throughput +// scenario, because you do not want to unnecessarily increase load on Kafka. +// +// If you do not want to wait for this function to complete before continuing +// processing records, you can call this function in a goroutine. +func (cl *Client) CommitRecords(ctx context.Context, rs ...*Record) error { + // First build the offset commit map. We favor the latest epoch, then + // offset, if any records map to the same topic / partition. 
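+ // For example (illustrative values): two records for partition 3, one
+ // at (epoch 5, offset 10) and one at (epoch 5, offset 4), result in a
+ // commit of (epoch 5, offset 11), one past the record with the highest
+ // epoch and offset.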
+ offsets := make(map[string]map[int32]EpochOffset) + for _, r := range rs { + toffsets := offsets[r.Topic] + if toffsets == nil { + toffsets = make(map[int32]EpochOffset) + offsets[r.Topic] = toffsets + } + + if at, exists := toffsets[r.Partition]; exists { + if at.Epoch > r.LeaderEpoch || at.Epoch == r.LeaderEpoch && at.Offset > r.Offset { + continue + } + } + toffsets[r.Partition] = EpochOffset{ + r.LeaderEpoch, + r.Offset + 1, // need to advice to next offset to move forward + } + } + + var rerr error // return error + + // Our client retries an OffsetCommitRequest as necessary if the first + // response partition has a retryable group error (group coordinator + // loading, etc), so any partition error is fatal. + cl.CommitOffsetsSync(ctx, offsets, func(_ *Client, _ *kmsg.OffsetCommitRequest, resp *kmsg.OffsetCommitResponse, err error) { + if err != nil { + rerr = err + return + } + + for _, topic := range resp.Topics { + for _, partition := range topic.Partitions { + if err := kerr.ErrorForCode(partition.ErrorCode); err != nil { + rerr = err + return + } + } + } + }) + + return rerr +} + +// MarkCommitRecords marks records to be available for autocommitting. This +// function is only useful if you use the AutoCommitMarks config option, see +// the documentation on that option for more details. This function does not +// allow rewinds. +func (cl *Client) MarkCommitRecords(rs ...*Record) { + g := cl.consumer.g + if g == nil || !cl.cfg.autocommitMarks { + return + } + + sort.Slice(rs, func(i, j int) bool { + return rs[i].Topic < rs[j].Topic || + rs[i].Topic == rs[j].Topic && rs[i].Partition < rs[j].Partition + }) + + // protect g.uncommitted map + g.mu.Lock() + defer g.mu.Unlock() + + if g.uncommitted == nil { + g.uncommitted = make(uncommitted) + } + var curTopic string + var curPartitions map[int32]uncommit + for _, r := range rs { + if curPartitions == nil || r.Topic != curTopic { + curPartitions = g.uncommitted[r.Topic] + if curPartitions == nil { + curPartitions = make(map[int32]uncommit) + g.uncommitted[r.Topic] = curPartitions + } + curTopic = r.Topic + } + + current := curPartitions[r.Partition] + if newHead := (EpochOffset{ + r.LeaderEpoch, + r.Offset + 1, + }); current.head.Less(newHead) { + curPartitions[r.Partition] = uncommit{ + dirty: current.dirty, + committed: current.committed, + head: newHead, + } + } + } +} + +// MarkCommitOffsets marks offsets to be available for autocommitting. This +// function is only useful if you use the AutoCommitMarks config option, see +// the documentation on that option for more details. This function does not +// allow rewinds. +func (cl *Client) MarkCommitOffsets(unmarked map[string]map[int32]EpochOffset) { + g := cl.consumer.g + if g == nil || !cl.cfg.autocommitMarks { + return + } + + // protect g.uncommitted map + g.mu.Lock() + defer g.mu.Unlock() + + if g.uncommitted == nil { + g.uncommitted = make(uncommitted) + } + + for topic, partitions := range unmarked { + curPartitions := g.uncommitted[topic] + if curPartitions == nil { + curPartitions = make(map[int32]uncommit) + g.uncommitted[topic] = curPartitions + } + + for partition, newHead := range partitions { + current := curPartitions[partition] + if current.head.Less(newHead) { + curPartitions[partition] = uncommit{ + dirty: current.dirty, + committed: current.committed, + head: newHead, + } + } + } + } +} + +// CommitUncommittedOffsets issues a synchronous offset commit for any +// partition that has been consumed from that has uncommitted offsets. 
+// Retryable errors are retried up to the configured retry limit, and any +// unretryable error is returned. +// +// The recommended pattern for using this function is to have a poll / process +// / commit loop. First PollFetches, then process every record, then call +// CommitUncommittedOffsets. +// +// As an alternative if you want to commit specific records, see CommitRecords. +func (cl *Client) CommitUncommittedOffsets(ctx context.Context) error { + // This function is just the tail end of CommitRecords just above. + return cl.commitOffsets(ctx, cl.UncommittedOffsets()) +} + +// CommitMarkedOffsets issues a synchronous offset commit for any partition +// that has been consumed from that has marked offsets. Retryable errors are +// retried up to the configured retry limit, and any unretryable error is +// returned. +// +// This function is only useful if you have marked offsets with +// MarkCommitRecords when using AutoCommitMarks, otherwise this is a no-op. +// +// The recommended pattern for using this function is to have a poll / process +// / commit loop. First PollFetches, then process every record, +// call MarkCommitRecords for the records you wish the commit and then call +// CommitMarkedOffsets. +// +// As an alternative if you want to commit specific records, see CommitRecords. +func (cl *Client) CommitMarkedOffsets(ctx context.Context) error { + // This function is just the tail end of CommitRecords just above. + marked := cl.MarkedOffsets() + if len(marked) == 0 { + return nil + } + return cl.commitOffsets(ctx, marked) +} + +func (cl *Client) commitOffsets(ctx context.Context, offsets map[string]map[int32]EpochOffset) error { + var rerr error + cl.CommitOffsetsSync(ctx, offsets, func(_ *Client, _ *kmsg.OffsetCommitRequest, resp *kmsg.OffsetCommitResponse, err error) { + if err != nil { + rerr = err + return + } + + for _, topic := range resp.Topics { + for _, partition := range topic.Partitions { + if err := kerr.ErrorForCode(partition.ErrorCode); err != nil { + rerr = err + return + } + } + } + }) + return rerr +} + +// CommitOffsetsSync cancels any active CommitOffsets, begins a commit that +// cannot be canceled, and waits for that commit to complete. This function +// will not return until the commit is done and the onDone callback is +// complete. +// +// The purpose of this function is for use in OnPartitionsRevoked or committing +// before leaving a group, because you do not want to have a commit issued in +// OnPartitionsRevoked canceled. +// +// This is an advanced function, and for simpler, more easily understandable +// committing, see CommitRecords and CommitUncommittedOffsets. +// +// For more information about committing and committing asynchronously, see +// CommitOffsets. +func (cl *Client) CommitOffsetsSync( + ctx context.Context, + uncommitted map[string]map[int32]EpochOffset, + onDone func(*Client, *kmsg.OffsetCommitRequest, *kmsg.OffsetCommitResponse, error), +) { + if onDone == nil { + onDone = func(*Client, *kmsg.OffsetCommitRequest, *kmsg.OffsetCommitResponse, error) {} + } + + g := cl.consumer.g + if g == nil { + onDone(cl, kmsg.NewPtrOffsetCommitRequest(), kmsg.NewPtrOffsetCommitResponse(), errNotGroup) + return + } + if len(uncommitted) == 0 { + onDone(cl, kmsg.NewPtrOffsetCommitRequest(), kmsg.NewPtrOffsetCommitResponse(), nil) + return + } + g.commitOffsetsSync(ctx, uncommitted, onDone) +} + +// waitJoinSyncMu is a rather insane way to try to grab a lock, but also return +// early if we have to wait and the context is canceled. 
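+//
+// Roughly: we TryRLock first; failing that, we RLock in a goroutine and race
+// the acquisition against the context. On an early (context) return, whichever
+// side runs maybeRUnlock second releases the read lock, so it is never leaked.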
+func (g *groupConsumer) waitJoinSyncMu(ctx context.Context) error { + if g.noCommitDuringJoinAndSync.TryRLock() { + g.cfg.logger.Log(LogLevelDebug, "grabbed join/sync mu on first try") + return nil + } + + var ( + blockJoinSyncCh = make(chan struct{}) + mu sync.Mutex + returned bool + maybeRUnlock = func() { + mu.Lock() + defer mu.Unlock() + if returned { + g.noCommitDuringJoinAndSync.RUnlock() + } + returned = true + } + ) + + go func() { + g.noCommitDuringJoinAndSync.RLock() + close(blockJoinSyncCh) + maybeRUnlock() + }() + + select { + case <-blockJoinSyncCh: + g.cfg.logger.Log(LogLevelDebug, "grabbed join/sync mu after waiting") + return nil + case <-ctx.Done(): + g.cfg.logger.Log(LogLevelDebug, "not grabbing mu because context canceled") + maybeRUnlock() + return ctx.Err() + } +} + +func (g *groupConsumer) commitOffsetsSync( + ctx context.Context, + uncommitted map[string]map[int32]EpochOffset, + onDone func(*Client, *kmsg.OffsetCommitRequest, *kmsg.OffsetCommitResponse, error), +) { + g.cfg.logger.Log(LogLevelDebug, "in CommitOffsetsSync", "group", g.cfg.group, "with", uncommitted) + defer g.cfg.logger.Log(LogLevelDebug, "left CommitOffsetsSync", "group", g.cfg.group) + + done := make(chan struct{}) + defer func() { <-done }() + + if onDone == nil { + onDone = func(*Client, *kmsg.OffsetCommitRequest, *kmsg.OffsetCommitResponse, error) {} + } + + if err := g.waitJoinSyncMu(ctx); err != nil { + onDone(g.cl, kmsg.NewPtrOffsetCommitRequest(), kmsg.NewPtrOffsetCommitResponse(), err) + close(done) + return + } + + g.syncCommitMu.Lock() // block all other concurrent commits until our OnDone is done. + unblockCommits := func(cl *Client, req *kmsg.OffsetCommitRequest, resp *kmsg.OffsetCommitResponse, err error) { + g.noCommitDuringJoinAndSync.RUnlock() + defer close(done) + defer g.syncCommitMu.Unlock() + onDone(cl, req, resp, err) + } + + g.mu.Lock() + defer g.mu.Unlock() + + g.blockAuto = true + unblockAuto := func(cl *Client, req *kmsg.OffsetCommitRequest, resp *kmsg.OffsetCommitResponse, err error) { + unblockCommits(cl, req, resp, err) + g.mu.Lock() + defer g.mu.Unlock() + g.blockAuto = false + } + + g.commit(ctx, uncommitted, unblockAuto) +} + +// CommitOffsets commits the given offsets for a group, calling onDone with the +// commit request and either the response or an error if the response was not +// issued. If uncommitted is empty or the client is not consuming as a group, +// onDone is called with (nil, nil, nil) and this function returns immediately. +// It is OK if onDone is nil, but you will not know if your commit succeeded. +// +// This is an advanced function and is difficult to use correctly. For simpler, +// more easily understandable committing, see CommitRecords and +// CommitUncommittedOffsets. +// +// This function itself does not wait for the commit to finish. By default, +// this function is an asynchronous commit. You can use onDone to make it sync. +// If autocommitting is enabled, this function blocks autocommitting until this +// function is complete and the onDone has returned. +// +// It is invalid to use this function to commit offsets for a transaction. +// +// Note that this function ensures absolute ordering of commit requests by +// canceling prior requests and ensuring they are done before executing a new +// one. This means, for absolute control, you can use this function to +// periodically commit async and then issue a final sync commit before quitting +// (this is the behavior of autocommiting and using the default revoke). 
This +// differs from the Java async commit, which does not retry requests to avoid +// trampling on future commits. +// +// It is highly recommended to check the response's partition's error codes if +// the response is non-nil. While unlikely, individual partitions can error. +// This is most likely to happen if a commit occurs too late in a rebalance +// event. +// +// Do not use this async CommitOffsets in OnPartitionsRevoked, instead use +// CommitOffsetsSync. If you commit async, the rebalance will proceed before +// this function executes, and you will commit offsets for partitions that have +// moved to a different consumer. +func (cl *Client) CommitOffsets( + ctx context.Context, + uncommitted map[string]map[int32]EpochOffset, + onDone func(*Client, *kmsg.OffsetCommitRequest, *kmsg.OffsetCommitResponse, error), +) { + cl.cfg.logger.Log(LogLevelDebug, "in CommitOffsets", "with", uncommitted) + defer cl.cfg.logger.Log(LogLevelDebug, "left CommitOffsets") + if onDone == nil { + onDone = func(*Client, *kmsg.OffsetCommitRequest, *kmsg.OffsetCommitResponse, error) {} + } + + g := cl.consumer.g + if g == nil { + onDone(cl, kmsg.NewPtrOffsetCommitRequest(), kmsg.NewPtrOffsetCommitResponse(), errNotGroup) + return + } + if len(uncommitted) == 0 { + onDone(cl, kmsg.NewPtrOffsetCommitRequest(), kmsg.NewPtrOffsetCommitResponse(), nil) + return + } + + if err := g.waitJoinSyncMu(ctx); err != nil { + onDone(g.cl, kmsg.NewPtrOffsetCommitRequest(), kmsg.NewPtrOffsetCommitResponse(), err) + return + } + + g.syncCommitMu.RLock() // block sync commit, but allow other concurrent Commit to cancel us + unblockJoinSync := func(cl *Client, req *kmsg.OffsetCommitRequest, resp *kmsg.OffsetCommitResponse, err error) { + g.noCommitDuringJoinAndSync.RUnlock() + defer g.syncCommitMu.RUnlock() + onDone(cl, req, resp, err) + } + + g.mu.Lock() + defer g.mu.Unlock() + + g.blockAuto = true + unblockAuto := func(cl *Client, req *kmsg.OffsetCommitRequest, resp *kmsg.OffsetCommitResponse, err error) { + unblockJoinSync(cl, req, resp, err) + g.mu.Lock() + defer g.mu.Unlock() + g.blockAuto = false + } + + g.commit(ctx, uncommitted, unblockAuto) +} + +// defaultRevoke commits the last fetched offsets and waits for the commit to +// finish. This is the default onRevoked function which, when combined with the +// default autocommit, ensures we never miss committing everything. +// +// Note that the heartbeat loop invalidates all buffered, unpolled fetches +// before revoking, meaning this truly will commit all polled fetches. +func (g *groupConsumer) defaultRevoke(context.Context, *Client, map[string][]int32) { + if !g.cfg.autocommitDisable { + // We use the client's context rather than the group context, + // because this could come from the group being left. The group + // context will already be canceled. + g.commitOffsetsSync(g.cl.ctx, g.getUncommitted(false), g.cfg.commitCallback) + } +} + +// The actual logic to commit. This is called under two locks: +// - g.noCommitDuringJoinAndSync.RLock() +// - g.mu.Lock() +// +// By blocking the JoinGroup from being issued, or blocking the commit on join +// & sync finishing, we avoid RebalanceInProgress and IllegalGeneration. The +// former error happens if a commit arrives to the broker between the two, the +// latter error happens when a commit arrives to the broker with the old +// generation (it was in flight before sync finished). +// +// Practically, what this means is that a user's commits will be blocked if +// they try to commit between join and sync. 
+// +// For eager consuming, the user should not have any partitions to commit +// anyway. For cooperative consuming, a rebalance can happen after at any +// moment. We block only revokation aspects of rebalances with +// BlockRebalanceOnPoll; we want to allow the cooperative part of rebalancing +// to occur. +func (g *groupConsumer) commit( + ctx context.Context, + uncommitted map[string]map[int32]EpochOffset, + onDone func(*Client, *kmsg.OffsetCommitRequest, *kmsg.OffsetCommitResponse, error), +) { + // The user could theoretically give us topics that have no partitions + // to commit. We strip those: Kafka does not reply to them, and we + // expect all partitions in our request to be replied to in + // updateCommitted. If any topic is empty, we deeply clone and then + // strip everything empty. See #186. + var clone bool + for _, ps := range uncommitted { + if len(ps) == 0 { + clone = true + break + } + } + if clone { + dup := make(map[string]map[int32]EpochOffset, len(uncommitted)) + for t, ps := range uncommitted { + if len(ps) == 0 { + continue + } + dupPs := make(map[int32]EpochOffset, len(ps)) + dup[t] = dupPs + for p, eo := range ps { + dupPs[p] = eo + } + } + uncommitted = dup + } + + if len(uncommitted) == 0 { // only empty if called thru autocommit / default revoke + // We have to do this concurrently because the expectation is + // that commit itself does not block. + go onDone(g.cl, kmsg.NewPtrOffsetCommitRequest(), kmsg.NewPtrOffsetCommitResponse(), nil) + return + } + + priorCancel := g.commitCancel + priorDone := g.commitDone + + commitCtx, commitCancel := context.WithCancel(ctx) // enable ours to be canceled and waited for + commitDone := make(chan struct{}) + + g.commitCancel = commitCancel + g.commitDone = commitDone + + req := kmsg.NewPtrOffsetCommitRequest() + req.Group = g.cfg.group + memberID, generation := g.memberGen.load() + req.Generation = generation + req.MemberID = memberID + req.InstanceID = g.cfg.instanceID + + if ctx.Done() != nil { + go func() { + select { + case <-ctx.Done(): + commitCancel() + case <-commitCtx.Done(): + } + }() + } + + go func() { + defer close(commitDone) // allow future commits to continue when we are done + defer commitCancel() + if priorDone != nil { // wait for any prior request to finish + select { + case <-priorDone: + default: + g.cfg.logger.Log(LogLevelDebug, "canceling prior commit to issue another", "group", g.cfg.group) + priorCancel() + <-priorDone + } + } + g.cfg.logger.Log(LogLevelDebug, "issuing commit", "group", g.cfg.group, "uncommitted", uncommitted) + + for topic, partitions := range uncommitted { + reqTopic := kmsg.NewOffsetCommitRequestTopic() + reqTopic.Topic = topic + for partition, eo := range partitions { + reqPartition := kmsg.NewOffsetCommitRequestTopicPartition() + reqPartition.Partition = partition + reqPartition.Offset = eo.Offset + reqPartition.LeaderEpoch = eo.Epoch // KIP-320 + reqPartition.Metadata = &req.MemberID + reqTopic.Partitions = append(reqTopic.Partitions, reqPartition) + } + req.Topics = append(req.Topics, reqTopic) + } + + if fn, ok := ctx.Value(commitContextFn).(func(*kmsg.OffsetCommitRequest) error); ok { + if err := fn(req); err != nil { + onDone(g.cl, req, nil, err) + return + } + } + + resp, err := req.RequestWith(commitCtx, g.cl) + if err != nil { + onDone(g.cl, req, nil, err) + return + } + g.updateCommitted(req, resp) + onDone(g.cl, req, resp, nil) + }() +} + +type reNews struct { + added map[string][]string + skipped []string +} + +func (r *reNews) add(re, match string) { + if 
r.added == nil { + r.added = make(map[string][]string) + } + r.added[re] = append(r.added[re], match) +} + +func (r *reNews) skip(topic string) { + r.skipped = append(r.skipped, topic) +} + +func (r *reNews) log(cfg *cfg) { + if len(r.added) == 0 && len(r.skipped) == 0 { + return + } + var addeds []string + for re, matches := range r.added { + sort.Strings(matches) + addeds = append(addeds, fmt.Sprintf("%s[%s]", re, strings.Join(matches, " "))) + } + added := strings.Join(addeds, " ") + sort.Strings(r.skipped) + cfg.logger.Log(LogLevelInfo, "consumer regular expressions evaluated on new topics", "added", added, "evaluated_and_skipped", r.skipped) +} diff --git a/vendor/github.com/twmb/franz-go/pkg/kgo/errors.go b/vendor/github.com/twmb/franz-go/pkg/kgo/errors.go new file mode 100644 index 000000000000..3ff1dbfebe81 --- /dev/null +++ b/vendor/github.com/twmb/franz-go/pkg/kgo/errors.go @@ -0,0 +1,321 @@ +package kgo + +import ( + "context" + "errors" + "fmt" + "io" + "net" + "os" +) + +func isRetryableBrokerErr(err error) bool { + // The error could be nil if we are evaluating multiple errors at once, + // and only one is non-nil. The intent of this function is to evaluate + // whether an **error** is retryable, not a non-error. We return that + // nil is not retryable -- the calling code evaluating multiple errors + // at once would not call into this function if all errors were nil. + if err == nil { + return false + } + // https://github.com/golang/go/issues/45729 + // + // Temporary is relatively useless. We will still check for the + // temporary interface, and in all cases, even with timeouts, we want + // to retry. + // + // More generally, we will retry for any error that unwraps into an + // os.SyscallError. Looking at Go's net package, the error we care + // about is net.OpError. Looking into that further, any error that + // reaches into the operating system return a syscall error, which is + // then put in net.OpError's Err field as an os.SyscallError. There are + // a few non-os.SyscallError errors, these are where Go itself detects + // a hard failure. We do not retry those. + // + // We blanket retry os.SyscallError because a lot of the times, what + // appears as a hard failure can actually be retried. For example, a + // failed dial can be retried, maybe the resolver temporarily had a + // problem. + // + // We favor testing os.SyscallError first, because net.OpError _always_ + // implements Temporary, so if we test that first, it'll return false + // in many cases when we want to return true from os.SyscallError. + if se := (*os.SyscallError)(nil); errors.As(err, &se) { + // If a dial fails, potentially we could retry if the resolver + // had a temporary hiccup, but we will err on the side of this + // being a slightly less temporary error. + return !isDialNonTimeoutErr(err) + } + // EOF can be returned if a broker kills a connection unexpectedly, and + // we can retry that. Same for ErrClosed. + if errors.Is(err, net.ErrClosed) || errors.Is(err, io.EOF) { + return true + } + // We could have a retryable producer ID failure, which then bubbled up + // as errProducerIDLoadFail so as to be retried later. + if pe := (*errProducerIDLoadFail)(nil); errors.As(err, &pe) { + return true + } + // We could have chosen a broker, and then a concurrent metadata update + // could have removed it. + if errors.Is(err, errChosenBrokerDead) { + return true + } + // A broker kept giving us short sasl lifetimes, so we killed the + // connection ourselves. We can retry on a new connection. 
+ if errors.Is(err, errSaslReauthLoop) { + return true + } + // We really should not get correlation mismatch, but if we do, we can + // retry. + if errors.Is(err, errCorrelationIDMismatch) { + return true + } + // We sometimes load the controller before issuing requests, and the + // cluster may not yet be ready and will return -1 for the controller. + // We can backoff and retry and hope the cluster has stabilized. + if ce := (*errUnknownController)(nil); errors.As(err, &ce) { + return true + } + // Same thought for a non-existing coordinator. + if ce := (*errUnknownCoordinator)(nil); errors.As(err, &ce) { + return true + } + var tempErr interface{ Temporary() bool } + if errors.As(err, &tempErr) { + return tempErr.Temporary() + } + return false +} + +func isDialNonTimeoutErr(err error) bool { + var ne *net.OpError + return errors.As(err, &ne) && ne.Op == "dial" && !ne.Timeout() +} + +func isAnyDialErr(err error) bool { + var ne *net.OpError + return errors.As(err, &ne) && ne.Op == "dial" +} + +func isContextErr(err error) bool { + return errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) +} + +func isSkippableBrokerErr(err error) bool { + // Some broker errors are not retryable for the given broker itself, + // but we *could* skip the broker and try again on the next broker. For + // example, if the user input an invalid address and a valid address + // for seeds, when we fail dialing the first seed, we cannot retry that + // broker, but we can skip to the next. + // + // We take anything that returns an OpError that *is not* a context + // error deep inside. + if errors.Is(err, errUnknownBroker) { + return true + } + var ne *net.OpError + if errors.As(err, &ne) && !isContextErr(err) { + return true + } + return false +} + +var ( + ////////////// + // INTERNAL // -- when used multiple times or checked in different areas of the client + ////////////// + + // Returned when issuing a request to a broker that the client does not + // know about (maybe missing from metadata responses now). + errUnknownBroker = errors.New("unknown broker") + + // A temporary error returned when a broker chosen for a request is + // stopped due to a concurrent metadata response. + errChosenBrokerDead = errors.New("the internal broker struct chosen to issue this request has died--either the broker id is migrating or no longer exists") + + // If a broker repeatedly gives us tiny sasl lifetimes, we fail a + // request after a few tries to forcefully kill the connection and + // restart a new connection ourselves. + errSaslReauthLoop = errors.New("the broker is repeatedly giving us sasl lifetimes that are too short to write a request") + + // A temporary error returned when Kafka replies with a different + // correlation ID than we were expecting for the request the client + // issued. + // + // If this error happens, the client closes the broker connection. + errCorrelationIDMismatch = errors.New("correlation ID mismatch") + + // Returned when using a kmsg.Request with a key larger than kmsg.MaxKey. + errUnknownRequestKey = errors.New("request key is unknown") + + // Returned if a connection has loaded broker ApiVersions and knows + // that the broker cannot handle the request to-be-issued request. + errBrokerTooOld = errors.New("broker is too old; the broker has already indicated it will not know how to handle the request") + + // Returned when trying to call group functions when the client is not + // assigned a group. 
+ errNotGroup = errors.New("invalid group function call when not assigned a group") + + // Returned when trying to begin a transaction with a client that does + // not have a transactional ID. + errNotTransactional = errors.New("invalid attempt to begin a transaction with a non-transactional client") + + // Returned when trying to produce a record outside of a transaction. + errNotInTransaction = errors.New("cannot produce record transactionally if not in a transaction") + + errNoTopic = errors.New("cannot produce record with no topic and no default topic") + + // Returned for all buffered produce records when a user purges topics. + errPurged = errors.New("topic purged while buffered") + + errMissingMetadataPartition = errors.New("metadata update is missing a partition that we were previously using") + + errNoCommittedOffset = errors.New("partition has no prior committed offset") + + ////////////// + // EXTERNAL // + ////////////// + + // ErrRecordTimeout is passed to produce promises when records are + // unable to be produced within the RecordDeliveryTimeout. + ErrRecordTimeout = errors.New("records have timed out before they were able to be produced") + + // ErrRecordRetries is passed to produce promises when records are + // unable to be produced after RecordRetries attempts. + ErrRecordRetries = errors.New("record failed after being retried too many times") + + // ErrMaxBuffered is returned when the maximum amount of records are + // buffered and either manual flushing is enabled or you are using + // TryProduce. + ErrMaxBuffered = errors.New("the maximum amount of records are buffered, cannot buffer more") + + // ErrAborting is returned for all buffered records while + // AbortBufferedRecords is being called. + ErrAborting = errors.New("client is aborting buffered records") + + // ErrClientClosed is returned in various places when the client's + // Close function has been called. + // + // For producing, records are failed with this error. + // + // For consuming, a fake partition is injected into a poll response + // that has this error. + // + // For any request, the request is failed with this error. + ErrClientClosed = errors.New("client closed") +) + +// ErrFirstReadEOF is returned for responses that immediately error with +// io.EOF. This is the client's guess as to why a read from a broker is +// failing with io.EOF. Two cases are currently handled, +// +// - When the client is using TLS but brokers are not, brokers close +// connections immediately because the incoming request looks wrong. +// - When SASL is required but missing, brokers close connections immediately. +// +// There may be other reasons that an immediate io.EOF is encountered (perhaps +// the connection truly was severed before a response was received), but this +// error can help you quickly check common problems. 
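A sketch of how the exported produce errors above can be matched in a produce promise; the client cl and the topic name are placeholders, and which of these errors actually surfaces depends on producer configuration (for example, ErrMaxBuffered only appears with manual flushing or TryProduce).

package example

import (
    "context"
    "errors"
    "log"

    "github.com/twmb/franz-go/pkg/kgo"
)

// produceWithErrorHandling produces one record and classifies the promise error.
func produceWithErrorHandling(ctx context.Context, cl *kgo.Client) {
    cl.Produce(ctx, &kgo.Record{Topic: "example-topic", Value: []byte("hello")}, func(r *kgo.Record, err error) {
        switch {
        case err == nil:
            // delivered
        case errors.Is(err, kgo.ErrRecordTimeout), errors.Is(err, kgo.ErrRecordRetries):
            log.Printf("gave up delivering to %s: %v", r.Topic, err)
        case errors.Is(err, kgo.ErrMaxBuffered):
            log.Printf("producer buffer full: %v", err)
        case errors.Is(err, kgo.ErrClientClosed):
            // shutting down, nothing to do
        default:
            log.Printf("produce failed: %v", err)
        }
    })
}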
+type ErrFirstReadEOF struct { + kind uint8 + err error +} + +type errProducerIDLoadFail struct { + err error +} + +func (e *errProducerIDLoadFail) Error() string { + if e.err == nil { + return "unable to initialize a producer ID due to request failures" + } + return fmt.Sprintf("unable to initialize a producer ID due to request failures: %v", e.err) +} + +func (e *errProducerIDLoadFail) Unwrap() error { return e.err } + +const ( + firstReadSASL uint8 = iota + firstReadTLS +) + +func (e *ErrFirstReadEOF) Error() string { + switch e.kind { + case firstReadTLS: + return "broker closed the connection immediately after a dial, which happens if the client is using TLS when the broker is not expecting it: is TLS misconfigured on the client or the broker?" + default: // firstReadSASL + return "broker closed the connection immediately after a request was issued, which happens when SASL is required but not provided: is SASL missing?" + } +} + +// Unwrap returns io.EOF (or, if a custom dialer returned a wrapped io.EOF, +// this returns the custom dialer's wrapped error). +func (e *ErrFirstReadEOF) Unwrap() error { return e.err } + +// ErrDataLoss is returned for Kafka >=2.1 when data loss is detected and the +// client is able to reset to the last valid offset. +type ErrDataLoss struct { + // Topic is the topic data loss was detected on. + Topic string + // Partition is the partition data loss was detected on. + Partition int32 + // ConsumedTo is what the client had consumed to for this partition before + // data loss was detected. + ConsumedTo int64 + // ResetTo is what the client reset the partition to; everything from + // ResetTo to ConsumedTo was lost. + ResetTo int64 +} + +func (e *ErrDataLoss) Error() string { + return fmt.Sprintf("topic %s partition %d lost records;"+ + " the client consumed to offset %d but was reset to offset %d", + e.Topic, e.Partition, e.ConsumedTo, e.ResetTo) +} + +type errUnknownController struct { + id int32 +} + +func (e *errUnknownController) Error() string { + if e.id == -1 { + return "broker replied that the controller broker is not available" + } + return fmt.Sprintf("broker replied that the controller broker is %d,"+ + " but did not reply with that broker in the broker list", e.id) +} + +type errUnknownCoordinator struct { + coordinator int32 + key coordinatorKey +} + +func (e *errUnknownCoordinator) Error() string { + switch e.key.typ { + case coordinatorTypeGroup: + return fmt.Sprintf("broker replied that group %s has broker coordinator %d,"+ + " but did not reply with that broker in the broker list", + e.key.name, e.coordinator) + case coordinatorTypeTxn: + return fmt.Sprintf("broker replied that txn id %s has broker coordinator %d,"+ + " but did not reply with that broker in the broker list", + e.key.name, e.coordinator) + default: + return fmt.Sprintf("broker replied to an unknown coordinator key %s (type %d) that it has a broker coordinator %d,"+ + " but did not reply with that broker in the broker list", e.key.name, e.key.typ, e.coordinator) + } +} + +// ErrGroupSession is injected into a poll if an error occurred such that your +// consumer group member was kicked from the group or was never able to join +// the group. 
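A polling loop sketch that inspects the error types above; cl is assumed to be a group consumer, and which errors actually appear in a poll depends on broker version and client configuration.

package example

import (
    "context"
    "errors"
    "log"

    "github.com/twmb/franz-go/pkg/kgo"
)

// pollLoop polls until the client is closed, separating per-partition errors
// from records.
func pollLoop(ctx context.Context, cl *kgo.Client) {
    for {
        fetches := cl.PollFetches(ctx)
        if fetches.IsClientClosed() {
            return
        }
        fetches.EachError(func(topic string, partition int32, err error) {
            var dl *kgo.ErrDataLoss
            var gs *kgo.ErrGroupSession
            switch {
            case errors.As(err, &dl):
                log.Printf("data loss on %s/%d: consumed to %d, reset to %d", dl.Topic, dl.Partition, dl.ConsumedTo, dl.ResetTo)
            case errors.As(err, &gs):
                log.Printf("group session failed: %v", gs)
            default:
                log.Printf("fetch error on %s/%d: %v", topic, partition, err)
            }
        })
        fetches.EachRecord(func(r *kgo.Record) {
            _ = r // process the record
        })
    }
}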
+type ErrGroupSession struct { + err error +} + +func (e *ErrGroupSession) Error() string { + return fmt.Sprintf("unable to join group session: %v", e.err) +} + +func (e *ErrGroupSession) Unwrap() error { return e.err } diff --git a/vendor/github.com/twmb/franz-go/pkg/kgo/go118.go b/vendor/github.com/twmb/franz-go/pkg/kgo/go118.go new file mode 100644 index 000000000000..483c3e912772 --- /dev/null +++ b/vendor/github.com/twmb/franz-go/pkg/kgo/go118.go @@ -0,0 +1,57 @@ +//go:build !go1.19 +// +build !go1.19 + +package kgo + +import "sync/atomic" + +type atomicBool uint32 + +func (b *atomicBool) Store(v bool) { + if v { + atomic.StoreUint32((*uint32)(b), 1) + } else { + atomic.StoreUint32((*uint32)(b), 0) + } +} + +func (b *atomicBool) Load() bool { return atomic.LoadUint32((*uint32)(b)) == 1 } + +func (b *atomicBool) Swap(v bool) bool { + var swap uint32 + if v { + swap = 1 + } + return atomic.SwapUint32((*uint32)(b), swap) == 1 +} + +type atomicI32 int32 + +func (v *atomicI32) Add(s int32) int32 { return atomic.AddInt32((*int32)(v), s) } +func (v *atomicI32) Store(s int32) { atomic.StoreInt32((*int32)(v), s) } +func (v *atomicI32) Load() int32 { return atomic.LoadInt32((*int32)(v)) } +func (v *atomicI32) Swap(s int32) int32 { return atomic.SwapInt32((*int32)(v), s) } + +type atomicU32 uint32 + +func (v *atomicU32) Add(s uint32) uint32 { return atomic.AddUint32((*uint32)(v), s) } +func (v *atomicU32) Store(s uint32) { atomic.StoreUint32((*uint32)(v), s) } +func (v *atomicU32) Load() uint32 { return atomic.LoadUint32((*uint32)(v)) } +func (v *atomicU32) Swap(s uint32) uint32 { return atomic.SwapUint32((*uint32)(v), s) } +func (v *atomicU32) CompareAndSwap(old, new uint32) bool { + return atomic.CompareAndSwapUint32((*uint32)(v), old, new) +} + +type atomicI64 int64 + +func (v *atomicI64) Add(s int64) int64 { return atomic.AddInt64((*int64)(v), s) } +func (v *atomicI64) Store(s int64) { atomic.StoreInt64((*int64)(v), s) } +func (v *atomicI64) Load() int64 { return atomic.LoadInt64((*int64)(v)) } +func (v *atomicI64) Swap(s int64) int64 { return atomic.SwapInt64((*int64)(v), s) } + +type atomicU64 uint64 + +func (v *atomicU64) Add(s uint64) uint64 { return atomic.AddUint64((*uint64)(v), s) } +func (v *atomicU64) Store(s uint64) { atomic.StoreUint64((*uint64)(v), s) } +func (v *atomicU64) Load() uint64 { return atomic.LoadUint64((*uint64)(v)) } +func (v *atomicU64) Swap(s uint64) uint64 { return atomic.SwapUint64((*uint64)(v), s) } diff --git a/vendor/github.com/twmb/franz-go/pkg/kgo/go119.go b/vendor/github.com/twmb/franz-go/pkg/kgo/go119.go new file mode 100644 index 000000000000..7c8ade5e139a --- /dev/null +++ b/vendor/github.com/twmb/franz-go/pkg/kgo/go119.go @@ -0,0 +1,14 @@ +//go:build go1.19 +// +build go1.19 + +package kgo + +import "sync/atomic" + +type ( + atomicBool struct{ atomic.Bool } + atomicI32 struct{ atomic.Int32 } + atomicU32 struct{ atomic.Uint32 } + atomicI64 struct{ atomic.Int64 } + atomicU64 struct{ atomic.Uint64 } +) diff --git a/vendor/github.com/twmb/franz-go/pkg/kgo/group_balancer.go b/vendor/github.com/twmb/franz-go/pkg/kgo/group_balancer.go new file mode 100644 index 000000000000..85f31a5342a1 --- /dev/null +++ b/vendor/github.com/twmb/franz-go/pkg/kgo/group_balancer.go @@ -0,0 +1,959 @@ +package kgo + +import ( + "bytes" + "fmt" + "sort" + "strings" + + "github.com/twmb/franz-go/pkg/kerr" + "github.com/twmb/franz-go/pkg/kgo/internal/sticky" + "github.com/twmb/franz-go/pkg/kmsg" +) + +// GroupBalancer balances topics and partitions among group members. 
+// +// A GroupBalancer is roughly equivalent to Kafka's PartitionAssignor. +type GroupBalancer interface { + // ProtocolName returns the name of the protocol, e.g. roundrobin, + // range, sticky. + ProtocolName() string + + // JoinGroupMetadata returns the metadata to use in JoinGroup, given + // the topic interests and the current assignment and group generation. + // + // It is safe to modify the input topics and currentAssignment. The + // input topics are guaranteed to be sorted, as are the partitions for + // each topic in currentAssignment. It is recommended for your output + // to be ordered by topic and partitions. Since Kafka uses the output + // from this function to determine whether a rebalance is needed, a + // deterministic output will avoid accidental rebalances. + JoinGroupMetadata( + topicInterests []string, + currentAssignment map[string][]int32, + generation int32, + ) []byte + + // ParseSyncAssignment returns assigned topics and partitions from an + // encoded SyncGroupResponse's MemberAssignment. + ParseSyncAssignment(assignment []byte) (map[string][]int32, error) + + // MemberBalancer returns a GroupMemberBalancer for the given group + // members, as well as the topics that all the members are interested + // in. If the client does not have some topics in the returned topics, + // the client issues a metadata request to load the number of + // partitions in those topics before calling the GroupMemberBalancer's + // Balance function. + // + // The input group members are guaranteed to be sorted first by + // instance ID, if non-nil, and then by member ID. + // + // It is up to the user to decide how to decode each member's + // ProtocolMetadata field. The default client group protocol of + // "consumer" by default uses join group metadata's of type + // kmsg.ConsumerMemberMetadata. If this is the case for you, it may be + // useful to use the ConsumerBalancer type to help parse the metadata + // and balance. + // + // If the member metadata cannot be deserialized correctly, this should + // return a relevant error. + MemberBalancer(members []kmsg.JoinGroupResponseMember) (b GroupMemberBalancer, topics map[string]struct{}, err error) + + // IsCooperative returns if this is a cooperative balance strategy. + IsCooperative() bool +} + +// GroupMemberBalancer balances topics amongst group members. If your balancing +// can fail, you can implement GroupMemberBalancerOrError. +type GroupMemberBalancer interface { + // Balance balances topics and partitions among group members, where + // the int32 in the topics map corresponds to the number of partitions + // known to be in each topic. + Balance(topics map[string]int32) IntoSyncAssignment +} + +// GroupMemberBalancerOrError is an optional extension interface for +// GroupMemberBalancer. This can be implemented if your balance function can +// fail. +// +// For interface purposes, it is required to implement GroupMemberBalancer, but +// Balance will never be called. +type GroupMemberBalancerOrError interface { + GroupMemberBalancer + BalanceOrError(topics map[string]int32) (IntoSyncAssignment, error) +} + +// IntoSyncAssignment takes a balance plan and returns a list of assignments to +// use in a kmsg.SyncGroupRequest. +// +// It is recommended to ensure the output is deterministic and ordered by +// member / topic / partitions. 
+type IntoSyncAssignment interface { + IntoSyncAssignment() []kmsg.SyncGroupRequestGroupAssignment +} + +// ConsumerBalancer is a helper type for writing balance plans that use the +// "consumer" protocol, such that each member uses a kmsg.ConsumerMemberMetadata +// in its join group request. +type ConsumerBalancer struct { + b ConsumerBalancerBalance + members []kmsg.JoinGroupResponseMember + metadatas []kmsg.ConsumerMemberMetadata + topics map[string]struct{} + + err error +} + +// Balance satisfies the GroupMemberBalancer interface, but is never called +// because GroupMemberBalancerOrError exists. +func (*ConsumerBalancer) Balance(map[string]int32) IntoSyncAssignment { + panic("unreachable") +} + +// BalanceOrError satisfies the GroupMemberBalancerOrError interface. +func (b *ConsumerBalancer) BalanceOrError(topics map[string]int32) (IntoSyncAssignment, error) { + return b.b.Balance(b, topics), b.err +} + +// Members returns the list of input members for this group balancer. +func (b *ConsumerBalancer) Members() []kmsg.JoinGroupResponseMember { + return b.members +} + +// EachMember calls fn for each member and its corresponding metadata in the +// consumer group being balanced. +func (b *ConsumerBalancer) EachMember(fn func(member *kmsg.JoinGroupResponseMember, meta *kmsg.ConsumerMemberMetadata)) { + for i := range b.members { + fn(&b.members[i], &b.metadatas[i]) + } +} + +// MemberAt returns the nth member and its corresponding metadata. +func (b *ConsumerBalancer) MemberAt(n int) (*kmsg.JoinGroupResponseMember, *kmsg.ConsumerMemberMetadata) { + return &b.members[n], &b.metadatas[n] +} + +// SetError allows you to set any error that occurred while balancing. This +// allows you to fail balancing and return nil from Balance. +func (b *ConsumerBalancer) SetError(err error) { + b.err = err +} + +// MemberTopics returns the unique set of topics that all members are +// interested in. +// +// This can safely be called if the balancer is nil; if so, this will return +// nil. +func (b *ConsumerBalancer) MemberTopics() map[string]struct{} { + if b == nil { + return nil + } + return b.topics +} + +// NewPlan returns a type that can be used to build a balance plan. The return +// satisfies the IntoSyncAssignment interface. +func (b *ConsumerBalancer) NewPlan() *BalancePlan { + plan := make(map[string]map[string][]int32, len(b.members)) + for i := range b.members { + plan[b.members[i].MemberID] = make(map[string][]int32) + } + return &BalancePlan{plan} +} + +// ConsumerBalancerBalance is what the ConsumerBalancer invokes to balance a +// group. +// +// This is a complicated interface, but in short, this interface has one +// function that implements the actual balancing logic: using the input +// balancer, balance the input topics and partitions. If your balancing can +// fail, you can use ConsumerBalancer.SetError(...) to return an error from +// balancing, and then you can simply return nil from Balance. +type ConsumerBalancerBalance interface { + Balance(*ConsumerBalancer, map[string]int32) IntoSyncAssignment +} + +// ParseConsumerSyncAssignment returns an assignment as specified a +// kmsg.ConsumerMemberAssignment, that is, the type encoded in metadata for the +// consumer protocol. 
+func ParseConsumerSyncAssignment(assignment []byte) (map[string][]int32, error) { + var kassignment kmsg.ConsumerMemberAssignment + if err := kassignment.ReadFrom(assignment); err != nil { + return nil, fmt.Errorf("sync assignment parse failed: %v", err) + } + + m := make(map[string][]int32, len(kassignment.Topics)) + for _, topic := range kassignment.Topics { + m[topic.Topic] = topic.Partitions + } + return m, nil +} + +// NewConsumerBalancer parses the each member's metadata as a +// kmsg.ConsumerMemberMetadata and returns a ConsumerBalancer to use in balancing. +// +// If any metadata parsing fails, this returns an error. +func NewConsumerBalancer(balance ConsumerBalancerBalance, members []kmsg.JoinGroupResponseMember) (*ConsumerBalancer, error) { + b := &ConsumerBalancer{ + b: balance, + members: members, + metadatas: make([]kmsg.ConsumerMemberMetadata, len(members)), + topics: make(map[string]struct{}), + } + + for i, member := range members { + meta := &b.metadatas[i] + meta.Default() + memberMeta := member.ProtocolMetadata + if err := meta.ReadFrom(memberMeta); err != nil { + // Some buggy clients claimed support for v1 but then + // did not add OwnedPartitions, resulting in a short + // metadata. If we fail at reading and the version is + // v1, we retry again as v0. We do not support other + // versions because hopefully other clients stop + // claiming higher and higher version support and not + // actually supporting them. Sarama has a similarish + // workaround. See #493. + if bytes.HasPrefix(memberMeta, []byte{0, 1}) { + memberMeta[0] = 0 + memberMeta[1] = 0 + if err = meta.ReadFrom(memberMeta); err != nil { + return nil, fmt.Errorf("unable to read member metadata: %v", err) + } + } + } + for _, topic := range meta.Topics { + b.topics[topic] = struct{}{} + } + sort.Strings(meta.Topics) + } + + return b, nil +} + +// BalancePlan is a helper type to build the result of balancing topics +// and partitions among group members. +type BalancePlan struct { + plan map[string]map[string][]int32 // member => topic => partitions +} + +// AsMemberIDMap returns the plan as a map of member IDs to their topic & +// partition assignments. +// +// Internally, a BalancePlan is currently represented as this map. Any +// modification to the map modifies the plan. The internal representation of a +// plan may change in the future to include more metadata. If this happens, the +// map returned from this function may not represent all aspects of a plan. +// The client will attempt to mirror modifications to the map directly back +// into the underlying plan as best as possible. +func (p *BalancePlan) AsMemberIDMap() map[string]map[string][]int32 { + return p.plan +} + +func (p *BalancePlan) String() string { + var sb strings.Builder + + var membersWritten int + for member, topics := range p.plan { + membersWritten++ + sb.WriteString(member) + sb.WriteString("{") + + var topicsWritten int + for topic, partitions := range topics { + fmt.Fprintf(&sb, "%s%v", topic, partitions) + topicsWritten++ + if topicsWritten < len(topics) { + sb.WriteString(", ") + } + } + + sb.WriteString("}") + if membersWritten < len(p.plan) { + sb.WriteString(", ") + } + } + + return sb.String() +} + +// AddPartition assigns a partition for the topic to a given member. 
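To make the helper types above concrete, here is a minimal, hypothetical balancer built on ConsumerBalancer and BalancePlan. The firstInterestedBalancer name and its protocol name are invented for illustration; every member of a group would have to agree on the protocol for it to be usable.

package example

import (
    "github.com/twmb/franz-go/pkg/kgo"
    "github.com/twmb/franz-go/pkg/kmsg"
)

// firstInterestedBalancer assigns every partition of a topic to the first
// member (in sorted member order) that is interested in it. It exists only to
// show how the ConsumerBalancer helpers and BalancePlan fit together.
type firstInterestedBalancer struct{}

func (firstInterestedBalancer) ProtocolName() string { return "first-interested" }
func (firstInterestedBalancer) IsCooperative() bool  { return false }

func (firstInterestedBalancer) JoinGroupMetadata(interests []string, _ map[string][]int32, generation int32) []byte {
    // Same shape as the simple (range / roundrobin) metadata: a
    // kmsg.ConsumerMemberMetadata with no owned partitions.
    meta := kmsg.NewConsumerMemberMetadata()
    meta.Version = 3
    meta.Topics = interests
    meta.Generation = generation
    return meta.AppendTo(nil)
}

func (firstInterestedBalancer) ParseSyncAssignment(assignment []byte) (map[string][]int32, error) {
    return kgo.ParseConsumerSyncAssignment(assignment)
}

func (b firstInterestedBalancer) MemberBalancer(members []kmsg.JoinGroupResponseMember) (kgo.GroupMemberBalancer, map[string]struct{}, error) {
    cb, err := kgo.NewConsumerBalancer(b, members)
    return cb, cb.MemberTopics(), err
}

// Balance satisfies kgo.ConsumerBalancerBalance and is what the
// ConsumerBalancer calls back into when the leader balances.
func (firstInterestedBalancer) Balance(b *kgo.ConsumerBalancer, topics map[string]int32) kgo.IntoSyncAssignment {
    plan := b.NewPlan()
    for topic, numPartitions := range topics {
        for i := range b.Members() {
            member, meta := b.MemberAt(i)
            interested := false
            for _, t := range meta.Topics {
                if t == topic {
                    interested = true
                    break
                }
            }
            if !interested {
                continue
            }
            for p := int32(0); p < numPartitions; p++ {
                plan.AddPartition(member, topic, p)
            }
            break // the first interested member takes the whole topic
        }
    }
    return plan
}

Such a balancer would be registered on every member with kgo.Balancers(firstInterestedBalancer{}).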
+func (p *BalancePlan) AddPartition(member *kmsg.JoinGroupResponseMember, topic string, partition int32) { + memberPlan := p.plan[member.MemberID] + memberPlan[topic] = append(memberPlan[topic], partition) +} + +// AddPartitions assigns many partitions for a topic to a given member. +func (p *BalancePlan) AddPartitions(member *kmsg.JoinGroupResponseMember, topic string, partitions []int32) { + memberPlan := p.plan[member.MemberID] + memberPlan[topic] = append(memberPlan[topic], partitions...) +} + +// IntoSyncAssignment satisfies the IntoSyncAssignment interface. +func (p *BalancePlan) IntoSyncAssignment() []kmsg.SyncGroupRequestGroupAssignment { + kassignments := make([]kmsg.SyncGroupRequestGroupAssignment, 0, len(p.plan)) + for member, assignment := range p.plan { + var kassignment kmsg.ConsumerMemberAssignment + for topic, partitions := range assignment { + sort.Slice(partitions, func(i, j int) bool { return partitions[i] < partitions[j] }) + assnTopic := kmsg.NewConsumerMemberAssignmentTopic() + assnTopic.Topic = topic + assnTopic.Partitions = partitions + kassignment.Topics = append(kassignment.Topics, assnTopic) + } + sort.Slice(kassignment.Topics, func(i, j int) bool { return kassignment.Topics[i].Topic < kassignment.Topics[j].Topic }) + syncAssn := kmsg.NewSyncGroupRequestGroupAssignment() + syncAssn.MemberID = member + syncAssn.MemberAssignment = kassignment.AppendTo(nil) + kassignments = append(kassignments, syncAssn) + } + sort.Slice(kassignments, func(i, j int) bool { return kassignments[i].MemberID < kassignments[j].MemberID }) + return kassignments +} + +func joinMemberLess(l, r *kmsg.JoinGroupResponseMember) bool { + if l.InstanceID != nil { + if r.InstanceID == nil { + return true + } + return *l.InstanceID < *r.InstanceID + } + if r.InstanceID != nil { + return false + } + return l.MemberID < r.MemberID +} + +func sortJoinMembers(members []kmsg.JoinGroupResponseMember) { + sort.Slice(members, func(i, j int) bool { return joinMemberLess(&members[i], &members[j]) }) +} + +func sortJoinMemberPtrs(members []*kmsg.JoinGroupResponseMember) { + sort.Slice(members, func(i, j int) bool { return joinMemberLess(members[i], members[j]) }) +} + +func (g *groupConsumer) findBalancer(from, proto string) (GroupBalancer, error) { + for _, b := range g.cfg.balancers { + if b.ProtocolName() == proto { + return b, nil + } + } + var ours []string + for _, b := range g.cfg.balancers { + ours = append(ours, b.ProtocolName()) + } + g.cl.cfg.logger.Log(LogLevelError, fmt.Sprintf("%s could not find broker-chosen balancer", from), "kafka_choice", proto, "our_set", strings.Join(ours, ", ")) + return nil, fmt.Errorf("unable to balance: none of our balancers have a name equal to the balancer chosen for balancing (%s)", proto) +} + +// balanceGroup returns a balancePlan from a join group response. +// +// If the group has topics this leader does not want to consume, this also +// returns all topics and partitions; the leader will then periodically do its +// own metadata update to see if partition counts have changed for these random +// topics. 
+func (g *groupConsumer) balanceGroup(proto string, members []kmsg.JoinGroupResponseMember, skipBalance bool) ([]kmsg.SyncGroupRequestGroupAssignment, error) { + g.cl.cfg.logger.Log(LogLevelInfo, "balancing group as leader") + + b, err := g.findBalancer("balance group", proto) + if err != nil { + return nil, err + } + + sortJoinMembers(members) + + memberBalancer, topics, err := b.MemberBalancer(members) + if err != nil { + return nil, fmt.Errorf("unable to create group member balancer: %v", err) + } + + myTopics := g.tps.load() + var needMeta bool + topicPartitionCount := make(map[string]int32, len(topics)) + for topic := range topics { + data, exists := myTopics[topic] + if !exists { + needMeta = true + continue + } + topicPartitionCount[topic] = int32(len(data.load().partitions)) + } + + // If our consumer metadata does not contain all topics, the group is + // expressing interests in topics we are not consuming. Perhaps we have + // those topics saved in our external topics map. + if needMeta { + g.loadExternal().fn(func(m map[string]int32) { + needMeta = false + for topic := range topics { + partitions, exists := m[topic] + if !exists { + needMeta = true + continue + } + topicPartitionCount[topic] = partitions + } + }) + } + + if needMeta { + g.cl.cfg.logger.Log(LogLevelInfo, "group members indicated interest in topics the leader is not assigned, fetching metadata for all group topics") + var metaTopics []string + for topic := range topics { + metaTopics = append(metaTopics, topic) + } + + _, resp, err := g.cl.fetchMetadataForTopics(g.ctx, false, metaTopics) + if err != nil { + return nil, fmt.Errorf("unable to fetch metadata for group topics: %v", err) + } + for i := range resp.Topics { + t := &resp.Topics[i] + if t.Topic == nil { + g.cl.cfg.logger.Log(LogLevelWarn, "metadata resp in balance for topic has nil topic, skipping...", "err", kerr.ErrorForCode(t.ErrorCode)) + continue + } + if t.ErrorCode != 0 { + g.cl.cfg.logger.Log(LogLevelWarn, "metadata resp in balance for topic has error, skipping...", "topic", t.Topic, "err", kerr.ErrorForCode(t.ErrorCode)) + continue + } + topicPartitionCount[*t.Topic] = int32(len(t.Partitions)) + } + + g.initExternal(topicPartitionCount) + } + + // If the returned balancer is a ConsumerBalancer (which it likely + // always will be), then we can print some useful debugging information + // about what member interests are. 
+ if b, ok := memberBalancer.(*ConsumerBalancer); ok { + interests := new(bytes.Buffer) + b.EachMember(func(member *kmsg.JoinGroupResponseMember, meta *kmsg.ConsumerMemberMetadata) { + interests.Reset() + fmt.Fprintf(interests, "interested topics: %v, previously owned: ", meta.Topics) + for _, owned := range meta.OwnedPartitions { + sort.Slice(owned.Partitions, func(i, j int) bool { return owned.Partitions[i] < owned.Partitions[j] }) + fmt.Fprintf(interests, "%s%v, ", owned.Topic, owned.Partitions) + } + strInterests := interests.String() + strInterests = strings.TrimSuffix(strInterests, ", ") + + if member.InstanceID == nil { + g.cl.cfg.logger.Log(LogLevelInfo, "balance group member", "id", member.MemberID, "interests", strInterests) + } else { + g.cl.cfg.logger.Log(LogLevelInfo, "balance group member", "id", member.MemberID, "instance_id", *member.InstanceID, "interests", strInterests) + } + }) + } else { + g.cl.cfg.logger.Log(LogLevelInfo, "unable to log information about group member interests: the user has defined a custom balancer (not a *ConsumerBalancer)") + } + + // KIP-814: we are leader and we know what the entire group is + // consuming. Crucially, we parsed topics that we are potentially not + // interested in and are now tracking them for metadata updates. We + // have logged the current interests, we do not need to actually + // balance. + if skipBalance { + switch proto := b.ProtocolName(); proto { + case RangeBalancer().ProtocolName(), + RoundRobinBalancer().ProtocolName(), + StickyBalancer().ProtocolName(), + CooperativeStickyBalancer().ProtocolName(): + default: + return nil, nil + } + } + + // If the returned IntoSyncAssignment is a BalancePlan, which it likely + // is if the balancer is a ConsumerBalancer, then we can again print + // more useful debugging information. + var into IntoSyncAssignment + if memberBalancerOrErr, ok := memberBalancer.(GroupMemberBalancerOrError); ok { + if into, err = memberBalancerOrErr.BalanceOrError(topicPartitionCount); err != nil { + g.cl.cfg.logger.Log(LogLevelError, "balance failed", "err", err) + return nil, err + } + } else { + into = memberBalancer.Balance(topicPartitionCount) + } + + if p, ok := into.(*BalancePlan); ok { + g.cl.cfg.logger.Log(LogLevelInfo, "balanced", "plan", p.String()) + } else { + g.cl.cfg.logger.Log(LogLevelInfo, "unable to log balance plan: the user has returned a custom IntoSyncAssignment (not a *BalancePlan)") + } + + return into.IntoSyncAssignment(), nil +} + +// helper func; range and roundrobin use v0 +func simpleMemberMetadata(interests []string, generation int32) []byte { + meta := kmsg.NewConsumerMemberMetadata() + meta.Version = 3 // BUMP ME WHEN NEW FIELDS ARE ADDED, AND BUMP BELOW + meta.Topics = interests // input interests are already sorted + // meta.OwnedPartitions is nil, since simple protocols are not cooperative + meta.Generation = generation + return meta.AppendTo(nil) +} + +/////////////////// +// Balance Plans // +/////////////////// + +// RoundRobinBalancer returns a group balancer that evenly maps topics and +// partitions to group members. +// +// Suppose there are two members M0 and M1, two topics t0 and t1, and each +// topic has three partitions p0, p1, and p2. The partition balancing will be +// +// M0: [t0p0, t0p2, t1p1] +// M1: [t0p1, t1p0, t1p2] +// +// If all members subscribe to all topics equally, the roundrobin balancer +// will give a perfect balance. However, if topic subscriptions are quite +// unequal, the roundrobin balancer may lead to a bad balance. 
See KIP-49 +// for one example (note that the fair strategy mentioned in KIP-49 does +// not exist). +// +// This is equivalent to the Java roundrobin balancer. +func RoundRobinBalancer() GroupBalancer { + return new(roundRobinBalancer) +} + +type roundRobinBalancer struct{} + +func (*roundRobinBalancer) ProtocolName() string { return "roundrobin" } +func (*roundRobinBalancer) IsCooperative() bool { return false } +func (*roundRobinBalancer) JoinGroupMetadata(interests []string, _ map[string][]int32, generation int32) []byte { + return simpleMemberMetadata(interests, generation) +} + +func (*roundRobinBalancer) ParseSyncAssignment(assignment []byte) (map[string][]int32, error) { + return ParseConsumerSyncAssignment(assignment) +} + +func (r *roundRobinBalancer) MemberBalancer(members []kmsg.JoinGroupResponseMember) (GroupMemberBalancer, map[string]struct{}, error) { + b, err := NewConsumerBalancer(r, members) + return b, b.MemberTopics(), err +} + +func (*roundRobinBalancer) Balance(b *ConsumerBalancer, topics map[string]int32) IntoSyncAssignment { + type topicPartition struct { + topic string + partition int32 + } + var nparts int + for _, partitions := range topics { + nparts += int(partitions) + } + // Order all partitions available to balance, filtering out those that + // no members are subscribed to. + allParts := make([]topicPartition, 0, nparts) + for topic := range b.MemberTopics() { + for partition := int32(0); partition < topics[topic]; partition++ { + allParts = append(allParts, topicPartition{ + topic, + partition, + }) + } + } + sort.Slice(allParts, func(i, j int) bool { + l, r := allParts[i], allParts[j] + return l.topic < r.topic || l.topic == r.topic && l.partition < r.partition + }) + + plan := b.NewPlan() + // While parts are unassigned, assign them. + var memberIdx int + for len(allParts) > 0 { + next := allParts[0] + allParts = allParts[1:] + + // The Java roundrobin strategy walks members circularly until + // a member can take this partition, and then starts the next + // partition where the circular iterator left off. + assigned: + for { + member, meta := b.MemberAt(memberIdx) + memberIdx = (memberIdx + 1) % len(b.Members()) + for _, topic := range meta.Topics { + if topic == next.topic { + plan.AddPartition(member, next.topic, next.partition) + break assigned + } + } + } + } + + return plan +} + +// RangeBalancer returns a group balancer that, per topic, maps partitions to +// group members. Since this works on a topic level, uneven partitions per +// topic to the number of members can lead to slight partition consumption +// disparities. +// +// Suppose there are two members M0 and M1, two topics t0 and t1, and each +// topic has three partitions p0, p1, and p2. The partition balancing will be +// +// M0: [t0p0, t0p1, t1p0, t1p1] +// M1: [t0p2, t1p2] +// +// This is equivalent to the Java range balancer. 
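A configuration sketch showing how one of these balancers is selected on a client; the seed broker, group, and topic names are placeholders, and kgo.Balancers overrides the client's default balancer set.

package main

import "github.com/twmb/franz-go/pkg/kgo"

func main() {
    cl, err := kgo.NewClient(
        kgo.SeedBrokers("localhost:9092"),
        kgo.ConsumerGroup("example-group"),
        kgo.ConsumeTopics("example-topic"),
        // Pick an eager strategy explicitly; the group leader uses the first
        // protocol that every member supports.
        kgo.Balancers(kgo.RangeBalancer()),
    )
    if err != nil {
        panic(err)
    }
    defer cl.Close()
}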
+func RangeBalancer() GroupBalancer { + return new(rangeBalancer) +} + +type rangeBalancer struct{} + +func (*rangeBalancer) ProtocolName() string { return "range" } +func (*rangeBalancer) IsCooperative() bool { return false } +func (*rangeBalancer) JoinGroupMetadata(interests []string, _ map[string][]int32, generation int32) []byte { + return simpleMemberMetadata(interests, generation) +} + +func (*rangeBalancer) ParseSyncAssignment(assignment []byte) (map[string][]int32, error) { + return ParseConsumerSyncAssignment(assignment) +} + +func (r *rangeBalancer) MemberBalancer(members []kmsg.JoinGroupResponseMember) (GroupMemberBalancer, map[string]struct{}, error) { + b, err := NewConsumerBalancer(r, members) + return b, b.MemberTopics(), err +} + +func (*rangeBalancer) Balance(b *ConsumerBalancer, topics map[string]int32) IntoSyncAssignment { + topics2PotentialConsumers := make(map[string][]*kmsg.JoinGroupResponseMember) + b.EachMember(func(member *kmsg.JoinGroupResponseMember, meta *kmsg.ConsumerMemberMetadata) { + for _, topic := range meta.Topics { + topics2PotentialConsumers[topic] = append(topics2PotentialConsumers[topic], member) + } + }) + + plan := b.NewPlan() + for topic, potentialConsumers := range topics2PotentialConsumers { + sortJoinMemberPtrs(potentialConsumers) + + numPartitions := topics[topic] + partitions := make([]int32, numPartitions) + for i := range partitions { + partitions[i] = int32(i) + } + numParts := len(partitions) + div, rem := numParts/len(potentialConsumers), numParts%len(potentialConsumers) + + var consumerIdx int + for len(partitions) > 0 { + num := div + if rem > 0 { + num++ + rem-- + } + + member := potentialConsumers[consumerIdx] + plan.AddPartitions(member, topic, partitions[:num]) + + consumerIdx++ + partitions = partitions[num:] + } + } + + return plan +} + +// StickyBalancer returns a group balancer that ensures minimal partition +// movement on group changes while also ensuring optimal balancing. +// +// Suppose there are three members M0, M1, and M2, and two topics t0 and t1 +// each with three partitions p0, p1, and p2. If the initial balance plan looks +// like +// +// M0: [t0p0, t0p1, t0p2] +// M1: [t1p0, t1p1, t1p2] +// M2: [t2p0, t2p2, t2p2] +// +// If M2 disappears, both roundrobin and range would have mostly destructive +// reassignments. +// +// Range would result in +// +// M0: [t0p0, t0p1, t1p0, t1p1, t2p0, t2p1] +// M1: [t0p2, t1p2, t2p2] +// +// which is imbalanced and has 3 partitions move from members that did not need +// to move (t0p2, t1p0, t1p1). +// +// RoundRobin would result in +// +// M0: [t0p0, t0p2, t1p1, t2p0, t2p2] +// M1: [t0p1, t1p0, t1p2, t2p1] +// +// which is balanced, but has 2 partitions move when they do not need to +// (t0p1, t1p1). +// +// Sticky balancing results in +// +// M0: [t0p0, t0p1, t0p2, t2p0, t2p2] +// M1: [t1p0, t1p1, t1p2, t2p1] +// +// which is balanced and does not cause any unnecessary partition movement. +// The actual t2 partitions may not be in that exact combination, but they +// will be balanced. +// +// An advantage of the sticky consumer is that it allows API users to +// potentially avoid some cleanup until after the consumer knows which +// partitions it is losing when it gets its new assignment. Users can +// then only cleanup state for partitions that changed, which will be +// minimal (see KIP-54; this client also includes the KIP-351 bugfix). +// +// Note that this API implements the sticky partitioning quite differently from +// the Java implementation. 
The Java implementation is difficult to reason +// about and has many edge cases that result in non-optimal balancing (albeit, +// you likely have to be trying to hit those edge cases). This API uses a +// different algorithm to ensure optimal balancing while being an order of +// magnitude faster. +// +// Since the new strategy is a strict improvement over the Java strategy, it is +// entirely compatible. Any Go client sharing a group with a Java client will +// not have its decisions undone on leadership change from a Go consumer to a +// Java one. Java balancers do not apply the strategy it comes up with if it +// deems the balance score equal to or worse than the original score (the score +// being effectively equal to the standard deviation of the mean number of +// assigned partitions). This Go sticky balancer is optimal and extra sticky. +// Thus, the Java balancer will never back out of a strategy from this +// balancer. +func StickyBalancer() GroupBalancer { + return &stickyBalancer{cooperative: false} +} + +type stickyBalancer struct { + cooperative bool +} + +func (s *stickyBalancer) ProtocolName() string { + if s.cooperative { + return "cooperative-sticky" + } + return "sticky" +} +func (s *stickyBalancer) IsCooperative() bool { return s.cooperative } +func (s *stickyBalancer) JoinGroupMetadata(interests []string, currentAssignment map[string][]int32, generation int32) []byte { + meta := kmsg.NewConsumerMemberMetadata() + meta.Version = 3 // BUMP ME WHEN NEW FIELDS ARE ADDED, AND BUMP ABOVE + meta.Topics = interests + meta.Generation = generation + stickyMeta := kmsg.NewStickyMemberMetadata() + stickyMeta.Generation = generation + for topic, partitions := range currentAssignment { + if s.cooperative { + metaPart := kmsg.NewConsumerMemberMetadataOwnedPartition() + metaPart.Topic = topic + metaPart.Partitions = partitions + meta.OwnedPartitions = append(meta.OwnedPartitions, metaPart) + } + stickyAssn := kmsg.NewStickyMemberMetadataCurrentAssignment() + stickyAssn.Topic = topic + stickyAssn.Partitions = partitions + stickyMeta.CurrentAssignment = append(stickyMeta.CurrentAssignment, stickyAssn) + } + + // KAFKA-12898: ensure our topics are sorted + metaOwned := meta.OwnedPartitions + stickyCurrent := stickyMeta.CurrentAssignment + sort.Slice(metaOwned, func(i, j int) bool { return metaOwned[i].Topic < metaOwned[j].Topic }) + sort.Slice(stickyCurrent, func(i, j int) bool { return stickyCurrent[i].Topic < stickyCurrent[j].Topic }) + + meta.UserData = stickyMeta.AppendTo(nil) + return meta.AppendTo(nil) +} + +func (*stickyBalancer) ParseSyncAssignment(assignment []byte) (map[string][]int32, error) { + return ParseConsumerSyncAssignment(assignment) +} + +func (s *stickyBalancer) MemberBalancer(members []kmsg.JoinGroupResponseMember) (GroupMemberBalancer, map[string]struct{}, error) { + b, err := NewConsumerBalancer(s, members) + return b, b.MemberTopics(), err +} + +func (s *stickyBalancer) Balance(b *ConsumerBalancer, topics map[string]int32) IntoSyncAssignment { + // Since our input into balancing is already sorted by instance ID, + // the sticky strategy does not need to worry about instance IDs at all. + // See my (slightly rambling) comment on KAFKA-8432. 
+ stickyMembers := make([]sticky.GroupMember, 0, len(b.Members())) + b.EachMember(func(member *kmsg.JoinGroupResponseMember, meta *kmsg.ConsumerMemberMetadata) { + stickyMembers = append(stickyMembers, sticky.GroupMember{ + ID: member.MemberID, + Topics: meta.Topics, + UserData: meta.UserData, + Owned: meta.OwnedPartitions, + Generation: meta.Generation, + Cooperative: s.cooperative, + }) + }) + + p := &BalancePlan{sticky.Balance(stickyMembers, topics)} + if s.cooperative { + p.AdjustCooperative(b) + } + return p +} + +// CooperativeStickyBalancer performs the sticky balancing strategy, but +// additionally opts the consumer group into "cooperative" rebalancing. +// +// Cooperative rebalancing differs from "eager" (the original) rebalancing in +// that group members do not stop processing partitions during the rebalance. +// Instead, once they receive their new assignment, each member determines +// which partitions it needs to revoke. If any, they send a new join request +// (before syncing), and the process starts over. This should ultimately end up +// in only two join rounds, with the major benefit being that processing never +// needs to stop. +// +// NOTE once a group is collectively using cooperative balancing, it is unsafe +// to have a member join the group that does not support cooperative balancing. +// If the only-eager member is elected leader, it will not know of the new +// multiple join strategy and things will go awry. Thus, once a group is +// entirely on cooperative rebalancing, it cannot go back. +// +// Migrating an eager group to cooperative balancing requires two rolling +// bounce deploys. The first deploy should add the cooperative-sticky strategy +// as an option (that is, each member goes from using one balance strategy to +// two). During this deploy, Kafka will tell leaders to continue using the old +// eager strategy, since the old eager strategy is the only one in common among +// all members. The second rolling deploy removes the old eager strategy. At +// this point, Kafka will tell the leader to use cooperative-sticky balancing. +// During this roll, all members in the group that still have both strategies +// continue to be eager and give up all of their partitions every rebalance. +// However, once a member only has cooperative-sticky, it can begin using this +// new strategy and things will work correctly. See KIP-429 for more details. +func CooperativeStickyBalancer() GroupBalancer { + return &stickyBalancer{cooperative: true} +} + +// AdjustCooperative performs the final adjustment to a plan for cooperative +// balancing. +// +// Over the plan, we remove all partitions that migrated from one member (where +// it was assigned) to a new member (where it is now planned). +// +// This allows members that had partitions removed to revoke and rejoin, which +// will then do another rebalance, and in that new rebalance, the planned +// partitions are now on the free list to be assigned. 
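The two rolling deploys described for CooperativeStickyBalancer above map directly onto the Balancers option. A sketch of the option sets for each stage, assuming the group previously used the eager sticky strategy; the group and topic names are placeholders.

package example

import "github.com/twmb/franz-go/pkg/kgo"

// stageOneOpts: every member advertises both cooperative-sticky and the old
// eager sticky strategy, so the leader keeps choosing the eager protocol
// until all members support both.
func stageOneOpts() []kgo.Opt {
    return []kgo.Opt{
        kgo.ConsumerGroup("example-group"),
        kgo.ConsumeTopics("example-topic"),
        kgo.Balancers(kgo.CooperativeStickyBalancer(), kgo.StickyBalancer()),
    }
}

// stageTwoOpts: once every member runs stage one, the eager strategy is
// dropped and the group switches to cooperative rebalancing. There is no
// going back after this point.
func stageTwoOpts() []kgo.Opt {
    return []kgo.Opt{
        kgo.ConsumerGroup("example-group"),
        kgo.ConsumeTopics("example-topic"),
        kgo.Balancers(kgo.CooperativeStickyBalancer()),
    }
}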
+func (p *BalancePlan) AdjustCooperative(b *ConsumerBalancer) { + allAdded := make(map[string]map[int32]string, 100) // topic => partition => member + allRevoked := make(map[string]map[int32]struct{}, 100) + + addT := func(t string) map[int32]string { + addT := allAdded[t] + if addT == nil { + addT = make(map[int32]string, 20) + allAdded[t] = addT + } + return addT + } + revokeT := func(t string) map[int32]struct{} { + revokeT := allRevoked[t] + if revokeT == nil { + revokeT = make(map[int32]struct{}, 20) + allRevoked[t] = revokeT + } + return revokeT + } + + tmap := make(map[string]struct{}) // reusable topic existence map + pmap := make(map[int32]struct{}) // reusable partitions existence map + + plan := p.plan + + // First, on all members, we find what was added and what was removed + // to and from that member. + b.EachMember(func(member *kmsg.JoinGroupResponseMember, meta *kmsg.ConsumerMemberMetadata) { + planned := plan[member.MemberID] + + // added := planned - current + // revoked := current - planned + + for ptopic := range planned { // set existence for all planned topics + tmap[ptopic] = struct{}{} + } + for _, otopic := range meta.OwnedPartitions { // over all prior owned topics, + topic := otopic.Topic + delete(tmap, topic) + ppartitions, exists := planned[topic] + if !exists { // any topic that is no longer planned was entirely revoked, + allRevokedT := revokeT(topic) + for _, opartition := range otopic.Partitions { + allRevokedT[opartition] = struct{}{} + } + continue + } + // calculate what was added by creating a planned existence map, + // then removing what was owned, and anything that remains is new, + for _, ppartition := range ppartitions { + pmap[ppartition] = struct{}{} + } + for _, opartition := range otopic.Partitions { + delete(pmap, opartition) + } + if len(pmap) > 0 { + allAddedT := addT(topic) + for ppartition := range pmap { + delete(pmap, ppartition) + allAddedT[ppartition] = member.MemberID + } + } + // then calculate removal by creating owned existence map, + // then removing what was planned, anything remaining was revoked. + for _, opartition := range otopic.Partitions { + pmap[opartition] = struct{}{} + } + for _, ppartition := range ppartitions { + delete(pmap, ppartition) + } + if len(pmap) > 0 { + allRevokedT := revokeT(topic) + for opartition := range pmap { + delete(pmap, opartition) + allRevokedT[opartition] = struct{}{} + } + } + } + for ptopic := range tmap { // finally, anything remaining in tmap is a new planned topic. + delete(tmap, ptopic) + allAddedT := addT(ptopic) + for _, ppartition := range planned[ptopic] { + allAddedT[ppartition] = member.MemberID + } + } + }) + + // Over all revoked, if the revoked partition was added to a different + // member, we remove that partition from the new member. 
+ for topic, rpartitions := range allRevoked { + atopic, exists := allAdded[topic] + if !exists { + continue + } + for rpartition := range rpartitions { + amember, exists := atopic[rpartition] + if !exists { + continue + } + + ptopics := plan[amember] + ppartitions := ptopics[topic] + for i, ppartition := range ppartitions { + if ppartition == rpartition { + ppartitions[i] = ppartitions[len(ppartitions)-1] + ppartitions = ppartitions[:len(ppartitions)-1] + break + } + } + if len(ppartitions) > 0 { + ptopics[topic] = ppartitions + } else { + delete(ptopics, topic) + } + } + } +} diff --git a/vendor/github.com/twmb/franz-go/pkg/kgo/hooks.go b/vendor/github.com/twmb/franz-go/pkg/kgo/hooks.go new file mode 100644 index 000000000000..aeff4f19df0a --- /dev/null +++ b/vendor/github.com/twmb/franz-go/pkg/kgo/hooks.go @@ -0,0 +1,420 @@ +package kgo + +import ( + "net" + "time" +) + +//////////////////////////////////////////////////////////////// +// NOTE: // +// NOTE: Make sure new hooks are checked in implementsAnyHook // +// NOTE: // +//////////////////////////////////////////////////////////////// + +// Hook is a hook to be called when something happens in kgo. +// +// The base Hook interface is useless, but wherever a hook can occur in kgo, +// the client checks if your hook implements an appropriate interface. If so, +// your hook is called. +// +// This allows you to only hook in to behavior you care about, and it allows +// the client to add more hooks in the future. +// +// All hook interfaces in this package have Hook in the name. Hooks must be +// safe for concurrent use. It is expected that hooks are fast; if a hook needs +// to take time, then copy what you need and ensure the hook is async. +type Hook any + +type hooks []Hook + +func (hs hooks) each(fn func(Hook)) { + for _, h := range hs { + fn(h) + } +} + +// HookNewClient is called in NewClient after a client is initialized. This +// hook can be used to perform final setup work in your hooks. +type HookNewClient interface { + // OnNewClient is passed the newly initialized client, before any + // client goroutines are started. + OnNewClient(*Client) +} + +// HookClientClosed is called in Close or CloseAfterRebalance after a client +// has been closed. This hook can be used to perform final cleanup work. +type HookClientClosed interface { + // OnClientClosed is passed the client that has been closed, after + // all client-internal close cleanup has happened. + OnClientClosed(*Client) +} + +////////////////// +// BROKER HOOKS // +////////////////// + +// HookBrokerConnect is called after a connection to a broker is opened. +type HookBrokerConnect interface { + // OnBrokerConnect is passed the broker metadata, how long it took to + // dial, and either the dial's resulting net.Conn or error. + OnBrokerConnect(meta BrokerMetadata, dialDur time.Duration, conn net.Conn, err error) +} + +// HookBrokerDisconnect is called when a connection to a broker is closed. +type HookBrokerDisconnect interface { + // OnBrokerDisconnect is passed the broker metadata and the connection + // that is closing. + OnBrokerDisconnect(meta BrokerMetadata, conn net.Conn) +} + +// HookBrokerWrite is called after a write to a broker. +// +// Kerberos SASL does not cause write hooks, since it directly writes to the +// connection. 
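A sketch of wiring hooks into a client; the connLogger type and seed address are illustrative. A registered value only needs to implement the hook interfaces it cares about, since the client type-asserts each registered hook.

package main

import (
    "log"
    "net"
    "time"

    "github.com/twmb/franz-go/pkg/kgo"
)

// connLogger implements the broker connect and disconnect hooks above.
type connLogger struct{}

func (connLogger) OnBrokerConnect(meta kgo.BrokerMetadata, dialDur time.Duration, _ net.Conn, err error) {
    if err != nil {
        log.Printf("dial to broker %d failed after %s: %v", meta.NodeID, dialDur, err)
        return
    }
    log.Printf("connected to broker %d in %s", meta.NodeID, dialDur)
}

func (connLogger) OnBrokerDisconnect(meta kgo.BrokerMetadata, _ net.Conn) {
    log.Printf("disconnected from broker %d", meta.NodeID)
}

func main() {
    cl, err := kgo.NewClient(
        kgo.SeedBrokers("localhost:9092"), // placeholder address
        kgo.WithHooks(connLogger{}),
    )
    if err != nil {
        panic(err)
    }
    defer cl.Close()
}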
+type HookBrokerWrite interface { + // OnBrokerWrite is passed the broker metadata, the key for the request + // that was written, the number of bytes that were written (may not be + // the whole request if there was an error), how long the request + // waited before being written (including throttling waiting), how long + // it took to write the request, and any error. + // + // The bytes written does not count any tls overhead. + OnBrokerWrite(meta BrokerMetadata, key int16, bytesWritten int, writeWait, timeToWrite time.Duration, err error) +} + +// HookBrokerRead is called after a read from a broker. +// +// Kerberos SASL does not cause read hooks, since it directly reads from the +// connection. +type HookBrokerRead interface { + // OnBrokerRead is passed the broker metadata, the key for the response + // that was read, the number of bytes read (may not be the whole read + // if there was an error), how long the client waited before reading + // the response, how long it took to read the response, and any error. + // + // The bytes read does not count any tls overhead. + OnBrokerRead(meta BrokerMetadata, key int16, bytesRead int, readWait, timeToRead time.Duration, err error) +} + +// BrokerE2E tracks complete information for a write of a request followed by a +// read of that requests's response. +// +// Note that if this is for a produce request with no acks, there will be no +// read wait / time to read. +type BrokerE2E struct { + // BytesWritten is the number of bytes written for this request. + // + // This may not be the whole request if there was an error while writing. + BytesWritten int + + // BytesRead is the number of bytes read for this requests's response. + // + // This may not be the whole response if there was an error while + // reading, and this will be zero if there was a write error. + BytesRead int + + // WriteWait is the time spent waiting from when this request was + // generated internally in the client to just before the request is + // written to the connection. This number is not included in the + // DurationE2E method. + WriteWait time.Duration + // TimeToWrite is how long a request took to be written on the wire. + // This specifically tracks only how long conn.Write takes. + TimeToWrite time.Duration + // ReadWait tracks the span of time immediately following conn.Write + // until conn.Read begins. + ReadWait time.Duration + // TimeToRead tracks how long conn.Read takes for this request to be + // entirely read. This includes the time it takes to allocate a buffer + // for the response after the initial four size bytes are read. + TimeToRead time.Duration + + // WriteErr is any error encountered during writing. If a write error is + // encountered, no read will be attempted. + WriteErr error + // ReadErr is any error encountered during reading. + ReadErr error +} + +// DurationE2E returns the e2e time from the start of when a request is written +// to the end of when the response for that request was fully read. If a write +// or read error occurs, this hook is called with all information possible at +// the time (e.g., if a write error occurs, all write info is specified). +// +// Kerberos SASL does not cause this hook, since it directly reads from the +// connection. +func (e *BrokerE2E) DurationE2E() time.Duration { + return e.TimeToWrite + e.ReadWait + e.TimeToRead +} + +// Err returns the first of either the write err or the read err. If this +// return is non-nil, the request/response had an error. 
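An illustrative implementation of the HookBrokerE2E interface declared just below, using BrokerE2E's Err and DurationE2E helpers; the e2eLogger name and the one-second threshold are arbitrary, and the hook would be registered with kgo.WithHooks.

package example

import (
    "log"
    "time"

    "github.com/twmb/franz-go/pkg/kgo"
)

// e2eLogger logs failed round trips and any request/response pair slower than
// one second.
type e2eLogger struct{}

func (e2eLogger) OnBrokerE2E(meta kgo.BrokerMetadata, key int16, e2e kgo.BrokerE2E) {
    if err := e2e.Err(); err != nil {
        log.Printf("broker %d, request key %d failed: %v", meta.NodeID, key, err)
        return
    }
    if d := e2e.DurationE2E(); d > time.Second {
        log.Printf("slow request to broker %d, key %d: %s (%d bytes written, %d bytes read)",
            meta.NodeID, key, d, e2e.BytesWritten, e2e.BytesRead)
    }
}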
+func (e *BrokerE2E) Err() error { + if e.WriteErr != nil { + return e.WriteErr + } + return e.ReadErr +} + +// HookBrokerE2E is called after a write to a broker that errors, or after a +// read to a broker. +// +// This differs from HookBrokerRead and HookBrokerWrite by tracking all E2E +// info for a write and a read, which allows for easier e2e metrics. This hook +// can replace both the read and write hook. +type HookBrokerE2E interface { + // OnBrokerE2E is passed the broker metadata, the key for the + // request/response that was written/read, and the e2e info for the + // request and response. + OnBrokerE2E(meta BrokerMetadata, key int16, e2e BrokerE2E) +} + +// HookBrokerThrottle is called after a response to a request is read +// from a broker, and the response identifies throttling in effect. +type HookBrokerThrottle interface { + // OnBrokerThrottle is passed the broker metadata, the imposed + // throttling interval, and whether the throttle was applied before + // Kafka responded to them request or after. + // + // For Kafka < 2.0, the throttle is applied before issuing a response. + // For Kafka >= 2.0, the throttle is applied after issuing a response. + // + // If throttledAfterResponse is false, then Kafka already applied the + // throttle. If it is true, the client internally will not send another + // request until the throttle deadline has passed. + OnBrokerThrottle(meta BrokerMetadata, throttleInterval time.Duration, throttledAfterResponse bool) +} + +////////// +// MISC // +////////// + +// HookGroupManageError is called after every error that causes the client, +// operating as a group member, to break out of the group managing loop and +// backoff temporarily. +// +// Specifically, any error that would result in OnPartitionsLost being called +// will result in this hook being called. +type HookGroupManageError interface { + // OnGroupManageError is passed the error that killed a group session. + // This can be used to detect potentially fatal errors and act on them + // at runtime to recover (such as group auth errors, or group max size + // reached). + OnGroupManageError(error) +} + +/////////////////////////////// +// PRODUCE & CONSUME BATCHES // +/////////////////////////////// + +// ProduceBatchMetrics tracks information about successful produces to +// partitions. +type ProduceBatchMetrics struct { + // NumRecords is the number of records that were produced in this + // batch. + NumRecords int + + // UncompressedBytes is the number of bytes the records serialized as + // before compression. + // + // For record batches (Kafka v0.11.0+), this is the size of the records + // in a batch, and does not include record batch overhead. + // + // For message sets, this size includes message set overhead. + UncompressedBytes int + + // CompressedBytes is the number of bytes actually written for this + // batch, after compression. If compression is not used, this will be + // equal to UncompresedBytes. + // + // For record batches, this is the size of the compressed records, and + // does not include record batch overhead. + // + // For message sets, this is the size of the compressed message set. + CompressedBytes int + + // CompressionType signifies which algorithm the batch was compressed + // with. + // + // 0 is no compression, 1 is gzip, 2 is snappy, 3 is lz4, and 4 is + // zstd. + CompressionType uint8 +} + +// HookProduceBatchWritten is called whenever a batch is known to be +// successfully produced. 
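+//
+// A minimal sketch (the batchSizes type is illustrative only) that tracks how
+// well produced batches compress:
+//
+//	type batchSizes struct{ raw, wire int64 }
+//
+//	func (b *batchSizes) OnProduceBatchWritten(meta BrokerMetadata, topic string, partition int32, m ProduceBatchMetrics) {
+//		atomic.AddInt64(&b.raw, int64(m.UncompressedBytes))
+//		atomic.AddInt64(&b.wire, int64(m.CompressedBytes))
+//	}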
+type HookProduceBatchWritten interface { + // OnProduceBatchWritten is called per successful batch written to a + // topic partition + OnProduceBatchWritten(meta BrokerMetadata, topic string, partition int32, metrics ProduceBatchMetrics) +} + +// FetchBatchMetrics tracks information about fetches of batches. +type FetchBatchMetrics struct { + // NumRecords is the number of records that were fetched in this batch. + // + // Note that this number includes transaction markers, which are not + // actually returned to the user. + // + // If the batch has an encoding error, this will be 0. + NumRecords int + + // UncompressedBytes is the number of bytes the records deserialized + // into after decompresion. + // + // For record batches (Kafka v0.11.0+), this is the size of the records + // in a batch, and does not include record batch overhead. + // + // For message sets, this size includes message set overhead. + // + // Note that this number may be higher than the corresponding number + // when producing, because as an "optimization", Kafka can return + // partial batches when fetching. + UncompressedBytes int + + // CompressedBytes is the number of bytes actually read for this batch, + // before decompression. If the batch was not compressed, this will be + // equal to UncompressedBytes. + // + // For record batches, this is the size of the compressed records, and + // does not include record batch overhead. + // + // For message sets, this is the size of the compressed message set. + CompressedBytes int + + // CompressionType signifies which algorithm the batch was compressed + // with. + // + // 0 is no compression, 1 is gzip, 2 is snappy, 3 is lz4, and 4 is + // zstd. + CompressionType uint8 +} + +// HookFetchBatchRead is called whenever a batch if read within the client. +// +// Note that this hook is called when processing, but a batch may be internally +// discarded after processing in some uncommon specific circumstances. +// +// If the client reads v0 or v1 message sets, and they are not compressed, then +// this hook will be called per record. +type HookFetchBatchRead interface { + // OnFetchBatchRead is called per batch read from a topic partition. + OnFetchBatchRead(meta BrokerMetadata, topic string, partition int32, metrics FetchBatchMetrics) +} + +/////////////////////////////// +// PRODUCE & CONSUME RECORDS // +/////////////////////////////// + +// HookProduceRecordBuffered is called when a record is buffered internally in +// the client from a call to Produce. +// +// This hook can be used to write metrics that gather the number of records or +// bytes buffered, or the hook can be used to write interceptors that modify a +// record's key / value / headers before being produced. If you just want a +// metric for the number of records buffered, use the client's +// BufferedProduceRecords method, as it is faster. +// +// Note that this hook may slow down high-volume producing a bit. +type HookProduceRecordBuffered interface { + // OnProduceRecordBuffered is passed a record that is buffered. + // + // This hook is called immediately after Produce is called, after the + // function potentially sets the default topic. + OnProduceRecordBuffered(*Record) +} + +// HookProduceRecordPartitioned is called when a record is partitioned and +// internally ready to be flushed. +// +// This hook can be used to create metrics of buffered records per partition, +// and then you can correlate that to partition leaders and determine which +// brokers are having problems. 
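+//
+// For example (the perLeader type is illustrative only), a count of buffered
+// records per leader broker:
+//
+//	type perLeader struct {
+//		mu     sync.Mutex
+//		counts map[int32]int
+//	}
+//
+//	func (p *perLeader) OnProduceRecordPartitioned(r *Record, leader int32) {
+//		p.mu.Lock()
+//		p.counts[leader]++
+//		p.mu.Unlock()
+//	}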
+//
+// Note that this hook will slow down high-volume producing and it is
+// recommended to only use this temporarily or if you are ok with the
+// performance hit.
+type HookProduceRecordPartitioned interface {
+	// OnProduceRecordPartitioned is passed a record that has been
+	// partitioned and the current broker leader for the partition
+	// (note that the leader may change if the partition is moved).
+	//
+	// This hook is called once a record is queued to be flushed. The
+	// record's Partition and Timestamp fields are safe to read.
+	OnProduceRecordPartitioned(*Record, int32)
+}
+
+// HookProduceRecordUnbuffered is called just before a record's promise is
+// finished; this is effectively a mirror of a record promise.
+//
+// As an example, if using HookProduceRecordBuffered for a gauge of how many
+// record bytes are buffered, this hook can be used to decrement the gauge.
+//
+// Note that this hook will slow down high-volume producing a bit.
+type HookProduceRecordUnbuffered interface {
+	// OnProduceRecordUnbuffered is passed a record that is just about to
+	// have its produce promise called, as well as the error that the
+	// promise will be called with.
+	OnProduceRecordUnbuffered(*Record, error)
+}
+
+// HookFetchRecordBuffered is called when a record is internally buffered after
+// fetching, ready to be polled.
+//
+// This hook can be used to write gauge metrics regarding the number of records
+// or bytes buffered, or to write interceptors that modify a record before
+// being returned from polling. If you just want a metric for the number of
+// records buffered, use the client's BufferedFetchRecords method, as it is
+// faster.
+//
+// Note that this hook will slow down high-volume consuming a bit.
+type HookFetchRecordBuffered interface {
+	// OnFetchRecordBuffered is passed a record that is now buffered, ready
+	// to be polled.
+	OnFetchRecordBuffered(*Record)
+}
+
+// HookFetchRecordUnbuffered is called when a fetched record is unbuffered.
+//
+// A record can be internally discarded in some scenarios without ever being
+// polled, such as when the internal assignment changes.
+//
+// As an example, if using HookFetchRecordBuffered for a gauge of how many
+// record bytes are buffered ready to be polled, this hook can be used to
+// decrement the gauge.
+//
+// Note that this hook may slow down high-volume consuming a bit.
+type HookFetchRecordUnbuffered interface {
+	// OnFetchRecordUnbuffered is passed a record that is being
+	// "unbuffered" within the client, and whether the record is being
+	// returned from polling.
+ OnFetchRecordUnbuffered(r *Record, polled bool) +} + +///////////// +// HELPERS // +///////////// + +// implementsAnyHook will check the incoming Hook for any Hook implementation +func implementsAnyHook(h Hook) bool { + switch h.(type) { + case HookNewClient, + HookClientClosed, + HookBrokerConnect, + HookBrokerDisconnect, + HookBrokerWrite, + HookBrokerRead, + HookBrokerE2E, + HookBrokerThrottle, + HookGroupManageError, + HookProduceBatchWritten, + HookFetchBatchRead, + HookProduceRecordBuffered, + HookProduceRecordPartitioned, + HookProduceRecordUnbuffered, + HookFetchRecordBuffered, + HookFetchRecordUnbuffered: + return true + } + return false +} diff --git a/vendor/github.com/twmb/franz-go/pkg/kgo/internal/sticky/go121.go b/vendor/github.com/twmb/franz-go/pkg/kgo/internal/sticky/go121.go new file mode 100644 index 000000000000..3cf972b6edbc --- /dev/null +++ b/vendor/github.com/twmb/franz-go/pkg/kgo/internal/sticky/go121.go @@ -0,0 +1,28 @@ +//go:build go1.21 +// +build go1.21 + +package sticky + +import "slices" + +func sortPartNums(ps memberPartitions) { + slices.Sort(ps) +} + +func (b *balancer) sortMemberByLiteralPartNum(memberNum int) { + partNums := b.plan[memberNum] + slices.SortFunc(partNums, func(lpNum, rpNum int32) int { + ltNum, rtNum := b.partOwners[lpNum], b.partOwners[rpNum] + li, ri := b.topicInfos[ltNum], b.topicInfos[rtNum] + lt, rt := li.topic, ri.topic + lp, rp := lpNum-li.partNum, rpNum-ri.partNum + if lp < rp { + return -1 + } else if lp > rp { + return 1 + } else if lt < rt { + return -1 + } + return 1 + }) +} diff --git a/vendor/github.com/twmb/franz-go/pkg/kgo/internal/sticky/goold.go b/vendor/github.com/twmb/franz-go/pkg/kgo/internal/sticky/goold.go new file mode 100644 index 000000000000..addd2bbc19c1 --- /dev/null +++ b/vendor/github.com/twmb/franz-go/pkg/kgo/internal/sticky/goold.go @@ -0,0 +1,22 @@ +//go:build !go1.21 +// +build !go1.21 + +package sticky + +import "sort" + +func sortPartNums(partNums memberPartitions) { + sort.Slice(partNums, func(i, j int) bool { return partNums[i] < partNums[j] }) +} + +func (b *balancer) sortMemberByLiteralPartNum(memberNum int) { + partNums := b.plan[memberNum] + sort.Slice(partNums, func(i, j int) bool { + lpNum, rpNum := partNums[i], partNums[j] + ltNum, rtNum := b.partOwners[lpNum], b.partOwners[rpNum] + li, ri := b.topicInfos[ltNum], b.topicInfos[rtNum] + lt, rt := li.topic, ri.topic + lp, rp := lpNum-li.partNum, rpNum-ri.partNum + return lp < rp || (lp == rp && lt < rt) + }) +} diff --git a/vendor/github.com/twmb/franz-go/pkg/kgo/internal/sticky/graph.go b/vendor/github.com/twmb/franz-go/pkg/kgo/internal/sticky/graph.go new file mode 100644 index 000000000000..d6bbb587ed2a --- /dev/null +++ b/vendor/github.com/twmb/franz-go/pkg/kgo/internal/sticky/graph.go @@ -0,0 +1,226 @@ +package sticky + +import "container/heap" + +// Graph maps members to partitions they want to steal. +// +// The representation was chosen so as to avoid updating all members on any +// partition move; move updates are one map update. +type graph struct { + b *balancer + + // node => edges out + // "from a node (member), which topicNum could we steal?" + out [][]uint32 + + // edge => who owns this edge; built in balancer's assignUnassigned + cxns []partitionConsumer + + // scores are all node scores from a search node. The distance field + // is reset on findSteal to infinityScore.. 
+ scores pathScores + + // heapBuf and pathBuf are backing buffers that are reused every + // findSteal; note that pathBuf must be done being used before + // the next find steal, but it always is. + heapBuf pathHeap + pathBuf []stealSegment +} + +func (b *balancer) newGraph( + partitionConsumers []partitionConsumer, + topicPotentials [][]uint16, +) graph { + g := graph{ + b: b, + out: make([][]uint32, len(b.members)), + cxns: partitionConsumers, + scores: make([]pathScore, len(b.members)), + heapBuf: make([]*pathScore, len(b.members)), + } + outBufs := make([]uint32, len(b.members)*len(topicPotentials)) + for memberNum := range b.plan { + out := outBufs[:0:len(topicPotentials)] + outBufs = outBufs[len(topicPotentials):] + // In the worst case, if every node is linked to each other, + // each node will have nparts edges. We preallocate the worst + // case. It is common for the graph to be highly connected. + g.out[memberNum] = out + } + for topicNum, potentials := range topicPotentials { + for _, potential := range potentials { + g.out[potential] = append(g.out[potential], uint32(topicNum)) + } + } + return g +} + +func (g *graph) changeOwnership(edge int32, newDst uint16) { + g.cxns[edge].memberNum = newDst +} + +// findSteal uses Dijkstra search to find a path from the best node it can reach. +func (g *graph) findSteal(from uint16) ([]stealSegment, bool) { + // First, we must reset our scores from any prior run. This is O(M), + // but is fast and faster than making a map and extending it a lot. + for i := range g.scores { + g.scores[i].distance = infinityScore + g.scores[i].done = false + } + + first, _ := g.getScore(from) + + first.distance = 0 + first.done = true + + g.heapBuf = append(g.heapBuf[:0], first) + rem := &g.heapBuf + for rem.Len() > 0 { + current := heap.Pop(rem).(*pathScore) + if current.level > first.level+1 { + path := g.pathBuf[:0] + for current.parent != nil { + path = append(path, stealSegment{ + current.node, + current.parent.node, + current.srcEdge, + }) + current = current.parent + } + g.pathBuf = path + return path, true + } + + current.done = true + + for _, topicNum := range g.out[current.node] { + info := g.b.topicInfos[topicNum] + firstPartNum, lastPartNum := info.partNum, info.partNum+info.partitions + for edge := firstPartNum; edge < lastPartNum; edge++ { + neighborNode := g.cxns[edge].memberNum + neighbor, isNew := g.getScore(neighborNode) + if neighbor.done { + continue + } + + distance := current.distance + 1 + + // The neighbor is the current node that owns this edge. + // If our node originally owned this partition, then it + // would be preferable to steal edge back. + srcIsOriginal := g.cxns[edge].originalNum == current.node + + // If this is a new neighbor (our first time seeing the neighbor + // in our search), this is also the shortest path to reach them, + // where shortest defers preference to original sources THEN distance. + if isNew { + neighbor.parent = current + neighbor.srcIsOriginal = srcIsOriginal + neighbor.srcEdge = edge + neighbor.distance = distance + neighbor.heapIdx = len(*rem) + heap.Push(rem, neighbor) + } else if !neighbor.srcIsOriginal && srcIsOriginal { + // If the search path has seen this neighbor before, but + // we now are evaluating a partition that would increase + // stickiness if stolen, then fixup the neighbor's parent + // and srcEdge. 
+ neighbor.parent = current + neighbor.srcIsOriginal = true + neighbor.srcEdge = edge + neighbor.distance = distance + heap.Fix(rem, neighbor.heapIdx) + } + } + } + } + + return nil, false +} + +type stealSegment struct { + src uint16 // member num + dst uint16 // member num + part int32 // partNum +} + +// As we traverse a graph, we assign each node a path score, which tracks a few +// numbers for what it would take to reach this node from our first node. +type pathScore struct { + // Done is set to true when we pop a node off of the graph. Once we + // pop a node, it means we have found a best path to that node and + // we do not want to revisit it for processing if any other future + // nodes reach back to this one. + done bool + + // srcIsOriginal is true if, were our parent to steal srcEdge, would + // that put srcEdge back on the original member. That is, if we are B + // and our parent is A, does our srcEdge originally belong do A? + // + // This field exists to work around a very slim edge case where a + // partition is stolen by B and then needs to be stolen back by A + // later. + srcIsOriginal bool + + node uint16 // our member num + distance int32 // how many steals it would take to get here + srcEdge int32 // the partition used to reach us + level int32 // partitions owned on this segment + parent *pathScore + heapIdx int +} + +type pathScores []pathScore + +const infinityScore = 1<<31 - 1 + +func (g *graph) getScore(node uint16) (*pathScore, bool) { + r := &g.scores[node] + exists := r.distance != infinityScore + if !exists { + *r = pathScore{ + node: node, + level: int32(len(g.b.plan[node])), + distance: infinityScore, + } + } + return r, !exists +} + +type pathHeap []*pathScore + +func (p *pathHeap) Len() int { return len(*p) } +func (p *pathHeap) Swap(i, j int) { + h := *p + l, r := h[i], h[j] + l.heapIdx, r.heapIdx = r.heapIdx, l.heapIdx + h[i], h[j] = r, l +} + +// For our path, we always want to prioritize stealing a partition we +// originally owned. This may result in a longer steal path, but it will +// increase stickiness. +// +// Next, our real goal, which is to find a node we can steal from. Because of +// this, we always want to sort by the highest level. The pathHeap stores +// reachable paths, so by sorting by the highest level, we terminate quicker: +// we always check the most likely candidates to quit our search. +// +// Finally, we simply prefer searching through shorter paths and, barring that, +// just sort by node. +func (p *pathHeap) Less(i, j int) bool { + l, r := (*p)[i], (*p)[j] + return l.srcIsOriginal && !r.srcIsOriginal || !l.srcIsOriginal && !r.srcIsOriginal && + (l.level > r.level || l.level == r.level && + (l.distance < r.distance || l.distance == r.distance && + l.node < r.node)) +} + +func (p *pathHeap) Push(x any) { *p = append(*p, x.(*pathScore)) } +func (p *pathHeap) Pop() any { + h := *p + l := len(h) + r := h[l-1] + *p = h[:l-1] + return r +} diff --git a/vendor/github.com/twmb/franz-go/pkg/kgo/internal/sticky/rbtree.go b/vendor/github.com/twmb/franz-go/pkg/kgo/internal/sticky/rbtree.go new file mode 100644 index 000000000000..8d563b7873f3 --- /dev/null +++ b/vendor/github.com/twmb/franz-go/pkg/kgo/internal/sticky/rbtree.go @@ -0,0 +1,392 @@ +package sticky + +// This file contains a vendoring of github.com/twmb/go-rbtree, with interface +// types replaced with *partitionLevel. We do this to simplify (and slightly) +// speed up the rbtree, get rid of a bunch of code we do not need, and to drop +// a dep. 
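+//
+// The tree holds *partitionLevel values ordered by their level field, and the
+// balancer below uses it roughly as a sorted map keyed on partition count
+// (here, want is a placeholder for the desired level):
+//
+//	level := t.findWithOrInsertWith(
+//		func(n *partitionLevel) int { return want - n.level },
+//		func() *partitionLevel { return newPartitionLevel(want) },
+//	).item
+//	least := t.min().item // the level whose members own the fewest partitions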
+ +type color bool + +const red, black color = true, false + +// Tree is a red-black tree. +type treePlan struct { + root *treePlanNode + size int +} + +type treePlanNode struct { + left *treePlanNode + right *treePlanNode + parent *treePlanNode + color color + item *partitionLevel +} + +// liftRightSideOf is rotateLeft. +// +// Graphically speaking, this takes the node on the right and lifts it above +// ourselves. IMO trying to visualize a "rotation" is confusing. +func (t *treePlan) liftRightSideOf(n *treePlanNode) { + r := n.right + t.relinkParenting(n, r) + + // lift the right + n.right = r.left + n.parent = r + + // fix the lifted right's left + if r.left != nil { + r.left.parent = n + } + r.left = n +} + +// liftLeftSideOf is rotateRight, renamed to aid my visualization. +func (t *treePlan) liftLeftSideOf(n *treePlanNode) { + l := n.left + t.relinkParenting(n, l) + + n.left = l.right + n.parent = l + + if l.right != nil { + l.right.parent = n + } + l.right = n +} + +// relinkParenting is called to fix a former child c of node n's parent +// relationship to the parent of n. +// +// After this, the n node can be considered to have no parent. +func (t *treePlan) relinkParenting(n, c *treePlanNode) { + p := n.parent + if c != nil { + c.parent = p + } + if p == nil { + t.root = c + return + } + if n == p.left { + p.left = c + } else { + p.right = c + } +} + +func (n *treePlanNode) sibling() *treePlanNode { + if n.parent == nil { + return nil + } + if n == n.parent.left { + return n.parent.right + } + return n.parent.left +} + +func (n *treePlanNode) uncle() *treePlanNode { + p := n.parent + if p.parent == nil { + return nil + } + return p.sibling() +} + +func (n *treePlanNode) grandparent() *treePlanNode { + return n.parent.parent +} + +func (n *treePlanNode) isBlack() bool { + return n == nil || n.color == black +} + +func (t *treePlan) insert(i *partitionLevel) *treePlanNode { + r := &treePlanNode{item: i} + t.reinsert(r) + return r +} + +func (t *treePlan) reinsert(n *treePlanNode) { + *n = treePlanNode{ + color: red, + item: n.item, + } + t.size++ + if t.root == nil { + n.color = black + t.root = n + return + } + + on := t.root + var set **treePlanNode + for { + if n.item.less(on.item) { + if on.left == nil { + set = &on.left + break + } + on = on.left + } else { + if on.right == nil { + set = &on.right + break + } + on = on.right + } + } + + n.parent = on + *set = n + +repair: + // Case 1: we have jumped back to the root. Paint it black. + if n.parent == nil { + n.color = black + return + } + + // Case 2: if our parent is black, us being red does not add a new black + // to the chain and cannot increase the maximum number of blacks from + // root, so we are done. + if n.parent.color == black { + return + } + + // Case 3: if we have an uncle and it is red, then we flip our + // parent's, uncle's, and grandparent's color. + // + // This stops the red-red from parent to us, but may introduce + // a red-red from grandparent to its parent, so we set ourselves + // to the grandparent and go back to the repair beginning. + if uncle := n.uncle(); uncle != nil && uncle.color == red { + n.parent.color = black + uncle.color = black + n = n.grandparent() + n.color = red + goto repair + } + + // Case 4 step 1: our parent is red but uncle is black. Step 2 relies + // on the node being on the "outside". If we are on the inside, our + // parent lifts ourselves above itself, thus making the parent the + // outside, and then we become that parent. 
+ p := n.parent + g := p.parent + if n == p.right && p == g.left { + t.liftRightSideOf(p) + n = n.left + } else if n == p.left && p == g.right { + t.liftLeftSideOf(p) + n = n.right + } + + // Care 4 step 2: we are on the outside, and we and our parent are red. + // If we are on the left, our grandparent lifts its left and then swaps + // its and our parent's colors. + // + // This fixes the red-red situation while preserving the number of + // blacks from root to leaf property. + p = n.parent + g = p.parent + + if n == p.left { + t.liftLeftSideOf(g) + } else { + t.liftRightSideOf(g) + } + p.color = black + g.color = red +} + +func (t *treePlan) delete(n *treePlanNode) { + t.size-- + + // We only want to delete nodes with at most one child. If this has + // two, we find the max node on the left, set this node's item to that + // node's item, and then delete that max node. + if n.left != nil && n.right != nil { + remove := n.left.max() + n.item, remove.item = remove.item, n.item + n = remove + } + + // Determine which child to elevate into our position now that we know + // we have at most one child. + c := n.right + if n.right == nil { + c = n.left + } + + t.doDelete(n, c) + t.relinkParenting(n, c) +} + +// Since we do not represent leave nodes with objects, we relink the parent +// after deleting. See the Wikipedia note. Most of our deletion operations +// on n (the dubbed "shadow" node) rather than c. +func (t *treePlan) doDelete(n, c *treePlanNode) { + // If the node was red, we deleted a red node; the number of black + // nodes along any path is the same and we can quit. + if n.color != black { + return + } + + // If the node was black, then, if we have a child and it is red, + // we switch the child to black to preserve the path number. + if c != nil && c.color == red { + c.color = black + return + } + + // We either do not have a child (nil is black), or we do and it + // is black. We must preserve the number of blacks. + +case1: + // Case 1: if the child is the new root, then the tree must have only + // had up to two elements and now has one or zero. We are done. + if n.parent == nil { + return + } + + // Note that if we are here, we must have a sibling. + // + // The first time through, from the deleted node, the deleted node was + // black and the child was black. This being two blacks meant that the + // original node's parent required two blacks on the other side. + // + // The second time through, through case 3, the sibling was repainted + // red... so it must still exist. + + // Case 2: if the child's sibling is red, we recolor the parent and + // sibling and lift the sibling, ensuring we have a black sibling. + s := n.sibling() + if s.color == red { + n.parent.color = red + s.color = black + if n == n.parent.left { + t.liftRightSideOf(n.parent) + } else { + t.liftLeftSideOf(n.parent) + } + s = n.sibling() + } + + // Right here, we know the sibling is black. If both sibling children + // are black or nil leaves (black), we enter cases 3 and 4. + if s.left.isBlack() && s.right.isBlack() { + // Case 3: if the parent, sibling, sibling's children are + // black, we can paint the sibling red to fix the imbalance. + // However, the same black imbalance can exist on the other + // side of the parent, so we go back to case 1 on the parent. + s.color = red + if n.parent.color == black { + n = n.parent + goto case1 + } + + // Case 4: if the sibling and sibling's children are black, but + // the parent is red, We can swap parent and sibling colors to + // fix our imbalance. 
We have no worry of further imbalances up + // the tree since we deleted a black node, replaced it with a + // red node, and then painted that red node black. + n.parent.color = black + return + } + + // Now we know the sibling is black and one of its children is red. + + // Case 5: in preparation for 6, if we are on the left, we want our + // sibling, if it has a right child, for that child's color to be red. + // We swap the sibling and sibling's left's color (since we know the + // sibling has a red child and that the right is black) and we lift the + // left child. + // + // This keeps the same number of black nodes and under the sibling. + if n == n.parent.left && s.right.isBlack() { + s.color = red + s.left.color = black + t.liftLeftSideOf(s) + } else if n == n.parent.right && s.left.isBlack() { + s.color = red + s.right.color = black + t.liftRightSideOf(s) + } + s = n.sibling() // can change from the above case + + // At this point, we know we have a black sibling and, if we are on + // the left, it has a red child on its right. + + // Case 6: we lift the sibling above the parent, swap the sibling's and + // parent's color, and change the sibling's right's color from red to + // black. + // + // This brings in a black above our node to replace the one we deleted, + // while preserves the number of blacks on the other side of the path. + s.color = n.parent.color + n.parent.color = black + if n == n.parent.left { + s.right.color = black + t.liftRightSideOf(n.parent) + } else { + s.left.color = black + t.liftLeftSideOf(n.parent) + } +} + +func (t *treePlan) findWith(cmp func(*partitionLevel) int) *treePlanNode { + on := t.root + for on != nil { + way := cmp(on.item) + switch { + case way < 0: + on = on.left + case way == 0: + return on + case way > 0: + on = on.right + } + } + return nil +} + +func (t *treePlan) findWithOrInsertWith( + find func(*partitionLevel) int, + insert func() *partitionLevel, +) *treePlanNode { + found := t.findWith(find) + if found == nil { + return t.insert(insert()) + } + return found +} + +func (t *treePlan) min() *treePlanNode { + if t.root == nil { + return nil + } + return t.root.min() +} + +func (n *treePlanNode) min() *treePlanNode { + for n.left != nil { + n = n.left + } + return n +} + +func (t *treePlan) max() *treePlanNode { + if t.root == nil { + return nil + } + return t.root.max() +} + +func (n *treePlanNode) max() *treePlanNode { + for n.right != nil { + n = n.right + } + return n +} diff --git a/vendor/github.com/twmb/franz-go/pkg/kgo/internal/sticky/sticky.go b/vendor/github.com/twmb/franz-go/pkg/kgo/internal/sticky/sticky.go new file mode 100644 index 000000000000..a502a2e5613d --- /dev/null +++ b/vendor/github.com/twmb/franz-go/pkg/kgo/internal/sticky/sticky.go @@ -0,0 +1,733 @@ +// Package sticky provides sticky partitioning strategy for Kafka, with a +// complete overhaul to be faster, more understandable, and optimal. +// +// For some points on how Java's strategy is flawed, see +// https://github.com/IBM/sarama/pull/1416/files/b29086bdaae0da7ce71eae3f854d50685fd6b631#r315005878 +package sticky + +import ( + "math" + + "github.com/twmb/franz-go/pkg/kbin" + "github.com/twmb/franz-go/pkg/kmsg" +) + +// Sticky partitioning has two versions, the latter from KIP-341 preventing a +// bug. The second version introduced generations with the default generation +// from the first generation's consumers defaulting to -1. + +// We can support up to 65533 members; two slots are reserved. +// We can support up to 2,147,483,647 partitions. 
+// I expect a server to fall over before reaching either of these numbers. + +// GroupMember is a Kafka group member. +type GroupMember struct { + ID string + Topics []string + UserData []byte + Owned []kmsg.ConsumerMemberMetadataOwnedPartition + Generation int32 + Cooperative bool +} + +// Plan is the plan this package came up with (member => topic => partitions). +type Plan map[string]map[string][]int32 + +type balancer struct { + // members are the members in play for this balance. + // This is built in newBalancer mapping member IDs to the GroupMember. + members []GroupMember + + memberNums map[string]uint16 // member id => index into members + + topicNums map[string]uint32 // topic name => index into topicInfos + topicInfos []topicInfo + partOwners []uint32 // partition => owning topicNum + + // Stales tracks partNums that are doubly subscribed in this join + // where one of the subscribers is on an old generation. + // + // The newer generation goes into plan directly, the older gets + // stuffed here. + stales map[int32]uint16 // partNum => stale memberNum + + plan membersPartitions // what we are building and balancing + + // planByNumPartitions orders plan members into partition count levels. + // + // The nodes in the tree reference values in plan, meaning updates in + // this field are visible in plan. + planByNumPartitions treePlan + + // if the subscriptions are complex (all members do _not_ consume the + // same partitions), then we build a graph and use that for assigning. + isComplex bool + + // stealGraph is a graphical representation of members and partitions + // they want to steal. + stealGraph graph +} + +type topicInfo struct { + partNum int32 // base part num + partitions int32 // number of partitions in the topic + topic string +} + +func newBalancer(members []GroupMember, topics map[string]int32) *balancer { + var ( + nparts int + topicNums = make(map[string]uint32, len(topics)) + topicInfos = make([]topicInfo, len(topics)) + ) + for topic, partitions := range topics { + topicNum := uint32(len(topicNums)) + topicNums[topic] = topicNum + topicInfos[topicNum] = topicInfo{ + partNum: int32(nparts), + partitions: partitions, + topic: topic, + } + nparts += int(partitions) + } + partOwners := make([]uint32, 0, nparts) + for topicNum, info := range topicInfos { + for i := int32(0); i < info.partitions; i++ { + partOwners = append(partOwners, uint32(topicNum)) + } + } + memberNums := make(map[string]uint16, len(members)) + for num, member := range members { + memberNums[member.ID] = uint16(num) + } + + b := &balancer{ + members: members, + memberNums: memberNums, + topicNums: topicNums, + topicInfos: topicInfos, + + partOwners: partOwners, + stales: make(map[int32]uint16), + plan: make(membersPartitions, len(members)), + } + + evenDivvy := nparts/len(members) + 1 + planBuf := make(memberPartitions, evenDivvy*len(members)) + for num := range members { + b.plan[num] = planBuf[:0:evenDivvy] + planBuf = planBuf[evenDivvy:] + } + return b +} + +func (b *balancer) into() Plan { + plan := make(Plan, len(b.plan)) + ntopics := 5 * len(b.topicNums) / 4 + + for memberNum, partNums := range b.plan { + member := b.members[memberNum].ID + if len(partNums) == 0 { + plan[member] = make(map[string][]int32, 0) + continue + } + topics := make(map[string][]int32, ntopics) + plan[member] = topics + + // partOwners is created by topic, and partNums refers to + // indices in partOwners. If we sort by partNum, we have sorted + // topics and partitions. 
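+		// For example, if topic "a" has 3 partitions and topic "b" has
+		// 2 and they were numbered in that order, partNums 0-2 map to
+		// a[0..2] and partNums 3-4 map to b[0..1], so sorting partNums
+		// groups each topic's partitions together in partition order.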
+ sortPartNums(partNums) + + // We can reuse partNums for our topic partitions. + topicParts := partNums[:0] + + lastTopicNum := b.partOwners[partNums[0]] + lastTopicInfo := b.topicInfos[lastTopicNum] + for _, partNum := range partNums { + topicNum := b.partOwners[partNum] + + if topicNum != lastTopicNum { + topics[lastTopicInfo.topic] = topicParts[:len(topicParts):len(topicParts)] + topicParts = topicParts[len(topicParts):] + + lastTopicNum = topicNum + lastTopicInfo = b.topicInfos[topicNum] + } + + partition := partNum - lastTopicInfo.partNum + topicParts = append(topicParts, partition) + } + topics[lastTopicInfo.topic] = topicParts[:len(topicParts):len(topicParts)] + } + return plan +} + +func (b *balancer) partNumByTopic(topic string, partition int32) (int32, bool) { + topicNum, exists := b.topicNums[topic] + if !exists { + return 0, false + } + topicInfo := b.topicInfos[topicNum] + if partition >= topicInfo.partitions { + return 0, false + } + return topicInfo.partNum + partition, true +} + +// memberPartitions contains partitions for a member. +type memberPartitions []int32 + +func (m *memberPartitions) remove(needle int32) { + s := *m + var d int + for i, check := range s { + if check == needle { + d = i + break + } + } + s[d] = s[len(s)-1] + *m = s[:len(s)-1] +} + +func (m *memberPartitions) takeEnd() int32 { + s := *m + r := s[len(s)-1] + *m = s[:len(s)-1] + return r +} + +func (m *memberPartitions) add(partNum int32) { + *m = append(*m, partNum) +} + +// membersPartitions maps members to their partitions. +type membersPartitions []memberPartitions + +type partitionLevel struct { + level int + members []uint16 +} + +// partitionLevel's members field used to be a map, but removing it gains a +// slight perf boost at the cost of removing members being O(M). +// Even with the worse complexity, scanning a short list can be faster +// than managing a map, and we expect groups to not be _too_ large. +func (l *partitionLevel) removeMember(memberNum uint16) { + for i, v := range l.members { + if v == memberNum { + l.members[i] = l.members[len(l.members)-1] + l.members = l.members[:len(l.members)-1] + return + } + } +} + +func (b *balancer) findLevel(level int) *partitionLevel { + return b.planByNumPartitions.findWithOrInsertWith( + func(n *partitionLevel) int { return level - n.level }, + func() *partitionLevel { return newPartitionLevel(level) }, + ).item +} + +func (b *balancer) fixMemberLevel( + src *treePlanNode, + memberNum uint16, + partNums memberPartitions, +) { + b.removeLevelingMember(src, memberNum) + newLevel := len(partNums) + partLevel := b.findLevel(newLevel) + partLevel.members = append(partLevel.members, memberNum) +} + +func (b *balancer) removeLevelingMember( + src *treePlanNode, + memberNum uint16, +) { + src.item.removeMember(memberNum) + if len(src.item.members) == 0 { + b.planByNumPartitions.delete(src) + } +} + +func (l *partitionLevel) less(r *partitionLevel) bool { + return l.level < r.level +} + +func newPartitionLevel(level int) *partitionLevel { + return &partitionLevel{level: level} +} + +func (b *balancer) initPlanByNumPartitions() { + for memberNum, partNums := range b.plan { + partLevel := b.findLevel(len(partNums)) + partLevel.members = append(partLevel.members, uint16(memberNum)) + } +} + +// Balance performs sticky partitioning for the given group members and topics, +// returning the determined plan. 
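+//
+// For example (the topic name and member IDs are illustrative):
+//
+//	members := []GroupMember{
+//		{ID: "a", Topics: []string{"logs"}},
+//		{ID: "b", Topics: []string{"logs"}},
+//	}
+//	plan := Balance(members, map[string]int32{"logs": 4})
+//	// plan["a"] and plan["b"] each receive two of the four partitions.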
+func Balance(members []GroupMember, topics map[string]int32) Plan { + if len(members) == 0 { + return make(Plan) + } + b := newBalancer(members, topics) + if cap(b.partOwners) == 0 { + return b.into() + } + b.parseMemberMetadata() + b.assignUnassignedAndInitGraph() + b.initPlanByNumPartitions() + b.balance() + return b.into() +} + +// parseMemberMetadata parses all member userdata to initialize the prior plan. +func (b *balancer) parseMemberMetadata() { + // all partitions => members that are consuming those partitions + // Each partition should only have one consumer, but a flaky member + // could rejoin with an old generation (stale user data) and say it + // is consuming something a different member is. See KIP-341. + partitionConsumersByGeneration := make([]memberGeneration, cap(b.partOwners)) + + const highBit uint32 = 1 << 31 + var memberPlan []topicPartition + var gen uint32 + + for _, member := range b.members { + // KAFKA-13715 / KIP-792: cooperative-sticky now includes a + // generation directly with the currently-owned partitions, and + // we can avoid deserializing UserData. This guards against + // some zombie issues (see KIP). + // + // The eager (sticky) balancer revokes all partitions before + // rejoining, so we cannot use Owned. + if member.Cooperative && member.Generation >= 0 { + memberPlan = memberPlan[:0] + for _, t := range member.Owned { + for _, p := range t.Partitions { + memberPlan = append(memberPlan, topicPartition{t.Topic, p}) + } + } + gen = uint32(member.Generation) + } else { + memberPlan, gen = deserializeUserData(member.UserData, memberPlan[:0]) + } + gen |= highBit + memberNum := b.memberNums[member.ID] + for _, topicPartition := range memberPlan { + partNum, exists := b.partNumByTopic(topicPartition.topic, topicPartition.partition) + if !exists { + continue + } + + // We keep the highest generation, and at most two generations. + // If something is doubly consumed, we skip it. + pcs := &partitionConsumersByGeneration[partNum] + switch { + case gen > pcs.genNew: // one consumer already, but new member has higher generation + pcs.memberOld, pcs.genOld = pcs.memberNew, pcs.genNew + pcs.memberNew, pcs.genNew = memberNum, gen + + case gen > pcs.genOld: // one consumer already, we could be second, or if there is a second, we have a high generation + pcs.memberOld, pcs.genOld = memberNum, gen + } + } + } + + for partNum, pcs := range partitionConsumersByGeneration { + if pcs.genNew&highBit != 0 { + b.plan[pcs.memberNew].add(int32(partNum)) + if pcs.genOld&highBit != 0 { + b.stales[int32(partNum)] = pcs.memberOld + } + } + } +} + +type memberGeneration struct { + memberNew uint16 + memberOld uint16 + genNew uint32 + genOld uint32 +} + +type topicPartition struct { + topic string + partition int32 +} + +// deserializeUserData returns the topic partitions a member was consuming and +// the join generation it was consuming from. +// +// If anything fails or we do not understand the userdata parsing generation, +// we return empty defaults. The member will just be assumed to have no +// history. 
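+//
+// The layout this parser expects is, in order:
+//
+//	[assignments] => topic (string), [partitions] (int32)
+//	generation (int32, optional trailing field)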
+func deserializeUserData(userdata []byte, base []topicPartition) (memberPlan []topicPartition, generation uint32) { + memberPlan = base[:0] + b := kbin.Reader{Src: userdata} + for numAssignments := b.ArrayLen(); numAssignments > 0; numAssignments-- { + topic := b.UnsafeString() + for numPartitions := b.ArrayLen(); numPartitions > 0; numPartitions-- { + memberPlan = append(memberPlan, topicPartition{ + topic, + b.Int32(), + }) + } + } + if len(b.Src) > 0 { + // A generation of -1 is just as good of a generation as 0, so we use 0 + // and then use the high bit to signify this generation has been set. + if generationI32 := b.Int32(); generationI32 > 0 { + generation = uint32(generationI32) + } + } + if b.Complete() != nil { + memberPlan = memberPlan[:0] + } + return +} + +// assignUnassignedAndInitGraph is a long function that assigns unassigned +// partitions to the least loaded members and initializes our steal graph. +// +// Doing so requires a bunch of metadata, and in the process we want to remove +// partitions from the plan that no longer exist in the client. +func (b *balancer) assignUnassignedAndInitGraph() { + // First, over all members in this assignment, map each partition to + // the members that can consume it. We will use this for assigning. + // + // To do this mapping efficiently, we first map each topic to the + // memberNums that can consume those topics, and then use the results + // below in the partition mapping. Doing this two step process allows + // for a 10x speed boost rather than ranging over all partitions many + // times. + topicPotentialsBuf := make([]uint16, len(b.topicNums)*len(b.members)) + topicPotentials := make([][]uint16, len(b.topicNums)) + for memberNum, member := range b.members { + for _, topic := range member.Topics { + topicNum, exists := b.topicNums[topic] + if !exists { + continue + } + memberNums := topicPotentials[topicNum] + if cap(memberNums) == 0 { + memberNums = topicPotentialsBuf[:0:len(b.members)] + topicPotentialsBuf = topicPotentialsBuf[len(b.members):] + } + topicPotentials[topicNum] = append(memberNums, uint16(memberNum)) + } + } + + for _, topicMembers := range topicPotentials { + // If the number of members interested in this topic is not the + // same as the number of members in this group, then **other** + // members are interested in other topics and not this one, and + // we must go to complex balancing. + // + // We could accidentally fall into isComplex if any member is + // not interested in anything, but realistically we do not + // expect members to join with no interests. + if len(topicMembers) != len(b.members) { + b.isComplex = true + } + } + + // Next, over the prior plan, un-map deleted topics or topics that + // members no longer want. This is where we determine what is now + // unassigned. 
+ partitionConsumers := make([]partitionConsumer, cap(b.partOwners)) // partNum => consuming member + for i := range partitionConsumers { + partitionConsumers[i] = partitionConsumer{unassignedPart, unassignedPart} + } + for memberNum := range b.plan { + partNums := &b.plan[memberNum] + for _, partNum := range *partNums { + topicNum := b.partOwners[partNum] + if len(topicPotentials[topicNum]) == 0 { // all prior subscriptions stopped wanting this partition + partNums.remove(partNum) + continue + } + memberTopics := b.members[memberNum].Topics + var memberStillWantsTopic bool + for _, memberTopic := range memberTopics { + if memberTopic == b.topicInfos[topicNum].topic { + memberStillWantsTopic = true + break + } + } + if !memberStillWantsTopic { + partNums.remove(partNum) + continue + } + partitionConsumers[partNum] = partitionConsumer{uint16(memberNum), uint16(memberNum)} + } + } + + b.tryRestickyStales(topicPotentials, partitionConsumers) + + // For each member, we now sort their current partitions by partition, + // then topic. Sorting the lowest numbers first means that once we + // steal from the end (when adding a member), we steal equally across + // all topics. This benefits the standard case the most, where all + // members consume equally. + for memberNum := range b.plan { + b.sortMemberByLiteralPartNum(memberNum) + } + + if !b.isComplex && len(topicPotentials) > 0 { + potentials := topicPotentials[0] + (&membersByPartitions{potentials, b.plan}).init() + for partNum, owner := range partitionConsumers { + if owner.memberNum != unassignedPart { + continue + } + assigned := potentials[0] + b.plan[assigned].add(int32(partNum)) + (&membersByPartitions{potentials, b.plan}).fix0() + partitionConsumers[partNum].memberNum = assigned + } + } else { + for partNum, owner := range partitionConsumers { + if owner.memberNum != unassignedPart { + continue + } + potentials := topicPotentials[b.partOwners[partNum]] + if len(potentials) == 0 { + continue + } + leastConsumingPotential := potentials[0] + leastConsuming := len(b.plan[leastConsumingPotential]) + for _, potential := range potentials[1:] { + potentialConsuming := len(b.plan[potential]) + if potentialConsuming < leastConsuming { + leastConsumingPotential = potential + leastConsuming = potentialConsuming + } + } + b.plan[leastConsumingPotential].add(int32(partNum)) + partitionConsumers[partNum].memberNum = leastConsumingPotential + } + } + + // Lastly, with everything assigned, we build our steal graph for + // balancing if needed. + if b.isComplex { + b.stealGraph = b.newGraph( + partitionConsumers, + topicPotentials, + ) + } +} + +// unassignedPart is a fake member number that we use to track if a partition +// is deleted or unassigned. +const unassignedPart = math.MaxUint16 - 1 + +// tryRestickyStales is a pre-assigning step where, for all stale members, +// we give partitions back to them if the partition is currently on an +// over loaded member or unassigned. +// +// This effectively re-stickies members before we balance further. +func (b *balancer) tryRestickyStales( + topicPotentials [][]uint16, + partitionConsumers []partitionConsumer, +) { + for staleNum, lastOwnerNum := range b.stales { + potentials := topicPotentials[b.partOwners[staleNum]] // there must be a potential consumer if we are here + var canTake bool + for _, potentialNum := range potentials { + if potentialNum == lastOwnerNum { + canTake = true + } + } + if !canTake { + return + } + + // The part cannot be unassigned here; a stale member + // would just have it. 
The part also cannot be deleted; + // if it is, there are no potential consumers and the + // logic above continues before getting here. The part + // must be on a different owner (cannot be lastOwner), + // otherwise it would not be a lastOwner in the stales + // map; it would just be the current owner. + currentOwner := partitionConsumers[staleNum].memberNum + lastOwnerPartitions := &b.plan[lastOwnerNum] + currentOwnerPartitions := &b.plan[currentOwner] + if len(*lastOwnerPartitions)+1 < len(*currentOwnerPartitions) { + currentOwnerPartitions.remove(staleNum) + lastOwnerPartitions.add(staleNum) + } + } +} + +type partitionConsumer struct { + memberNum uint16 + originalNum uint16 +} + +// While assigning, we keep members per topic heap sorted by the number of +// partitions they are currently consuming. This allows us to have quick +// assignment vs. always scanning to see the min loaded member. +// +// Our process is to init the heap and then always fix the 0th index after +// making it larger, so we only ever need to sift down. +type membersByPartitions struct { + members []uint16 + plan membersPartitions +} + +func (m *membersByPartitions) init() { + n := len(m.members) + for i := n/2 - 1; i >= 0; i-- { + m.down(i, n) + } +} + +func (m *membersByPartitions) fix0() { + m.down(0, len(m.members)) +} + +func (m *membersByPartitions) down(i0, n int) { + node := i0 + for { + left := 2*node + 1 + if left >= n || left < 0 { // left < 0 after int overflow + break + } + swap := left // left child + swapLen := len(m.plan[m.members[left]]) + if right := left + 1; right < n { + if rightLen := len(m.plan[m.members[right]]); rightLen < swapLen { + swapLen = rightLen + swap = right + } + } + nodeLen := len(m.plan[m.members[node]]) + if nodeLen <= swapLen { + break + } + m.members[node], m.members[swap] = m.members[swap], m.members[node] + node = swap + } +} + +// balance loops trying to move partitions until the plan is as balanced +// as it can be. +func (b *balancer) balance() { + if b.isComplex { + b.balanceComplex() + return + } + + // If all partitions are consumed equally, we have a very easy + // algorithm to balance: while the min and max levels are separated + // by over two, take from the top and give to the bottom. + min := b.planByNumPartitions.min().item + max := b.planByNumPartitions.max().item + for { + if max.level <= min.level+1 { + return + } + + minMems := min.members + maxMems := max.members + for len(minMems) > 0 && len(maxMems) > 0 { + dst := minMems[0] + src := maxMems[0] + + minMems = minMems[1:] + maxMems = maxMems[1:] + + srcPartitions := &b.plan[src] + dstPartitions := &b.plan[dst] + + dstPartitions.add(srcPartitions.takeEnd()) + } + + nextUp := b.findLevel(min.level + 1) + nextDown := b.findLevel(max.level - 1) + + endOfUps := len(min.members) - len(minMems) + endOfDowns := len(max.members) - len(maxMems) + + nextUp.members = append(nextUp.members, min.members[:endOfUps]...) + nextDown.members = append(nextDown.members, max.members[:endOfDowns]...) 
+ + min.members = min.members[endOfUps:] + max.members = max.members[endOfDowns:] + + if len(min.members) == 0 { + b.planByNumPartitions.delete(b.planByNumPartitions.min()) + min = b.planByNumPartitions.min().item + } + if len(max.members) == 0 { + b.planByNumPartitions.delete(b.planByNumPartitions.max()) + max = b.planByNumPartitions.max().item + } + } +} + +func (b *balancer) balanceComplex() { + for min := b.planByNumPartitions.min(); b.planByNumPartitions.size > 1; min = b.planByNumPartitions.min() { + level := min.item + // If this max level is within one of this level, then nothing + // can steal down so we return early. + max := b.planByNumPartitions.max().item + if max.level <= level.level+1 { + return + } + // We continually loop over this level until every member is + // static (deleted) or bumped up a level. + for len(level.members) > 0 { + memberNum := level.members[0] + if stealPath, found := b.stealGraph.findSteal(memberNum); found { + for _, segment := range stealPath { + b.reassignPartition(segment.src, segment.dst, segment.part) + } + if len(max.members) == 0 { + break + } + continue + } + + // If we could not find a steal path, this + // member is not static (will never grow). + level.removeMember(memberNum) + if len(level.members) == 0 { + b.planByNumPartitions.delete(b.planByNumPartitions.min()) + } + } + } +} + +func (b *balancer) reassignPartition(src, dst uint16, partNum int32) { + srcPartitions := &b.plan[src] + dstPartitions := &b.plan[dst] + + oldSrcLevel := len(*srcPartitions) + oldDstLevel := len(*dstPartitions) + + srcPartitions.remove(partNum) + dstPartitions.add(partNum) + + b.fixMemberLevel( + b.planByNumPartitions.findWith(func(n *partitionLevel) int { + return oldSrcLevel - n.level + }), + src, + *srcPartitions, + ) + b.fixMemberLevel( + b.planByNumPartitions.findWith(func(n *partitionLevel) int { + return oldDstLevel - n.level + }), + dst, + *dstPartitions, + ) + + b.stealGraph.changeOwnership(partNum, dst) +} diff --git a/vendor/github.com/twmb/franz-go/pkg/kgo/logger.go b/vendor/github.com/twmb/franz-go/pkg/kgo/logger.go new file mode 100644 index 000000000000..bfc5dc0dd6b0 --- /dev/null +++ b/vendor/github.com/twmb/franz-go/pkg/kgo/logger.go @@ -0,0 +1,124 @@ +package kgo + +import ( + "bytes" + "fmt" + "io" + "strings" +) + +// LogLevel designates which level the logger should log at. +type LogLevel int8 + +const ( + // LogLevelNone disables logging. + LogLevelNone LogLevel = iota + // LogLevelError logs all errors. Generally, these should not happen. + LogLevelError + // LogLevelWarn logs all warnings, such as request failures. + LogLevelWarn + // LogLevelInfo logs informational messages, such as requests. This is + // usually the default log level. + LogLevelInfo + // LogLevelDebug logs verbose information, and is usually not used in + // production. + LogLevelDebug +) + +func (l LogLevel) String() string { + switch l { + case LogLevelError: + return "ERROR" + case LogLevelWarn: + return "WARN" + case LogLevelInfo: + return "INFO" + case LogLevelDebug: + return "DEBUG" + default: + return "NONE" + } +} + +// Logger is used to log informational messages. +type Logger interface { + // Level returns the log level to log at. + // + // Implementations can change their log level on the fly, but this + // function must be safe to call concurrently. + Level() LogLevel + + // Log logs a message with key, value pair arguments for the given log + // level. Keys are always strings, while values can be any type. + // + // This must be safe to call concurrently. 
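+	//
+	// For example:
+	//
+	//	logger.Log(LogLevelInfo, "metadata update triggered", "why", why)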
+ Log(level LogLevel, msg string, keyvals ...any) +} + +// BasicLogger returns a logger that will print to dst in the following format: +// +// prefix [LEVEL] message; key: val, key: val +// +// prefixFn is optional; if non-nil, it is called for a per-message prefix. +// +// Writes to dst are not checked for errors. +func BasicLogger(dst io.Writer, level LogLevel, prefixFn func() string) Logger { + return &basicLogger{dst, level, prefixFn} +} + +type basicLogger struct { + dst io.Writer + level LogLevel + pfxFn func() string +} + +func (b *basicLogger) Level() LogLevel { return b.level } +func (b *basicLogger) Log(level LogLevel, msg string, keyvals ...any) { + buf := byteBuffers.Get().(*bytes.Buffer) + defer byteBuffers.Put(buf) + + buf.Reset() + if b.pfxFn != nil { + buf.WriteString(b.pfxFn()) + } + buf.WriteByte('[') + buf.WriteString(level.String()) + buf.WriteString("] ") + buf.WriteString(msg) + + if len(keyvals) > 0 { + buf.WriteString("; ") + format := strings.Repeat("%v: %v, ", len(keyvals)/2) + format = format[:len(format)-2] // trim trailing comma and space + fmt.Fprintf(buf, format, keyvals...) + } + + buf.WriteByte('\n') + b.dst.Write(buf.Bytes()) +} + +// nopLogger, the default logger, drops everything. +type nopLogger struct{} + +func (*nopLogger) Level() LogLevel { return LogLevelNone } +func (*nopLogger) Log(LogLevel, string, ...any) { +} + +// wrappedLogger wraps the config logger for convenience at logging callsites. +type wrappedLogger struct { + inner Logger +} + +func (w *wrappedLogger) Level() LogLevel { + if w.inner == nil { + return LogLevelNone + } + return w.inner.Level() +} + +func (w *wrappedLogger) Log(level LogLevel, msg string, keyvals ...any) { + if w.Level() < level { + return + } + w.inner.Log(level, msg, keyvals...) +} diff --git a/vendor/github.com/twmb/franz-go/pkg/kgo/metadata.go b/vendor/github.com/twmb/franz-go/pkg/kgo/metadata.go new file mode 100644 index 000000000000..33cac6414f94 --- /dev/null +++ b/vendor/github.com/twmb/franz-go/pkg/kgo/metadata.go @@ -0,0 +1,966 @@ +package kgo + +import ( + "context" + "errors" + "fmt" + "sort" + "strings" + "sync" + "time" + + "github.com/twmb/franz-go/pkg/kerr" +) + +type metawait struct { + mu sync.Mutex + c *sync.Cond + lastUpdate time.Time +} + +func (m *metawait) init() { m.c = sync.NewCond(&m.mu) } +func (m *metawait) signal() { + m.mu.Lock() + m.lastUpdate = time.Now() + m.mu.Unlock() + m.c.Broadcast() +} + +// ForceMetadataRefresh triggers the client to update the metadata that is +// currently used for producing & consuming. +// +// Internally, the client already properly triggers metadata updates whenever a +// partition is discovered to be out of date (leader moved, epoch is old, etc). +// However, when partitions are added to a topic through a CreatePartitions +// request, it may take up to MetadataMaxAge for the new partitions to be +// discovered. In this case, you may want to forcefully refresh metadata +// manually to discover these new partitions sooner. +func (cl *Client) ForceMetadataRefresh() { + cl.triggerUpdateMetadataNow("from user ForceMetadataRefresh") +} + +// PartitionLeader returns the given topic partition's leader, leader epoch and +// load error. This returns -1, -1, nil if the partition has not been loaded. 
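+//
+// For example ("my-topic" is illustrative):
+//
+//	leader, _, err := cl.PartitionLeader("my-topic", 0)
+//	if err == nil && leader >= 0 {
+//		// partition 0 of my-topic currently has a known leader broker.
+//	}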
+func (cl *Client) PartitionLeader(topic string, partition int32) (leader, leaderEpoch int32, err error) { + if partition < 0 { + return -1, -1, errors.New("invalid negative partition") + } + + var t *topicPartitions + + m := cl.producer.topics.load() + if len(m) > 0 { + t = m[topic] + } + if t == nil { + if cl.consumer.g != nil { + if m = cl.consumer.g.tps.load(); len(m) > 0 { + t = m[topic] + } + } else if cl.consumer.d != nil { + if m = cl.consumer.d.tps.load(); len(m) > 0 { + t = m[topic] + } + } + if t == nil { + return -1, -1, nil + } + } + + tv := t.load() + if len(tv.partitions) <= int(partition) { + return -1, -1, tv.loadErr + } + p := tv.partitions[partition] + return p.leader, p.leaderEpoch, p.loadErr +} + +// waitmeta returns immediately if metadata was updated within the last second, +// otherwise this waits for up to wait for a metadata update to complete. +func (cl *Client) waitmeta(ctx context.Context, wait time.Duration, why string) { + now := time.Now() + + cl.metawait.mu.Lock() + if now.Sub(cl.metawait.lastUpdate) < cl.cfg.metadataMinAge { + cl.metawait.mu.Unlock() + return + } + cl.metawait.mu.Unlock() + + cl.triggerUpdateMetadataNow(why) + + quit := false + done := make(chan struct{}) + timeout := time.NewTimer(wait) + defer timeout.Stop() + + go func() { + defer close(done) + cl.metawait.mu.Lock() + defer cl.metawait.mu.Unlock() + + for !quit { + if now.Sub(cl.metawait.lastUpdate) < cl.cfg.metadataMinAge { + return + } + cl.metawait.c.Wait() + } + }() + + select { + case <-done: + return + case <-timeout.C: + case <-ctx.Done(): + case <-cl.ctx.Done(): + } + + cl.metawait.mu.Lock() + quit = true + cl.metawait.mu.Unlock() + cl.metawait.c.Broadcast() +} + +func (cl *Client) triggerUpdateMetadata(must bool, why string) bool { + if !must { + cl.metawait.mu.Lock() + defer cl.metawait.mu.Unlock() + if time.Since(cl.metawait.lastUpdate) < cl.cfg.metadataMinAge { + return false + } + } + + select { + case cl.updateMetadataCh <- why: + default: + } + return true +} + +func (cl *Client) triggerUpdateMetadataNow(why string) { + select { + case cl.updateMetadataNowCh <- why: + default: + } +} + +func (cl *Client) blockingMetadataFn(fn func()) { + var wg sync.WaitGroup + wg.Add(1) + waitfn := func() { + defer wg.Done() + fn() + } + select { + case cl.blockingMetadataFnCh <- waitfn: + wg.Wait() + case <-cl.ctx.Done(): + } +} + +// updateMetadataLoop updates metadata whenever the update ticker ticks, +// or whenever deliberately triggered. +func (cl *Client) updateMetadataLoop() { + defer close(cl.metadone) + var consecutiveErrors int + var lastAt time.Time + + ticker := time.NewTicker(cl.cfg.metadataMaxAge) + defer ticker.Stop() +loop: + for { + var now bool + select { + case <-cl.ctx.Done(): + return + case <-ticker.C: + // We do not log on the standard update case. 
+ case why := <-cl.updateMetadataCh: + cl.cfg.logger.Log(LogLevelInfo, "metadata update triggered", "why", why) + case why := <-cl.updateMetadataNowCh: + cl.cfg.logger.Log(LogLevelInfo, "immediate metadata update triggered", "why", why) + now = true + case fn := <-cl.blockingMetadataFnCh: + fn() + continue loop + } + + var nowTries int + start: + nowTries++ + if !now { + if wait := cl.cfg.metadataMinAge - time.Since(lastAt); wait > 0 { + timer := time.NewTimer(wait) + prewait: + select { + case <-cl.ctx.Done(): + timer.Stop() + return + case why := <-cl.updateMetadataNowCh: + timer.Stop() + cl.cfg.logger.Log(LogLevelInfo, "immediate metadata update triggered, bypassing normal wait", "why", why) + case <-timer.C: + case fn := <-cl.blockingMetadataFnCh: + fn() + goto prewait + } + } + } + + // Even with an "update now", we sleep just a bit to allow some + // potential pile on now triggers. + time.Sleep(time.Until(lastAt.Add(10 * time.Millisecond))) + + // Drain any refires that occurred during our waiting. + out: + for { + select { + case <-cl.updateMetadataCh: + case <-cl.updateMetadataNowCh: + case fn := <-cl.blockingMetadataFnCh: + fn() + default: + break out + } + } + + retryWhy, err := cl.updateMetadata() + if retryWhy != nil || err != nil { + // If err is non-nil, the metadata request failed + // itself and already retried 3x; we do not loop more. + // + // If err is nil, the a topic or partition had a load + // error and is perhaps still being created. We retry a + // few more times to give Kafka a chance to figure + // things out. By default this will put us at 2s of + // looping+waiting (250ms per wait, 8x), and if things + // still fail we will fall into the slower update below + // which waits (default) 5s between tries. + if now && err == nil && nowTries < 8 { + wait := 250 * time.Millisecond + if cl.cfg.metadataMinAge < wait { + wait = cl.cfg.metadataMinAge + } + cl.cfg.logger.Log(LogLevelDebug, "immediate metadata update had inner errors, re-updating", + "errors", retryWhy.reason(""), + "update_after", wait, + ) + timer := time.NewTimer(wait) + quickbackoff: + select { + case <-cl.ctx.Done(): + timer.Stop() + return + case <-timer.C: + case fn := <-cl.blockingMetadataFnCh: + fn() + goto quickbackoff + } + goto start + } + if err != nil { + cl.triggerUpdateMetadata(true, fmt.Sprintf("re-updating metadata due to err: %s", err)) + } else { + cl.triggerUpdateMetadata(true, retryWhy.reason("re-updating due to inner errors")) + } + } + if err == nil { + cl.metawait.signal() + cl.consumer.doOnMetadataUpdate() + lastAt = time.Now() + consecutiveErrors = 0 + continue + } + + consecutiveErrors++ + after := time.NewTimer(cl.cfg.retryBackoff(consecutiveErrors)) + backoff: + select { + case <-cl.ctx.Done(): + after.Stop() + return + case <-after.C: + case fn := <-cl.blockingMetadataFnCh: + fn() + goto backoff + } + } +} + +var errMissingTopic = errors.New("topic_missing") + +// Updates all producer and consumer partition data, returning whether a new +// update needs scheduling or if an error occurred. +// +// The producer and consumer use different topic maps and underlying +// topicPartitionsData pointers, but we update those underlying pointers +// equally. 
+func (cl *Client) updateMetadata() (retryWhy multiUpdateWhy, err error) { + var ( + tpsProducerLoad = cl.producer.topics.load() + tpsConsumer *topicsPartitions + groupExternal *groupExternal + all = cl.cfg.regex + reqTopics []string + ) + c := &cl.consumer + switch { + case c.g != nil: + tpsConsumer = c.g.tps + groupExternal = c.g.loadExternal() + case c.d != nil: + tpsConsumer = c.d.tps + } + + if !all { + reqTopicsSet := make(map[string]struct{}) + for _, m := range []map[string]*topicPartitions{ + tpsProducerLoad, + tpsConsumer.load(), + } { + for topic := range m { + reqTopicsSet[topic] = struct{}{} + } + } + groupExternal.eachTopic(func(t string) { + reqTopicsSet[t] = struct{}{} + }) + reqTopics = make([]string, 0, len(reqTopicsSet)) + for topic := range reqTopicsSet { + reqTopics = append(reqTopics, topic) + } + } + + latest, err := cl.fetchTopicMetadata(all, reqTopics) + if err != nil { + cl.bumpMetadataFailForTopics( // bump load failures for all topics + tpsProducerLoad, + err, + ) + return nil, err + } + groupExternal.updateLatest(latest) + + // If we are consuming with regex and fetched all topics, the metadata + // may have returned topics the consumer is not yet tracking. We ensure + // that we will store the topics at the end of our metadata update. + tpsConsumerLoad := tpsConsumer.load() + if all { + allTopics := make([]string, 0, len(latest)) + for topic := range latest { + allTopics = append(allTopics, topic) + } + tpsConsumerLoad = tpsConsumer.ensureTopics(allTopics) + defer tpsConsumer.storeData(tpsConsumerLoad) + + // For regex consuming, if a topic is not returned in the + // response and for at least missingTopicDelete from when we + // first discovered it, we assume the topic has been deleted + // and purge it. We allow for missingTopicDelete because (in + // testing locally) Kafka can originally broadcast a newly + // created topic exists and then fail to broadcast that info + // again for a while. + var purgeTopics []string + for topic, tps := range tpsConsumerLoad { + if _, ok := latest[topic]; !ok { + if td := tps.load(); td.when != 0 && time.Since(time.Unix(td.when, 0)) > cl.cfg.missingTopicDelete { + purgeTopics = append(purgeTopics, td.topic) + } else { + retryWhy.add(topic, -1, errMissingTopic) + } + } + } + if len(purgeTopics) > 0 { + // We have to `go` because Purge issues a blocking + // metadata fn; this will wait for our current + // execution to finish then purge. + cl.cfg.logger.Log(LogLevelInfo, "regex consumer purging topics that were previously consumed because they are missing in a metadata response, we are assuming they are deleted", "topics", purgeTopics) + go cl.PurgeTopicsFromClient(purgeTopics...) + } + } + + css := &consumerSessionStopper{cl: cl} + defer css.maybeRestart() + + var missingProduceTopics []*topicPartitions + for _, m := range []struct { + priors map[string]*topicPartitions + isProduce bool + }{ + {tpsProducerLoad, true}, + {tpsConsumerLoad, false}, + } { + for topic, priorParts := range m.priors { + newParts, exists := latest[topic] + if !exists { + if m.isProduce { + missingProduceTopics = append(missingProduceTopics, priorParts) + } + continue + } + cl.mergeTopicPartitions( + topic, + priorParts, + newParts, + m.isProduce, + css, + &retryWhy, + ) + } + } + + // For all produce topics that were missing, we want to bump their + // retries that a failure happened. 
However, if we are regex consuming, + // then it is possible in a rare scenario for the broker to not return + // a topic that actually does exist and that we previously received a + // metadata response for. This is handled above for consuming, we now + // handle it the same way for consuming. + if len(missingProduceTopics) > 0 { + var bumpFail []string + for _, tps := range missingProduceTopics { + if all { + if td := tps.load(); td.when != 0 && time.Since(time.Unix(td.when, 0)) > cl.cfg.missingTopicDelete { + bumpFail = append(bumpFail, td.topic) + } else { + retryWhy.add(td.topic, -1, errMissingTopic) + } + } else { + bumpFail = append(bumpFail, tps.load().topic) + } + } + if len(bumpFail) > 0 { + cl.bumpMetadataFailForTopics( + tpsProducerLoad, + fmt.Errorf("metadata request did not return topics: %v", bumpFail), + bumpFail..., + ) + } + } + + return retryWhy, nil +} + +// We use a special structure to repesent metadata before we *actually* convert +// it to topicPartitionsData. This helps avoid any pointer reuse problems +// because we want to keep the client's producer and consumer maps completely +// independent. If we just returned map[string]*topicPartitionsData, we could +// end up in some really weird pointer reuse scenario that ultimately results +// in a bug. +// +// See #190 for more details, as well as the commit message introducing this. +type metadataTopic struct { + loadErr error + isInternal bool + topic string + partitions []metadataPartition +} + +func (mt *metadataTopic) newPartitions(cl *Client, isProduce bool) *topicPartitionsData { + n := len(mt.partitions) + ps := &topicPartitionsData{ + loadErr: mt.loadErr, + isInternal: mt.isInternal, + partitions: make([]*topicPartition, 0, n), + writablePartitions: make([]*topicPartition, 0, n), + topic: mt.topic, + when: time.Now().Unix(), + } + for i := range mt.partitions { + p := mt.partitions[i].newPartition(cl, isProduce) + ps.partitions = append(ps.partitions, p) + if p.loadErr == nil { + ps.writablePartitions = append(ps.writablePartitions, p) + } + } + return ps +} + +type metadataPartition struct { + topic string + topicID [16]byte + partition int32 + loadErr int16 + leader int32 + leaderEpoch int32 + sns sinkAndSource +} + +func (mp metadataPartition) newPartition(cl *Client, isProduce bool) *topicPartition { + td := topicPartitionData{ + leader: mp.leader, + leaderEpoch: mp.leaderEpoch, + } + p := &topicPartition{ + loadErr: kerr.ErrorForCode(mp.loadErr), + topicPartitionData: td, + } + if isProduce { + p.records = &recBuf{ + cl: cl, + topic: mp.topic, + partition: mp.partition, + maxRecordBatchBytes: cl.maxRecordBatchBytesForTopic(mp.topic), + recBufsIdx: -1, + failing: mp.loadErr != 0, + sink: mp.sns.sink, + topicPartitionData: td, + } + } else { + p.cursor = &cursor{ + topic: mp.topic, + topicID: mp.topicID, + partition: mp.partition, + keepControl: cl.cfg.keepControl, + cursorsIdx: -1, + source: mp.sns.source, + topicPartitionData: td, + cursorOffset: cursorOffset{ + offset: -1, // required to not consume until needed + lastConsumedEpoch: -1, // required sentinel + }, + } + } + return p +} + +// fetchTopicMetadata fetches metadata for all reqTopics and returns new +// topicPartitionsData for each topic. 
+func (cl *Client) fetchTopicMetadata(all bool, reqTopics []string) (map[string]*metadataTopic, error) { + _, meta, err := cl.fetchMetadataForTopics(cl.ctx, all, reqTopics) + if err != nil { + return nil, err + } + + // Since we've fetched the metadata for some topics we can optimistically cache it + // for mapped metadata too. This may reduce the number of Metadata requests issued + // by the client. + cl.storeCachedMappedMetadata(meta, nil) + + topics := make(map[string]*metadataTopic, len(meta.Topics)) + + // Even if metadata returns a leader epoch, we do not use it unless we + // can validate it per OffsetForLeaderEpoch. Some brokers may have an + // odd set of support. + useLeaderEpoch := cl.supportsOffsetForLeaderEpoch() + + for i := range meta.Topics { + topicMeta := &meta.Topics[i] + if topicMeta.Topic == nil { + cl.cfg.logger.Log(LogLevelWarn, "metadata response contained nil topic name even though we did not request with topic IDs, skipping") + continue + } + topic := *topicMeta.Topic + + mt := &metadataTopic{ + loadErr: kerr.ErrorForCode(topicMeta.ErrorCode), + isInternal: topicMeta.IsInternal, + topic: topic, + partitions: make([]metadataPartition, 0, len(topicMeta.Partitions)), + } + + topics[topic] = mt + + if mt.loadErr != nil { + continue + } + + // This 249 limit is in Kafka itself, we copy it here to rely on it while producing. + if len(topic) > 249 { + mt.loadErr = fmt.Errorf("invalid long topic name of (len %d) greater than max allowed 249", len(topic)) + continue + } + + // Kafka partitions are strictly increasing from 0. We enforce + // that here; if any partition is missing, we consider this + // topic a load failure. + sort.Slice(topicMeta.Partitions, func(i, j int) bool { + return topicMeta.Partitions[i].Partition < topicMeta.Partitions[j].Partition + }) + for i := range topicMeta.Partitions { + if got := topicMeta.Partitions[i].Partition; got != int32(i) { + mt.loadErr = fmt.Errorf("kafka did not reply with a comprensive set of partitions for a topic; we expected partition %d but saw %d", i, got) + break + } + } + + if mt.loadErr != nil { + continue + } + + for i := range topicMeta.Partitions { + partMeta := &topicMeta.Partitions[i] + leaderEpoch := partMeta.LeaderEpoch + if meta.Version < 7 || !useLeaderEpoch { + leaderEpoch = -1 + } + mp := metadataPartition{ + topic: topic, + topicID: topicMeta.TopicID, + partition: partMeta.Partition, + loadErr: partMeta.ErrorCode, + leader: partMeta.Leader, + leaderEpoch: leaderEpoch, + } + if mp.loadErr != 0 { + mp.leader = unknownSeedID(0) // ensure every records & cursor can use a sink or source + } + cl.sinksAndSourcesMu.Lock() + sns, exists := cl.sinksAndSources[mp.leader] + if !exists { + sns = sinkAndSource{ + sink: cl.newSink(mp.leader), + source: cl.newSource(mp.leader), + } + cl.sinksAndSources[mp.leader] = sns + } + for _, replica := range partMeta.Replicas { + if replica < 0 { + continue + } + if _, exists = cl.sinksAndSources[replica]; !exists { + cl.sinksAndSources[replica] = sinkAndSource{ + sink: cl.newSink(replica), + source: cl.newSource(replica), + } + } + } + cl.sinksAndSourcesMu.Unlock() + mp.sns = sns + mt.partitions = append(mt.partitions, mp) + } + } + + return topics, nil +} + +// mergeTopicPartitions merges a new topicPartition into an old and returns +// whether the metadata update that caused this merge needs to be retried. +// +// Retries are necessary if the topic or any partition has a retryable error. 
+func (cl *Client) mergeTopicPartitions( + topic string, + l *topicPartitions, + mt *metadataTopic, + isProduce bool, + css *consumerSessionStopper, + retryWhy *multiUpdateWhy, +) { + lv := *l.load() // copy so our field writes do not collide with reads + + r := mt.newPartitions(cl, isProduce) + + // Producers must store the update through a special function that + // manages unknown topic waiting, whereas consumers can just simply + // store the update. + if isProduce { + hadPartitions := len(lv.partitions) != 0 + defer func() { cl.storePartitionsUpdate(topic, l, &lv, hadPartitions) }() + } else { + defer l.v.Store(&lv) + } + + lv.loadErr = r.loadErr + lv.isInternal = r.isInternal + lv.topic = r.topic + if lv.when == 0 { + lv.when = r.when + } + + // If the load had an error for the entire topic, we set the load error + // but keep our stale partition information. For anything being + // produced, we bump the respective error or fail everything. There is + // nothing to be done in a consumer. + if r.loadErr != nil { + if isProduce { + for _, topicPartition := range lv.partitions { + topicPartition.records.bumpRepeatedLoadErr(lv.loadErr) + } + } else if !kerr.IsRetriable(r.loadErr) || cl.cfg.keepRetryableFetchErrors { + cl.consumer.addFakeReadyForDraining(topic, -1, r.loadErr, "metadata refresh has a load error on this entire topic") + } + retryWhy.add(topic, -1, r.loadErr) + return + } + + // Before the atomic update, we keep the latest partitions / writable + // partitions. All updates happen in r's slices, and we keep the + // results and store them in lv. + defer func() { + lv.partitions = r.partitions + lv.writablePartitions = r.writablePartitions + }() + + // We should have no deleted partitions, but there are two cases where + // we could. + // + // 1) an admin added partitions, we saw, then we re-fetched metadata + // from an out of date broker that did not have the new partitions + // + // 2) a topic was deleted and recreated with fewer partitions + // + // Both of these scenarios should be rare to non-existent, and we do + // nothing if we encounter them. + + // Migrating topicPartitions is a little tricky because we have to + // worry about underlying pointers that may currently be loaded. + for part, oldTP := range lv.partitions { + exists := part < len(r.partitions) + if !exists { + // This is the "deleted" case; see the comment above. + // + // We need to keep the partition around. For producing, + // the partition could be loaded and a record could be + // added to it after we bump the load error. For + // consuming, the partition is part of a group or part + // of what was loaded for direct consuming. + // + // We only clear a partition if it is purged from the + // client (which can happen automatically for consumers + // if the user opted into ConsumeRecreatedTopics). + dup := *oldTP + newTP := &dup + newTP.loadErr = errMissingMetadataPartition + + r.partitions = append(r.partitions, newTP) + + cl.cfg.logger.Log(LogLevelDebug, "metadata update is missing partition in topic, we are keeping the partition around for safety -- use PurgeTopicsFromClient if you wish to remove the topic", + "topic", topic, + "partition", part, + ) + if isProduce { + oldTP.records.bumpRepeatedLoadErr(errMissingMetadataPartition) + } + retryWhy.add(topic, int32(part), errMissingMetadataPartition) + continue + } + newTP := r.partitions[part] + + // Like above for the entire topic, an individual partition + // can have a load error. 
Unlike for the topic, individual + // partition errors are always retryable. + // + // If the load errored, we keep all old information minus the + // load error itself (the new load will have no information). + if newTP.loadErr != nil { + err := newTP.loadErr + *newTP = *oldTP + newTP.loadErr = err + if isProduce { + newTP.records.bumpRepeatedLoadErr(newTP.loadErr) + } else if !kerr.IsRetriable(newTP.loadErr) || cl.cfg.keepRetryableFetchErrors { + cl.consumer.addFakeReadyForDraining(topic, int32(part), newTP.loadErr, "metadata refresh has a load error on this partition") + } + retryWhy.add(topic, int32(part), newTP.loadErr) + continue + } + + // If the new partition has an older leader epoch, then we + // fetched from an out of date broker. We just keep the old + // information. + if newTP.leaderEpoch < oldTP.leaderEpoch { + // If we repeatedly rewind, then perhaps the cluster + // entered some bad state and lost forward progress. + // We will log & allow the rewind to allow the client + // to continue; other requests may encounter fenced + // epoch errors (and respectively recover). + // + // Five is a pretty low amount of retries, but since + // we iterate through known brokers, this basically + // means we keep stale metadata if five brokers all + // agree things rewound. + const maxEpochRewinds = 5 + if oldTP.epochRewinds < maxEpochRewinds { + cl.cfg.logger.Log(LogLevelDebug, "metadata leader epoch went backwards, ignoring update", + "topic", topic, + "partition", part, + "old_leader_epoch", oldTP.leaderEpoch, + "new_leader_epoch", newTP.leaderEpoch, + "current_num_rewinds", oldTP.epochRewinds+1, + ) + *newTP = *oldTP + newTP.epochRewinds++ + retryWhy.add(topic, int32(part), errEpochRewind) + continue + } + + cl.cfg.logger.Log(LogLevelInfo, "metadata leader epoch went backwards repeatedly, we are now keeping the metadata to allow forward progress", + "topic", topic, + "partition", part, + "old_leader_epoch", oldTP.leaderEpoch, + "new_leader_epoch", newTP.leaderEpoch, + ) + } + + if !isProduce { + var noID [16]byte + if newTP.cursor.topicID == noID && oldTP.cursor.topicID != noID { + cl.cfg.logger.Log(LogLevelWarn, "metadata update is missing the topic ID when we previously had one, ignoring update", + "topic", topic, + "partition", part, + ) + retryWhy.add(topic, int32(part), errMissingTopicID) + continue + } + } + + // If the tp data is the same, we simply copy over the records + // and cursor pointers. + // + // If the tp data equals the old, then the sink / source is the + // same, because the sink/source is from the tp leader. 
+ if newTP.topicPartitionData == oldTP.topicPartitionData { + cl.cfg.logger.Log(LogLevelDebug, "metadata refresh has identical topic partition data", + "topic", topic, + "partition", part, + "leader", newTP.leader, + "leader_epoch", newTP.leaderEpoch, + ) + if isProduce { + newTP.records = oldTP.records + newTP.records.clearFailing() // always clear failing state for producing after meta update + } else { + newTP.cursor = oldTP.cursor // unlike records, there is no failing state for a cursor + } + } else { + cl.cfg.logger.Log(LogLevelDebug, "metadata refresh topic partition data changed", + "topic", topic, + "partition", part, + "new_leader", newTP.leader, + "new_leader_epoch", newTP.leaderEpoch, + "old_leader", oldTP.leader, + "old_leader_epoch", oldTP.leaderEpoch, + ) + if isProduce { + oldTP.migrateProductionTo(newTP) // migration clears failing state + } else { + oldTP.migrateCursorTo(newTP, css) + } + } + } + + // For any partitions **not currently in use**, we need to add them to + // the sink or source. If they are in use, they could be getting + // managed or moved by the sink or source itself, so we should not + // check the index field (which may be concurrently modified). + if len(lv.partitions) > len(r.partitions) { + return + } + newPartitions := r.partitions[len(lv.partitions):] + + // Anything left with a negative recBufsIdx / cursorsIdx is a new topic + // partition and must be added to the sink / source. + for _, newTP := range newPartitions { + if isProduce && newTP.records.recBufsIdx == -1 { + newTP.records.sink.addRecBuf(newTP.records) + } else if !isProduce && newTP.cursor.cursorsIdx == -1 { + newTP.cursor.source.addCursor(newTP.cursor) + } + } +} + +var ( + errEpochRewind = errors.New("epoch rewind") + errMissingTopicID = errors.New("missing topic ID") +) + +type multiUpdateWhy map[kerrOrString]map[string]map[int32]struct{} + +type kerrOrString struct { + k *kerr.Error + s string +} + +func (m *multiUpdateWhy) isOnly(err error) bool { + if m == nil { + return false + } + for e := range *m { + if !errors.Is(err, e.k) { + return false + } + } + return true +} + +func (m *multiUpdateWhy) add(t string, p int32, err error) { + if err == nil { + return + } + + if *m == nil { + *m = make(map[kerrOrString]map[string]map[int32]struct{}) + } + var ks kerrOrString + if ke := (*kerr.Error)(nil); errors.As(err, &ke) { + ks = kerrOrString{k: ke} + } else { + ks = kerrOrString{s: err.Error()} + } + + ts := (*m)[ks] + if ts == nil { + ts = make(map[string]map[int32]struct{}) + (*m)[ks] = ts + } + + ps := ts[t] + if ps == nil { + ps = make(map[int32]struct{}) + ts[t] = ps + } + // -1 signals that the entire topic had an error. 
+ if p != -1 { + ps[p] = struct{}{} + } +} + +// err{topic[1 2 3] topic2[4 5 6]} err2{...} +func (m multiUpdateWhy) reason(reason string) string { + if len(m) == 0 { + return "" + } + + ksSorted := make([]kerrOrString, 0, len(m)) + for err := range m { + ksSorted = append(ksSorted, err) + } + sort.Slice(ksSorted, func(i, j int) bool { // order by non-nil kerr's code, otherwise the string + l, r := ksSorted[i], ksSorted[j] + return l.k != nil && (r.k == nil || l.k.Code < r.k.Code) || r.k == nil && l.s < r.s + }) + + var errorStrings []string + for _, ks := range ksSorted { + ts := m[ks] + tsSorted := make([]string, 0, len(ts)) + for t := range ts { + tsSorted = append(tsSorted, t) + } + sort.Strings(tsSorted) + + var topicStrings []string + for _, t := range tsSorted { + ps := ts[t] + if len(ps) == 0 { + topicStrings = append(topicStrings, t) + } else { + psSorted := make([]int32, 0, len(ps)) + for p := range ps { + psSorted = append(psSorted, p) + } + sort.Slice(psSorted, func(i, j int) bool { return psSorted[i] < psSorted[j] }) + topicStrings = append(topicStrings, fmt.Sprintf("%s%v", t, psSorted)) + } + } + + if ks.k != nil { + errorStrings = append(errorStrings, fmt.Sprintf("%s{%s}", ks.k.Message, strings.Join(topicStrings, " "))) + } else { + errorStrings = append(errorStrings, fmt.Sprintf("%s{%s}", ks.s, strings.Join(topicStrings, " "))) + } + } + if reason == "" { + return strings.Join(errorStrings, " ") + } + return reason + ": " + strings.Join(errorStrings, " ") +} diff --git a/vendor/github.com/twmb/franz-go/pkg/kgo/partitioner.go b/vendor/github.com/twmb/franz-go/pkg/kgo/partitioner.go new file mode 100644 index 000000000000..46e7d11d124b --- /dev/null +++ b/vendor/github.com/twmb/franz-go/pkg/kgo/partitioner.go @@ -0,0 +1,614 @@ +package kgo + +import ( + "math" + "math/rand" + "time" + + "github.com/twmb/franz-go/pkg/kbin" +) + +// Partitioner creates topic partitioners to determine which partition messages +// should be sent to. +// +// Note that a record struct is unmodified (minus a potential default topic) +// from producing through partitioning, so you can set fields in the record +// struct before producing to aid in partitioning with a custom partitioner. +type Partitioner interface { + // forTopic returns a partitioner for an individual topic. It is + // guaranteed that only one record will use the an individual topic's + // topicPartitioner at a time, meaning partitioning within a topic does + // not require locks. + ForTopic(string) TopicPartitioner +} + +// TopicPartitioner partitions records in an individual topic. +type TopicPartitioner interface { + // RequiresConsistency returns true if a record must hash to the same + // partition even if a partition is down. + // If true, a record may hash to a partition that cannot be written to + // and will error until the partition comes back. + RequiresConsistency(*Record) bool + // Partition determines, among a set of n partitions, which index should + // be chosen to use for the partition for r. + Partition(r *Record, n int) int +} + +// TopicPartitionerOnNewBatch is an optional extension interface to +// TopicPartitioner that calls OnNewBatch before any new batch is created. If +// buffering a record would cause a new batch, OnNewBatch is called. +// +// This interface allows for partitioner implementations that effectively pin +// to a partition until a new batch is created, after which the partitioner can +// choose which next partition to use. 
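+//
+// A minimal illustrative sketch (an assumption, not upstream code): a
+// TopicPartitioner that pins to one random partition and re-rolls only when a
+// batch is cut. The type name is made up, the rng is assumed to be seeded by
+// the owning Partitioner's ForTopic, and math/rand is assumed imported.
+//
+//	type pinUntilNewBatch struct {
+//		on  int
+//		rng *rand.Rand
+//	}
+//
+//	func (p *pinUntilNewBatch) RequiresConsistency(*kgo.Record) bool { return false }
+//	func (p *pinUntilNewBatch) OnNewBatch()                          { p.on = -1 }
+//	func (p *pinUntilNewBatch) Partition(_ *kgo.Record, n int) int {
+//		if p.on < 0 || p.on >= n {
+//			p.on = p.rng.Intn(n)
+//		}
+//		return p.on
+//	}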
+type TopicPartitionerOnNewBatch interface { + // OnNewBatch is called when producing a record if that record would + // trigger a new batch on its current partition. + OnNewBatch() +} + +// TopicBackupPartitioner is an optional extension interface to +// TopicPartitioner that can partition by the number of records buffered. +// +// If a partitioner implements this interface, the Partition function will +// never be called. +type TopicBackupPartitioner interface { + TopicPartitioner + + // PartitionByBackup is similar to Partition, but has an additional + // backupIter. This iterator will return the number of buffered records + // per partition index. The iterator's Next function can only be called + // up to n times, calling it any more will panic. + PartitionByBackup(r *Record, n int, backupIter TopicBackupIter) int +} + +// TopicBackupIter is an iterates through partition indices. +type TopicBackupIter interface { + // Next returns the next partition index and the total buffered records + // for the partition. If Rem returns 0, calling this function again + // will panic. + Next() (int, int64) + // Rem returns the number of elements left to iterate through. + Rem() int +} + +//////////// +// SIMPLE // - BasicConsistent, Manual, RoundRobin +//////////// + +// BasicConsistentPartitioner wraps a single function to provide a Partitioner +// and TopicPartitioner (that function is essentially a combination of +// Partitioner.ForTopic and TopicPartitioner.Partition). +// +// As a minimal example, if you do not care about the topic and you set the +// partition before producing: +// +// kgo.BasicConsistentPartitioner(func(topic) func(*Record, int) int { +// return func(r *Record, n int) int { +// return int(r.Partition) +// } +// }) +func BasicConsistentPartitioner(partition func(string) func(r *Record, n int) int) Partitioner { + return &basicPartitioner{partition} +} + +type ( + basicPartitioner struct { + fn func(string) func(*Record, int) int + } + + basicTopicPartitioner struct { + fn func(*Record, int) int + } +) + +func (b *basicPartitioner) ForTopic(t string) TopicPartitioner { + return &basicTopicPartitioner{b.fn(t)} +} + +func (*basicTopicPartitioner) RequiresConsistency(*Record) bool { return true } +func (b *basicTopicPartitioner) Partition(r *Record, n int) int { return b.fn(r, n) } + +// ManualPartitioner is a partitioner that simply returns the Partition field +// that is already set on any record. +// +// Any record with an invalid partition will be immediately failed. This +// partitioner is simply the partitioner that is demonstrated in the +// BasicConsistentPartitioner documentation. +func ManualPartitioner() Partitioner { + return BasicConsistentPartitioner(func(string) func(*Record, int) int { + return func(r *Record, _ int) int { + return int(r.Partition) + } + }) +} + +// RoundRobinPartitioner is a partitioner that round-robin's through all +// available partitions. This algorithm has lower throughput and causes higher +// CPU load on brokers, but can be useful if you want to ensure an even +// distribution of records to partitions. 
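+//
+// A minimal illustrative sketch (the seed broker address is an assumption):
+// opting into this partitioner when constructing a client via the
+// RecordPartitioner producer option.
+//
+//	cl, err := kgo.NewClient(
+//		kgo.SeedBrokers("localhost:9092"),
+//		kgo.RecordPartitioner(kgo.RoundRobinPartitioner()),
+//	)
+//	if err != nil {
+//		// handle client construction error
+//	}
+//	defer cl.Close()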
+func RoundRobinPartitioner() Partitioner { + return new(roundRobinPartitioner) +} + +type ( + roundRobinPartitioner struct{} + + roundRobinTopicPartitioner struct { + on int + } +) + +func (*roundRobinPartitioner) ForTopic(string) TopicPartitioner { + return new(roundRobinTopicPartitioner) +} + +func (*roundRobinTopicPartitioner) RequiresConsistency(*Record) bool { return false } +func (r *roundRobinTopicPartitioner) Partition(_ *Record, n int) int { + if r.on >= n { + r.on = 0 + } + ret := r.on + r.on++ + return ret +} + +////////////////// +// LEAST BACKUP // +////////////////// + +// LeastBackupPartitioner prioritizes partitioning by three factors, in order: +// +// 1. pin to the current pick until there is a new batch +// 2. on new batch, choose the least backed up partition (the partition with +// the fewest amount of buffered records) +// 3. if multiple partitions are equally least-backed-up, choose one at random +// +// This algorithm prioritizes least-backed-up throughput, which may result in +// unequal partitioning. It is likely that this algorithm will talk most to the +// broker that it has the best connection to. +// +// This algorithm is resilient to brokers going down: if a few brokers die, it +// is possible your throughput will be so high that the maximum buffered +// records will be reached in the now-offline partitions before metadata +// responds that the broker is offline. With the standard partitioning +// algorithms, the only recovery is if the partition is remapped or if the +// broker comes back online. With the least backup partitioner, downed +// partitions will see slight backup, but then the other partitions that are +// still accepting writes will get all of the writes and your client will not +// be blocked. +// +// Under ideal scenarios (no broker / connection issues), StickyPartitioner is +// equivalent to LeastBackupPartitioner. This partitioner is only recommended +// if you are a producer consistently dealing with flaky connections or +// problematic brokers and do not mind uneven load on your brokers. 
+func LeastBackupPartitioner() Partitioner { + return new(leastBackupPartitioner) +} + +type ( + leastBackupInput struct{ mapping []*topicPartition } + + leastBackupPartitioner struct{} + + leastBackupTopicPartitioner struct { + onPart int + rng *rand.Rand + } +) + +func (i *leastBackupInput) Next() (int, int64) { + last := len(i.mapping) - 1 + buffered := i.mapping[last].records.buffered.Load() + i.mapping = i.mapping[:last] + return last, buffered +} + +func (i *leastBackupInput) Rem() int { + return len(i.mapping) +} + +func (*leastBackupPartitioner) ForTopic(string) TopicPartitioner { + return &leastBackupTopicPartitioner{ + onPart: -1, + rng: rand.New(rand.NewSource(time.Now().UnixNano())), + } +} + +func (p *leastBackupTopicPartitioner) OnNewBatch() { p.onPart = -1 } +func (*leastBackupTopicPartitioner) RequiresConsistency(*Record) bool { return false } +func (*leastBackupTopicPartitioner) Partition(*Record, int) int { panic("unreachable") } + +func (p *leastBackupTopicPartitioner) PartitionByBackup(_ *Record, n int, backup TopicBackupIter) int { + if p.onPart == -1 || p.onPart >= n { + leastBackup := int64(math.MaxInt64) + npicked := 0 + for ; n > 0; n-- { + pick, backup := backup.Next() + if backup < leastBackup { + leastBackup = backup + p.onPart = pick + npicked = 1 + } else { + npicked++ // reservoir sampling with k = 1 + if p.rng.Intn(npicked) == 0 { + p.onPart = pick + } + } + } + } + return p.onPart +} + +/////////////////// +// UNIFORM BYTES // +/////////////////// + +// UniformBytesPartitioner is a redux of the StickyPartitioner, proposed in +// KIP-794 and release with the Java client in Kafka 3.3. This partitioner +// returns the same partition until 'bytes' is hit. At that point, a +// re-partitioning happens. If adaptive is false, this chooses a new random +// partition, otherwise this chooses a broker based on the inverse of the +// backlog currently buffered for that broker. If keys is true, this uses +// standard hashing based on record key for records with non-nil keys. hasher +// is optional; if nil, the default hasher murmur2 (Kafka's default). +// +// The point of this hasher is to create larger batches while producing the +// same amount to all partitions over the long run. Adaptive opts in to a +// slight imbalance so that this can produce more to brokers that are less +// loaded. +// +// This implementation differs slightly from Kafka's because this does not +// account for the compressed size of a batch, nor batch overhead. For +// overhead, in practice, the overhead is relatively constant so it would +// affect all batches equally. For compression, this client does not compress +// until after a batch is created and frozen, so it is not possible to track +// compression. This client also uses the number of records for backup +// calculation rather than number of bytes, but the heuristic should be +// similar. Lastly, this client does not have a timeout for partition +// availability. Realistically, these will be the most backed up partitions so +// they should be chosen the least. +// +// NOTE: This implementation may create sub-optimal batches if lingering is +// enabled. This client's default is to disable lingering. The patch used to +// address this in Kafka is KAFKA-14156 (which itself is not perfect in the +// context of disabling lingering). For more details, read KAFKA-14156. 
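+//
+// A minimal illustrative sketch (the 64 KiB threshold is an assumption, not a
+// recommendation): switch partitions roughly every 64 KiB, adapt to broker
+// backlog, keep key hashing, and fall back to the default murmur2 hasher.
+//
+//	kgo.RecordPartitioner(kgo.UniformBytesPartitioner(64<<10, true, true, nil))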
+func UniformBytesPartitioner(bytes int, adaptive, keys bool, hasher PartitionerHasher) Partitioner { + if hasher == nil { + hasher = KafkaHasher(murmur2) + } + return &uniformBytesPartitioner{ + bytes, + adaptive, + keys, + hasher, + } +} + +type ( + uniformBytesPartitioner struct { + bytes int + adaptive bool + keys bool + hasher PartitionerHasher + } + + uniformBytesTopicPartitioner struct { + u uniformBytesPartitioner + bytes int + onPart int + rng *rand.Rand + + calc []struct { + f float64 + n int + } + } +) + +func (u *uniformBytesPartitioner) ForTopic(string) TopicPartitioner { + return &uniformBytesTopicPartitioner{ + u: *u, + onPart: -1, + rng: rand.New(rand.NewSource(time.Now().UnixNano())), + } +} + +func (p *uniformBytesTopicPartitioner) RequiresConsistency(r *Record) bool { + return p.u.keys && r.Key != nil +} +func (*uniformBytesTopicPartitioner) Partition(*Record, int) int { panic("unreachable") } + +func (p *uniformBytesTopicPartitioner) PartitionByBackup(r *Record, n int, backup TopicBackupIter) int { + if p.u.keys && r.Key != nil { + return p.u.hasher(r.Key, n) + } + + l := 1 + // attributes, int8 unused + 1 + // ts delta, 1 minimum (likely 2 or 3) + 1 + // offset delta, likely 1 + kbin.VarintLen(int32(len(r.Key))) + + len(r.Key) + + kbin.VarintLen(int32(len(r.Value))) + + len(r.Value) + + kbin.VarintLen(int32(len(r.Headers))) // varint array len headers + + for _, h := range r.Headers { + l += kbin.VarintLen(int32(len(h.Key))) + + len(h.Key) + + kbin.VarintLen(int32(len(h.Value))) + + len(h.Value) + } + + p.bytes += l + if p.bytes >= p.u.bytes { + p.bytes = l + p.onPart = -1 + } + + if p.onPart >= 0 && p.onPart < n { + return p.onPart + } + + if !p.u.adaptive { + p.onPart = p.rng.Intn(n) + } else { + p.calc = p.calc[:0] + + // For adaptive, the logic is that we pick by broker according + // to the inverse of the queue size. Presumably this means + // bytes, but we use records for simplicity. + // + // We calculate 1/recs for all brokers and choose the first one + // in this ordering that takes us negative. + // + // e.g., 1/1 + 1/3; pick is 0.2; 0.2*1.3333 = 0.26666; minus 1 + // is negative, meaning our pick is the first. If rng was 0.9, + // scaled is 1.2, meaning our pick is the second (-1, still + // positive, second pick takes us negative). + // + // To guard floating rounding problems, if we pick nothing, + // then this means we pick our last. + var t float64 + for ; n > 0; n-- { + n, backup := backup.Next() + backup++ // ensure non-zero + f := 1 / float64(backup) + t += f + p.calc = append(p.calc, struct { + f float64 + n int + }{f, n}) + } + r := p.rng.Float64() + pick := r * t + for _, c := range p.calc { + pick -= c.f + if pick <= 0 { + p.onPart = c.n + break + } + } + if p.onPart == -1 { + p.onPart = p.calc[len(p.calc)-1].n + } + } + return p.onPart +} + +///////////////////// +// STICKY & COMPAT // - Sticky, Kafka (custom hash), Sarama (custom hash) +///////////////////// + +// StickyPartitioner is the same as StickyKeyPartitioner, but with no logic to +// consistently hash keys. That is, this only partitions according to the +// sticky partition strategy. 
+func StickyPartitioner() Partitioner { + return new(stickyPartitioner) +} + +type ( + stickyPartitioner struct{} + + stickyTopicPartitioner struct { + lastPart int + onPart int + rng *rand.Rand + } +) + +func (*stickyPartitioner) ForTopic(string) TopicPartitioner { + p := newStickyTopicPartitioner() + return &p +} + +func newStickyTopicPartitioner() stickyTopicPartitioner { + return stickyTopicPartitioner{ + lastPart: -1, + onPart: -1, + rng: rand.New(rand.NewSource(time.Now().UnixNano())), + } +} + +func (p *stickyTopicPartitioner) OnNewBatch() { p.lastPart, p.onPart = p.onPart, -1 } +func (*stickyTopicPartitioner) RequiresConsistency(*Record) bool { return false } +func (p *stickyTopicPartitioner) Partition(_ *Record, n int) int { + if p.onPart == -1 || p.onPart >= n { + p.onPart = p.rng.Intn(n) + if p.onPart == p.lastPart { + p.onPart = (p.onPart + 1) % n + } + } + return p.onPart +} + +// StickyKeyPartitioner mirrors the default Java partitioner from Kafka's 2.4 +// release (see KIP-480 and KAFKA-8601) until their 3.3 release. This was +// replaced in 3.3 with the uniform sticky partitioner (KIP-794), which is +// reimplemented in this client as the UniformBytesPartitioner. +// +// This is the same "hash the key consistently, if no key, choose random +// partition" strategy that the Java partitioner has always used, but rather +// than always choosing a random partition, the partitioner pins a partition to +// produce to until that partition rolls over to a new batch. Only when rolling +// to new batches does this partitioner switch partitions. +// +// The benefit with this pinning is less CPU utilization on Kafka brokers. +// Over time, the random distribution is the same, but the brokers are handling +// on average larger batches. +// +// hasher is optional; if nil, this will return a partitioner that partitions +// exactly how Kafka does. Specifically, the partitioner will use murmur2 to +// hash keys, will mask out the 32nd bit, and then will mod by the number of +// potential partitions. +func StickyKeyPartitioner(hasher PartitionerHasher) Partitioner { + if hasher == nil { + hasher = KafkaHasher(murmur2) + } + return &keyPartitioner{hasher} +} + +// PartitionerHasher returns a partition to use given the input data and number +// of partitions. +type PartitionerHasher func([]byte, int) int + +// KafkaHasher returns a PartitionerHasher using hashFn that mirrors how Kafka +// partitions after hashing data. In Kafka, after hashing into a uint32, the +// hash is converted to an int32 and the high bit is stripped. Kafka by default +// uses murmur2 hashing, and the StickyKeyPartiitoner uses this by default. +// Using this KafkaHasher function is only necessary if you want to change the +// underlying hashing algorithm. +func KafkaHasher(hashFn func([]byte) uint32) PartitionerHasher { + return func(key []byte, n int) int { + // https://github.com/apache/kafka/blob/d91a94e/clients/src/main/java/org/apache/kafka/clients/producer/internals/DefaultPartitioner.java#L59 + // https://github.com/apache/kafka/blob/d91a94e/clients/src/main/java/org/apache/kafka/common/utils/Utils.java#L865-L867 + // Masking before or after the int conversion makes no difference. + return int(hashFn(key)&0x7fffffff) % n + } +} + +// SaramaHasher is a historical misnamed partitioner. This library's original +// implementation of the SaramaHasher was incorrect, if you want an exact +// match for the Sarama partitioner, use the [SaramaCompatHasher]. 
+// +// This partitioner remains because as it turns out, other ecosystems provide +// a similar partitioner and this partitioner is useful for compatibility. +// +// In particular, using this function with a crc32.ChecksumIEEE hasher makes +// this partitioner match librdkafka's consistent partitioner, or the +// zendesk/ruby-kafka partitioner. +func SaramaHasher(hashFn func([]byte) uint32) PartitionerHasher { + return func(key []byte, n int) int { + p := int(hashFn(key)) % n + if p < 0 { + p = -p + } + return p + } +} + +// SaramaCompatHasher returns a PartitionerHasher using hashFn that mirrors how +// Sarama partitions after hashing data. +// +// Sarama has two differences from Kafka when partitioning: +// +// 1) In Kafka, when converting the uint32 hash to an int32, Kafka masks the +// high bit. In Sarama, if the high bit is 1 (i.e., the number as an int32 is +// negative), Sarama negates the number. +// +// 2) Kafka by default uses the murmur2 hashing algorithm. Sarama by default +// uses fnv-1a. +// +// Sarama added a NewReferenceHashPartitioner function that attempted to align +// with Kafka, but the reference partitioner only fixed the first difference, +// not the second. Further customization options were added later that made it +// possible to exactly match Kafka when hashing. +// +// In short, to *exactly* match the Sarama defaults, use the following: +// +// kgo.StickyKeyPartitioner(kgo.SaramaCompatHasher(fnv32a)) +// +// Where fnv32a is a function returning a new 32 bit fnv-1a hasher. +// +// func fnv32a(b []byte) uint32 { +// h := fnv.New32a() +// h.Reset() +// h.Write(b) +// return h.Sum32() +// } +func SaramaCompatHasher(hashFn func([]byte) uint32) PartitionerHasher { + return func(key []byte, n int) int { + p := int32(hashFn(key)) % int32(n) + if p < 0 { + p = -p + } + return int(p) + } +} + +type ( + keyPartitioner struct { + hasher PartitionerHasher + } + + stickyKeyTopicPartitioner struct { + hasher PartitionerHasher + stickyTopicPartitioner + } +) + +func (k *keyPartitioner) ForTopic(string) TopicPartitioner { + return &stickyKeyTopicPartitioner{k.hasher, newStickyTopicPartitioner()} +} + +func (*stickyKeyTopicPartitioner) RequiresConsistency(r *Record) bool { return r.Key != nil } +func (p *stickyKeyTopicPartitioner) Partition(r *Record, n int) int { + if r.Key != nil { + return p.hasher(r.Key, n) + } + return p.stickyTopicPartitioner.Partition(r, n) +} + +///////////// +// MURMUR2 // +///////////// + +// Straight from the C++ code and from the Java code duplicating it. +// https://github.com/apache/kafka/blob/d91a94e/clients/src/main/java/org/apache/kafka/common/utils/Utils.java#L383-L421 +// https://github.com/aappleby/smhasher/blob/61a0530f/src/MurmurHash2.cpp#L37-L86 +// +// The Java code uses ints but with unsigned shifts; we do not need to. 
+func murmur2(b []byte) uint32 { + const ( + seed uint32 = 0x9747b28c + m uint32 = 0x5bd1e995 + r = 24 + ) + h := seed ^ uint32(len(b)) + for len(b) >= 4 { + k := uint32(b[3])<<24 + uint32(b[2])<<16 + uint32(b[1])<<8 + uint32(b[0]) + b = b[4:] + k *= m + k ^= k >> r + k *= m + + h *= m + h ^= k + } + switch len(b) { + case 3: + h ^= uint32(b[2]) << 16 + fallthrough + case 2: + h ^= uint32(b[1]) << 8 + fallthrough + case 1: + h ^= uint32(b[0]) + h *= m + } + + h ^= h >> 13 + h *= m + h ^= h >> 15 + return h +} diff --git a/vendor/github.com/twmb/franz-go/pkg/kgo/producer.go b/vendor/github.com/twmb/franz-go/pkg/kgo/producer.go new file mode 100644 index 000000000000..d9cca9920aac --- /dev/null +++ b/vendor/github.com/twmb/franz-go/pkg/kgo/producer.go @@ -0,0 +1,1226 @@ +package kgo + +import ( + "context" + "errors" + "fmt" + "math" + "sync" + "sync/atomic" + "time" + + "github.com/twmb/franz-go/pkg/kerr" + "github.com/twmb/franz-go/pkg/kmsg" +) + +type producer struct { + inflight atomicI64 // high 16: # waiters, low 48: # inflight + + // mu and c are used for flush and drain notifications; mu is used for + // a few other tight locks. + mu sync.Mutex + c *sync.Cond + + bufferedRecords int64 + bufferedBytes int64 + + cl *Client + + topicsMu sync.Mutex // locked to prevent concurrent updates; reads are always atomic + topics *topicsPartitions + + // Hooks exist behind a pointer because likely they are not used. + // We only take up one byte vs. 6. + hooks *struct { + buffered []HookProduceRecordBuffered + partitioned []HookProduceRecordPartitioned + unbuffered []HookProduceRecordUnbuffered + } + + hasHookBatchWritten bool + + // unknownTopics buffers all records for topics that are not loaded. + // The map is to a pointer to a slice for reasons documented in + // waitUnknownTopic. + unknownTopicsMu sync.Mutex + unknownTopics map[string]*unknownTopicProduces + + id atomic.Value + producingTxn atomicBool + + // We must have a producer field for flushing; we cannot just have a + // field on recBufs that is toggled on flush. If we did, then a new + // recBuf could be created and records sent to while we are flushing. + flushing atomicI32 // >0 if flushing, can Flush many times concurrently + blocked atomicI32 // >0 if over max recs or bytes + blockedBytes int64 + + aborting atomicI32 // >0 if aborting, can abort many times concurrently + + idMu sync.Mutex + idVersion int16 + + batchPromises ringBatchPromise + promisesMu sync.Mutex + + txnMu sync.Mutex + inTxn bool + + // If using EndBeginTxnUnsafe, and any partitions are actually produced + // to, we issue an AddPartitionsToTxn at the end to re-add them to a + // new transaction. We have to due to logic races: the broker may not + // have handled the produce requests yet, and we want to ensure a new + // transaction is started. + // + // If the user stops producing, we want to ensure that our restarted + // transaction is actually ended. Thus, we set readded whenever we have + // partitions we actually restart. We issue EndTxn and reset readded in + // EndAndBegin; if nothing more was produced to, we ensure we finish + // the started txn. + readded bool +} + +// BufferedProduceRecords returns the number of records currently buffered for +// producing within the client. +// +// This can be used as a gauge to determine how far behind the client is for +// flushing records produced by your client (which can help determine network / +// cluster health). 
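+//
+// A minimal illustrative sketch (the interval, threshold, and use of the time
+// and log packages are assumptions): periodically polling this gauge to detect
+// a backed-up producer.
+//
+//	go func() {
+//		for range time.Tick(5 * time.Second) {
+//			if n := cl.BufferedProduceRecords(); n > 50_000 {
+//				log.Printf("producer backlog: %d buffered records", n)
+//			}
+//		}
+//	}()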
+func (cl *Client) BufferedProduceRecords() int64 { + cl.producer.mu.Lock() + defer cl.producer.mu.Unlock() + return cl.producer.bufferedRecords + int64(cl.producer.blocked.Load()) +} + +// BufferedProduceBytes returns the number of bytes currently buffered for +// producing within the client. This is the sum of all keys, values, and header +// keys/values. See the related [BufferedProduceRecords] for more information. +func (cl *Client) BufferedProduceBytes() int64 { + cl.producer.mu.Lock() + defer cl.producer.mu.Unlock() + return cl.producer.bufferedBytes + cl.producer.blockedBytes +} + +type unknownTopicProduces struct { + buffered []promisedRec + wait chan error // retryable errors + fatal chan error // must-signal quit errors; capacity 1 +} + +func (p *producer) init(cl *Client) { + p.cl = cl + p.topics = newTopicsPartitions() + p.unknownTopics = make(map[string]*unknownTopicProduces) + p.idVersion = -1 + p.id.Store(&producerID{ + id: -1, + epoch: -1, + err: errReloadProducerID, + }) + p.c = sync.NewCond(&p.mu) + + inithooks := func() { + if p.hooks == nil { + p.hooks = &struct { + buffered []HookProduceRecordBuffered + partitioned []HookProduceRecordPartitioned + unbuffered []HookProduceRecordUnbuffered + }{} + } + } + + cl.cfg.hooks.each(func(h Hook) { + if h, ok := h.(HookProduceRecordBuffered); ok { + inithooks() + p.hooks.buffered = append(p.hooks.buffered, h) + } + if h, ok := h.(HookProduceRecordPartitioned); ok { + inithooks() + p.hooks.partitioned = append(p.hooks.partitioned, h) + } + if h, ok := h.(HookProduceRecordUnbuffered); ok { + inithooks() + p.hooks.unbuffered = append(p.hooks.unbuffered, h) + } + if _, ok := h.(HookProduceBatchWritten); ok { + p.hasHookBatchWritten = true + } + }) +} + +func (p *producer) purgeTopics(topics []string) { + p.topicsMu.Lock() + defer p.topicsMu.Unlock() + + p.unknownTopicsMu.Lock() + for _, topic := range topics { + if unknown, exists := p.unknownTopics[topic]; exists { + delete(p.unknownTopics, topic) + close(unknown.wait) + p.promiseBatch(batchPromise{ + recs: unknown.buffered, + err: errPurged, + }) + } + } + p.unknownTopicsMu.Unlock() + + toStore := p.topics.clone() + defer p.topics.storeData(toStore) + + for _, topic := range topics { + d := toStore.loadTopic(topic) + if d == nil { + continue + } + delete(toStore, topic) + for _, p := range d.partitions { + r := p.records + + // First we set purged, so that anything in the process + // of being buffered will immediately fail when it goes + // to buffer. + r.mu.Lock() + r.purged = true + r.mu.Unlock() + + // Now we remove from the sink. When we do, the recBuf + // is effectively abandonded. Any active produces may + // finish before we fail the records; if they finish + // after they will no longer belong in the batch, but + // they may have been produced. This is the duplicate + // risk a user runs when purging. + r.sink.removeRecBuf(r) + + // Once abandonded, we now need to fail anything that + // was buffered. + go func() { + r.mu.Lock() + defer r.mu.Unlock() + r.failAllRecords(errPurged) + }() + } + } +} + +func (p *producer) isAborting() bool { return p.aborting.Load() > 0 } + +func noPromise(*Record, error) {} + +// ProduceResult is the result of producing a record in a synchronous manner. +type ProduceResult struct { + // Record is the produced record. It is always non-nil. + // + // If this record was produced successfully, its attrs / offset / id / + // epoch / etc. fields are filled in on return if possible (i.e. when + // producing with acks required). 
+ Record *Record + + // Err is a potential produce error. If this is non-nil, the record was + // not produced successfully. + Err error +} + +// ProduceResults is a collection of produce results. +type ProduceResults []ProduceResult + +// FirstErr returns the first erroring result, if any. +func (rs ProduceResults) FirstErr() error { + for _, r := range rs { + if r.Err != nil { + return r.Err + } + } + return nil +} + +// First the first record and error in the produce results. +// +// This function is useful if you only passed one record to ProduceSync. +func (rs ProduceResults) First() (*Record, error) { + return rs[0].Record, rs[0].Err +} + +// ProduceSync is a synchronous produce. See the Produce documentation for an +// in depth description of how producing works. +// +// This function produces all records in one range loop and waits for them all +// to be produced before returning. +func (cl *Client) ProduceSync(ctx context.Context, rs ...*Record) ProduceResults { + var ( + wg sync.WaitGroup + results = make(ProduceResults, 0, len(rs)) + promise = func(r *Record, err error) { + results = append(results, ProduceResult{r, err}) + wg.Done() + } + ) + + wg.Add(len(rs)) + for _, r := range rs { + cl.Produce(ctx, r, promise) + } + wg.Wait() + + return results +} + +// FirstErrPromise is a helper type to capture only the first failing error +// when producing a batch of records with this type's Promise function. +// +// This is useful for when you only care about any record failing, and can use +// that as a signal (i.e., to abort a batch). The AbortingFirstErrPromise +// function can be used to abort all records as soon as the first error is +// encountered. If you do not need to abort, you can use this type with no +// constructor. +// +// This is similar to using ProduceResult's FirstErr function. +type FirstErrPromise struct { + wg sync.WaitGroup + once atomicBool + err error + cl *Client +} + +// AbortingFirstErrPromise returns a FirstErrPromise that will call the +// client's AbortBufferedRecords function if an error is encountered. +// +// This can be used to quickly exit when any error is encountered, rather than +// waiting while flushing only to discover things errored. +func AbortingFirstErrPromise(cl *Client) *FirstErrPromise { + return &FirstErrPromise{ + cl: cl, + } +} + +// Promise is a promise for producing that will store the first error +// encountered. +func (f *FirstErrPromise) promise(_ *Record, err error) { + defer f.wg.Done() + if err != nil && !f.once.Swap(true) { + f.err = err + if f.cl != nil { + f.wg.Add(1) + go func() { + defer f.wg.Done() + f.cl.AbortBufferedRecords(context.Background()) + }() + } + } +} + +// Promise returns a promise for producing that will store the first error +// encountered. +// +// The returned promise must eventually be called, because a FirstErrPromise +// does not return from 'Err' until all promises are completed. +func (f *FirstErrPromise) Promise() func(*Record, error) { + f.wg.Add(1) + return f.promise +} + +// Err waits for all promises to complete and then returns any stored error. +func (f *FirstErrPromise) Err() error { + f.wg.Wait() + return f.err +} + +// TryProduce is similar to Produce, but rather than blocking if the client +// currently has MaxBufferedRecords or MaxBufferedBytes buffered, this fails +// immediately with ErrMaxBuffered. See the Produce documentation for more +// details. 
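+//
+// A minimal illustrative sketch (recs and ctx are assumed to be in scope):
+// asynchronously producing a batch with the AbortingFirstErrPromise described
+// above, then waiting for all promises and surfacing the first error.
+//
+//	fep := kgo.AbortingFirstErrPromise(cl)
+//	for _, r := range recs {
+//		cl.Produce(ctx, r, fep.Promise())
+//	}
+//	if err := fep.Err(); err != nil {
+//		// at least one record failed; remaining buffered records were aborted
+//	}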
+func (cl *Client) TryProduce( + ctx context.Context, + r *Record, + promise func(*Record, error), +) { + cl.produce(ctx, r, promise, false) +} + +// Produce sends a Kafka record to the topic in the record's Topic field, +// calling an optional `promise` with the record and a potential error when +// Kafka replies. For a synchronous produce, see ProduceSync. Records are +// produced in order per partition if the record is produced successfully. +// Successfully produced records will have their attributes, offset, and +// partition set before the promise is called. All promises are called serially +// (and should be relatively fast). If a record's timestamp is unset, this +// sets the timestamp to time.Now. +// +// If the topic field is empty, the client will use the DefaultProduceTopic; if +// that is also empty, the record is failed immediately. If the record is too +// large to fit in a batch on its own in a produce request, the record will be +// failed with immediately kerr.MessageTooLarge. +// +// If the client is configured to automatically flush the client currently has +// the configured maximum amount of records buffered, Produce will block. The +// context can be used to cancel waiting while records flush to make space. In +// contrast, if flushing is configured, the record will be failed immediately +// with ErrMaxBuffered (this same behavior can be had with TryProduce). +// +// Once a record is buffered into a batch, it can be canceled in three ways: +// canceling the context, the record timing out, or hitting the maximum +// retries. If any of these conditions are hit and it is currently safe to fail +// records, all buffered records for the relevant partition are failed. Only +// the first record's context in a batch is considered when determining whether +// the batch should be canceled. A record is not safe to fail if the client +// is idempotently producing and a request has been sent; in this case, the +// client cannot know if the broker actually processed the request (if so, then +// removing the records from the client will create errors the next time you +// produce). +// +// If the client is transactional and a transaction has not been begun, the +// promise is immediately called with an error corresponding to not being in a +// transaction. +func (cl *Client) Produce( + ctx context.Context, + r *Record, + promise func(*Record, error), +) { + cl.produce(ctx, r, promise, true) +} + +func (cl *Client) produce( + ctx context.Context, + r *Record, + promise func(*Record, error), + block bool, +) { + if ctx == nil { + ctx = context.Background() + } + if r.Context == nil { + r.Context = ctx + } + if promise == nil { + promise = noPromise + } + if r.Topic == "" { + r.Topic = cl.cfg.defaultProduceTopic + } + + p := &cl.producer + if p.hooks != nil && len(p.hooks.buffered) > 0 { + for _, h := range p.hooks.buffered { + h.OnProduceRecordBuffered(r) + } + } + + // We can now fail the rec after the buffered hook. + if r.Topic == "" { + p.promiseRecordBeforeBuf(promisedRec{ctx, promise, r}, errNoTopic) + return + } + if cl.cfg.txnID != nil && !p.producingTxn.Load() { + p.promiseRecordBeforeBuf(promisedRec{ctx, promise, r}, errNotInTransaction) + return + } + + userSize := r.userSize() + if cl.cfg.maxBufferedBytes > 0 && userSize > cl.cfg.maxBufferedBytes { + p.promiseRecordBeforeBuf(promisedRec{ctx, promise, r}, kerr.MessageTooLarge) + return + } + + // We have to grab the produce lock to check if this record will exceed + // configured limits. 
We try to keep the logic tight since this is + // effectively a global lock around producing. + var ( + nextBufRecs, nextBufBytes int64 + overMaxRecs, overMaxBytes bool + + calcNums = func() { + nextBufRecs = p.bufferedRecords + 1 + nextBufBytes = p.bufferedBytes + userSize + overMaxRecs = nextBufRecs > cl.cfg.maxBufferedRecords + overMaxBytes = cl.cfg.maxBufferedBytes > 0 && nextBufBytes > cl.cfg.maxBufferedBytes + } + ) + p.mu.Lock() + calcNums() + if overMaxRecs || overMaxBytes { + if !block || cl.cfg.manualFlushing { + p.mu.Unlock() + p.promiseRecordBeforeBuf(promisedRec{ctx, promise, r}, ErrMaxBuffered) + return + } + + // Before we potentially unlinger, add that we are blocked to + // ensure we do NOT start a linger anymore. We THEN wakeup + // anything that is actively lingering. Note that blocked is + // also used when finishing promises to see if we need to be + // notified. + p.blocked.Add(1) + p.blockedBytes += userSize + p.mu.Unlock() + + cl.cfg.logger.Log(LogLevelDebug, "blocking Produce because we are either over max buffered records or max buffered bytes", + "over_max_records", overMaxRecs, + "over_max_bytes", overMaxBytes, + ) + + cl.unlingerDueToMaxRecsBuffered() + + // We keep the lock when we exit. If we are flushing, we want + // this blocked record to be produced before we return from + // flushing. This blocked record will be accounted for in the + // bufferedRecords addition below, after being removed from + // blocked in the goroutine. + wait := make(chan struct{}) + var quit bool + go func() { + defer close(wait) + p.mu.Lock() + calcNums() + for !quit && (overMaxRecs || overMaxBytes) { + p.c.Wait() + calcNums() + } + p.blocked.Add(-1) + p.blockedBytes -= userSize + }() + + drainBuffered := func(err error) { + p.mu.Lock() + quit = true + p.mu.Unlock() + p.c.Broadcast() // wake the goroutine above + <-wait + p.mu.Unlock() // we wait for the goroutine to exit, then unlock again (since the goroutine leaves the mutex locked) + p.promiseRecordBeforeBuf(promisedRec{ctx, promise, r}, err) + } + + select { + case <-wait: + cl.cfg.logger.Log(LogLevelDebug, "Produce block awoken, we now have space to produce, continuing to partition and produce") + case <-cl.ctx.Done(): + drainBuffered(ErrClientClosed) + cl.cfg.logger.Log(LogLevelDebug, "client ctx canceled while blocked in Produce, returning") + return + case <-ctx.Done(): + drainBuffered(ctx.Err()) + cl.cfg.logger.Log(LogLevelDebug, "produce ctx canceled while blocked in Produce, returning") + return + } + } + p.bufferedRecords = nextBufRecs + p.bufferedBytes = nextBufBytes + p.mu.Unlock() + + cl.partitionRecord(promisedRec{ctx, promise, r}) +} + +type batchPromise struct { + baseOffset int64 + pid int64 + epoch int16 + attrs RecordAttrs + beforeBuf bool + partition int32 + recs []promisedRec + err error +} + +func (p *producer) promiseBatch(b batchPromise) { + if first := p.batchPromises.push(b); first { + go p.finishPromises(b) + } +} + +func (p *producer) promiseRecord(pr promisedRec, err error) { + p.promiseBatch(batchPromise{recs: []promisedRec{pr}, err: err}) +} + +func (p *producer) promiseRecordBeforeBuf(pr promisedRec, err error) { + p.promiseBatch(batchPromise{recs: []promisedRec{pr}, beforeBuf: true, err: err}) +} + +func (p *producer) finishPromises(b batchPromise) { + cl := p.cl + var more bool +start: + p.promisesMu.Lock() + for i, pr := range b.recs { + pr.LeaderEpoch = 0 + pr.Offset = b.baseOffset + int64(i) + pr.Partition = b.partition + pr.ProducerID = b.pid + pr.ProducerEpoch = b.epoch + pr.Attrs = 
b.attrs + cl.finishRecordPromise(pr, b.err, b.beforeBuf) + b.recs[i] = promisedRec{} + } + p.promisesMu.Unlock() + if cap(b.recs) > 4 { + cl.prsPool.put(b.recs) + } + + b, more = p.batchPromises.dropPeek() + if more { + goto start + } +} + +func (cl *Client) finishRecordPromise(pr promisedRec, err error, beforeBuffering bool) { + p := &cl.producer + + if p.hooks != nil && len(p.hooks.unbuffered) > 0 { + for _, h := range p.hooks.unbuffered { + h.OnProduceRecordUnbuffered(pr.Record, err) + } + } + + // Capture user size before potential modification by the promise. + // + // We call the promise before finishing the flush notification, + // allowing users of Flush to know all buf recs are done by the + // time we notify flush below. + userSize := pr.userSize() + pr.promise(pr.Record, err) + + // If this record was never buffered, it's size was never accounted + // for on any p field: return early. + if beforeBuffering { + return + } + + // Keep the lock as tight as possible: the broadcast can come after. + p.mu.Lock() + p.bufferedBytes -= userSize + p.bufferedRecords-- + broadcast := p.blocked.Load() > 0 || p.bufferedRecords == 0 && p.flushing.Load() > 0 + p.mu.Unlock() + + if broadcast { + p.c.Broadcast() + } +} + +// partitionRecord loads the partitions for a topic and produce to them. If +// the topic does not currently exist, the record is buffered in unknownTopics +// for a metadata update to deal with. +func (cl *Client) partitionRecord(pr promisedRec) { + parts, partsData := cl.partitionsForTopicProduce(pr) + if parts == nil { // saved in unknownTopics + return + } + cl.doPartitionRecord(parts, partsData, pr) +} + +// doPartitionRecord is separate so that metadata updates that load unknown +// partitions can call this directly. +func (cl *Client) doPartitionRecord(parts *topicPartitions, partsData *topicPartitionsData, pr promisedRec) { + if partsData.loadErr != nil && !kerr.IsRetriable(partsData.loadErr) { + cl.producer.promiseRecord(pr, partsData.loadErr) + return + } + + parts.partsMu.Lock() + defer parts.partsMu.Unlock() + if parts.partitioner == nil { + parts.partitioner = cl.cfg.partitioner.ForTopic(pr.Topic) + } + + mapping := partsData.writablePartitions + if parts.partitioner.RequiresConsistency(pr.Record) { + mapping = partsData.partitions + } + if len(mapping) == 0 { + cl.producer.promiseRecord(pr, errors.New("unable to partition record due to no usable partitions")) + return + } + + var pick int + tlp, _ := parts.partitioner.(TopicBackupPartitioner) + if tlp != nil { + if parts.lb == nil { + parts.lb = new(leastBackupInput) + } + parts.lb.mapping = mapping + pick = tlp.PartitionByBackup(pr.Record, len(mapping), parts.lb) + } else { + pick = parts.partitioner.Partition(pr.Record, len(mapping)) + } + if pick < 0 || pick >= len(mapping) { + cl.producer.promiseRecord(pr, fmt.Errorf("invalid record partitioning choice of %d from %d available", pick, len(mapping))) + return + } + + partition := mapping[pick] + + onNewBatch, _ := parts.partitioner.(TopicPartitionerOnNewBatch) + abortOnNewBatch := onNewBatch != nil + processed := partition.records.bufferRecord(pr, abortOnNewBatch) // KIP-480 + if !processed { + onNewBatch.OnNewBatch() + + if tlp != nil { + parts.lb.mapping = mapping + pick = tlp.PartitionByBackup(pr.Record, len(mapping), parts.lb) + } else { + pick = parts.partitioner.Partition(pr.Record, len(mapping)) + } + + if pick < 0 || pick >= len(mapping) { + cl.producer.promiseRecord(pr, fmt.Errorf("invalid record partitioning choice of %d from %d available", pick, 
len(mapping))) + return + } + partition = mapping[pick] + partition.records.bufferRecord(pr, false) // KIP-480 + } +} + +// ProducerID returns, loading if necessary, the current producer ID and epoch. +// This returns an error if the producer ID could not be loaded, if the +// producer ID has fatally errored, or if the context is canceled. +func (cl *Client) ProducerID(ctx context.Context) (int64, int16, error) { + var ( + id int64 + epoch int16 + err error + + done = make(chan struct{}) + ) + + go func() { + defer close(done) + id, epoch, err = cl.producerID(ctx2fn(ctx)) + }() + + select { + case <-ctx.Done(): + return 0, 0, ctx.Err() + case <-done: + return id, epoch, err + } +} + +type producerID struct { + id int64 + epoch int16 + err error +} + +var errReloadProducerID = errors.New("producer id needs reloading") + +// initProducerID initializes the client's producer ID for idempotent +// producing only (no transactions, which are more special). After the first +// load, this clears all buffered unknown topics. +func (cl *Client) producerID(ctxFn func() context.Context) (int64, int16, error) { + p := &cl.producer + + id := p.id.Load().(*producerID) + if errors.Is(id.err, errReloadProducerID) { + p.idMu.Lock() + defer p.idMu.Unlock() + + if id = p.id.Load().(*producerID); errors.Is(id.err, errReloadProducerID) { + if cl.cfg.disableIdempotency { + cl.cfg.logger.Log(LogLevelInfo, "skipping producer id initialization because the client was configured to disable idempotent writes") + id = &producerID{ + id: -1, + epoch: -1, + err: nil, + } + p.id.Store(id) + } else if cl.cfg.txnID == nil && id.id >= 0 && id.epoch < math.MaxInt16-1 { + // For the idempotent producer, as specified in KIP-360, + // if we had an ID, we can bump the epoch locally. + // If we are at the max epoch, we will ask for a new ID. + cl.resetAllProducerSequences() + id = &producerID{ + id: id.id, + epoch: id.epoch + 1, + err: nil, + } + p.id.Store(id) + } else { + newID, keep := cl.doInitProducerID(ctxFn, id.id, id.epoch) + if keep { + id = newID + // Whenever we have a new producer ID, we need + // our sequence numbers to be 0. On the first + // record produced, this will be true, but if + // we were signaled to reset the producer ID, + // then we definitely still need to reset here. + cl.resetAllProducerSequences() + p.id.Store(id) + } else { + // If we are not keeping the producer ID, + // we will return our old ID but with a + // static error that we can check or bubble + // up where needed. + id = &producerID{ + id: id.id, + epoch: id.epoch, + err: &errProducerIDLoadFail{newID.err}, + } + } + } + } + } + + return id.id, id.epoch, id.err +} + +// As seen in KAFKA-12152, if we bump an epoch, we have to reset sequence nums +// for every partition. Otherwise, we will use a new id/epoch for a partition +// and trigger OOOSN errors. +// +// Pre 2.5, this function is only be called if it is acceptable to continue +// on data loss (idempotent producer with no StopOnDataLoss option). +// +// 2.5+, it is safe to call this if the producer ID can be reset (KIP-360), +// in EndTransaction. +func (cl *Client) resetAllProducerSequences() { + for _, tp := range cl.producer.topics.load() { + for _, p := range tp.load().partitions { + p.records.mu.Lock() + p.records.needSeqReset = true + p.records.mu.Unlock() + } + } +} + +func (cl *Client) failProducerID(id int64, epoch int16, err error) { + p := &cl.producer + + // We do not lock the idMu when failing a producer ID, for two reasons. 
+ // + // 1) With how we store below, we do not need to. We only fail if the + // ID we are failing has not changed and if the ID we are failing has + // not failed already. Failing outside the lock is the same as failing + // within the lock. + // + // 2) Locking would cause a deadlock, because producerID locks + // idMu=>recBuf.Mu, whereas we failing while locked within a recBuf in + // sink.go. + new := &producerID{ + id: id, + epoch: epoch, + err: err, + } + for { + current := p.id.Load().(*producerID) + if current.id != id || current.epoch != epoch { + cl.cfg.logger.Log(LogLevelInfo, "ignoring a fail producer id request due to current id being different", + "current_id", current.id, + "current_epoch", current.epoch, + "current_err", current.err, + "fail_id", id, + "fail_epoch", epoch, + "fail_err", err, + ) + return + } + if current.err != nil { + cl.cfg.logger.Log(LogLevelInfo, "ignoring a fail producer id because our producer id has already been failed", + "current_id", current.id, + "current_epoch", current.epoch, + "current_err", current.err, + "fail_err", err, + ) + return + } + if p.id.CompareAndSwap(current, new) { + return + } + } +} + +// doInitProducerID inits the idempotent ID and potentially the transactional +// producer epoch, returning whether to keep the result. +func (cl *Client) doInitProducerID(ctxFn func() context.Context, lastID int64, lastEpoch int16) (*producerID, bool) { + cl.cfg.logger.Log(LogLevelInfo, "initializing producer id") + req := kmsg.NewPtrInitProducerIDRequest() + req.TransactionalID = cl.cfg.txnID + req.ProducerID = lastID + req.ProducerEpoch = lastEpoch + if cl.cfg.txnID != nil { + req.TransactionTimeoutMillis = int32(cl.cfg.txnTimeout.Milliseconds()) + } + + ctx := ctxFn() + resp, err := req.RequestWith(ctx, cl) + if err != nil { + if errors.Is(err, errUnknownRequestKey) || errors.Is(err, errBrokerTooOld) { + cl.cfg.logger.Log(LogLevelInfo, "unable to initialize a producer id because the broker is too old or the client is pinned to an old version, continuing without a producer id") + return &producerID{-1, -1, nil}, true + } + if errors.Is(err, errChosenBrokerDead) { + select { + case <-cl.ctx.Done(): + cl.cfg.logger.Log(LogLevelInfo, "producer id initialization failure due to dying client", "err", err) + return &producerID{lastID, lastEpoch, ErrClientClosed}, true + default: + } + } + cl.cfg.logger.Log(LogLevelInfo, "producer id initialization failure, discarding initialization attempt", "err", err) + return &producerID{lastID, lastEpoch, err}, false + } + + if err = kerr.ErrorForCode(resp.ErrorCode); err != nil { + // We could receive concurrent transactions; this is ignorable + // and we just want to re-init. + if kerr.IsRetriable(err) || errors.Is(err, kerr.ConcurrentTransactions) { + cl.cfg.logger.Log(LogLevelInfo, "producer id initialization resulted in retryable error, discarding initialization attempt", "err", err) + return &producerID{lastID, lastEpoch, err}, false + } + cl.cfg.logger.Log(LogLevelInfo, "producer id initialization errored", "err", err) + return &producerID{lastID, lastEpoch, err}, true + } + + cl.cfg.logger.Log(LogLevelInfo, "producer id initialization success", "id", resp.ProducerID, "epoch", resp.ProducerEpoch) + + // We track if this was v3. We do not need to gate this behind a mutex, + // because the only other use is EndTransaction's read, which is + // documented to only be called sequentially after producing. 
+ if cl.producer.idVersion == -1 { + cl.producer.idVersion = req.Version + } + + return &producerID{resp.ProducerID, resp.ProducerEpoch, nil}, true +} + +// partitionsForTopicProduce returns the topic partitions for a record. +// If the topic is not loaded yet, this buffers the record and returns +// nil, nil. +func (cl *Client) partitionsForTopicProduce(pr promisedRec) (*topicPartitions, *topicPartitionsData) { + p := &cl.producer + topic := pr.Topic + + topics := p.topics.load() + parts, exists := topics[topic] + if exists { + if v := parts.load(); len(v.partitions) > 0 { + return parts, v + } + } + + if !exists { // topic did not exist: check again under mu and potentially create it + p.topicsMu.Lock() + defer p.topicsMu.Unlock() + + if parts, exists = p.topics.load()[topic]; !exists { // update parts for below + // Before we store the new topic, we lock unknown + // topics to prevent a concurrent metadata update + // seeing our new topic before we are waiting from the + // addUnknownTopicRecord fn. Otherwise, we would wait + // and never be re-notified. + p.unknownTopicsMu.Lock() + defer p.unknownTopicsMu.Unlock() + + p.topics.storeTopics([]string{topic}) + cl.addUnknownTopicRecord(pr) + cl.triggerUpdateMetadataNow("forced load because we are producing to a topic for the first time") + return nil, nil + } + } + + // Here, the topic existed, but maybe has not loaded partitions yet. We + // have to lock unknown topics first to ensure ordering just in case a + // load has not happened. + p.unknownTopicsMu.Lock() + defer p.unknownTopicsMu.Unlock() + + if v := parts.load(); len(v.partitions) > 0 { + return parts, v + } + cl.addUnknownTopicRecord(pr) + cl.triggerUpdateMetadata(false, "reload trigger due to produce topic still not known") + + return nil, nil // our record is buffered waiting for metadata update; nothing to return +} + +// addUnknownTopicRecord adds a record to a topic whose partitions are +// currently unknown. This is always called with the unknownTopicsMu held. +func (cl *Client) addUnknownTopicRecord(pr promisedRec) { + unknown := cl.producer.unknownTopics[pr.Topic] + if unknown == nil { + unknown = &unknownTopicProduces{ + buffered: make([]promisedRec, 0, 100), + wait: make(chan error, 5), + fatal: make(chan error, 1), + } + cl.producer.unknownTopics[pr.Topic] = unknown + } + unknown.buffered = append(unknown.buffered, pr) + if len(unknown.buffered) == 1 { + go cl.waitUnknownTopic(pr.ctx, pr.Record.Context, pr.Topic, unknown) + } +} + +// waitUnknownTopic waits for a notification +func (cl *Client) waitUnknownTopic( + pctx context.Context, // context passed to Produce + rctx context.Context, // context on the record itself + topic string, + unknown *unknownTopicProduces, +) { + cl.cfg.logger.Log(LogLevelInfo, "producing to a new topic for the first time, fetching metadata to learn its partitions", "topic", topic) + + var ( + tries int + unknownTries int64 + err error + after <-chan time.Time + ) + + if timeout := cl.cfg.recordTimeout; timeout > 0 { + timer := time.NewTimer(cl.cfg.recordTimeout) + defer timer.Stop() + after = timer.C + } + + // Ordering: aborting is set first, then unknown topics are manually + // canceled in a lock. New unknown topics after that lock will see + // aborting here and immediately cancel themselves. 
+ if cl.producer.isAborting() { + err = ErrAborting + } + + for err == nil { + select { + case <-pctx.Done(): + err = pctx.Err() + case <-rctx.Done(): + err = rctx.Err() + case <-cl.ctx.Done(): + err = ErrClientClosed + case <-after: + err = ErrRecordTimeout + case err = <-unknown.fatal: + case retryableErr, ok := <-unknown.wait: + if !ok { + cl.cfg.logger.Log(LogLevelInfo, "done waiting for metadata for new topic", "topic", topic) + return // metadata was successful! + } + cl.cfg.logger.Log(LogLevelInfo, "new topic metadata wait failed, retrying wait", "topic", topic, "err", retryableErr) + tries++ + if int64(tries) >= cl.cfg.recordRetries { + err = fmt.Errorf("no partitions available after attempting to refresh metadata %d times, last err: %w", tries, retryableErr) + } + if cl.cfg.maxUnknownFailures >= 0 && errors.Is(retryableErr, kerr.UnknownTopicOrPartition) { + unknownTries++ + if unknownTries > cl.cfg.maxUnknownFailures { + err = retryableErr + } + } + } + } + + // If we errored above, we come down here to potentially clear the + // topic wait and fail all buffered records. However, under some + // extreme conditions, a quickly following metadata update could delete + // our unknown topic, and then a produce could recreate a new unknown + // topic. We only delete and finish promises if the pointer in the + // unknown topic map is still the same. + p := &cl.producer + + p.unknownTopicsMu.Lock() + defer p.unknownTopicsMu.Unlock() + + nowUnknown := p.unknownTopics[topic] + if nowUnknown != unknown { + return + } + cl.cfg.logger.Log(LogLevelInfo, "new topic metadata wait failed, done retrying, failing all records", "topic", topic, "err", err) + + delete(p.unknownTopics, topic) + p.promiseBatch(batchPromise{ + recs: unknown.buffered, + err: err, + }) +} + +func (cl *Client) unlingerDueToMaxRecsBuffered() { + if cl.cfg.linger <= 0 { + return + } + for _, parts := range cl.producer.topics.load() { + for _, part := range parts.load().partitions { + part.records.unlingerAndManuallyDrain() + } + } + cl.cfg.logger.Log(LogLevelDebug, "unlingered all partitions due to hitting max buffered") +} + +// Flush hangs waiting for all buffered records to be flushed, stopping all +// lingers if necessary. +// +// If the context finishes (Done), this returns the context's error. +// +// This function is safe to call multiple times concurrently, and safe to call +// concurrent with Flush. +func (cl *Client) Flush(ctx context.Context) error { + p := &cl.producer + + // Signal to finishRecord that we want to be notified once buffered hits 0. + // Also forbid any new producing to start a linger. + p.flushing.Add(1) + defer p.flushing.Add(-1) + + cl.cfg.logger.Log(LogLevelInfo, "flushing") + defer cl.cfg.logger.Log(LogLevelDebug, "flushed") + + // At this point, if lingering is configured, nothing will _start_ a + // linger because the producer's flushing atomic int32 is nonzero. We + // must wake anything that could be lingering up, after which all sinks + // will loop draining. 
+ if cl.cfg.linger > 0 || cl.cfg.manualFlushing { + for _, parts := range p.topics.load() { + for _, part := range parts.load().partitions { + part.records.unlingerAndManuallyDrain() + } + } + } + + quit := false + done := make(chan struct{}) + go func() { + p.mu.Lock() + defer p.mu.Unlock() + defer close(done) + + for !quit && p.bufferedRecords+int64(p.blocked.Load()) > 0 { + p.c.Wait() + } + }() + + select { + case <-done: + return nil + case <-ctx.Done(): + p.mu.Lock() + quit = true + p.mu.Unlock() + p.c.Broadcast() + return ctx.Err() + } +} + +func (p *producer) pause(ctx context.Context) error { + p.inflight.Add(1 << 48) + + quit := false + done := make(chan struct{}) + go func() { + p.mu.Lock() + defer p.mu.Unlock() + defer close(done) + for !quit && p.inflight.Load()&((1<<48)-1) != 0 { + p.c.Wait() + } + }() + + select { + case <-done: + return nil + case <-ctx.Done(): + p.mu.Lock() + quit = true + p.mu.Unlock() + p.c.Broadcast() + p.resume() // dec our inflight + return ctx.Err() + } +} + +func (p *producer) resume() { + if p.inflight.Add(-1<<48) == 0 { + p.cl.allSinksAndSources(func(sns sinkAndSource) { + sns.sink.maybeDrain() + }) + } +} + +func (p *producer) maybeAddInflight() bool { + if p.inflight.Load()>>48 > 0 { + return false + } + if p.inflight.Add(1)>>48 > 0 { + p.decInflight() + return false + } + return true +} + +func (p *producer) decInflight() { + if p.inflight.Add(-1)>>48 > 0 { + p.mu.Lock() + p.mu.Unlock() //nolint:gocritic,staticcheck // We use the lock as a barrier, unlocking immediately is safe. + p.c.Broadcast() + } +} + +// Bumps the tries for all buffered records in the client. +// +// This is called whenever there is a problematic error that would affect the +// state of all buffered records as a whole: +// +// - if we cannot init a producer ID due to RequestWith errors, producing is useless +// - if we cannot add partitions to a txn due to RequestWith errors, producing is useless +// +// Note that these are specifically due to RequestWith errors, not due to +// receiving a response that has a retryable error code. That is, if our +// request keeps dying. +func (cl *Client) bumpRepeatedLoadErr(err error) { + p := &cl.producer + + for _, partitions := range p.topics.load() { + for _, partition := range partitions.load().partitions { + partition.records.bumpRepeatedLoadErr(err) + } + } + p.unknownTopicsMu.Lock() + defer p.unknownTopicsMu.Unlock() + for _, unknown := range p.unknownTopics { + select { + case unknown.wait <- err: + default: + } + } +} + +// Clears all buffered records in the client with the given error. +// +// - closing client +// - aborting transaction +// - fatal AddPartitionsToTxn +// +// Because the error fails everything, we also empty our unknown topics and +// delete any topics that were still unknown from the producer's topics. 
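+//
+// Editor's note: from the caller's side, the usual way to avoid having
+// buffered records failed at shutdown is to flush before closing. A minimal
+// sketch; the timeout duration is an assumption for illustration only.
+//
+//	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+//	defer cancel()
+//	if err := cl.Flush(ctx); err != nil {
+//		// the context expired; records that never flushed are failed on Close
+//	}
+//	cl.Close()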
+func (cl *Client) failBufferedRecords(err error) { + p := &cl.producer + + for _, partitions := range p.topics.load() { + for _, partition := range partitions.load().partitions { + recBuf := partition.records + recBuf.mu.Lock() + recBuf.failAllRecords(err) + recBuf.mu.Unlock() + } + } + + p.topicsMu.Lock() + defer p.topicsMu.Unlock() + p.unknownTopicsMu.Lock() + defer p.unknownTopicsMu.Unlock() + + toStore := p.topics.clone() + defer p.topics.storeData(toStore) + + var toFail [][]promisedRec + for topic, unknown := range p.unknownTopics { + delete(toStore, topic) + delete(p.unknownTopics, topic) + close(unknown.wait) + toFail = append(toFail, unknown.buffered) + } + + for _, fail := range toFail { + p.promiseBatch(batchPromise{ + recs: fail, + err: err, + }) + } +} diff --git a/vendor/github.com/twmb/franz-go/pkg/kgo/record_and_fetch.go b/vendor/github.com/twmb/franz-go/pkg/kgo/record_and_fetch.go new file mode 100644 index 000000000000..4f1ebe6f524b --- /dev/null +++ b/vendor/github.com/twmb/franz-go/pkg/kgo/record_and_fetch.go @@ -0,0 +1,628 @@ +package kgo + +import ( + "context" + "errors" + "reflect" + "time" + "unsafe" +) + +// RecordHeader contains extra information that can be sent with Records. +type RecordHeader struct { + Key string + Value []byte +} + +// RecordAttrs contains additional meta information about a record, such as its +// compression or timestamp type. +type RecordAttrs struct { + // 6 bits are used right now for record batches, and we use the high + // bit to signify no timestamp due to v0 message set. + // + // bits 1 thru 3: + // 000 no compression + // 001 gzip + // 010 snappy + // 011 lz4 + // 100 zstd + // bit 4: timestamp type + // bit 5: is transactional + // bit 6: is control + // bit 8: no timestamp type + attrs uint8 +} + +// TimestampType specifies how Timestamp was determined. +// +// The default, 0, means that the timestamp was determined in a client +// when the record was produced. +// +// An alternative is 1, which is when the Timestamp is set in Kafka. +// +// Records pre 0.10.0 did not have timestamps and have value -1. +func (a RecordAttrs) TimestampType() int8 { + if a.attrs&0b1000_0000 != 0 { + return -1 + } + return int8(a.attrs&0b0000_1000) >> 3 +} + +// CompressionType signifies with which algorithm this record was compressed. +// +// 0 is no compression, 1 is gzip, 2 is snappy, 3 is lz4, and 4 is zstd. +func (a RecordAttrs) CompressionType() uint8 { + return a.attrs & 0b0000_0111 +} + +// IsTransactional returns whether a record is a part of a transaction. +func (a RecordAttrs) IsTransactional() bool { + return a.attrs&0b0001_0000 != 0 +} + +// IsControl returns whether a record is a "control" record (ABORT or COMMIT). +// These are generally not visible unless explicitly opted into. +func (a RecordAttrs) IsControl() bool { + return a.attrs&0b0010_0000 != 0 +} + +// Record is a record to write to Kafka. +type Record struct { + // Key is an optional field that can be used for partition assignment. + // + // This is generally used with a hash partitioner to cause all records + // with the same key to go to the same partition. + Key []byte + // Value is blob of data to write to Kafka. + Value []byte + + // Headers are optional key/value pairs that are passed along with + // records. + // + // These are purely for producers and consumers; Kafka does not look at + // this field and only writes it to disk. 
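+ //
+ // Editor-added illustration: a header can be attached before producing,
+ // with the key and value here being example data only.
+ //
+ //	r.Headers = append(r.Headers, kgo.RecordHeader{
+ //		Key:   "trace-id",
+ //		Value: []byte("abc123"),
+ //	})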
+ Headers []RecordHeader + + // NOTE: if logAppendTime, timestamp is MaxTimestamp, not first + delta + // zendesk/ruby-kafka#706 + + // Timestamp is the timestamp that will be used for this record. + // + // Record batches are always written with "CreateTime", meaning that + // timestamps are generated by clients rather than brokers. + // + // When producing, if this field is not yet set, it is set to time.Now. + Timestamp time.Time + + // Topic is the topic that a record is written to. + // + // This must be set for producing. + Topic string + + // Partition is the partition that a record is written to. + // + // For producing, this is left unset. This will be set by the client + // before the record is unbuffered. If you use the ManualPartitioner, + // the value of this field is always the partition chosen when + // producing (i.e., you partition manually ahead of time). + Partition int32 + + // Attrs specifies what attributes were on this record. + // + // For producing, this is left unset. This will be set by the client + // before the record is unbuffered. + Attrs RecordAttrs + + // ProducerEpoch is the producer epoch of this message if it was + // produced with a producer ID. An epoch and ID of 0 means it was not. + // + // For producing, this is left unset. This will be set by the client + // before the record is unbuffered. + ProducerEpoch int16 + + // ProducerEpoch is the producer ID of this message if it was produced + // with a producer ID. An epoch and ID of 0 means it was not. + // + // For producing, this is left unset. This will be set by the client + // before the record is unbuffered. + ProducerID int64 + + // LeaderEpoch is the leader epoch of the broker at the time this + // record was written, or -1 if on message sets. + // + // For committing records, it is not recommended to modify the + // LeaderEpoch. Clients use the LeaderEpoch for data loss detection. + LeaderEpoch int32 + + // Offset is the offset that a record is written as. + // + // For producing, this is left unset. This will be set by the client + // before the record is unbuffered. If you are producing with no acks, + // this will just be the offset used in the produce request and does + // not mirror the offset actually stored within Kafka. + Offset int64 + + // Context is an optional field that is used for enriching records. + // + // If this field is nil when producing, it is set to the Produce ctx + // arg. This field can be used to propagate record enrichment across + // producer hooks. It can also be set in a consumer hook to propagate + // enrichment to consumer clients. + Context context.Context +} + +func (r *Record) userSize() int64 { + s := len(r.Key) + len(r.Value) + for _, h := range r.Headers { + s += len(h.Key) + len(h.Value) + } + return int64(s) +} + +// When buffering records, we calculate the length and tsDelta ahead of time +// (also because number width affects encoding length). We repurpose the Offset +// field to save space. +func (r *Record) setLengthAndTimestampDelta(length int32, tsDelta int64) { + r.LeaderEpoch = length + r.Offset = tsDelta +} + +func (r *Record) lengthAndTimestampDelta() (length int32, tsDelta int64) { + return r.LeaderEpoch, r.Offset +} + +// AppendFormat appends a record to b given the layout or returns an error if +// the layout is invalid. This is a one-off shortcut for using +// NewRecordFormatter. See that function's documentation for the layout +// specification. 
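+//
+// A brief editor-added illustration; the layout string is an example only
+// (see NewRecordFormatter for the full specification).
+//
+//	out, err := r.AppendFormat(nil, "%t[%p] %o: %v\n")
+//	if err != nil {
+//		// the layout was invalid
+//	}
+//	_ = out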
+func (r *Record) AppendFormat(b []byte, layout string) ([]byte, error) { + f, err := NewRecordFormatter(layout) + if err != nil { + return b, err + } + return f.AppendRecord(b, r), nil +} + +// StringRecord returns a Record with the Value field set to the input value +// string. For producing, this function is useful in tandem with the +// client-level DefaultProduceTopic option. +// +// This function uses the 'unsafe' package to avoid copying value into a slice. +// +// NOTE: It is NOT SAFE to modify the record's value. This function should only +// be used if you only ever read record fields. This function can safely be used +// for producing; the client never modifies a record's key nor value fields. +func StringRecord(value string) *Record { + var slice []byte + slicehdr := (*reflect.SliceHeader)(unsafe.Pointer(&slice)) //nolint:gosec // known way to convert string to slice + slicehdr.Data = ((*reflect.StringHeader)(unsafe.Pointer(&value))).Data //nolint:gosec // known way to convert string to slice + slicehdr.Len = len(value) + slicehdr.Cap = len(value) + + return &Record{Value: slice} +} + +// KeyStringRecord returns a Record with the Key and Value fields set to the +// input key and value strings. For producing, this function is useful in +// tandem with the client-level DefaultProduceTopic option. +// +// This function uses the 'unsafe' package to avoid copying value into a slice. +// +// NOTE: It is NOT SAFE to modify the record's value. This function should only +// be used if you only ever read record fields. This function can safely be used +// for producing; the client never modifies a record's key nor value fields. +func KeyStringRecord(key, value string) *Record { + r := StringRecord(value) + + keyhdr := (*reflect.SliceHeader)(unsafe.Pointer(&r.Key)) //nolint:gosec // known way to convert string to slice + keyhdr.Data = ((*reflect.StringHeader)(unsafe.Pointer(&key))).Data //nolint:gosec // known way to convert string to slice + keyhdr.Len = len(key) + keyhdr.Cap = len(key) + + return r +} + +// SliceRecord returns a Record with the Value field set to the input value +// slice. For producing, this function is useful in tandem with the +// client-level DefaultProduceTopic option. +func SliceRecord(value []byte) *Record { + return &Record{Value: value} +} + +// KeySliceRecord returns a Record with the Key and Value fields set to the +// input key and value slices. For producing, this function is useful in +// tandem with the client-level DefaultProduceTopic option. +func KeySliceRecord(key, value []byte) *Record { + return &Record{Key: key, Value: value} +} + +// FetchPartition is a response for a partition in a fetched topic from a +// broker. +type FetchPartition struct { + // Partition is the partition this is for. + Partition int32 + // Err is an error for this partition in the fetch. + // + // Note that if this is a fatal error, such as data loss or non + // retryable errors, this partition will never be fetched again. + Err error + // HighWatermark is the current high watermark for this partition, that + // is, the current offset that is on all in sync replicas. + HighWatermark int64 + // LastStableOffset is the offset at which all prior offsets have been + // "decided". Non transactional records are always decided immediately, + // but transactional records are only decided once they are committed + // or aborted. + // + // The LastStableOffset will always be at or under the HighWatermark. 
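+ //
+ // (Editor's note: when consuming with a read-committed isolation
+ // level, fetching does not advance past this offset.)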
+ LastStableOffset int64 + // LogStartOffset is the low watermark of this partition, otherwise + // known as the earliest offset in the partition. + LogStartOffset int64 + // Records contains feched records for this partition. + Records []*Record +} + +// EachRecord calls fn for each record in the partition. +func (p *FetchPartition) EachRecord(fn func(*Record)) { + for _, r := range p.Records { + fn(r) + } +} + +// FetchTopic is a response for a fetched topic from a broker. +type FetchTopic struct { + // Topic is the topic this is for. + Topic string + // Partitions contains individual partitions in the topic that were + // fetched. + Partitions []FetchPartition +} + +// EachPartition calls fn for each partition in Fetches. +func (t *FetchTopic) EachPartition(fn func(FetchPartition)) { + for i := range t.Partitions { + fn(t.Partitions[i]) + } +} + +// EachRecord calls fn for each record in the topic, in any partition order. +func (t *FetchTopic) EachRecord(fn func(*Record)) { + for i := range t.Partitions { + for _, r := range t.Partitions[i].Records { + fn(r) + } + } +} + +// Records returns all records in all partitions in this topic. +// +// This is a convenience function that does a single slice allocation. If you +// can process records individually, it is far more efficient to use the Each +// functions. +func (t *FetchTopic) Records() []*Record { + var n int + t.EachPartition(func(p FetchPartition) { + n += len(p.Records) + }) + rs := make([]*Record, 0, n) + t.EachPartition(func(p FetchPartition) { + rs = append(rs, p.Records...) + }) + return rs +} + +// Fetch is an individual response from a broker. +type Fetch struct { + // Topics are all topics being responded to from a fetch to a broker. + Topics []FetchTopic +} + +// Fetches is a group of fetches from brokers. +type Fetches []Fetch + +// FetchError is an error in a fetch along with the topic and partition that +// the error was on. +type FetchError struct { + Topic string + Partition int32 + Err error +} + +// Errors returns all errors in a fetch with the topic and partition that +// errored. +// +// There are a few classes of errors possible: +// +// 1. a normal kerr.Error; these are usually the non-retryable kerr.Errors, +// but theoretically a non-retryable error can be fixed at runtime (auth +// error? fix auth). It is worth restarting the client for these errors if +// you do not intend to fix this problem at runtime. +// +// 2. an injected *ErrDataLoss; these are informational, the client +// automatically resets consuming to where it should and resumes. This +// error is worth logging and investigating, but not worth restarting the +// client for. +// +// 3. an untyped batch parse failure; these are usually unrecoverable by +// restarts, and it may be best to just let the client continue. However, +// restarting is an option, but you may need to manually repair your +// partition. +// +// 4. an injected ErrClientClosed; this is a fatal informational error that +// is returned from every Poll call if the client has been closed. +// A corresponding helper function IsClientClosed can be used to detect +// this error. +// +// 5. an injected context error; this can be present if the context you were +// using for polling timed out or was canceled. +// +// 6. an injected ErrGroupSession; this is an informational error that is +// injected once a group session is lost in a way that is not the standard +// rebalance. 
This error can signify that your consumer member is not able +// to connect to the group (ACL problems, unreachable broker), or you +// blocked rebalancing for too long, or your callbacks took too long. +// +// This list may grow over time. +func (fs Fetches) Errors() []FetchError { + var errs []FetchError + fs.EachError(func(t string, p int32, err error) { + errs = append(errs, FetchError{t, p, err}) + }) + return errs +} + +// When we fetch, it is possible for Kafka to reply with topics / partitions +// that have no records and no errors. This will definitely happen outside of +// fetch sessions, but may also happen at other times (for some reason). +// When that happens we want to ignore the fetch. +func (f Fetch) hasErrorsOrRecords() bool { + for i := range f.Topics { + t := &f.Topics[i] + for j := range t.Partitions { + p := &t.Partitions[j] + if p.Err != nil || len(p.Records) > 0 { + return true + } + } + } + return false +} + +// IsClientClosed returns whether the fetches include an error indicating that +// the client is closed. +// +// This function is useful to break out of a poll loop; you likely want to call +// this function before calling Errors. If you may cancel the context to poll, +// you may want to use Err0 and manually check errors.Is(ErrClientClosed) or +// errors.Is(context.Canceled). +func (fs Fetches) IsClientClosed() bool { + // An injected ErrClientClosed is a single fetch with one topic and + // one partition. We can use this to make IsClientClosed do less work. + return len(fs) == 1 && len(fs[0].Topics) == 1 && len(fs[0].Topics[0].Partitions) == 1 && errors.Is(fs[0].Topics[0].Partitions[0].Err, ErrClientClosed) +} + +// Err0 returns the error at the 0th index fetch, topic, and partition. This +// can be used to quickly check if polling returned early because the client +// was closed or the context was canceled and is faster than performing a +// linear scan over all partitions with Err. When the client is closed or the +// context is canceled, fetches will contain only one partition whose Err field +// indicates the close / cancel. Note that this returns whatever the first +// error is, nil or non-nil, and does not check for a specific error value. +func (fs Fetches) Err0() error { + if len(fs) > 0 && len(fs[0].Topics) > 0 && len(fs[0].Topics[0].Partitions) > 0 { + return fs[0].Topics[0].Partitions[0].Err + } + return nil +} + +// Err returns the first error in all fetches, if any. This can be used to +// quickly check if the client is closed or your poll context was canceled, or +// to check if there's some other error that requires deeper investigation with +// EachError. This function performs a linear scan over all fetched partitions. +// It is recommended to always check all errors. If you would like to more +// quickly check ahead of time if a poll was canceled because of closing the +// client or canceling the context, you can use Err0. +func (fs Fetches) Err() error { + for _, f := range fs { + for i := range f.Topics { + ft := &f.Topics[i] + for j := range ft.Partitions { + fp := &ft.Partitions[j] + if fp.Err != nil { + return fp.Err + } + } + } + } + return nil +} + +// EachError calls fn for every partition that had a fetch error with the +// topic, partition, and error. +// +// This function has the same semantics as the Errors function; refer to the +// documentation on that function for what types of errors are possible. 
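+//
+// Editor-added sketch of a poll loop that separates client shutdown from
+// other fetch errors; the broker address and topic are assumptions for the
+// example only.
+//
+//	cl, err := kgo.NewClient(
+//		kgo.SeedBrokers("localhost:9092"),
+//		kgo.ConsumeTopics("example-topic"),
+//	)
+//	if err != nil {
+//		panic(err)
+//	}
+//	for {
+//		fetches := cl.PollFetches(context.Background())
+//		if fetches.IsClientClosed() {
+//			return
+//		}
+//		fetches.EachError(func(topic string, partition int32, err error) {
+//			// log the error and decide whether to keep polling
+//		})
+//		fetches.EachRecord(func(r *kgo.Record) {
+//			// process r
+//		})
+//	}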
+func (fs Fetches) EachError(fn func(string, int32, error)) { + for _, f := range fs { + for i := range f.Topics { + ft := &f.Topics[i] + for j := range ft.Partitions { + fp := &ft.Partitions[j] + if fp.Err != nil { + fn(ft.Topic, fp.Partition, fp.Err) + } + } + } + } +} + +// RecordIter returns an iterator over all records in a fetch. +// +// Note that errors should be inspected as well. +func (fs Fetches) RecordIter() *FetchesRecordIter { + iter := &FetchesRecordIter{fetches: fs} + iter.prepareNext() + return iter +} + +// FetchesRecordIter iterates over records in a fetch. +type FetchesRecordIter struct { + fetches []Fetch + ti int // index to current topic in fetches[0] + pi int // index to current partition in current topic + ri int // index to current record in current partition +} + +// Done returns whether there are any more records to iterate over. +func (i *FetchesRecordIter) Done() bool { + return len(i.fetches) == 0 +} + +// Next returns the next record from a fetch. +func (i *FetchesRecordIter) Next() *Record { + next := i.fetches[0].Topics[i.ti].Partitions[i.pi].Records[i.ri] + i.ri++ + i.prepareNext() + return next +} + +func (i *FetchesRecordIter) prepareNext() { +beforeFetch0: + if len(i.fetches) == 0 { + return + } + + fetch0 := &i.fetches[0] +beforeTopic: + if i.ti >= len(fetch0.Topics) { + i.fetches = i.fetches[1:] + i.ti = 0 + goto beforeFetch0 + } + + topic := &fetch0.Topics[i.ti] +beforePartition: + if i.pi >= len(topic.Partitions) { + i.ti++ + i.pi = 0 + goto beforeTopic + } + + partition := &topic.Partitions[i.pi] + if i.ri >= len(partition.Records) { + i.pi++ + i.ri = 0 + goto beforePartition + } +} + +// EachPartition calls fn for each partition in Fetches. +// +// Partitions are not visited in any specific order, and a topic may be visited +// multiple times if it is spread across fetches. +func (fs Fetches) EachPartition(fn func(FetchTopicPartition)) { + for _, fetch := range fs { + for _, topic := range fetch.Topics { + for i := range topic.Partitions { + fn(FetchTopicPartition{ + Topic: topic.Topic, + FetchPartition: topic.Partitions[i], + }) + } + } + } +} + +// EachTopic calls fn for each topic in Fetches. +// +// This is a convenience function that groups all partitions for the same topic +// from many fetches into one FetchTopic. A map is internally allocated to +// group partitions per topic before calling fn. +func (fs Fetches) EachTopic(fn func(FetchTopic)) { + switch len(fs) { + case 0: + return + case 1: + for _, topic := range fs[0].Topics { + fn(topic) + } + return + } + + topics := make(map[string][]FetchPartition) + for _, fetch := range fs { + for _, topic := range fetch.Topics { + topics[topic.Topic] = append(topics[topic.Topic], topic.Partitions...) + } + } + + for topic, partitions := range topics { + fn(FetchTopic{ + topic, + partitions, + }) + } +} + +// EachRecord calls fn for each record in Fetches. +// +// This is very similar to using a record iter, and is solely a convenience +// function depending on which style you prefer. +func (fs Fetches) EachRecord(fn func(*Record)) { + for iter := fs.RecordIter(); !iter.Done(); { + fn(iter.Next()) + } +} + +// Records returns all records in all fetches. +// +// This is a convenience function that does a single slice allocation. If you +// can process records individually, it is far more efficient to use the Each +// functions or the RecordIter. 
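+//
+// Editor-added sketch of the iterator alternative mentioned above:
+//
+//	for iter := fetches.RecordIter(); !iter.Done(); {
+//		r := iter.Next()
+//		// process r without allocating an intermediate slice
+//	}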
+func (fs Fetches) Records() []*Record { + rs := make([]*Record, 0, fs.NumRecords()) + fs.EachPartition(func(p FetchTopicPartition) { + rs = append(rs, p.Records...) + }) + return rs +} + +// NumRecords returns the total number of records across all fetched partitions. +func (fs Fetches) NumRecords() (n int) { + fs.EachPartition(func(p FetchTopicPartition) { + n += len(p.Records) + }) + return n +} + +// Empty checks whether the fetch result empty. This method is faster than NumRecords() == 0. +func (fs Fetches) Empty() bool { + for i := range fs { + for j := range fs[i].Topics { + for k := range fs[i].Topics[j].Partitions { + if len(fs[i].Topics[j].Partitions[k].Records) > 0 { + return false + } + } + } + } + + return true +} + +// FetchTopicPartition is similar to FetchTopic, but for an individual +// partition. +type FetchTopicPartition struct { + // Topic is the topic this is for. + Topic string + // FetchPartition is an individual partition within this topic. + FetchPartition +} + +// EachRecord calls fn for each record in the topic's partition. +func (r *FetchTopicPartition) EachRecord(fn func(*Record)) { + for _, r := range r.Records { + fn(r) + } +} diff --git a/vendor/github.com/twmb/franz-go/pkg/kgo/record_formatter.go b/vendor/github.com/twmb/franz-go/pkg/kgo/record_formatter.go new file mode 100644 index 000000000000..2f5d2ce3c33a --- /dev/null +++ b/vendor/github.com/twmb/franz-go/pkg/kgo/record_formatter.go @@ -0,0 +1,2246 @@ +package kgo + +import ( + "bufio" + "bytes" + "encoding/base64" + "encoding/binary" + "encoding/hex" + "encoding/json" + "errors" + "fmt" + "io" + "regexp" + "strconv" + "strings" + "time" + "unicode/utf8" + + "github.com/twmb/franz-go/pkg/kbin" +) + +//////////// +// WRITER // +//////////// + +// RecordFormatter formats records. +type RecordFormatter struct { + calls atomicI64 + fns []func([]byte, *FetchPartition, *Record) []byte +} + +// AppendRecord appends a record to b given the parsed format and returns the +// updated slice. +func (f *RecordFormatter) AppendRecord(b []byte, r *Record) []byte { + for _, fn := range f.fns { + b = fn(b, nil, r) + } + return b +} + +// AppendPartitionRecord appends a record and partition to b given the parsed +// format and returns the updated slice. +func (f *RecordFormatter) AppendPartitionRecord(b []byte, p *FetchPartition, r *Record) []byte { + for _, fn := range f.fns { + b = fn(b, p, r) + } + return b +} + +// NewRecordFormatter returns a formatter for the given layout, or an error if +// the layout is invalid. +// +// The formatter is very powerful, as such there is a lot to describe. This +// documentation attempts to be as succinct as possible. +// +// Similar to the fmt package, record formatting is based off of slash escapes +// and percent "verbs" (copying fmt package lingo). Slashes are used for common +// escapes, +// +// \t \n \r \\ \xNN +// +// printing tabs, newlines, carriage returns, slashes, and hex encoded +// characters. 
+// +// Percent encoding opts in to printing aspects of either a record or a fetch +// partition: +// +// %t topic +// %T topic length +// %k key +// %K key length +// %v value +// %V value length +// %h begin the header specification +// %H number of headers +// %p partition +// %o offset +// %e leader epoch +// %d timestamp (date, formatting described below) +// %a record attributes (formatting required, described below) +// %x producer id +// %y producer epoch +// +// For AppendPartitionRecord, the formatter also undersands the following three +// formatting options: +// +// %[ partition log start offset +// %| partition last stable offset +// %] partition high watermark +// +// The formatter internally tracks the number of times AppendRecord or +// AppendPartitionRecord have been called. The special option %i prints the +// iteration / call count: +// +// %i format iteration number (starts at 1) +// +// Lastly, there are three escapes to print raw characters that are usually +// used for formatting options: +// +// %% percent sign +// %{ left brace (required if a brace is after another format option) +// %} right brace +// +// # Header specification +// +// Specifying headers is essentially a primitive nested format option, +// accepting the key and value escapes above: +// +// %K header key length +// %k header key +// %V header value length +// %v header value +// +// For example, "%H %h{%k %v }" will print the number of headers, and then each +// header key and value with a space after each. +// +// # Verb modifiers +// +// Most of the previous verb specifications can be modified by adding braces +// with a given modifier, e.g., "%V{ascii}". All modifiers are described below. +// +// # Numbers +// +// All number verbs accept braces that control how the number is printed: +// +// %v{ascii} the default, print the number as ascii +// %v{number} alias for ascii +// +// %v{hex64} print 16 hex characters for the number +// %v{hex32} print 8 hex characters for the number +// %v{hex16} print 4 hex characters for the number +// %v{hex8} print 2 hex characters for the number +// %v{hex4} print 1 hex characters for the number +// %v{hex} print as many hex characters as necessary for the number +// +// %v{big64} print the number in big endian uint64 format +// %v{big32} print the number in big endian uint32 format +// %v{big16} print the number in big endian uint16 format +// %v{big8} alias for byte +// +// %v{little64} print the number in little endian uint64 format +// %v{little32} print the number in little endian uint32 format +// %v{little16} print the number in little endian uint16 format +// %v{little8} alias for byte +// +// %v{byte} print the number as a single byte +// %v{bool} print "true" if the number is non-zero, otherwise "false" +// +// All numbers are truncated as necessary per each given format. +// +// # Timestamps +// +// Timestamps can be specified in three formats: plain number formatting, +// native Go timestamp formatting, or strftime formatting. Number formatting is +// follows the rules above using the millisecond timestamp value. Go and +// strftime have further internal format options: +// +// %d{go##2006-01-02T15:04:05Z07:00##} +// %d{strftime[%F]} +// +// An arbitrary amount of pounds, braces, and brackets are understood before +// beginning the actual timestamp formatting. For Go formatting, the format is +// simply passed to the time package's AppendFormat function. For strftime, all +// "man strftime" options are supported. Time is always in UTC. 
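+//
+// As an editor-added illustration, a layout that combines several of the
+// verbs above could look like
+//
+//	%t[%p] %o %d{strftime[%F %T]} %k=%v\n
+//
+// which prints the topic, partition, offset, a formatted timestamp, and the
+// record's key and value, followed by a newline.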
+// +// # Attributes +// +// Records attributes require formatting, where each formatting option selects +// which attribute to print and how to print it. +// +// %a{compression} +// %a{compression;number} +// %a{compression;big64} +// %a{compression;hex8} +// +// By default, prints the compression as text ("none", "gzip", ...). +// Compression can be printed as a number with ";number", where number is any +// number formatting option described above. +// +// %a{timestamp-type} +// %a{timestamp-type;big64} +// +// Prints -1 for pre-0.10 records, 0 for client generated timestamps, and 1 for +// broker generated. Number formatting can be controlled with ";number". +// +// %a{transactional-bit} +// %a{transactional-bit;bool} +// +// Prints 1 if the record is a part of a transaction or 0 if it is not. Number +// formatting can be controlled with ";number". +// +// %a{control-bit} +// %a{control-bit;bool} +// +// Prints 1 if the record is a commit marker or 0 if it is not. Number +// formatting can be controlled with ";number". +// +// # Text +// +// Topics, keys, and values have "base64", "base64raw", "hex", and "unpack" +// formatting options: +// +// %t{hex} +// %k{unpack{iIqQc.$}} +// %v{base64} +// %v{base64raw} +// +// Unpack formatting is inside of enclosing pounds, braces, or brackets, the +// same way that timestamp formatting is understood. The syntax roughly follows +// Python's struct packing/unpacking rules: +// +// x pad character (does not parse input) +// < parse what follows as little endian +// > parse what follows as big endian +// +// b signed byte +// B unsigned byte +// h int16 ("half word") +// H uint16 ("half word") +// i int32 +// I uint32 +// q int64 ("quad word") +// Q uint64 ("quad word") +// +// c any character +// . alias for c +// s consume the rest of the input as a string +// $ match the end of the line (append error string if anything remains) +// +// Unlike python, a '<' or '>' can appear anywhere in the format string and +// affects everything that follows. It is possible to switch endianness +// multiple times. If the parser needs more data than available, or if the more +// input remains after '$', an error message will be appended. +func NewRecordFormatter(layout string) (*RecordFormatter, error) { + var f RecordFormatter + + var literal []byte // non-formatted raw text to output + var i int + for len(layout) > 0 { + i++ + c, size := utf8.DecodeRuneInString(layout) + rawc := layout[:size] + layout = layout[size:] + switch c { + default: + literal = append(literal, rawc...) + continue + + case '\\': + c, n, err := parseLayoutSlash(layout) + if err != nil { + return nil, err + } + layout = layout[n:] + literal = append(literal, c) + continue + + case '%': + } + + if len(layout) == 0 { + return nil, errors.New("invalid escape sequence at end of layout string") + } + + cNext, size := utf8.DecodeRuneInString(layout) + if cNext == '%' || cNext == '{' || cNext == '}' { + literal = append(literal, byte(cNext)) + layout = layout[size:] + continue + } + + var ( + isOpenBrace = len(layout) > 2 && layout[1] == '{' + handledBrace bool + escaped = layout[0] + ) + layout = layout[1:] + + // We are entering a format string. If we have any built + // literal before, this is now raw text that we will append. + if len(literal) > 0 { + l := literal + literal = nil + f.fns = append(f.fns, func(b []byte, _ *FetchPartition, _ *Record) []byte { return append(b, l...) 
}) + } + + if isOpenBrace { // opening a brace: layout continues after + layout = layout[1:] + } + + switch escaped { + default: + return nil, fmt.Errorf("unknown escape sequence %%%s", string(escaped)) + + case 'T', 'K', 'V', 'H', 'p', 'o', 'e', 'i', 'x', 'y', '[', '|', ']': + // Numbers default to ascii, but we support a bunch of + // formatting options. We parse the format here, and + // then below is switching on which field to print. + var numfn func([]byte, int64) []byte + if handledBrace = isOpenBrace; handledBrace { + numfn2, n, err := parseNumWriteLayout(layout) + if err != nil { + return nil, err + } + layout = layout[n:] + numfn = numfn2 + } else { + numfn = writeNumASCII + } + switch escaped { + case 'T': + f.fns = append(f.fns, func(b []byte, _ *FetchPartition, r *Record) []byte { + return writeR(b, r, func(b []byte, r *Record) []byte { return numfn(b, int64(len(r.Topic))) }) + }) + case 'K': + f.fns = append(f.fns, func(b []byte, _ *FetchPartition, r *Record) []byte { + return writeR(b, r, func(b []byte, r *Record) []byte { return numfn(b, int64(len(r.Key))) }) + }) + case 'V': + f.fns = append(f.fns, func(b []byte, _ *FetchPartition, r *Record) []byte { + return writeR(b, r, func(b []byte, r *Record) []byte { return numfn(b, int64(len(r.Value))) }) + }) + case 'H': + f.fns = append(f.fns, func(b []byte, _ *FetchPartition, r *Record) []byte { + return writeR(b, r, func(b []byte, r *Record) []byte { return numfn(b, int64(len(r.Headers))) }) + }) + case 'p': + f.fns = append(f.fns, func(b []byte, _ *FetchPartition, r *Record) []byte { + return writeR(b, r, func(b []byte, r *Record) []byte { return numfn(b, int64(r.Partition)) }) + }) + case 'o': + f.fns = append(f.fns, func(b []byte, _ *FetchPartition, r *Record) []byte { + return writeR(b, r, func(b []byte, r *Record) []byte { return numfn(b, r.Offset) }) + }) + case 'e': + f.fns = append(f.fns, func(b []byte, _ *FetchPartition, r *Record) []byte { + return writeR(b, r, func(b []byte, r *Record) []byte { return numfn(b, int64(r.LeaderEpoch)) }) + }) + case 'i': + f.fns = append(f.fns, func(b []byte, _ *FetchPartition, _ *Record) []byte { + return numfn(b, f.calls.Add(1)) + }) + case 'x': + f.fns = append(f.fns, func(b []byte, _ *FetchPartition, r *Record) []byte { + return writeR(b, r, func(b []byte, r *Record) []byte { return numfn(b, r.ProducerID) }) + }) + case 'y': + f.fns = append(f.fns, func(b []byte, _ *FetchPartition, r *Record) []byte { + return writeR(b, r, func(b []byte, r *Record) []byte { return numfn(b, int64(r.ProducerEpoch)) }) + }) + case '[': + f.fns = append(f.fns, func(b []byte, p *FetchPartition, _ *Record) []byte { + return writeP(b, p, func(b []byte, p *FetchPartition) []byte { return numfn(b, p.LogStartOffset) }) + }) + case '|': + f.fns = append(f.fns, func(b []byte, p *FetchPartition, _ *Record) []byte { + return writeP(b, p, func(b []byte, p *FetchPartition) []byte { return numfn(b, p.LastStableOffset) }) + }) + case ']': + f.fns = append(f.fns, func(b []byte, p *FetchPartition, _ *Record) []byte { + return writeP(b, p, func(b []byte, p *FetchPartition) []byte { return numfn(b, p.HighWatermark) }) + }) + } + + case 't', 'k', 'v': + var appendFn func([]byte, []byte) []byte + if handledBrace = isOpenBrace; handledBrace { + switch { + case strings.HasPrefix(layout, "}"): + layout = layout[len("}"):] + appendFn = appendPlain + case strings.HasPrefix(layout, "base64}"): + appendFn = appendBase64 + layout = layout[len("base64}"):] + case strings.HasPrefix(layout, "base64raw}"): + appendFn = appendBase64raw 
+ layout = layout[len("base64raw}"):] + case strings.HasPrefix(layout, "hex}"): + appendFn = appendHex + layout = layout[len("hex}"):] + case strings.HasPrefix(layout, "unpack"): + unpack, rem, err := nomOpenClose(layout[len("unpack"):]) + if err != nil { + return nil, fmt.Errorf("unpack parse err: %v", err) + } + if len(rem) == 0 || rem[0] != '}' { + return nil, fmt.Errorf("unpack missing closing } in %q", layout) + } + layout = rem[1:] + appendFn, err = parseUnpack(unpack) + if err != nil { + return nil, fmt.Errorf("unpack formatting parse err: %v", err) + } + + default: + return nil, fmt.Errorf("unknown %%%s{ escape", string(escaped)) + } + } else { + appendFn = appendPlain + } + switch escaped { + case 't': + f.fns = append(f.fns, func(b []byte, _ *FetchPartition, r *Record) []byte { + return writeR(b, r, func(b []byte, r *Record) []byte { return appendFn(b, []byte(r.Topic)) }) + }) + case 'k': + f.fns = append(f.fns, func(b []byte, _ *FetchPartition, r *Record) []byte { + return writeR(b, r, func(b []byte, r *Record) []byte { return appendFn(b, r.Key) }) + }) + case 'v': + f.fns = append(f.fns, func(b []byte, _ *FetchPartition, r *Record) []byte { + return writeR(b, r, func(b []byte, r *Record) []byte { return appendFn(b, r.Value) }) + }) + } + + case 'a': + if !isOpenBrace { + return nil, errors.New("missing open brace sequence on %a signifying how attributes should be written") + } + handledBrace = true + + num := func(skipText string, rfn func(*Record) int64) error { + layout = layout[len(skipText):] + numfn, n, err := parseNumWriteLayout(layout) + if err != nil { + return err + } + layout = layout[n:] + f.fns = append(f.fns, func(b []byte, _ *FetchPartition, r *Record) []byte { + return writeR(b, r, func(b []byte, r *Record) []byte { return numfn(b, rfn(r)) }) + }) + return nil + } + bi64 := func(b bool) int64 { + if b { + return 1 + } + return 0 + } + + switch { + case strings.HasPrefix(layout, "compression}"): + layout = layout[len("compression}"):] + f.fns = append(f.fns, func(b []byte, _ *FetchPartition, r *Record) []byte { + return writeR(b, r, func(b []byte, r *Record) []byte { + switch codecType(r.Attrs.CompressionType()) { + case codecNone: + return append(b, "none"...) + case codecGzip: + return append(b, "gzip"...) + case codecSnappy: + return append(b, "snappy"...) + case codecLZ4: + return append(b, "lz4"...) + case codecZstd: + return append(b, "zstd"...) + default: + return append(b, "unknown"...) 
+ } + }) + }) + case strings.HasPrefix(layout, "compression;"): + if err := num("compression;", func(r *Record) int64 { return int64(r.Attrs.CompressionType()) }); err != nil { + return nil, err + } + + case strings.HasPrefix(layout, "timestamp-type}"): + layout = layout[len("timestamp-type}"):] + f.fns = append(f.fns, func(b []byte, _ *FetchPartition, r *Record) []byte { + return writeR(b, r, func(b []byte, r *Record) []byte { + return strconv.AppendInt(b, int64(r.Attrs.TimestampType()), 10) + }) + }) + case strings.HasPrefix(layout, "timestamp-type;"): + if err := num("timestamp-type;", func(r *Record) int64 { return int64(r.Attrs.TimestampType()) }); err != nil { + return nil, err + } + + case strings.HasPrefix(layout, "transactional-bit}"): + layout = layout[len("transactional-bit}"):] + f.fns = append(f.fns, func(b []byte, _ *FetchPartition, r *Record) []byte { + return writeR(b, r, func(b []byte, r *Record) []byte { + if r.Attrs.IsTransactional() { + return append(b, '1') + } + return append(b, '0') + }) + }) + case strings.HasPrefix(layout, "transactional-bit;"): + if err := num("transactional-bit;", func(r *Record) int64 { return bi64(r.Attrs.IsTransactional()) }); err != nil { + return nil, err + } + + case strings.HasPrefix(layout, "control-bit}"): + layout = layout[len("control-bit}"):] + f.fns = append(f.fns, func(b []byte, _ *FetchPartition, r *Record) []byte { + return writeR(b, r, func(b []byte, r *Record) []byte { + if r.Attrs.IsControl() { + return append(b, '1') + } + return append(b, '0') + }) + }) + case strings.HasPrefix(layout, "control-bit;"): + if err := num("control-bit;", func(r *Record) int64 { return bi64(r.Attrs.IsControl()) }); err != nil { + return nil, err + } + + default: + return nil, errors.New("unknown %a formatting") + } + + case 'h': + if !isOpenBrace { + return nil, errors.New("missing open brace sequence on %h signifying how headers are written") + } + handledBrace = true + // Headers can have their own internal braces, so we + // must look for a matching end brace. + braces := 1 + at := 0 + for braces != 0 && len(layout[at:]) > 0 { + switch layout[at] { + case '{': + if at > 0 && layout[at-1] != '%' { + braces++ + } + case '}': + if at > 0 && layout[at-1] != '%' { + braces-- + } + } + at++ + } + if braces > 0 { + return nil, fmt.Errorf("invalid header specification: missing closing brace in %q", layout) + } + + spec := layout[:at-1] + layout = layout[at:] + inf, err := NewRecordFormatter(spec) + if err != nil { + return nil, fmt.Errorf("invalid header specification %q: %v", spec, err) + } + + f.fns = append(f.fns, func(b []byte, _ *FetchPartition, r *Record) []byte { + reuse := new(Record) + for _, header := range r.Headers { + reuse.Key = []byte(header.Key) + reuse.Value = header.Value + b = inf.AppendRecord(b, reuse) + } + return b + }) + + case 'd': + // For datetime parsing, we support plain millis in any + // number format, strftime, or go formatting. We + // default to plain ascii millis. 
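+ // For example (illustrative layouts, assuming the usual strftime and
+ // Go reference-time specifiers): %d{strftime[%Y-%m-%d %H:%M:%S]} or
+ // %d{go[2006-01-02 15:04:05]} formats the timestamp, while a plain
+ // %d appends integer milliseconds.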
+ handledBrace = isOpenBrace + if !handledBrace { + f.fns = append(f.fns, func(b []byte, _ *FetchPartition, r *Record) []byte { + return writeR(b, r, func(b []byte, r *Record) []byte { return strconv.AppendInt(b, r.Timestamp.UnixNano()/1e6, 10) }) + }) + continue + } + + switch { + case strings.HasPrefix(layout, "strftime"): + tfmt, rem, err := nomOpenClose(layout[len("strftime"):]) + if err != nil { + return nil, fmt.Errorf("strftime parse err: %v", err) + } + if len(rem) == 0 || rem[0] != '}' { + return nil, fmt.Errorf("%%d{strftime missing closing } in %q", layout) + } + layout = rem[1:] + f.fns = append(f.fns, func(b []byte, _ *FetchPartition, r *Record) []byte { + return writeR(b, r, func(b []byte, r *Record) []byte { return strftimeAppendFormat(b, tfmt, r.Timestamp.UTC()) }) + }) + + case strings.HasPrefix(layout, "go"): + tfmt, rem, err := nomOpenClose(layout[len("go"):]) + if err != nil { + return nil, fmt.Errorf("go parse err: %v", err) + } + if len(rem) == 0 || rem[0] != '}' { + return nil, fmt.Errorf("%%d{go missing closing } in %q", layout) + } + layout = rem[1:] + f.fns = append(f.fns, func(b []byte, _ *FetchPartition, r *Record) []byte { + return writeR(b, r, func(b []byte, r *Record) []byte { return r.Timestamp.UTC().AppendFormat(b, tfmt) }) + }) + + default: + numfn, n, err := parseNumWriteLayout(layout) + if err != nil { + return nil, fmt.Errorf("unknown %%d{ time specification in %q", layout) + } + layout = layout[n:] + + f.fns = append(f.fns, func(b []byte, _ *FetchPartition, r *Record) []byte { + return writeR(b, r, func(b []byte, r *Record) []byte { return numfn(b, r.Timestamp.UnixNano()/1e6) }) + }) + } + } + + // If we opened a brace, we require a closing brace. + if isOpenBrace && !handledBrace { + return nil, fmt.Errorf("unhandled open brace %q", layout) + } + } + + // Ensure we print any trailing text. + if len(literal) > 0 { + f.fns = append(f.fns, func(b []byte, _ *FetchPartition, _ *Record) []byte { return append(b, literal...) }) + } + + return &f, nil +} + +func appendPlain(dst, src []byte) []byte { + return append(dst, src...) +} + +func appendBase64(dst, src []byte) []byte { + fin := append(dst, make([]byte, base64.StdEncoding.EncodedLen(len(src)))...) + base64.StdEncoding.Encode(fin[len(dst):], src) + return fin +} + +func appendBase64raw(dst, src []byte) []byte { + fin := append(dst, make([]byte, base64.RawStdEncoding.EncodedLen(len(src)))...) + base64.RawStdEncoding.Encode(fin[len(dst):], src) + return fin +} + +func appendHex(dst, src []byte) []byte { + fin := append(dst, make([]byte, hex.EncodedLen(len(src)))...) + hex.Encode(fin[len(dst):], src) + return fin +} + +// nomOpenClose extracts a middle section from a string beginning with repeated +// delimiters and returns it as with remaining (past end delimiters) string. 
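+// For example, nomOpenClose("[[foo]]bar") should return ("foo", "bar", nil):
+// the two opening '[' delimiters capture everything up to the matching "]]".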
+func nomOpenClose(src string) (middle, remaining string, err error) { + if len(src) == 0 { + return "", "", errors.New("empty layout") + } + delim := src[0] + openers := 1 + for openers < len(src) && src[openers] == delim { + openers++ + } + switch delim { + case '{': + delim = '}' + case '[': + delim = ']' + case '(': + delim = ')' + } + src = src[openers:] + end := strings.Repeat(string(delim), openers) + idx := strings.Index(src, end) + if idx < 0 { + return "", "", fmt.Errorf("missing end delim %q", end) + } + middle = src[:idx] + return middle, src[idx+len(end):], nil +} + +func parseUnpack(layout string) (func([]byte, []byte) []byte, error) { + // take dst, src; return dst + // %!q(eof) + // take 8 bytes, decode it, print decoded + var fns []func([]byte, []byte) ([]byte, int) + little := true + var sawEnd bool + for i := range layout { + if sawEnd { + return nil, errors.New("already saw end-of-input parsing character") + } + + var need int + var signed bool + cs := layout[i : i+1] + switch cs[0] { + case 'x': + continue + + case '<': + little = true + continue + case '>': + little = false + continue + + case 'b': + need = 1 + signed = true + case 'B': + need = 1 + case 'h': + need = 2 + signed = true + case 'H': + need = 2 + case 'i': + need = 4 + signed = true + case 'I': + need = 4 + case 'q': + need = 8 + signed = true + case 'Q': + need = 8 + + case 'c', '.': + fns = append(fns, func(dst, src []byte) ([]byte, int) { + if len(src) < 1 { + return append(dst, "%!c(no bytes available)"...), 0 + } + return append(dst, src[0]), 1 + }) + continue + + case 's': + sawEnd = true + fns = append(fns, func(dst, src []byte) ([]byte, int) { + return append(dst, src...), len(src) + }) + continue + + case '$': + fns = append(fns, func(dst, src []byte) ([]byte, int) { + if len(src) != 0 { + dst = append(dst, "%!$(not end-of-input)"...) 
+ } + return dst, len(src) + }) + sawEnd = true + continue + + default: + return nil, fmt.Errorf("invalid unpack parsing character %s", cs) + } + + islittle := little + fns = append(fns, func(dst, src []byte) ([]byte, int) { + if len(src) < need { + return append(dst, fmt.Sprintf("%%!%%s(have %d bytes, need %d)", len(src), need)...), len(src) + } + + var ul, ub uint64 + var il, ib int64 + switch need { + case 1: + ul = uint64(src[0]) + ub = ul + il = int64(byte(ul)) + ib = int64(byte(ub)) + case 2: + ul = uint64(binary.LittleEndian.Uint16(src)) + ub = uint64(binary.BigEndian.Uint16(src)) + il = int64(int16(ul)) + ib = int64(int16(ub)) + case 4: + ul = uint64(binary.LittleEndian.Uint32(src)) + ub = uint64(binary.BigEndian.Uint32(src)) + il = int64(int32(ul)) + ib = int64(int32(ub)) + case 8: + ul = binary.LittleEndian.Uint64(src) + ub = binary.BigEndian.Uint64(src) + il = int64(ul) + ib = int64(ub) + } + u := ub + i := ib + if islittle { + u = ul + i = il + } + + if signed { + return strconv.AppendInt(dst, i, 10), need + } + return strconv.AppendUint(dst, u, 10), need + }) + } + + return func(dst, src []byte) []byte { + for _, fn := range fns { + var n int + dst, n = fn(dst, src) + src = src[n:] + } + return dst + }, nil +} + +func parseNumWriteLayout(layout string) (func([]byte, int64) []byte, int, error) { + braceEnd := strings.IndexByte(layout, '}') + if braceEnd == -1 { + return nil, 0, errors.New("missing brace end } to close number format specification") + } + end := braceEnd + 1 + switch layout = layout[:braceEnd]; layout { + case "ascii", "number": + return writeNumASCII, end, nil + case "hex64": + return writeNumHex64, end, nil + case "hex32": + return writeNumHex32, end, nil + case "hex16": + return writeNumHex16, end, nil + case "hex8": + return writeNumHex8, end, nil + case "hex4": + return writeNumHex4, end, nil + case "hex": + return writeNumHex, end, nil + case "big64": + return writeNumBig64, end, nil + case "big32": + return writeNumBig32, end, nil + case "big16": + return writeNumBig16, end, nil + case "byte", "big8", "little8": + return writeNumByte, end, nil + case "little64": + return writeNumLittle64, end, nil + case "little32": + return writeNumLittle32, end, nil + case "little16": + return writeNumLittle16, end, nil + case "bool": + return writeNumBool, end, nil + default: + return nil, 0, fmt.Errorf("invalid output number layout %q", layout) + } +} + +func writeR(b []byte, r *Record, fn func([]byte, *Record) []byte) []byte { + if r == nil { + return append(b, ""...) + } + return fn(b, r) +} + +func writeP(b []byte, p *FetchPartition, fn func([]byte, *FetchPartition) []byte) []byte { + if p == nil { + return append(b, ""...) 
+ } + return fn(b, p) +} +func writeNumASCII(b []byte, n int64) []byte { return strconv.AppendInt(b, n, 10) } + +const hexc = "0123456789abcdef" + +func writeNumHex64(b []byte, n int64) []byte { + u := uint64(n) + return append(b, + hexc[(u>>60)&0xf], + hexc[(u>>56)&0xf], + hexc[(u>>52)&0xf], + hexc[(u>>48)&0xf], + hexc[(u>>44)&0xf], + hexc[(u>>40)&0xf], + hexc[(u>>36)&0xf], + hexc[(u>>32)&0xf], + hexc[(u>>28)&0xf], + hexc[(u>>24)&0xf], + hexc[(u>>20)&0xf], + hexc[(u>>16)&0xf], + hexc[(u>>12)&0xf], + hexc[(u>>8)&0xf], + hexc[(u>>4)&0xf], + hexc[u&0xf], + ) +} + +func writeNumHex32(b []byte, n int64) []byte { + u := uint64(n) + return append(b, + hexc[(u>>28)&0xf], + hexc[(u>>24)&0xf], + hexc[(u>>20)&0xf], + hexc[(u>>16)&0xf], + hexc[(u>>12)&0xf], + hexc[(u>>8)&0xf], + hexc[(u>>4)&0xf], + hexc[u&0xf], + ) +} + +func writeNumHex16(b []byte, n int64) []byte { + u := uint64(n) + return append(b, + hexc[(u>>12)&0xf], + hexc[(u>>8)&0xf], + hexc[(u>>4)&0xf], + hexc[u&0xf], + ) +} + +func writeNumHex8(b []byte, n int64) []byte { + u := uint64(n) + return append(b, + hexc[(u>>4)&0xf], + hexc[u&0xf], + ) +} + +func writeNumHex4(b []byte, n int64) []byte { + u := uint64(n) + return append(b, + hexc[u&0xf], + ) +} + +func writeNumHex(b []byte, n int64) []byte { + return strconv.AppendUint(b, uint64(n), 16) +} + +func writeNumBig64(b []byte, n int64) []byte { + u := uint64(n) + return append(b, byte(u>>56), byte(u>>48), byte(u>>40), byte(u>>32), byte(u>>24), byte(u>>16), byte(u>>8), byte(u)) +} + +func writeNumLittle64(b []byte, n int64) []byte { + u := uint64(n) + return append(b, byte(u), byte(u>>8), byte(u>>16), byte(u>>24), byte(u>>32), byte(u>>40), byte(u>>48), byte(u>>56)) +} + +func writeNumBig32(b []byte, n int64) []byte { + u := uint64(n) + return append(b, byte(u>>24), byte(u>>16), byte(u>>8), byte(u)) +} + +func writeNumLittle32(b []byte, n int64) []byte { + u := uint64(n) + return append(b, byte(u), byte(u>>8), byte(u>>16), byte(u>>24)) +} +func writeNumBig16(b []byte, n int64) []byte { u := uint64(n); return append(b, byte(u>>8), byte(u)) } +func writeNumLittle16(b []byte, n int64) []byte { + u := uint64(n) + return append(b, byte(u), byte(u>>8)) +} +func writeNumByte(b []byte, n int64) []byte { u := uint64(n); return append(b, byte(u)) } + +func writeNumBool(b []byte, n int64) []byte { + if n == 0 { + return append(b, "false"...) + } + return append(b, "true"...) +} + +//////////// +// READER // +//////////// + +// RecordReader reads records from an io.Reader. +type RecordReader struct { + r *bufio.Reader + + buf []byte + fns []readParse + + done bool +} + +// NewRecordReader returns a record reader for the given layout, or an error if +// the layout is invalid. +// +// Similar to the RecordFormatter, the RecordReader parsing is quite powerful. +// There is a bit less to describe in comparison to RecordFormatter, but still, +// this documentation attempts to be as succinct as possible. +// +// Similar to the fmt package, record parsing is based off of slash escapes and +// percent "verbs" (copying fmt package lingo). Slashes are used for common +// escapes, +// +// \t \n \r \\ \xNN +// +// reading tabs, newlines, carriage returns, slashes, and hex encoded +// characters. 
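+//
+// For example, the read layout "%t\t%v\n" reads, per record, a topic up to a
+// tab and then a value up to a trailing newline (the verbs themselves are
+// listed just below).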
+// +// Percent encoding reads into specific values of a Record: +// +// %t topic +// %T topic length +// %k key +// %K key length +// %v value +// %V value length +// %h begin the header specification +// %H number of headers +// %p partition +// %o offset +// %e leader epoch +// %d timestamp +// %x producer id +// %y producer epoch +// +// If using length / number verbs (i.e., "sized" verbs), they must occur before +// what they are sizing. +// +// There are three escapes to parse raw characters, rather than opting into +// some formatting option: +// +// %% percent sign +// %{ left brace +// %} right brace +// +// Unlike record formatting, timestamps can only be read as numbers because Go +// or strftime formatting can both be variable length and do not play too well +// with delimiters. Timestamps numbers are read as milliseconds. +// +// # Numbers +// +// All size numbers can be parsed in the following ways: +// +// %v{ascii} parse numeric digits until a non-numeric +// %v{number} alias for ascii +// +// %v{hex64} read 16 hex characters for the number +// %v{hex32} read 8 hex characters for the number +// %v{hex16} read 4 hex characters for the number +// %v{hex8} read 2 hex characters for the number +// %v{hex4} read 1 hex characters for the number +// +// %v{big64} read the number as big endian uint64 format +// %v{big32} read the number as big endian uint32 format +// %v{big16} read the number as big endian uint16 format +// %v{big8} alias for byte +// +// %v{little64} read the number as little endian uint64 format +// %v{little32} read the number as little endian uint32 format +// %v{little16} read the number as little endian uint16 format +// %v{little8} read the number as a byte +// +// %v{byte} read the number as a byte +// %v{bool} read "true" as 1, "false" as 0 +// %v{3} read 3 characters (any number) +// +// # Header specification +// +// Similar to number formatting, headers are parsed using a nested primitive +// format option, accepting the key and value escapes previously mentioned. +// +// # Text +// +// Topics, keys, and values can be decoded using "base64", "hex", and "json" +// formatting options. Any size specification is the size of the encoded value +// actually being read (i.e., size as seen, not size when decoded). JSON values +// are compacted after being read. +// +// %T%t{hex} - 4abcd reads four hex characters "abcd" +// %V%v{base64} - 2z9 reads two base64 characters "z9" +// %v{json} %k - {"foo" : "bar"} foo reads a JSON object and then "foo" +// +// As well, these text options can be parsed with regular expressions: +// +// %k{re[\d*]}%v{re[\s+]} +func NewRecordReader(reader io.Reader, layout string) (*RecordReader, error) { + r := &RecordReader{r: bufio.NewReader(reader)} + if err := r.parseReadLayout(layout); err != nil { + return nil, err + } + return r, nil +} + +// ReadRecord reads the next record in the reader and returns it, or returns a +// parsing error. +// +// This will return io.EOF only if the underlying reader returns io.EOF at the +// start of a new record. If an io.EOF is returned mid record, this returns +// io.ErrUnexpectedEOF. It is expected for this function to be called until it +// returns io.EOF. +func (r *RecordReader) ReadRecord() (*Record, error) { + rec := new(Record) + return rec, r.ReadRecordInto(rec) +} + +// ReadRecordInto reads the next record into the given record and returns any +// parsing error +// +// This will return io.EOF only if the underlying reader returns io.EOF at the +// start of a new record. 
If an io.EOF is returned mid record, this returns +// io.ErrUnexpectedEOF. It is expected for this function to be called until it +// returns io.EOF. +func (r *RecordReader) ReadRecordInto(rec *Record) error { + if r.done { + return io.EOF + } + return r.next(rec) +} + +// SetReader replaces the underlying reader with the given reader. +func (r *RecordReader) SetReader(reader io.Reader) { + r.r = bufio.NewReader(reader) + r.done = false +} + +const ( + parsesTopic parseRecordBits = 1 << iota + parsesTopicSize + parsesKey + parsesKeySize + parsesValue + parsesValueSize + parsesHeaders + parsesHeadersNum +) + +// The record reading format must be either entirely sized or entirely unsized. +// This type helps us track what's what. +type parseRecordBits uint8 + +func (p *parseRecordBits) set(r parseRecordBits) { *p |= r } +func (p parseRecordBits) has(r parseRecordBits) bool { return p&r != 0 } + +func (r *RecordReader) parseReadLayout(layout string) error { + if len(layout) == 0 { + return errors.New("RecordReader: invalid empty format") + } + + var ( + // If we are reading by size, we parse the layout size into one + // of these variables. When reading, we use the captured + // variable's value. + topicSize = new(uint64) + keySize = new(uint64) + valueSize = new(uint64) + headersNum = new(uint64) + + bits parseRecordBits + + literal []byte // raw literal we are currently working on + addLiteral = func() { + if len(r.fns) > 0 && r.fns[len(r.fns)-1].read.empty() { + r.fns[len(r.fns)-1].read.delim = literal + } else if len(literal) > 0 { + r.fns = append(r.fns, readParse{ + read: readKind{exact: literal}, + }) + } + literal = nil + } + ) + + for len(layout) > 0 { + c, size := utf8.DecodeRuneInString(layout) + rawc := layout[:size] + layout = layout[size:] + switch c { + default: + literal = append(literal, rawc...) + continue + + case '\\': + c, n, err := parseLayoutSlash(layout) + if err != nil { + return err + } + layout = layout[n:] + literal = append(literal, c) + continue + + case '%': + } + + if len(layout) == 0 { + literal = append(literal, rawc...) 
+ continue + } + + cNext, size := utf8.DecodeRuneInString(layout) + if cNext == '%' || cNext == '{' || cNext == '}' { + literal = append(literal, byte(cNext)) + layout = layout[size:] + continue + } + + var ( + isOpenBrace = len(layout) > 2 && layout[1] == '{' + handledBrace bool + escaped = layout[0] + ) + layout = layout[1:] + addLiteral() + + if isOpenBrace { // opening a brace: layout continues after + layout = layout[1:] + } + + switch escaped { + default: + return fmt.Errorf("unknown percent escape sequence %q", layout[:1]) + + case 'T', 'K', 'V', 'H': + var dst *uint64 + var bit parseRecordBits + switch escaped { + case 'T': + dst, bit = topicSize, parsesTopicSize + case 'K': + dst, bit = keySize, parsesKeySize + case 'V': + dst, bit = valueSize, parsesValueSize + case 'H': + dst, bit = headersNum, parsesHeadersNum + } + if bits.has(bit) { + return fmt.Errorf("%%%s is doubly specified", string(escaped)) + } + if bits.has(bit >> 1) { + return fmt.Errorf("size specification %%%s cannot come after value specification %%%s", string(escaped), strings.ToLower(string(escaped))) + } + bits.set(bit) + fn, n, err := r.parseReadSize("ascii", dst, false) + if handledBrace = isOpenBrace; handledBrace { + fn, n, err = r.parseReadSize(layout, dst, true) + } + if err != nil { + return fmt.Errorf("unable to parse %%%s: %s", string(escaped), err) + } + layout = layout[n:] + r.fns = append(r.fns, fn) + + case 'p', 'o', 'e', 'd', 'x', 'y': + dst := new(uint64) + fn, n, err := r.parseReadSize("ascii", dst, false) + if handledBrace = isOpenBrace; handledBrace { + fn, n, err = r.parseReadSize(layout, dst, true) + } + if err != nil { + return fmt.Errorf("unable to parse %%%s: %s", string(escaped), err) + } + layout = layout[n:] + numParse := fn.parse + switch escaped { + case 'p': + fn.parse = func(b []byte, rec *Record) error { + if err := numParse(b, nil); err != nil { + return err + } + rec.Partition = int32(*dst) + return nil + } + case 'o': + fn.parse = func(b []byte, rec *Record) error { + if err := numParse(b, nil); err != nil { + return err + } + rec.Offset = int64(*dst) + return nil + } + case 'e': + fn.parse = func(b []byte, rec *Record) error { + if err := numParse(b, nil); err != nil { + return err + } + rec.LeaderEpoch = int32(*dst) + return nil + } + case 'd': + fn.parse = func(b []byte, rec *Record) error { + if err := numParse(b, nil); err != nil { + return err + } + rec.Timestamp = time.Unix(0, int64(*dst)*1e6) + return nil + } + case 'x': + fn.parse = func(b []byte, rec *Record) error { + if err := numParse(b, nil); err != nil { + return err + } + rec.ProducerID = int64(*dst) + return nil + } + case 'y': + fn.parse = func(b []byte, rec *Record) error { + if err := numParse(b, nil); err != nil { + return err + } + rec.ProducerEpoch = int16(*dst) + return nil + } + } + r.fns = append(r.fns, fn) + + case 't', 'k', 'v': + var decodeFn func([]byte) ([]byte, error) + var re *regexp.Regexp + var isJson bool + if handledBrace = isOpenBrace; handledBrace { + switch { + case strings.HasPrefix(layout, "}"): + layout = layout[len("}"):] + case strings.HasPrefix(layout, "base64}"): + decodeFn = decodeBase64 + layout = layout[len("base64}"):] + case strings.HasPrefix(layout, "hex}"): + decodeFn = decodeHex + layout = layout[len("hex}"):] + case strings.HasPrefix(layout, "json}"): + isJson = true + decodeFn = func(b []byte) ([]byte, error) { + var buf bytes.Buffer + err := json.Compact(&buf, b) + return buf.Bytes(), err + } + layout = layout[len("json}"):] + case strings.HasPrefix(layout, "re"): + 
restr, rem, err := nomOpenClose(layout[len("re"):]) + if err != nil { + return fmt.Errorf("re parse err: %v", err) + } + if len(rem) == 0 || rem[0] != '}' { + return fmt.Errorf("re missing closing } in %q", layout) + } + layout = rem[1:] + if !strings.HasPrefix(restr, "^") { + restr = "^" + restr + } + re, err = regexp.Compile(restr) + if err != nil { + return fmt.Errorf("re parse err: %v", err) + } + + default: + return fmt.Errorf("unknown %%%s{ escape", string(escaped)) + } + } + + var bit, bitSize parseRecordBits + var inner func([]byte, *Record) + var size *uint64 + switch escaped { + case 't': + bit, bitSize, size = parsesTopic, parsesTopicSize, topicSize + inner = func(b []byte, r *Record) { r.Topic = string(b) } + case 'k': + bit, bitSize, size = parsesKey, parsesKeySize, keySize + inner = func(b []byte, r *Record) { r.Key = dupslice(b) } + case 'v': + bit, bitSize, size = parsesValue, parsesValueSize, valueSize + inner = func(b []byte, r *Record) { r.Value = dupslice(b) } + } + + fn := readParse{parse: func(b []byte, r *Record) error { + if decodeFn != nil { + dec, err := decodeFn(b) + if err != nil { + return err + } + b = dec + } + inner(b, r) + return nil + }} + bit.set(bit) + if bits.has(bitSize) { + if re != nil { + return errors.New("cannot specify exact size and regular expression") + } + if isJson { + return errors.New("cannot specify exact size and json") + } + fn.read = readKind{sizefn: func() int { return int(*size) }} + } else if re != nil { + fn.read = readKind{re: re} + } else if isJson { + fn.read = readKind{condition: new(jsonReader).read} + } + r.fns = append(r.fns, fn) + + case 'h': + bits.set(parsesHeaders) + if !bits.has(parsesHeadersNum) { + return errors.New("missing header count specification %H before header specification %h") + } + if !isOpenBrace { + return errors.New("missing open brace sequence on %h signifying how headers are encoded") + } + handledBrace = true + // Similar to above, headers can have their own + // internal braces, so we look for a matching end. + braces := 1 + at := 0 + for braces != 0 && len(layout[at:]) > 0 { + switch layout[at] { + case '{': + if at > 0 && layout[at-1] != '%' { + braces++ + } + case '}': + if at > 0 && layout[at-1] != '%' { + braces-- + } + } + at++ + } + if braces > 0 { + return fmt.Errorf("invalid header specification: missing closing brace in %q", layout) + } + + // We parse the header specification recursively, but + // we require that it is sized and contains only keys + // and values. Checking the delimiter checks sizing. + var inr RecordReader + if err := inr.parseReadLayout(layout[:at-1]); err != nil { + return fmt.Errorf("invalid header specification: %v", err) + } + layout = layout[at:] + + // To parse headers, we save the inner reader's parsing + // function stash the current record's key/value before + // parsing, and then capture the key/value as a header. 
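+ // For example (an illustrative fully sized layout),
+ // "%H{big8}%h{%K{big8}%k%V{big8}%v}" reads a one-byte header count
+ // and then, per header, a one-byte key length, the key, a one-byte
+ // value length, and the value.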
+ r.fns = append(r.fns, readParse{read: readKind{handoff: func(r *RecordReader, rec *Record) error { + k, v := rec.Key, rec.Value + defer func() { rec.Key, rec.Value = k, v }() + inr.r = r.r + for i := uint64(0); i < *headersNum; i++ { + rec.Key, rec.Value = nil, nil + if err := inr.next(rec); err != nil { + return err + } + rec.Headers = append(rec.Headers, RecordHeader{Key: string(rec.Key), Value: rec.Value}) + } + return nil + }}}) + } + + if isOpenBrace && !handledBrace { + return fmt.Errorf("unhandled open brace %q", layout) + } + } + + addLiteral() + + // We must sort noreads to the front, we use this guarantee when + // reading to handle EOF properly. + var noreads, reads []readParse + for _, fn := range r.fns { + if fn.read.noread { + noreads = append(noreads, fn) + } else { + reads = append(reads, fn) + } + } + r.fns = make([]readParse, 0, len(noreads)+len(reads)) + r.fns = append(r.fns, noreads...) + r.fns = append(r.fns, reads...) + + return nil +} + +// Returns a function that parses a number from the internal reader into dst. +// +// If needBrace is true, the user is specifying how to read the number, +// otherwise we default to ascii. Reading ascii requires us to peek at bytes +// until we get to a non-number byte. +func (*RecordReader) parseReadSize(layout string, dst *uint64, needBrace bool) (readParse, int, error) { + var end int + if needBrace { + braceEnd := strings.IndexByte(layout, '}') + if braceEnd == -1 { + return readParse{}, 0, errors.New("missing brace end } to close number size specification") + } + layout = layout[:braceEnd] + end = braceEnd + 1 + } + + switch layout { + default: + num, err := strconv.Atoi(layout) + if err != nil { + return readParse{}, 0, fmt.Errorf("unrecognized number reading layout %q: %v", layout, err) + } + if num <= 0 { + return readParse{}, 0, fmt.Errorf("invalid zero or negative number %q when parsing read size", layout) + } + return readParse{ + readKind{noread: true}, + func([]byte, *Record) error { *dst = uint64(num); return nil }, + }, end, nil + + case "ascii", "number": + return readParse{ + readKind{condition: func(b byte) int8 { + if b < '0' || b > '9' { + return -1 + } + return 2 // ignore EOF if we hit it after this + }}, + func(b []byte, _ *Record) (err error) { + *dst, err = strconv.ParseUint(kbin.UnsafeString(b), 10, 64) + return err + }, + }, end, nil + + case "big64": + return readParse{ + readKind{size: 8}, + func(b []byte, _ *Record) error { *dst = binary.BigEndian.Uint64(b); return nil }, + }, end, nil + case "big32": + return readParse{ + readKind{size: 4}, + func(b []byte, _ *Record) error { *dst = uint64(binary.BigEndian.Uint32(b)); return nil }, + }, end, nil + case "big16": + return readParse{ + readKind{size: 2}, + func(b []byte, _ *Record) error { *dst = uint64(binary.BigEndian.Uint16(b)); return nil }, + }, end, nil + + case "little64": + return readParse{ + readKind{size: 8}, + func(b []byte, _ *Record) error { *dst = binary.LittleEndian.Uint64(b); return nil }, + }, end, nil + case "little32": + return readParse{ + readKind{size: 4}, + func(b []byte, _ *Record) error { *dst = uint64(binary.LittleEndian.Uint32(b)); return nil }, + }, end, nil + case "little16": + return readParse{ + readKind{size: 2}, + func(b []byte, _ *Record) error { *dst = uint64(binary.LittleEndian.Uint16(b)); return nil }, + }, end, nil + + case "byte", "big8", "little8": + return readParse{ + readKind{size: 1}, + func(b []byte, _ *Record) error { *dst = uint64(b[0]); return nil }, + }, end, nil + + case "hex64": + return readParse{ + 
readKind{size: 16}, + func(b []byte, _ *Record) (err error) { + *dst, err = strconv.ParseUint(kbin.UnsafeString(b), 16, 64) + return err + }, + }, end, nil + case "hex32": + return readParse{ + readKind{size: 8}, + func(b []byte, _ *Record) (err error) { + *dst, err = strconv.ParseUint(kbin.UnsafeString(b), 16, 64) + return err + }, + }, end, nil + case "hex16": + return readParse{ + readKind{size: 4}, + func(b []byte, _ *Record) (err error) { + *dst, err = strconv.ParseUint(kbin.UnsafeString(b), 16, 64) + return err + }, + }, end, nil + case "hex8": + return readParse{ + readKind{size: 2}, + func(b []byte, _ *Record) (err error) { + *dst, err = strconv.ParseUint(kbin.UnsafeString(b), 16, 64) + return err + }, + }, end, nil + case "hex4": + return readParse{ + readKind{size: 1}, + func(b []byte, _ *Record) (err error) { + *dst, err = strconv.ParseUint(kbin.UnsafeString(b), 16, 64) + return err + }, + }, end, nil + + case "bool": + const ( + stateUnknown uint8 = iota + stateTrue + stateFalse + ) + var state uint8 + var last byte + return readParse{ + readKind{condition: func(b byte) (done int8) { + defer func() { + if done <= 0 { + state = stateUnknown + last = 0 + } + }() + + switch state { + default: // stateUnknown + if b == 't' { + state = stateTrue + last = b + return 1 + } else if b == 'f' { + state = stateFalse + last = b + return 1 + } + return -1 + + case stateTrue: + if last == 't' && b == 'r' || last == 'r' && b == 'u' { + last = b + return 1 + } else if last == 'u' && b == 'e' { + return 0 + } + return -1 + + case stateFalse: + if last == 'f' && b == 'a' || last == 'a' && b == 'l' || last == 'l' && b == 's' { + last = b + return 1 + } else if last == 's' && b == 'e' { + return 0 + } + return -1 + } + }}, + func(b []byte, _ *Record) error { + switch string(b) { + case "true": + *dst = 1 + case "false": + *dst = 0 + default: + return fmt.Errorf("invalid bool %s", b) + } + return nil + }, + }, end, nil + } +} + +func decodeBase64(b []byte) ([]byte, error) { + n, err := base64.StdEncoding.Decode(b[:base64.StdEncoding.DecodedLen(len(b))], b) + return b[:n], err +} + +func decodeHex(b []byte) ([]byte, error) { + n, err := hex.Decode(b[:hex.DecodedLen(len(b))], b) + return b[:n], err +} + +type readKind struct { + noread bool + exact []byte + condition func(byte) int8 // -2: error, -1: stop, do not consume input; 0: stop, consume input; 1: keep going, consume input, 2: keep going, consume input, can EOF + size int + sizefn func() int + handoff func(*RecordReader, *Record) error + delim []byte + re *regexp.Regexp +} + +func (r *readKind) empty() bool { + return !r.noread && + r.exact == nil && + r.condition == nil && + r.size == 0 && + r.sizefn == nil && + r.handoff == nil && + r.delim == nil && + r.re == nil +} + +type readParse struct { + read readKind + parse func([]byte, *Record) error +} + +func dupslice(b []byte) []byte { + if len(b) == 0 { + return nil + } + dup := make([]byte, len(b)) + copy(dup, b) + return dup +} + +func (r *RecordReader) next(rec *Record) error { + for i, fn := range r.fns { + r.buf = r.buf[:0] + + var err error + switch { + case fn.read.noread: + // do nothing + case fn.read.exact != nil: + err = r.readExact(fn.read.exact) + case fn.read.condition != nil: + err = r.readCondition(fn.read.condition) + case fn.read.size > 0: + err = r.readSize(fn.read.size) + case fn.read.sizefn != nil: + err = r.readSize(fn.read.sizefn()) + case fn.read.handoff != nil: + err = fn.read.handoff(r, rec) + case fn.read.re != nil: + err = r.readRe(fn.read.re) + default: + err = 
r.readDelim(fn.read.delim) // we *always* fall back to delim parsing + } + + switch err { + default: + return err + case nil: + case io.EOF, io.ErrUnexpectedEOF: + r.done = true + // We guarantee that all noread parses are at + // the front, so if we io.EOF on the first + // non-noread, then we bubble it up. + if len(r.buf) == 0 && (i == 0 || r.fns[i-1].read.noread) { + return io.EOF + } + if i != len(r.fns)-1 || err == io.ErrUnexpectedEOF { + return io.ErrUnexpectedEOF + } + } + + if fn.parse == nil { + continue + } + + if err := fn.parse(r.buf, rec); err != nil { + return err + } + } + return nil +} + +func (r *RecordReader) readCondition(fn func(byte) int8) error { + var ignoreEOF bool + for { + peek, err := r.r.Peek(1) + if err != nil { + if err == io.EOF && ignoreEOF { + err = nil + } + return err + } + ignoreEOF = false + c := peek[0] + switch fn(c) { + case -2: + return fmt.Errorf("invalid input %q", c) + case -1: + return nil + case 0: + r.r.Discard(1) + r.buf = append(r.buf, c) + return nil + case 1: + case 2: + ignoreEOF = true + } + r.r.Discard(1) + r.buf = append(r.buf, c) + } +} + +type reReader struct { + r *RecordReader + peek []byte + err error +} + +func (re *reReader) ReadRune() (r rune, size int, err error) { + re.peek, re.err = re.r.r.Peek(len(re.peek) + 1) + if re.err != nil { + return 0, 0, re.err + } + return rune(re.peek[len(re.peek)-1]), 1, nil +} + +func (r *RecordReader) readRe(re *regexp.Regexp) error { + reader := reReader{r: r} + loc := re.FindReaderIndex(&reader) + if loc == nil { + if reader.err == io.EOF && len(reader.peek) > 0 { + return fmt.Errorf("regexp text mismatch, saw %q", reader.peek) + } + return reader.err + } + n := loc[1] // we ensure the regexp begins with ^, so we only need the end + r.buf = append(r.buf, reader.peek[:n]...) + r.r.Discard(n) + if n == len(reader.peek) { + return reader.err + } + return nil +} + +func (r *RecordReader) readSize(n int) error { + r.buf = append(r.buf, make([]byte, n)...) + n, err := io.ReadFull(r.r, r.buf) + r.buf = r.buf[:n] + return err +} + +func (r *RecordReader) readExact(d []byte) error { + if err := r.readSize(len(d)); err != nil { + return err + } + if !bytes.Equal(d, r.buf) { + return fmt.Errorf("exact text mismatch, read %q when expecting %q", r.buf, d) + } + return nil +} + +func (r *RecordReader) readDelim(d []byte) error { + // Empty delimiters opt in to reading the rest of the text. + if len(d) == 0 { + b, err := io.ReadAll(r.r) + r.buf = b + // ReadAll stops at io.EOF, but we need to bubble that up. + if err == nil { + return io.EOF + } + return err + } + + // We use the simple inefficient search algorithm, which can be O(nm), + // but we aren't expecting huge search spaces. Long term we could + // convert to a two-way search. + for { + peek, err := r.r.Peek(len(d)) + if err != nil { + // If we peek an io.EOF, we were looking for our delim + // and hit the end. This is unexpected. + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return err + } + if !bytes.Equal(peek, d) { + // We did not find our delim. Skip the first char + // then continue again. + r.buf = append(r.buf, peek[0]) + r.r.Discard(1) + continue + } + // We found our delim. We discard it and return. + r.r.Discard(len(d)) + return nil + } +} + +type jsonReader struct { + state int8 + n int8 // misc. 
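+ // nexts is a stack of states to pop back to as nested values
+ // (object members, array elements) complete; see pushState and
+ // popState below.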
+ nexts []int8 +} + +func (*jsonReader) isHex(c byte) bool { + switch c { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', + 'a', 'b', 'c', 'd', 'e', 'f', + 'A', 'B', 'C', 'D', 'E', 'F': + return true + default: + return false + } +} + +func (*jsonReader) isNum(c byte) bool { + switch c { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + return true + } + return false +} + +func (*jsonReader) isNat(c byte) bool { + switch c { + case '1', '2', '3', '4', '5', '6', '7', '8', '9': + return true + } + return false +} + +func (*jsonReader) isE(c byte) bool { + return c == 'e' || c == 'E' +} + +const ( + jrstAny int8 = iota + jrstObj + jrstObjSep + jrstObjFin + jrstArr + jrstArrFin + jrstStrBegin + jrstStr + jrstStrEsc + jrstStrEscU + jrstTrue + jrstFalse + jrstNull + jrstNeg + jrstOne + jrstDotOrE + jrstDot + jrstE +) + +func (r *jsonReader) read(c byte) (rr int8) { +start: + switch r.state { + case jrstAny: + switch c { + case ' ', '\t', '\n', '\r': + return 1 // skip whitespace, need more + case '{': + r.state = jrstObj + return 1 // object open, need more + case '[': + r.state = jrstArr + return 1 // array open, need more + case '"': + r.state = jrstStr + return 1 // string open, need more + case 't': + r.state = jrstTrue + r.n = 0 + return 1 // beginning of true, need more + case 'f': + r.state = jrstFalse + r.n = 0 + return 1 // beginning of false, need more + case 'n': + r.state = jrstNull + r.n = 0 + return 1 // beginning of null, need more + case '-': + r.state = jrstNeg + return 1 // beginning of negative number, need more + case '0': + r.state = jrstDotOrE + return 1 // beginning of 0e or 0., need more + case '1', '2', '3', '4', '5', '6', '7', '8', '9': + r.state = jrstOne + return 1 // beginning of number, need more + default: + return -2 // invalid json + } + + case jrstObj: + switch c { + case ' ', '\t', '\n', '\r': + return 1 // skip whitespace in json object, need more + case '"': + r.pushState(jrstStr, jrstObjSep) + return 1 // beginning of object key, need to finish, transition to obj sep + case '}': + return r.popState() // end of object, this is valid json end, pop state + default: + return -2 // invalid json: expected object key + } + case jrstObjSep: + switch c { + case ' ', '\t', '\n', '\r': + return 1 // skip whitespace in json object, need more + case ':': + r.pushState(jrstAny, jrstObjFin) + return 1 // beginning of object value, need to finish, transition to obj fin + default: + return -2 // invalid json: expected object separator + } + case jrstObjFin: + switch c { + case ' ', '\r', '\t', '\n': + return 1 // skip whitespace in json object, need more + case ',': + r.pushState(jrstStrBegin, jrstObjSep) + return 1 // beginning of new object key, need to finish, transition to obj sep + case '}': + return r.popState() // end of object, this is valid json end, pop state + default: + return -2 // invalid json + } + + case jrstArr: + switch c { + case ' ', '\r', '\t', '\n': + return 1 // skip whitespace in json array, need more + case ']': + return r.popState() // end of array, this is valid json end, pop state + default: + r.pushState(jrstAny, jrstArrFin) + goto start // array value began: immediately transition to it + } + case jrstArrFin: + switch c { + case ' ', '\r', '\t', '\n': + return 1 // skip whitespace in json array, need more + case ',': + r.state = jrstArr + return 1 // beginning of new array value, need more + case ']': + return r.popState() // end of array, this is valid json end, pop state + default: + return -2 // invalid json + } + + case 
jrstStrBegin: + switch c { + case ' ', '\r', '\t', '\n': + return 1 // skip whitespace in json object (before beginning of key), need more + case '"': + r.state = jrstStr + return 1 // beginning of object key, need more + default: + return -2 // invalid json + } + + case jrstStr: + switch c { + case 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31: + return -2 // invalid json: control characters not allowed in string + case '"': + return r.popState() // end of string, this is valid json end, pop state + case '\\': + r.state = jrstStrEsc + return 1 // beginning of escape sequence, need more + default: + return 1 // continue string, need more + } + case jrstStrEsc: + switch c { + case 'b', 'f', 'n', 'r', 't', '\\', '/', '"': + r.state = jrstStr + return 1 // end of escape sequence, still need to finish string + case 'u': + r.state = jrstStrEscU + r.n = 0 + return 1 // beginning of unicode escape sequence, need more + default: + return -2 // invalid json: invalid escape sequence + } + case jrstStrEscU: + if !r.isHex(c) { + return -2 // invalid json: invalid unicode escape sequence + } + r.n++ + if r.n == 4 { + r.state = jrstStr + } + return 1 // end of unicode escape sequence, still need to finish string + + case jrstTrue: + switch { + case r.n == 0 && c == 'r': + r.n++ + return 1 + case r.n == 1 && c == 'u': + r.n++ + return 1 + case r.n == 2 && c == 'e': + return r.popState() // end of true, this is valid json end, pop state + } + case jrstFalse: + switch { + case r.n == 0 && c == 'a': + r.n++ + return 1 + case r.n == 1 && c == 'l': + r.n++ + return 1 + case r.n == 2 && c == 's': + r.n++ + return 1 + case r.n == 3 && c == 'e': + return r.popState() // end of false, this is valid json end, pop state + } + case jrstNull: + switch { + case r.n == 0 && c == 'u': + r.n++ + return 1 + case r.n == 1 && c == 'l': + r.n++ + return 1 + case r.n == 2 && c == 'l': + return r.popState() // end of null, this is valid json end, pop state + } + + case jrstNeg: + if c == '0' { + r.state = jrstDotOrE + return r.oneOrTwo() // beginning of -0, need to see if there is more (potentially end) + } else if r.isNat(c) { + r.state = jrstOne + return r.oneOrTwo() // beginning of -1 (or 2,3,..9), need to see if there is more (potentially end) + } + return -2 // invalid, -a or something + case jrstOne: + if r.isNum(c) { + return r.oneOrTwo() // continue the number (potentially end) + } + fallthrough // not a number, check if e or . + case jrstDotOrE: + if r.isE(c) { + r.state = jrstE + return 1 // beginning of exponent, need more + } + if c == '.' 
{ + r.state = jrstDot + r.n = 0 + return 1 // beginning of dot, need more + } + if r.popStateToStart() { + goto start + } + return -1 // done with number, no more state to bubble to: we are done + + case jrstDot: + switch r.n { + case 0: + if !r.isNum(c) { + return -2 // first char after dot must be a number + } + r.n = 1 + return r.oneOrTwo() // saw number, keep and continue (potentially end) + case 1: + if r.isNum(c) { + return r.oneOrTwo() // more number, keep and continue (potentially end) + } + if r.isE(c) { + r.state = jrstE + r.n = 0 + return 1 // beginning of exponent (-0.1e), need more + } + if r.popStateToStart() { + goto start + } + return -1 // done with number, no more state to bubble to: we are done + } + case jrstE: + switch r.n { + case 0: + if c == '+' || c == '-' { + r.n = 1 + return 1 // beginning of exponent sign, need more + } + fallthrough + case 1: + if !r.isNum(c) { + return -2 // first char after exponent must be sign or number + } + r.n = 2 + return r.oneOrTwo() // saw number, keep and continue (potentially end) + case 2: + if r.isNum(c) { + return r.oneOrTwo() // more number, keep and continue (potentially end) + } + if r.popStateToStart() { + goto start + } + return -1 // done with number, no more state to bubble to: we are done + } + } + return -2 // unknown state +} + +func (r *jsonReader) pushState(next, next2 int8) { + r.nexts = append(r.nexts, next2) + r.state = next +} + +func (r *jsonReader) popState() int8 { + if len(r.nexts) == 0 { + r.state = jrstAny + return 0 + } + r.state = r.nexts[len(r.nexts)-1] + r.nexts = r.nexts[:len(r.nexts)-1] + return 1 +} + +func (r *jsonReader) popStateToStart() bool { + if len(r.nexts) == 0 { + r.state = jrstAny + return false + } + r.state = r.nexts[len(r.nexts)-1] + r.nexts = r.nexts[:len(r.nexts)-1] + return true +} + +func (r *jsonReader) oneOrTwo() int8 { + if len(r.nexts) > 0 { + return 1 + } + return 2 +} + +//////////// +// COMMON // +//////////// + +func parseLayoutSlash(layout string) (byte, int, error) { + if len(layout) == 0 { + return 0, 0, errors.New("invalid slash escape at end of delim string") + } + switch layout[0] { + case 't': + return '\t', 1, nil + case 'n': + return '\n', 1, nil + case 'r': + return '\r', 1, nil + case '\\': + return '\\', 1, nil + case 'x': + if len(layout) < 3 { // on x, need two more + return 0, 0, errors.New("invalid non-terminated hex escape sequence at end of delim string") + } + hex := layout[1:3] + n, err := strconv.ParseInt(hex, 16, 8) + if err != nil { + return 0, 0, fmt.Errorf("unable to parse hex escape sequence %q: %v", hex, err) + } + return byte(n), 3, nil + default: + return 0, 0, fmt.Errorf("unknown slash escape sequence %q", layout[:1]) + } +} diff --git a/vendor/github.com/twmb/franz-go/pkg/kgo/ring.go b/vendor/github.com/twmb/franz-go/pkg/kgo/ring.go new file mode 100644 index 000000000000..3ef989f49bf4 --- /dev/null +++ b/vendor/github.com/twmb/franz-go/pkg/kgo/ring.go @@ -0,0 +1,269 @@ +package kgo + +import "sync" + +// The ring types below are fixed sized blocking MPSC ringbuffers. These +// replace channels in a few places in this client. The *main* advantage they +// provide is to allow loops that terminate. +// +// With channels, we always have to have a goroutine draining the channel. We +// cannot start the goroutine when we add the first element, because the +// goroutine will immediately drain the first and if something produces right +// away, it will start a second concurrent draining goroutine. 
+// +// We cannot fix that by adding a "working" field, because we would need a lock +// around checking if the goroutine still has elements *and* around setting the +// working field to false. If a push was blocked, it would be holding the lock, +// which would block the worker from grabbing the lock. Any other lock ordering +// has TOCTOU problems as well. +// +// We could use a slice that we always push to and pop the front of. This is a +// bit easier to reason about, but constantly reallocates and has no bounded +// capacity. The second we think about adding bounded capacity, we get this +// ringbuffer below. +// +// The key insight is that we only pop the front *after* we are done with it. +// If there are still more elements, the worker goroutine can continue working. +// If there are no more elements, it can quit. When pushing, if the pusher +// pushed the first element, it starts the worker. +// +// Pushes fail if the ring is dead, allowing the pusher to fail any promise. +// If a die happens while a worker is running, all future pops will see the +// ring is dead and can fail promises immediately. If a worker is not running, +// then there are no promises that need to be called. +// +// We use size 8 buffers because eh why not. This gives us a small optimization +// of masking to increment and decrement, rather than modulo arithmetic. + +const ( + mask7 = 0b0000_0111 + eight = mask7 + 1 +) + +type ringReq struct { + mu sync.Mutex + c *sync.Cond + + elems [eight]promisedReq + + head uint8 + tail uint8 + l uint8 + dead bool +} + +func (r *ringReq) die() { + r.mu.Lock() + defer r.mu.Unlock() + + r.dead = true + if r.c != nil { + r.c.Broadcast() + } +} + +func (r *ringReq) push(pr promisedReq) (first, dead bool) { + r.mu.Lock() + defer r.mu.Unlock() + + for r.l == eight && !r.dead { + if r.c == nil { + r.c = sync.NewCond(&r.mu) + } + r.c.Wait() + } + + if r.dead { + return false, true + } + + r.elems[r.tail] = pr + r.tail = (r.tail + 1) & mask7 + r.l++ + + return r.l == 1, false +} + +func (r *ringReq) dropPeek() (next promisedReq, more, dead bool) { + r.mu.Lock() + defer r.mu.Unlock() + + r.elems[r.head] = promisedReq{} + r.head = (r.head + 1) & mask7 + r.l-- + + // If the cond has been initialized, there could potentially be waiters + // and we must always signal. + if r.c != nil { + r.c.Signal() + } + + return r.elems[r.head], r.l > 0, r.dead +} + +// ringResp duplicates the code above, but for promisedResp +type ringResp struct { + mu sync.Mutex + c *sync.Cond + + elems [eight]promisedResp + + head uint8 + tail uint8 + l uint8 + dead bool +} + +func (r *ringResp) die() { + r.mu.Lock() + defer r.mu.Unlock() + + r.dead = true + if r.c != nil { + r.c.Broadcast() + } +} + +func (r *ringResp) push(pr promisedResp) (first, dead bool) { + r.mu.Lock() + defer r.mu.Unlock() + + for r.l == eight && !r.dead { + if r.c == nil { + r.c = sync.NewCond(&r.mu) + } + r.c.Wait() + } + + if r.dead { + return false, true + } + + r.elems[r.tail] = pr + r.tail = (r.tail + 1) & mask7 + r.l++ + + return r.l == 1, false +} + +func (r *ringResp) dropPeek() (next promisedResp, more, dead bool) { + r.mu.Lock() + defer r.mu.Unlock() + + r.elems[r.head] = promisedResp{} + r.head = (r.head + 1) & mask7 + r.l-- + + if r.c != nil { + r.c.Signal() + } + + return r.elems[r.head], r.l > 0, r.dead +} + +// ringSeqResp duplicates the code above, but for *seqResp. We leave off die +// because we do not use it, but we keep `c` for testing lowering eight/mask7. 
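+//
+// A sketch of the intended push/drain flow (illustrative only; handle stands
+// in for whatever work the caller does with each element):
+//
+//	if first := r.push(sr); first {
+//		go func() {
+//			for sr, more := sr, true; more; sr, more = r.dropPeek() {
+//				handle(sr)
+//			}
+//		}()
+//	}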
+type ringSeqResp struct { + mu sync.Mutex + c *sync.Cond + + elems [eight]*seqResp + + head uint8 + tail uint8 + l uint8 +} + +func (r *ringSeqResp) push(sr *seqResp) (first bool) { + r.mu.Lock() + defer r.mu.Unlock() + + for r.l == eight { + if r.c == nil { + r.c = sync.NewCond(&r.mu) + } + r.c.Wait() + } + + r.elems[r.tail] = sr + r.tail = (r.tail + 1) & mask7 + r.l++ + + return r.l == 1 +} + +func (r *ringSeqResp) dropPeek() (next *seqResp, more bool) { + r.mu.Lock() + defer r.mu.Unlock() + + r.elems[r.head] = nil + r.head = (r.head + 1) & mask7 + r.l-- + + if r.c != nil { + r.c.Signal() + } + + return r.elems[r.head], r.l > 0 +} + +// Also no die; this type is slightly different because we can have overflow. +// If we have overflow, we add to overflow until overflow is drained -- we +// always want strict odering. +type ringBatchPromise struct { + mu sync.Mutex + + elems [eight]batchPromise + + head uint8 + tail uint8 + l uint8 + + overflow []batchPromise +} + +func (r *ringBatchPromise) push(b batchPromise) (first bool) { + r.mu.Lock() + defer r.mu.Unlock() + + // If the ring is full, we go into overflow; if overflow is non-empty, + // for ordering purposes, we add to the end of overflow. We only go + // back to using the ring once overflow is finally empty. + if r.l == eight || len(r.overflow) > 0 { + r.overflow = append(r.overflow, b) + return false + } + + r.elems[r.tail] = b + r.tail = (r.tail + 1) & mask7 + r.l++ + + return r.l == 1 +} + +func (r *ringBatchPromise) dropPeek() (next batchPromise, more bool) { + r.mu.Lock() + defer r.mu.Unlock() + + // We always drain the ring first. If the ring is ever empty, there + // must be overflow: we would not be here if the ring is not-empty. + if r.l > 1 { + r.elems[r.head] = batchPromise{} + r.head = (r.head + 1) & mask7 + r.l-- + return r.elems[r.head], true + } else if r.l == 1 { + r.elems[r.head] = batchPromise{} + r.head = (r.head + 1) & mask7 + r.l-- + if len(r.overflow) == 0 { + return next, false + } + return r.overflow[0], true + } + r.overflow = r.overflow[1:] + if len(r.overflow) > 0 { + return r.overflow[0], true + } + return next, false +} diff --git a/vendor/github.com/twmb/franz-go/pkg/kgo/sink.go b/vendor/github.com/twmb/franz-go/pkg/kgo/sink.go new file mode 100644 index 000000000000..6d0f3dfe008d --- /dev/null +++ b/vendor/github.com/twmb/franz-go/pkg/kgo/sink.go @@ -0,0 +1,2380 @@ +package kgo + +import ( + "bytes" + "context" + "errors" + "fmt" + "hash/crc32" + "math" + "strings" + "sync" + "sync/atomic" + "time" + + "github.com/twmb/franz-go/pkg/kbin" + "github.com/twmb/franz-go/pkg/kerr" + "github.com/twmb/franz-go/pkg/kmsg" +) + +type sink struct { + cl *Client // our owning client, for cfg, metadata triggering, context, etc. + nodeID int32 // the node ID of the broker this sink belongs to + + // inflightSem controls the number of concurrent produce requests. We + // start with a limit of 1, which covers Kafka v0.11.0. On the first + // response, we check what version was set in the request. If it is at + // least 4, which 1.0 introduced, we upgrade the sem size. + inflightSem atomic.Value + produceVersion atomicI32 // negative is unset, positive is version + + drainState workLoop + + // seqRespsMu, guarded by seqRespsMu, contains responses that must + // be handled sequentially. These responses are handled asynchronously, + // but sequentially. 
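+ // (seqResps is the ring itself; its internal mutex serializes pushes, and
+ // a single drain goroutine pops responses in push order.)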
+ seqResps ringSeqResp + + backoffMu sync.Mutex // guards the following + needBackoff bool + backoffSeq uint32 // prevents pile on failures + + // consecutiveFailures is incremented every backoff and cleared every + // successful response. For simplicity, if we have a good response + // following an error response before the error response's backoff + // occurs, the backoff is not cleared. + consecutiveFailures atomicU32 + + recBufsMu sync.Mutex // guards the following + recBufs []*recBuf // contains all partition records for batch building + recBufsStart int // incremented every req to avoid large batch starvation +} + +type seqResp struct { + resp kmsg.Response + err error + done chan struct{} + br *broker + promise func(*broker, kmsg.Response, error) +} + +func (cl *Client) newSink(nodeID int32) *sink { + s := &sink{ + cl: cl, + nodeID: nodeID, + } + s.produceVersion.Store(-1) + maxInflight := 1 + if cl.cfg.disableIdempotency { + maxInflight = cl.cfg.maxProduceInflight + } + s.inflightSem.Store(make(chan struct{}, maxInflight)) + return s +} + +// createReq returns a produceRequest from currently buffered records +// and whether there are more records to create more requests immediately. +func (s *sink) createReq(id int64, epoch int16) (*produceRequest, *kmsg.AddPartitionsToTxnRequest, bool) { + req := &produceRequest{ + txnID: s.cl.cfg.txnID, + acks: s.cl.cfg.acks.val, + timeout: int32(s.cl.cfg.produceTimeout.Milliseconds()), + batches: make(seqRecBatches, 5), + + producerID: id, + producerEpoch: epoch, + + hasHook: s.cl.producer.hasHookBatchWritten, + compressor: s.cl.compressor, + + wireLength: s.cl.baseProduceRequestLength(), // start length with no topics + wireLengthLimit: s.cl.cfg.maxBrokerWriteBytes, + } + txnBuilder := txnReqBuilder{ + txnID: req.txnID, + id: id, + epoch: epoch, + } + + var moreToDrain bool + + s.recBufsMu.Lock() + defer s.recBufsMu.Unlock() + + recBufsIdx := s.recBufsStart + for i := 0; i < len(s.recBufs); i++ { + recBuf := s.recBufs[recBufsIdx] + recBufsIdx = (recBufsIdx + 1) % len(s.recBufs) + + recBuf.mu.Lock() + if recBuf.failing || len(recBuf.batches) == recBuf.batchDrainIdx || recBuf.inflightOnSink != nil && recBuf.inflightOnSink != s || recBuf.inflight != 0 && !recBuf.okOnSink { + recBuf.mu.Unlock() + continue + } + + batch := recBuf.batches[recBuf.batchDrainIdx] + if added := req.tryAddBatch(s.produceVersion.Load(), recBuf, batch); !added { + recBuf.mu.Unlock() + moreToDrain = true + continue + } + + recBuf.inflightOnSink = s + recBuf.inflight++ + + recBuf.batchDrainIdx++ + recBuf.seq = incrementSequence(recBuf.seq, int32(len(batch.records))) + moreToDrain = moreToDrain || recBuf.tryStopLingerForDraining() + recBuf.mu.Unlock() + + txnBuilder.add(recBuf) + } + + // We could have lost our only record buffer just before we grabbed the + // lock above, so we have to check there are recBufs. 
+ if len(s.recBufs) > 0 { + s.recBufsStart = (s.recBufsStart + 1) % len(s.recBufs) + } + return req, txnBuilder.req, moreToDrain +} + +func incrementSequence(sequence, increment int32) int32 { + if sequence > math.MaxInt32-increment { + return increment - (math.MaxInt32 - sequence) - 1 + } + + return sequence + increment +} + +type txnReqBuilder struct { + txnID *string + req *kmsg.AddPartitionsToTxnRequest + id int64 + epoch int16 + addedTopics map[string]int // topic => index into req +} + +func (t *txnReqBuilder) add(rb *recBuf) { + if t.txnID == nil { + return + } + if rb.addedToTxn.Swap(true) { + return + } + if t.req == nil { + req := kmsg.NewPtrAddPartitionsToTxnRequest() + req.TransactionalID = *t.txnID + req.ProducerID = t.id + req.ProducerEpoch = t.epoch + t.req = req + t.addedTopics = make(map[string]int, 10) + } + idx, exists := t.addedTopics[rb.topic] + if !exists { + idx = len(t.req.Topics) + t.addedTopics[rb.topic] = idx + reqTopic := kmsg.NewAddPartitionsToTxnRequestTopic() + reqTopic.Topic = rb.topic + t.req.Topics = append(t.req.Topics, reqTopic) + } + t.req.Topics[idx].Partitions = append(t.req.Topics[idx].Partitions, rb.partition) +} + +func (s *sink) maybeDrain() { + if s.cl.cfg.manualFlushing && s.cl.producer.flushing.Load() == 0 { + return + } + if s.drainState.maybeBegin() { + go s.drain() + } +} + +func (s *sink) maybeBackoff() { + s.backoffMu.Lock() + backoff := s.needBackoff + s.backoffMu.Unlock() + + if !backoff { + return + } + defer s.clearBackoff() + + s.cl.triggerUpdateMetadata(false, "opportunistic load during sink backoff") // as good a time as any + + tries := int(s.consecutiveFailures.Add(1)) + after := time.NewTimer(s.cl.cfg.retryBackoff(tries)) + defer after.Stop() + + select { + case <-after.C: + case <-s.cl.ctx.Done(): + case <-s.anyCtx().Done(): + } +} + +func (s *sink) maybeTriggerBackoff(seq uint32) { + s.backoffMu.Lock() + defer s.backoffMu.Unlock() + if seq == s.backoffSeq { + s.needBackoff = true + } +} + +func (s *sink) clearBackoff() { + s.backoffMu.Lock() + defer s.backoffMu.Unlock() + s.backoffSeq++ + s.needBackoff = false +} + +// drain drains buffered records and issues produce requests. +// +// This function is harmless if there are no records that need draining. +// We rely on that to not worry about accidental triggers of this function. +func (s *sink) drain() { + again := true + for again { + s.maybeBackoff() + + sem := s.inflightSem.Load().(chan struct{}) + select { + case sem <- struct{}{}: + case <-s.cl.ctx.Done(): + s.drainState.hardFinish() + return + } + + again = s.drainState.maybeFinish(s.produce(sem)) + } +} + +// Returns the first context encountered ranging across all records. +// This does not use defers to make it clear at the return that all +// unlocks are called in proper order. Ideally, do not call this func +// due to lock intensity. 
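As a quick illustration of the wraparound arithmetic in incrementSequence above (Kafka produce sequence numbers are int32 and wrap back through 0 rather than overflowing), here is a minimal standalone sketch:

```go
package main

import (
	"fmt"
	"math"
)

// Mirrors the sink's incrementSequence: sequence numbers are int32 and, once
// they would pass math.MaxInt32, they wrap back around through 0.
func incrementSequence(sequence, increment int32) int32 {
	if sequence > math.MaxInt32-increment {
		return increment - (math.MaxInt32 - sequence) - 1
	}
	return sequence + increment
}

func main() {
	fmt.Println(incrementSequence(10, 5))              // 15
	fmt.Println(incrementSequence(math.MaxInt32-2, 5)) // wraps to 2
}
```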
+func (s *sink) anyCtx() context.Context { + s.recBufsMu.Lock() + for _, recBuf := range s.recBufs { + recBuf.mu.Lock() + if len(recBuf.batches) > 0 { + batch0 := recBuf.batches[0] + batch0.mu.Lock() + if batch0.canFailFromLoadErrs && len(batch0.records) > 0 { + r0 := batch0.records[0] + if rctx := r0.cancelingCtx(); rctx != nil { + batch0.mu.Unlock() + recBuf.mu.Unlock() + s.recBufsMu.Unlock() + return rctx + } + } + batch0.mu.Unlock() + } + recBuf.mu.Unlock() + } + s.recBufsMu.Unlock() + return context.Background() +} + +func (s *sink) produce(sem <-chan struct{}) bool { + var produced bool + defer func() { + if !produced { + <-sem + } + }() + + // We could have been triggered from a metadata update even though the + // user is not producing at all. If we have no buffered records, let's + // avoid potentially creating a producer ID. + if s.cl.BufferedProduceRecords() == 0 { + return false + } + + // producerID can fail from: + // - retry failure + // - auth failure + // - transactional: a produce failure that failed the producer ID + // - AddPartitionsToTxn failure (see just below) + // - some head-of-line context failure + // + // All but the first error is fatal. Recovery may be possible with + // EndTransaction in specific cases, but regardless, all buffered + // records must fail. + // + // NOTE: we init the producer ID before creating a request to ensure we + // are always using the latest id/epoch with the proper sequence + // numbers. (i.e., resetAllSequenceNumbers && producerID logic combo). + // + // For the first-discovered-record-head-of-line context, we want to + // avoid looking it up if possible (which is why producerID takes a + // ctxFn). If we do use one, we want to be sure that the + // context.Canceled error is from *that* context rather than the client + // context or something else. So, we go through some special care to + // track setting the ctx / looking up if it is canceled. + var holCtxMu sync.Mutex + var holCtx context.Context + ctxFn := func() context.Context { + holCtxMu.Lock() + defer holCtxMu.Unlock() + holCtx = s.anyCtx() + return holCtx + } + isHolCtxDone := func() bool { + holCtxMu.Lock() + defer holCtxMu.Unlock() + if holCtx == nil { + return false + } + select { + case <-holCtx.Done(): + return true + default: + } + return false + } + + id, epoch, err := s.cl.producerID(ctxFn) + if err != nil { + var pe *errProducerIDLoadFail + switch { + case errors.As(err, &pe): + if errors.Is(pe.err, context.Canceled) && isHolCtxDone() { + // Some head-of-line record in a partition had a context cancelation. + // We look for any partition with HOL cancelations and fail them all. + s.cl.cfg.logger.Log(LogLevelInfo, "the first record in some partition(s) had a context cancelation; failing all relevant partitions", "broker", logID(s.nodeID)) + s.recBufsMu.Lock() + defer s.recBufsMu.Unlock() + for _, recBuf := range s.recBufs { + recBuf.mu.Lock() + var failAll bool + if len(recBuf.batches) > 0 { + batch0 := recBuf.batches[0] + batch0.mu.Lock() + if batch0.canFailFromLoadErrs && len(batch0.records) > 0 { + r0 := batch0.records[0] + if rctx := r0.cancelingCtx(); rctx != nil { + select { + case <-rctx.Done(): + failAll = true // we must not call failAllRecords here, because failAllRecords locks batches! 
+ default: + } + } + } + batch0.mu.Unlock() + } + if failAll { + recBuf.failAllRecords(err) + } + recBuf.mu.Unlock() + } + return true + } + s.cl.bumpRepeatedLoadErr(err) + s.cl.cfg.logger.Log(LogLevelWarn, "unable to load producer ID, bumping client's buffered record load errors by 1 and retrying") + return true // whatever caused our produce, we did nothing, so keep going + case errors.Is(err, ErrClientClosed): + s.cl.failBufferedRecords(err) + default: + s.cl.cfg.logger.Log(LogLevelError, "fatal InitProducerID error, failing all buffered records", "broker", logID(s.nodeID), "err", err) + s.cl.failBufferedRecords(err) + } + return false + } + + if !s.cl.producer.maybeAddInflight() { // must do before marking recBufs on a txn + return false + } + defer func() { + if !produced { + s.cl.producer.decInflight() + } + }() + + // NOTE: we create the req AFTER getting our producer ID! + // + // If a prior response caused errReloadProducerID, then calling + // producerID() sets needSeqReset, and creating the request resets + // sequence numbers. We need to have that logic occur before we create + // the request, otherwise we will create a request with the old + // sequence numbers using our new producer ID, which will then again + // fail with OOOSN. + req, txnReq, moreToDrain := s.createReq(id, epoch) + if len(req.batches) == 0 { // everything was failing or lingering + return moreToDrain + } + + if txnReq != nil { + // txnReq can fail from: + // - retry failure + // - auth failure + // - producer id mapping / epoch errors + // The latter case can potentially recover with the kip logic + // we have defined in EndTransaction. Regardless, on failure + // here, all buffered records must fail. + // We do not need to clear the addedToTxn flag for any recBuf + // it was set on, since producer id recovery resets the flag. + batchesStripped, err := s.doTxnReq(req, txnReq) + if err != nil { + switch { + case isRetryableBrokerErr(err) || isDialNonTimeoutErr(err): + s.cl.bumpRepeatedLoadErr(err) + s.cl.cfg.logger.Log(LogLevelWarn, "unable to AddPartitionsToTxn due to retryable broker err, bumping client's buffered record load errors by 1 and retrying", "err", err) + s.cl.triggerUpdateMetadata(false, "attempting to refresh broker list due to failed AddPartitionsToTxn requests") + return moreToDrain || len(req.batches) > 0 // nothing stripped if request-issuing error + default: + // Note that err can be InvalidProducerEpoch, which is + // potentially recoverable in EndTransaction. + // + // We do not fail all buffered records here, + // because that can lead to undesirable behavior + // with produce request vs. end txn (KAFKA-12671) + s.cl.failProducerID(id, epoch, err) + s.cl.cfg.logger.Log(LogLevelError, "fatal AddPartitionsToTxn error, failing all buffered records (it is possible the client can recover after EndTransaction)", "broker", logID(s.nodeID), "err", err) + } + return false + } + + // If we stripped everything, ensure we backoff to force a + // metadata load. If not everything was stripped, we issue our + // request and ensure we will retry a producing until + // everything is stripped (and we eventually back off). + if batchesStripped { + moreToDrain = true + if len(req.batches) == 0 { + s.maybeTriggerBackoff(s.backoffSeq) + } + } + } + + if len(req.batches) == 0 { // txn req could have removed some partitions to retry later (unknown topic, etc.) 
+ return moreToDrain + } + + req.backoffSeq = s.backoffSeq // safe to read outside mu since we are in drain loop + + produced = true + + batches := req.batches.sliced() + s.doSequenced(req, func(br *broker, resp kmsg.Response, err error) { + s.handleReqResp(br, req, resp, err) + s.cl.producer.decInflight() + batches.eachOwnerLocked((*recBatch).decInflight) + <-sem + }) + return moreToDrain +} + +// With handleSeqResps below, this function ensures that all request responses +// are handled in order. We use this guarantee while in handleReqResp below. +func (s *sink) doSequenced( + req kmsg.Request, + promise func(*broker, kmsg.Response, error), +) { + wait := &seqResp{ + done: make(chan struct{}), + promise: promise, + } + + // We can NOT use any record context. If we do, we force the request to + // fail while also force the batch to be unfailable (due to no + // response), + br, err := s.cl.brokerOrErr(s.cl.ctx, s.nodeID, errUnknownBroker) + if err != nil { + wait.err = err + close(wait.done) + } else { + br.do(s.cl.ctx, req, func(resp kmsg.Response, err error) { + wait.resp = resp + wait.err = err + close(wait.done) + }) + wait.br = br + } + + if first := s.seqResps.push(wait); first { + go s.handleSeqResps(wait) + } +} + +// Ensures that all request responses are processed in order. +func (s *sink) handleSeqResps(wait *seqResp) { + var more bool +start: + <-wait.done + wait.promise(wait.br, wait.resp, wait.err) + + wait, more = s.seqResps.dropPeek() + if more { + goto start + } +} + +// Issues an AddPartitionsToTxnRequest before a produce request for all +// partitions that need to be added to a transaction. +func (s *sink) doTxnReq( + req *produceRequest, + txnReq *kmsg.AddPartitionsToTxnRequest, +) (stripped bool, err error) { + // If we return an unretryable error, then we have to reset everything + // to not be in the transaction and begin draining at the start. + // + // These batches must be the first in their recBuf, because we would + // not be trying to add them to a partition if they were not. + defer func() { + if err != nil { + req.batches.eachOwnerLocked(seqRecBatch.removeFromTxn) + } + }() + // We do NOT let record context cancelations fail this request: doing + // so would put the transactional ID in an unknown state. This is + // similar to the warning we give in the txn.go file, but the + // difference there is the user knows explicitly at the function call + // that canceling the context will opt them into invalid state. + err = s.cl.doWithConcurrentTransactions(s.cl.ctx, "AddPartitionsToTxn", func() error { + stripped, err = s.issueTxnReq(req, txnReq) + return err + }) + return stripped, err +} + +// Removing a batch from the transaction means we will not be issuing it +// inflight, and that it was not added to the txn and that we need to reset the +// drain index. 
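The doSequenced/handleSeqResps pair above guarantees that produce responses are handled strictly in the order the requests were issued, even though the broker callbacks fire asynchronously. Below is a loose standalone sketch of the same idea, using a plain buffered channel in place of the ringSeqResp ring; the names are illustrative, not the client's API:

```go
package main

import (
	"fmt"
	"time"
)

// Each "request" carries a done channel and a promise. A single goroutine
// consumes them strictly in send order, so promises run sequentially even
// when the underlying responses arrive out of order.
type pendingResp struct {
	done    chan struct{}
	result  string
	promise func(string)
}

func main() {
	pending := make(chan *pendingResp, 8)
	finished := make(chan struct{})

	// The single in-order handler, roughly playing the handleSeqResps role.
	go func() {
		defer close(finished)
		for p := range pending {
			<-p.done // wait for this response even if later ones are ready
			p.promise(p.result)
		}
	}()

	// Issue three "requests" whose responses complete out of order.
	for i, delay := range []time.Duration{30, 10, 20} {
		p := &pendingResp{
			done:    make(chan struct{}),
			promise: func(res string) { fmt.Println("handled", res) },
		}
		pending <- p
		go func(i int, delay time.Duration, p *pendingResp) {
			time.Sleep(delay * time.Millisecond)
			p.result = fmt.Sprintf("req-%d", i)
			close(p.done)
		}(i, delay, p)
	}
	close(pending)
	<-finished // prints req-0, req-1, req-2, in issue order
}
```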
+func (b *recBatch) removeFromTxn() { + b.owner.addedToTxn.Store(false) + b.owner.resetBatchDrainIdx() + b.decInflight() +} + +func (s *sink) issueTxnReq( + req *produceRequest, + txnReq *kmsg.AddPartitionsToTxnRequest, +) (stripped bool, fatalErr error) { + resp, err := txnReq.RequestWith(s.cl.ctx, s.cl) + if err != nil { + return false, err + } + + for _, topic := range resp.Topics { + topicBatches, ok := req.batches[topic.Topic] + if !ok { + s.cl.cfg.logger.Log(LogLevelError, "broker replied with topic in AddPartitionsToTxnResponse that was not in request", "topic", topic.Topic) + continue + } + for _, partition := range topic.Partitions { + if err := kerr.ErrorForCode(partition.ErrorCode); err != nil { + // OperationNotAttempted is set for all partitions that are authorized + // if any partition is unauthorized _or_ does not exist. We simply remove + // unattempted partitions and treat them as retryable. + if !kerr.IsRetriable(err) && !errors.Is(err, kerr.OperationNotAttempted) { + fatalErr = err // auth err, etc + continue + } + + batch, ok := topicBatches[partition.Partition] + if !ok { + s.cl.cfg.logger.Log(LogLevelError, "broker replied with partition in AddPartitionsToTxnResponse that was not in request", "topic", topic.Topic, "partition", partition.Partition) + continue + } + + // We are stripping this retryable-err batch from the request, + // so we must reset that it has been added to the txn. + batch.owner.mu.Lock() + batch.removeFromTxn() + batch.owner.mu.Unlock() + + stripped = true + + delete(topicBatches, partition.Partition) + } + if len(topicBatches) == 0 { + delete(req.batches, topic.Topic) + } + } + } + return stripped, fatalErr +} + +// firstRespCheck is effectively a sink.Once. On the first response, if the +// used request version is at least 4, we upgrade our inflight sem. +// +// Starting on version 4, Kafka allowed five inflight requests while +// maintaining idempotency. Before, only one was allowed. +// +// We go through an atomic because drain can be waiting on the sem (with +// capacity one). We store four here, meaning new drain loops will load the +// higher capacity sem without read/write pointer racing a current loop. +// +// This logic does mean that we will never use the full potential 5 in flight +// outside of a small window during the store, but some pages in the Kafka +// confluence basically show that more than two in flight has marginal benefit +// anyway (although that may be due to their Java API). +// +// https://cwiki.apache.org/confluence/display/KAFKA/An+analysis+of+the+impact+of+max.in.flight.requests.per.connection+and+acks+on+Producer+performance +// https://issues.apache.org/jira/browse/KAFKA-5494 +func (s *sink) firstRespCheck(idempotent bool, version int16) { + if s.produceVersion.Load() < 0 { + s.produceVersion.Store(int32(version)) + if idempotent && version >= 4 { + s.inflightSem.Store(make(chan struct{}, 4)) + } + } +} + +// handleReqClientErr is called when the client errors before receiving a +// produce response. 
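firstRespCheck above raises the inflight limit from one to four by swapping in a larger buffered channel through an atomic.Value once the first response proves the broker accepts produce v4+. A minimal sketch of that channel-as-semaphore pattern (standalone, not the client's types):

```go
package main

import (
	"fmt"
	"sync/atomic"
)

// A buffered channel acts as a semaphore; swapping the whole channel through
// an atomic.Value raises the inflight limit without racing in-progress loops,
// which still release into whichever channel they acquired from.
func main() {
	var inflightSem atomic.Value
	inflightSem.Store(make(chan struct{}, 1)) // conservative default

	acquire := func() chan struct{} {
		sem := inflightSem.Load().(chan struct{})
		sem <- struct{}{} // blocks once the current limit is reached
		return sem
	}

	sem := acquire()
	fmt.Println("first request inflight, capacity:", cap(sem))
	<-sem // first response received

	// The first response showed a new-enough version: upgrade the semaphore.
	inflightSem.Store(make(chan struct{}, 4))

	for i := 0; i < 4; i++ {
		acquire()
	}
	fmt.Println("now up to", cap(inflightSem.Load().(chan struct{})), "requests inflight")
}
```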
+func (s *sink) handleReqClientErr(req *produceRequest, err error) { + switch { + default: + s.cl.cfg.logger.Log(LogLevelWarn, "random error while producing, requeueing unattempted request", "broker", logID(s.nodeID), "err", err) + fallthrough + + case errors.Is(err, errUnknownBroker), + isDialNonTimeoutErr(err), + isRetryableBrokerErr(err): + updateMeta := !isRetryableBrokerErr(err) + if updateMeta { + s.cl.cfg.logger.Log(LogLevelInfo, "produce request failed, triggering metadata update", "broker", logID(s.nodeID), "err", err) + } + s.handleRetryBatches(req.batches, nil, req.backoffSeq, updateMeta, false, "failed produce request triggered metadata update") + + case errors.Is(err, ErrClientClosed): + s.cl.failBufferedRecords(ErrClientClosed) + } +} + +// No acks mean no response. The following block is basically an extremely +// condensed version of the logic in handleReqResp. +func (s *sink) handleReqRespNoack(b *bytes.Buffer, debug bool, req *produceRequest) { + if debug { + fmt.Fprintf(b, "noack ") + } + for topic, partitions := range req.batches { + if debug { + fmt.Fprintf(b, "%s[", topic) + } + for partition, batch := range partitions { + batch.owner.mu.Lock() + if batch.isOwnersFirstBatch() { + if debug { + fmt.Fprintf(b, "%d{0=>%d}, ", partition, len(batch.records)) + } + s.cl.finishBatch(batch.recBatch, req.producerID, req.producerEpoch, partition, 0, nil) + } else if debug { + fmt.Fprintf(b, "%d{skipped}, ", partition) + } + batch.owner.mu.Unlock() + } + if debug { + if bytes.HasSuffix(b.Bytes(), []byte(", ")) { + b.Truncate(b.Len() - 2) + } + b.WriteString("], ") + } + } +} + +func (s *sink) handleReqResp(br *broker, req *produceRequest, resp kmsg.Response, err error) { + if err != nil { + s.handleReqClientErr(req, err) + return + } + s.firstRespCheck(req.idempotent(), req.version) + s.consecutiveFailures.Store(0) + defer req.metrics.hook(&s.cl.cfg, br) // defer to end so that non-written batches are removed + + var b *bytes.Buffer + debug := s.cl.cfg.logger.Level() >= LogLevelDebug + if debug { + b = bytes.NewBuffer(make([]byte, 0, 128)) + defer func() { + update := b.String() + update = strings.TrimSuffix(update, ", ") + s.cl.cfg.logger.Log(LogLevelDebug, "produced", "broker", logID(s.nodeID), "to", update) + }() + } + + if req.acks == 0 { + s.handleReqRespNoack(b, debug, req) + return + } + + var kmove kip951move + var reqRetry seqRecBatches // handled at the end + + kresp := resp.(*kmsg.ProduceResponse) + for i := range kresp.Topics { + rt := &kresp.Topics[i] + topic := rt.Topic + partitions, ok := req.batches[topic] + if !ok { + s.cl.cfg.logger.Log(LogLevelError, "broker erroneously replied with topic in produce request that we did not produce to", "broker", logID(s.nodeID), "topic", topic) + delete(req.metrics, topic) + continue // should not hit this + } + + if debug { + fmt.Fprintf(b, "%s[", topic) + } + + tmetrics := req.metrics[topic] + for j := range rt.Partitions { + rp := &rt.Partitions[j] + partition := rp.Partition + batch, ok := partitions[partition] + if !ok { + s.cl.cfg.logger.Log(LogLevelError, "broker erroneously replied with partition in produce request that we did not produce to", "broker", logID(s.nodeID), "topic", rt.Topic, "partition", partition) + delete(tmetrics, partition) + continue // should not hit this + } + delete(partitions, partition) + + retry, didProduce := s.handleReqRespBatch( + b, + &kmove, + kresp, + topic, + rp, + batch, + req.producerID, + req.producerEpoch, + ) + if retry { + reqRetry.addSeqBatch(topic, partition, batch) + } + if 
!didProduce { + delete(tmetrics, partition) + } + } + + if debug { + if bytes.HasSuffix(b.Bytes(), []byte(", ")) { + b.Truncate(b.Len() - 2) + } + b.WriteString("], ") + } + + if len(partitions) == 0 { + delete(req.batches, topic) + } + } + + if len(req.batches) > 0 { + s.cl.cfg.logger.Log(LogLevelError, "broker did not reply to all topics / partitions in the produce request! reenqueuing missing partitions", "broker", logID(s.nodeID)) + s.handleRetryBatches(req.batches, nil, 0, true, false, "broker did not reply to all topics in produce request") + } + if len(reqRetry) > 0 { + s.handleRetryBatches(reqRetry, &kmove, 0, true, true, "produce request had retry batches") + } +} + +func (s *sink) handleReqRespBatch( + b *bytes.Buffer, + kmove *kip951move, + resp *kmsg.ProduceResponse, + topic string, + rp *kmsg.ProduceResponseTopicPartition, + batch seqRecBatch, + producerID int64, + producerEpoch int16, +) (retry, didProduce bool) { + batch.owner.mu.Lock() + defer batch.owner.mu.Unlock() + + nrec := len(batch.records) + + debug := b != nil + if debug { + fmt.Fprintf(b, "%d{", rp.Partition) + } + + // We only ever operate on the first batch in a record buf. Batches + // work sequentially; if this is not the first batch then an error + // happened and this later batch is no longer a part of a seq chain. + if !batch.isOwnersFirstBatch() { + if debug { + if err := kerr.ErrorForCode(rp.ErrorCode); err == nil { + if nrec > 0 { + fmt.Fprintf(b, "skipped@%d=>%d}, ", rp.BaseOffset, rp.BaseOffset+int64(nrec)) + } else { + fmt.Fprintf(b, "skipped@%d}, ", rp.BaseOffset) + } + } else { + if nrec > 0 { + fmt.Fprintf(b, "skipped@%d,%d(%s)}, ", rp.BaseOffset, nrec, err) + } else { + fmt.Fprintf(b, "skipped@%d(%s)}, ", rp.BaseOffset, err) + } + } + } + return false, false + } + + // Since we have received a response and we are the first batch, we can + // at this point re-enable failing from load errors. + // + // We do not need a lock since the owner is locked. + batch.canFailFromLoadErrs = true + + // By default, we assume we errored. Non-error updates this back + // to true. + batch.owner.okOnSink = false + + if moving := kmove.maybeAddProducePartition(resp, rp, batch.owner); moving { + if debug { + fmt.Fprintf(b, "move:%d:%d@%d,%d}, ", rp.CurrentLeader.LeaderID, rp.CurrentLeader.LeaderEpoch, rp.BaseOffset, nrec) + } + batch.owner.failing = true + return true, false + } + + err := kerr.ErrorForCode(rp.ErrorCode) + failUnknown := batch.owner.checkUnknownFailLimit(err) + switch { + case kerr.IsRetriable(err) && + !failUnknown && + err != kerr.CorruptMessage && + batch.tries < s.cl.cfg.recordRetries: + + if debug { + fmt.Fprintf(b, "retrying@%d,%d(%s)}, ", rp.BaseOffset, nrec, err) + } + return true, false + + case err == kerr.OutOfOrderSequenceNumber, + err == kerr.UnknownProducerID, + err == kerr.InvalidProducerIDMapping, + err == kerr.InvalidProducerEpoch: + + // OOOSN always means data loss 1.0+ and is ambiguous prior. + // We assume the worst and only continue if requested. + // + // UnknownProducerID was introduced to allow some form of safe + // handling, but KIP-360 demonstrated that resetting sequence + // numbers is fundamentally unsafe, so we treat it like OOOSN. + // + // InvalidMapping is similar to UnknownProducerID, but occurs + // when the txnal coordinator timed out our transaction. + // + // 2.5 + // ===== + // 2.5 introduced some behavior to potentially safely reset + // the sequence numbers by bumping an epoch (see KIP-360). 
+ // + // For the idempotent producer, the solution is to fail all + // buffered records and then let the client user reset things + // with the understanding that they cannot guard against + // potential dups / reordering at that point. Realistically, + // that's no better than a config knob that allows the user + // to continue (our stopOnDataLoss flag), so for the idempotent + // producer, if stopOnDataLoss is false, we just continue. + // + // For the transactional producer, we always fail the producerID. + // EndTransaction will trigger recovery if possible. + // + // 2.7 + // ===== + // InvalidProducerEpoch became retryable in 2.7. Prior, it + // was ambiguous (timeout? fenced?). Now, InvalidProducerEpoch + // is only returned on produce, and then we can recover on other + // txn coordinator requests, which have PRODUCER_FENCED vs + // TRANSACTION_TIMED_OUT. + + if s.cl.cfg.txnID != nil || s.cl.cfg.stopOnDataLoss { + s.cl.cfg.logger.Log(LogLevelInfo, "batch errored, failing the producer ID", + "broker", logID(s.nodeID), + "topic", topic, + "partition", rp.Partition, + "producer_id", producerID, + "producer_epoch", producerEpoch, + "err", err, + ) + s.cl.failProducerID(producerID, producerEpoch, err) + + s.cl.finishBatch(batch.recBatch, producerID, producerEpoch, rp.Partition, rp.BaseOffset, err) + if debug { + fmt.Fprintf(b, "fatal@%d,%d(%s)}, ", rp.BaseOffset, nrec, err) + } + return false, false + } + if s.cl.cfg.onDataLoss != nil { + s.cl.cfg.onDataLoss(topic, rp.Partition) + } + + // For OOOSN, and UnknownProducerID + // + // The only recovery is to fail the producer ID, which ensures + // that all batches reset sequence numbers and use a new producer + // ID on the next batch. + // + // For InvalidProducerIDMapping && InvalidProducerEpoch, + // + // We should not be here, since this error occurs in the + // context of transactions, which are caught above. + s.cl.cfg.logger.Log(LogLevelInfo, fmt.Sprintf("batch errored with %s, failing the producer ID and resetting all sequence numbers", err.(*kerr.Error).Message), + "broker", logID(s.nodeID), + "topic", topic, + "partition", rp.Partition, + "producer_id", producerID, + "producer_epoch", producerEpoch, + "err", err, + ) + + // After we fail here, any new produce (even new ones + // happening concurrent with this function) will load + // a new epoch-bumped producer ID and all first-batches + // will reset sequence numbers appropriately. 
+ s.cl.failProducerID(producerID, producerEpoch, errReloadProducerID) + if debug { + fmt.Fprintf(b, "resetting@%d,%d(%s)}, ", rp.BaseOffset, nrec, err) + } + return true, false + + case err == kerr.DuplicateSequenceNumber: // ignorable, but we should not get + s.cl.cfg.logger.Log(LogLevelInfo, "received unexpected duplicate sequence number, ignoring and treating batch as successful", + "broker", logID(s.nodeID), + "topic", topic, + "partition", rp.Partition, + ) + err = nil + fallthrough + default: + if err != nil { + s.cl.cfg.logger.Log(LogLevelInfo, "batch in a produce request failed", + "broker", logID(s.nodeID), + "topic", topic, + "partition", rp.Partition, + "err", err, + "err_is_retryable", kerr.IsRetriable(err), + "max_retries_reached", !failUnknown && batch.tries >= s.cl.cfg.recordRetries, + ) + } else { + batch.owner.okOnSink = true + } + s.cl.finishBatch(batch.recBatch, producerID, producerEpoch, rp.Partition, rp.BaseOffset, err) + didProduce = err == nil + if debug { + if err != nil { + fmt.Fprintf(b, "err@%d,%d(%s)}, ", rp.BaseOffset, nrec, err) + } else { + fmt.Fprintf(b, "%d=>%d}, ", rp.BaseOffset, rp.BaseOffset+int64(nrec)) + } + } + } + return false, didProduce // no retry +} + +// finishBatch removes a batch from its owning record buffer and finishes all +// records in the batch. +// +// This is safe even if the owning recBuf migrated sinks, since we are +// finishing based off the status of an inflight req from the original sink. +func (cl *Client) finishBatch(batch *recBatch, producerID int64, producerEpoch int16, partition int32, baseOffset int64, err error) { + recBuf := batch.owner + + if err != nil { + // We know that Kafka replied this batch is a failure. We can + // fail this batch and all batches in this partition. + // This will keep sequence numbers correct. + recBuf.failAllRecords(err) + return + } + + // We know the batch made it to Kafka successfully without error. + // We remove this batch and finish all records appropriately. + finished := len(batch.records) + recBuf.batch0Seq = incrementSequence(recBuf.batch0Seq, int32(finished)) + recBuf.buffered.Add(-int64(finished)) + recBuf.batches[0] = nil + recBuf.batches = recBuf.batches[1:] + recBuf.batchDrainIdx-- + + batch.mu.Lock() + records, attrs := batch.records, batch.attrs + batch.records = nil + batch.mu.Unlock() + + cl.producer.promiseBatch(batchPromise{ + baseOffset: baseOffset, + pid: producerID, + epoch: producerEpoch, + // A recBuf.attrs is updated when appending to be written. For + // v0 && v1 produce requests, we set bit 8 in the attrs + // corresponding to our own RecordAttr's bit 8 being no + // timestamp type. Thus, we can directly convert the batch + // attrs to our own RecordAttrs. + attrs: RecordAttrs{uint8(attrs)}, + partition: partition, + recs: records, + }) +} + +// handleRetryBatches sets any first-buf-batch to failing and triggers a +// metadata that will eventually clear the failing state and re-drain. +// +// If idempotency is disabled, if a batch is timed out or hit the retry limit, +// we fail it and anything after it. 
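handleReqRespBatch above leans on kerr.ErrorForCode and kerr.IsRetriable to decide between retrying, resetting the producer ID, and failing the batch. A much-simplified standalone sketch of that classification follows; the policy is illustrative only, and it assumes the vendored github.com/twmb/franz-go/pkg/kerr package is importable:

```go
package main

import (
	"fmt"

	"github.com/twmb/franz-go/pkg/kerr"
)

// classify maps a produce partition error code to a rough disposition,
// loosely following the switch in handleReqRespBatch: nil means success,
// sequence / producer-ID errors force an ID reset, other retryable errors
// are retried, and anything else fails the batch.
func classify(code int16) string {
	err := kerr.ErrorForCode(code)
	switch {
	case err == nil:
		return "success"
	case err == kerr.OutOfOrderSequenceNumber,
		err == kerr.UnknownProducerID,
		err == kerr.InvalidProducerIDMapping,
		err == kerr.InvalidProducerEpoch:
		return "reset producer ID"
	case kerr.IsRetriable(err):
		return "retry"
	default:
		return "fail batch: " + err.Error()
	}
}

func main() {
	fmt.Println(classify(0))  // NONE: success
	fmt.Println(classify(6))  // NOT_LEADER_FOR_PARTITION: retry
	fmt.Println(classify(45)) // OUT_OF_ORDER_SEQUENCE_NUMBER: reset producer ID
}
```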
+func (s *sink) handleRetryBatches( + retry seqRecBatches, + kmove *kip951move, + backoffSeq uint32, + updateMeta bool, // if we should maybe update the metadata + canFail bool, // if records can fail if they are at limits + why string, +) { + logger := s.cl.cfg.logger + debug := logger.Level() >= LogLevelDebug + var needsMetaUpdate bool + var shouldBackoff bool + if kmove != nil { + defer kmove.maybeBeginMove(s.cl) + } + var numRetryBatches, numMoveBatches int + retry.eachOwnerLocked(func(batch seqRecBatch) { + numRetryBatches++ + if !batch.isOwnersFirstBatch() { + if debug { + logger.Log(LogLevelDebug, "retry batch is not the first batch in the owner, skipping result", + "topic", batch.owner.topic, + "partition", batch.owner.partition, + ) + } + return + } + + // If the request failed due to a concurrent metadata update + // moving partitions to a different sink (or killing the sink + // this partition was on), we can just reset the drain index + // and trigger draining now the new sink. There is no reason + // to backoff on this sink nor trigger a metadata update. + if batch.owner.sink != s { + if debug { + logger.Log(LogLevelDebug, "transitioned sinks while a request was inflight, retrying immediately on new sink without backoff", + "topic", batch.owner.topic, + "partition", batch.owner.partition, + "old_sink", s.nodeID, + "new_sink", batch.owner.sink.nodeID, + ) + } + batch.owner.resetBatchDrainIdx() + return + } + + if canFail || s.cl.cfg.disableIdempotency { + if err := batch.maybeFailErr(&s.cl.cfg); err != nil { + batch.owner.failAllRecords(err) + return + } + } + + batch.owner.resetBatchDrainIdx() + + // Now that the batch drain index is reset, if this retry is + // caused from a moving batch, return early. We do not need + // to backoff nor do we need to trigger a metadata update. + if kmove.hasRecBuf(batch.owner) { + numMoveBatches++ + return + } + + // If our first batch (seq == 0) fails with unknown topic, we + // retry immediately. Kafka can reply with valid metadata + // immediately after a topic was created, before the leaders + // actually know they are leader. + unknownAndFirstBatch := batch.owner.unknownFailures == 1 && batch.owner.seq == 0 + + if unknownAndFirstBatch { + shouldBackoff = true + return + } + if updateMeta { + batch.owner.failing = true + needsMetaUpdate = true + } + }) + + if debug { + logger.Log(LogLevelDebug, "retry batches processed", + "wanted_metadata_update", updateMeta, + "triggering_metadata_update", needsMetaUpdate, + "should_backoff", shouldBackoff, + ) + } + + // If we do want to metadata update, we only do so if any batch was the + // first batch in its buf / not concurrently failed. + if needsMetaUpdate { + s.cl.triggerUpdateMetadata(true, why) + return + } + + // We could not need a metadata update for two reasons: + // + // * our request died when being issued + // + // * we would update metadata, but what failed was the first batch + // produced and the error was unknown topic / partition. + // + // In either of these cases, we should backoff a little bit to avoid + // spin looping. + // + // If neither of these cases are true, then we entered wanting a + // metadata update, but the batches either were not the first batch, or + // the batches were concurrently failed. + // + // If all partitions are moving, we do not need to backoff nor drain. 
+ if shouldBackoff || (!updateMeta && numRetryBatches != numMoveBatches) { + s.maybeTriggerBackoff(backoffSeq) + s.maybeDrain() + } +} + +// addRecBuf adds a new record buffer to be drained to a sink and clears the +// buffer's failing state. +func (s *sink) addRecBuf(add *recBuf) { + s.recBufsMu.Lock() + add.recBufsIdx = len(s.recBufs) + s.recBufs = append(s.recBufs, add) + s.recBufsMu.Unlock() + + add.clearFailing() +} + +// removeRecBuf removes a record buffer from a sink. +func (s *sink) removeRecBuf(rm *recBuf) { + s.recBufsMu.Lock() + defer s.recBufsMu.Unlock() + + if rm.recBufsIdx != len(s.recBufs)-1 { + s.recBufs[rm.recBufsIdx], s.recBufs[len(s.recBufs)-1] = s.recBufs[len(s.recBufs)-1], nil + s.recBufs[rm.recBufsIdx].recBufsIdx = rm.recBufsIdx + } else { + s.recBufs[rm.recBufsIdx] = nil // do not let this removal hang around + } + + s.recBufs = s.recBufs[:len(s.recBufs)-1] + if s.recBufsStart == len(s.recBufs) { + s.recBufsStart = 0 + } +} + +// recBuf is a buffer of records being produced to a partition and being +// drained by a sink. This is only not drained if the partition has a load +// error and thus does not a have a sink to be drained into. +type recBuf struct { + cl *Client // for cfg, record finishing + + topic string + partition int32 + + // The number of bytes we can buffer in a batch for this particular + // topic/partition. This may be less than the configured + // maxRecordBatchBytes because of produce request overhead. + maxRecordBatchBytes int32 + + // addedToTxn, for transactions only, signifies whether this partition + // has been added to the transaction yet or not. + addedToTxn atomicBool + + // For LoadTopicPartitioner partitioning; atomically tracks the number + // of records buffered in total on this recBuf. + buffered atomicI64 + + mu sync.Mutex // guards r/w access to all fields below + + // sink is who is currently draining us. This can be modified + // concurrently during a metadata update. + // + // The first set to a non-nil sink is done without a mutex. + // + // Since only metadata updates can change the sink, metadata updates + // also read this without a mutex. + sink *sink + // recBufsIdx is our index into our current sink's recBufs field. + // This exists to aid in removing the buffer from the sink. + recBufsIdx int + + // A concurrent metadata update can move a recBuf from one sink to + // another while requests are inflight on the original sink. We do not + // want to allow new requests to start on the new sink until they all + // finish on the old, because with some pathological request order + // finishing, we would allow requests to finish out of order: + // handleSeqResps works per sink, not across sinks. + inflightOnSink *sink + // We only want to allow more than 1 inflight on a sink *if* we are + // currently receiving successful responses. Unimportantly, this allows + // us to save resources if the broker is having a problem or just + // recovered from one. Importantly, we work around an edge case in + // Kafka. Kafka will accept the first produce request for a pid/epoch + // with *any* sequence number. Say we sent two requests inflight. The + // first request Kafka replies to with NOT_LEADER_FOR_PARTITION, the + // second, the broker finished setting up and accepts. The broker now + // has the second request but not the first, we will retry both + // requests and receive OOOSN, and the broker has logs out of order. 
+ // By only allowing more than one inflight if we have seen an ok + // response, we largely eliminate risk of this problem. See #223 for + // more details. + okOnSink bool + // Inflight tracks the number of requests inflight using batches from + // this recBuf. Every time this hits zero, if the batchDrainIdx is not + // at the end, we clear inflightOnSink and trigger the *current* sink + // to drain. + inflight uint8 + + topicPartitionData // updated in metadata migrateProductionTo (same spot sink is updated) + + // seq is used for the seq in each record batch. It is incremented when + // produce requests are made and can be reset on errors to batch0Seq. + // + // If idempotency is disabled, we just use "0" for the first sequence + // when encoding our payload. + // + // This is also used to check the first batch produced (disregarding + // seq resets) -- see handleRetryBatches. + seq int32 + // batch0Seq is the seq of the batch at batchDrainIdx 0. If we reset + // the drain index, we reset seq with this number. If we successfully + // finish batch 0, we bump this. + batch0Seq int32 + // If we need to reset sequence numbers, we set needSeqReset, and then + // when we use the **first** batch, we reset sequences to 0. + needSeqReset bool + + // batches is our list of buffered records. Batches are appended as the + // final batch crosses size thresholds or as drain freezes batches from + // further modification. + // + // Most functions in a sink only operate on a batch if the batch is the + // first batch in a buffer. This is necessary to ensure that all + // records are truly finished without error in order. + batches []*recBatch + // batchDrainIdx is where the next batch will drain from. We only + // remove from the head of batches when a batch is finished. + // This is read while buffering and modified in a few places. + batchDrainIdx int + + // If we fail with UNKNOWN_TOPIC_OR_PARTITION, we bump this and fail + // all records once this exceeds the config's unknown topic fail limit. + // If we ever see a different error (or no error), this is reset. + unknownFailures int64 + + // lingering is a timer that avoids starting maybeDrain until expiry, + // allowing for more records to be buffered in a single batch. + // + // Note that if something else starts a drain, if the first batch of + // this buffer fits into the request, it will be used. + // + // This is on recBuf rather than Sink to avoid some complicated + // interactions of triggering the sink to loop or not. Ideally, with + // the sticky partition hashers, we will only have a few partitions + // lingering and that this is on a RecBuf should not matter. + lingering *time.Timer + + // failing is set when we encounter a temporary partition error during + // producing, such as UnknownTopicOrPartition (signifying the partition + // moved to a different broker). + // + // It is always cleared on metadata update. + failing bool + + // Only possibly set in PurgeTopics, this is used to fail anything that + // was in the process of being buffered. + purged bool +} + +// bufferRecord usually buffers a record, but does not if abortOnNewBatch is +// true and if this function would create a new batch. +// +// This returns whether the promised record was processed or not (buffered or +// immediately errored). 
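The lingering timer described on recBuf above delays draining so that more records can accumulate into one batch, while a batch that fills up still flushes immediately. A rough standalone sketch of that flush-on-size-or-linger pattern (the batcher type here is hypothetical, not the client's recBuf):

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

// batcher buffers records and flushes either when the buffer reaches max or
// when the linger timer fires, whichever comes first.
type batcher struct {
	mu     sync.Mutex
	buf    []string
	linger time.Duration
	max    int
	timer  *time.Timer
	flush  func([]string)
}

func (b *batcher) add(rec string) {
	b.mu.Lock()
	defer b.mu.Unlock()
	b.buf = append(b.buf, rec)
	if len(b.buf) >= b.max {
		b.flushLocked()
		return
	}
	if b.timer == nil {
		b.timer = time.AfterFunc(b.linger, func() {
			b.mu.Lock()
			defer b.mu.Unlock()
			b.flushLocked()
		})
	}
}

func (b *batcher) flushLocked() {
	if b.timer != nil {
		b.timer.Stop()
		b.timer = nil
	}
	if len(b.buf) == 0 {
		return
	}
	out := b.buf
	b.buf = nil
	b.flush(out)
}

func main() {
	b := &batcher{linger: 50 * time.Millisecond, max: 3, flush: func(recs []string) {
		fmt.Println("flushing", recs)
	}}
	b.add("a")
	b.add("b") // fewer than max: waits for the linger timer
	time.Sleep(100 * time.Millisecond)
	b.add("c")
	b.add("d")
	b.add("e") // hits max: flushes immediately
	time.Sleep(100 * time.Millisecond)
}
```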
+func (recBuf *recBuf) bufferRecord(pr promisedRec, abortOnNewBatch bool) bool { + recBuf.mu.Lock() + defer recBuf.mu.Unlock() + + // We truncate to milliseconds to avoid some accumulated rounding error + // problems (see IBM/sarama#1455) + if pr.Timestamp.IsZero() { + pr.Timestamp = time.Now() + } + pr.Timestamp = pr.Timestamp.Truncate(time.Millisecond) + pr.Partition = recBuf.partition // set now, for the hook below + + if recBuf.purged { + recBuf.cl.producer.promiseRecord(pr, errPurged) + return true + } + + var ( + newBatch = true + onDrainBatch = recBuf.batchDrainIdx == len(recBuf.batches) + produceVersion = recBuf.sink.produceVersion.Load() + ) + + if !onDrainBatch { + batch := recBuf.batches[len(recBuf.batches)-1] + appended, _ := batch.tryBuffer(pr, produceVersion, recBuf.maxRecordBatchBytes, false) + newBatch = !appended + } + + if newBatch { + newBatch := recBuf.newRecordBatch() + appended, aborted := newBatch.tryBuffer(pr, produceVersion, recBuf.maxRecordBatchBytes, abortOnNewBatch) + + switch { + case aborted: // not processed + return false + case appended: // we return true below + default: // processed as failure + recBuf.cl.producer.promiseRecord(pr, kerr.MessageTooLarge) + return true + } + + recBuf.batches = append(recBuf.batches, newBatch) + } + + if recBuf.cl.cfg.linger == 0 { + if onDrainBatch { + recBuf.sink.maybeDrain() + } + } else { + // With linger, if this is a new batch but not the first, we + // stop lingering and begin draining. The drain loop will + // restart our linger once this buffer has one batch left. + if newBatch && !onDrainBatch || + // If this is the first batch, try lingering; if + // we cannot, we are being flushed and must drain. + onDrainBatch && !recBuf.lockedMaybeStartLinger() { + recBuf.lockedStopLinger() + recBuf.sink.maybeDrain() + } + } + + recBuf.buffered.Add(1) + + if recBuf.cl.producer.hooks != nil && len(recBuf.cl.producer.hooks.partitioned) > 0 { + for _, h := range recBuf.cl.producer.hooks.partitioned { + h.OnProduceRecordPartitioned(pr.Record, recBuf.sink.nodeID) + } + } + + return true +} + +// Stops lingering, potentially restarting it, and returns whether there is +// more to drain. +// +// If lingering, if there are more than one batches ready, there is definitely +// more to drain and we should not linger. Otherwise, if we cannot restart +// lingering, then we are flushing and also indicate there is more to drain. +func (recBuf *recBuf) tryStopLingerForDraining() bool { + recBuf.lockedStopLinger() + canLinger := recBuf.cl.cfg.linger == 0 + moreToDrain := !canLinger && len(recBuf.batches) > recBuf.batchDrainIdx || + canLinger && (len(recBuf.batches) > recBuf.batchDrainIdx+1 || + len(recBuf.batches) == recBuf.batchDrainIdx+1 && !recBuf.lockedMaybeStartLinger()) + return moreToDrain +} + +// Begins a linger timer unless the producer is being flushed. +func (recBuf *recBuf) lockedMaybeStartLinger() bool { + if recBuf.cl.producer.flushing.Load() > 0 || recBuf.cl.producer.blocked.Load() > 0 { + return false + } + recBuf.lingering = time.AfterFunc(recBuf.cl.cfg.linger, recBuf.sink.maybeDrain) + return true +} + +func (recBuf *recBuf) lockedStopLinger() { + if recBuf.lingering != nil { + recBuf.lingering.Stop() + recBuf.lingering = nil + } +} + +func (recBuf *recBuf) unlingerAndManuallyDrain() { + recBuf.mu.Lock() + defer recBuf.mu.Unlock() + recBuf.lockedStopLinger() + recBuf.sink.maybeDrain() +} + +// bumpRepeatedLoadErr is provided to bump a buffer's number of consecutive +// load errors during metadata updates. 
+// +// Partition load errors are generally temporary (leader/listener/replica not +// available), and this try bump is not expected to do much. If for some reason +// a partition errors for a long time and we are not idempotent, this function +// drops all buffered records. +func (recBuf *recBuf) bumpRepeatedLoadErr(err error) { + recBuf.mu.Lock() + defer recBuf.mu.Unlock() + if len(recBuf.batches) == 0 { + return + } + batch0 := recBuf.batches[0] + batch0.tries++ + + // We need to lock the batch as well because there could be a buffered + // request about to be written. Writing requests only grabs the batch + // mu, not the recBuf mu. + batch0.mu.Lock() + var ( + canFail = !recBuf.cl.idempotent() || batch0.canFailFromLoadErrs // we can only fail if we are not idempotent or if we have no outstanding requests + batch0Fail = batch0.maybeFailErr(&recBuf.cl.cfg) != nil // timeout, retries, or aborting + netErr = isRetryableBrokerErr(err) || isDialNonTimeoutErr(err) // we can fail if this is *not* a network error + retryableKerr = kerr.IsRetriable(err) // we fail if this is not a retryable kerr, + isUnknownLimit = recBuf.checkUnknownFailLimit(err) // or if it is, but it is UnknownTopicOrPartition and we are at our limit + + willFail = canFail && (batch0Fail || !netErr && (!retryableKerr || retryableKerr && isUnknownLimit)) + ) + batch0.isFailingFromLoadErr = willFail + batch0.mu.Unlock() + + recBuf.cl.cfg.logger.Log(LogLevelWarn, "produce partition load error, bumping error count on first stored batch", + "broker", logID(recBuf.sink.nodeID), + "topic", recBuf.topic, + "partition", recBuf.partition, + "err", err, + "can_fail", canFail, + "batch0_should_fail", batch0Fail, + "is_network_err", netErr, + "is_retryable_kerr", retryableKerr, + "is_unknown_limit", isUnknownLimit, + "will_fail", willFail, + ) + + if willFail { + recBuf.failAllRecords(err) + } +} + +// Called locked, if err is an unknown error, bumps our limit, otherwise resets +// it. This returns if we have reached or exceeded the limit. +func (recBuf *recBuf) checkUnknownFailLimit(err error) bool { + if errors.Is(err, kerr.UnknownTopicOrPartition) { + recBuf.unknownFailures++ + } else { + recBuf.unknownFailures = 0 + } + return recBuf.cl.cfg.maxUnknownFailures >= 0 && recBuf.unknownFailures > recBuf.cl.cfg.maxUnknownFailures +} + +// failAllRecords fails all buffered records in this recBuf. +// This is used anywhere where we have to fail and remove an entire batch, +// if we just removed the one batch, the seq num chain would be broken. +// +// - from fatal InitProducerID or AddPartitionsToTxn +// - from client closing +// - if not idempotent && hit retry / timeout limit +// - if batch fails fatally when producing +func (recBuf *recBuf) failAllRecords(err error) { + recBuf.lockedStopLinger() + for _, batch := range recBuf.batches { + // We need to guard our clearing of records against a + // concurrent produceRequest's write, which can have this batch + // buffered wile we are failing. + // + // We do not need to worry about concurrent recBuf + // modifications to this batch because the recBuf is already + // locked. + batch.mu.Lock() + records := batch.records + batch.records = nil + batch.mu.Unlock() + + recBuf.cl.producer.promiseBatch(batchPromise{ + recs: records, + err: err, + }) + } + recBuf.resetBatchDrainIdx() + recBuf.buffered.Store(0) + recBuf.batches = nil +} + +// clearFailing clears a buffer's failing state if it is failing. 
+// +// This is called when a buffer is added to a sink (to clear a failing state +// from migrating buffers between sinks) or when a metadata update sees the +// sink is still on the same source. +func (recBuf *recBuf) clearFailing() { + recBuf.mu.Lock() + defer recBuf.mu.Unlock() + + recBuf.failing = false + if len(recBuf.batches) != recBuf.batchDrainIdx { + recBuf.sink.maybeDrain() + } +} + +func (recBuf *recBuf) resetBatchDrainIdx() { + recBuf.seq = recBuf.batch0Seq + recBuf.batchDrainIdx = 0 +} + +// promisedRec ties a record with the callback that will be called once +// a batch is finally written and receives a response. +type promisedRec struct { + ctx context.Context + promise func(*Record, error) + *Record +} + +func (pr promisedRec) cancelingCtx() context.Context { + if pr.ctx.Done() != nil { + return pr.ctx + } + if pr.Context.Done() != nil { + return pr.Context + } + return nil +} + +// recBatch is the type used for buffering records before they are written. +type recBatch struct { + owner *recBuf // who owns us + + tries int64 // if this was sent before and is thus now immutable + + // We can only fail a batch if we have never issued it, or we have + // issued it and have received a response. If we do not receive a + // response, we cannot know whether we actually wrote bytes that Kafka + // processed or not. So, we set this to false every time we issue a + // request with this batch, and then reset it to true whenever we + // process a response. + canFailFromLoadErrs bool + // If we are going to fail the batch in bumpRepeatedLoadErr, we need to + // set this bool to true. There could be a concurrent request about to + // be written. See more comments below where this is used. + isFailingFromLoadErr bool + + wireLength int32 // tracks total size this batch would currently encode as, including length prefix + v1wireLength int32 // same as wireLength, but for message set v1 + + attrs int16 // updated during apending; read and converted to RecordAttrs on success + firstTimestamp int64 // since unix epoch, in millis + maxTimestampDelta int64 + + mu sync.Mutex // guards appendTo's reading of records against failAllRecords emptying it + records []promisedRec // record w/ length, ts calculated +} + +// Returns an error if the batch should fail. +func (b *recBatch) maybeFailErr(cfg *cfg) error { + if len(b.records) > 0 { + r0 := &b.records[0] + select { + case <-r0.ctx.Done(): + return r0.ctx.Err() + case <-r0.Context.Done(): + return r0.Context.Err() + default: + } + } + switch { + case b.isTimedOut(cfg.recordTimeout): + return ErrRecordTimeout + case b.tries >= cfg.recordRetries: + return ErrRecordRetries + case b.owner.cl.producer.isAborting(): + return ErrAborting + } + return nil +} + +func (b *recBatch) v0wireLength() int32 { return b.v1wireLength - 8 } // no timestamp +func (b *recBatch) batchLength() int32 { return b.wireLength - 4 } // no length prefix +func (b *recBatch) flexibleWireLength() int32 { // uvarint length prefix + batchLength := b.batchLength() + return int32(kbin.UvarintLen(uvar32(batchLength))) + batchLength +} + +// appendRecord saves a new record to a batch. +// +// This is called under the owning recBuf's mu, meaning records cannot be +// concurrently modified by failing. This batch cannot actively be used +// in a request, so we do not need to worry about a concurrent read. 
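Record timestamps are truncated to milliseconds when buffered (see bufferRecord above), and a batch stores only the first record's millisecond timestamp plus per-record deltas (the firstTimestamp and maxTimestampDelta fields above, filled in by appendRecord below). A tiny sketch of that arithmetic:

```go
package main

import (
	"fmt"
	"time"
)

// Timestamps are truncated to milliseconds when buffered; the batch keeps the
// first record's ms timestamp and each later record only encodes a delta.
func main() {
	now := time.Now().Truncate(time.Millisecond)
	first := now.UnixNano() / 1e6 // batch firstTimestamp, ms since the unix epoch

	later := now.Add(250 * time.Millisecond)
	delta := later.UnixNano()/1e6 - first

	fmt.Println("firstTimestamp(ms):", first, "delta(ms):", delta) // delta is 250
}
```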
+func (b *recBatch) appendRecord(pr promisedRec, nums recordNumbers) { + b.wireLength += nums.wireLength() + b.v1wireLength += messageSet1Length(pr.Record) + if len(b.records) == 0 { + b.firstTimestamp = pr.Timestamp.UnixNano() / 1e6 + } else if nums.tsDelta > b.maxTimestampDelta { + b.maxTimestampDelta = nums.tsDelta + } + b.records = append(b.records, pr) +} + +// newRecordBatch returns a new record batch for a topic and partition. +func (recBuf *recBuf) newRecordBatch() *recBatch { + const recordBatchOverhead = 4 + // array len + 8 + // firstOffset + 4 + // batchLength + 4 + // partitionLeaderEpoch + 1 + // magic + 4 + // crc + 2 + // attributes + 4 + // lastOffsetDelta + 8 + // firstTimestamp + 8 + // maxTimestamp + 8 + // producerID + 2 + // producerEpoch + 4 + // seq + 4 // record array length + return &recBatch{ + owner: recBuf, + records: recBuf.cl.prsPool.get()[:0], + wireLength: recordBatchOverhead, + + canFailFromLoadErrs: true, // until we send this batch, we can fail it + } +} + +type prsPool struct{ p *sync.Pool } + +func newPrsPool() prsPool { + return prsPool{ + p: &sync.Pool{New: func() any { r := make([]promisedRec, 10); return &r }}, + } +} + +func (p prsPool) get() []promisedRec { return (*p.p.Get().(*[]promisedRec))[:0] } +func (p prsPool) put(s []promisedRec) { p.p.Put(&s) } + +// isOwnersFirstBatch returns if the batch in a recBatch is the first batch in +// a records. We only ever want to update batch / buffer logic if the batch is +// the first in the buffer. +func (b *recBatch) isOwnersFirstBatch() bool { + return len(b.owner.batches) > 0 && b.owner.batches[0] == b +} + +// Returns whether the first record in a batch is past the limit. +func (b *recBatch) isTimedOut(limit time.Duration) bool { + if limit == 0 { + return false + } + return time.Since(b.records[0].Timestamp) > limit +} + +// Decrements the inflight count for this batch. +// +// If the inflight count hits zero, this potentially re-triggers a drain on the +// *current* sink. A concurrent metadata update could have moved the recBuf to +// a different sink; that sink will not drain this recBuf until all requests on +// the old sink are finished. +// +// This is always called in the produce request path, not anywhere else (i.e. +// not failAllRecords). We want inflight decrementing to be the last thing that +// happens always for every request. It does not matter if the records were +// independently failed: from the request issuing perspective, the batch is +// still inflight. +func (b *recBatch) decInflight() { + recBuf := b.owner + recBuf.inflight-- + if recBuf.inflight != 0 { + return + } + recBuf.inflightOnSink = nil + if recBuf.batchDrainIdx != len(recBuf.batches) { + recBuf.sink.maybeDrain() + } +} + +//////////////////// +// produceRequest // +//////////////////// + +// produceRequest is a kmsg.Request that is used when we want to +// flush our buffered records. +// +// It is the same as kmsg.ProduceRequest, but with a custom AppendTo. +type produceRequest struct { + version int16 + + backoffSeq uint32 + + txnID *string + acks int16 + timeout int32 + batches seqRecBatches + + producerID int64 + producerEpoch int16 + + // Initialized in AppendTo, metrics tracks uncompressed & compressed + // sizes (in byteS) of each batch. + // + // We use this in handleReqResp for the OnProduceHook. + metrics produceMetrics + hasHook bool + + compressor *compressor + + // wireLength is initially the size of sending a produce request, + // including the request header, with no topics. 
We start with the + // non-flexible size because it is strictly larger than flexible, but + // we use the proper flexible numbers when calculating. + wireLength int32 + wireLengthLimit int32 +} + +type produceMetrics map[string]map[int32]ProduceBatchMetrics + +func (p produceMetrics) hook(cfg *cfg, br *broker) { + if len(p) == 0 { + return + } + var hooks []HookProduceBatchWritten + cfg.hooks.each(func(h Hook) { + if h, ok := h.(HookProduceBatchWritten); ok { + hooks = append(hooks, h) + } + }) + if len(hooks) == 0 { + return + } + go func() { + for _, h := range hooks { + for topic, partitions := range p { + for partition, metrics := range partitions { + h.OnProduceBatchWritten(br.meta, topic, partition, metrics) + } + } + } + }() +} + +func (p *produceRequest) idempotent() bool { return p.producerID >= 0 } + +func (p *produceRequest) tryAddBatch(produceVersion int32, recBuf *recBuf, batch *recBatch) bool { + batchWireLength, flexible := batch.wireLengthForProduceVersion(produceVersion) + batchWireLength += 4 // int32 partition prefix + + if partitions, exists := p.batches[recBuf.topic]; !exists { + lt := int32(len(recBuf.topic)) + if flexible { + batchWireLength += uvarlen(len(recBuf.topic)) + lt + 1 // compact string len, topic, compact array len for 1 item + } else { + batchWireLength += 2 + lt + 4 // string len, topic, partition array len + } + } else if flexible { + // If the topic exists and we are flexible, adding this + // partition may increase the length of our size prefix. + lastPartitionsLen := uvarlen(len(partitions)) + newPartitionsLen := uvarlen(len(partitions) + 1) + batchWireLength += (newPartitionsLen - lastPartitionsLen) + } + // If we are flexible but do not know it yet, adding partitions may + // increase our length prefix. Since we are pessimistically assuming + // non-flexible, we have 200mil partitions to add before we have to + // worry about hitting 5 bytes vs. the non-flexible 4. We do not worry. + + if p.wireLength+batchWireLength > p.wireLengthLimit { + return false + } + + if recBuf.batches[0] == batch { + if !p.idempotent() || batch.canFailFromLoadErrs { + if err := batch.maybeFailErr(&batch.owner.cl.cfg); err != nil { + recBuf.failAllRecords(err) + return false + } + } + if recBuf.needSeqReset { + recBuf.needSeqReset = false + recBuf.seq = 0 + recBuf.batch0Seq = 0 + } + } + + batch.tries++ + p.wireLength += batchWireLength + p.batches.addBatch( + recBuf.topic, + recBuf.partition, + recBuf.seq, + batch, + ) + return true +} + +// seqRecBatch: a recBatch with a sequence number. 
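tryAddBatch above adds a partition's head batch to the request only while the running wireLength stays under wireLengthLimit; whatever does not fit is left for a later request via the moreToDrain signal. A very loose standalone sketch of that byte budgeting, ignoring the per-topic and flexible-encoding overheads the real code accounts for:

```go
package main

import "fmt"

// pack adds batch sizes greedily until the next one would push the request
// past the write-size limit; anything left over waits for the next request.
func pack(sizes []int, limit int) (taken, rest []int) {
	total := 0
	for i, sz := range sizes {
		if total+sz > limit {
			return taken, sizes[i:]
		}
		total += sz
		taken = append(taken, sz)
	}
	return taken, nil
}

func main() {
	taken, rest := pack([]int{400, 300, 500, 200}, 1000)
	fmt.Println(taken, rest) // [400 300] [500 200]
}
```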
+type seqRecBatch struct { + seq int32 + *recBatch +} + +type seqRecBatches map[string]map[int32]seqRecBatch + +func (rbs *seqRecBatches) addBatch(topic string, part, seq int32, batch *recBatch) { + if *rbs == nil { + *rbs = make(seqRecBatches, 5) + } + topicBatches, exists := (*rbs)[topic] + if !exists { + topicBatches = make(map[int32]seqRecBatch, 1) + (*rbs)[topic] = topicBatches + } + topicBatches[part] = seqRecBatch{seq, batch} +} + +func (rbs *seqRecBatches) addSeqBatch(topic string, part int32, batch seqRecBatch) { + if *rbs == nil { + *rbs = make(seqRecBatches, 5) + } + topicBatches, exists := (*rbs)[topic] + if !exists { + topicBatches = make(map[int32]seqRecBatch, 1) + (*rbs)[topic] = topicBatches + } + topicBatches[part] = batch +} + +func (rbs seqRecBatches) each(fn func(seqRecBatch)) { + for _, partitions := range rbs { + for _, batch := range partitions { + fn(batch) + } + } +} + +func (rbs seqRecBatches) eachOwnerLocked(fn func(seqRecBatch)) { + rbs.each(func(batch seqRecBatch) { + batch.owner.mu.Lock() + defer batch.owner.mu.Unlock() + fn(batch) + }) +} + +func (rbs seqRecBatches) sliced() recBatches { + var batches []*recBatch + for _, partitions := range rbs { + for _, batch := range partitions { + batches = append(batches, batch.recBatch) + } + } + return batches +} + +type recBatches []*recBatch + +func (bs recBatches) eachOwnerLocked(fn func(*recBatch)) { + for _, b := range bs { + b.owner.mu.Lock() + fn(b) + b.owner.mu.Unlock() + } +} + +////////////// +// COUNTING // - this section is all about counting how bytes lay out on the wire +////////////// + +// Returns the non-flexible base produce request length (the request header and +// the request itself with no topics). +// +// See the large comment on maxRecordBatchBytesForTopic for why we always use +// non-flexible (in short: it is strictly larger). +func (cl *Client) baseProduceRequestLength() int32 { + const messageRequestOverhead int32 = 4 + // int32 length prefix + 2 + // int16 key + 2 + // int16 version + 4 + // int32 correlation ID + 2 // int16 client ID len (always non flexible) + // empty tag section skipped; see below + + const produceRequestBaseOverhead int32 = 2 + // int16 transactional ID len (flexible or not, since we cap at 16382) + 2 + // int16 acks + 4 + // int32 timeout + 4 // int32 topics non-flexible array length + // empty tag section skipped; see below + + baseLength := messageRequestOverhead + produceRequestBaseOverhead + if cl.cfg.id != nil { + baseLength += int32(len(*cl.cfg.id)) + } + if cl.cfg.txnID != nil { + baseLength += int32(len(*cl.cfg.txnID)) + } + return baseLength +} + +// Returns the maximum size a record batch can be for this given topic, such +// that if just a **single partition** is fully stuffed with records and we +// only encode that one partition, we will not overflow our configured limits. +// +// The maximum topic length is 249, which has a 2 byte prefix for flexible or +// non-flexible. +// +// Non-flexible versions will have a 4 byte length topic array prefix, a 4 byte +// length partition array prefix. and a 4 byte records array length prefix. +// +// Flexible versions would have a 1 byte length topic array prefix, a 1 byte +// length partition array prefix, up to 5 bytes for the records array length +// prefix, and three empty tag sections resulting in 3 bytes (produce request +// struct, topic struct, partition struct). As well, for the request header +// itself, we have an additional 1 byte tag section (that we currently keep +// empty). 
+// +// Thus in the worst case, we have 14 bytes of prefixes for non-flexible vs. +// 11 bytes for flexible. We default to the more limiting size: non-flexible. +func (cl *Client) maxRecordBatchBytesForTopic(topic string) int32 { + minOnePartitionBatchLength := cl.baseProduceRequestLength() + + 2 + // int16 topic string length prefix length + int32(len(topic)) + + 4 + // int32 partitions array length + 4 + // partition int32 encoding length + 4 // int32 record bytes array length + + wireLengthLimit := cl.cfg.maxBrokerWriteBytes + + recordBatchLimit := wireLengthLimit - minOnePartitionBatchLength + if cfgLimit := cl.cfg.maxRecordBatchBytes; cfgLimit < recordBatchLimit { + recordBatchLimit = cfgLimit + } + return recordBatchLimit +} + +func messageSet0Length(r *Record) int32 { + const length = 4 + // array len + 8 + // offset + 4 + // size + 4 + // crc + 1 + // magic + 1 + // attributes + 4 + // key array bytes len + 4 // value array bytes len + return length + int32(len(r.Key)) + int32(len(r.Value)) +} + +func messageSet1Length(r *Record) int32 { + return messageSet0Length(r) + 8 // timestamp +} + +// Returns the numbers for a record if it were added to the record batch. +func (b *recBatch) calculateRecordNumbers(r *Record) recordNumbers { + tsMillis := r.Timestamp.UnixNano() / 1e6 + tsDelta := tsMillis - b.firstTimestamp + + // If this is to be the first record in the batch, then our timestamp + // delta is actually 0. + if len(b.records) == 0 { + tsDelta = 0 + } + + offsetDelta := int32(len(b.records)) // since called before adding record, delta is the current end + + l := 1 + // attributes, int8 unused + kbin.VarlongLen(tsDelta) + + kbin.VarintLen(offsetDelta) + + kbin.VarintLen(int32(len(r.Key))) + + len(r.Key) + + kbin.VarintLen(int32(len(r.Value))) + + len(r.Value) + + kbin.VarintLen(int32(len(r.Headers))) // varint array len headers + + for _, h := range r.Headers { + l += kbin.VarintLen(int32(len(h.Key))) + + len(h.Key) + + kbin.VarintLen(int32(len(h.Value))) + + len(h.Value) + } + + return recordNumbers{ + lengthField: int32(l), + tsDelta: tsDelta, + } +} + +func uvar32(l int32) uint32 { return 1 + uint32(l) } +func uvarlen(l int) int32 { return int32(kbin.UvarintLen(uvar32(int32(l)))) } + +// recordNumbers tracks a few numbers for a record that is buffered. +type recordNumbers struct { + lengthField int32 // the length field prefix of a record encoded on the wire + tsDelta int64 // the ms delta of when the record was added against the first timestamp +} + +// wireLength is the wire length of a record including its length field prefix. +func (n recordNumbers) wireLength() int32 { + return int32(kbin.VarintLen(n.lengthField)) + n.lengthField +} + +func (b *recBatch) wireLengthForProduceVersion(v int32) (batchWireLength int32, flexible bool) { + batchWireLength = b.wireLength + + // If we do not yet know the produce version, we default to the largest + // size. Our request building sizes will always be an overestimate. 
+ if v < 0 { + v1BatchWireLength := b.v1wireLength + if v1BatchWireLength > batchWireLength { + batchWireLength = v1BatchWireLength + } + flexibleBatchWireLength := b.flexibleWireLength() + if flexibleBatchWireLength > batchWireLength { + batchWireLength = flexibleBatchWireLength + } + } else { + switch v { + case 0, 1: + batchWireLength = b.v0wireLength() + case 2: + batchWireLength = b.v1wireLength + case 3, 4, 5, 6, 7, 8: + batchWireLength = b.wireLength + default: + batchWireLength = b.flexibleWireLength() + flexible = true + } + } + + return +} + +func (b *recBatch) tryBuffer(pr promisedRec, produceVersion, maxBatchBytes int32, abortOnNewBatch bool) (appended, aborted bool) { + nums := b.calculateRecordNumbers(pr.Record) + + batchWireLength, _ := b.wireLengthForProduceVersion(produceVersion) + newBatchLength := batchWireLength + nums.wireLength() + + if b.tries != 0 || newBatchLength > maxBatchBytes { + return false, false + } + if abortOnNewBatch { + return false, true + } + b.appendRecord(pr, nums) + pr.setLengthAndTimestampDelta( + nums.lengthField, + nums.tsDelta, + ) + return true, false +} + +////////////// +// ENCODING // - this section is all about actually writing a produce request +////////////// + +func (*produceRequest) Key() int16 { return 0 } +func (*produceRequest) MaxVersion() int16 { return 10 } +func (p *produceRequest) SetVersion(v int16) { p.version = v } +func (p *produceRequest) GetVersion() int16 { return p.version } +func (p *produceRequest) IsFlexible() bool { return p.version >= 9 } +func (p *produceRequest) AppendTo(dst []byte) []byte { + flexible := p.IsFlexible() + + if p.hasHook { + p.metrics = make(map[string]map[int32]ProduceBatchMetrics) + } + + if p.version >= 3 { + if flexible { + dst = kbin.AppendCompactNullableString(dst, p.txnID) + } else { + dst = kbin.AppendNullableString(dst, p.txnID) + } + } + + dst = kbin.AppendInt16(dst, p.acks) + dst = kbin.AppendInt32(dst, p.timeout) + if flexible { + dst = kbin.AppendCompactArrayLen(dst, len(p.batches)) + } else { + dst = kbin.AppendArrayLen(dst, len(p.batches)) + } + + for topic, partitions := range p.batches { + if flexible { + dst = kbin.AppendCompactString(dst, topic) + dst = kbin.AppendCompactArrayLen(dst, len(partitions)) + } else { + dst = kbin.AppendString(dst, topic) + dst = kbin.AppendArrayLen(dst, len(partitions)) + } + + var tmetrics map[int32]ProduceBatchMetrics + if p.hasHook { + tmetrics = make(map[int32]ProduceBatchMetrics) + p.metrics[topic] = tmetrics + } + + for partition, batch := range partitions { + dst = kbin.AppendInt32(dst, partition) + batch.mu.Lock() + if batch.records == nil || batch.isFailingFromLoadErr { // concurrent failAllRecords OR concurrent bumpRepeatedLoadErr + if flexible { + dst = kbin.AppendCompactNullableBytes(dst, nil) + } else { + dst = kbin.AppendNullableBytes(dst, nil) + } + batch.mu.Unlock() + continue + } + batch.canFailFromLoadErrs = false // we are going to write this batch: the response status is now unknown + var pmetrics ProduceBatchMetrics + if p.version < 3 { + dst, pmetrics = batch.appendToAsMessageSet(dst, uint8(p.version), p.compressor) + } else { + dst, pmetrics = batch.appendTo(dst, p.version, p.producerID, p.producerEpoch, p.txnID != nil, p.compressor) + } + batch.mu.Unlock() + if p.hasHook { + tmetrics[partition] = pmetrics + } + if flexible { + dst = append(dst, 0) + } + } + if flexible { + dst = append(dst, 0) + } + } + if flexible { + dst = append(dst, 0) + } + + return dst +} + +func (*produceRequest) ReadFrom([]byte) error { + 
panic("unreachable -- the client never uses ReadFrom on its internal produceRequest") +} + +func (p *produceRequest) ResponseKind() kmsg.Response { + r := kmsg.NewPtrProduceResponse() + r.Version = p.version + return r +} + +func (b seqRecBatch) appendTo( + in []byte, + version int16, + producerID int64, + producerEpoch int16, + transactional bool, + compressor *compressor, +) (dst []byte, m ProduceBatchMetrics) { // named return so that our defer for flexible versions can modify it + flexible := version >= 9 + dst = in + nullableBytesLen := b.wireLength - 4 // NULLABLE_BYTES leading length, minus itself + nullableBytesLenAt := len(dst) // in case compression adjusting + dst = kbin.AppendInt32(dst, nullableBytesLen) + + // With flexible versions, the array length prefix can be anywhere from + // 1 byte long to 5 bytes long (covering up to 268MB). + // + // We have to add our initial understanding of the array length as a + // uvarint, but if compressing shrinks what that length would encode + // as, we have to shift everything down. + if flexible { + dst = dst[:nullableBytesLenAt] + batchLength := b.batchLength() + dst = kbin.AppendUvarint(dst, uvar32(batchLength)) // compact array non-null prefix + batchAt := len(dst) + defer func() { + batch := dst[batchAt:] + if int32(len(batch)) == batchLength { // we did not compress: simply return + return + } + + // We *only* could have shrunk the batch bytes, so our + // append here will not overwrite anything we need to + // keep. + newDst := kbin.AppendUvarint(dst[:nullableBytesLenAt], uvar32(int32(len(batch)))) + + // If our append did not shorten the length prefix, we + // can just return the prior dst, otherwise we have to + // shift the batch itself down on newDst. + if len(newDst) != batchAt { + dst = append(newDst, batch...) + } + }() + } + + // Below here, we append the actual record batch, which cannot be + // flexible. Everything encodes properly; flexible adjusting is done in + // the defer just above. 
+ + dst = kbin.AppendInt64(dst, 0) // firstOffset, defined as zero for producing + + batchLen := nullableBytesLen - 8 - 4 // length of what follows this field (so, minus what came before and ourself) + batchLenAt := len(dst) // in case compression adjusting + dst = kbin.AppendInt32(dst, batchLen) + + dst = kbin.AppendInt32(dst, -1) // partitionLeaderEpoch, unused in clients + dst = kbin.AppendInt8(dst, 2) // magic, defined as 2 for records v0.11.0+ + + crcStart := len(dst) // fill at end + dst = kbin.AppendInt32(dst, 0) // reserved crc + + attrsAt := len(dst) // in case compression adjusting + b.attrs = 0 + if transactional { + b.attrs |= 0x0010 // bit 5 is the "is transactional" bit + } + dst = kbin.AppendInt16(dst, b.attrs) + dst = kbin.AppendInt32(dst, int32(len(b.records)-1)) // lastOffsetDelta + dst = kbin.AppendInt64(dst, b.firstTimestamp) + dst = kbin.AppendInt64(dst, b.firstTimestamp+b.maxTimestampDelta) + + seq := b.seq + if producerID < 0 { // a negative producer ID means we are not using idempotence + seq = 0 + } + dst = kbin.AppendInt64(dst, producerID) + dst = kbin.AppendInt16(dst, producerEpoch) + dst = kbin.AppendInt32(dst, seq) + + dst = kbin.AppendArrayLen(dst, len(b.records)) + recordsAt := len(dst) + for i, pr := range b.records { + dst = pr.appendTo(dst, int32(i)) + } + + toCompress := dst[recordsAt:] + m.NumRecords = len(b.records) + m.UncompressedBytes = len(toCompress) + m.CompressedBytes = m.UncompressedBytes + + if compressor != nil { + w := byteBuffers.Get().(*bytes.Buffer) + defer byteBuffers.Put(w) + w.Reset() + + compressed, codec := compressor.compress(w, toCompress, version) + if compressed != nil && // nil would be from an error + len(compressed) < len(toCompress) { + // our compressed was shorter: copy over + copy(dst[recordsAt:], compressed) + dst = dst[:recordsAt+len(compressed)] + m.CompressedBytes = len(compressed) + m.CompressionType = uint8(codec) + + // update the few record batch fields we already wrote + savings := int32(len(toCompress) - len(compressed)) + nullableBytesLen -= savings + batchLen -= savings + b.attrs |= int16(codec) + if !flexible { + kbin.AppendInt32(dst[:nullableBytesLenAt], nullableBytesLen) + } + kbin.AppendInt32(dst[:batchLenAt], batchLen) + kbin.AppendInt16(dst[:attrsAt], b.attrs) + } + } + + kbin.AppendInt32(dst[:crcStart], int32(crc32.Checksum(dst[crcStart+4:], crc32c))) + + return dst, m +} + +func (pr promisedRec) appendTo(dst []byte, offsetDelta int32) []byte { + length, tsDelta := pr.lengthAndTimestampDelta() + dst = kbin.AppendVarint(dst, length) + dst = kbin.AppendInt8(dst, 0) // attributes, currently unused + dst = kbin.AppendVarlong(dst, tsDelta) + dst = kbin.AppendVarint(dst, offsetDelta) + dst = kbin.AppendVarintBytes(dst, pr.Key) + dst = kbin.AppendVarintBytes(dst, pr.Value) + dst = kbin.AppendVarint(dst, int32(len(pr.Headers))) + for _, h := range pr.Headers { + dst = kbin.AppendVarintString(dst, h.Key) + dst = kbin.AppendVarintBytes(dst, h.Value) + } + return dst +} + +func (b seqRecBatch) appendToAsMessageSet(dst []byte, version uint8, compressor *compressor) ([]byte, ProduceBatchMetrics) { + var m ProduceBatchMetrics + + nullableBytesLenAt := len(dst) + dst = append(dst, 0, 0, 0, 0) // nullable bytes len + for i, pr := range b.records { + _, tsDelta := pr.lengthAndTimestampDelta() + dst = appendMessageTo( + dst, + version, + 0, + int64(i), + b.firstTimestamp+tsDelta, + pr.Record, + ) + } + + b.attrs = 0 + + // Produce request v0 and v1 uses message set v0, which does not have + // timestamps. 
We set bit 8 in our attrs which corresponds with our own + // kgo.RecordAttrs's bit. The attrs field is unused in a sink / recBuf + // outside of the appending functions or finishing records; if we use + // more bits in our internal RecordAttrs, the below will need to + // change. + if version == 0 || version == 1 { + b.attrs |= 0b1000_0000 + } + + toCompress := dst[nullableBytesLenAt+4:] // skip nullable bytes leading prefix + m.NumRecords = len(b.records) + m.UncompressedBytes = len(toCompress) + m.CompressedBytes = m.UncompressedBytes + + if compressor != nil { + w := byteBuffers.Get().(*bytes.Buffer) + defer byteBuffers.Put(w) + w.Reset() + + compressed, codec := compressor.compress(w, toCompress, int16(version)) + inner := &Record{Value: compressed} + wrappedLength := messageSet0Length(inner) + if version == 2 { + wrappedLength += 8 // timestamp + } + + if compressed != nil && int(wrappedLength) < len(toCompress) { + m.CompressedBytes = int(wrappedLength) + m.CompressionType = uint8(codec) + + b.attrs |= int16(codec) + + dst = appendMessageTo( + dst[:nullableBytesLenAt+4], + version, + int8(codec), + int64(len(b.records)-1), + b.firstTimestamp, + inner, + ) + } + } + + kbin.AppendInt32(dst[:nullableBytesLenAt], int32(len(dst[nullableBytesLenAt+4:]))) + return dst, m +} + +func appendMessageTo( + dst []byte, + version uint8, + attributes int8, + offset int64, + timestamp int64, + r *Record, +) []byte { + magic := version >> 1 + dst = kbin.AppendInt64(dst, offset) + msgSizeStart := len(dst) + dst = append(dst, 0, 0, 0, 0) + crc32Start := len(dst) + dst = append(dst, + 0, 0, 0, 0, + magic, + byte(attributes)) + if magic == 1 { + dst = kbin.AppendInt64(dst, timestamp) + } + dst = kbin.AppendNullableBytes(dst, r.Key) + dst = kbin.AppendNullableBytes(dst, r.Value) + kbin.AppendInt32(dst[:crc32Start], int32(crc32.ChecksumIEEE(dst[crc32Start+4:]))) + kbin.AppendInt32(dst[:msgSizeStart], int32(len(dst[msgSizeStart+4:]))) + return dst +} diff --git a/vendor/github.com/twmb/franz-go/pkg/kgo/source.go b/vendor/github.com/twmb/franz-go/pkg/kgo/source.go new file mode 100644 index 000000000000..0c475d14a941 --- /dev/null +++ b/vendor/github.com/twmb/franz-go/pkg/kgo/source.go @@ -0,0 +1,2326 @@ +package kgo + +import ( + "context" + "encoding/binary" + "fmt" + "hash/crc32" + "slices" + "sort" + "strings" + "sync" + "time" + + "github.com/twmb/franz-go/pkg/kbin" + "github.com/twmb/franz-go/pkg/kerr" + "github.com/twmb/franz-go/pkg/kmsg" +) + +type readerFrom interface { + ReadFrom([]byte) error +} + +// A source consumes from an individual broker. +// +// As long as there is at least one active cursor, a source aims to have *one* +// buffered fetch at all times. As soon as the fetch is taken, a source issues +// another fetch in the background. +type source struct { + cl *Client // our owning client, for cfg, metadata triggering, context, etc. + nodeID int32 // the node ID of the broker this sink belongs to + + // Tracks how many _failed_ fetch requests we have in a row (unable to + // receive a response). Any response, even responses with an ErrorCode + // set, are successful. This field is used for backoff purposes. 
+ consecutiveFailures int + + fetchState workLoop + sem chan struct{} // closed when fetchable, recreated when a buffered fetch exists + buffered bufferedFetch // contains a fetch the source has buffered for polling + + session fetchSession // supports fetch sessions as per KIP-227 + + cursorsMu sync.Mutex + cursors []*cursor // contains all partitions being consumed on this source + cursorsStart int // incremented every fetch req to ensure all partitions are fetched +} + +func (cl *Client) newSource(nodeID int32) *source { + s := &source{ + cl: cl, + nodeID: nodeID, + sem: make(chan struct{}), + } + if cl.cfg.disableFetchSessions { + s.session.kill() + } + close(s.sem) + return s +} + +func (s *source) addCursor(add *cursor) { + s.cursorsMu.Lock() + add.cursorsIdx = len(s.cursors) + s.cursors = append(s.cursors, add) + s.cursorsMu.Unlock() + + // Adding a new cursor may allow a new partition to be fetched. + // We do not need to cancel any current fetch nor kill the session, + // since adding a cursor is non-destructive to work in progress. + // If the session is currently stopped, this is a no-op. + s.maybeConsume() +} + +// Removes a cursor from the source. +// +// The caller should do this with a stopped session if necessary, which +// should clear any buffered fetch and reset the source's session. +func (s *source) removeCursor(rm *cursor) { + s.cursorsMu.Lock() + defer s.cursorsMu.Unlock() + + if rm.cursorsIdx != len(s.cursors)-1 { + s.cursors[rm.cursorsIdx], s.cursors[len(s.cursors)-1] = s.cursors[len(s.cursors)-1], nil + s.cursors[rm.cursorsIdx].cursorsIdx = rm.cursorsIdx + } else { + s.cursors[rm.cursorsIdx] = nil // do not let the memory hang around + } + + s.cursors = s.cursors[:len(s.cursors)-1] + if s.cursorsStart == len(s.cursors) { + s.cursorsStart = 0 + } +} + +// cursor is where we are consuming from for an individual partition. +type cursor struct { + topic string + topicID [16]byte + partition int32 + + unknownIDFails atomicI32 + + keepControl bool // whether to keep control records + + cursorsIdx int // updated under source mutex + + // The source we are currently on. This is modified in two scenarios: + // + // * by metadata when the consumer session is completely stopped + // + // * by a fetch when handling a fetch response that returned preferred + // replicas + // + // This is additionally read within a session when cursor is + // transitioning from used to usable. + source *source + + // useState is an atomic that has two states: unusable and usable. A + // cursor can be used in a fetch request if it is in the usable state. + // Once used, the cursor is unusable, and will be set back to usable + // one the request lifecycle is complete (a usable fetch response, or + // once listing offsets or loading epochs completes). + // + // A cursor can be set back to unusable when sources are stopped. This + // can be done if a group loses a partition, for example. + // + // The used state is exclusively updated by either building a fetch + // request or when the source is stopped. + useState atomicBool + + topicPartitionData // updated in metadata when session is stopped + + // cursorOffset is our epoch/offset that we are consuming. When a fetch + // request is issued, we "freeze" a view of the offset and of the + // leader epoch (see cursorOffsetNext for why the leader epoch). When a + // buffered fetch is taken, we update the cursor. + cursorOffset +} + +// cursorOffset tracks offsets/epochs for a cursor. 
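+//
+// As an example, after a record at offset 41 with leader epoch 3 is consumed,
+// offset becomes 42 (the next offset we will request), lastConsumedEpoch
+// becomes 3, and lastConsumedTime is that record's timestamp.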
+type cursorOffset struct { + // What the cursor is at: we request this offset next. + offset int64 + + // The epoch of the last record we consumed. Also used for KIP-320, if + // we are fenced or we have an offset out of range error, we go into + // the OffsetForLeaderEpoch recovery. The last consumed epoch tells the + // broker which offset we want: either (a) the next offset if the last + // consumed epoch is the current epoch, or (b) the offset of the first + // record in the next epoch. This allows for exact offset resetting and + // data loss detection. + // + // See kmsg.OffsetForLeaderEpochResponseTopicPartition for more + // details. + lastConsumedEpoch int32 + + // If we receive OFFSET_OUT_OF_RANGE, and we previously *know* we + // consumed an offset, we reset to the nearest offset after our prior + // known valid consumed offset. + lastConsumedTime time.Time + + // The current high watermark of the partition. Uninitialized (0) means + // we do not know the HWM, or there is no lag. + hwm int64 +} + +// use, for fetch requests, freezes a view of the cursorOffset. +func (c *cursor) use() *cursorOffsetNext { + // A source using a cursor has exclusive access to the use field by + // virtue of that source building a request during a live session, + // or by virtue of the session being stopped. + c.useState.Store(false) + return &cursorOffsetNext{ + cursorOffset: c.cursorOffset, + from: c, + currentLeaderEpoch: c.leaderEpoch, + } +} + +// unset transitions a cursor to an unusable state when the cursor is no longer +// to be consumed. This is called exclusively after sources are stopped. +// This also unsets the cursor offset, which is assumed to be unused now. +func (c *cursor) unset() { + c.useState.Store(false) + c.setOffset(cursorOffset{ + offset: -1, + lastConsumedEpoch: -1, + hwm: 0, + }) +} + +// usable returns whether a cursor can be used for building a fetch request. +func (c *cursor) usable() bool { + return c.useState.Load() +} + +// allowUsable allows a cursor to be fetched, and is called either in assigning +// offsets, or when a buffered fetch is taken or discarded, or when listing / +// epoch loading finishes. +func (c *cursor) allowUsable() { + c.useState.Swap(true) + c.source.maybeConsume() +} + +// setOffset sets the cursors offset which will be used the next time a fetch +// request is built. This function is called under the source mutex while the +// source is stopped, and the caller is responsible for calling maybeConsume +// after. +func (c *cursor) setOffset(o cursorOffset) { + c.cursorOffset = o +} + +// cursorOffsetNext is updated while processing a fetch response. +// +// When a buffered fetch is taken, we update a cursor with the final values in +// the modified cursor offset. +type cursorOffsetNext struct { + cursorOffset + from *cursor + + // The leader epoch at the time we took this cursor offset snapshot. We + // need to copy this rather than accessing it through `from` because a + // fetch request can be canceled while it is being written (and reading + // the epoch). + // + // The leader field itself is only read within the context of a session + // while the session is alive, thus it needs no such guard. + // + // Basically, any field read in AppendTo needs to be copied into + // cursorOffsetNext. + currentLeaderEpoch int32 +} + +type cursorOffsetPreferred struct { + cursorOffsetNext + preferredReplica int32 +} + +// Moves a cursor from one source to another. 
This is done while handling +// a fetch response, which means within the context of a live session. +func (p *cursorOffsetPreferred) move() { + c := p.from + defer c.allowUsable() + + // Before we migrate the cursor, we check if the destination source + // exists. If not, we do not migrate and instead force a metadata. + + c.source.cl.sinksAndSourcesMu.Lock() + sns, exists := c.source.cl.sinksAndSources[p.preferredReplica] + c.source.cl.sinksAndSourcesMu.Unlock() + + if !exists { + c.source.cl.triggerUpdateMetadataNow("cursor moving to a different broker that is not yet known") + return + } + + // This remove clears the source's session and buffered fetch, although + // we will not have a buffered fetch since moving replicas is called + // before buffering a fetch. + c.source.removeCursor(c) + c.source = sns.source + c.source.addCursor(c) +} + +type cursorPreferreds []cursorOffsetPreferred + +func (cs cursorPreferreds) String() string { + type pnext struct { + p int32 + next int32 + } + ts := make(map[string][]pnext) + for _, c := range cs { + t := c.from.topic + p := c.from.partition + ts[t] = append(ts[t], pnext{p, c.preferredReplica}) + } + tsorted := make([]string, 0, len(ts)) + for t, ps := range ts { + tsorted = append(tsorted, t) + slices.SortFunc(ps, func(l, r pnext) int { + if l.p < r.p { + return -1 + } + if l.p > r.p { + return 1 + } + if l.next < r.next { + return -1 + } + if l.next > r.next { + return 1 + } + return 0 + }) + } + slices.Sort(tsorted) + + sb := new(strings.Builder) + for i, t := range tsorted { + ps := ts[t] + fmt.Fprintf(sb, "%s{", t) + + for j, p := range ps { + if j < len(ps)-1 { + fmt.Fprintf(sb, "%d=>%d, ", p.p, p.next) + } else { + fmt.Fprintf(sb, "%d=>%d", p.p, p.next) + } + } + + if i < len(tsorted)-1 { + fmt.Fprint(sb, "}, ") + } else { + fmt.Fprint(sb, "}") + } + } + return sb.String() +} + +func (cs cursorPreferreds) eachPreferred(fn func(cursorOffsetPreferred)) { + for _, c := range cs { + fn(c) + } +} + +type usedOffsets map[string]map[int32]*cursorOffsetNext + +func (os usedOffsets) eachOffset(fn func(*cursorOffsetNext)) { + for _, ps := range os { + for _, o := range ps { + fn(o) + } + } +} + +func (os usedOffsets) finishUsingAllWithSet() { + os.eachOffset(func(o *cursorOffsetNext) { o.from.setOffset(o.cursorOffset); o.from.allowUsable() }) +} + +func (os usedOffsets) finishUsingAll() { + os.eachOffset(func(o *cursorOffsetNext) { o.from.allowUsable() }) +} + +// bufferedFetch is a fetch response waiting to be consumed by the client. 
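+//
+// A source holds at most one of these at a time: fetch buffers one when a
+// response has errors or records (recreating sem so no further fetch is
+// issued), and taking or discarding the buffered fetch clears it, signals
+// doneFetch back to the consumer session, and closes sem so the source can
+// fetch again.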
+type bufferedFetch struct { + fetch Fetch + + doneFetch chan<- struct{} // when unbuffered, we send down this + usedOffsets usedOffsets // what the offsets will be next if this fetch is used +} + +func (s *source) hook(f *Fetch, buffered, polled bool) { + s.cl.cfg.hooks.each(func(h Hook) { + if buffered { + h, ok := h.(HookFetchRecordBuffered) + if !ok { + return + } + for i := range f.Topics { + t := &f.Topics[i] + for j := range t.Partitions { + p := &t.Partitions[j] + for _, r := range p.Records { + h.OnFetchRecordBuffered(r) + } + } + } + } else { + h, ok := h.(HookFetchRecordUnbuffered) + if !ok { + return + } + for i := range f.Topics { + t := &f.Topics[i] + for j := range t.Partitions { + p := &t.Partitions[j] + for _, r := range p.Records { + h.OnFetchRecordUnbuffered(r, polled) + } + } + } + } + }) + + var nrecs int + var nbytes int64 + for i := range f.Topics { + t := &f.Topics[i] + for j := range t.Partitions { + p := &t.Partitions[j] + nrecs += len(p.Records) + for k := range p.Records { + nbytes += p.Records[k].userSize() + } + } + } + if buffered { + s.cl.consumer.bufferedRecords.Add(int64(nrecs)) + s.cl.consumer.bufferedBytes.Add(nbytes) + } else { + s.cl.consumer.bufferedRecords.Add(-int64(nrecs)) + s.cl.consumer.bufferedBytes.Add(-nbytes) + } +} + +// takeBuffered drains a buffered fetch and updates offsets. +func (s *source) takeBuffered(paused pausedTopics) Fetch { + if len(paused) == 0 { + return s.takeBufferedFn(true, usedOffsets.finishUsingAllWithSet) + } + var strip map[string]map[int32]struct{} + f := s.takeBufferedFn(true, func(os usedOffsets) { + for t, ps := range os { + // If the entire topic is paused, we allowUsable all + // and strip the topic entirely. + pps, ok := paused.t(t) + if !ok { + for _, o := range ps { + o.from.setOffset(o.cursorOffset) + o.from.allowUsable() + } + continue + } + if strip == nil { + strip = make(map[string]map[int32]struct{}) + } + if pps.all { + for _, o := range ps { + o.from.allowUsable() + } + strip[t] = nil // initialize key, for existence-but-len-0 check below + continue + } + stript := make(map[int32]struct{}) + for _, o := range ps { + if _, ok := pps.m[o.from.partition]; ok { + o.from.allowUsable() + stript[o.from.partition] = struct{}{} + continue + } + o.from.setOffset(o.cursorOffset) + o.from.allowUsable() + } + // We only add stript to strip if there are any + // stripped partitions. We could have a paused + // partition that is on another broker, while this + // broker has no paused partitions -- if we add stript + // here, our logic below (stripping this entire topic) + // is more confusing (present nil vs. non-present nil). + if len(stript) > 0 { + strip[t] = stript + } + } + }) + if strip != nil { + keep := f.Topics[:0] + for _, t := range f.Topics { + stript, ok := strip[t.Topic] + if ok { + if len(stript) == 0 { + continue // stripping this entire topic + } + keepp := t.Partitions[:0] + for _, p := range t.Partitions { + if _, ok := stript[p.Partition]; ok { + continue + } + keepp = append(keepp, p) + } + t.Partitions = keepp + } + keep = append(keep, t) + } + f.Topics = keep + } + return f +} + +func (s *source) discardBuffered() { + s.takeBufferedFn(false, usedOffsets.finishUsingAll) +} + +// takeNBuffered takes a limited amount of records from a buffered fetch, +// updating offsets in each partition per records taken. +// +// This only allows a new fetch once every buffered record has been taken. +// +// This returns the number of records taken and whether the source has been +// completely drained. 
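+//
+// For example (illustrative), if 300 records are buffered across two topics
+// and n is 150, the returned Fetch contains the first 150 buffered records,
+// drained is false, and the remainder stays buffered for the next poll.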
+func (s *source) takeNBuffered(paused pausedTopics, n int) (Fetch, int, bool) { + var r Fetch + var taken int + + b := &s.buffered + bf := &b.fetch + for len(bf.Topics) > 0 && n > 0 { + t := &bf.Topics[0] + + // If the topic is outright paused, we allowUsable all + // partitions in the topic and skip the topic entirely. + if paused.has(t.Topic, -1) { + bf.Topics = bf.Topics[1:] + for _, pCursor := range b.usedOffsets[t.Topic] { + pCursor.from.allowUsable() + } + delete(b.usedOffsets, t.Topic) + continue + } + + var rt *FetchTopic + ensureTopicAdded := func() { + if rt != nil { + return + } + r.Topics = append(r.Topics, *t) + rt = &r.Topics[len(r.Topics)-1] + rt.Partitions = nil + } + + tCursors := b.usedOffsets[t.Topic] + + for len(t.Partitions) > 0 && n > 0 { + p := &t.Partitions[0] + + if paused.has(t.Topic, p.Partition) { + t.Partitions = t.Partitions[1:] + pCursor := tCursors[p.Partition] + pCursor.from.allowUsable() + delete(tCursors, p.Partition) + if len(tCursors) == 0 { + delete(b.usedOffsets, t.Topic) + } + continue + } + + ensureTopicAdded() + rt.Partitions = append(rt.Partitions, *p) + rp := &rt.Partitions[len(rt.Partitions)-1] + + take := n + if take > len(p.Records) { + take = len(p.Records) + } + + rp.Records = p.Records[:take:take] + p.Records = p.Records[take:] + + n -= take + taken += take + + pCursor := tCursors[p.Partition] + + if len(p.Records) == 0 { + t.Partitions = t.Partitions[1:] + + pCursor.from.setOffset(pCursor.cursorOffset) + pCursor.from.allowUsable() + delete(tCursors, p.Partition) + if len(tCursors) == 0 { + delete(b.usedOffsets, t.Topic) + } + continue + } + + lastReturnedRecord := rp.Records[len(rp.Records)-1] + pCursor.from.setOffset(cursorOffset{ + offset: lastReturnedRecord.Offset + 1, + lastConsumedEpoch: lastReturnedRecord.LeaderEpoch, + lastConsumedTime: lastReturnedRecord.Timestamp, + hwm: p.HighWatermark, + }) + } + + if len(t.Partitions) == 0 { + bf.Topics = bf.Topics[1:] + } + } + + s.hook(&r, false, true) // unbuffered, polled + + drained := len(bf.Topics) == 0 + if drained { + s.takeBuffered(nil) + } + return r, taken, drained +} + +func (s *source) takeBufferedFn(polled bool, offsetFn func(usedOffsets)) Fetch { + r := s.buffered + s.buffered = bufferedFetch{} + offsetFn(r.usedOffsets) + r.doneFetch <- struct{}{} + close(s.sem) + + s.hook(&r.fetch, false, polled) // unbuffered, potentially polled + + return r.fetch +} + +// createReq actually creates a fetch request. +func (s *source) createReq() *fetchRequest { + req := &fetchRequest{ + maxWait: s.cl.cfg.maxWait, + minBytes: s.cl.cfg.minBytes, + maxBytes: s.cl.cfg.maxBytes.load(), + maxPartBytes: s.cl.cfg.maxPartBytes.load(), + rack: s.cl.cfg.rack, + isolationLevel: s.cl.cfg.isolationLevel, + preferLagFn: s.cl.cfg.preferLagFn, + + // We copy a view of the session for the request, which allows + // modify source while the request may be reading its copy. + session: s.session, + } + + paused := s.cl.consumer.loadPaused() + + s.cursorsMu.Lock() + defer s.cursorsMu.Unlock() + + cursorIdx := s.cursorsStart + for i := 0; i < len(s.cursors); i++ { + c := s.cursors[cursorIdx] + cursorIdx = (cursorIdx + 1) % len(s.cursors) + if !c.usable() || paused.has(c.topic, c.partition) { + continue + } + req.addCursor(c) + } + + // We could have lost our only record buffer just before we grabbed the + // source lock above. 
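+	// If so, len(s.cursors) is zero and the modulo advance below would
+	// panic with a division by zero, hence the guard.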
+ if len(s.cursors) > 0 { + s.cursorsStart = (s.cursorsStart + 1) % len(s.cursors) + } + + return req +} + +func (s *source) maybeConsume() { + if s.fetchState.maybeBegin() { + go s.loopFetch() + } +} + +func (s *source) loopFetch() { + consumer := &s.cl.consumer + session := consumer.loadSession() + + if session == noConsumerSession { + s.fetchState.hardFinish() + // It is possible that we were triggered to consume while we + // had no consumer session, and then *after* loopFetch loaded + // noConsumerSession, the session was saved and triggered to + // consume again. If this function is slow the first time + // around, it could still be running and about to hardFinish. + // The second trigger will do nothing, and then we hardFinish + // and block a new session from actually starting consuming. + // + // To guard against this, after we hard finish, we load the + // session again: if it is *not* noConsumerSession, we trigger + // attempting to consume again. Worst case, the trigger is + // useless and it will exit below when it builds an empty + // request. + sessionNow := consumer.loadSession() + if session != sessionNow { + s.maybeConsume() + } + return + } + + session.incWorker() + defer session.decWorker() + + // After our add, check quickly **without** another select case to + // determine if this context was truly canceled. Any other select that + // has another select case could theoretically race with the other case + // also being selected. + select { + case <-session.ctx.Done(): + s.fetchState.hardFinish() + return + default: + } + + // We receive on canFetch when we can fetch, and we send back when we + // are done fetching. + canFetch := make(chan chan struct{}, 1) + + again := true + for again { + select { + case <-session.ctx.Done(): + s.fetchState.hardFinish() + return + case <-s.sem: + } + + select { + case <-session.ctx.Done(): + s.fetchState.hardFinish() + return + case session.desireFetch() <- canFetch: + } + + select { + case <-session.ctx.Done(): + session.cancelFetchCh <- canFetch + s.fetchState.hardFinish() + return + case doneFetch := <-canFetch: + again = s.fetchState.maybeFinish(s.fetch(session, doneFetch)) + } + } +} + +func (s *source) killSessionOnClose(ctx context.Context) { + br, err := s.cl.brokerOrErr(nil, s.nodeID, errUnknownBroker) + if err != nil { + return + } + s.session.kill() + req := &fetchRequest{ + maxWait: 1, + minBytes: 1, + maxBytes: 1, + maxPartBytes: 1, + rack: s.cl.cfg.rack, + isolationLevel: s.cl.cfg.isolationLevel, + session: s.session, + } + ch := make(chan struct{}) + br.do(ctx, req, func(kmsg.Response, error) { close(ch) }) + <-ch +} + +// fetch is the main logic center of fetching messages. +// +// This is a long function, made much longer by winded documentation, that +// contains a lot of the side effects of fetching and updating. The function +// consists of two main bulks of logic: +// +// - First, issue a request that can be killed if the source needs to be +// stopped. Processing the response modifies no state on the source. +// +// - Second, we keep the fetch response and update everything relevant +// (session, trigger some list or epoch updates, buffer the fetch). +// +// One small part between the first and second step is to update preferred +// replicas. We always keep the preferred replicas from the fetch response +// *even if* the source needs to be stopped. The knowledge of which preferred +// replica to use would not be out of date even if the consumer session is +// changing. 
+func (s *source) fetch(consumerSession *consumerSession, doneFetch chan<- struct{}) (fetched bool) { + req := s.createReq() + + // For all returns, if we do not buffer our fetch, then we want to + // ensure our used offsets are usable again. + var ( + alreadySentToDoneFetch bool + setOffsets bool + buffered bool + ) + defer func() { + if !buffered { + if req.numOffsets > 0 { + if setOffsets { + req.usedOffsets.finishUsingAllWithSet() + } else { + req.usedOffsets.finishUsingAll() + } + } + if !alreadySentToDoneFetch { + doneFetch <- struct{}{} + } + } + }() + + if req.numOffsets == 0 { // cursors could have been set unusable + return + } + + // If our fetch is killed, we want to cancel waiting for the response. + var ( + kresp kmsg.Response + requested = make(chan struct{}) + ctx, cancel = context.WithCancel(consumerSession.ctx) + ) + defer cancel() + + br, err := s.cl.brokerOrErr(ctx, s.nodeID, errUnknownBroker) + if err != nil { + close(requested) + } else { + br.do(ctx, req, func(k kmsg.Response, e error) { + kresp, err = k, e + close(requested) + }) + } + + select { + case <-requested: + fetched = true + case <-ctx.Done(): + return + } + + var didBackoff bool + backoff := func(why interface{}) { + // We preemptively allow more fetches (since we are not buffering) + // and reset our session because of the error (who knows if kafka + // processed the request but the client failed to receive it). + doneFetch <- struct{}{} + alreadySentToDoneFetch = true + s.session.reset() + didBackoff = true + + s.cl.triggerUpdateMetadata(false, fmt.Sprintf("opportunistic load during source backoff: %v", why)) // as good a time as any + s.consecutiveFailures++ + after := time.NewTimer(s.cl.cfg.retryBackoff(s.consecutiveFailures)) + defer after.Stop() + select { + case <-after.C: + case <-ctx.Done(): + } + } + defer func() { + if !didBackoff { + s.consecutiveFailures = 0 + } + }() + + // If we had an error, we backoff. Killing a fetch quits the backoff, + // but that is fine; we may just re-request too early and fall into + // another backoff. + if err != nil { + backoff(err) + return + } + + resp := kresp.(*kmsg.FetchResponse) + + var ( + fetch Fetch + reloadOffsets listOrEpochLoads + preferreds cursorPreferreds + allErrsStripped bool + updateWhy multiUpdateWhy + handled = make(chan struct{}) + ) + + // Theoretically, handleReqResp could take a bit of CPU time due to + // decompressing and processing the response. We do this in a goroutine + // to allow the session to be canceled at any moment. + // + // Processing the response only needs the source's nodeID and client. + go func() { + defer close(handled) + fetch, reloadOffsets, preferreds, allErrsStripped, updateWhy = s.handleReqResp(br, req, resp) + }() + + select { + case <-handled: + case <-ctx.Done(): + return + } + + // The logic below here should be relatively quick. + // + // Note that fetch runs entirely in the context of a consumer session. + // loopFetch does not return until this function does, meaning we + // cannot concurrently issue a second fetch for partitions that are + // being processed below. + + deleteReqUsedOffset := func(topic string, partition int32) { + t := req.usedOffsets[topic] + delete(t, partition) + if len(t) == 0 { + delete(req.usedOffsets, topic) + } + } + + // Before updating the source, we move all cursors that have new + // preferred replicas and remove them from being tracked in our req + // offsets. We also remove the reload offsets from our req offsets. 
+ // + // These two removals transition responsibility for finishing using the + // cursor from the request's used offsets to the new source or the + // reloading. + if len(preferreds) > 0 { + s.cl.cfg.logger.Log(LogLevelInfo, "fetch partitions returned preferred replicas", + "from_broker", s.nodeID, + "moves", preferreds.String(), + ) + } + preferreds.eachPreferred(func(c cursorOffsetPreferred) { + c.move() + deleteReqUsedOffset(c.from.topic, c.from.partition) + }) + reloadOffsets.each(deleteReqUsedOffset) + + // The session on the request was updated; we keep those updates. + s.session = req.session + + // handleReqResp only parses the body of the response, not the top + // level error code. + // + // The top level error code is related to fetch sessions only, and if + // there was an error, the body was empty (so processing is basically a + // no-op). We process the fetch session error now. + switch err := kerr.ErrorForCode(resp.ErrorCode); err { + case kerr.FetchSessionIDNotFound: + if s.session.epoch == 0 { + // If the epoch was zero, the broker did not even + // establish a session for us (and thus is maxed on + // sessions). We stop trying. + s.cl.cfg.logger.Log(LogLevelInfo, "session failed with SessionIDNotFound while trying to establish a session; broker likely maxed on sessions; continuing on without using sessions", "broker", logID(s.nodeID)) + s.session.kill() + } else { + s.cl.cfg.logger.Log(LogLevelInfo, "received SessionIDNotFound from our in use session, our session was likely evicted; resetting session", "broker", logID(s.nodeID)) + s.session.reset() + } + return + case kerr.InvalidFetchSessionEpoch: + s.cl.cfg.logger.Log(LogLevelInfo, "resetting fetch session", "broker", logID(s.nodeID), "err", err) + s.session.reset() + return + + case kerr.FetchSessionTopicIDError, kerr.InconsistentTopicID: + s.cl.cfg.logger.Log(LogLevelInfo, "topic id issues, resetting session and updating metadata", "broker", logID(s.nodeID), "err", err) + s.session.reset() + s.cl.triggerUpdateMetadataNow("topic id issues") + return + } + + // At this point, we have successfully processed the response. Even if + // the response contains no records, we want to keep any offset + // advancements (we could have consumed only control records, we must + // advance past them). + setOffsets = true + + if resp.Version < 7 || resp.SessionID <= 0 { + // If the version is less than 7, we cannot use fetch sessions, + // so we kill them on the first response. + s.session.kill() + } else { + s.session.bumpEpoch(resp.SessionID) + } + + // If we have a reason to update (per-partition fetch errors), and the + // reason is not just unknown topic or partition, then we immediately + // update metadata. We avoid updating for unknown because it _likely_ + // means the topic does not exist and reloading is wasteful. We only + // trigger a metadata update if we have no reload offsets. Having + // reload offsets *always* triggers a metadata update. + if updateWhy != nil { + why := updateWhy.reason(fmt.Sprintf("fetch had inner topic errors from broker %d", s.nodeID)) + // loadWithSessionNow triggers a metadata update IF there are + // offsets to reload. If there are no offsets to reload, we + // trigger one here. 
+ if !reloadOffsets.loadWithSessionNow(consumerSession, why) { + if updateWhy.isOnly(kerr.UnknownTopicOrPartition) || updateWhy.isOnly(kerr.UnknownTopicID) { + s.cl.triggerUpdateMetadata(false, why) + } else { + s.cl.triggerUpdateMetadataNow(why) + } + } + } + + if fetch.hasErrorsOrRecords() { + buffered = true + s.buffered = bufferedFetch{ + fetch: fetch, + doneFetch: doneFetch, + usedOffsets: req.usedOffsets, + } + s.sem = make(chan struct{}) + s.hook(&fetch, true, false) // buffered, not polled + s.cl.consumer.addSourceReadyForDraining(s) + } else if allErrsStripped { + // If we stripped all errors from the response, we are likely + // fetching from topics that were deleted. We want to back off + // a bit rather than spin-loop immediately re-requesting + // deleted topics. + backoff("empty fetch response due to all partitions having retryable errors") + } + return +} + +// Parses a fetch response into a Fetch, offsets to reload, and whether +// metadata needs updating. +// +// This only uses a source's broker and client, and thus does not need +// the source mutex. +// +// This function, and everything it calls, is side effect free. +func (s *source) handleReqResp(br *broker, req *fetchRequest, resp *kmsg.FetchResponse) ( + f Fetch, + reloadOffsets listOrEpochLoads, + preferreds cursorPreferreds, + allErrsStripped bool, + updateWhy multiUpdateWhy, +) { + f = Fetch{Topics: make([]FetchTopic, 0, len(resp.Topics))} + var ( + debugWhyStripped multiUpdateWhy + numErrsStripped int + kip320 = s.cl.supportsOffsetForLeaderEpoch() + kmove kip951move + ) + defer kmove.maybeBeginMove(s.cl) + + strip := func(t string, p int32, err error) { + numErrsStripped++ + if s.cl.cfg.logger.Level() < LogLevelDebug { + return + } + debugWhyStripped.add(t, p, err) + } + + for _, rt := range resp.Topics { + topic := rt.Topic + // v13 only uses topic IDs, so we have to map the response + // uuid's to our string topics. + if resp.Version >= 13 { + topic = req.id2topic[rt.TopicID] + } + + // We always include all cursors on this source in the fetch; + // we should not receive any topics or partitions we do not + // expect. + topicOffsets, ok := req.usedOffsets[topic] + if !ok { + s.cl.cfg.logger.Log(LogLevelWarn, "broker returned topic from fetch that we did not ask for", + "broker", logID(s.nodeID), + "topic", topic, + ) + continue + } + + fetchTopic := FetchTopic{ + Topic: topic, + Partitions: make([]FetchPartition, 0, len(rt.Partitions)), + } + + for i := range rt.Partitions { + rp := &rt.Partitions[i] + partition := rp.Partition + partOffset, ok := topicOffsets[partition] + if !ok { + s.cl.cfg.logger.Log(LogLevelWarn, "broker returned partition from fetch that we did not ask for", + "broker", logID(s.nodeID), + "topic", topic, + "partition", partition, + ) + continue + } + + // If we are fetching from the replica already, Kafka replies with a -1 + // preferred read replica. If Kafka replies with a preferred replica, + // it sends no records. + if preferred := rp.PreferredReadReplica; resp.Version >= 11 && preferred >= 0 { + preferreds = append(preferreds, cursorOffsetPreferred{ + *partOffset, + preferred, + }) + continue + } + + fp := partOffset.processRespPartition(br, rp, s.cl.decompressor, s.cl.cfg.hooks) + if fp.Err != nil { + if moving := kmove.maybeAddFetchPartition(resp, rp, partOffset.from); moving { + strip(topic, partition, fp.Err) + continue + } + updateWhy.add(topic, partition, fp.Err) + } + + // We only keep the partition if it has no error, or an + // error we do not internally retry. 
+ var keep bool + switch fp.Err { + default: + if kerr.IsRetriable(fp.Err) && !s.cl.cfg.keepRetryableFetchErrors { + // UnknownLeaderEpoch: our meta is newer than the broker we fetched from + // OffsetNotAvailable: fetched from out of sync replica or a behind in-sync one (KIP-392 case 1 and case 2) + // UnknownTopicID: kafka has not synced the state on all brokers + // And other standard retryable errors. + strip(topic, partition, fp.Err) + } else { + // - bad auth + // - unsupported compression + // - unsupported message version + // - unknown error + // - or, no error + keep = true + } + + case nil: + partOffset.from.unknownIDFails.Store(0) + keep = true + + case kerr.UnknownTopicID: + // We need to keep UnknownTopicID even though it is + // retryable, because encountering this error means + // the topic has been recreated and we will never + // consume the topic again anymore. This is an error + // worth bubbling up. + // + // Kafka will actually return this error for a brief + // window immediately after creating a topic for the + // first time, meaning the controller has not yet + // propagated to the leader that it is now the leader + // of a new partition. We need to ignore this error + // for a little bit. + if fails := partOffset.from.unknownIDFails.Add(1); fails > 5 { + partOffset.from.unknownIDFails.Add(-1) + keep = true + } else if s.cl.cfg.keepRetryableFetchErrors { + keep = true + } else { + strip(topic, partition, fp.Err) + } + + case kerr.OffsetOutOfRange: + // If we are out of range, we reset to what we can. + // With Kafka >= 2.1, we should only get offset out + // of range if we fetch before the start, but a user + // could start past the end and want to reset to + // the end. We respect that. + // + // KIP-392 (case 3) specifies that if we are consuming + // from a follower, then if our offset request is before + // the low watermark, we list offsets from the follower. + // + // KIP-392 (case 4) specifies that if we are consuming + // a follower and our request is larger than the high + // watermark, then we should first check for truncation + // from the leader and then if we still get out of + // range, reset with list offsets. + // + // It further goes on to say that "out of range errors + // due to ISR propagation delays should be extremely + // rare". Rather than falling back to listing offsets, + // we stay in a cycle of validating the leader epoch + // until the follower has caught up. + // + // In all cases except case 4, we also have to check if + // no reset offset was configured. If so, we ignore + // trying to reset and instead keep our failed partition. 
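+				//
+				// addList, just below, implements that choice: it resets to
+				// near the last consumed timestamp if we had already consumed
+				// something, and otherwise falls back to the configured
+				// ConsumeResetOffset.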
+ addList := func(replica int32, log bool) { + if s.cl.cfg.resetOffset.noReset { + keep = true + } else if !partOffset.from.lastConsumedTime.IsZero() { + reloadOffsets.addLoad(topic, partition, loadTypeList, offsetLoad{ + replica: replica, + Offset: NewOffset().AfterMilli(partOffset.from.lastConsumedTime.UnixMilli()), + }) + if log { + s.cl.cfg.logger.Log(LogLevelWarn, "received OFFSET_OUT_OF_RANGE, resetting to the nearest offset; either you were consuming too slowly and the broker has deleted the segment you were in the middle of consuming, or the broker has lost data and has not yet transferred leadership", + "broker", logID(s.nodeID), + "topic", topic, + "partition", partition, + "prior_offset", partOffset.offset, + ) + } + } else { + reloadOffsets.addLoad(topic, partition, loadTypeList, offsetLoad{ + replica: replica, + Offset: s.cl.cfg.resetOffset, + }) + if log { + s.cl.cfg.logger.Log(LogLevelInfo, "received OFFSET_OUT_OF_RANGE on the first fetch, resetting to the configured ConsumeResetOffset", + "broker", logID(s.nodeID), + "topic", topic, + "partition", partition, + "prior_offset", partOffset.offset, + ) + } + } + } + + switch { + case s.nodeID == partOffset.from.leader: // non KIP-392 case + addList(-1, true) + + case partOffset.offset < fp.LogStartOffset: // KIP-392 case 3 + addList(s.nodeID, false) + + default: // partOffset.offset > fp.HighWatermark, KIP-392 case 4 + if kip320 { + reloadOffsets.addLoad(topic, partition, loadTypeEpoch, offsetLoad{ + replica: -1, + Offset: Offset{ + at: partOffset.offset, + epoch: partOffset.lastConsumedEpoch, + }, + }) + } else { + // If the broker does not support offset for leader epoch but + // does support follower fetching for some reason, we have to + // fallback to listing. + addList(-1, true) + } + } + + case kerr.FencedLeaderEpoch: + // With fenced leader epoch, we notify an error only + // if necessary after we find out if loss occurred. + // If we have consumed nothing, then we got unlucky + // by being fenced right after we grabbed metadata. + // We just refresh metadata and try again. + // + // It would be odd for a broker to reply we are fenced + // but not support offset for leader epoch, so we do + // not check KIP-320 support here. + if partOffset.lastConsumedEpoch >= 0 { + reloadOffsets.addLoad(topic, partition, loadTypeEpoch, offsetLoad{ + replica: -1, + Offset: Offset{ + at: partOffset.offset, + epoch: partOffset.lastConsumedEpoch, + }, + }) + } + } + + if keep { + fetchTopic.Partitions = append(fetchTopic.Partitions, fp) + } + } + + if len(fetchTopic.Partitions) > 0 { + f.Topics = append(f.Topics, fetchTopic) + } + } + + if s.cl.cfg.logger.Level() >= LogLevelDebug && len(debugWhyStripped) > 0 { + s.cl.cfg.logger.Log(LogLevelDebug, "fetch stripped partitions", "why", debugWhyStripped.reason("")) + } + + return f, reloadOffsets, preferreds, req.numOffsets == numErrsStripped, updateWhy +} + +// processRespPartition processes all records in all potentially compressed +// batches (or message sets). 
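+//
+// The partition's raw bytes may contain any mix of message set v0, message
+// set v1, and record batch (v2) payloads; the magic byte in each payload
+// selects how it is decoded and which CRC is validated (IEEE for message
+// sets, Castagnoli for record batches).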
+func (o *cursorOffsetNext) processRespPartition(br *broker, rp *kmsg.FetchResponseTopicPartition, decompressor *decompressor, hooks hooks) FetchPartition { + fp := FetchPartition{ + Partition: rp.Partition, + Err: kerr.ErrorForCode(rp.ErrorCode), + HighWatermark: rp.HighWatermark, + LastStableOffset: rp.LastStableOffset, + LogStartOffset: rp.LogStartOffset, + } + if rp.ErrorCode == 0 { + o.hwm = rp.HighWatermark + } + + var aborter aborter + if br.cl.cfg.isolationLevel == 1 { + aborter = buildAborter(rp) + } + + // A response could contain any of message v0, message v1, or record + // batches, and this is solely dictated by the magic byte (not the + // fetch response version). The magic byte is located at byte 17. + // + // 1 thru 8: int64 offset / first offset + // 9 thru 12: int32 length + // 13 thru 16: crc (magic 0 or 1), or partition leader epoch (magic 2) + // 17: magic + // + // We decode and validate similarly for messages and record batches, so + // we "abstract" away the high level stuff into a check function just + // below, and then switch based on the magic for how to process. + var ( + in = rp.RecordBatches + + r readerFrom + kind string + length int32 + lengthField *int32 + crcField *int32 + crcTable *crc32.Table + crcAt int + + check = func() bool { + // If we call into check, we know we have a valid + // length, so we should be at least able to parse our + // top level struct and validate the length and CRC. + if err := r.ReadFrom(in[:length]); err != nil { + fp.Err = fmt.Errorf("unable to read %s, not enough data", kind) + return false + } + if length := int32(len(in[12:length])); length != *lengthField { + fp.Err = fmt.Errorf("encoded length %d does not match read length %d", *lengthField, length) + return false + } + // We have already validated that the slice is at least + // 17 bytes, but our CRC may be later (i.e. RecordBatch + // starts at byte 21). Ensure there is at least space + // for a CRC. 
+ if len(in) < crcAt { + fp.Err = fmt.Errorf("length %d is too short to allow for a crc", len(in)) + return false + } + if crcCalc := int32(crc32.Checksum(in[crcAt:length], crcTable)); crcCalc != *crcField { + fp.Err = fmt.Errorf("encoded crc %x does not match calculated crc %x", *crcField, crcCalc) + return false + } + return true + } + ) + + for len(in) > 17 && fp.Err == nil { + offset := int64(binary.BigEndian.Uint64(in)) + length = int32(binary.BigEndian.Uint32(in[8:])) + length += 12 // for the int64 offset we skipped and int32 length field itself + if len(in) < int(length) { + break + } + + switch magic := in[16]; magic { + case 0: + m := new(kmsg.MessageV0) + kind = "message v0" + lengthField = &m.MessageSize + crcField = &m.CRC + crcTable = crc32.IEEETable + crcAt = 16 + r = m + case 1: + m := new(kmsg.MessageV1) + kind = "message v1" + lengthField = &m.MessageSize + crcField = &m.CRC + crcTable = crc32.IEEETable + crcAt = 16 + r = m + case 2: + rb := new(kmsg.RecordBatch) + kind = "record batch" + lengthField = &rb.Length + crcField = &rb.CRC + crcTable = crc32c + crcAt = 21 + r = rb + + default: + fp.Err = fmt.Errorf("unknown magic %d; message offset is %d and length is %d, skipping and setting to next offset", magic, offset, length) + if next := offset + 1; next > o.offset { + o.offset = next + } + return fp + } + + if !check() { + break + } + + in = in[length:] + + var m FetchBatchMetrics + + switch t := r.(type) { + case *kmsg.MessageV0: + m.CompressedBytes = int(length) // for message sets, we include the message set overhead in length + m.CompressionType = uint8(t.Attributes) & 0b0000_0111 + m.NumRecords, m.UncompressedBytes = o.processV0OuterMessage(&fp, t, decompressor) + + case *kmsg.MessageV1: + m.CompressedBytes = int(length) + m.CompressionType = uint8(t.Attributes) & 0b0000_0111 + m.NumRecords, m.UncompressedBytes = o.processV1OuterMessage(&fp, t, decompressor) + + case *kmsg.RecordBatch: + m.CompressedBytes = len(t.Records) // for record batches, we only track the record batch length + m.CompressionType = uint8(t.Attributes) & 0b0000_0111 + m.NumRecords, m.UncompressedBytes = o.processRecordBatch(&fp, t, aborter, decompressor) + } + + if m.UncompressedBytes == 0 { + m.UncompressedBytes = m.CompressedBytes + } + hooks.each(func(h Hook) { + if h, ok := h.(HookFetchBatchRead); ok { + h.OnFetchBatchRead(br.meta, o.from.topic, o.from.partition, m) + } + }) + } + + return fp +} + +type aborter map[int64][]int64 + +func buildAborter(rp *kmsg.FetchResponseTopicPartition) aborter { + if len(rp.AbortedTransactions) == 0 { + return nil + } + a := make(aborter) + for _, abort := range rp.AbortedTransactions { + a[abort.ProducerID] = append(a[abort.ProducerID], abort.FirstOffset) + } + return a +} + +func (a aborter) shouldAbortBatch(b *kmsg.RecordBatch) bool { + if len(a) == 0 || b.Attributes&0b0001_0000 == 0 { + return false + } + pidAborts := a[b.ProducerID] + if len(pidAborts) == 0 { + return false + } + // If the first offset in this batch is less than the first offset + // aborted, then this batch is not aborted. 
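+	// For example, if producer 12 aborted transactions starting at offsets
+	// 100 and 250, a transactional batch from producer 12 with FirstOffset 90
+	// is not aborted, while one with FirstOffset 120 is.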
+ if b.FirstOffset < pidAborts[0] { + return false + } + return true +} + +func (a aborter) trackAbortedPID(producerID int64) { + remaining := a[producerID][1:] + if len(remaining) == 0 { + delete(a, producerID) + } else { + a[producerID] = remaining + } +} + +////////////////////////////////////// +// processing records to fetch part // +////////////////////////////////////// + +// readRawRecords reads n records from in and returns them, returning early if +// there were partial records. +func readRawRecords(n int, in []byte) []kmsg.Record { + rs := make([]kmsg.Record, n) + for i := 0; i < n; i++ { + length, used := kbin.Varint(in) + total := used + int(length) + if used == 0 || length < 0 || len(in) < total { + return rs[:i] + } + if err := (&rs[i]).ReadFrom(in[:total]); err != nil { + return rs[:i] + } + in = in[total:] + } + return rs +} + +func (o *cursorOffsetNext) processRecordBatch( + fp *FetchPartition, + batch *kmsg.RecordBatch, + aborter aborter, + decompressor *decompressor, +) (int, int) { + if batch.Magic != 2 { + fp.Err = fmt.Errorf("unknown batch magic %d", batch.Magic) + return 0, 0 + } + lastOffset := batch.FirstOffset + int64(batch.LastOffsetDelta) + if lastOffset < o.offset { + // If the last offset in this batch is less than what we asked + // for, we got a batch that we entirely do not need. We can + // avoid all work (although we should not get this batch). + return 0, 0 + } + + rawRecords := batch.Records + if compression := byte(batch.Attributes & 0x0007); compression != 0 { + var err error + if rawRecords, err = decompressor.decompress(rawRecords, compression); err != nil { + return 0, 0 // truncated batch + } + } + + uncompressedBytes := len(rawRecords) + + numRecords := int(batch.NumRecords) + krecords := readRawRecords(numRecords, rawRecords) + + // KAFKA-5443: compacted topics preserve the last offset in a batch, + // even if the last record is removed, meaning that using offsets from + // records alone may not get us to the next offset we need to ask for. + // + // We only perform this logic if we did not consume a truncated batch. + // If we consume a truncated batch, then what was truncated could have + // been an offset we are interested in consuming. Even if our fetch did + // not advance this partition at all, we will eventually fetch from the + // partition and not have a truncated response, at which point we will + // either advance offsets or will set to nextAskOffset. + nextAskOffset := lastOffset + 1 + defer func() { + if numRecords == len(krecords) && o.offset < nextAskOffset { + o.offset = nextAskOffset + } + }() + + abortBatch := aborter.shouldAbortBatch(batch) + for i := range krecords { + record := recordToRecord( + o.from.topic, + fp.Partition, + batch, + &krecords[i], + ) + o.maybeKeepRecord(fp, record, abortBatch) + + if abortBatch && record.Attrs.IsControl() { + // A control record has a key and a value where the key + // is int16 version and int16 type. Aborted records + // have a type of 0. + if key := record.Key; len(key) >= 4 && key[2] == 0 && key[3] == 0 { + aborter.trackAbortedPID(batch.ProducerID) + } + } + } + + return len(krecords), uncompressedBytes +} + +// Processes an outer v1 message. There could be no inner message, which makes +// this easy, but if not, we decompress and process each inner message as +// either v0 or v1. We only expect the inner message to be v1, but technically +// a crazy pipeline could have v0 anywhere. 
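+//
+// For compressed message sets, the outer message's Value holds the compressed
+// concatenation of the inner messages, and the outer offset is that of the
+// last inner message; inner offsets are reconstructed by counting backwards
+// from it.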
+func (o *cursorOffsetNext) processV1OuterMessage( + fp *FetchPartition, + message *kmsg.MessageV1, + decompressor *decompressor, +) (int, int) { + compression := byte(message.Attributes & 0x0003) + if compression == 0 { + o.processV1Message(fp, message) + return 1, 0 + } + + rawInner, err := decompressor.decompress(message.Value, compression) + if err != nil { + return 0, 0 // truncated batch + } + + uncompressedBytes := len(rawInner) + + var innerMessages []readerFrom +out: + for len(rawInner) > 17 { // magic at byte 17 + length := int32(binary.BigEndian.Uint32(rawInner[8:])) + length += 12 // offset and length fields + if len(rawInner) < int(length) { + break + } + + var ( + magic = rawInner[16] + + msg readerFrom + lengthField *int32 + crcField *int32 + ) + + switch magic { + case 0: + m := new(kmsg.MessageV0) + msg = m + lengthField = &m.MessageSize + crcField = &m.CRC + case 1: + m := new(kmsg.MessageV1) + msg = m + lengthField = &m.MessageSize + crcField = &m.CRC + + default: + fp.Err = fmt.Errorf("message set v1 has inner message with invalid magic %d", magic) + break out + } + + if err := msg.ReadFrom(rawInner[:length]); err != nil { + fp.Err = fmt.Errorf("unable to read message v%d, not enough data", magic) + break + } + if length := int32(len(rawInner[12:length])); length != *lengthField { + fp.Err = fmt.Errorf("encoded length %d does not match read length %d", *lengthField, length) + break + } + if crcCalc := int32(crc32.ChecksumIEEE(rawInner[16:length])); crcCalc != *crcField { + fp.Err = fmt.Errorf("encoded crc %x does not match calculated crc %x", *crcField, crcCalc) + break + } + innerMessages = append(innerMessages, msg) + rawInner = rawInner[length:] + } + if len(innerMessages) == 0 { + return 0, uncompressedBytes + } + + firstOffset := message.Offset - int64(len(innerMessages)) + 1 + for i := range innerMessages { + innerMessage := innerMessages[i] + switch innerMessage := innerMessage.(type) { + case *kmsg.MessageV0: + innerMessage.Offset = firstOffset + int64(i) + innerMessage.Attributes |= int8(compression) + if !o.processV0Message(fp, innerMessage) { + return i, uncompressedBytes + } + case *kmsg.MessageV1: + innerMessage.Offset = firstOffset + int64(i) + innerMessage.Attributes |= int8(compression) + if !o.processV1Message(fp, innerMessage) { + return i, uncompressedBytes + } + } + } + return len(innerMessages), uncompressedBytes +} + +func (o *cursorOffsetNext) processV1Message( + fp *FetchPartition, + message *kmsg.MessageV1, +) bool { + if message.Magic != 1 { + fp.Err = fmt.Errorf("unknown message magic %d", message.Magic) + return false + } + if uint8(message.Attributes)&0b1111_0000 != 0 { + fp.Err = fmt.Errorf("unknown attributes on message %d", message.Attributes) + return false + } + record := v1MessageToRecord(o.from.topic, fp.Partition, message) + o.maybeKeepRecord(fp, record, false) + return true +} + +// Processes an outer v0 message. We expect inner messages to be entirely v0 as +// well, so this only tries v0 always. 
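Both outer-message paths validate pre-v2 message-set entries the same way: the int32 message size sits at bytes 8..11 (the full entry is that size plus 12 for the offset and size fields), and the IEEE CRC at bytes 12..15 covers everything from the magic byte onward. The following self-contained sketch builds one v0 entry and re-runs those checks; buildV0Entry is an illustrative helper, not part of this package.

package main

import (
	"encoding/binary"
	"fmt"
	"hash/crc32"
)

// buildV0Entry assembles one pre-v2 message-set entry for a null key and the
// given value: offset(8) | messageSize(4) | crc(4) | magic(1) | attrs(1) |
// keyLen(4, -1 for null) | valueLen(4) | value.
func buildV0Entry(offset int64, value []byte) []byte {
	body := []byte{0, 0}                                   // magic 0, attributes 0
	body = binary.BigEndian.AppendUint32(body, 0xFFFFFFFF) // null key (-1)
	body = binary.BigEndian.AppendUint32(body, uint32(len(value)))
	body = append(body, value...)

	crc := crc32.ChecksumIEEE(body) // crc covers everything after the crc field

	entry := binary.BigEndian.AppendUint64(nil, uint64(offset))
	entry = binary.BigEndian.AppendUint32(entry, uint32(4+len(body))) // messageSize
	entry = binary.BigEndian.AppendUint32(entry, crc)
	return append(entry, body...)
}

func main() {
	entry := buildV0Entry(42, []byte("hello"))

	// The same checks the parser performs: messageSize + 12 is the full entry
	// length, and the IEEE crc is computed over entry[16:length].
	length := int32(binary.BigEndian.Uint32(entry[8:])) + 12
	encodedCRC := binary.BigEndian.Uint32(entry[12:16])
	calcCRC := crc32.ChecksumIEEE(entry[16:length])
	fmt.Println(length == int32(len(entry)), encodedCRC == calcCRC) // true true
}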
+func (o *cursorOffsetNext) processV0OuterMessage( + fp *FetchPartition, + message *kmsg.MessageV0, + decompressor *decompressor, +) (int, int) { + compression := byte(message.Attributes & 0x0003) + if compression == 0 { + o.processV0Message(fp, message) + return 1, 0 // uncompressed bytes is 0; set to compressed bytes on return + } + + rawInner, err := decompressor.decompress(message.Value, compression) + if err != nil { + return 0, 0 // truncated batch + } + + uncompressedBytes := len(rawInner) + + var innerMessages []kmsg.MessageV0 + for len(rawInner) > 17 { // magic at byte 17 + length := int32(binary.BigEndian.Uint32(rawInner[8:])) + length += 12 // offset and length fields + if len(rawInner) < int(length) { + break // truncated batch + } + var m kmsg.MessageV0 + if err := m.ReadFrom(rawInner[:length]); err != nil { + fp.Err = fmt.Errorf("unable to read message v0, not enough data") + break + } + if length := int32(len(rawInner[12:length])); length != m.MessageSize { + fp.Err = fmt.Errorf("encoded length %d does not match read length %d", m.MessageSize, length) + break + } + if crcCalc := int32(crc32.ChecksumIEEE(rawInner[16:length])); crcCalc != m.CRC { + fp.Err = fmt.Errorf("encoded crc %x does not match calculated crc %x", m.CRC, crcCalc) + break + } + innerMessages = append(innerMessages, m) + rawInner = rawInner[length:] + } + if len(innerMessages) == 0 { + return 0, uncompressedBytes + } + + firstOffset := message.Offset - int64(len(innerMessages)) + 1 + for i := range innerMessages { + innerMessage := &innerMessages[i] + innerMessage.Attributes |= int8(compression) + innerMessage.Offset = firstOffset + int64(i) + if !o.processV0Message(fp, innerMessage) { + return i, uncompressedBytes + } + } + return len(innerMessages), uncompressedBytes +} + +func (o *cursorOffsetNext) processV0Message( + fp *FetchPartition, + message *kmsg.MessageV0, +) bool { + if message.Magic != 0 { + fp.Err = fmt.Errorf("unknown message magic %d", message.Magic) + return false + } + if uint8(message.Attributes)&0b1111_1000 != 0 { + fp.Err = fmt.Errorf("unknown attributes on message %d", message.Attributes) + return false + } + record := v0MessageToRecord(o.from.topic, fp.Partition, message) + o.maybeKeepRecord(fp, record, false) + return true +} + +// maybeKeepRecord keeps a record if it is within our range of offsets to keep. +// +// If the record is being aborted or the record is a control record and the +// client does not want to keep control records, this does not keep the record. +func (o *cursorOffsetNext) maybeKeepRecord(fp *FetchPartition, record *Record, abort bool) { + if record.Offset < o.offset { + // We asked for offset 5, but that was in the middle of a + // batch; we got offsets 0 thru 4 that we need to skip. + return + } + + // We only keep control records if specifically requested. + if record.Attrs.IsControl() { + abort = !o.from.keepControl + } + if !abort { + fp.Records = append(fp.Records, record) + } + + // The record offset may be much larger than our expected offset if the + // topic is compacted. + o.offset = record.Offset + 1 + o.lastConsumedEpoch = record.LeaderEpoch + o.lastConsumedTime = record.Timestamp +} + +/////////////////////////////// +// kmsg.Record to kgo.Record // +/////////////////////////////// + +func timeFromMillis(millis int64) time.Time { + return time.Unix(0, millis*1e6) +} + +// recordToRecord converts a kmsg.RecordBatch's Record to a kgo Record. 
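recordToRecord below reconstructs per-record values from batch-relative deltas: the absolute offset is the batch FirstOffset plus the record's OffsetDelta, and for create-time batches the timestamp is FirstTimestamp plus the record's timestamp delta, in milliseconds. A toy sketch of that arithmetic, with hypothetical types rather than the kmsg structs:

package main

import (
	"fmt"
	"time"
)

// batchHeader carries only the fields the delta math needs.
type batchHeader struct {
	firstOffset    int64
	firstTimestamp int64 // milliseconds since the epoch
}

// absolutize mirrors the conversion: offset = FirstOffset + OffsetDelta,
// timestamp = FirstTimestamp + TimestampDelta (both in milliseconds).
func absolutize(b batchHeader, offsetDelta int32, tsDelta int64) (int64, time.Time) {
	offset := b.firstOffset + int64(offsetDelta)
	ts := time.Unix(0, (b.firstTimestamp+tsDelta)*int64(time.Millisecond))
	return offset, ts
}

func main() {
	b := batchHeader{firstOffset: 1000, firstTimestamp: 1_700_000_000_000}
	for _, d := range []int32{0, 1, 2} {
		off, ts := absolutize(b, d, int64(d)*5)
		fmt.Println(off, ts.UTC())
	}
}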
+func recordToRecord( + topic string, + partition int32, + batch *kmsg.RecordBatch, + record *kmsg.Record, +) *Record { + h := make([]RecordHeader, 0, len(record.Headers)) + for _, kv := range record.Headers { + h = append(h, RecordHeader{ + Key: kv.Key, + Value: kv.Value, + }) + } + + r := &Record{ + Key: record.Key, + Value: record.Value, + Headers: h, + Topic: topic, + Partition: partition, + Attrs: RecordAttrs{uint8(batch.Attributes)}, + ProducerID: batch.ProducerID, + ProducerEpoch: batch.ProducerEpoch, + LeaderEpoch: batch.PartitionLeaderEpoch, + Offset: batch.FirstOffset + int64(record.OffsetDelta), + } + if r.Attrs.TimestampType() == 0 { + r.Timestamp = timeFromMillis(batch.FirstTimestamp + record.TimestampDelta64) + } else { + r.Timestamp = timeFromMillis(batch.MaxTimestamp) + } + return r +} + +func messageAttrsToRecordAttrs(attrs int8, v0 bool) RecordAttrs { + uattrs := uint8(attrs) + if v0 { + uattrs |= 0b1000_0000 + } + return RecordAttrs{uattrs} +} + +func v0MessageToRecord( + topic string, + partition int32, + message *kmsg.MessageV0, +) *Record { + return &Record{ + Key: message.Key, + Value: message.Value, + Topic: topic, + Partition: partition, + Attrs: messageAttrsToRecordAttrs(message.Attributes, true), + ProducerID: -1, + ProducerEpoch: -1, + LeaderEpoch: -1, + Offset: message.Offset, + } +} + +func v1MessageToRecord( + topic string, + partition int32, + message *kmsg.MessageV1, +) *Record { + return &Record{ + Key: message.Key, + Value: message.Value, + Timestamp: timeFromMillis(message.Timestamp), + Topic: topic, + Partition: partition, + Attrs: messageAttrsToRecordAttrs(message.Attributes, false), + ProducerID: -1, + ProducerEpoch: -1, + LeaderEpoch: -1, + Offset: message.Offset, + } +} + +////////////////// +// fetchRequest // +////////////////// + +type fetchRequest struct { + version int16 + maxWait int32 + minBytes int32 + maxBytes int32 + maxPartBytes int32 + rack string + + isolationLevel int8 + preferLagFn PreferLagFn + + numOffsets int + usedOffsets usedOffsets + + torder []string // order of topics to write + porder map[string][]int32 // per topic, order of partitions to write + + // topic2id and id2topic track bidirectional lookup of topics and IDs + // that are being added to *this* specific request. topic2id slightly + // duplicates the map t2id in the fetch session, but t2id is different + // in that t2id tracks IDs in use from all prior requests -- and, + // importantly, t2id is cleared of IDs that are no longer used (see + // ForgottenTopics). + // + // We need to have both a session t2id map and a request t2id map: + // + // * The session t2id is what we use when creating forgotten topics. + // If we are forgetting a topic, the ID is not in the req t2id. + // + // * The req topic2id is used for adding to the session t2id. When + // building a request, if the id is in req.topic2id but not + // session.t2id, we promote the ID into the session map. + // + // Lastly, id2topic is used when handling the response, as our reverse + // lookup from the ID back to the topic (and then we work with the + // topic name only). There is no equivalent in the session because + // there is no need for the id2topic lookup ever in the session. + topic2id map[string][16]byte + id2topic map[[16]byte]string + + disableIDs bool // #295: using an old IBP on new Kafka results in ApiVersions advertising 13+ while the broker does not return IDs + + // Session is a copy of the source session at the time a request is + // built. 
If the source is reset, the session it has is reset at the + // field level only. Our view of the original session is still valid. + session fetchSession +} + +func (f *fetchRequest) addCursor(c *cursor) { + if f.usedOffsets == nil { + f.usedOffsets = make(usedOffsets) + f.id2topic = make(map[[16]byte]string) + f.topic2id = make(map[string][16]byte) + f.porder = make(map[string][]int32) + } + partitions := f.usedOffsets[c.topic] + if partitions == nil { + partitions = make(map[int32]*cursorOffsetNext) + f.usedOffsets[c.topic] = partitions + f.id2topic[c.topicID] = c.topic + f.topic2id[c.topic] = c.topicID + var noID [16]byte + if c.topicID == noID { + f.disableIDs = true + } + f.torder = append(f.torder, c.topic) + } + partitions[c.partition] = c.use() + f.porder[c.topic] = append(f.porder[c.topic], c.partition) + f.numOffsets++ +} + +// PreferLagFn accepts topic and partition lag, the previously determined topic +// order, and the previously determined per-topic partition order, and returns +// a new topic and per-topic partition order. +// +// Most use cases will not need to look at the prior orders, but they exist if +// you want to get fancy. +// +// You can return partial results: if you only return topics, partitions within +// each topic keep their prior ordering. If you only return some topics but not +// all, the topics you do not return / the partitions you do not return will +// retain their original ordering *after* your given ordering. +// +// NOTE: torderPrior and porderPrior must not be modified. To avoid a bit of +// unnecessary allocations, these arguments are views into data that is used to +// build a fetch request. +type PreferLagFn func(lag map[string]map[int32]int64, torderPrior []string, porderPrior map[string][]int32) ([]string, map[string][]int32) + +// PreferLagAt is a simple PreferLagFn that orders the largest lag first, for +// any topic that is collectively lagging more than preferLagAt, and for any +// partition that is lagging more than preferLagAt. +// +// The function does not prescribe any ordering for topics that have the same +// lag. It is recommended to use a number more than 0 or 1: if you use 0, you +// may just always undo client ordering when there is no actual lag. +func PreferLagAt(preferLagAt int64) PreferLagFn { + if preferLagAt < 0 { + return nil + } + return func(lag map[string]map[int32]int64, _ []string, _ map[string][]int32) ([]string, map[string][]int32) { + type plag struct { + p int32 + lag int64 + } + type tlag struct { + t string + lag int64 + ps []plag + } + + // First, collect all partition lag into per-topic lag. + tlags := make(map[string]tlag, len(lag)) + for t, ps := range lag { + for p, lag := range ps { + prior := tlags[t] + tlags[t] = tlag{ + t: t, + lag: prior.lag + lag, + ps: append(prior.ps, plag{p, lag}), + } + } + } + + // We now remove topics and partitions that are not lagging + // enough. Collectively, the topic could be lagging too much, + // but individually, no partition is lagging that much: we will + // sort the topic first and keep the old partition ordering. 
+ for t, tlag := range tlags { + if tlag.lag < preferLagAt { + delete(tlags, t) + continue + } + for i := 0; i < len(tlag.ps); i++ { + plag := tlag.ps[i] + if plag.lag < preferLagAt { + tlag.ps[i] = tlag.ps[len(tlag.ps)-1] + tlag.ps = tlag.ps[:len(tlag.ps)-1] + i-- + } + } + } + if len(tlags) == 0 { + return nil, nil + } + + var sortedLags []tlag + for _, tlag := range tlags { + sort.Slice(tlag.ps, func(i, j int) bool { return tlag.ps[i].lag > tlag.ps[j].lag }) + sortedLags = append(sortedLags, tlag) + } + sort.Slice(sortedLags, func(i, j int) bool { return sortedLags[i].lag > sortedLags[j].lag }) + + // We now return our laggy topics and partitions, and let the + // caller add back any missing topics / partitions in their + // prior order. + torder := make([]string, 0, len(sortedLags)) + for _, t := range sortedLags { + torder = append(torder, t.t) + } + porder := make(map[string][]int32, len(sortedLags)) + for _, tlag := range sortedLags { + ps := make([]int32, 0, len(tlag.ps)) + for _, p := range tlag.ps { + ps = append(ps, p.p) + } + porder[tlag.t] = ps + } + return torder, porder + } +} + +// If the end user prefers to consume lag, we reorder our previously ordered +// partitions, preferring first the laggiest topics, and then within those, the +// laggiest partitions. +func (f *fetchRequest) adjustPreferringLag() { + if f.preferLagFn == nil { + return + } + + tall := make(map[string]struct{}, len(f.torder)) + for _, t := range f.torder { + tall[t] = struct{}{} + } + pall := make(map[string][]int32, len(f.porder)) + for t, ps := range f.porder { + pall[t] = append([]int32(nil), ps...) + } + + lag := make(map[string]map[int32]int64, len(f.torder)) + for t, ps := range f.usedOffsets { + plag := make(map[int32]int64, len(ps)) + lag[t] = plag + for p, c := range ps { + hwm := c.hwm + if c.hwm < 0 { + hwm = 0 + } + lag := hwm - c.offset + if c.offset <= 0 { + lag = hwm + } + if lag < 0 { + lag = 0 + } + plag[p] = lag + } + } + + torder, porder := f.preferLagFn(lag, f.torder, f.porder) + if torder == nil && porder == nil { + return + } + defer func() { f.torder, f.porder = torder, porder }() + + if len(torder) == 0 { + torder = f.torder // user did not modify topic order, keep old order + } else { + // Remove any extra topics the user returned that we were not + // consuming, and add all topics they did not give back. + for i := 0; i < len(torder); i++ { + t := torder[i] + if _, exists := tall[t]; !exists { + torder = append(torder[:i], torder[i+1:]...) // user gave topic we were not fetching + i-- + } + delete(tall, t) + } + for _, t := range f.torder { + if _, exists := tall[t]; exists { + torder = append(torder, t) // user did not return topic we were fetching + delete(tall, t) + } + } + } + + if len(porder) == 0 { + porder = f.porder // user did not modify partition order, keep old order + return + } + + pused := make(map[int32]struct{}) + for t, ps := range pall { + order, exists := porder[t] + if !exists { + porder[t] = ps // shortcut: user did not define this partition's oorder, keep old order + continue + } + for _, p := range ps { + pused[p] = struct{}{} + } + for i := 0; i < len(order); i++ { + p := order[i] + if _, exists := pused[p]; !exists { + order = append(order[:i], order[i+1:]...) 
+ i-- + } + delete(pused, p) + } + for _, p := range f.porder[t] { + if _, exists := pused[p]; exists { + order = append(order, p) + delete(pused, p) + } + } + porder[t] = order + } +} + +func (*fetchRequest) Key() int16 { return 1 } +func (f *fetchRequest) MaxVersion() int16 { + if f.disableIDs || f.session.disableIDs { + return 12 + } + return 16 +} +func (f *fetchRequest) SetVersion(v int16) { f.version = v } +func (f *fetchRequest) GetVersion() int16 { return f.version } +func (f *fetchRequest) IsFlexible() bool { return f.version >= 12 } // version 12+ is flexible +func (f *fetchRequest) AppendTo(dst []byte) []byte { + req := kmsg.NewFetchRequest() + req.Version = f.version + req.ReplicaID = -1 + req.MaxWaitMillis = f.maxWait + req.MinBytes = f.minBytes + req.MaxBytes = f.maxBytes + req.IsolationLevel = f.isolationLevel + req.SessionID = f.session.id + req.SessionEpoch = f.session.epoch + req.Rack = f.rack + + // We track which partitions we add in this request; any partitions + // missing that are already in the session get added to forgotten + // topics at the end. + var sessionUsed map[string]map[int32]struct{} + if !f.session.killed { + sessionUsed = make(map[string]map[int32]struct{}, len(f.usedOffsets)) + } + + f.adjustPreferringLag() + + for _, topic := range f.torder { + partitions := f.usedOffsets[topic] + + var reqTopic *kmsg.FetchRequestTopic + sessionTopic := f.session.lookupTopic(topic, f.topic2id) + + var usedTopic map[int32]struct{} + if sessionUsed != nil { + usedTopic = make(map[int32]struct{}, len(partitions)) + } + + for _, partition := range f.porder[topic] { + cursorOffsetNext := partitions[partition] + + if usedTopic != nil { + usedTopic[partition] = struct{}{} + } + + if !sessionTopic.hasPartitionAt( + partition, + cursorOffsetNext.offset, + cursorOffsetNext.currentLeaderEpoch, + ) { + if reqTopic == nil { + t := kmsg.NewFetchRequestTopic() + t.Topic = topic + t.TopicID = f.topic2id[topic] + req.Topics = append(req.Topics, t) + reqTopic = &req.Topics[len(req.Topics)-1] + } + + reqPartition := kmsg.NewFetchRequestTopicPartition() + reqPartition.Partition = partition + reqPartition.CurrentLeaderEpoch = cursorOffsetNext.currentLeaderEpoch + reqPartition.FetchOffset = cursorOffsetNext.offset + reqPartition.LastFetchedEpoch = -1 + reqPartition.LogStartOffset = -1 + reqPartition.PartitionMaxBytes = f.maxPartBytes + reqTopic.Partitions = append(reqTopic.Partitions, reqPartition) + } + } + + if sessionUsed != nil { + sessionUsed[topic] = usedTopic + } + } + + // Now for everything that we did not use in our session, add it to + // forgotten topics and remove it from the session. + if sessionUsed != nil { + for topic, partitions := range f.session.used { + var forgottenTopic *kmsg.FetchRequestForgottenTopic + topicUsed := sessionUsed[topic] + for partition := range partitions { + if topicUsed != nil { + if _, partitionUsed := topicUsed[partition]; partitionUsed { + continue + } + } + if forgottenTopic == nil { + t := kmsg.NewFetchRequestForgottenTopic() + t.Topic = topic + t.TopicID = f.session.t2id[topic] + req.ForgottenTopics = append(req.ForgottenTopics, t) + forgottenTopic = &req.ForgottenTopics[len(req.ForgottenTopics)-1] + } + forgottenTopic.Partitions = append(forgottenTopic.Partitions, partition) + delete(partitions, partition) + } + if len(partitions) == 0 { + delete(f.session.used, topic) + id := f.session.t2id[topic] + delete(f.session.t2id, topic) + // If we deleted a topic that was missing an ID, then we clear the + // previous disableIDs state. 
We potentially *reenable* disableIDs + // if any remaining topics in our session are also missing their ID. + var noID [16]byte + if id == noID { + f.session.disableIDs = false + for _, id := range f.session.t2id { + if id == noID { + f.session.disableIDs = true + break + } + } + } + } + } + } + + return req.AppendTo(dst) +} + +func (*fetchRequest) ReadFrom([]byte) error { + panic("unreachable -- the client never uses ReadFrom on its internal fetchRequest") +} + +func (f *fetchRequest) ResponseKind() kmsg.Response { + r := kmsg.NewPtrFetchResponse() + r.Version = f.version + return r +} + +// fetchSessions, introduced in KIP-227, allow us to send less information back +// and forth to a Kafka broker. +type fetchSession struct { + id int32 + epoch int32 + + used map[string]map[int32]fetchSessionOffsetEpoch // what we have in the session so far + t2id map[string][16]byte + + disableIDs bool // if anything in t2id has no ID + killed bool // if we cannot use a session anymore +} + +func (s *fetchSession) kill() { + s.epoch = -1 + s.used = nil + s.t2id = nil + s.disableIDs = false + s.killed = true +} + +// reset resets the session by setting the next request to use epoch 0. +// We do not reset the ID; using epoch 0 for an existing ID unregisters the +// prior session. +func (s *fetchSession) reset() { + if s.killed { + return + } + s.epoch = 0 + s.used = nil + s.t2id = nil + s.disableIDs = false +} + +// bumpEpoch bumps the epoch and saves the session id. +// +// Kafka replies with the session ID of the session to use. When it does, we +// start from epoch 1, wrapping back to 1 if we go negative. +func (s *fetchSession) bumpEpoch(id int32) { + if s.killed { + return + } + if id != s.id { + s.epoch = 0 // new session: reset to 0 for the increment below + } + s.epoch++ + if s.epoch < 0 { + s.epoch = 1 // we wrapped: reset back to 1 to continue this session + } + s.id = id +} + +func (s *fetchSession) lookupTopic(topic string, t2id map[string][16]byte) fetchSessionTopic { + if s.killed { + return nil + } + if s.used == nil { + s.used = make(map[string]map[int32]fetchSessionOffsetEpoch) + s.t2id = make(map[string][16]byte) + } + t := s.used[topic] + if t == nil { + t = make(map[int32]fetchSessionOffsetEpoch) + s.used[topic] = t + id := t2id[topic] + s.t2id[topic] = id + if id == ([16]byte{}) { + s.disableIDs = true + } + } + return t +} + +type fetchSessionOffsetEpoch struct { + offset int64 + epoch int32 +} + +type fetchSessionTopic map[int32]fetchSessionOffsetEpoch + +func (s fetchSessionTopic) hasPartitionAt(partition int32, offset int64, epoch int32) bool { + if s == nil { // if we are nil, the session was killed + return false + } + at, exists := s[partition] + now := fetchSessionOffsetEpoch{offset, epoch} + s[partition] = now + return exists && at == now +} diff --git a/vendor/github.com/twmb/franz-go/pkg/kgo/strftime.go b/vendor/github.com/twmb/franz-go/pkg/kgo/strftime.go new file mode 100644 index 000000000000..6ff862fbf509 --- /dev/null +++ b/vendor/github.com/twmb/franz-go/pkg/kgo/strftime.go @@ -0,0 +1,205 @@ +package kgo + +import ( + "strconv" + "time" +) + +// NOTE: this code is copied from github.com/twmb/go-strftime, with AppendFormat +// being unexported. + +// appendFormat appends t to dst according to the input strftime format. 
+// +// this does not take into account locale; some high level differences: +// +// %E and %O are stripped, as well as a single subsequent alpha char +// %x is DD/MM/YY +// %c is time.ANSIC +// +// In normal strftime, %a, %A, %b, %B, %c, %p, %P, %r, %x, and %X are all +// affected by locale. This package hardcodes the implementation to mirror +// LC_TIME=C (minus %x). Every strftime(3) formatter is accounted for. +func strftimeAppendFormat(dst []byte, format string, t time.Time) []byte { + for i := 0; i < len(format); i++ { + c := format[i] + if c != '%' || i == len(format)-1 { + dst = append(dst, c) + continue + } + + i++ + c = format[i] + switch c { + default: + dst = append(dst, '%', c) + case 'a': // abbrev day + dst = t.AppendFormat(dst, "Mon") + case 'A': // full day + dst = t.AppendFormat(dst, "Monday") + case 'b', 'h': // abbrev month, h is equivalent to b + dst = t.AppendFormat(dst, "Jan") + case 'B': // full month + dst = t.AppendFormat(dst, "January") + case 'c': // preferred date and time representation + dst = t.AppendFormat(dst, time.ANSIC) + case 'C': // century (year/100) as two digit num + dst = append0Pad(dst, t.Year()/100, 2) + case 'd': // day of month as two digit num + dst = append0Pad(dst, t.Day(), 2) + case 'D': // %m/%d/%y + dst = append0Pad(dst, int(t.Month()), 2) + dst = append(dst, '/') + dst = append0Pad(dst, t.Day(), 2) + dst = append(dst, '/') + dst = append0Pad(dst, t.Year()%100, 2) + case 'e': // day of month as num like %d, but leading 0 is space instead + dst = appendSpacePad(dst, t.Day()) + case 'E', 'O': // modifier, ignored and skip next (if ascii) + if i+1 < len(format) { + next := format[i+1] + if 'a' <= next && next <= 'z' || 'A' <= next && next <= 'Z' { + i++ + } + } + case 'F': // %Y-%m-%d (iso8601) + dst = strconv.AppendInt(dst, int64(t.Year()), 10) + dst = append(dst, '-') + dst = append0Pad(dst, int(t.Month()), 2) + dst = append(dst, '-') + dst = append0Pad(dst, t.Day(), 2) + case 'G': // iso8601 week-based year + year, _ := t.ISOWeek() + dst = append0Pad(dst, year, 4) + case 'g': // like %G, but two digit year (no century) + year, _ := t.ISOWeek() + dst = append0Pad(dst, year%100, 2) + case 'H': // hour as number on 24hr clock + dst = append0Pad(dst, t.Hour(), 2) + case 'I': // hour as number on 12hr clock + dst = append0Pad(dst, t.Hour()%12, 2) + case 'j': // day of year as decimal number + dst = append0Pad(dst, t.YearDay(), 3) + case 'k': // 24hr as number, space padded + dst = appendSpacePad(dst, t.Hour()) + case 'l': // 12hr as number, space padded + dst = appendSpacePad(dst, t.Hour()%12) + case 'm': // month as number + dst = append0Pad(dst, int(t.Month()), 2) + case 'M': // minute as number + dst = append0Pad(dst, t.Minute(), 2) + case 'n': // newline + dst = append(dst, '\n') + case 'p': // AM or PM + dst = appendAMPM(dst, t.Hour()) + case 'P': // like %p buf lowercase + dst = appendampm(dst, t.Hour()) + case 'r': // %I:%M:%S %p + h := t.Hour() + dst = append0Pad(dst, h%12, 2) + dst = append(dst, ':') + dst = append0Pad(dst, t.Minute(), 2) + dst = append(dst, ':') + dst = append0Pad(dst, t.Second(), 2) + dst = append(dst, ' ') + dst = appendAMPM(dst, h) + case 'R': // %H:%M + dst = append0Pad(dst, t.Hour(), 2) + dst = append(dst, ':') + dst = append0Pad(dst, t.Minute(), 2) + case 's': // seconds since epoch + dst = strconv.AppendInt(dst, t.Unix(), 10) + case 'S': // second as number thru 60 for leap second + dst = append0Pad(dst, t.Second(), 2) + case 't': // tab + dst = append(dst, '\t') + case 'T': // %H:%M:%S + dst = append0Pad(dst, 
t.Hour(), 2) + dst = append(dst, ':') + dst = append0Pad(dst, t.Minute(), 2) + dst = append(dst, ':') + dst = append0Pad(dst, t.Second(), 2) + case 'u': // day of week as num; Monday is 1 + day := byte(t.Weekday()) + if day == 0 { + day = 7 + } + dst = append(dst, '0'+day) + case 'U': // week number of year starting from first Sunday + dst = append0Pad(dst, (t.YearDay()-int(t.Weekday())+7)/7, 2) + case 'V': // iso8601 week number + _, week := t.ISOWeek() + dst = append0Pad(dst, week, 2) + case 'w': // day of week, 0 to 6, Sunday 0 + dst = strconv.AppendInt(dst, int64(t.Weekday()), 10) + case 'W': // week number of year starting from first Monday + dst = append0Pad(dst, (t.YearDay()-(int(t.Weekday())+6)%7+7)/7, 2) + case 'x': // date representation for current locale; we go DD/MM/YY + dst = append0Pad(dst, t.Day(), 2) + dst = append(dst, '/') + dst = append0Pad(dst, int(t.Month()), 2) + dst = append(dst, '/') + dst = append0Pad(dst, t.Year()%100, 2) + case 'X': // time representation for current locale; we go HH:MM:SS + dst = append0Pad(dst, t.Hour(), 2) + dst = append(dst, ':') + dst = append0Pad(dst, t.Minute(), 2) + dst = append(dst, ':') + dst = append0Pad(dst, t.Second(), 2) + case 'y': // year as num without century + dst = append0Pad(dst, t.Year()%100, 2) + case 'Y': // year as a num + dst = append0Pad(dst, t.Year(), 4) + case 'z': // +hhmm or -hhmm offset from utc + dst = t.AppendFormat(dst, "-0700") + case 'Z': // timezone + dst = t.AppendFormat(dst, "MST") + case '+': // date and time in date(1) format + dst = t.AppendFormat(dst, "Mon Jan _2 15:04:05 MST 2006") + case '%': + dst = append(dst, '%') + } + } + return dst +} + +// all space padded numbers are two length +func appendSpacePad(p []byte, n int) []byte { + if n < 10 { + return append(p, ' ', '0'+byte(n)) + } + return strconv.AppendInt(p, int64(n), 10) +} + +func append0Pad(dst []byte, n, size int) []byte { + switch size { + case 4: + if n < 1000 { + dst = append(dst, '0') + } + fallthrough + case 3: + if n < 100 { + dst = append(dst, '0') + } + fallthrough + case 2: + if n < 10 { + dst = append(dst, '0') + } + } + return strconv.AppendInt(dst, int64(n), 10) +} + +func appendampm(p []byte, h int) []byte { + if h < 12 { + return append(p, 'a', 'm') + } + return append(p, 'p', 'm') +} + +func appendAMPM(p []byte, h int) []byte { + if h < 12 { + return append(p, 'A', 'M') + } + return append(p, 'P', 'M') +} diff --git a/vendor/github.com/twmb/franz-go/pkg/kgo/topics_and_partitions.go b/vendor/github.com/twmb/franz-go/pkg/kgo/topics_and_partitions.go new file mode 100644 index 000000000000..3c25284f31df --- /dev/null +++ b/vendor/github.com/twmb/franz-go/pkg/kgo/topics_and_partitions.go @@ -0,0 +1,922 @@ +package kgo + +import ( + "fmt" + "sort" + "strings" + "sync" + "sync/atomic" + + "github.com/twmb/franz-go/pkg/kerr" + "github.com/twmb/franz-go/pkg/kmsg" +) + +///////////// +// HELPERS // -- ugly types to eliminate the toil of nil maps and lookups +///////////// + +func dupmsi32(m map[string]int32) map[string]int32 { + d := make(map[string]int32, len(m)) + for t, ps := range m { + d[t] = ps + } + return d +} + +// "Atomic map of topic partitions", for lack of a better name at this point. 
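The strftime helpers above deliberately build output with append-style calls (strconv.AppendInt, time.AppendFormat, small zero-pad helpers) so one buffer can be reused across records instead of allocating a new string per format call. A minimal sketch of the same pattern; pad2 and appendStamp are illustrative helpers, not part of this package.

package main

import (
	"fmt"
	"strconv"
	"time"
)

// pad2 appends n as a zero-padded two-digit number, like append0Pad(dst, n, 2).
func pad2(dst []byte, n int) []byte {
	if n < 10 {
		dst = append(dst, '0')
	}
	return strconv.AppendInt(dst, int64(n), 10)
}

// appendStamp emits "%Y-%m-%dT%H:%M:%S" into dst without intermediate strings.
func appendStamp(dst []byte, t time.Time) []byte {
	dst = strconv.AppendInt(dst, int64(t.Year()), 10)
	dst = append(dst, '-')
	dst = pad2(dst, int(t.Month()))
	dst = append(dst, '-')
	dst = pad2(dst, t.Day())
	dst = append(dst, 'T')
	dst = pad2(dst, t.Hour())
	dst = append(dst, ':')
	dst = pad2(dst, t.Minute())
	dst = append(dst, ':')
	dst = pad2(dst, t.Second())
	return dst
}

func main() {
	buf := make([]byte, 0, 64) // reused across calls; no per-call allocation
	buf = appendStamp(buf[:0], time.Date(2024, 8, 30, 9, 54, 44, 0, time.UTC))
	fmt.Println(string(buf)) // 2024-08-30T09:54:44
}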
+type amtps struct { + v atomic.Value +} + +func (a *amtps) read() map[string][]int32 { + v := a.v.Load() + if v == nil { + return nil + } + return v.(map[string][]int32) +} + +func (a *amtps) write(fn func(map[string][]int32)) { + dup := a.clone() + fn(dup) + a.store(dup) +} + +func (a *amtps) clone() map[string][]int32 { + orig := a.read() + dup := make(map[string][]int32, len(orig)) + for t, ps := range orig { + dup[t] = append(dup[t], ps...) + } + return dup +} + +func (a *amtps) store(m map[string][]int32) { a.v.Store(m) } + +type mtps map[string][]int32 + +func (m mtps) String() string { + var sb strings.Builder + var topicsWritten int + ts := make([]string, 0, len(m)) + var ps []int32 + for t := range m { + ts = append(ts, t) + } + sort.Strings(ts) + for _, t := range ts { + ps = append(ps[:0], m[t]...) + sort.Slice(ps, func(i, j int) bool { return ps[i] < ps[j] }) + topicsWritten++ + fmt.Fprintf(&sb, "%s%v", t, ps) + if topicsWritten < len(m) { + sb.WriteString(", ") + } + } + return sb.String() +} + +type mtmps map[string]map[int32]struct{} // map of topics to map of partitions + +func (m *mtmps) add(t string, p int32) { + if *m == nil { + *m = make(mtmps) + } + mps := (*m)[t] + if mps == nil { + mps = make(map[int32]struct{}) + (*m)[t] = mps + } + mps[p] = struct{}{} +} + +func (m *mtmps) addt(t string) { + if *m == nil { + *m = make(mtmps) + } + mps := (*m)[t] + if mps == nil { + mps = make(map[int32]struct{}) + (*m)[t] = mps + } +} + +func (m mtmps) onlyt(t string) bool { + if m == nil { + return false + } + ps, exists := m[t] + return exists && len(ps) == 0 +} + +func (m mtmps) remove(t string, p int32) { + if m == nil { + return + } + mps, exists := m[t] + if !exists { + return + } + delete(mps, p) + if len(mps) == 0 { + delete(m, t) + } +} + +//////////// +// PAUSED // -- types for pausing topics and partitions +//////////// + +type pausedTopics map[string]pausedPartitions + +type pausedPartitions struct { + all bool + m map[int32]struct{} +} + +func (m pausedTopics) t(topic string) (pausedPartitions, bool) { + if len(m) == 0 { // potentially nil + return pausedPartitions{}, false + } + pps, exists := m[topic] + return pps, exists +} + +func (m pausedTopics) has(topic string, partition int32) (paused bool) { + if len(m) == 0 { + return false + } + pps, exists := m[topic] + if !exists { + return false + } + if pps.all { + return true + } + _, exists = pps.m[partition] + return exists +} + +func (m pausedTopics) addTopics(topics ...string) { + for _, topic := range topics { + pps, exists := m[topic] + if !exists { + pps = pausedPartitions{m: make(map[int32]struct{})} + } + pps.all = true + m[topic] = pps + } +} + +func (m pausedTopics) delTopics(topics ...string) { + for _, topic := range topics { + pps, exists := m[topic] + if !exists { + continue + } + pps.all = false + if !pps.all && len(pps.m) == 0 { + delete(m, topic) + } + } +} + +func (m pausedTopics) addPartitions(topicPartitions map[string][]int32) { + for topic, partitions := range topicPartitions { + pps, exists := m[topic] + if !exists { + pps = pausedPartitions{m: make(map[int32]struct{})} + } + for _, partition := range partitions { + pps.m[partition] = struct{}{} + } + m[topic] = pps + } +} + +func (m pausedTopics) delPartitions(topicPartitions map[string][]int32) { + for topic, partitions := range topicPartitions { + pps, exists := m[topic] + if !exists { + continue + } + for _, partition := range partitions { + delete(pps.m, partition) + } + if !pps.all && len(pps.m) == 0 { + delete(m, topic) + } + } +} + 
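The amtps helper above (and topicsPartitions below) follow the same read-mostly discipline: readers do one atomic load of an immutable map, while writers clone the map, mutate the clone, and atomically store it back. A small sketch of that copy-on-write shape, assuming writers are already serialized by the caller as they are in this client; cowMap is an illustrative type.

package main

import (
	"fmt"
	"sync/atomic"
)

// cowMap is a read-mostly map guarded by copy-on-write: readers never block
// writers and vice versa, at the cost of copying the map on each write.
type cowMap struct {
	v atomic.Value // map[string][]int32
}

func (c *cowMap) read() map[string][]int32 {
	m, _ := c.v.Load().(map[string][]int32)
	return m
}

// write clones the current map, applies fn to the clone, and stores it.
// Writers are assumed to be serialized externally.
func (c *cowMap) write(fn func(map[string][]int32)) {
	orig := c.read()
	dup := make(map[string][]int32, len(orig))
	for t, ps := range orig {
		dup[t] = append([]int32(nil), ps...)
	}
	fn(dup)
	c.v.Store(dup)
}

func main() {
	var m cowMap
	m.write(func(d map[string][]int32) { d["logs"] = []int32{0, 1, 2} })
	fmt.Println(m.read()["logs"]) // [0 1 2]
}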
+func (m pausedTopics) pausedTopics() []string { + var r []string + for topic, pps := range m { + if pps.all { + r = append(r, topic) + } + } + return r +} + +func (m pausedTopics) pausedPartitions() map[string][]int32 { + r := make(map[string][]int32) + for topic, pps := range m { + ps := make([]int32, 0, len(pps.m)) + for partition := range pps.m { + ps = append(ps, partition) + } + r[topic] = ps + } + return r +} + +func (m pausedTopics) clone() pausedTopics { + dup := make(pausedTopics) + dup.addTopics(m.pausedTopics()...) + dup.addPartitions(m.pausedPartitions()) + return dup +} + +////////// +// GUTS // -- the key types for storing important metadata for topics & partitions +////////// + +func newTopicPartitions() *topicPartitions { + parts := new(topicPartitions) + parts.v.Store(new(topicPartitionsData)) + return parts +} + +// Contains all information about a topic's partitions. +type topicPartitions struct { + v atomic.Value // *topicPartitionsData + + partsMu sync.Mutex + partitioner TopicPartitioner + lb *leastBackupInput // for partitioning if the partitioner is a LoadTopicPartitioner +} + +func (t *topicPartitions) load() *topicPartitionsData { return t.v.Load().(*topicPartitionsData) } + +func newTopicsPartitions() *topicsPartitions { + var t topicsPartitions + t.v.Store(make(topicsPartitionsData)) + return &t +} + +// A helper type mapping topics to their partitions; +// this is the inner value of topicPartitions.v. +type topicsPartitionsData map[string]*topicPartitions + +func (d topicsPartitionsData) hasTopic(t string) bool { _, exists := d[t]; return exists } +func (d topicsPartitionsData) loadTopic(t string) *topicPartitionsData { + tp, exists := d[t] + if !exists { + return nil + } + return tp.load() +} + +// A helper type mapping topics to their partitions that can be updated +// atomically. +type topicsPartitions struct { + v atomic.Value // topicsPartitionsData (map[string]*topicPartitions) +} + +func (t *topicsPartitions) load() topicsPartitionsData { + if t == nil { + return nil + } + return t.v.Load().(topicsPartitionsData) +} +func (t *topicsPartitions) storeData(d topicsPartitionsData) { t.v.Store(d) } +func (t *topicsPartitions) storeTopics(topics []string) { t.v.Store(t.ensureTopics(topics)) } +func (t *topicsPartitions) clone() topicsPartitionsData { + current := t.load() + clone := make(map[string]*topicPartitions, len(current)) + for k, v := range current { + clone[k] = v + } + return clone +} + +// Ensures that the topics exist in the returned map, but does not store the +// update. This can be used to update the data and store later, rather than +// storing immediately. +func (t *topicsPartitions) ensureTopics(topics []string) topicsPartitionsData { + var cloned bool + current := t.load() + for _, topic := range topics { + if _, exists := current[topic]; !exists { + if !cloned { + current = t.clone() + cloned = true + } + current[topic] = newTopicPartitions() + } + } + return current +} + +// Opposite of ensureTopics, this purges the input topics and *does* store. +func (t *topicsPartitions) purgeTopics(topics []string) { + var cloned bool + current := t.load() + for _, topic := range topics { + if _, exists := current[topic]; exists { + if !cloned { + current = t.clone() + cloned = true + } + delete(current, topic) + } + } + if cloned { + t.storeData(current) + } +} + +// Updates the topic partitions data atomic value. +// +// If this is the first time seeing partitions, we do processing of unknown +// partitions that may be buffered for producing. 
+func (cl *Client) storePartitionsUpdate(topic string, l *topicPartitions, lv *topicPartitionsData, hadPartitions bool) { + // If the topic already had partitions, then there would be no + // unknown topic waiting and we do not need to notify anything. + if hadPartitions { + l.v.Store(lv) + return + } + + p := &cl.producer + + p.unknownTopicsMu.Lock() + defer p.unknownTopicsMu.Unlock() + + // If the topic did not have partitions, then we need to store the + // partition update BEFORE unlocking the mutex to guard against this + // sequence of events: + // + // - unlock waiters + // - delete waiter + // - new produce recreates waiter + // - we store update + // - we never notify the recreated waiter + // + // By storing before releasing the locks, we ensure that later + // partition loads for this topic under the mu will see our update. + defer l.v.Store(lv) + + // If there are no unknown topics or this topic is not unknown, then we + // have nothing to do. + if len(p.unknownTopics) == 0 { + return + } + unknown, exists := p.unknownTopics[topic] + if !exists { + return + } + + // If we loaded no partitions because of a retryable error, we signal + // the waiting goroutine that a try happened. It is possible the + // goroutine is quitting and will not be draining unknownWait, so we do + // not require the send. + if len(lv.partitions) == 0 && kerr.IsRetriable(lv.loadErr) { + select { + case unknown.wait <- lv.loadErr: + default: + } + return + } + + // Either we have a fatal error or we can successfully partition. + // + // Even with a fatal error, if we loaded any partitions, we partition. + // If we only had a fatal error, we can finish promises in a goroutine. + // If we are partitioning, we have to do it under the unknownMu to + // ensure prior buffered records are produced in order before we + // release the mu. + delete(p.unknownTopics, topic) + close(unknown.wait) // allow waiting goroutine to quit + + if len(lv.partitions) == 0 { + cl.producer.promiseBatch(batchPromise{ + recs: unknown.buffered, + err: lv.loadErr, + }) + } else { + for _, pr := range unknown.buffered { + cl.doPartitionRecord(l, lv, pr) + } + } +} + +// If a metadata request fails after retrying (internally retrying, so only a +// few times), or the metadata request does not return topics that we requested +// (which may also happen additionally consuming via regex), then we need to +// bump errors for topics that were previously loaded, and bump errors for +// topics awaiting load. +// +// This has two modes of operation: +// +// 1. if no topics were missing, then the metadata request failed outright, +// and we need to bump errors on all stored topics and unknown topics. +// +// 2. if topics were missing, then the metadata request was successful but +// had missing data, and we need to bump errors on only what was mising. 
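storePartitionsUpdate above signals waiters with two distinct channel idioms: a non-blocking send delivers "a retriable attempt happened" without ever stalling the metadata path if the waiter has gone away, while closing the channel broadcasts that the topic is resolved (or failed fatally). A minimal sketch of that pattern with an illustrative waiter type, not the client's actual bookkeeping:

package main

import (
	"errors"
	"fmt"
	"time"
)

// waiter models a goroutine blocked on an unknown topic: retriable errors are
// delivered best-effort, and finish releases the waiter for good.
type waiter struct {
	wait chan error
}

func (w *waiter) notifyRetriable(err error) {
	select {
	case w.wait <- err: // waiter sees that an attempt happened
	default: // nobody is listening right now; never block the metadata path
	}
}

func (w *waiter) finish() { close(w.wait) }

func main() {
	w := &waiter{wait: make(chan error, 1)}

	go func() {
		for err := range w.wait {
			fmt.Println("retriable load error, waiting for next metadata:", err)
		}
		fmt.Println("topic resolved (or failed fatally); waiter exits")
	}()

	w.notifyRetriable(errors.New("LEADER_NOT_AVAILABLE"))
	time.Sleep(10 * time.Millisecond)
	w.finish()
	time.Sleep(10 * time.Millisecond)
}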
+func (cl *Client) bumpMetadataFailForTopics(requested map[string]*topicPartitions, err error, missingTopics ...string) { + p := &cl.producer + + // mode 1 + if len(missingTopics) == 0 { + for _, topic := range requested { + for _, topicPartition := range topic.load().partitions { + topicPartition.records.bumpRepeatedLoadErr(err) + } + } + } + + // mode 2 + var missing map[string]bool + for _, failTopic := range missingTopics { + if missing == nil { + missing = make(map[string]bool, len(missingTopics)) + } + missing[failTopic] = true + + if topic, exists := requested[failTopic]; exists { + for _, topicPartition := range topic.load().partitions { + topicPartition.records.bumpRepeatedLoadErr(err) + } + } + } + + p.unknownTopicsMu.Lock() + defer p.unknownTopicsMu.Unlock() + + for topic, unknown := range p.unknownTopics { + // if nil, mode 1 (req err), else mode 2 (missing resp) + if missing != nil && !missing[topic] { + continue + } + + select { + case unknown.wait <- err: + default: + } + } +} + +// topicPartitionsData is the data behind a topicPartitions' v. +// +// We keep this in an atomic because it is expected to be extremely read heavy, +// and if it were behind a lock, the lock would need to be held for a while. +type topicPartitionsData struct { + // NOTE if adding anything to this struct, be sure to fix meta merge. + loadErr error // could be auth, unknown, leader not avail, or creation err + isInternal bool + partitions []*topicPartition // partition num => partition + writablePartitions []*topicPartition // subset of above + topic string + when int64 +} + +// topicPartition contains all information from Kafka for a topic's partition, +// as well as what a client is producing to it or info about consuming from it. +type topicPartition struct { + // If we have a load error (leader/listener/replica not available), we + // keep the old topicPartition data and the new error. + loadErr error + + // If, on metadata refresh, the leader epoch for this partition goes + // backwards, we ignore the metadata refresh and signal the metadata + // should be reloaded: the broker we requested is stale. However, the + // broker could get into a bad state through some weird cluster failure + // scenarios. If we see the epoch rewind repeatedly, we eventually keep + // the metadata refresh. This is not detrimental and at worst will lead + // to the broker telling us to update our metadata. + epochRewinds uint8 + + // If we do not have a load error, we determine if the new + // topicPartition is the same or different from the old based on + // whether the data changed (leader or leader epoch, etc.). + topicPartitionData + + // If we do not have a load error, we copy the records and cursor + // pointers from the old after updating any necessary fields in them + // (see migrate functions below). + // + // Only one of records or cursor is non-nil. + records *recBuf + cursor *cursor +} + +func (tp *topicPartition) partition() int32 { + if tp.records != nil { + return tp.records.partition + } + return tp.cursor.partition +} + +// Contains stuff that changes on metadata update that we copy into a cursor or +// recBuf. +type topicPartitionData struct { + // Our leader; if metadata sees this change, the metadata update + // migrates the cursor to a different source with the session stopped, + // and the recBuf to a different sink under a tight mutex. + leader int32 + + // What we believe to be the epoch of the leader for this partition. 
+ // + // For cursors, for KIP-320, if a broker receives a fetch request where + // the current leader epoch does not match the brokers, either the + // broker is behind and returns UnknownLeaderEpoch, or we are behind + // and the broker returns FencedLeaderEpoch. For the former, we back + // off and retry. For the latter, we update our metadata. + leaderEpoch int32 +} + +// migrateProductionTo is called on metadata update if a topic partition's sink +// has changed. This moves record production from one sink to the other; this +// must be done such that records produced during migration follow those +// already buffered. +func (old *topicPartition) migrateProductionTo(new *topicPartition) { //nolint:revive // old/new naming makes this clearer + // First, remove our record buffer from the old sink. + old.records.sink.removeRecBuf(old.records) + + // Before this next lock, record producing will buffer to the + // in-migration-progress records and may trigger draining to + // the old sink. That is fine, the old sink no longer consumes + // from these records. We just have wasted drain triggers. + + old.records.mu.Lock() // guard setting sink and topic partition data + old.records.sink = new.records.sink + old.records.topicPartitionData = new.topicPartitionData + old.records.mu.Unlock() + + // After the unlock above, record buffering can trigger drains + // on the new sink, which is not yet consuming from these + // records. Again, just more wasted drain triggers. + + old.records.sink.addRecBuf(old.records) // add our record source to the new sink + + // At this point, the new sink will be draining our records. We lastly + // need to copy the records pointer to our new topicPartition. + new.records = old.records +} + +// migrateCursorTo is called on metadata update if a topic partition's leader +// or leader epoch has changed. +// +// This is a little bit different from above, in that we do this logic only +// after stopping a consumer session. With the consumer session stopped, we +// have fewer concurrency issues to worry about. +func (old *topicPartition) migrateCursorTo( //nolint:revive // old/new naming makes this clearer + new *topicPartition, + css *consumerSessionStopper, +) { + css.stop() + + old.cursor.source.removeCursor(old.cursor) + + // With the session stopped, we can update fields on the old cursor + // with no concurrency issue. + old.cursor.source = new.cursor.source + + // KIP-320: if we had consumed some messages, we need to validate the + // leader epoch on the new broker to see if we experienced data loss + // before we can use this cursor. + // + // Metadata ensures that leaderEpoch is non-negative only if the broker + // supports KIP-320. + if new.leaderEpoch != -1 && old.cursor.lastConsumedEpoch >= 0 { + // Since the cursor consumed messages, it is definitely usable. + // We use it so that the epoch load can finish using it + // properly. 
+ old.cursor.use() + css.reloadOffsets.addLoad(old.cursor.topic, old.cursor.partition, loadTypeEpoch, offsetLoad{ + replica: -1, + Offset: Offset{ + at: old.cursor.offset, + epoch: old.cursor.lastConsumedEpoch, + }, + }) + } + + old.cursor.topicPartitionData = new.topicPartitionData + + old.cursor.source.addCursor(old.cursor) + new.cursor = old.cursor +} + +type kip951move struct { + recBufs map[*recBuf]topicPartitionData + cursors map[*cursor]topicPartitionData + brokers []BrokerMetadata +} + +func (k *kip951move) empty() bool { + return len(k.brokers) == 0 +} + +func (k *kip951move) hasRecBuf(rb *recBuf) bool { + if k == nil || k.recBufs == nil { + return false + } + _, ok := k.recBufs[rb] + return ok +} + +func (k *kip951move) maybeAddProducePartition(resp *kmsg.ProduceResponse, p *kmsg.ProduceResponseTopicPartition, rb *recBuf) bool { + if resp.GetVersion() < 10 || + p.ErrorCode != kerr.NotLeaderForPartition.Code || + len(resp.Brokers) == 0 || + p.CurrentLeader.LeaderID < 0 || + p.CurrentLeader.LeaderEpoch < 0 { + return false + } + if len(k.brokers) == 0 { + for _, rb := range resp.Brokers { + b := BrokerMetadata{ + NodeID: rb.NodeID, + Host: rb.Host, + Port: rb.Port, + Rack: rb.Rack, + } + k.brokers = append(k.brokers, b) + } + } + if k.recBufs == nil { + k.recBufs = make(map[*recBuf]topicPartitionData) + } + k.recBufs[rb] = topicPartitionData{ + leader: p.CurrentLeader.LeaderID, + leaderEpoch: p.CurrentLeader.LeaderEpoch, + } + return true +} + +func (k *kip951move) maybeAddFetchPartition(resp *kmsg.FetchResponse, p *kmsg.FetchResponseTopicPartition, c *cursor) bool { + if resp.GetVersion() < 16 || + p.ErrorCode != kerr.NotLeaderForPartition.Code || + len(resp.Brokers) == 0 || + p.CurrentLeader.LeaderID < 0 || + p.CurrentLeader.LeaderEpoch < 0 { + return false + } + + if len(k.brokers) == 0 { + for _, rb := range resp.Brokers { + b := BrokerMetadata{ + NodeID: rb.NodeID, + Host: rb.Host, + Port: rb.Port, + Rack: rb.Rack, + } + k.brokers = append(k.brokers, b) + } + } + if k.cursors == nil { + k.cursors = make(map[*cursor]topicPartitionData) + } + k.cursors[c] = topicPartitionData{ + leader: p.CurrentLeader.LeaderID, + leaderEpoch: p.CurrentLeader.LeaderEpoch, + } + return true +} + +func (k *kip951move) ensureSinksAndSources(cl *Client) { + cl.sinksAndSourcesMu.Lock() + defer cl.sinksAndSourcesMu.Unlock() + + ensure := func(leader int32) { + if _, exists := cl.sinksAndSources[leader]; exists { + return + } + cl.sinksAndSources[leader] = sinkAndSource{ + sink: cl.newSink(leader), + source: cl.newSource(leader), + } + } + + for _, td := range k.recBufs { + ensure(td.leader) + } + for _, td := range k.cursors { + ensure(td.leader) + } +} + +func (k *kip951move) ensureBrokers(cl *Client) { + if len(k.brokers) == 0 { + return + } + + kbs := make([]kmsg.MetadataResponseBroker, 0, len(k.brokers)) + for _, b := range k.brokers { + kbs = append(kbs, kmsg.MetadataResponseBroker{ + NodeID: b.NodeID, + Host: b.Host, + Port: b.Port, + Rack: b.Rack, + }) + } + cl.updateBrokers(kbs) +} + +func (k *kip951move) maybeBeginMove(cl *Client) { + if k.empty() { + return + } + // We want to do the move independent of whatever is calling us, BUT we + // want to ensure it is not concurrent with a metadata request. + go cl.blockingMetadataFn(func() { + k.ensureBrokers(cl) + k.ensureSinksAndSources(cl) + k.doMove(cl) + }) +} + +func (k *kip951move) doMove(cl *Client) { + // Moving partitions is theoretically simple, but the client is written + // in a confusing way around concurrency. 
+ // + // The problem is that topicPartitionsData is read-only after + // initialization. Updates are done via atomic stores of the containing + // topicPartitionsData struct. Moving a single partition requires some + // deep copying. + + // oldNew pairs what NEEDS to be atomically updated (old; left value) + // with the value that WILL be stored (new; right value). + type oldNew struct { + l *topicPartitions + r *topicPartitionsData + } + topics := make(map[string]oldNew) + + // getT returns the oldNew for the topic, performing a shallow clone of + // the old whole-topic struct. + getT := func(m topicsPartitionsData, topic string) (oldNew, bool) { + lr, ok := topics[topic] + if !ok { + l := m[topic] + if l == nil { + return oldNew{}, false + } + dup := *l.load() + r := &dup + r.writablePartitions = append([]*topicPartition{}, r.writablePartitions...) + r.partitions = append([]*topicPartition{}, r.partitions...) + lr = oldNew{l, r} + topics[topic] = lr + } + return lr, true + } + + // modifyP returns the old topicPartition and a new one that will be + // used in migrateTo. The new topicPartition only contains the sink + // and topicPartitionData that will be copied into old under old's + // mutex. The actual migration is done in the migrate function (see + // below). + // + // A migration is not needed if the old value has a higher leader + // epoch. If the leader epoch is equal, we check if the leader is the + // same (this allows easier injection of failures in local testing). A + // higher epoch can come from a concurrent metadata update that + // actually performed the move first. + modifyP := func(d *topicPartitionsData, partition int32, td topicPartitionData) (old, new *topicPartition, modified bool) { + old = d.partitions[partition] + if old.leaderEpoch > td.leaderEpoch { + return nil, nil, false + } + if old.leaderEpoch == td.leaderEpoch && old.leader == td.leader { + return nil, nil, false + } + + cl.sinksAndSourcesMu.Lock() + sns := cl.sinksAndSources[td.leader] + cl.sinksAndSourcesMu.Unlock() + + dup := *old + new = &dup + new.topicPartitionData = topicPartitionData{ + leader: td.leader, + leaderEpoch: td.leaderEpoch, + } + if new.records != nil { + new.records = &recBuf{ + sink: sns.sink, + topicPartitionData: new.topicPartitionData, + } + } else { + new.cursor = &cursor{ + source: sns.source, + topicPartitionData: new.topicPartitionData, + } + } + + // We now have to mirror the new partition back to the topic + // slice that will be atomically stored. 
+ d.partitions[partition] = new + idxWritable := sort.Search(len(d.writablePartitions), func(i int) bool { return d.writablePartitions[i].partition() >= partition }) + if idxWritable < len(d.writablePartitions) && d.writablePartitions[idxWritable].partition() == partition { + if d.writablePartitions[idxWritable] != old { + panic("invalid invariant -- partition in writablePartitions != partition at expected index in partitions") + } + d.writablePartitions[idxWritable] = new + } + + return old, new, true + } + + if k.recBufs != nil { + tpsProducer := cl.producer.topics.load() // must be non-nil, since we have recBufs to move + for recBuf, td := range k.recBufs { + lr, ok := getT(tpsProducer, recBuf.topic) + if !ok { + continue // perhaps concurrently purged + } + old, new, modified := modifyP(lr.r, recBuf.partition, td) + if modified { + cl.cfg.logger.Log(LogLevelInfo, "moving producing partition due to kip-951 not_leader_for_partition", + "topic", recBuf.topic, + "partition", recBuf.partition, + "new_leader", new.leader, + "new_leader_epoch", new.leaderEpoch, + "old_leader", old.leader, + "old_leader_epoch", old.leaderEpoch, + ) + old.migrateProductionTo(new) + } else { + recBuf.clearFailing() + } + } + } else { + var tpsConsumer topicsPartitionsData + c := &cl.consumer + switch { + case c.g != nil: + tpsConsumer = c.g.tps.load() + case c.d != nil: + tpsConsumer = c.d.tps.load() + } + css := &consumerSessionStopper{cl: cl} + defer css.maybeRestart() + for cursor, td := range k.cursors { + lr, ok := getT(tpsConsumer, cursor.topic) + if !ok { + continue // perhaps concurrently purged + } + old, new, modified := modifyP(lr.r, cursor.partition, td) + if modified { + cl.cfg.logger.Log(LogLevelInfo, "moving consuming partition due to kip-951 not_leader_for_partition", + "topic", cursor.topic, + "partition", cursor.partition, + "new_leader", new.leader, + "new_leader_epoch", new.leaderEpoch, + "old_leader", old.leader, + "old_leader_epoch", old.leaderEpoch, + ) + old.migrateCursorTo(new, css) + } + } + } + + // We can always do a simple store. For producing, we *must* have + // had partitions, so this is not updating an unknown topic. + for _, lr := range topics { + lr.l.v.Store(lr.r) + } +} + +// Migrating a cursor requires stopping any consumer session. If we +// stop a session, we need to eventually re-start any offset listing or +// epoch loading that was stopped. Thus, we simply merge what we +// stopped into what we will reload. 
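The consumerSessionStopper below enforces a stop-once / restart-once discipline: any number of partition migrations may request a stop, the expensive session stop runs at most once, the loads that were in flight are merged for later, and a single deferred maybeRestart replays them. A small sketch of that shape with an illustrative stopper type:

package main

import "fmt"

// stopper stops at most once, accumulates work to redo, and restarts at most
// once via a deferred call -- mirroring `defer css.maybeRestart()` in doMove.
type stopper struct {
	stopped bool
	reload  []string // loads to replay after restart
}

func (s *stopper) stop() {
	if s.stopped {
		return // already stopped by an earlier migration
	}
	s.stopped = true
	s.reload = append(s.reload, "loads in flight at stop time")
	fmt.Println("consumer session stopped")
}

func (s *stopper) maybeRestart() {
	if !s.stopped {
		return // nothing was stopped; nothing to restart
	}
	fmt.Println("consumer session restarted; replaying:", s.reload)
}

func main() {
	s := new(stopper)
	defer s.maybeRestart()
	for i := 0; i < 3; i++ { // several partition moves may each request a stop
		s.stop()
	}
}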
+type consumerSessionStopper struct { + cl *Client + stopped bool + reloadOffsets listOrEpochLoads + tpsPrior *topicsPartitions +} + +func (css *consumerSessionStopper) stop() { + if css.stopped { + return + } + css.stopped = true + loads, tps := css.cl.consumer.stopSession() + css.reloadOffsets.mergeFrom(loads) + css.tpsPrior = tps +} + +func (css *consumerSessionStopper) maybeRestart() { + if !css.stopped { + return + } + session := css.cl.consumer.startNewSession(css.tpsPrior) + defer session.decWorker() + css.reloadOffsets.loadWithSession(session, "resuming reload offsets after session stopped for cursor migrating in metadata") +} diff --git a/vendor/github.com/twmb/franz-go/pkg/kgo/txn.go b/vendor/github.com/twmb/franz-go/pkg/kgo/txn.go new file mode 100644 index 000000000000..25cfd44356f6 --- /dev/null +++ b/vendor/github.com/twmb/franz-go/pkg/kgo/txn.go @@ -0,0 +1,1257 @@ +package kgo + +import ( + "context" + "errors" + "fmt" + "strings" + "sync" + "time" + + "github.com/twmb/franz-go/pkg/kmsg" + + "github.com/twmb/franz-go/pkg/kerr" +) + +func ctx2fn(ctx context.Context) func() context.Context { return func() context.Context { return ctx } } + +// TransactionEndTry is simply a named bool. +type TransactionEndTry bool + +const ( + // TryAbort attempts to end a transaction with an abort. + TryAbort TransactionEndTry = false + + // TryCommit attempts to end a transaction with a commit. + TryCommit TransactionEndTry = true +) + +// GroupTransactSession abstracts away the proper way to begin and end a +// transaction when consuming in a group, modifying records, and producing +// (EOS). +// +// If you are running Kafka 2.5+, it is strongly recommended that you also use +// RequireStableFetchOffsets. See that config option's documentation for more +// details. +type GroupTransactSession struct { + cl *Client + + failMu sync.Mutex + + revoked bool + revokedCh chan struct{} // closed once when revoked is set; reset after End + lost bool + lostCh chan struct{} // closed once when lost is set; reset after End +} + +// NewGroupTransactSession is exactly the same as NewClient, but wraps the +// client's OnPartitionsRevoked / OnPartitionsLost to ensure that transactions +// are correctly aborted whenever necessary so as to properly provide EOS. +// +// When ETLing in a group in a transaction, if a rebalance happens before the +// transaction is ended, you either (a) must block the rebalance from finishing +// until you are done producing, and then commit before unblocking, or (b) +// allow the rebalance to happen, but abort any work you did. +// +// The problem with (a) is that if your ETL work loop is slow, you run the risk +// of exceeding the rebalance timeout and being kicked from the group. You will +// try to commit, and depending on the Kafka version, the commit may even be +// erroneously successful (pre Kafka 2.5). This will lead to duplicates. +// +// Instead, for safety, a GroupTransactSession favors (b). If a rebalance +// occurs at any time before ending a transaction with a commit, this will +// abort the transaction. +// +// This leaves the risk that ending the transaction itself exceeds the +// rebalance timeout, but this is just one request with no cpu logic. With a +// proper rebalance timeout, this single request will not fail and the commit +// will succeed properly. 
+// +// If this client detects you are talking to a pre-2.5 cluster, OR if you have +// not enabled RequireStableFetchOffsets, the client will sleep for 200ms after +// a successful commit to allow Kafka's txn markers to propagate. This is not +// foolproof in the event of some extremely unlikely communication patterns and +// **potentially** could allow duplicates. See this repo's transaction's doc +// for more details. +func NewGroupTransactSession(opts ...Opt) (*GroupTransactSession, error) { + s := &GroupTransactSession{ + revokedCh: make(chan struct{}), + lostCh: make(chan struct{}), + } + + var noGroup error + + // We append one option, which will get applied last. Because it is + // applied last, we can execute some logic and override some existing + // options. + opts = append(opts, groupOpt{func(cfg *cfg) { + if cfg.group == "" { + cfg.seedBrokers = nil // force a validation error + noGroup = errors.New("missing required group") + return + } + + userRevoked := cfg.onRevoked + cfg.onRevoked = func(ctx context.Context, cl *Client, rev map[string][]int32) { + s.failMu.Lock() + defer s.failMu.Unlock() + if s.revoked { + return + } + + if cl.consumer.g.cooperative.Load() && len(rev) == 0 && !s.revoked { + cl.cfg.logger.Log(LogLevelInfo, "transact session in on_revoke with nothing to revoke; allowing next commit") + } else { + cl.cfg.logger.Log(LogLevelInfo, "transact session in on_revoke; aborting next commit if we are currently in a transaction") + s.revoked = true + close(s.revokedCh) + } + + if userRevoked != nil { + userRevoked(ctx, cl, rev) + } + } + + userLost := cfg.onLost + cfg.onLost = func(ctx context.Context, cl *Client, lost map[string][]int32) { + s.failMu.Lock() + defer s.failMu.Unlock() + if s.lost { + return + } + + cl.cfg.logger.Log(LogLevelInfo, "transact session in on_lost; aborting next commit if we are currently in a transaction") + s.lost = true + close(s.lostCh) + + if userLost != nil { + userLost(ctx, cl, lost) + } else if userRevoked != nil { + userRevoked(ctx, cl, lost) + } + } + }}) + + cl, err := NewClient(opts...) + if err != nil { + if noGroup != nil { + err = noGroup + } + return nil, err + } + s.cl = cl + return s, nil +} + +// Client returns the underlying client that this transact session wraps. This +// can be useful for functions that require a client, such as raw requests. The +// returned client should not be used to manage transactions (leave that to the +// GroupTransactSession). +func (s *GroupTransactSession) Client() *Client { + return s.cl +} + +// Close is a wrapper around Client.Close, with the exact same semantics. +// Refer to that function's documentation. +// +// This function must be called to leave the group before shutting down. +func (s *GroupTransactSession) Close() { + s.cl.Close() +} + +// PollFetches is a wrapper around Client.PollFetches, with the exact same +// semantics. Refer to that function's documentation. +// +// It is invalid to call PollFetches concurrently with Begin or End. +func (s *GroupTransactSession) PollFetches(ctx context.Context) Fetches { + return s.cl.PollFetches(ctx) +} + +// PollRecords is a wrapper around Client.PollRecords, with the exact same +// semantics. Refer to that function's documentation. +// +// It is invalid to call PollRecords concurrently with Begin or End. +func (s *GroupTransactSession) PollRecords(ctx context.Context, maxPollRecords int) Fetches { + return s.cl.PollRecords(ctx, maxPollRecords) +} + +// ProduceSync is a wrapper around Client.ProduceSync, with the exact same +// semantics. 
Refer to that function's documentation. +// +// It is invalid to call ProduceSync concurrently with Begin or End. +func (s *GroupTransactSession) ProduceSync(ctx context.Context, rs ...*Record) ProduceResults { + return s.cl.ProduceSync(ctx, rs...) +} + +// Produce is a wrapper around Client.Produce, with the exact same semantics. +// Refer to that function's documentation. +// +// It is invalid to call Produce concurrently with Begin or End. +func (s *GroupTransactSession) Produce(ctx context.Context, r *Record, promise func(*Record, error)) { + s.cl.Produce(ctx, r, promise) +} + +// TryProduce is a wrapper around Client.TryProduce, with the exact same +// semantics. Refer to that function's documentation. +// +// It is invalid to call TryProduce concurrently with Begin or End. +func (s *GroupTransactSession) TryProduce(ctx context.Context, r *Record, promise func(*Record, error)) { + s.cl.TryProduce(ctx, r, promise) +} + +// Begin begins a transaction, returning an error if the client has no +// transactional id or is already in a transaction. Begin must be called +// before producing records in a transaction. +func (s *GroupTransactSession) Begin() error { + s.cl.cfg.logger.Log(LogLevelInfo, "beginning transact session") + return s.cl.BeginTransaction() +} + +func (s *GroupTransactSession) failed() bool { + return s.revoked || s.lost +} + +// End ends a transaction, committing if commit is true, if the group did not +// rebalance since the transaction began, and if committing offsets is +// successful. If any of these conditions are false, this aborts. This flushes +// or aborts depending on `commit`. +// +// This returns whether the transaction committed or any error that occurred. +// No returned error is retryable. Either the transactional ID has entered a +// failed state, or the client retried so much that the retry limit was hit, +// and odds are you should not continue. While a context is allowed, canceling +// it will likely leave the client in an invalid state. Canceling should only +// be done if you want to shut down. +func (s *GroupTransactSession) End(ctx context.Context, commit TransactionEndTry) (committed bool, err error) { + defer func() { + s.failMu.Lock() + s.revoked = false + s.revokedCh = make(chan struct{}) + s.lost = false + s.lostCh = make(chan struct{}) + s.failMu.Unlock() + }() + + switch commit { + case TryCommit: + if err := s.cl.Flush(ctx); err != nil { + return false, err // we do not abort below, because an error here is ctx closing + } + case TryAbort: + if err := s.cl.AbortBufferedRecords(ctx); err != nil { + return false, err // same + } + } + + wantCommit := bool(commit) + + s.failMu.Lock() + failed := s.failed() + + precommit := s.cl.CommittedOffsets() + postcommit := s.cl.UncommittedOffsets() + s.failMu.Unlock() + + var hasAbortableCommitErr bool + var commitErr error + var g *groupConsumer + + kip447 := false + if wantCommit && !failed { + isAbortableCommitErr := func(err error) bool { + // ILLEGAL_GENERATION: rebalance began and completed + // before we committed. + // + // REBALANCE_IN_PREGRESS: rebalance began, abort. + // + // COORDINATOR_NOT_AVAILABLE, + // COORDINATOR_LOAD_IN_PROGRESS, + // NOT_COORDINATOR: request failed too many times + // + // CONCURRENT_TRANSACTIONS: Kafka not harmonized, + // we can just abort. + // + // UNKNOWN_SERVER_ERROR: technically should not happen, + // but we can just abort. Redpanda returns this in + // certain versions. 
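// Illustrative usage sketch (an editorial example, not part of the vendored
// upstream file): the End flow implemented in this function is what a typical
// consume-modify-produce loop leans on. Assuming a session built with
// illustrative broker, topic, group, and transactional-id values, usage looks
// roughly like:
//
//	sess, _ := kgo.NewGroupTransactSession(
//		kgo.SeedBrokers("localhost:9092"),
//		kgo.ConsumerGroup("etl-group"),
//		kgo.ConsumeTopics("in"),
//		kgo.TransactionalID("etl-txn"),
//	)
//	for {
//		fetches := sess.PollFetches(ctx)
//		_ = sess.Begin()
//		fetches.EachRecord(func(r *kgo.Record) {
//			sess.Produce(ctx, &kgo.Record{Topic: "out", Value: r.Value}, nil)
//		})
//		committed, _ := sess.End(ctx, kgo.TryCommit)
//		_ = committed // false means the work was aborted and will be re-consumed
//	}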
+ switch { + case errors.Is(err, kerr.IllegalGeneration), + errors.Is(err, kerr.RebalanceInProgress), + errors.Is(err, kerr.CoordinatorNotAvailable), + errors.Is(err, kerr.CoordinatorLoadInProgress), + errors.Is(err, kerr.NotCoordinator), + errors.Is(err, kerr.ConcurrentTransactions), + errors.Is(err, kerr.UnknownServerError): + return true + } + return false + } + + var commitErrs []string + + committed := make(chan struct{}) + g = s.cl.commitTransactionOffsets(ctx, postcommit, + func(_ *kmsg.TxnOffsetCommitRequest, resp *kmsg.TxnOffsetCommitResponse, err error) { + defer close(committed) + if err != nil { + if isAbortableCommitErr(err) { + hasAbortableCommitErr = true + return + } + commitErrs = append(commitErrs, err.Error()) + return + } + kip447 = resp.Version >= 3 + + for _, t := range resp.Topics { + for _, p := range t.Partitions { + if err := kerr.ErrorForCode(p.ErrorCode); err != nil { + if isAbortableCommitErr(err) { + hasAbortableCommitErr = true + } else { + commitErrs = append(commitErrs, fmt.Sprintf("topic %s partition %d: %v", t.Topic, p.Partition, err)) + } + } + } + } + }, + ) + <-committed + + if len(commitErrs) > 0 { + commitErr = fmt.Errorf("unable to commit transaction offsets: %s", strings.Join(commitErrs, ", ")) + } + } + + // Now that we have committed our offsets, before we allow them to be + // used, we force a heartbeat. By forcing a heartbeat, if there is no + // error, then we know we have up to RebalanceTimeout to write our + // EndTxnRequest without a problem. + // + // We should not be booted from the group if we receive an ok + // heartbeat, meaning that, as mentioned, we should be able to end the + // transaction safely. + var okHeartbeat bool + if g != nil && commitErr == nil { + waitHeartbeat := make(chan struct{}) + var heartbeatErr error + select { + case g.heartbeatForceCh <- func(err error) { + defer close(waitHeartbeat) + heartbeatErr = err + }: + select { + case <-waitHeartbeat: + okHeartbeat = heartbeatErr == nil + case <-s.revokedCh: + case <-s.lostCh: + } + case <-s.revokedCh: + case <-s.lostCh: + } + } + + s.failMu.Lock() + + // If we know we are KIP-447 and the user is requiring stable, we can + // unlock immediately because Kafka will itself block a rebalance + // fetching offsets from outstanding transactions. + // + // If either of these are false, we spin up a goroutine that sleeps for + // 200ms before unlocking to give Kafka a chance to avoid some odd race + // that would permit duplicates (i.e., what KIP-447 is preventing). + // + // This 200ms is not perfect but it should be well enough time on a + // stable cluster. On an unstable cluster, I still expect clients to be + // slower than intra-cluster communication, but there is a risk. + if kip447 && s.cl.cfg.requireStable { + defer s.failMu.Unlock() + } else { + defer func() { + if committed { + s.cl.cfg.logger.Log(LogLevelDebug, "sleeping 200ms before allowing a rebalance to continue to give the brokers a chance to write txn markers and avoid duplicates") + go func() { + time.Sleep(200 * time.Millisecond) + s.failMu.Unlock() + }() + } else { + s.failMu.Unlock() + } + }() + } + + tryCommit := !s.failed() && commitErr == nil && !hasAbortableCommitErr && okHeartbeat + willTryCommit := wantCommit && tryCommit + + s.cl.cfg.logger.Log(LogLevelInfo, "transaction session ending", + "was_failed", s.failed(), + "want_commit", wantCommit, + "can_try_commit", tryCommit, + "will_try_commit", willTryCommit, + ) + + // We have a few potential retryable errors from EndTransaction. 
+ // OperationNotAttempted will be returned at most once. + // + // UnknownServerError should not be returned, but some brokers do: + // technically this is fatal, but there is no downside to retrying + // (even retrying a commit) and seeing if we are successful or if we + // get a better error. + var tries int +retry: + endTxnErr := s.cl.EndTransaction(ctx, TransactionEndTry(willTryCommit)) + tries++ + if endTxnErr != nil && tries < 10 { + switch { + case errors.Is(endTxnErr, kerr.OperationNotAttempted): + s.cl.cfg.logger.Log(LogLevelInfo, "end transaction with commit not attempted; retrying as abort") + willTryCommit = false + goto retry + + case errors.Is(endTxnErr, kerr.UnknownServerError): + s.cl.cfg.logger.Log(LogLevelInfo, "end transaction with commit unknown server error; retrying") + after := time.NewTimer(s.cl.cfg.retryBackoff(tries)) + select { + case <-after.C: // context canceled; we will see when we retry + case <-s.cl.ctx.Done(): + after.Stop() + } + goto retry + } + } + + if !willTryCommit || endTxnErr != nil { + currentCommit := s.cl.CommittedOffsets() + s.cl.cfg.logger.Log(LogLevelInfo, "transact session resetting to current committed state (potentially after a rejoin)", + "tried_commit", willTryCommit, + "commit_err", endTxnErr, + "state_precommit", precommit, + "state_currently_committed", currentCommit, + ) + s.cl.setOffsets(currentCommit, false) + } else if willTryCommit && endTxnErr == nil { + s.cl.cfg.logger.Log(LogLevelInfo, "transact session successful, setting to newly committed state", + "tried_commit", willTryCommit, + "postcommit", postcommit, + ) + s.cl.setOffsets(postcommit, false) + } + + switch { + case commitErr != nil && endTxnErr == nil: + return false, commitErr + + case commitErr == nil && endTxnErr != nil: + return false, endTxnErr + + case commitErr != nil && endTxnErr != nil: + return false, endTxnErr + + default: // both errs nil + committed = willTryCommit + return willTryCommit, nil + } +} + +// BeginTransaction sets the client to a transactional state, erroring if there +// is no transactional ID, or if the producer is currently in a fatal +// (unrecoverable) state, or if the client is already in a transaction. +// +// This must not be called concurrently with other client functions. +func (cl *Client) BeginTransaction() error { + if cl.cfg.txnID == nil { + return errNotTransactional + } + + cl.producer.txnMu.Lock() + defer cl.producer.txnMu.Unlock() + + if cl.producer.inTxn { + return errors.New("invalid attempt to begin a transaction while already in a transaction") + } + + needRecover, didRecover, err := cl.maybeRecoverProducerID(context.Background()) + if needRecover && !didRecover { + cl.cfg.logger.Log(LogLevelInfo, "unable to begin transaction due to unrecoverable producer id error", "err", err) + return fmt.Errorf("producer ID has a fatal, unrecoverable error, err: %w", err) + } + + cl.producer.inTxn = true + cl.producer.producingTxn.Store(true) // allow produces for txns now + cl.cfg.logger.Log(LogLevelInfo, "beginning transaction", "transactional_id", *cl.cfg.txnID) + + return nil +} + +// EndBeginTxnHow controls the safety of how EndAndBeginTransaction executes. +type EndBeginTxnHow uint8 + +const ( + // EndBeginTxnSafe ensures a "safe" execution of EndAndBeginTransaction + // at the expense of speed. This option blocks all produce requests and + // only resumes produce requests when onEnd finishes. 
Note that some + // produce requests may have finished successfully and records that + // were a part of a transaction may have their promises waiting to be + // called: not all promises are guaranteed to be called. + EndBeginTxnSafe EndBeginTxnHow = iota + + // EndBeginTxnUnsafe opts for less safe EndAndBeginTransaction flow to + // achieve higher throughput. This option allows produce requests to + // continue while EndTxn actually commits. This is unsafe because a + // produce request itself only half begins a transaction. Internally, + // AddPartitionsToTxn actually begins a transaction. If your + // application dies before the client is able to successfully issue + // AddPartitionsToTxn, then a transaction will have partially begun + // within Kafka: the partial transaction will prevent the partition + // from being consumable past where the transaction begun, and the + // transaction will not timeout. You will have to restart your + // application with the SAME transactional ID and produce to all the + // same partitions to ensure to resume the transaction and unstick the + // partitions. + // + // Also note: this option does not work on all broker implementations. + // This relies on Kafka internals. Some brokers (notably Redpanda) are + // more strict with enforcing transaction correctness and this option + // cannot be used and will cause errors. + // + // Deprecated: Kafka 3.6 removed support for the hacky behavior that + // this option was abusing. Thus, as of Kafka 3.6, this option does not + // work against Kafka. This option also has never worked for Redpanda + // becuse Redpanda always strictly validated that partitions were a + // part of a transaction. Later versions of Kafka and Redpanda will + // remove the need for AddPartitionsToTxn at all and thus this option + // ultimately will be unnecessary anyway. + EndBeginTxnUnsafe +) + +// EndAndBeginTransaction is a combination of EndTransaction and +// BeginTransaction, and relaxes the restriction that the client must have no +// buffered records. This function does not flush nor abort any buffered +// records. It is ok to concurrently produce while this function executes. +// +// This function has different safety guarantees which are up to the user to +// decide. See the documentation on EndBeginTxnHow for which you would like to +// choose. +// +// The onEnd function is called with your input context and the result of +// EndTransaction. Promises are paused while onEnd executes. If onEnd returns +// an error, BeginTransaction is not called and this function returns the +// result of onEnd. Otherwise, this function returns the result of +// BeginTransaction. See the documentation on EndTransaction and +// BeginTransaction for further details. It is invalid to call this function +// more than once at a time, and it is invalid to call concurrent with +// EndTransaction or BeginTransaction. +func (cl *Client) EndAndBeginTransaction( + ctx context.Context, + how EndBeginTxnHow, + commit TransactionEndTry, + onEnd func(context.Context, error) error, +) (rerr error) { + if g := cl.consumer.g; g != nil { + return errors.New("cannot use EndAndBeginTransaction with EOS") + } + + cl.producer.txnMu.Lock() + defer cl.producer.txnMu.Unlock() + + // From BeginTransaction: if we return with no error, we begin. Unlike + // BeginTransaction, we do not error if in a transaction, because we + // expect to be in one. 
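// Illustrative usage sketch (an editorial example, not part of the vendored
// upstream file): chaining back-to-back transactions with the safe mode
// described above. The onEnd callback receives EndTransaction's result and
// can stop the next Begin by returning a non-nil error.
//
//	err := cl.EndAndBeginTransaction(ctx, kgo.EndBeginTxnSafe, kgo.TryCommit,
//		func(_ context.Context, endErr error) error {
//			return endErr // do not begin a new transaction if the commit failed
//		},
//	)
//	_ = err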
+ defer func() { + if rerr == nil { + needRecover, didRecover, err := cl.maybeRecoverProducerID(ctx) + if needRecover && !didRecover { + cl.cfg.logger.Log(LogLevelInfo, "unable to begin transaction due to unrecoverable producer id error", "err", err) + rerr = fmt.Errorf("producer ID has a fatal, unrecoverable error, err: %w", err) + return + } + cl.producer.inTxn = true + cl.cfg.logger.Log(LogLevelInfo, "beginning transaction", "transactional_id", *cl.cfg.txnID) + } + }() + + // If end/beginning safely, we have to pause AddPartitionsToTxn and + // ProduceRequest, and we only resume after the user's onEnd has been + // called. + if how == EndBeginTxnSafe { + if err := cl.producer.pause(ctx); err != nil { + return err + } + defer cl.producer.resume() + } + + // Before BeginTransaction, we block promises & call onEnd with whatever + // the return error is. + cl.producer.promisesMu.Lock() + var promisesUnblocked bool + unblockPromises := func() { + if promisesUnblocked { + return + } + promisesUnblocked = true + defer cl.producer.promisesMu.Unlock() + rerr = onEnd(ctx, rerr) + } + defer unblockPromises() + + if !cl.producer.inTxn { + return nil + } + + var anyAdded bool + var readd map[string][]int32 + for topic, parts := range cl.producer.topics.load() { + for i, part := range parts.load().partitions { + if part.records.addedToTxn.Swap(false) { + if how == EndBeginTxnUnsafe { + if readd == nil { + readd = make(map[string][]int32) + } + readd[topic] = append(readd[topic], int32(i)) + } + anyAdded = true + } + } + } + anyAdded = anyAdded || cl.producer.readded + + // EndTxn when no txn was started returns INVALID_TXN_STATE. + if !anyAdded { + cl.cfg.logger.Log(LogLevelDebug, "no records were produced during the commit; thus no transaction was began; ending without doing anything") + return nil + } + + // From EndTransaction: if the pid has an error, we may try to recover. + id, epoch, err := cl.producerID(ctx2fn(ctx)) + if err != nil { + if commit { + return kerr.OperationNotAttempted + } + if _, didRecover, _ := cl.maybeRecoverProducerID(ctx); didRecover { + return nil + } + } + cl.cfg.logger.Log(LogLevelInfo, "ending transaction", + "transactional_id", *cl.cfg.txnID, + "producer_id", id, + "epoch", epoch, + "commit", commit, + ) + cl.producer.readded = false + err = cl.doWithConcurrentTransactions(ctx, "EndTxn", func() error { + req := kmsg.NewPtrEndTxnRequest() + req.TransactionalID = *cl.cfg.txnID + req.ProducerID = id + req.ProducerEpoch = epoch + req.Commit = bool(commit) + resp, err := req.RequestWith(ctx, cl) + if err != nil { + return err + } + + // When ending a transaction, if the user is using unsafe mode, + // there is a logic race where the user can actually end before + // AddPartitionsToTxn is issued. This should be rare and is + // most likely only to happen whenever a new transaction is + // starting from a not-in-transaction state (i.e., the first + // transaction). If we see InvalidTxnState in unsafe mode, we + // assume that a transaction was not actually begun and we + // return success. + // + // In Kafka, InvalidTxnState is also returned when producing + // non-transactional records from a producer that is currently + // in a transaction. 
+ // + // All other cases it is returned is in EndTxn: + // * state == CompleteCommit and EndTxn != commit + // * state == CompleteAbort and EndTxn != abort + // * state == PrepareCommit and EndTxn != commit (otherwise, returns concurrent transactions) + // * state == PrepareAbort and EndTxn != abort (otherwise, returns concurrent transactions) + // * state == Empty + // + // This basically guards against the final case, all others are + // Kafka internal state transitioning and we should never hit + // them. + if how == EndBeginTxnUnsafe && resp.ErrorCode == kerr.InvalidTxnState.Code { + return nil + } + return kerr.ErrorForCode(resp.ErrorCode) + }) + var ke *kerr.Error + if errors.As(err, &ke) && !ke.Retriable { + cl.failProducerID(id, epoch, err) + } + if err != nil || how != EndBeginTxnUnsafe { + return err + } + unblockPromises() + + // If we are end/beginning unsafely, then we need to re-add all + // partitions to a new transaction immediately. Timing makes it + // impossible to know what was truly added before EndTxn, so we + // pessimistically assume that every partition must be re-added. + // + // We track readd before the txn and swap those to un-added, but we + // also need to track anything that is newly added that raced with our + // EndTxn. We swap before the txn to ensure that *eventually*, + // partitions will be tracked as not in a transaction if people stop + // producing. + // + // We do this before the user callback because we *need* to start a new + // transaction within Kafka to ensure there will be a timeout. Per the + // unsafe aspect, the client could die or this request could error and + // there could be a stranded txn within Kafka's ProducerStateManager, + // but ideally the user will reconnect with the same txnal id. + cl.producer.readded = true + return cl.doWithConcurrentTransactions(ctx, "AddPartitionsToTxn", func() error { + req := kmsg.NewPtrAddPartitionsToTxnRequest() + req.TransactionalID = *cl.cfg.txnID + req.ProducerID = id + req.ProducerEpoch = epoch + + for topic, parts := range cl.producer.topics.load() { + for i, part := range parts.load().partitions { + if part.records.addedToTxn.Load() { + readd[topic] = append(readd[topic], int32(i)) + } + } + } + + ps := make(map[int32]struct{}) + for topic, parts := range readd { + t := kmsg.NewAddPartitionsToTxnRequestTopic() + t.Topic = topic + for _, part := range parts { + ps[part] = struct{}{} + } + for p := range ps { + t.Partitions = append(t.Partitions, p) + delete(ps, p) + } + if len(t.Partitions) > 0 { + req.Topics = append(req.Topics, t) + } + } + + resp, err := req.RequestWith(ctx, cl) + if err != nil { + return err + } + + for i := range resp.Topics { + t := &resp.Topics[i] + for j := range t.Partitions { + p := &t.Partitions[j] + if err := kerr.ErrorForCode(p.ErrorCode); err != nil { + return err + } + } + } + return nil + }) +} + +// AbortBufferedRecords fails all unflushed records with ErrAborted and waits +// for there to be no buffered records. +// +// This accepts a context to quit the wait early, but quitting the wait may +// lead to an invalid state and should only be used if you are quitting your +// application. This function waits to abort records at safe points: if records +// are known to not be in flight. This function is safe to call multiple times +// concurrently, and safe to call concurrent with Flush. +// +// NOTE: This aborting record waits until all inflight requests have known +// responses. The client must wait to ensure no duplicate sequence number +// issues. 
For more details, and for an immediate alternative, check the +// documentation on UnsafeAbortBufferedRecords. +func (cl *Client) AbortBufferedRecords(ctx context.Context) error { + cl.producer.aborting.Add(1) + defer cl.producer.aborting.Add(-1) + + cl.cfg.logger.Log(LogLevelInfo, "producer state set to aborting; continuing to wait via flushing") + defer cl.cfg.logger.Log(LogLevelDebug, "aborted buffered records") + + // We must clear unknown topics ourselves, because flush just waits + // like normal. + p := &cl.producer + p.unknownTopicsMu.Lock() + for _, unknown := range p.unknownTopics { + select { + case unknown.fatal <- ErrAborting: + default: + } + } + p.unknownTopicsMu.Unlock() + + // Setting the aborting state allows records to fail before + // or after produce requests; thus, now we just flush. + return cl.Flush(ctx) +} + +// UnsafeAbortBufferedRecords fails all unflushed records with ErrAborted and +// waits for there to be no buffered records. This function does NOT wait for +// any inflight produce requests to finish, meaning topics in the client may be +// in an invalid state and producing to an invalid-state topic may cause the +// client to enter a fatal failed state. If you want to produce to topics that +// were unsafely aborted, it is recommended to use PurgeTopicsFromClient to +// forcefully reset the topics before producing to them again. +// +// When producing with idempotency enabled or with transactions, every record +// has a sequence number. The client must wait for inflight requests to have +// responses before failing a record, otherwise the client cannot know if a +// sequence number was seen by the broker and tracked or not seen by the broker +// and not tracked. By unsafely aborting, the client forcefully abandons all +// records, and producing to the topics again may re-use a sequence number and +// cause internal errors. +func (cl *Client) UnsafeAbortBufferedRecords() { + cl.failBufferedRecords(ErrAborting) +} + +// EndTransaction ends a transaction and resets the client's internal state to +// not be in a transaction. +// +// Flush and CommitOffsetsForTransaction must be called before this function; +// this function does not flush and does not itself ensure that all buffered +// records are flushed. If no record yet has caused a partition to be added to +// the transaction, this function does nothing and returns nil. Alternatively, +// AbortBufferedRecords should be called before aborting a transaction to +// ensure that any buffered records not yet flushed will not be a part of a new +// transaction. +// +// If the producer ID has an error and you are trying to commit, this will +// return with kerr.OperationNotAttempted. If this happened, retry +// EndTransaction with TryAbort. Not other error is retryable, and you should +// not retry with TryAbort. +// +// If records failed with UnknownProducerID and your Kafka version is at least +// 2.5, then aborting here will potentially allow the client to recover for +// more production. +// +// Note that canceling the context will likely leave the client in an +// undesirable state, because canceling the context may cancel the in-flight +// EndTransaction request, making it impossible to know whether the commit or +// abort was successful. It is recommended to not cancel the context. 
+func (cl *Client) EndTransaction(ctx context.Context, commit TransactionEndTry) error { + cl.producer.txnMu.Lock() + defer cl.producer.txnMu.Unlock() + + if !cl.producer.inTxn { + return nil + } + cl.producer.inTxn = false + + cl.producer.producingTxn.Store(false) // forbid any new produces while ending txn + + // anyAdded tracks if any partitions were added to this txn, because + // any partitions written to triggers AddPartitionToTxn, which triggers + // the txn to actually begin within Kafka. + // + // If we consumed at all but did not produce, the transaction ending + // issues AddOffsetsToTxn, which internally adds a __consumer_offsets + // partition to the transaction. Thus, if we added offsets, then we + // also produced. + var anyAdded bool + if g := cl.consumer.g; g != nil { + // We do not lock because we expect commitTransactionOffsets to + // be called *before* ending a transaction. + if g.offsetsAddedToTxn { + g.offsetsAddedToTxn = false + anyAdded = true + } + } else { + cl.cfg.logger.Log(LogLevelDebug, "transaction ending, no group loaded; this must be a producer-only transaction, not consume-modify-produce EOS") + } + + // After the flush, no records are being produced to, and we can set + // addedToTxn to false outside of any mutex. + for _, parts := range cl.producer.topics.load() { + for _, part := range parts.load().partitions { + anyAdded = part.records.addedToTxn.Swap(false) || anyAdded + } + } + + // If the user previously used EndAndBeginTransaction with + // EndBeginTxnUnsafe, we may have to end a transaction even though + // nothing may be in it. + anyAdded = anyAdded || cl.producer.readded + + // If no partition was added to a transaction, then we have nothing to commit. + // + // Note that anyAdded is true if the producer ID was failed, meaning we will + // get to the potential recovery logic below if necessary. + if !anyAdded { + cl.cfg.logger.Log(LogLevelDebug, "no records were produced during the commit; thus no transaction was began; ending without doing anything") + return nil + } + + id, epoch, err := cl.producerID(ctx2fn(ctx)) + if err != nil { + if commit { + return kerr.OperationNotAttempted + } + + // If we recovered the producer ID, we return early, since + // there is no reason to issue an abort now that the id is + // different. Otherwise, we issue our EndTxn which will likely + // fail, but that is ok, we will just return error. + _, didRecover, _ := cl.maybeRecoverProducerID(ctx) + if didRecover { + return nil + } + } + + cl.cfg.logger.Log(LogLevelInfo, "ending transaction", + "transactional_id", *cl.cfg.txnID, + "producer_id", id, + "epoch", epoch, + "commit", commit, + ) + + cl.producer.readded = false + err = cl.doWithConcurrentTransactions(ctx, "EndTxn", func() error { + req := kmsg.NewPtrEndTxnRequest() + req.TransactionalID = *cl.cfg.txnID + req.ProducerID = id + req.ProducerEpoch = epoch + req.Commit = bool(commit) + resp, err := req.RequestWith(ctx, cl) + if err != nil { + return err + } + return kerr.ErrorForCode(resp.ErrorCode) + }) + + // If the returned error is still a Kafka error, this is fatal and we + // need to fail our producer ID we loaded above. + // + // UNKNOWN_SERVER_ERROR can theoretically be returned (not all brokers + // do). This technically is fatal, but we do not really know whether it + // is. We can just return this error and let the caller decide to + // continue, if the caller does continue, we will try something and + // eventually then receive our proper transactional error, if any. 
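// Illustrative usage sketch (an editorial example, not part of the vendored
// upstream file): the contract documented above this function implies a
// flush-then-end pattern for a transactional producer, retrying with TryAbort
// only when the commit was not attempted.
//
//	_ = cl.BeginTransaction()
//	cl.Produce(ctx, rec, nil)
//	_ = cl.Flush(ctx)
//	if err := cl.EndTransaction(ctx, kgo.TryCommit); err != nil {
//		if errors.Is(err, kerr.OperationNotAttempted) {
//			_ = cl.EndTransaction(ctx, kgo.TryAbort) // per the docs above, retry only as an abort
//		}
//	}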
+ var ke *kerr.Error + if errors.As(err, &ke) && !ke.Retriable && ke.Code != kerr.UnknownServerError.Code { + cl.failProducerID(id, epoch, err) + } + + return err +} + +// This returns if it is necessary to recover the producer ID (it has an +// error), whether it is possible to recover, and, if not, the error. +// +// We call this when beginning a transaction or when ending with an abort. +func (cl *Client) maybeRecoverProducerID(ctx context.Context) (necessary, did bool, err error) { + cl.producer.mu.Lock() + defer cl.producer.mu.Unlock() + + id, epoch, err := cl.producerID(ctx2fn(ctx)) + if err == nil { + return false, false, nil + } + + var ke *kerr.Error + if ok := errors.As(err, &ke); !ok { + return true, false, err + } + + kip360 := cl.producer.idVersion >= 3 && (errors.Is(ke, kerr.UnknownProducerID) || errors.Is(ke, kerr.InvalidProducerIDMapping)) + kip588 := cl.producer.idVersion >= 4 && errors.Is(ke, kerr.InvalidProducerEpoch /* || err == kerr.TransactionTimedOut when implemented in Kafka */) + + recoverable := kip360 || kip588 + if !recoverable { + return true, false, err // fatal, unrecoverable + } + + // Storing errReloadProducerID will reset sequence numbers as appropriate + // when the producer ID is reloaded successfully. + cl.producer.id.Store(&producerID{ + id: id, + epoch: epoch, + err: errReloadProducerID, + }) + return true, true, nil +} + +// If a transaction is begun too quickly after finishing an old transaction, +// Kafka may still be finalizing its commit / abort and will return a +// concurrent transactions error. We handle that by retrying for a bit. +func (cl *Client) doWithConcurrentTransactions(ctx context.Context, name string, fn func() error) error { + start := time.Now() + tries := 0 + backoff := cl.cfg.txnBackoff + +start: + err := fn() + if errors.Is(err, kerr.ConcurrentTransactions) { + // The longer we are stalled, the more we enforce a minimum + // backoff. + since := time.Since(start) + switch { + case since > time.Second: + if backoff < 200*time.Millisecond { + backoff = 200 * time.Millisecond + } + case since > 5*time.Second/2: + if backoff < 500*time.Millisecond { + backoff = 500 * time.Millisecond + } + case since > 5*time.Second: + if backoff < time.Second { + backoff = time.Second + } + } + + tries++ + cl.cfg.logger.Log(LogLevelDebug, fmt.Sprintf("%s failed with CONCURRENT_TRANSACTIONS, which may be because we ended a txn and began producing in a new txn too quickly; backing off and retrying", name), + "backoff", backoff, + "since_request_tries_start", time.Since(start), + "tries", tries, + ) + select { + case <-time.After(backoff): + case <-ctx.Done(): + cl.cfg.logger.Log(LogLevelError, fmt.Sprintf("abandoning %s retry due to request ctx quitting", name)) + return err + case <-cl.ctx.Done(): + cl.cfg.logger.Log(LogLevelError, fmt.Sprintf("abandoning %s retry due to client ctx quitting", name)) + return err + } + goto start + } + return err +} + +//////////////////////////////////////////////////////////////////////////////////////////// +// TRANSACTIONAL COMMITTING // +// MOSTLY DUPLICATED CODE DUE TO NO GENERICS AND BECAUSE THE TYPES ARE SLIGHTLY DIFFERENT // +//////////////////////////////////////////////////////////////////////////////////////////// + +// commitTransactionOffsets is exactly like CommitOffsets, but specifically for +// use with transactional consuming and producing. +// +// Since this function is a gigantic footgun if not done properly, we hide this +// and only allow transaction sessions to commit. 
+// +// Unlike CommitOffsets, we do not update the group's uncommitted map. We leave +// that to the calling code to do properly with SetOffsets depending on whether +// an eventual abort happens or not. +func (cl *Client) commitTransactionOffsets( + ctx context.Context, + uncommitted map[string]map[int32]EpochOffset, + onDone func(*kmsg.TxnOffsetCommitRequest, *kmsg.TxnOffsetCommitResponse, error), +) *groupConsumer { + cl.cfg.logger.Log(LogLevelDebug, "in commitTransactionOffsets", "with", uncommitted) + defer cl.cfg.logger.Log(LogLevelDebug, "left commitTransactionOffsets") + + if cl.cfg.txnID == nil { + onDone(nil, nil, errNotTransactional) + return nil + } + + // Before committing, ensure we are at least in a transaction. We + // unlock the producer txnMu before committing to allow EndTransaction + // to go through, even though that could cut off our commit. + cl.producer.txnMu.Lock() + var unlockedTxn bool + unlockTxn := func() { + if !unlockedTxn { + cl.producer.txnMu.Unlock() + } + unlockedTxn = true + } + defer unlockTxn() + if !cl.producer.inTxn { + onDone(nil, nil, errNotInTransaction) + return nil + } + + g := cl.consumer.g + if g == nil { + onDone(kmsg.NewPtrTxnOffsetCommitRequest(), kmsg.NewPtrTxnOffsetCommitResponse(), errNotGroup) + return nil + } + + req, err := g.prepareTxnOffsetCommit(ctx, uncommitted) + if err != nil { + onDone(req, kmsg.NewPtrTxnOffsetCommitResponse(), err) + return g + } + if len(req.Topics) == 0 { + onDone(kmsg.NewPtrTxnOffsetCommitRequest(), kmsg.NewPtrTxnOffsetCommitResponse(), nil) + return g + } + + if !g.offsetsAddedToTxn { + if err := cl.addOffsetsToTxn(ctx, g.cfg.group); err != nil { + if onDone != nil { + onDone(nil, nil, err) + } + return g + } + g.offsetsAddedToTxn = true + } + + unlockTxn() + + if err := g.waitJoinSyncMu(ctx); err != nil { + onDone(kmsg.NewPtrTxnOffsetCommitRequest(), kmsg.NewPtrTxnOffsetCommitResponse(), err) + return nil + } + unblockJoinSync := func(req *kmsg.TxnOffsetCommitRequest, resp *kmsg.TxnOffsetCommitResponse, err error) { + g.noCommitDuringJoinAndSync.RUnlock() + onDone(req, resp, err) + } + g.mu.Lock() + defer g.mu.Unlock() + + g.commitTxn(ctx, req, unblockJoinSync) + return g +} + +// Ties a transactional producer to a group. Since this requires a producer ID, +// this initializes one if it is not yet initialized. This would only be the +// case if trying to commit before any records have been sent. +func (cl *Client) addOffsetsToTxn(ctx context.Context, group string) error { + id, epoch, err := cl.producerID(ctx2fn(ctx)) + if err != nil { + return err + } + + err = cl.doWithConcurrentTransactions(ctx, "AddOffsetsToTxn", func() error { // committing offsets without producing causes a transaction to begin within Kafka + cl.cfg.logger.Log(LogLevelInfo, "issuing AddOffsetsToTxn", + "txn", *cl.cfg.txnID, + "producerID", id, + "producerEpoch", epoch, + "group", group, + ) + req := kmsg.NewPtrAddOffsetsToTxnRequest() + req.TransactionalID = *cl.cfg.txnID + req.ProducerID = id + req.ProducerEpoch = epoch + req.Group = group + resp, err := req.RequestWith(ctx, cl) + if err != nil { + return err + } + return kerr.ErrorForCode(resp.ErrorCode) + }) + + // If the returned error is still a Kafka error, this is fatal and we + // need to fail our producer ID we created just above. + // + // We special case UNKNOWN_SERVER_ERROR, because we do not really know + // if this is fatal. If it is, we will catch it later on a better + // error. 
Some brokers send this when things fail internally, we can + // just abort our commit and see if things are still bad in + // EndTransaction. + var ke *kerr.Error + if errors.As(err, &ke) && !ke.Retriable && ke.Code != kerr.UnknownServerError.Code { + cl.failProducerID(id, epoch, err) + } + + return err +} + +// commitTxn is ALMOST EXACTLY THE SAME as commit, but changed for txn types +// and we avoid updateCommitted. We avoid updating because we manually +// SetOffsets when ending the transaction. +func (g *groupConsumer) commitTxn(ctx context.Context, req *kmsg.TxnOffsetCommitRequest, onDone func(*kmsg.TxnOffsetCommitRequest, *kmsg.TxnOffsetCommitResponse, error)) { + if onDone == nil { // note we must always call onDone + onDone = func(_ *kmsg.TxnOffsetCommitRequest, _ *kmsg.TxnOffsetCommitResponse, _ error) {} + } + + if g.commitCancel != nil { + g.commitCancel() // cancel any prior commit + } + priorCancel := g.commitCancel + priorDone := g.commitDone + + // Unlike the non-txn consumer, we use the group context for + // transaction offset committing. We want to quit when the group is + // left, and we are not committing when leaving. We rely on proper + // usage of the GroupTransactSession API to issue commits, so there is + // no reason not to use the group context here. + commitCtx, commitCancel := context.WithCancel(g.ctx) // enable ours to be canceled and waited for + commitDone := make(chan struct{}) + + g.commitCancel = commitCancel + g.commitDone = commitDone + + if ctx.Done() != nil { + go func() { + select { + case <-ctx.Done(): + commitCancel() + case <-commitCtx.Done(): + } + }() + } + + go func() { + defer close(commitDone) // allow future commits to continue when we are done + defer commitCancel() + if priorDone != nil { + select { + case <-priorDone: + default: + g.cl.cfg.logger.Log(LogLevelDebug, "canceling prior txn offset commit to issue another") + priorCancel() + <-priorDone // wait for any prior request to finish + } + } + g.cl.cfg.logger.Log(LogLevelDebug, "issuing txn offset commit", "uncommitted", req) + + var resp *kmsg.TxnOffsetCommitResponse + var err error + if len(req.Topics) > 0 { + resp, err = req.RequestWith(commitCtx, g.cl) + } + if err != nil { + onDone(req, nil, err) + return + } + onDone(req, resp, nil) + }() +} + +func (g *groupConsumer) prepareTxnOffsetCommit(ctx context.Context, uncommitted map[string]map[int32]EpochOffset) (*kmsg.TxnOffsetCommitRequest, error) { + req := kmsg.NewPtrTxnOffsetCommitRequest() + + // We're now generating the producerID before addOffsetsToTxn. + // We will not make this request until after addOffsetsToTxn, but it's possible to fail here due to a failed producerID. 
+ id, epoch, err := g.cl.producerID(ctx2fn(ctx)) + if err != nil { + return req, err + } + + req.TransactionalID = *g.cl.cfg.txnID + req.Group = g.cfg.group + req.ProducerID = id + req.ProducerEpoch = epoch + memberID, generation := g.memberGen.load() + req.Generation = generation + req.MemberID = memberID + req.InstanceID = g.cfg.instanceID + + for topic, partitions := range uncommitted { + reqTopic := kmsg.NewTxnOffsetCommitRequestTopic() + reqTopic.Topic = topic + for partition, eo := range partitions { + reqPartition := kmsg.NewTxnOffsetCommitRequestTopicPartition() + reqPartition.Partition = partition + reqPartition.Offset = eo.Offset + reqPartition.LeaderEpoch = eo.Epoch + reqPartition.Metadata = &req.MemberID + reqTopic.Partitions = append(reqTopic.Partitions, reqPartition) + } + req.Topics = append(req.Topics, reqTopic) + } + + if fn, ok := ctx.Value(txnCommitContextFn).(func(*kmsg.TxnOffsetCommitRequest) error); ok { + if err := fn(req); err != nil { + return req, err + } + } + return req, nil +} diff --git a/vendor/github.com/twmb/franz-go/pkg/kmsg/LICENSE b/vendor/github.com/twmb/franz-go/pkg/kmsg/LICENSE new file mode 100644 index 000000000000..36e18034325d --- /dev/null +++ b/vendor/github.com/twmb/franz-go/pkg/kmsg/LICENSE @@ -0,0 +1,24 @@ +Copyright 2020, Travis Bischel. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of the library nor the + names of its contributors may be used to endorse or promote products + derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/twmb/franz-go/pkg/kmsg/api.go b/vendor/github.com/twmb/franz-go/pkg/kmsg/api.go new file mode 100644 index 000000000000..6bda2e61bd9d --- /dev/null +++ b/vendor/github.com/twmb/franz-go/pkg/kmsg/api.go @@ -0,0 +1,423 @@ +// Package kmsg contains Kafka request and response types and autogenerated +// serialization and deserialization functions. +// +// This package may bump major versions whenever Kafka makes a backwards +// incompatible protocol change, per the types chosen for this package. For +// example, Kafka can change a field from non-nullable to nullable, which would +// require changing a field from a non-pointer to a pointer. 
We could get +// around this by making everything an opaque struct and having getters, but +// that is more tedious than having a few rare major version bumps. +// +// If you are using this package directly with kgo, you should either always +// use New functions, or Default functions after creating structs, or you +// should pin the max supported version. If you use New functions, you will +// have safe defaults as new fields are added. If you pin versions, you will +// avoid new fields being used. If you do neither of these, you may opt in to +// new fields that do not have safe zero value defaults, and this may lead to +// errors or unexpected results. +// +// Thus, whenever you initialize a struct from this package, do the following: +// +// struct := kmsg.NewFoo() +// struct.Field = "value I want to set" +// +// Most of this package is generated, but a few things are manual. What is +// manual: all interfaces, the RequestFormatter, record / message / record +// batch reading, and sticky member metadata serialization. +package kmsg + +import ( + "context" + "sort" + + "github.com/twmb/franz-go/pkg/kmsg/internal/kbin" +) + +//go:generate cp ../kbin/primitives.go internal/kbin/ + +// Requestor issues requests. Notably, the kgo.Client and kgo.Broker implements +// Requestor. All Requests in this package have a RequestWith function to have +// type-safe requests. +type Requestor interface { + // Request issues a Request and returns either a Response or an error. + Request(context.Context, Request) (Response, error) +} + +// Request represents a type that can be requested to Kafka. +type Request interface { + // Key returns the protocol key for this message kind. + Key() int16 + // MaxVersion returns the maximum protocol version this message + // supports. + // + // This function allows one to implement a client that chooses message + // versions based off of the max of a message's max version in the + // client and the broker's max supported version. + MaxVersion() int16 + // SetVersion sets the version to use for this request and response. + SetVersion(int16) + // GetVersion returns the version currently set to use for the request + // and response. + GetVersion() int16 + // IsFlexible returns whether the request at its current version is + // "flexible" as per the KIP-482. + IsFlexible() bool + // AppendTo appends this message in wire protocol form to a slice and + // returns the slice. + AppendTo([]byte) []byte + // ReadFrom parses all of the input slice into the response type. + // + // This should return an error if too little data is input. + ReadFrom([]byte) error + // ResponseKind returns an empty Response that is expected for + // this message request. + ResponseKind() Response +} + +// AdminRequest represents a request that must be issued to Kafka controllers. +type AdminRequest interface { + // IsAdminRequest is a method attached to requests that must be + // issed to Kafka controllers. + IsAdminRequest() + Request +} + +// GroupCoordinatorRequest represents a request that must be issued to a +// group coordinator. +type GroupCoordinatorRequest interface { + // IsGroupCoordinatorRequest is a method attached to requests that + // must be issued to group coordinators. + IsGroupCoordinatorRequest() + Request +} + +// TxnCoordinatorRequest represents a request that must be issued to a +// transaction coordinator. +type TxnCoordinatorRequest interface { + // IsTxnCoordinatorRequest is a method attached to requests that + // must be issued to transaction coordinators. 
+ IsTxnCoordinatorRequest() + Request +} + +// Response represents a type that Kafka responds with. +type Response interface { + // Key returns the protocol key for this message kind. + Key() int16 + // MaxVersion returns the maximum protocol version this message + // supports. + MaxVersion() int16 + // SetVersion sets the version to use for this request and response. + SetVersion(int16) + // GetVersion returns the version currently set to use for the request + // and response. + GetVersion() int16 + // IsFlexible returns whether the request at its current version is + // "flexible" as per the KIP-482. + IsFlexible() bool + // AppendTo appends this message in wire protocol form to a slice and + // returns the slice. + AppendTo([]byte) []byte + // ReadFrom parses all of the input slice into the response type. + // + // This should return an error if too little data is input. + ReadFrom([]byte) error + // RequestKind returns an empty Request that is expected for + // this message request. + RequestKind() Request +} + +// UnsafeReadFrom, implemented by all requests and responses generated in this +// package, switches to using unsafe slice-to-string conversions when reading. +// This can be used to avoid a lot of garbage, but it means to have to be +// careful when using any strings in structs: if you hold onto the string, the +// underlying response slice will not be garbage collected. +type UnsafeReadFrom interface { + UnsafeReadFrom([]byte) error +} + +// ThrottleResponse represents a response that could have a throttle applied by +// Kafka. Any response that implements ThrottleResponse also implements +// SetThrottleResponse. +// +// Kafka 2.0.0 switched throttles from being applied before responses to being +// applied after responses. +type ThrottleResponse interface { + // Throttle returns the response's throttle millis value and + // whether Kafka applies the throttle after the response. + Throttle() (int32, bool) +} + +// SetThrottleResponse sets the throttle in a response that can have a throttle +// applied. Any kmsg interface that implements ThrottleResponse also implements +// SetThrottleResponse. +type SetThrottleResponse interface { + // SetThrottle sets the response's throttle millis value. + SetThrottle(int32) +} + +// TimeoutRequest represents a request that has a TimeoutMillis field. +// Any request that implements TimeoutRequest also implements SetTimeoutRequest. +type TimeoutRequest interface { + // Timeout returns the request's timeout millis value. + Timeout() int32 +} + +// SetTimeoutRequest sets the timeout in a request that can have a timeout +// applied. Any kmsg interface that implements ThrottleRequest also implements +// SetThrottleRequest. +type SetTimeoutRequest interface { + // SetTimeout sets the request's timeout millis value. + SetTimeout(timeoutMillis int32) +} + +// RequestFormatter formats requests. +// +// The default empty struct works correctly, but can be extended with the +// NewRequestFormatter function. +type RequestFormatter struct { + clientID *string +} + +// RequestFormatterOpt applys options to a RequestFormatter. +type RequestFormatterOpt interface { + apply(*RequestFormatter) +} + +type formatterOpt struct{ fn func(*RequestFormatter) } + +func (opt formatterOpt) apply(f *RequestFormatter) { opt.fn(f) } + +// FormatterClientID attaches the given client ID to any issued request, +// minus controlled shutdown v0, which uses its own special format. 
+func FormatterClientID(id string) RequestFormatterOpt { + return formatterOpt{func(f *RequestFormatter) { f.clientID = &id }} +} + +// NewRequestFormatter returns a RequestFormatter with the opts applied. +func NewRequestFormatter(opts ...RequestFormatterOpt) *RequestFormatter { + a := new(RequestFormatter) + for _, opt := range opts { + opt.apply(a) + } + return a +} + +// AppendRequest appends a full message request to dst, returning the updated +// slice. This message is the full body that needs to be written to issue a +// Kafka request. +func (f *RequestFormatter) AppendRequest( + dst []byte, + r Request, + correlationID int32, +) []byte { + dst = append(dst, 0, 0, 0, 0) // reserve length + k := r.Key() + v := r.GetVersion() + dst = kbin.AppendInt16(dst, k) + dst = kbin.AppendInt16(dst, v) + dst = kbin.AppendInt32(dst, correlationID) + if k == 7 && v == 0 { + return dst + } + + // Even with flexible versions, we do not use a compact client id. + // Clients issue ApiVersions immediately before knowing the broker + // version, and old brokers will not be able to understand a compact + // client id. + dst = kbin.AppendNullableString(dst, f.clientID) + + // The flexible tags end the request header, and then begins the + // request body. + if r.IsFlexible() { + var numTags uint8 + dst = append(dst, numTags) + if numTags != 0 { + // TODO when tags are added + } + } + + // Now the request body. + dst = r.AppendTo(dst) + + kbin.AppendInt32(dst[:0], int32(len(dst[4:]))) + return dst +} + +// StringPtr is a helper to return a pointer to a string. +func StringPtr(in string) *string { + return &in +} + +// ReadFrom provides decoding various versions of sticky member metadata. A key +// point of this type is that it does not contain a version number inside it, +// but it is versioned: if decoding v1 fails, this falls back to v0. +func (s *StickyMemberMetadata) ReadFrom(src []byte) error { + return s.readFrom(src, false) +} + +// UnsafeReadFrom is the same as ReadFrom, but uses unsafe slice to string +// conversions to reduce garbage. +func (s *StickyMemberMetadata) UnsafeReadFrom(src []byte) error { + return s.readFrom(src, true) +} + +func (s *StickyMemberMetadata) readFrom(src []byte, unsafe bool) error { + b := kbin.Reader{Src: src} + numAssignments := b.ArrayLen() + if numAssignments < 0 { + numAssignments = 0 + } + need := numAssignments - int32(cap(s.CurrentAssignment)) + if need > 0 { + s.CurrentAssignment = append(s.CurrentAssignment[:cap(s.CurrentAssignment)], make([]StickyMemberMetadataCurrentAssignment, need)...) + } else { + s.CurrentAssignment = s.CurrentAssignment[:numAssignments] + } + for i := int32(0); i < numAssignments; i++ { + var topic string + if unsafe { + topic = b.UnsafeString() + } else { + topic = b.String() + } + numPartitions := b.ArrayLen() + if numPartitions < 0 { + numPartitions = 0 + } + a := &s.CurrentAssignment[i] + a.Topic = topic + need := numPartitions - int32(cap(a.Partitions)) + if need > 0 { + a.Partitions = append(a.Partitions[:cap(a.Partitions)], make([]int32, need)...) + } else { + a.Partitions = a.Partitions[:numPartitions] + } + for i := range a.Partitions { + a.Partitions[i] = b.Int32() + } + } + if len(b.Src) > 0 { + s.Generation = b.Int32() + } else { + s.Generation = -1 + } + return b.Complete() +} + +// AppendTo provides appending various versions of sticky member metadata to dst. +// If generation is not -1 (default for v0), this appends as version 1. 
+func (s *StickyMemberMetadata) AppendTo(dst []byte) []byte { + dst = kbin.AppendArrayLen(dst, len(s.CurrentAssignment)) + for _, assignment := range s.CurrentAssignment { + dst = kbin.AppendString(dst, assignment.Topic) + dst = kbin.AppendArrayLen(dst, len(assignment.Partitions)) + for _, partition := range assignment.Partitions { + dst = kbin.AppendInt32(dst, partition) + } + } + if s.Generation != -1 { + dst = kbin.AppendInt32(dst, s.Generation) + } + return dst +} + +// TagReader has is a type that has the ability to skip tags. +// +// This is effectively a trimmed version of the kbin.Reader, with the purpose +// being that kmsg cannot depend on an external package. +type TagReader interface { + // Uvarint returns a uint32. If the reader has read too much and has + // exhausted all bytes, this should set the reader's internal state + // to failed and return 0. + Uvarint() uint32 + + // Span returns n bytes from the reader. If the reader has read too + // much and exhausted all bytes this should set the reader's internal + // to failed and return nil. + Span(n int) []byte +} + +// SkipTags skips tags in a TagReader. +func SkipTags(b TagReader) { + for num := b.Uvarint(); num > 0; num-- { + _, size := b.Uvarint(), b.Uvarint() + b.Span(int(size)) + } +} + +// internalSkipTags skips tags in the duplicated inner kbin.Reader. +func internalSkipTags(b *kbin.Reader) { + for num := b.Uvarint(); num > 0; num-- { + _, size := b.Uvarint(), b.Uvarint() + b.Span(int(size)) + } +} + +// ReadTags reads tags in a TagReader and returns the tags. +func ReadTags(b TagReader) Tags { + var t Tags + for num := b.Uvarint(); num > 0; num-- { + key, size := b.Uvarint(), b.Uvarint() + t.Set(key, b.Span(int(size))) + } + return t +} + +// internalReadTags reads tags in a reader and returns the tags from a +// duplicated inner kbin.Reader. +func internalReadTags(b *kbin.Reader) Tags { + var t Tags + for num := b.Uvarint(); num > 0; num-- { + key, size := b.Uvarint(), b.Uvarint() + t.Set(key, b.Span(int(size))) + } + return t +} + +// Tags is an opaque structure capturing unparsed tags. +type Tags struct { + keyvals map[uint32][]byte +} + +// Len returns the number of keyvals in Tags. +func (t *Tags) Len() int { return len(t.keyvals) } + +// Each calls fn for each key and val in the tags. +func (t *Tags) Each(fn func(uint32, []byte)) { + if len(t.keyvals) == 0 { + return + } + // We must encode keys in order. We expect to have limited (no) unknown + // keys, so for now, we take a lazy approach and allocate an ordered + // slice. + ordered := make([]uint32, 0, len(t.keyvals)) + for key := range t.keyvals { + ordered = append(ordered, key) + } + sort.Slice(ordered, func(i, j int) bool { return ordered[i] < ordered[j] }) + for _, key := range ordered { + fn(key, t.keyvals[key]) + } +} + +// Set sets a tag's key and val. +// +// Note that serializing tags does NOT check if the set key overlaps with an +// existing used key. It is invalid to set a key used by Kafka itself. +func (t *Tags) Set(key uint32, val []byte) { + if t.keyvals == nil { + t.keyvals = make(map[uint32][]byte) + } + t.keyvals[key] = val +} + +// AppendEach appends each keyval in tags to dst and returns the updated dst. +func (t *Tags) AppendEach(dst []byte) []byte { + t.Each(func(key uint32, val []byte) { + dst = kbin.AppendUvarint(dst, key) + dst = kbin.AppendUvarint(dst, uint32(len(val))) + dst = append(dst, val...) 
+ }) + return dst +} diff --git a/vendor/github.com/twmb/franz-go/pkg/kmsg/generated.go b/vendor/github.com/twmb/franz-go/pkg/kmsg/generated.go new file mode 100644 index 000000000000..75bff9958e78 --- /dev/null +++ b/vendor/github.com/twmb/franz-go/pkg/kmsg/generated.go @@ -0,0 +1,46895 @@ +package kmsg + +import ( + "context" + "fmt" + "reflect" + "strings" + + "github.com/twmb/franz-go/pkg/kmsg/internal/kbin" +) + +// Code generated by franz-go/generate. DO NOT EDIT. + +// MaxKey is the maximum key used for any messages in this package. +// Note that this value will change as Kafka adds more messages. +const MaxKey = 68 + +// MessageV0 is the message format Kafka used prior to 0.10. +// +// To produce or fetch messages, Kafka would write many messages contiguously +// as an array without specifying the array length. +type MessageV0 struct { + // Offset is the offset of this record. + // + // If this is the outer message of a recursive message set (i.e. a + // message set has been compressed and this is the outer message), + // then the offset should be the offset of the last inner value. + Offset int64 + + // MessageSize is the size of everything that follows in this message. + MessageSize int32 + + // CRC is the crc of everything that follows this field (NOT using the + // Castagnoli polynomial, as is the case in the 0.11+ RecordBatch). + CRC int32 + + // Magic is 0. + Magic int8 + + // Attributes describe the attributes of this message. + // + // The first three bits correspond to compression: + // - 00 is no compression + // - 01 is gzip compression + // - 10 is snappy compression + // + // The remaining bits are unused and must be 0. + Attributes int8 + + // Key is an blob of data for a record. + // + // Key's are usually used for hashing the record to specific Kafka partitions. + Key []byte + + // Value is a blob of data. This field is the main "message" portion of a + // record. + Value []byte +} + +func (v *MessageV0) AppendTo(dst []byte) []byte { + { + v := v.Offset + dst = kbin.AppendInt64(dst, v) + } + { + v := v.MessageSize + dst = kbin.AppendInt32(dst, v) + } + { + v := v.CRC + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Magic + dst = kbin.AppendInt8(dst, v) + } + { + v := v.Attributes + dst = kbin.AppendInt8(dst, v) + } + { + v := v.Key + dst = kbin.AppendNullableBytes(dst, v) + } + { + v := v.Value + dst = kbin.AppendNullableBytes(dst, v) + } + return dst +} + +func (v *MessageV0) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *MessageV0) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *MessageV0) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + s := v + { + v := b.Int64() + s.Offset = v + } + { + v := b.Int32() + s.MessageSize = v + } + { + v := b.Int32() + s.CRC = v + } + { + v := b.Int8() + s.Magic = v + } + { + v := b.Int8() + s.Attributes = v + } + { + v := b.NullableBytes() + s.Key = v + } + { + v := b.NullableBytes() + s.Value = v + } + return b.Complete() +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to MessageV0. +func (v *MessageV0) Default() { +} + +// NewMessageV0 returns a default MessageV0 +// This is a shortcut for creating a struct and calling Default yourself. +func NewMessageV0() MessageV0 { + var v MessageV0 + v.Default() + return v +} + +// MessageV1 is the message format Kafka used prior to 0.11. 
+// +// To produce or fetch messages, Kafka would write many messages contiguously +// as an array without specifying the array length. +// +// To support compression, an entire message set would be compressed and used +// as the Value in another message set (thus being "recursive"). The key for +// this outer message set must be null. +type MessageV1 struct { + // Offset is the offset of this record. + // + // Different from v0, if this message set is a recursive message set + // (that is, compressed and inside another message set), the offset + // on the inner set is relative to the offset of the outer set. + Offset int64 + + // MessageSize is the size of everything that follows in this message. + MessageSize int32 + + // CRC is the crc of everything that follows this field (NOT using the + // Castagnoli polynomial, as is the case in the 0.11+ RecordBatch). + CRC int32 + + // Magic is 1. + Magic int8 + + // Attributes describe the attributes of this message. + // + // The first three bits correspond to compression: + // - 00 is no compression + // - 01 is gzip compression + // - 10 is snappy compression + // + // Bit 4 is the timestamp type, with 0 meaning CreateTime corresponding + // to the timestamp being from the producer, and 1 meaning LogAppendTime + // corresponding to the timestamp being from the broker. + // Setting this to LogAppendTime will cause batches to be rejected. + // + // The remaining bits are unused and must be 0. + Attributes int8 + + // Timestamp is the millisecond timestamp of this message. + Timestamp int64 + + // Key is an blob of data for a record. + // + // Key's are usually used for hashing the record to specific Kafka partitions. + Key []byte + + // Value is a blob of data. This field is the main "message" portion of a + // record. + Value []byte +} + +func (v *MessageV1) AppendTo(dst []byte) []byte { + { + v := v.Offset + dst = kbin.AppendInt64(dst, v) + } + { + v := v.MessageSize + dst = kbin.AppendInt32(dst, v) + } + { + v := v.CRC + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Magic + dst = kbin.AppendInt8(dst, v) + } + { + v := v.Attributes + dst = kbin.AppendInt8(dst, v) + } + { + v := v.Timestamp + dst = kbin.AppendInt64(dst, v) + } + { + v := v.Key + dst = kbin.AppendNullableBytes(dst, v) + } + { + v := v.Value + dst = kbin.AppendNullableBytes(dst, v) + } + return dst +} + +func (v *MessageV1) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *MessageV1) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *MessageV1) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + s := v + { + v := b.Int64() + s.Offset = v + } + { + v := b.Int32() + s.MessageSize = v + } + { + v := b.Int32() + s.CRC = v + } + { + v := b.Int8() + s.Magic = v + } + { + v := b.Int8() + s.Attributes = v + } + { + v := b.Int64() + s.Timestamp = v + } + { + v := b.NullableBytes() + s.Key = v + } + { + v := b.NullableBytes() + s.Value = v + } + return b.Complete() +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to MessageV1. +func (v *MessageV1) Default() { +} + +// NewMessageV1 returns a default MessageV1 +// This is a shortcut for creating a struct and calling Default yourself. +func NewMessageV1() MessageV1 { + var v MessageV1 + v.Default() + return v +} + +// Header is user provided metadata for a record. Kafka does not look at +// headers at all; they are solely for producers and consumers. 
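+//
+// A small encode/decode sketch (the key and value are arbitrary examples):
+//
+//	h := NewHeader()
+//	h.Key, h.Value = "trace-id", []byte("abc123")
+//	buf := h.AppendTo(nil)
+//
+//	var back Header
+//	err := back.ReadFrom(buf)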
+type Header struct { + Key string + + Value []byte +} + +func (v *Header) AppendTo(dst []byte) []byte { + { + v := v.Key + dst = kbin.AppendVarintString(dst, v) + } + { + v := v.Value + dst = kbin.AppendVarintBytes(dst, v) + } + return dst +} + +func (v *Header) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *Header) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *Header) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + s := v + { + var v string + if unsafe { + v = b.UnsafeVarintString() + } else { + v = b.VarintString() + } + s.Key = v + } + { + v := b.VarintBytes() + s.Value = v + } + return b.Complete() +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to Header. +func (v *Header) Default() { +} + +// NewHeader returns a default Header +// This is a shortcut for creating a struct and calling Default yourself. +func NewHeader() Header { + var v Header + v.Default() + return v +} + +// RecordBatch is a Kafka concept that groups many individual records together +// in a more optimized format. +type RecordBatch struct { + // FirstOffset is the first offset in a record batch. + // + // For producing, this is usually 0. + FirstOffset int64 + + // Length is the wire length of everything that follows this field. + Length int32 + + // PartitionLeaderEpoch is the leader epoch of the broker at the time + // this batch was written. Kafka uses this for cluster communication, + // but clients can also use this to better aid truncation detection. + // See KIP-320. Producers should set this to -1. + PartitionLeaderEpoch int32 + + // Magic is the current "magic" number of this message format. + // The current magic number is 2. + Magic int8 + + // CRC is the crc of everything that follows this field using the + // Castagnoli polynomial. + CRC int32 + + // Attributes describe the records array of this batch. + // + // The first three bits correspond to compression: + // - 000 is no compression + // - 001 is gzip compression + // - 010 is snappy compression + // - 011 is lz4 compression + // - 100 is zstd compression (produce request version 7+) + // + // Bit 4 is the timestamp type, with 0 meaning CreateTime corresponding + // to the timestamp being from the producer, and 1 meaning LogAppendTime + // corresponding to the timestamp being from the broker. + // Setting this to LogAppendTime will cause batches to be rejected. + // + // Bit 5 indicates whether the batch is part of a transaction (1 is yes). + // + // Bit 6 indicates if the batch includes a control message (1 is yes). + // Control messages are used to enable transactions and are generated from + // the broker. Clients should not return control batches to applications. + Attributes int16 + + // LastOffsetDelta is the offset of the last message in a batch. This is used + // by the broker to ensure correct behavior even with batch compaction. + LastOffsetDelta int32 + + // FirstTimestamp is the timestamp (in milliseconds) of the first record + // in a batch. + FirstTimestamp int64 + + // MaxTimestamp is the timestamp (in milliseconds) of the last record + // in a batch. Similar to LastOffsetDelta, this is used to ensure correct + // behavior with compacting. + MaxTimestamp int64 + + // ProducerID is the broker assigned producerID from an InitProducerID + // request. + // + // Clients that wish to support idempotent messages and transactions must + // set this field. 
+ // + // Note that when not using transactions, any producer here is always + // accepted (and the epoch is always zero). Outside transactions, the ID + // is used only to deduplicate requests (and there must be at max 5 + // concurrent requests). + ProducerID int64 + + // ProducerEpoch is the broker assigned producerEpoch from an InitProducerID + // request. + // + // Clients that wish to support idempotent messages and transactions must + // set this field. + ProducerEpoch int16 + + // FirstSequence is the producer assigned sequence number used by the + // broker to deduplicate messages. + // + // Clients that wish to support idempotent messages and transactions must + // set this field. + // + // The sequence number for each record in a batch is OffsetDelta + FirstSequence. + FirstSequence int32 + + // NumRecords is the number of records in the array below. + // + // This is separate from Records due to the potential for records to be + // compressed. + NumRecords int32 + + // Records contains records, either compressed or uncompressed. + // + // For uncompressed records, this is an array of records ([Record]). + // + // For compressed records, the length of the uncompressed array is kept + // but everything that follows is compressed. + // + // The number of bytes is expected to be the Length field minus 49. + Records []byte +} + +func (v *RecordBatch) AppendTo(dst []byte) []byte { + { + v := v.FirstOffset + dst = kbin.AppendInt64(dst, v) + } + { + v := v.Length + dst = kbin.AppendInt32(dst, v) + } + { + v := v.PartitionLeaderEpoch + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Magic + dst = kbin.AppendInt8(dst, v) + } + { + v := v.CRC + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Attributes + dst = kbin.AppendInt16(dst, v) + } + { + v := v.LastOffsetDelta + dst = kbin.AppendInt32(dst, v) + } + { + v := v.FirstTimestamp + dst = kbin.AppendInt64(dst, v) + } + { + v := v.MaxTimestamp + dst = kbin.AppendInt64(dst, v) + } + { + v := v.ProducerID + dst = kbin.AppendInt64(dst, v) + } + { + v := v.ProducerEpoch + dst = kbin.AppendInt16(dst, v) + } + { + v := v.FirstSequence + dst = kbin.AppendInt32(dst, v) + } + { + v := v.NumRecords + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Records + dst = append(dst, v...) + } + return dst +} + +func (v *RecordBatch) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *RecordBatch) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *RecordBatch) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + s := v + { + v := b.Int64() + s.FirstOffset = v + } + { + v := b.Int32() + s.Length = v + } + { + v := b.Int32() + s.PartitionLeaderEpoch = v + } + { + v := b.Int8() + s.Magic = v + } + { + v := b.Int32() + s.CRC = v + } + { + v := b.Int16() + s.Attributes = v + } + { + v := b.Int32() + s.LastOffsetDelta = v + } + { + v := b.Int64() + s.FirstTimestamp = v + } + { + v := b.Int64() + s.MaxTimestamp = v + } + { + v := b.Int64() + s.ProducerID = v + } + { + v := b.Int16() + s.ProducerEpoch = v + } + { + v := b.Int32() + s.FirstSequence = v + } + { + v := b.Int32() + s.NumRecords = v + } + { + v := b.Span(int(s.Length) - 49) + s.Records = v + } + return b.Complete() +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to RecordBatch. +func (v *RecordBatch) Default() { +} + +// NewRecordBatch returns a default RecordBatch +// This is a shortcut for creating a struct and calling Default yourself. 
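+//
+// A decode sketch, assuming raw holds exactly one batch as read off the wire
+// (Records is whatever follows the fixed 49 bytes after the Length field):
+//
+//	var batch RecordBatch
+//	if err := batch.ReadFrom(raw); err != nil {
+//		// handle a truncated or malformed batch
+//	}
+//	// batch.Records holds batch.NumRecords records and is still compressed
+//	// when the low three bits of batch.Attributes are nonzero.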
+func NewRecordBatch() RecordBatch { + var v RecordBatch + v.Default() + return v +} + +// OffsetCommitKey is the key for the Kafka internal __consumer_offsets topic +// if the key starts with an int16 with a value of 0 or 1. +// +// This type was introduced in KAFKA-1012 commit a670537aa3 with release 0.8.2 +// and has been in use ever since. +type OffsetCommitKey struct { + // Version is which encoding version this value is using. + Version int16 + + // Group is the group being committed. + Group string + + // Topic is the topic being committed. + Topic string + + // Partition is the partition being committed. + Partition int32 +} + +func (v *OffsetCommitKey) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + { + v := v.Version + dst = kbin.AppendInt16(dst, v) + } + { + v := v.Group + dst = kbin.AppendString(dst, v) + } + { + v := v.Topic + dst = kbin.AppendString(dst, v) + } + { + v := v.Partition + dst = kbin.AppendInt32(dst, v) + } + return dst +} + +func (v *OffsetCommitKey) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *OffsetCommitKey) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *OffsetCommitKey) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + v.Version = b.Int16() + version := v.Version + _ = version + s := v + { + var v string + if unsafe { + v = b.UnsafeString() + } else { + v = b.String() + } + s.Group = v + } + { + var v string + if unsafe { + v = b.UnsafeString() + } else { + v = b.String() + } + s.Topic = v + } + { + v := b.Int32() + s.Partition = v + } + return b.Complete() +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to OffsetCommitKey. +func (v *OffsetCommitKey) Default() { +} + +// NewOffsetCommitKey returns a default OffsetCommitKey +// This is a shortcut for creating a struct and calling Default yourself. +func NewOffsetCommitKey() OffsetCommitKey { + var v OffsetCommitKey + v.Default() + return v +} + +// OffsetCommitValue is the value for the Kafka internal __consumer_offsets +// topic if the key is of OffsetCommitKey type. +// +// Version 0 was introduced with the key version 0. +// +// KAFKA-1634 commit c5df2a8e3a in 0.9.0 released version 1. +// +// KAFKA-4682 commit 418a91b5d4, proposed in KIP-211 and included in 2.1.0 +// released version 2. +// +// KAFKA-7437 commit 9f7267dd2f, proposed in KIP-320 and included in 2.1.0 +// released version 3. +type OffsetCommitValue struct { + // Version is which encoding version this value is using. + Version int16 + + // Offset is the committed offset. + Offset int64 + + // LeaderEpoch is the epoch of the leader committing this message. + LeaderEpoch int32 // v3+ + + // Metadata is the metadata included in the commit. + Metadata string + + // CommitTimestamp is when this commit occurred. + CommitTimestamp int64 + + // ExpireTimestamp, introduced in v1 and dropped in v2 with KIP-111, + // is when this commit expires. 
+ ExpireTimestamp int64 // v1-v1 +} + +func (v *OffsetCommitValue) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + { + v := v.Version + dst = kbin.AppendInt16(dst, v) + } + { + v := v.Offset + dst = kbin.AppendInt64(dst, v) + } + if version >= 3 { + v := v.LeaderEpoch + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Metadata + dst = kbin.AppendString(dst, v) + } + { + v := v.CommitTimestamp + dst = kbin.AppendInt64(dst, v) + } + if version >= 1 && version <= 1 { + v := v.ExpireTimestamp + dst = kbin.AppendInt64(dst, v) + } + return dst +} + +func (v *OffsetCommitValue) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *OffsetCommitValue) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *OffsetCommitValue) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + v.Version = b.Int16() + version := v.Version + _ = version + s := v + { + v := b.Int64() + s.Offset = v + } + if version >= 3 { + v := b.Int32() + s.LeaderEpoch = v + } + { + var v string + if unsafe { + v = b.UnsafeString() + } else { + v = b.String() + } + s.Metadata = v + } + { + v := b.Int64() + s.CommitTimestamp = v + } + if version >= 1 && version <= 1 { + v := b.Int64() + s.ExpireTimestamp = v + } + return b.Complete() +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to OffsetCommitValue. +func (v *OffsetCommitValue) Default() { +} + +// NewOffsetCommitValue returns a default OffsetCommitValue +// This is a shortcut for creating a struct and calling Default yourself. +func NewOffsetCommitValue() OffsetCommitValue { + var v OffsetCommitValue + v.Default() + return v +} + +// GroupMetadataKey is the key for the Kafka internal __consumer_offsets topic +// if the key starts with an int16 with a value of 2. +// +// This type was introduced in KAFKA-2017 commit 7c33475274 with release 0.9.0 +// and has been in use ever since. +type GroupMetadataKey struct { + // Version is which encoding version this value is using. + Version int16 + + // Group is the group this metadata is for. + Group string +} + +func (v *GroupMetadataKey) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + { + v := v.Version + dst = kbin.AppendInt16(dst, v) + } + { + v := v.Group + dst = kbin.AppendString(dst, v) + } + return dst +} + +func (v *GroupMetadataKey) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *GroupMetadataKey) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *GroupMetadataKey) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + v.Version = b.Int16() + version := v.Version + _ = version + s := v + { + var v string + if unsafe { + v = b.UnsafeString() + } else { + v = b.String() + } + s.Group = v + } + return b.Complete() +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to GroupMetadataKey. +func (v *GroupMetadataKey) Default() { +} + +// NewGroupMetadataKey returns a default GroupMetadataKey +// This is a shortcut for creating a struct and calling Default yourself. +func NewGroupMetadataKey() GroupMetadataKey { + var v GroupMetadataKey + v.Default() + return v +} + +type GroupMetadataValueMember struct { + // MemberID is a group member. + MemberID string + + // InstanceID is the instance ID of this member in the group (KIP-345). 
+ InstanceID *string // v3+ + + // ClientID is the client ID of this group member. + ClientID string + + // ClientHost is the hostname of this group member. + ClientHost string + + // RebalanceTimeoutMillis is the rebalance timeout of this group member. + RebalanceTimeoutMillis int32 // v1+ + + // SessionTimeoutMillis is the session timeout of this group member. + SessionTimeoutMillis int32 + + // Subscription is the subscription of this group member. + Subscription []byte + + // Assignment is what the leader assigned this group member. + Assignment []byte +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to GroupMetadataValueMember. +func (v *GroupMetadataValueMember) Default() { +} + +// NewGroupMetadataValueMember returns a default GroupMetadataValueMember +// This is a shortcut for creating a struct and calling Default yourself. +func NewGroupMetadataValueMember() GroupMetadataValueMember { + var v GroupMetadataValueMember + v.Default() + return v +} + +// GroupMetadataValue is the value for the Kafka internal __consumer_offsets +// topic if the key is of GroupMetadataKey type. +// +// Version 0 was introduced with the key version 0. +// +// KAFKA-3888 commit 40b1dd3f49, proposed in KIP-62 and included in 0.10.1 +// released version 1. +// +// KAFKA-4682 commit 418a91b5d4, proposed in KIP-211 and included in 2.1.0 +// released version 2. +// +// KAFKA-7862 commit 0f995ba6be, proposed in KIP-345 and included in 2.3.0 +// released version 3. +type GroupMetadataValue struct { + // Version is the version of this value. + Version int16 + + // ProtocolType is the type of protocol being used for the group + // (i.e., "consumer"). + ProtocolType string + + // Generation is the generation of this group. + Generation int32 + + // Protocol is the agreed upon protocol all members are using to partition + // (i.e., "sticky"). + Protocol *string + + // Leader is the group leader. + Leader *string + + // CurrentStateTimestamp is the timestamp for this state of the group + // (stable, etc.). + CurrentStateTimestamp int64 // v2+ + + // Members are the group members. 
+ Members []GroupMetadataValueMember +} + +func (v *GroupMetadataValue) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + { + v := v.Version + dst = kbin.AppendInt16(dst, v) + } + { + v := v.ProtocolType + dst = kbin.AppendString(dst, v) + } + { + v := v.Generation + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Protocol + dst = kbin.AppendNullableString(dst, v) + } + { + v := v.Leader + dst = kbin.AppendNullableString(dst, v) + } + if version >= 2 { + v := v.CurrentStateTimestamp + dst = kbin.AppendInt64(dst, v) + } + { + v := v.Members + dst = kbin.AppendArrayLen(dst, len(v)) + for i := range v { + v := &v[i] + { + v := v.MemberID + dst = kbin.AppendString(dst, v) + } + if version >= 3 { + v := v.InstanceID + dst = kbin.AppendNullableString(dst, v) + } + { + v := v.ClientID + dst = kbin.AppendString(dst, v) + } + { + v := v.ClientHost + dst = kbin.AppendString(dst, v) + } + if version >= 1 { + v := v.RebalanceTimeoutMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.SessionTimeoutMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Subscription + dst = kbin.AppendBytes(dst, v) + } + { + v := v.Assignment + dst = kbin.AppendBytes(dst, v) + } + } + } + return dst +} + +func (v *GroupMetadataValue) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *GroupMetadataValue) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *GroupMetadataValue) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + v.Version = b.Int16() + version := v.Version + _ = version + s := v + { + var v string + if unsafe { + v = b.UnsafeString() + } else { + v = b.String() + } + s.ProtocolType = v + } + { + v := b.Int32() + s.Generation = v + } + { + var v *string + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + s.Protocol = v + } + { + var v *string + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + s.Leader = v + } + if version >= 2 { + v := b.Int64() + s.CurrentStateTimestamp = v + } + { + v := s.Members + a := v + var l int32 + l = b.ArrayLen() + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]GroupMetadataValueMember, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + v = b.UnsafeString() + } else { + v = b.String() + } + s.MemberID = v + } + if version >= 3 { + var v *string + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + s.InstanceID = v + } + { + var v string + if unsafe { + v = b.UnsafeString() + } else { + v = b.String() + } + s.ClientID = v + } + { + var v string + if unsafe { + v = b.UnsafeString() + } else { + v = b.String() + } + s.ClientHost = v + } + if version >= 1 { + v := b.Int32() + s.RebalanceTimeoutMillis = v + } + { + v := b.Int32() + s.SessionTimeoutMillis = v + } + { + v := b.Bytes() + s.Subscription = v + } + { + v := b.Bytes() + s.Assignment = v + } + } + v = a + s.Members = v + } + return b.Complete() +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to GroupMetadataValue. +func (v *GroupMetadataValue) Default() { +} + +// NewGroupMetadataValue returns a default GroupMetadataValue +// This is a shortcut for creating a struct and calling Default yourself. 
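+//
+// A decode sketch for a __consumer_offsets record whose key parsed as a
+// GroupMetadataKey (record is an assumed fetched record with a non-nil,
+// non-tombstone value):
+//
+//	var val GroupMetadataValue
+//	if err := val.ReadFrom(record.Value); err != nil {
+//		// handle a corrupt value
+//	}
+//	for _, m := range val.Members {
+//		_ = m.Subscription // ConsumerMemberMetadata bytes for "consumer" groups
+//		_ = m.Assignment   // ConsumerMemberAssignment bytes for "consumer" groups
+//	}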
+func NewGroupMetadataValue() GroupMetadataValue { + var v GroupMetadataValue + v.Default() + return v +} + +// TxnMetadataKey is the key for the Kafka internal __transaction_state topic +// if the key starts with an int16 with a value of 0. +type TxnMetadataKey struct { + // Version is the version of this type. + Version int16 + + // TransactionalID is the transactional ID this record is for. + TransactionalID string +} + +func (v *TxnMetadataKey) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + { + v := v.Version + dst = kbin.AppendInt16(dst, v) + } + { + v := v.TransactionalID + dst = kbin.AppendString(dst, v) + } + return dst +} + +func (v *TxnMetadataKey) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *TxnMetadataKey) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *TxnMetadataKey) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + v.Version = b.Int16() + version := v.Version + _ = version + s := v + { + var v string + if unsafe { + v = b.UnsafeString() + } else { + v = b.String() + } + s.TransactionalID = v + } + return b.Complete() +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to TxnMetadataKey. +func (v *TxnMetadataKey) Default() { +} + +// NewTxnMetadataKey returns a default TxnMetadataKey +// This is a shortcut for creating a struct and calling Default yourself. +func NewTxnMetadataKey() TxnMetadataKey { + var v TxnMetadataKey + v.Default() + return v +} + +type TxnMetadataValueTopic struct { + // Topic is a topic involved in this transaction. + Topic string + + // Partitions are partitions in this topic involved in the transaction. + Partitions []int32 +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to TxnMetadataValueTopic. +func (v *TxnMetadataValueTopic) Default() { +} + +// NewTxnMetadataValueTopic returns a default TxnMetadataValueTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewTxnMetadataValueTopic() TxnMetadataValueTopic { + var v TxnMetadataValueTopic + v.Default() + return v +} + +// TxnMetadataValue is the value for the Kafka internal __transaction_state +// topic if the key is of TxnMetadataKey type. +type TxnMetadataValue struct { + // Version is the version of this value. + Version int16 + + // ProducerID is the ID in use by the transactional ID. + ProducerID int64 + + // ProducerEpoch is the epoch associated with the producer ID. + ProducerEpoch int16 + + // TimeoutMillis is the timeout of this transaction in milliseconds. + TimeoutMillis int32 + + // State is the state this transaction is in, + // 0 is Empty, 1 is Ongoing, 2 is PrepareCommit, 3 is PrepareAbort, 4 is + // CompleteCommit, 5 is CompleteAbort, 6 is Dead, and 7 is PrepareEpochFence. + State TransactionState + + // Topics are topics that are involved in this transaction. + Topics []TxnMetadataValueTopic + + // LastUpdateTimestamp is the timestamp in millis of when this transaction + // was last updated. + LastUpdateTimestamp int64 + + // StartTimestamp is the timestamp in millis of when this transaction started. 
+ StartTimestamp int64 +} + +func (v *TxnMetadataValue) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + { + v := v.Version + dst = kbin.AppendInt16(dst, v) + } + { + v := v.ProducerID + dst = kbin.AppendInt64(dst, v) + } + { + v := v.ProducerEpoch + dst = kbin.AppendInt16(dst, v) + } + { + v := v.TimeoutMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.State + { + v := int8(v) + dst = kbin.AppendInt8(dst, v) + } + } + { + v := v.Topics + dst = kbin.AppendArrayLen(dst, len(v)) + for i := range v { + v := &v[i] + { + v := v.Topic + dst = kbin.AppendString(dst, v) + } + { + v := v.Partitions + dst = kbin.AppendArrayLen(dst, len(v)) + for i := range v { + v := v[i] + dst = kbin.AppendInt32(dst, v) + } + } + } + } + { + v := v.LastUpdateTimestamp + dst = kbin.AppendInt64(dst, v) + } + { + v := v.StartTimestamp + dst = kbin.AppendInt64(dst, v) + } + return dst +} + +func (v *TxnMetadataValue) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *TxnMetadataValue) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *TxnMetadataValue) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + v.Version = b.Int16() + version := v.Version + _ = version + s := v + { + v := b.Int64() + s.ProducerID = v + } + { + v := b.Int16() + s.ProducerEpoch = v + } + { + v := b.Int32() + s.TimeoutMillis = v + } + { + var t TransactionState + { + v := b.Int8() + t = TransactionState(v) + } + v := t + s.State = v + } + { + v := s.Topics + a := v + var l int32 + l = b.ArrayLen() + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]TxnMetadataValueTopic, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + v = b.UnsafeString() + } else { + v = b.String() + } + s.Topic = v + } + { + v := s.Partitions + a := v + var l int32 + l = b.ArrayLen() + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]int32, l)...) + } + for i := int32(0); i < l; i++ { + v := b.Int32() + a[i] = v + } + v = a + s.Partitions = v + } + } + v = a + s.Topics = v + } + { + v := b.Int64() + s.LastUpdateTimestamp = v + } + { + v := b.Int64() + s.StartTimestamp = v + } + return b.Complete() +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to TxnMetadataValue. +func (v *TxnMetadataValue) Default() { +} + +// NewTxnMetadataValue returns a default TxnMetadataValue +// This is a shortcut for creating a struct and calling Default yourself. +func NewTxnMetadataValue() TxnMetadataValue { + var v TxnMetadataValue + v.Default() + return v +} + +type StickyMemberMetadataCurrentAssignment struct { + // Topic is a topic the group member is currently assigned. + Topic string + + // Partitions are the partitions within a topic that a group member is + // currently assigned. + Partitions []int32 +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to StickyMemberMetadataCurrentAssignment. +func (v *StickyMemberMetadataCurrentAssignment) Default() { +} + +// NewStickyMemberMetadataCurrentAssignment returns a default StickyMemberMetadataCurrentAssignment +// This is a shortcut for creating a struct and calling Default yourself. 
+func NewStickyMemberMetadataCurrentAssignment() StickyMemberMetadataCurrentAssignment { + var v StickyMemberMetadataCurrentAssignment + v.Default() + return v +} + +// StickyMemberMetadata is is what is encoded in UserData for +// ConsumerMemberMetadata in group join requests with the sticky partitioning +// strategy. +// +// V1 added generation, which fixed a bug with flaky group members joining +// repeatedly. See KIP-341 for more details. +// +// Note that clients should always try decoding as v1 and, if that fails, +// fall back to v0. This is necessary due to there being no version number +// anywhere in this type. +type StickyMemberMetadata struct { + // CurrentAssignment is the assignment that a group member has when + // issuing a join. + CurrentAssignment []StickyMemberMetadataCurrentAssignment + + // Generation is the generation of this join. This is incremented every join. + // + // This field has a default of -1. + Generation int32 // v1+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to StickyMemberMetadata. +func (v *StickyMemberMetadata) Default() { + v.Generation = -1 +} + +// NewStickyMemberMetadata returns a default StickyMemberMetadata +// This is a shortcut for creating a struct and calling Default yourself. +func NewStickyMemberMetadata() StickyMemberMetadata { + var v StickyMemberMetadata + v.Default() + return v +} + +type ConsumerMemberMetadataOwnedPartition struct { + Topic string + + Partitions []int32 +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to ConsumerMemberMetadataOwnedPartition. +func (v *ConsumerMemberMetadataOwnedPartition) Default() { +} + +// NewConsumerMemberMetadataOwnedPartition returns a default ConsumerMemberMetadataOwnedPartition +// This is a shortcut for creating a struct and calling Default yourself. +func NewConsumerMemberMetadataOwnedPartition() ConsumerMemberMetadataOwnedPartition { + var v ConsumerMemberMetadataOwnedPartition + v.Default() + return v +} + +// ConsumerMemberMetadata is the metadata that is usually sent with a join group +// request with the "consumer" protocol (normal, non-connect consumers). +type ConsumerMemberMetadata struct { + // Version is 0, 1, 2, or 3. + Version int16 + + // Topics is the list of topics in the group that this member is interested + // in consuming. + Topics []string + + // UserData is arbitrary client data for a given client in the group. + // For sticky assignment, this is StickyMemberMetadata. + UserData []byte + + // OwnedPartitions, introduced for KIP-429, are the partitions that this + // member currently owns. + OwnedPartitions []ConsumerMemberMetadataOwnedPartition // v1+ + + // Generation is the generation of the group. + // + // This field has a default of -1. + Generation int32 // v2+ + + // Rack, if non-nil, opts into rack-aware replica assignment. 
+ Rack *string // v3+ +} + +func (v *ConsumerMemberMetadata) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + { + v := v.Version + dst = kbin.AppendInt16(dst, v) + } + { + v := v.Topics + dst = kbin.AppendArrayLen(dst, len(v)) + for i := range v { + v := v[i] + dst = kbin.AppendString(dst, v) + } + } + { + v := v.UserData + dst = kbin.AppendNullableBytes(dst, v) + } + if version >= 1 { + v := v.OwnedPartitions + dst = kbin.AppendArrayLen(dst, len(v)) + for i := range v { + v := &v[i] + { + v := v.Topic + dst = kbin.AppendString(dst, v) + } + { + v := v.Partitions + dst = kbin.AppendArrayLen(dst, len(v)) + for i := range v { + v := v[i] + dst = kbin.AppendInt32(dst, v) + } + } + } + } + if version >= 2 { + v := v.Generation + dst = kbin.AppendInt32(dst, v) + } + if version >= 3 { + v := v.Rack + dst = kbin.AppendNullableString(dst, v) + } + return dst +} + +func (v *ConsumerMemberMetadata) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *ConsumerMemberMetadata) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *ConsumerMemberMetadata) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + v.Version = b.Int16() + version := v.Version + _ = version + s := v + { + v := s.Topics + a := v + var l int32 + l = b.ArrayLen() + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]string, l)...) + } + for i := int32(0); i < l; i++ { + var v string + if unsafe { + v = b.UnsafeString() + } else { + v = b.String() + } + a[i] = v + } + v = a + s.Topics = v + } + { + v := b.NullableBytes() + s.UserData = v + } + if version >= 1 { + v := s.OwnedPartitions + a := v + var l int32 + l = b.ArrayLen() + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]ConsumerMemberMetadataOwnedPartition, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + v = b.UnsafeString() + } else { + v = b.String() + } + s.Topic = v + } + { + v := s.Partitions + a := v + var l int32 + l = b.ArrayLen() + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]int32, l)...) + } + for i := int32(0); i < l; i++ { + v := b.Int32() + a[i] = v + } + v = a + s.Partitions = v + } + } + v = a + s.OwnedPartitions = v + } + if version >= 2 { + v := b.Int32() + s.Generation = v + } + if version >= 3 { + var v *string + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + s.Rack = v + } + return b.Complete() +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to ConsumerMemberMetadata. +func (v *ConsumerMemberMetadata) Default() { + v.Generation = -1 +} + +// NewConsumerMemberMetadata returns a default ConsumerMemberMetadata +// This is a shortcut for creating a struct and calling Default yourself. +func NewConsumerMemberMetadata() ConsumerMemberMetadata { + var v ConsumerMemberMetadata + v.Default() + return v +} + +type ConsumerMemberAssignmentTopic struct { + // Topic is a topic in the assignment. + Topic string + + // Partitions contains partitions in the assignment. + Partitions []int32 +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to ConsumerMemberAssignmentTopic. 
+func (v *ConsumerMemberAssignmentTopic) Default() { +} + +// NewConsumerMemberAssignmentTopic returns a default ConsumerMemberAssignmentTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewConsumerMemberAssignmentTopic() ConsumerMemberAssignmentTopic { + var v ConsumerMemberAssignmentTopic + v.Default() + return v +} + +// ConsumerMemberAssignment is the assignment data that is usually sent with a +// sync group request with the "consumer" protocol (normal, non-connect +// consumers). +type ConsumerMemberAssignment struct { + // Verson is 0, 1, or 2. + Version int16 + + // Topics contains topics in the assignment. + Topics []ConsumerMemberAssignmentTopic + + // UserData is arbitrary client data for a given client in the group. + UserData []byte +} + +func (v *ConsumerMemberAssignment) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + { + v := v.Version + dst = kbin.AppendInt16(dst, v) + } + { + v := v.Topics + dst = kbin.AppendArrayLen(dst, len(v)) + for i := range v { + v := &v[i] + { + v := v.Topic + dst = kbin.AppendString(dst, v) + } + { + v := v.Partitions + dst = kbin.AppendArrayLen(dst, len(v)) + for i := range v { + v := v[i] + dst = kbin.AppendInt32(dst, v) + } + } + } + } + { + v := v.UserData + dst = kbin.AppendNullableBytes(dst, v) + } + return dst +} + +func (v *ConsumerMemberAssignment) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *ConsumerMemberAssignment) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *ConsumerMemberAssignment) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + v.Version = b.Int16() + version := v.Version + _ = version + s := v + { + v := s.Topics + a := v + var l int32 + l = b.ArrayLen() + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]ConsumerMemberAssignmentTopic, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + v = b.UnsafeString() + } else { + v = b.String() + } + s.Topic = v + } + { + v := s.Partitions + a := v + var l int32 + l = b.ArrayLen() + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]int32, l)...) + } + for i := int32(0); i < l; i++ { + v := b.Int32() + a[i] = v + } + v = a + s.Partitions = v + } + } + v = a + s.Topics = v + } + { + v := b.NullableBytes() + s.UserData = v + } + return b.Complete() +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to ConsumerMemberAssignment. +func (v *ConsumerMemberAssignment) Default() { +} + +// NewConsumerMemberAssignment returns a default ConsumerMemberAssignment +// This is a shortcut for creating a struct and calling Default yourself. +func NewConsumerMemberAssignment() ConsumerMemberAssignment { + var v ConsumerMemberAssignment + v.Default() + return v +} + +// ConnectMemberMetadata is the metadata used in a join group request with the +// "connect" protocol. v1 introduced incremental cooperative rebalancing (akin +// to cooperative-sticky) per KIP-415. 
+// +// v0 defined in connect/runtime/src/main/java/org/apache/kafka/connect/runtime/distributed/ConnectProtocol.java +// v1+ defined in connect/runtime/src/main/java/org/apache/kafka/connect/runtime/distributed/IncrementalCooperativeConnectProtocol.java +type ConnectMemberMetadata struct { + Version int16 + + URL string + + ConfigOffset int64 + + CurrentAssignment []byte // v1+ +} + +func (v *ConnectMemberMetadata) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + { + v := v.Version + dst = kbin.AppendInt16(dst, v) + } + { + v := v.URL + dst = kbin.AppendString(dst, v) + } + { + v := v.ConfigOffset + dst = kbin.AppendInt64(dst, v) + } + if version >= 1 { + v := v.CurrentAssignment + dst = kbin.AppendNullableBytes(dst, v) + } + return dst +} + +func (v *ConnectMemberMetadata) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *ConnectMemberMetadata) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *ConnectMemberMetadata) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + v.Version = b.Int16() + version := v.Version + _ = version + s := v + { + var v string + if unsafe { + v = b.UnsafeString() + } else { + v = b.String() + } + s.URL = v + } + { + v := b.Int64() + s.ConfigOffset = v + } + if version >= 1 { + v := b.NullableBytes() + s.CurrentAssignment = v + } + return b.Complete() +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to ConnectMemberMetadata. +func (v *ConnectMemberMetadata) Default() { +} + +// NewConnectMemberMetadata returns a default ConnectMemberMetadata +// This is a shortcut for creating a struct and calling Default yourself. +func NewConnectMemberMetadata() ConnectMemberMetadata { + var v ConnectMemberMetadata + v.Default() + return v +} + +type ConnectMemberAssignmentAssignment struct { + Connector string + + Tasks []int16 +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to ConnectMemberAssignmentAssignment. +func (v *ConnectMemberAssignmentAssignment) Default() { +} + +// NewConnectMemberAssignmentAssignment returns a default ConnectMemberAssignmentAssignment +// This is a shortcut for creating a struct and calling Default yourself. +func NewConnectMemberAssignmentAssignment() ConnectMemberAssignmentAssignment { + var v ConnectMemberAssignmentAssignment + v.Default() + return v +} + +type ConnectMemberAssignmentRevoked struct { + Connector string + + Tasks []int16 +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to ConnectMemberAssignmentRevoked. +func (v *ConnectMemberAssignmentRevoked) Default() { +} + +// NewConnectMemberAssignmentRevoked returns a default ConnectMemberAssignmentRevoked +// This is a shortcut for creating a struct and calling Default yourself. +func NewConnectMemberAssignmentRevoked() ConnectMemberAssignmentRevoked { + var v ConnectMemberAssignmentRevoked + v.Default() + return v +} + +// ConnectMemberAssignment is the assignment that is used in a sync group +// request with the "connect" protocol. See ConnectMemberMetadata for links to +// the Kafka code where these fields are defined. 
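+//
+// A decode sketch (src is assumed to be the raw assignment bytes a connect
+// worker receives in a sync group response):
+//
+//	var asn ConnectMemberAssignment
+//	if err := asn.ReadFrom(src); err != nil {
+//		// handle a corrupt assignment
+//	}
+//	// asn.Revoked and asn.ScheduledDelay are only decoded for version 1+.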
+type ConnectMemberAssignment struct { + Version int16 + + Error int16 + + Leader string + + LeaderURL string + + ConfigOffset int64 + + Assignment []ConnectMemberAssignmentAssignment + + Revoked []ConnectMemberAssignmentRevoked // v1+ + + ScheduledDelay int32 // v1+ +} + +func (v *ConnectMemberAssignment) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + { + v := v.Version + dst = kbin.AppendInt16(dst, v) + } + { + v := v.Error + dst = kbin.AppendInt16(dst, v) + } + { + v := v.Leader + dst = kbin.AppendString(dst, v) + } + { + v := v.LeaderURL + dst = kbin.AppendString(dst, v) + } + { + v := v.ConfigOffset + dst = kbin.AppendInt64(dst, v) + } + { + v := v.Assignment + dst = kbin.AppendArrayLen(dst, len(v)) + for i := range v { + v := &v[i] + { + v := v.Connector + dst = kbin.AppendString(dst, v) + } + { + v := v.Tasks + dst = kbin.AppendArrayLen(dst, len(v)) + for i := range v { + v := v[i] + dst = kbin.AppendInt16(dst, v) + } + } + } + } + if version >= 1 { + v := v.Revoked + dst = kbin.AppendArrayLen(dst, len(v)) + for i := range v { + v := &v[i] + { + v := v.Connector + dst = kbin.AppendString(dst, v) + } + { + v := v.Tasks + dst = kbin.AppendArrayLen(dst, len(v)) + for i := range v { + v := v[i] + dst = kbin.AppendInt16(dst, v) + } + } + } + } + if version >= 1 { + v := v.ScheduledDelay + dst = kbin.AppendInt32(dst, v) + } + return dst +} + +func (v *ConnectMemberAssignment) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *ConnectMemberAssignment) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *ConnectMemberAssignment) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + v.Version = b.Int16() + version := v.Version + _ = version + s := v + { + v := b.Int16() + s.Error = v + } + { + var v string + if unsafe { + v = b.UnsafeString() + } else { + v = b.String() + } + s.Leader = v + } + { + var v string + if unsafe { + v = b.UnsafeString() + } else { + v = b.String() + } + s.LeaderURL = v + } + { + v := b.Int64() + s.ConfigOffset = v + } + { + v := s.Assignment + a := v + var l int32 + l = b.ArrayLen() + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]ConnectMemberAssignmentAssignment, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + v = b.UnsafeString() + } else { + v = b.String() + } + s.Connector = v + } + { + v := s.Tasks + a := v + var l int32 + l = b.ArrayLen() + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]int16, l)...) + } + for i := int32(0); i < l; i++ { + v := b.Int16() + a[i] = v + } + v = a + s.Tasks = v + } + } + v = a + s.Assignment = v + } + if version >= 1 { + v := s.Revoked + a := v + var l int32 + l = b.ArrayLen() + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]ConnectMemberAssignmentRevoked, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + v = b.UnsafeString() + } else { + v = b.String() + } + s.Connector = v + } + { + v := s.Tasks + a := v + var l int32 + l = b.ArrayLen() + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]int16, l)...) 
+ } + for i := int32(0); i < l; i++ { + v := b.Int16() + a[i] = v + } + v = a + s.Tasks = v + } + } + v = a + s.Revoked = v + } + if version >= 1 { + v := b.Int32() + s.ScheduledDelay = v + } + return b.Complete() +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to ConnectMemberAssignment. +func (v *ConnectMemberAssignment) Default() { +} + +// NewConnectMemberAssignment returns a default ConnectMemberAssignment +// This is a shortcut for creating a struct and calling Default yourself. +func NewConnectMemberAssignment() ConnectMemberAssignment { + var v ConnectMemberAssignment + v.Default() + return v +} + +// DefaultPrincipalData is the encoded principal data. This is used in an +// envelope request from broker to broker. +type DefaultPrincipalData struct { + Version int16 + + // The principal type. + Type string + + // The principal name. + Name string + + // Whether the principal was authenticated by a delegation token on the forwarding broker. + TokenAuthenticated bool + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +func (v *DefaultPrincipalData) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + { + v := v.Version + dst = kbin.AppendInt16(dst, v) + } + { + v := v.Type + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Name + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.TokenAuthenticated + dst = kbin.AppendBool(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *DefaultPrincipalData) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *DefaultPrincipalData) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *DefaultPrincipalData) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + v.Version = b.Int16() + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Type = v + } + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Name = v + } + { + v := b.Bool() + s.TokenAuthenticated = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} +func (v *DefaultPrincipalData) IsFlexible() bool { return v.Version >= 0 } + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DefaultPrincipalData. +func (v *DefaultPrincipalData) Default() { +} + +// NewDefaultPrincipalData returns a default DefaultPrincipalData +// This is a shortcut for creating a struct and calling Default yourself. +func NewDefaultPrincipalData() DefaultPrincipalData { + var v DefaultPrincipalData + v.Default() + return v +} + +// ControlRecordKey is the key in a control record. 
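+//
+// A decode sketch for a record inside a control batch (record is an assumed
+// fetched record; key types 0 and 1 carry an EndTxnMarker value):
+//
+//	var key ControlRecordKey
+//	if err := key.ReadFrom(record.Key); err != nil {
+//		// handle a corrupt key
+//	}
+//	if key.Type == 0 || key.Type == 1 {
+//		var marker EndTxnMarker
+//		_ = marker.ReadFrom(record.Value)
+//	}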
+type ControlRecordKey struct { + Version int16 + + Type ControlRecordKeyType +} + +func (v *ControlRecordKey) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + { + v := v.Version + dst = kbin.AppendInt16(dst, v) + } + { + v := v.Type + { + v := int8(v) + dst = kbin.AppendInt8(dst, v) + } + } + return dst +} + +func (v *ControlRecordKey) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *ControlRecordKey) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *ControlRecordKey) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + v.Version = b.Int16() + version := v.Version + _ = version + s := v + { + var t ControlRecordKeyType + { + v := b.Int8() + t = ControlRecordKeyType(v) + } + v := t + s.Type = v + } + return b.Complete() +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to ControlRecordKey. +func (v *ControlRecordKey) Default() { +} + +// NewControlRecordKey returns a default ControlRecordKey +// This is a shortcut for creating a struct and calling Default yourself. +func NewControlRecordKey() ControlRecordKey { + var v ControlRecordKey + v.Default() + return v +} + +// EndTxnMarker is the value for a control record when the key is type 0 or 1. +type EndTxnMarker struct { + Version int16 + + CoordinatorEpoch int32 +} + +func (v *EndTxnMarker) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + { + v := v.Version + dst = kbin.AppendInt16(dst, v) + } + { + v := v.CoordinatorEpoch + dst = kbin.AppendInt32(dst, v) + } + return dst +} + +func (v *EndTxnMarker) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *EndTxnMarker) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *EndTxnMarker) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + v.Version = b.Int16() + version := v.Version + _ = version + s := v + { + v := b.Int32() + s.CoordinatorEpoch = v + } + return b.Complete() +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to EndTxnMarker. +func (v *EndTxnMarker) Default() { +} + +// NewEndTxnMarker returns a default EndTxnMarker +// This is a shortcut for creating a struct and calling Default yourself. +func NewEndTxnMarker() EndTxnMarker { + var v EndTxnMarker + v.Default() + return v +} + +type LeaderChangeMessageVoter struct { + VoterID int32 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to LeaderChangeMessageVoter. +func (v *LeaderChangeMessageVoter) Default() { +} + +// NewLeaderChangeMessageVoter returns a default LeaderChangeMessageVoter +// This is a shortcut for creating a struct and calling Default yourself. +func NewLeaderChangeMessageVoter() LeaderChangeMessageVoter { + var v LeaderChangeMessageVoter + v.Default() + return v +} + +// LeaderChangeMessage is the value for a control record when the key is type 3. +type LeaderChangeMessage struct { + Version int16 + + // The ID of the newly elected leader. + LeaderID int32 + + // The set of voters in the quorum for this epoch. + Voters []LeaderChangeMessageVoter + + // The voters who voted for the leader at the time of election. 
+ GrantingVoters []LeaderChangeMessageVoter + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +func (v *LeaderChangeMessage) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + { + v := v.Version + dst = kbin.AppendInt16(dst, v) + } + { + v := v.LeaderID + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Voters + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.VoterID + dst = kbin.AppendInt32(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + { + v := v.GrantingVoters + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.VoterID + dst = kbin.AppendInt32(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *LeaderChangeMessage) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *LeaderChangeMessage) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *LeaderChangeMessage) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + v.Version = b.Int16() + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + s := v + { + v := b.Int32() + s.LeaderID = v + } + { + v := s.Voters + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]LeaderChangeMessageVoter, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int32() + s.VoterID = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Voters = v + } + { + v := s.GrantingVoters + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]LeaderChangeMessageVoter, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int32() + s.VoterID = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.GrantingVoters = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} +func (v *LeaderChangeMessage) IsFlexible() bool { return v.Version >= 0 } + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to LeaderChangeMessage. +func (v *LeaderChangeMessage) Default() { +} + +// NewLeaderChangeMessage returns a default LeaderChangeMessage +// This is a shortcut for creating a struct and calling Default yourself. +func NewLeaderChangeMessage() LeaderChangeMessage { + var v LeaderChangeMessage + v.Default() + return v +} + +type ProduceRequestTopicPartition struct { + // Partition is a partition to send a record batch to. + Partition int32 + + // Records is a batch of records to write to a topic's partition. + // + // For Kafka pre 0.11.0, the contents of the byte array is a serialized + // message set. 
At or after 0.11.0, the contents of the byte array is a + // serialized RecordBatch. + Records []byte + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v9+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to ProduceRequestTopicPartition. +func (v *ProduceRequestTopicPartition) Default() { +} + +// NewProduceRequestTopicPartition returns a default ProduceRequestTopicPartition +// This is a shortcut for creating a struct and calling Default yourself. +func NewProduceRequestTopicPartition() ProduceRequestTopicPartition { + var v ProduceRequestTopicPartition + v.Default() + return v +} + +type ProduceRequestTopic struct { + // Topic is a topic to send record batches to. + Topic string + + // Partitions is an array of partitions to send record batches to. + Partitions []ProduceRequestTopicPartition + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v9+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to ProduceRequestTopic. +func (v *ProduceRequestTopic) Default() { +} + +// NewProduceRequestTopic returns a default ProduceRequestTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewProduceRequestTopic() ProduceRequestTopic { + var v ProduceRequestTopic + v.Default() + return v +} + +// ProduceRequest issues records to be created to Kafka. +// +// Kafka 0.10.0 (v2) changed Records from MessageSet v0 to MessageSet v1. +// Kafka 0.11.0 (v3) again changed Records to RecordBatch. +// +// Note that the special client ID "__admin_client" will allow you to produce +// records to internal topics. This is generally recommended if you want to +// break your Kafka cluster. +type ProduceRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // TransactionID is the transaction ID to use for this request, allowing for + // exactly once semantics. + TransactionID *string // v3+ + + // Acks specifies the number of acks that the partition leaders must receive + // from in sync replicas before considering a record batch fully written. + // + // Valid values are -1, 0, or 1 corresponding to all, none, or the leader only. + // + // Note that if no acks are requested, Kafka will close the connection + // if any topic or partition errors to trigger a client metadata refresh. + Acks int16 + + // TimeoutMillis is how long Kafka can wait before responding to this request. + // This field has no effect on Kafka's processing of the request; the request + // will continue to be processed if the timeout is reached. If the timeout is + // reached, Kafka will reply with a REQUEST_TIMED_OUT error. + // + // This field has a default of 15000. + TimeoutMillis int32 + + // Topics is an array of topics to send record batches to. + Topics []ProduceRequestTopic + + // UnknownTags are tags Kafka sent that we do not know the purpose of. 
+ UnknownTags Tags // v9+ +} + +func (*ProduceRequest) Key() int16 { return 0 } +func (*ProduceRequest) MaxVersion() int16 { return 10 } +func (v *ProduceRequest) SetVersion(version int16) { v.Version = version } +func (v *ProduceRequest) GetVersion() int16 { return v.Version } +func (v *ProduceRequest) IsFlexible() bool { return v.Version >= 9 } +func (v *ProduceRequest) Timeout() int32 { return v.TimeoutMillis } +func (v *ProduceRequest) SetTimeout(timeoutMillis int32) { v.TimeoutMillis = timeoutMillis } +func (v *ProduceRequest) ResponseKind() Response { + r := &ProduceResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. +func (v *ProduceRequest) RequestWith(ctx context.Context, r Requestor) (*ProduceResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*ProduceResponse) + return resp, err +} + +func (v *ProduceRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 9 + _ = isFlexible + if version >= 3 { + v := v.TransactionID + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + { + v := v.Acks + dst = kbin.AppendInt16(dst, v) + } + { + v := v.TimeoutMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Partitions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Partition + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Records + if isFlexible { + dst = kbin.AppendCompactNullableBytes(dst, v) + } else { + dst = kbin.AppendNullableBytes(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *ProduceRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *ProduceRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *ProduceRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 9 + _ = isFlexible + s := v + if version >= 3 { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.TransactionID = v + } + { + v := b.Int16() + s.Acks = v + } + { + v := b.Int32() + s.TimeoutMillis = v + } + { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = 
append(a, make([]ProduceRequestTopic, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + { + v := s.Partitions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]ProduceRequestTopicPartition, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int32() + s.Partition = v + } + { + var v []byte + if isFlexible { + v = b.CompactNullableBytes() + } else { + v = b.NullableBytes() + } + s.Records = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Partitions = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrProduceRequest returns a pointer to a default ProduceRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrProduceRequest() *ProduceRequest { + var v ProduceRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to ProduceRequest. +func (v *ProduceRequest) Default() { + v.TimeoutMillis = 15000 +} + +// NewProduceRequest returns a default ProduceRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewProduceRequest() ProduceRequest { + var v ProduceRequest + v.Default() + return v +} + +type ProduceResponseTopicPartitionErrorRecord struct { + // RelativeOffset is the offset of the record that caused problems. + RelativeOffset int32 + + // ErrorMessage is the error of this record. + ErrorMessage *string + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v9+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to ProduceResponseTopicPartitionErrorRecord. +func (v *ProduceResponseTopicPartitionErrorRecord) Default() { +} + +// NewProduceResponseTopicPartitionErrorRecord returns a default ProduceResponseTopicPartitionErrorRecord +// This is a shortcut for creating a struct and calling Default yourself. +func NewProduceResponseTopicPartitionErrorRecord() ProduceResponseTopicPartitionErrorRecord { + var v ProduceResponseTopicPartitionErrorRecord + v.Default() + return v +} + +type ProduceResponseTopicPartitionCurrentLeader struct { + // The ID of the current leader, or -1 if unknown. + // + // This field has a default of -1. + LeaderID int32 + + // The latest known leader epoch. + // + // This field has a default of -1. + LeaderEpoch int32 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v9+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to ProduceResponseTopicPartitionCurrentLeader. +func (v *ProduceResponseTopicPartitionCurrentLeader) Default() { + v.LeaderID = -1 + v.LeaderEpoch = -1 +} + +// NewProduceResponseTopicPartitionCurrentLeader returns a default ProduceResponseTopicPartitionCurrentLeader +// This is a shortcut for creating a struct and calling Default yourself. 
+func NewProduceResponseTopicPartitionCurrentLeader() ProduceResponseTopicPartitionCurrentLeader { + var v ProduceResponseTopicPartitionCurrentLeader + v.Default() + return v +} + +type ProduceResponseTopicPartition struct { + // Partition is the partition this response pertains to. + Partition int32 + + // ErrorCode is any error for a topic/partition in the request. + // There are many error codes for produce requests. + // + // TRANSACTIONAL_ID_AUTHORIZATION_FAILED is returned for all topics and + // partitions if the request had a transactional ID but the client + // is not authorized for transactions. + // + // CLUSTER_AUTHORIZATION_FAILED is returned for all topics and partitions + // if the request was idempotent but the client is not authorized + // for idempotent requests. + // + // TOPIC_AUTHORIZATION_FAILED is returned for all topics the client + // is not authorized to talk to. + // + // INVALID_REQUIRED_ACKS is returned if the request contained an invalid + // number for "acks". + // + // CORRUPT_MESSAGE is returned for many reasons, generally related to + // problems with messages (invalid magic, size mismatch, etc.). + // + // MESSAGE_TOO_LARGE is returned if a record batch is larger than the + // broker's configured max.message.size. + // + // RECORD_LIST_TOO_LARGE is returned if the record batch is larger than + // the broker's segment.bytes. + // + // INVALID_TIMESTAMP is returned if the record batch uses LogAppendTime + // or if the timestamp delta from when the broker receives the message + // is more than the broker's log.message.timestamp.difference.max.ms. + // + // UNSUPPORTED_FOR_MESSAGE_FORMAT is returned if using a Kafka v2 message + // format (i.e. RecordBatch) feature (idempotence) while sending v1 + // messages (i.e. a MessageSet). + // + // KAFKA_STORAGE_ERROR is returned if the log directory for a partition + // is offline. + // + // NOT_ENOUGH_REPLICAS is returned if all acks are required, but there + // are not enough in sync replicas yet. + // + // NOT_ENOUGH_REPLICAS_AFTER_APPEND is returned on old Kafka versions + // (pre 0.11.0.0) when a message was written to disk and then Kafka + // noticed not enough replicas existed to replicate the message. + // + // DUPLICATE_SEQUENCE_NUMBER is returned for Kafka <1.1.0 when a + // sequence number is detected as a duplicate. After, out of order + // is returned. + // + // UNKNOWN_TOPIC_OR_PARTITION is returned if the topic or partition + // is unknown. + // + // NOT_LEADER_FOR_PARTITION is returned if the broker is not a leader + // for this partition. This means that the client has stale metadata. + // + // INVALID_PRODUCER_EPOCH is returned if the produce request was + // attempted with an old epoch. Either there is a newer producer using + // the same transaction ID, or the transaction ID used has expired. + // + // UNKNOWN_PRODUCER_ID, added in Kafka 1.0.0 (message format v5+) is + // returned if the producer used an ID that Kafka does not know about or + // if the request has a larger sequence number than Kafka expects. The + // LogStartOffset must be checked in this case. If the offset is greater + // than the last acknowledged offset, then no data loss has occurred; the + // client just sent data so long ago that Kafka rotated the partition out + // of existence and no longer knows of this producer ID. In this case, + // reset your sequence numbers to 0. If the log start offset is equal to + // or less than what the client sent prior, then data loss has occurred. + // See KAFKA-5793 for more details. 
NOTE: Unfortunately, even UNKNOWN_PRODUCER_ID + // is unsafe to handle, so this error should likely be treated the same + // as OUT_OF_ORDER_SEQUENCE_NUMER. See KIP-360 for more details. + // + // OUT_OF_ORDER_SEQUENCE_NUMBER is sent if the batch's FirstSequence was + // not what it should be (the last FirstSequence, plus the number of + // records in the last batch, plus one). After 1.0.0, this generally + // means data loss. Before, there could be confusion on if the broker + // actually rotated the partition out of existence (this is why + // UNKNOWN_PRODUCER_ID was introduced). + ErrorCode int16 + + // BaseOffset is the offset that the records in the produce request began + // at in the partition. + BaseOffset int64 + + // LogAppendTime is the millisecond that records were appended to the + // partition inside Kafka. This is only not -1 if records were written + // with the log append time flag (which producers cannot do). + // + // This field has a default of -1. + LogAppendTime int64 // v2+ + + // LogStartOffset, introduced in Kafka 1.0.0, can be used to see if an + // UNKNOWN_PRODUCER_ID means Kafka rotated records containing the used + // producer ID out of existence, or if Kafka lost data. + // + // This field has a default of -1. + LogStartOffset int64 // v5+ + + // ErrorRecords are indices of individual records that caused a batch + // to error. This was added for KIP-467. + ErrorRecords []ProduceResponseTopicPartitionErrorRecord // v8+ + + // ErrorMessage is the global error message of of what caused this batch + // to error. + ErrorMessage *string // v8+ + + CurrentLeader ProduceResponseTopicPartitionCurrentLeader // tag 0 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v9+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to ProduceResponseTopicPartition. +func (v *ProduceResponseTopicPartition) Default() { + v.LogAppendTime = -1 + v.LogStartOffset = -1 + { + v := &v.CurrentLeader + _ = v + v.LeaderID = -1 + v.LeaderEpoch = -1 + } +} + +// NewProduceResponseTopicPartition returns a default ProduceResponseTopicPartition +// This is a shortcut for creating a struct and calling Default yourself. +func NewProduceResponseTopicPartition() ProduceResponseTopicPartition { + var v ProduceResponseTopicPartition + v.Default() + return v +} + +type ProduceResponseTopic struct { + // Topic is the topic this response pertains to. + Topic string + + // Partitions is an array of responses for the partition's that + // batches were sent to. + Partitions []ProduceResponseTopicPartition + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v9+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to ProduceResponseTopic. +func (v *ProduceResponseTopic) Default() { +} + +// NewProduceResponseTopic returns a default ProduceResponseTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewProduceResponseTopic() ProduceResponseTopic { + var v ProduceResponseTopic + v.Default() + return v +} + +type ProduceResponseBroker struct { + // NodeID is the node ID of a Kafka broker. + NodeID int32 + + // Host is the hostname of a Kafka broker. + Host string + + // Port is the port of a Kafka broker. + Port int32 + + // Rack is the rack this Kafka broker is in. + Rack *string + + // UnknownTags are tags Kafka sent that we do not know the purpose of. 
+ UnknownTags Tags // v9+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to ProduceResponseBroker. +func (v *ProduceResponseBroker) Default() { +} + +// NewProduceResponseBroker returns a default ProduceResponseBroker +// This is a shortcut for creating a struct and calling Default yourself. +func NewProduceResponseBroker() ProduceResponseBroker { + var v ProduceResponseBroker + v.Default() + return v +} + +// ProduceResponse is returned from a ProduceRequest. +type ProduceResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // Topics is an array of responses for the topic's that batches were sent + // to. + Topics []ProduceResponseTopic + + // ThrottleMillis is how long of a throttle Kafka will apply to the client + // after this request. + // For Kafka < 2.0.0, the throttle is applied before issuing a response. + // For Kafka >= 2.0.0, the throttle is applied after issuing a response. + // + // This request switched at version 6. + ThrottleMillis int32 // v1+ + + // Brokers is present if any partition responses contain the error + // NOT_LEADER_OR_FOLLOWER. + Brokers []ProduceResponseBroker // tag 0 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v9+ +} + +func (*ProduceResponse) Key() int16 { return 0 } +func (*ProduceResponse) MaxVersion() int16 { return 10 } +func (v *ProduceResponse) SetVersion(version int16) { v.Version = version } +func (v *ProduceResponse) GetVersion() int16 { return v.Version } +func (v *ProduceResponse) IsFlexible() bool { return v.Version >= 9 } +func (v *ProduceResponse) Throttle() (int32, bool) { return v.ThrottleMillis, v.Version >= 6 } +func (v *ProduceResponse) SetThrottle(throttleMillis int32) { v.ThrottleMillis = throttleMillis } +func (v *ProduceResponse) RequestKind() Request { return &ProduceRequest{Version: v.Version} } + +func (v *ProduceResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 9 + _ = isFlexible + { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Partitions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Partition + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.BaseOffset + dst = kbin.AppendInt64(dst, v) + } + if version >= 2 { + v := v.LogAppendTime + dst = kbin.AppendInt64(dst, v) + } + if version >= 5 { + v := v.LogStartOffset + dst = kbin.AppendInt64(dst, v) + } + if version >= 8 { + v := v.ErrorRecords + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.RelativeOffset + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ErrorMessage + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if version >= 8 { + v := v.ErrorMessage + if isFlexible { + dst = 
kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + if isFlexible { + var toEncode []uint32 + if !reflect.DeepEqual(v.CurrentLeader, (func() ProduceResponseTopicPartitionCurrentLeader { + var v ProduceResponseTopicPartitionCurrentLeader + v.Default() + return v + })()) { + toEncode = append(toEncode, 0) + } + dst = kbin.AppendUvarint(dst, uint32(len(toEncode)+v.UnknownTags.Len())) + for _, tag := range toEncode { + switch tag { + case 0: + { + v := v.CurrentLeader + dst = kbin.AppendUvarint(dst, 0) + sized := false + lenAt := len(dst) + fCurrentLeader: + { + v := v.LeaderID + dst = kbin.AppendInt32(dst, v) + } + { + v := v.LeaderEpoch + dst = kbin.AppendInt32(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + if !sized { + dst = kbin.AppendUvarint(dst[:lenAt], uint32(len(dst[lenAt:]))) + sized = true + goto fCurrentLeader + } + } + } + } + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if version >= 1 { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + if isFlexible { + var toEncode []uint32 + if len(v.Brokers) > 0 { + toEncode = append(toEncode, 0) + } + dst = kbin.AppendUvarint(dst, uint32(len(toEncode)+v.UnknownTags.Len())) + for _, tag := range toEncode { + switch tag { + case 0: + { + v := v.Brokers + dst = kbin.AppendUvarint(dst, 0) + sized := false + lenAt := len(dst) + fBrokers: + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.NodeID + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Host + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Port + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Rack + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + if !sized { + dst = kbin.AppendUvarint(dst[:lenAt], uint32(len(dst[lenAt:]))) + sized = true + goto fBrokers + } + } + } + } + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *ProduceResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *ProduceResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *ProduceResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 9 + _ = isFlexible + s := v + { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]ProduceResponseTopic, l)...) 
+ } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + { + v := s.Partitions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]ProduceResponseTopicPartition, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int32() + s.Partition = v + } + { + v := b.Int16() + s.ErrorCode = v + } + { + v := b.Int64() + s.BaseOffset = v + } + if version >= 2 { + v := b.Int64() + s.LogAppendTime = v + } + if version >= 5 { + v := b.Int64() + s.LogStartOffset = v + } + if version >= 8 { + v := s.ErrorRecords + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]ProduceResponseTopicPartitionErrorRecord, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int32() + s.RelativeOffset = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.ErrorMessage = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.ErrorRecords = v + } + if version >= 8 { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.ErrorMessage = v + } + if isFlexible { + for i := b.Uvarint(); i > 0; i-- { + switch key := b.Uvarint(); key { + default: + s.UnknownTags.Set(key, b.Span(int(b.Uvarint()))) + case 0: + b := kbin.Reader{Src: b.Span(int(b.Uvarint()))} + v := &s.CurrentLeader + v.Default() + s := v + { + v := b.Int32() + s.LeaderID = v + } + { + v := b.Int32() + s.LeaderEpoch = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + if err := b.Complete(); err != nil { + return err + } + } + } + } + } + v = a + s.Partitions = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + if version >= 1 { + v := b.Int32() + s.ThrottleMillis = v + } + if isFlexible { + for i := b.Uvarint(); i > 0; i-- { + switch key := b.Uvarint(); key { + default: + s.UnknownTags.Set(key, b.Span(int(b.Uvarint()))) + case 0: + b := kbin.Reader{Src: b.Span(int(b.Uvarint()))} + v := s.Brokers + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]ProduceResponseBroker, l)...) 
+ } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int32() + s.NodeID = v + } + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Host = v + } + { + v := b.Int32() + s.Port = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.Rack = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Brokers = v + if err := b.Complete(); err != nil { + return err + } + } + } + } + return b.Complete() +} + +// NewPtrProduceResponse returns a pointer to a default ProduceResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrProduceResponse() *ProduceResponse { + var v ProduceResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to ProduceResponse. +func (v *ProduceResponse) Default() { +} + +// NewProduceResponse returns a default ProduceResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewProduceResponse() ProduceResponse { + var v ProduceResponse + v.Default() + return v +} + +type FetchRequestReplicaState struct { + // The replica ID of the follower, or -1 if this request is from a consumer. + // + // This field has a default of -1. + ID int32 + + // The epoch of this follower, or -1 if not available. + // + // This field has a default of -1. + Epoch int64 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v12+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to FetchRequestReplicaState. +func (v *FetchRequestReplicaState) Default() { + v.ID = -1 + v.Epoch = -1 +} + +// NewFetchRequestReplicaState returns a default FetchRequestReplicaState +// This is a shortcut for creating a struct and calling Default yourself. +func NewFetchRequestReplicaState() FetchRequestReplicaState { + var v FetchRequestReplicaState + v.Default() + return v +} + +type FetchRequestTopicPartition struct { + // Partition is a partition in a topic to try to fetch records for. + Partition int32 + + // CurrentLeaderEpoch, proposed in KIP-320 and introduced in Kafka 2.1.0, + // allows brokers to check if the client is fenced (has an out of date + // leader) or is using an unknown leader. + // + // The initial leader epoch can be determined from a MetadataResponse. + // To skip log truncation checking, use -1. + // + // This field has a default of -1. + CurrentLeaderEpoch int32 // v9+ + + // FetchOffset is the offset to begin the fetch from. Kafka will + // return records at and after this offset. + FetchOffset int64 + + // The epoch of the last fetched record, or -1 if there is none. + // + // This field has a default of -1. + LastFetchedEpoch int32 // v12+ + + // LogStartOffset is a broker-follower only field added for KIP-107. + // This is the start offset of the partition in a follower. + // + // This field has a default of -1. + LogStartOffset int64 // v5+ + + // PartitionMaxBytes is the maximum bytes to return for this partition. 
+ // This can be used to limit how many bytes an individual partition in + // a request is allotted so that it does not dominate all of MaxBytes. + PartitionMaxBytes int32 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v12+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to FetchRequestTopicPartition. +func (v *FetchRequestTopicPartition) Default() { + v.CurrentLeaderEpoch = -1 + v.LastFetchedEpoch = -1 + v.LogStartOffset = -1 +} + +// NewFetchRequestTopicPartition returns a default FetchRequestTopicPartition +// This is a shortcut for creating a struct and calling Default yourself. +func NewFetchRequestTopicPartition() FetchRequestTopicPartition { + var v FetchRequestTopicPartition + v.Default() + return v +} + +type FetchRequestTopic struct { + // Topic is a topic to try to fetch records for. + Topic string // v0-v12 + + // TopicID is the uuid of the topic to fetch records for. + TopicID [16]byte // v13+ + + // Partitions contains partitions in a topic to try to fetch records for. + Partitions []FetchRequestTopicPartition + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v12+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to FetchRequestTopic. +func (v *FetchRequestTopic) Default() { +} + +// NewFetchRequestTopic returns a default FetchRequestTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewFetchRequestTopic() FetchRequestTopic { + var v FetchRequestTopic + v.Default() + return v +} + +type FetchRequestForgottenTopic struct { + // Topic is a topic to remove from being tracked (with the partitions below). + Topic string // v7-v12 + + // TopicID is the uuid of a topic to remove from being tracked (with the + // partitions below). + TopicID [16]byte // v13+ + + // Partitions are partitions to remove from tracking for a topic. + Partitions []int32 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v12+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to FetchRequestForgottenTopic. +func (v *FetchRequestForgottenTopic) Default() { +} + +// NewFetchRequestForgottenTopic returns a default FetchRequestForgottenTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewFetchRequestForgottenTopic() FetchRequestForgottenTopic { + var v FetchRequestForgottenTopic + v.Default() + return v +} + +// FetchRequest is a long-poll request of records from Kafka. +// +// Kafka 0.11.0.0 released v4 and changed the returned RecordBatches to contain +// the RecordBatch type. Prior, Kafka used the MessageSet type (and, for v0 and +// v1, Kafka used a different type). +// +// Note that starting in v3, Kafka began processing partitions in order, +// meaning the order of partitions in the fetch request is important due to +// potential size constraints. +// +// Starting in v13, topics must use UUIDs rather than their string name +// identifiers. +// +// Version 15 adds the ReplicaState which includes new field ReplicaEpoch and +// the ReplicaID, and deprecates the old ReplicaID (KIP-903). +type FetchRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // The cluster ID, if known. 
This is used to validate metadata fetches + // prior to broker registration. + // + // This field has a default of null. + ClusterID *string // tag 0 + + // ReplicaID is the broker ID of performing the fetch request. Standard + // clients should use -1. To be a "debug" replica, use -2. The debug + // replica can be used to fetch messages from non-leaders. + // + // This field has a default of -1. + ReplicaID int32 // v0-v14 + + // ReplicaState is a broker-only tag for v15+, see KIP-903 for more details. + ReplicaState FetchRequestReplicaState // tag 1 + + // MaxWaitMillis is how long to wait for MinBytes to be hit before a broker + // responds to a fetch request. + MaxWaitMillis int32 + + // MinBytes is the minimum amount of bytes to attempt to read before a broker + // responds to a fetch request. + MinBytes int32 + + // MaxBytes is the maximum amount of bytes to read in a fetch request. The + // response can exceed MaxBytes if the first record in the first non-empty + // partition is larger than MaxBytes. + // + // This field has a default of 0x7fffffff. + MaxBytes int32 // v3+ + + // IsolationLevel changes which messages are fetched. Follower replica ID's + // (non-negative, non-standard-client) fetch from the end. + // + // Standard clients fetch from the high watermark, which corresponds to + // IsolationLevel 0, READ_UNCOMMITTED. + // + // To only read committed records, use IsolationLevel 1, corresponding to + // READ_COMMITTED. + IsolationLevel int8 // v4+ + + // SessionID is used to potentially reduce the amount of back and forth + // data between a client and a broker. If opting in to sessions, the first + // ID used should be 0, and thereafter (until session resets) the ID should + // be the ID returned in the fetch response. + // + // Read KIP-227 for more details. Use -1 if you want to disable sessions. + SessionID int32 // v7+ + + // SessionEpoch is the session epoch for this request if using sessions. + // + // Read KIP-227 for more details. Use -1 if you are not using sessions. + // + // This field has a default of -1. + SessionEpoch int32 // v7+ + + // Topic contains topics to try to fetch records for. + Topics []FetchRequestTopic + + // ForgottenTopics contains topics and partitions that a fetch session + // wants to remove from its session. + // + // See KIP-227 for more details. + ForgottenTopics []FetchRequestForgottenTopic // v7+ + + // Rack of the consumer making this request (see KIP-392; introduced in + // Kafka 2.2.0). + Rack string // v11+ + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v12+ +} + +func (*FetchRequest) Key() int16 { return 1 } +func (*FetchRequest) MaxVersion() int16 { return 16 } +func (v *FetchRequest) SetVersion(version int16) { v.Version = version } +func (v *FetchRequest) GetVersion() int16 { return v.Version } +func (v *FetchRequest) IsFlexible() bool { return v.Version >= 12 } +func (v *FetchRequest) ResponseKind() Response { + r := &FetchResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. 
+func (v *FetchRequest) RequestWith(ctx context.Context, r Requestor) (*FetchResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*FetchResponse) + return resp, err +} + +func (v *FetchRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 12 + _ = isFlexible + if version >= 0 && version <= 14 { + v := v.ReplicaID + dst = kbin.AppendInt32(dst, v) + } + { + v := v.MaxWaitMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.MinBytes + dst = kbin.AppendInt32(dst, v) + } + if version >= 3 { + v := v.MaxBytes + dst = kbin.AppendInt32(dst, v) + } + if version >= 4 { + v := v.IsolationLevel + dst = kbin.AppendInt8(dst, v) + } + if version >= 7 { + v := v.SessionID + dst = kbin.AppendInt32(dst, v) + } + if version >= 7 { + v := v.SessionEpoch + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + if version >= 0 && version <= 12 { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + if version >= 13 { + v := v.TopicID + dst = kbin.AppendUuid(dst, v) + } + { + v := v.Partitions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Partition + dst = kbin.AppendInt32(dst, v) + } + if version >= 9 { + v := v.CurrentLeaderEpoch + dst = kbin.AppendInt32(dst, v) + } + { + v := v.FetchOffset + dst = kbin.AppendInt64(dst, v) + } + if version >= 12 { + v := v.LastFetchedEpoch + dst = kbin.AppendInt32(dst, v) + } + if version >= 5 { + v := v.LogStartOffset + dst = kbin.AppendInt64(dst, v) + } + { + v := v.PartitionMaxBytes + dst = kbin.AppendInt32(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if version >= 7 { + v := v.ForgottenTopics + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + if version >= 7 && version <= 12 { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + if version >= 13 { + v := v.TopicID + dst = kbin.AppendUuid(dst, v) + } + { + v := v.Partitions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := v[i] + dst = kbin.AppendInt32(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if version >= 11 { + v := v.Rack + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + if isFlexible { + var toEncode []uint32 + if v.ClusterID != nil { + toEncode = append(toEncode, 0) + } + if !reflect.DeepEqual(v.ReplicaState, (func() FetchRequestReplicaState { var v FetchRequestReplicaState; v.Default(); return v })()) { + toEncode = append(toEncode, 1) + } + dst = kbin.AppendUvarint(dst, uint32(len(toEncode)+v.UnknownTags.Len())) + for _, tag := range toEncode { + switch tag { + case 0: + { + v := v.ClusterID + dst = kbin.AppendUvarint(dst, 
0) + sized := false + lenAt := len(dst) + fClusterID: + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + if !sized { + dst = kbin.AppendUvarint(dst[:lenAt], uint32(len(dst[lenAt:]))) + sized = true + goto fClusterID + } + } + case 1: + { + v := v.ReplicaState + dst = kbin.AppendUvarint(dst, 1) + sized := false + lenAt := len(dst) + fReplicaState: + { + v := v.ID + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Epoch + dst = kbin.AppendInt64(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + if !sized { + dst = kbin.AppendUvarint(dst[:lenAt], uint32(len(dst[lenAt:]))) + sized = true + goto fReplicaState + } + } + } + } + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *FetchRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *FetchRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *FetchRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 12 + _ = isFlexible + s := v + if version >= 0 && version <= 14 { + v := b.Int32() + s.ReplicaID = v + } + { + v := b.Int32() + s.MaxWaitMillis = v + } + { + v := b.Int32() + s.MinBytes = v + } + if version >= 3 { + v := b.Int32() + s.MaxBytes = v + } + if version >= 4 { + v := b.Int8() + s.IsolationLevel = v + } + if version >= 7 { + v := b.Int32() + s.SessionID = v + } + if version >= 7 { + v := b.Int32() + s.SessionEpoch = v + } + { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]FetchRequestTopic, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + if version >= 0 && version <= 12 { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + if version >= 13 { + v := b.Uuid() + s.TopicID = v + } + { + v := s.Partitions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]FetchRequestTopicPartition, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int32() + s.Partition = v + } + if version >= 9 { + v := b.Int32() + s.CurrentLeaderEpoch = v + } + { + v := b.Int64() + s.FetchOffset = v + } + if version >= 12 { + v := b.Int32() + s.LastFetchedEpoch = v + } + if version >= 5 { + v := b.Int64() + s.LogStartOffset = v + } + { + v := b.Int32() + s.PartitionMaxBytes = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Partitions = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + if version >= 7 { + v := s.ForgottenTopics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]FetchRequestForgottenTopic, l)...) 
+ } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + if version >= 7 && version <= 12 { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + if version >= 13 { + v := b.Uuid() + s.TopicID = v + } + { + v := s.Partitions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]int32, l)...) + } + for i := int32(0); i < l; i++ { + v := b.Int32() + a[i] = v + } + v = a + s.Partitions = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.ForgottenTopics = v + } + if version >= 11 { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Rack = v + } + if isFlexible { + for i := b.Uvarint(); i > 0; i-- { + switch key := b.Uvarint(); key { + default: + s.UnknownTags.Set(key, b.Span(int(b.Uvarint()))) + case 0: + b := kbin.Reader{Src: b.Span(int(b.Uvarint()))} + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.ClusterID = v + if err := b.Complete(); err != nil { + return err + } + case 1: + b := kbin.Reader{Src: b.Span(int(b.Uvarint()))} + v := &s.ReplicaState + v.Default() + s := v + { + v := b.Int32() + s.ID = v + } + { + v := b.Int64() + s.Epoch = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + if err := b.Complete(); err != nil { + return err + } + } + } + } + return b.Complete() +} + +// NewPtrFetchRequest returns a pointer to a default FetchRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrFetchRequest() *FetchRequest { + var v FetchRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to FetchRequest. +func (v *FetchRequest) Default() { + v.ClusterID = nil + v.ReplicaID = -1 + { + v := &v.ReplicaState + _ = v + v.ID = -1 + v.Epoch = -1 + } + v.MaxBytes = 2147483647 + v.SessionEpoch = -1 +} + +// NewFetchRequest returns a default FetchRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewFetchRequest() FetchRequest { + var v FetchRequest + v.Default() + return v +} + +type FetchResponseTopicPartitionDivergingEpoch struct { + // This field has a default of -1. + Epoch int32 + + // This field has a default of -1. + EndOffset int64 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v12+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to FetchResponseTopicPartitionDivergingEpoch. +func (v *FetchResponseTopicPartitionDivergingEpoch) Default() { + v.Epoch = -1 + v.EndOffset = -1 +} + +// NewFetchResponseTopicPartitionDivergingEpoch returns a default FetchResponseTopicPartitionDivergingEpoch +// This is a shortcut for creating a struct and calling Default yourself. 
+func NewFetchResponseTopicPartitionDivergingEpoch() FetchResponseTopicPartitionDivergingEpoch { + var v FetchResponseTopicPartitionDivergingEpoch + v.Default() + return v +} + +type FetchResponseTopicPartitionCurrentLeader struct { + // The ID of the current leader, or -1 if unknown. + // + // This field has a default of -1. + LeaderID int32 + + // The latest known leader epoch. + // + // This field has a default of -1. + LeaderEpoch int32 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v12+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to FetchResponseTopicPartitionCurrentLeader. +func (v *FetchResponseTopicPartitionCurrentLeader) Default() { + v.LeaderID = -1 + v.LeaderEpoch = -1 +} + +// NewFetchResponseTopicPartitionCurrentLeader returns a default FetchResponseTopicPartitionCurrentLeader +// This is a shortcut for creating a struct and calling Default yourself. +func NewFetchResponseTopicPartitionCurrentLeader() FetchResponseTopicPartitionCurrentLeader { + var v FetchResponseTopicPartitionCurrentLeader + v.Default() + return v +} + +type FetchResponseTopicPartitionSnapshotID struct { + // This field has a default of -1. + EndOffset int64 + + // This field has a default of -1. + Epoch int32 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v12+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to FetchResponseTopicPartitionSnapshotID. +func (v *FetchResponseTopicPartitionSnapshotID) Default() { + v.EndOffset = -1 + v.Epoch = -1 +} + +// NewFetchResponseTopicPartitionSnapshotID returns a default FetchResponseTopicPartitionSnapshotID +// This is a shortcut for creating a struct and calling Default yourself. +func NewFetchResponseTopicPartitionSnapshotID() FetchResponseTopicPartitionSnapshotID { + var v FetchResponseTopicPartitionSnapshotID + v.Default() + return v +} + +type FetchResponseTopicPartitionAbortedTransaction struct { + // ProducerID is the producer ID that caused this aborted transaction. + ProducerID int64 + + // FirstOffset is the offset where this aborted transaction began. + FirstOffset int64 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v12+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to FetchResponseTopicPartitionAbortedTransaction. +func (v *FetchResponseTopicPartitionAbortedTransaction) Default() { +} + +// NewFetchResponseTopicPartitionAbortedTransaction returns a default FetchResponseTopicPartitionAbortedTransaction +// This is a shortcut for creating a struct and calling Default yourself. +func NewFetchResponseTopicPartitionAbortedTransaction() FetchResponseTopicPartitionAbortedTransaction { + var v FetchResponseTopicPartitionAbortedTransaction + v.Default() + return v +} + +type FetchResponseTopicPartition struct { + // Partition is a partition in a topic that records may have been + // received for. + Partition int32 + + // ErrorCode is an error returned for an individual partition in a + // fetch request. + // + // TOPIC_AUTHORIZATION_FAILED is returned if the client is not + // authorized to read the partition. + // + // UNKNOWN_TOPIC_OR_PARTITION is returned if the topic or partition + // does not exist on this broker. 
+ // + // UNSUPPORTED_COMPRESSION_TYPE is returned if the request version was + // under 10 and the batch is compressed with zstd. + // + // UNSUPPORTED_VERSION is returned if the broker has records newer than + // the client can support (magic value) and the broker has disabled + // message downconversion. + // + // NOT_LEADER_FOR_PARTITION is returned if requesting data for this + // partition as a follower (non-negative ReplicaID) and the broker + // is not the leader for this partition. + // + // REPLICA_NOT_AVAILABLE is returned if the partition exists but + // the requested broker is not the leader for it. + // + // KAFKA_STORAGE_EXCEPTION is returned if the requested partition is + // offline. + // + // UNKNOWN_LEADER_EPOCH is returned if the request used a larger leader + // epoch than the broker knows of. + // + // FENCED_LEADER_EPOCH is returned if the request used a smaller leader + // epoch than the broker is at (see KIP-320). + // + // OFFSET_OUT_OF_RANGE is returned if requesting an offset past the + // current end offset or before the beginning offset. + // + // UNKNOWN_TOPIC_ID is returned if using uuid's and the uuid is unknown + // (v13+ / Kafka 3.1+). + // + // OFFSET_MOVED_TO_TIERED_STORAGE is returned if a follower is trying to + // fetch from an offset that is now in tiered storage. + ErrorCode int16 + + // HighWatermark is the current high watermark for this partition, + // that is, the current offset that is on all in sync replicas. + HighWatermark int64 + + // LastStableOffset is the offset at which all prior offsets have + // been "decided". Non transactional records are always decided + // immediately, but transactional records are only decided once + // they are commited or aborted. + // + // The LastStableOffset will always be at or under the HighWatermark. + // + // This field has a default of -1. + LastStableOffset int64 // v4+ + + // LogStartOffset is the beginning offset for this partition. + // This field was added for KIP-107. + // + // This field has a default of -1. + LogStartOffset int64 // v5+ + + // In case divergence is detected based on the LastFetchedEpoch and + // FetchOffset in the request, this field indicates the largest epoch and + // its end offset such that subsequent records are known to diverge. + DivergingEpoch FetchResponseTopicPartitionDivergingEpoch // tag 0 + + // CurrentLeader is the currently known leader ID and epoch for this + // partition. + CurrentLeader FetchResponseTopicPartitionCurrentLeader // tag 1 + + // In the case of fetching an offset less than the LogStartOffset, this + // is the end offset and epoch that should be used in the FetchSnapshot + // request. + SnapshotID FetchResponseTopicPartitionSnapshotID // tag 2 + + // AbortedTransactions is an array of aborted transactions within the + // returned offset range. This is only returned if the requested + // isolation level was READ_COMMITTED. + AbortedTransactions []FetchResponseTopicPartitionAbortedTransaction // v4+ + + // PreferredReadReplica is the preferred replica for the consumer + // to use on its next fetch request. See KIP-392. + // + // This field has a default of -1. + PreferredReadReplica int32 // v11+ + + // RecordBatches is an array of record batches for a topic partition. + // + // This is encoded as a raw byte array, with the standard int32 size + // prefix. One important catch to note is that the final element of the + // array may be **partial**. This is an optimization in Kafka that + // clients must deal with by discarding a partial trailing batch. 
+ // + // Starting v2, this transitioned to the MessageSet v1 format (and this + // would contain many MessageV1 structs). + // + // Starting v4, this transitioned to the RecordBatch format (thus this + // contains many RecordBatch structs). + RecordBatches []byte + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v12+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to FetchResponseTopicPartition. +func (v *FetchResponseTopicPartition) Default() { + v.LastStableOffset = -1 + v.LogStartOffset = -1 + { + v := &v.DivergingEpoch + _ = v + v.Epoch = -1 + v.EndOffset = -1 + } + { + v := &v.CurrentLeader + _ = v + v.LeaderID = -1 + v.LeaderEpoch = -1 + } + { + v := &v.SnapshotID + _ = v + v.EndOffset = -1 + v.Epoch = -1 + } + v.PreferredReadReplica = -1 +} + +// NewFetchResponseTopicPartition returns a default FetchResponseTopicPartition +// This is a shortcut for creating a struct and calling Default yourself. +func NewFetchResponseTopicPartition() FetchResponseTopicPartition { + var v FetchResponseTopicPartition + v.Default() + return v +} + +type FetchResponseTopic struct { + // Topic is a topic that records may have been received for. + Topic string // v0-v12 + + // TopicID is the uuid of a topic that records may have been received for. + TopicID [16]byte // v13+ + + // Partitions contains partitions in a topic that records may have + // been received for. + Partitions []FetchResponseTopicPartition + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v12+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to FetchResponseTopic. +func (v *FetchResponseTopic) Default() { +} + +// NewFetchResponseTopic returns a default FetchResponseTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewFetchResponseTopic() FetchResponseTopic { + var v FetchResponseTopic + v.Default() + return v +} + +type FetchResponseBroker struct { + // NodeID is the node ID of a Kafka broker. + NodeID int32 + + // Host is the hostname of a Kafka broker. + Host string + + // Port is the port of a Kafka broker. + Port int32 + + // Rack is the rack this Kafka broker is in. + Rack *string + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v12+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to FetchResponseBroker. +func (v *FetchResponseBroker) Default() { +} + +// NewFetchResponseBroker returns a default FetchResponseBroker +// This is a shortcut for creating a struct and calling Default yourself. +func NewFetchResponseBroker() FetchResponseBroker { + var v FetchResponseBroker + v.Default() + return v +} + +// FetchResponse is returned from a FetchRequest. +type FetchResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ThrottleMillis is how long of a throttle Kafka will apply to the client + // after this request. + // For Kafka < 2.0.0, the throttle is applied before issuing a response. + // For Kafka >= 2.0.0, the throttle is applied after issuing a response. + // + // This request switched at version 8. + ThrottleMillis int32 // v1+ + + // ErrorCode is a full-response error code for a fetch request. This was + // added in support of KIP-227. This error is only non-zero if using fetch + // sessions. 
+ // + // FETCH_SESSION_ID_NOT_FOUND is returned if the request used a + // session ID that the broker does not know of. + // + // INVALID_FETCH_SESSION_EPOCH is returned if the request used an + // invalid session epoch. + ErrorCode int16 // v7+ + + // SessionID is the id for this session if using sessions. + // + // See KIP-227 for more details. + SessionID int32 // v7+ + + // Topics contains an array of topic partitions and the records received + // for them. + Topics []FetchResponseTopic + + // Brokers is present if any partition responses contain the error + // NOT_LEADER_OR_FOLLOWER. + Brokers []FetchResponseBroker // tag 0 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v12+ +} + +func (*FetchResponse) Key() int16 { return 1 } +func (*FetchResponse) MaxVersion() int16 { return 16 } +func (v *FetchResponse) SetVersion(version int16) { v.Version = version } +func (v *FetchResponse) GetVersion() int16 { return v.Version } +func (v *FetchResponse) IsFlexible() bool { return v.Version >= 12 } +func (v *FetchResponse) Throttle() (int32, bool) { return v.ThrottleMillis, v.Version >= 8 } +func (v *FetchResponse) SetThrottle(throttleMillis int32) { v.ThrottleMillis = throttleMillis } +func (v *FetchResponse) RequestKind() Request { return &FetchRequest{Version: v.Version} } + +func (v *FetchResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 12 + _ = isFlexible + if version >= 1 { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + if version >= 7 { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + if version >= 7 { + v := v.SessionID + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + if version >= 0 && version <= 12 { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + if version >= 13 { + v := v.TopicID + dst = kbin.AppendUuid(dst, v) + } + { + v := v.Partitions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Partition + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.HighWatermark + dst = kbin.AppendInt64(dst, v) + } + if version >= 4 { + v := v.LastStableOffset + dst = kbin.AppendInt64(dst, v) + } + if version >= 5 { + v := v.LogStartOffset + dst = kbin.AppendInt64(dst, v) + } + if version >= 4 { + v := v.AbortedTransactions + if isFlexible { + dst = kbin.AppendCompactNullableArrayLen(dst, len(v), v == nil) + } else { + dst = kbin.AppendNullableArrayLen(dst, len(v), v == nil) + } + for i := range v { + v := &v[i] + { + v := v.ProducerID + dst = kbin.AppendInt64(dst, v) + } + { + v := v.FirstOffset + dst = kbin.AppendInt64(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if version >= 11 { + v := v.PreferredReadReplica + dst = kbin.AppendInt32(dst, v) + } + { + v := v.RecordBatches + if isFlexible { + dst = kbin.AppendCompactNullableBytes(dst, v) + } else { + dst = kbin.AppendNullableBytes(dst, v) + } + } + if isFlexible { + var toEncode []uint32 + if !reflect.DeepEqual(v.DivergingEpoch, (func() FetchResponseTopicPartitionDivergingEpoch { + var v 
FetchResponseTopicPartitionDivergingEpoch + v.Default() + return v + })()) { + toEncode = append(toEncode, 0) + } + if !reflect.DeepEqual(v.CurrentLeader, (func() FetchResponseTopicPartitionCurrentLeader { + var v FetchResponseTopicPartitionCurrentLeader + v.Default() + return v + })()) { + toEncode = append(toEncode, 1) + } + if !reflect.DeepEqual(v.SnapshotID, (func() FetchResponseTopicPartitionSnapshotID { + var v FetchResponseTopicPartitionSnapshotID + v.Default() + return v + })()) { + toEncode = append(toEncode, 2) + } + dst = kbin.AppendUvarint(dst, uint32(len(toEncode)+v.UnknownTags.Len())) + for _, tag := range toEncode { + switch tag { + case 0: + { + v := v.DivergingEpoch + dst = kbin.AppendUvarint(dst, 0) + sized := false + lenAt := len(dst) + fDivergingEpoch: + { + v := v.Epoch + dst = kbin.AppendInt32(dst, v) + } + { + v := v.EndOffset + dst = kbin.AppendInt64(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + if !sized { + dst = kbin.AppendUvarint(dst[:lenAt], uint32(len(dst[lenAt:]))) + sized = true + goto fDivergingEpoch + } + } + case 1: + { + v := v.CurrentLeader + dst = kbin.AppendUvarint(dst, 1) + sized := false + lenAt := len(dst) + fCurrentLeader: + { + v := v.LeaderID + dst = kbin.AppendInt32(dst, v) + } + { + v := v.LeaderEpoch + dst = kbin.AppendInt32(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + if !sized { + dst = kbin.AppendUvarint(dst[:lenAt], uint32(len(dst[lenAt:]))) + sized = true + goto fCurrentLeader + } + } + case 2: + { + v := v.SnapshotID + dst = kbin.AppendUvarint(dst, 2) + sized := false + lenAt := len(dst) + fSnapshotID: + { + v := v.EndOffset + dst = kbin.AppendInt64(dst, v) + } + { + v := v.Epoch + dst = kbin.AppendInt32(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + if !sized { + dst = kbin.AppendUvarint(dst[:lenAt], uint32(len(dst[lenAt:]))) + sized = true + goto fSnapshotID + } + } + } + } + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + var toEncode []uint32 + if len(v.Brokers) > 0 { + toEncode = append(toEncode, 0) + } + dst = kbin.AppendUvarint(dst, uint32(len(toEncode)+v.UnknownTags.Len())) + for _, tag := range toEncode { + switch tag { + case 0: + { + v := v.Brokers + dst = kbin.AppendUvarint(dst, 0) + sized := false + lenAt := len(dst) + fBrokers: + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.NodeID + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Host + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Port + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Rack + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + if !sized { + dst = kbin.AppendUvarint(dst[:lenAt], uint32(len(dst[lenAt:]))) + sized = true + goto fBrokers + } + } + } + } + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *FetchResponse) ReadFrom(src 
[]byte) error { + return v.readFrom(src, false) +} + +func (v *FetchResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *FetchResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 12 + _ = isFlexible + s := v + if version >= 1 { + v := b.Int32() + s.ThrottleMillis = v + } + if version >= 7 { + v := b.Int16() + s.ErrorCode = v + } + if version >= 7 { + v := b.Int32() + s.SessionID = v + } + { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]FetchResponseTopic, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + if version >= 0 && version <= 12 { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + if version >= 13 { + v := b.Uuid() + s.TopicID = v + } + { + v := s.Partitions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]FetchResponseTopicPartition, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int32() + s.Partition = v + } + { + v := b.Int16() + s.ErrorCode = v + } + { + v := b.Int64() + s.HighWatermark = v + } + if version >= 4 { + v := b.Int64() + s.LastStableOffset = v + } + if version >= 5 { + v := b.Int64() + s.LogStartOffset = v + } + if version >= 4 { + v := s.AbortedTransactions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if version < 0 || l == 0 { + a = []FetchResponseTopicPartitionAbortedTransaction{} + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]FetchResponseTopicPartitionAbortedTransaction, l)...) 
+ } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int64() + s.ProducerID = v + } + { + v := b.Int64() + s.FirstOffset = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.AbortedTransactions = v + } + if version >= 11 { + v := b.Int32() + s.PreferredReadReplica = v + } + { + var v []byte + if isFlexible { + v = b.CompactNullableBytes() + } else { + v = b.NullableBytes() + } + s.RecordBatches = v + } + if isFlexible { + for i := b.Uvarint(); i > 0; i-- { + switch key := b.Uvarint(); key { + default: + s.UnknownTags.Set(key, b.Span(int(b.Uvarint()))) + case 0: + b := kbin.Reader{Src: b.Span(int(b.Uvarint()))} + v := &s.DivergingEpoch + v.Default() + s := v + { + v := b.Int32() + s.Epoch = v + } + { + v := b.Int64() + s.EndOffset = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + if err := b.Complete(); err != nil { + return err + } + case 1: + b := kbin.Reader{Src: b.Span(int(b.Uvarint()))} + v := &s.CurrentLeader + v.Default() + s := v + { + v := b.Int32() + s.LeaderID = v + } + { + v := b.Int32() + s.LeaderEpoch = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + if err := b.Complete(); err != nil { + return err + } + case 2: + b := kbin.Reader{Src: b.Span(int(b.Uvarint()))} + v := &s.SnapshotID + v.Default() + s := v + { + v := b.Int64() + s.EndOffset = v + } + { + v := b.Int32() + s.Epoch = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + if err := b.Complete(); err != nil { + return err + } + } + } + } + } + v = a + s.Partitions = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + if isFlexible { + for i := b.Uvarint(); i > 0; i-- { + switch key := b.Uvarint(); key { + default: + s.UnknownTags.Set(key, b.Span(int(b.Uvarint()))) + case 0: + b := kbin.Reader{Src: b.Span(int(b.Uvarint()))} + v := s.Brokers + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]FetchResponseBroker, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int32() + s.NodeID = v + } + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Host = v + } + { + v := b.Int32() + s.Port = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.Rack = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Brokers = v + if err := b.Complete(); err != nil { + return err + } + } + } + } + return b.Complete() +} + +// NewPtrFetchResponse returns a pointer to a default FetchResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrFetchResponse() *FetchResponse { + var v FetchResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to FetchResponse. +func (v *FetchResponse) Default() { +} + +// NewFetchResponse returns a default FetchResponse +// This is a shortcut for creating a struct and calling Default yourself. 
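+//
+// As context for the tagged-field handling above, an illustrative sketch (not
+// generated code): in flexible versions, each optional field is written as a
+// uvarint tag key, a uvarint payload size, and then the payload. This uses
+// the vendored kbin helpers and is a simplified equivalent of the in-place
+// sizing done above; leaderID and leaderEpoch are hypothetical values.
+//
+//    var leaderID, leaderEpoch int32 = 7, 3             // hypothetical values
+//    payload := kbin.AppendInt32(nil, leaderID)
+//    payload = kbin.AppendInt32(payload, leaderEpoch)
+//    buf := kbin.AppendUvarint(nil, 1)                   // tag 1: CurrentLeader
+//    buf = kbin.AppendUvarint(buf, uint32(len(payload))) // payload size
+//    buf = append(buf, payload...)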
+func NewFetchResponse() FetchResponse { + var v FetchResponse + v.Default() + return v +} + +type ListOffsetsRequestTopicPartition struct { + // Partition is a partition of a topic to get offsets for. + Partition int32 + + // CurrentLeaderEpoch, proposed in KIP-320 and introduced in Kafka 2.1.0, + // allows brokers to check if the client is fenced (has an out of date + // leader) or is using an unknown leader. + // + // The initial leader epoch can be determined from a MetadataResponse. + // To skip log truncation checking, use -1. + // + // This field has a default of -1. + CurrentLeaderEpoch int32 // v4+ + + // Timestamp controls which offset to return in a response for this + // partition. + // + // The offset returned will be the one of the message whose timestamp is + // the first timestamp greater than or equal to this requested timestamp. + // + // If no such message is found, then no offset is returned (-1). + // + // There exist two special timestamps: -2 corresponds to the earliest + // timestamp, and -1 corresponds to the latest. + // + // If you are talking to Kafka 3.0+, there exists an additional special + // timestamp -3 that returns the latest timestamp produced so far and its + // corresponding offset. This is subtly different from the latest offset, + // because timestamps are client-side generated. More importantly though, + // because this returns the latest produced timestamp, this can be used + // to determine topic "liveness" (when was the last produce?). + // Previously, this was not easy to determine. See KIP-734 for more + // detail. + // + // If you are talking to Kafka 3.4+ and using request version 8+ (for + // KIP-405), the new special timestamp -4 returns the local log start + // offset. In the context of tiered storage, the earliest local log start + // offset is the offset actually available on disk on the broker. + Timestamp int64 + + // MaxNumOffsets is the maximum number of offsets to report. + // This was removed after v0. + // + // This field has a default of 1. + MaxNumOffsets int32 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v6+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to ListOffsetsRequestTopicPartition. +func (v *ListOffsetsRequestTopicPartition) Default() { + v.CurrentLeaderEpoch = -1 + v.MaxNumOffsets = 1 +} + +// NewListOffsetsRequestTopicPartition returns a default ListOffsetsRequestTopicPartition +// This is a shortcut for creating a struct and calling Default yourself. +func NewListOffsetsRequestTopicPartition() ListOffsetsRequestTopicPartition { + var v ListOffsetsRequestTopicPartition + v.Default() + return v +} + +type ListOffsetsRequestTopic struct { + // Topic is a topic to get offsets for. + Topic string + + // Partitions is an array of partitions in a topic to get offsets for. + Partitions []ListOffsetsRequestTopicPartition + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v6+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to ListOffsetsRequestTopic. +func (v *ListOffsetsRequestTopic) Default() { +} + +// NewListOffsetsRequestTopic returns a default ListOffsetsRequestTopic +// This is a shortcut for creating a struct and calling Default yourself. 
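+//
+// A usage sketch (not generated code) for the Timestamp sentinels documented
+// above, built with the constructors from this file; the topic name is
+// hypothetical, and from outside this package the identifiers are
+// kmsg-qualified.
+//
+//    req := NewPtrListOffsetsRequest()
+//    topic := NewListOffsetsRequestTopic()
+//    topic.Topic = "ingest" // hypothetical topic name
+//    part := NewListOffsetsRequestTopicPartition()
+//    part.Partition = 0
+//    part.Timestamp = -1 // -1 latest, -2 earliest, -3 max timestamp (3.0+), -4 local start (3.4+)
+//    topic.Partitions = append(topic.Partitions, part)
+//    req.Topics = append(req.Topics, topic)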
+func NewListOffsetsRequestTopic() ListOffsetsRequestTopic { + var v ListOffsetsRequestTopic + v.Default() + return v +} + +// ListOffsetsRequest requests partition offsets from Kafka for use in +// consuming records. +// +// Version 5, introduced in Kafka 2.2.0, is the same as version 4. Using +// version 5 implies you support Kafka's OffsetNotAvailableException +// See KIP-207 for details. +// +// Version 7, introduced in Kafka 3.0, supports -3 as a timestamp to return +// the timestamp and offset for the record with the largest timestamp. +// +// Version 8, introduced in Kafka 3.4, supports -4 as a timestamp to return +// the local log start offset (in the context of tiered storage, see KIP-405). +type ListOffsetsRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ReplicaID is the broker ID to get offsets from. As a Kafka client, use -1. + // The consumer replica ID (-1) causes requests to only succeed if issued + // against the leader broker. + // + // This field has a default of -1. + ReplicaID int32 + + // IsolationLevel configures which record offsets are visible in the + // response. READ_UNCOMMITTED (0) makes all records visible. READ_COMMITTED + // (1) makes non-transactional and committed transactional records visible. + // READ_COMMITTED means all offsets smaller than the last stable offset and + // includes aborted transactions (allowing consumers to discard aborted + // records). + IsolationLevel int8 // v2+ + + // Topics is an array of topics to get offsets for. + Topics []ListOffsetsRequestTopic + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v6+ +} + +func (*ListOffsetsRequest) Key() int16 { return 2 } +func (*ListOffsetsRequest) MaxVersion() int16 { return 8 } +func (v *ListOffsetsRequest) SetVersion(version int16) { v.Version = version } +func (v *ListOffsetsRequest) GetVersion() int16 { return v.Version } +func (v *ListOffsetsRequest) IsFlexible() bool { return v.Version >= 6 } +func (v *ListOffsetsRequest) ResponseKind() Response { + r := &ListOffsetsResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. 
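+//
+// An illustrative call, under the assumption that the kgo.Client vendored in
+// this change satisfies Requestor via its Request method; req, ctx, and
+// client are hypothetical.
+//
+//    resp, err := req.RequestWith(ctx, client)
+//    if err != nil {
+//        return err // transport/request error; per-partition errors live in resp
+//    }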
+func (v *ListOffsetsRequest) RequestWith(ctx context.Context, r Requestor) (*ListOffsetsResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*ListOffsetsResponse) + return resp, err +} + +func (v *ListOffsetsRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 6 + _ = isFlexible + { + v := v.ReplicaID + dst = kbin.AppendInt32(dst, v) + } + if version >= 2 { + v := v.IsolationLevel + dst = kbin.AppendInt8(dst, v) + } + { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Partitions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Partition + dst = kbin.AppendInt32(dst, v) + } + if version >= 4 { + v := v.CurrentLeaderEpoch + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Timestamp + dst = kbin.AppendInt64(dst, v) + } + if version >= 0 && version <= 0 { + v := v.MaxNumOffsets + dst = kbin.AppendInt32(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *ListOffsetsRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *ListOffsetsRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *ListOffsetsRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 6 + _ = isFlexible + s := v + { + v := b.Int32() + s.ReplicaID = v + } + if version >= 2 { + v := b.Int8() + s.IsolationLevel = v + } + { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]ListOffsetsRequestTopic, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + { + v := s.Partitions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]ListOffsetsRequestTopicPartition, l)...) 
+ } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int32() + s.Partition = v + } + if version >= 4 { + v := b.Int32() + s.CurrentLeaderEpoch = v + } + { + v := b.Int64() + s.Timestamp = v + } + if version >= 0 && version <= 0 { + v := b.Int32() + s.MaxNumOffsets = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Partitions = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrListOffsetsRequest returns a pointer to a default ListOffsetsRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrListOffsetsRequest() *ListOffsetsRequest { + var v ListOffsetsRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to ListOffsetsRequest. +func (v *ListOffsetsRequest) Default() { + v.ReplicaID = -1 +} + +// NewListOffsetsRequest returns a default ListOffsetsRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewListOffsetsRequest() ListOffsetsRequest { + var v ListOffsetsRequest + v.Default() + return v +} + +type ListOffsetsResponseTopicPartition struct { + // Partition is the partition this array slot is for. + Partition int32 + + // ErrorCode is any error for a topic partition in a ListOffsets request. + // + // TOPIC_AUTHORIZATION_FAILED is returned if the client is not authorized + // to describe the topic. + // + // INVALID_REQUEST is returned if the requested topic partitions had + // contained duplicates. + // + // KAFKA_STORAGE_EXCEPTION is returned if the topic / partition is in + // an offline log directory. + // + // UNSUPPORTED_FOR_MESSAGE_FORMAT is returned if the broker is using + // Kafka 0.10.0 messages and the requested timestamp was not -1 nor -2. + // + // NOT_LEADER_FOR_PARTITION is returned if the broker is not a leader + // for this partition. This means that the client has stale metadata. + // If the request used the debug replica ID, the returned error will + // be REPLICA_NOT_AVAILABLE. + // + // UNKNOWN_TOPIC_OR_PARTITION is returned if the broker does not know + // of the requested topic or partition. + // + // FENCED_LEADER_EPOCH is returned if the broker has a higher leader + // epoch than what the request sent. + // + // UNKNOWN_LEADER_EPOCH is returned if the request used a leader epoch + // that the broker does not know about. + // + // OFFSET_NOT_AVAILABLE, introduced in Kafka 2.2.0 with produce request + // v5+, is returned when talking to a broker that is a new leader while + // that broker's high water mark catches up. This avoids situations where + // the old broker returned higher offsets than the new broker would. Note + // that if unclean leader election is allowed, you could still run into + // the situation where offsets returned from list offsets requests are + // not monotonically increasing. This error is only returned if the + // request used the consumer replica ID (-1). If the client did not use + // a v5+ list offsets request, LEADER_NOT_AVAILABLE is returned. + // See KIP-207 for more details. + ErrorCode int16 + + // OldStyleOffsets is a list of offsets. This was removed after + // version 0 and, since it is so historic, is undocumented. 
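+ //
+ // (Illustrative, not generated: the error codes listed above for ErrorCode
+ // map to Go errors via the vendored kerr package; p is a hypothetical
+ // ListOffsetsResponseTopicPartition.)
+ //
+ //    if err := kerr.ErrorForCode(p.ErrorCode); err != nil {
+ //        if kerr.IsRetriable(err) {
+ //            // e.g. refresh metadata and retry this partition
+ //        }
+ //    }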
+ OldStyleOffsets []int64 + + // If the request was for the earliest or latest timestamp (-2 or -1), or + // if an offset could not be found after the requested one, this will be -1. + // + // This field has a default of -1. + Timestamp int64 // v1+ + + // Offset is the offset corresponding to the record on or after the + // requested timestamp. If one could not be found, this will be -1. + // + // This field has a default of -1. + Offset int64 // v1+ + + // LeaderEpoch is the leader epoch of the record at this offset, + // or -1 if there was no leader epoch. + // + // This field has a default of -1. + LeaderEpoch int32 // v4+ + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v6+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to ListOffsetsResponseTopicPartition. +func (v *ListOffsetsResponseTopicPartition) Default() { + v.Timestamp = -1 + v.Offset = -1 + v.LeaderEpoch = -1 +} + +// NewListOffsetsResponseTopicPartition returns a default ListOffsetsResponseTopicPartition +// This is a shortcut for creating a struct and calling Default yourself. +func NewListOffsetsResponseTopicPartition() ListOffsetsResponseTopicPartition { + var v ListOffsetsResponseTopicPartition + v.Default() + return v +} + +type ListOffsetsResponseTopic struct { + // Topic is the topic this array slot is for. + Topic string + + // Partitions is an array of partition responses corresponding to + // the requested partitions for a topic. + Partitions []ListOffsetsResponseTopicPartition + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v6+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to ListOffsetsResponseTopic. +func (v *ListOffsetsResponseTopic) Default() { +} + +// NewListOffsetsResponseTopic returns a default ListOffsetsResponseTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewListOffsetsResponseTopic() ListOffsetsResponseTopic { + var v ListOffsetsResponseTopic + v.Default() + return v +} + +// ListOffsetsResponse is returned from a ListOffsetsRequest. +type ListOffsetsResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ThrottleMillis is how long of a throttle Kafka will apply to the client + // after this request. + // For Kafka < 2.0.0, the throttle is applied before issuing a response. + // For Kafka >= 2.0.0, the throttle is applied after issuing a response. + // + // This request switched at version 3. + ThrottleMillis int32 // v2+ + + // Topics is an array of topic / partition responses corresponding to + // the requested topics and partitions. + Topics []ListOffsetsResponseTopic + + // UnknownTags are tags Kafka sent that we do not know the purpose of. 
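+ //
+ // (Illustrative, not generated: collecting the answer per partition from a
+ // decoded response; resp is a hypothetical *ListOffsetsResponse for a single
+ // topic, and Offset is -1 when no offset satisfied the requested timestamp.)
+ //
+ //    offsets := make(map[int32]int64)
+ //    for _, t := range resp.Topics {
+ //        for _, p := range t.Partitions {
+ //            if p.ErrorCode == 0 && p.Offset >= 0 {
+ //                offsets[p.Partition] = p.Offset
+ //            }
+ //        }
+ //    }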
+ UnknownTags Tags // v6+ +} + +func (*ListOffsetsResponse) Key() int16 { return 2 } +func (*ListOffsetsResponse) MaxVersion() int16 { return 8 } +func (v *ListOffsetsResponse) SetVersion(version int16) { v.Version = version } +func (v *ListOffsetsResponse) GetVersion() int16 { return v.Version } +func (v *ListOffsetsResponse) IsFlexible() bool { return v.Version >= 6 } +func (v *ListOffsetsResponse) Throttle() (int32, bool) { return v.ThrottleMillis, v.Version >= 3 } +func (v *ListOffsetsResponse) SetThrottle(throttleMillis int32) { v.ThrottleMillis = throttleMillis } +func (v *ListOffsetsResponse) RequestKind() Request { return &ListOffsetsRequest{Version: v.Version} } + +func (v *ListOffsetsResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 6 + _ = isFlexible + if version >= 2 { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Partitions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Partition + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + if version >= 0 && version <= 0 { + v := v.OldStyleOffsets + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := v[i] + dst = kbin.AppendInt64(dst, v) + } + } + if version >= 1 { + v := v.Timestamp + dst = kbin.AppendInt64(dst, v) + } + if version >= 1 { + v := v.Offset + dst = kbin.AppendInt64(dst, v) + } + if version >= 4 { + v := v.LeaderEpoch + dst = kbin.AppendInt32(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *ListOffsetsResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *ListOffsetsResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *ListOffsetsResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 6 + _ = isFlexible + s := v + if version >= 2 { + v := b.Int32() + s.ThrottleMillis = v + } + { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]ListOffsetsResponseTopic, l)...) 
+ } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + { + v := s.Partitions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]ListOffsetsResponseTopicPartition, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int32() + s.Partition = v + } + { + v := b.Int16() + s.ErrorCode = v + } + if version >= 0 && version <= 0 { + v := s.OldStyleOffsets + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]int64, l)...) + } + for i := int32(0); i < l; i++ { + v := b.Int64() + a[i] = v + } + v = a + s.OldStyleOffsets = v + } + if version >= 1 { + v := b.Int64() + s.Timestamp = v + } + if version >= 1 { + v := b.Int64() + s.Offset = v + } + if version >= 4 { + v := b.Int32() + s.LeaderEpoch = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Partitions = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrListOffsetsResponse returns a pointer to a default ListOffsetsResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrListOffsetsResponse() *ListOffsetsResponse { + var v ListOffsetsResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to ListOffsetsResponse. +func (v *ListOffsetsResponse) Default() { +} + +// NewListOffsetsResponse returns a default ListOffsetsResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewListOffsetsResponse() ListOffsetsResponse { + var v ListOffsetsResponse + v.Default() + return v +} + +type MetadataRequestTopic struct { + // The topic ID. Only one of either topic ID or topic name should be used. + // If using the topic name, this should just be the default empty value. + TopicID [16]byte // v10+ + + // Topic is the topic to request metadata for. Version 10 switched this + // from a string to a nullable string; if using a topic ID, this field + // should be null. + Topic *string + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v9+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to MetadataRequestTopic. +func (v *MetadataRequestTopic) Default() { +} + +// NewMetadataRequestTopic returns a default MetadataRequestTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewMetadataRequestTopic() MetadataRequestTopic { + var v MetadataRequestTopic + v.Default() + return v +} + +// MetadataRequest requests metadata from Kafka. +type MetadataRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // Topics is a list of topics to return metadata about. If this is null + // in v1+, all topics are included. If this is empty, no topics are. 
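+ //
+ // (Illustrative, not generated: nil Topics on v1+ requests metadata for all
+ // topics, while an empty non-nil slice requests none; the topic name below
+ // is hypothetical.)
+ //
+ //    all := NewPtrMetadataRequest()
+ //    all.Topics = nil // all topics (v1+)
+ //
+ //    one := NewPtrMetadataRequest()
+ //    t := NewMetadataRequestTopic()
+ //    name := "ingest"
+ //    t.Topic = &name
+ //    one.Topics = append(one.Topics, t)
+ //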
+ // For v0 (= 9 } +func (v *MetadataRequest) ResponseKind() Response { + r := &MetadataResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. +func (v *MetadataRequest) RequestWith(ctx context.Context, r Requestor) (*MetadataResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*MetadataResponse) + return resp, err +} + +func (v *MetadataRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 9 + _ = isFlexible + { + v := v.Topics + if version >= 1 { + if isFlexible { + dst = kbin.AppendCompactNullableArrayLen(dst, len(v), v == nil) + } else { + dst = kbin.AppendNullableArrayLen(dst, len(v), v == nil) + } + } else { + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + } + for i := range v { + v := &v[i] + if version >= 10 { + v := v.TopicID + dst = kbin.AppendUuid(dst, v) + } + { + v := v.Topic + if version < 10 { + var vv string + if v != nil { + vv = *v + } + { + v := vv + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + } else { + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if version >= 4 { + v := v.AllowAutoTopicCreation + dst = kbin.AppendBool(dst, v) + } + if version >= 8 && version <= 10 { + v := v.IncludeClusterAuthorizedOperations + dst = kbin.AppendBool(dst, v) + } + if version >= 8 { + v := v.IncludeTopicAuthorizedOperations + dst = kbin.AppendBool(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *MetadataRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *MetadataRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *MetadataRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 9 + _ = isFlexible + s := v + { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if version < 1 || l == 0 { + a = []MetadataRequestTopic{} + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]MetadataRequestTopic, l)...) 
+ } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + if version >= 10 { + v := b.Uuid() + s.TopicID = v + } + { + var v *string + if version < 10 { + var vv string + if isFlexible { + if unsafe { + vv = b.UnsafeCompactString() + } else { + vv = b.CompactString() + } + } else { + if unsafe { + vv = b.UnsafeString() + } else { + vv = b.String() + } + } + v = &vv + } else { + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + } + s.Topic = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + if version >= 4 { + v := b.Bool() + s.AllowAutoTopicCreation = v + } + if version >= 8 && version <= 10 { + v := b.Bool() + s.IncludeClusterAuthorizedOperations = v + } + if version >= 8 { + v := b.Bool() + s.IncludeTopicAuthorizedOperations = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrMetadataRequest returns a pointer to a default MetadataRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrMetadataRequest() *MetadataRequest { + var v MetadataRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to MetadataRequest. +func (v *MetadataRequest) Default() { +} + +// NewMetadataRequest returns a default MetadataRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewMetadataRequest() MetadataRequest { + var v MetadataRequest + v.Default() + return v +} + +type MetadataResponseBroker struct { + // NodeID is the node ID of a Kafka broker. + NodeID int32 + + // Host is the hostname of a Kafka broker. + Host string + + // Port is the port of a Kafka broker. + Port int32 + + // Rack is the rack this Kafka broker is in. + Rack *string // v1+ + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v9+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to MetadataResponseBroker. +func (v *MetadataResponseBroker) Default() { +} + +// NewMetadataResponseBroker returns a default MetadataResponseBroker +// This is a shortcut for creating a struct and calling Default yourself. +func NewMetadataResponseBroker() MetadataResponseBroker { + var v MetadataResponseBroker + v.Default() + return v +} + +type MetadataResponseTopicPartition struct { + // ErrorCode is any error for a partition in topic metadata. + // + // LEADER_NOT_AVAILABLE is returned if a leader is unavailable for this + // partition. For v0 metadata responses, this is also returned if a + // partition leader's listener does not exist. + // + // LISTENER_NOT_FOUND is returned if a leader ID is known but the + // listener for it is not (v1+). + // + // REPLICA_NOT_AVAILABLE is returned in v0 responses if any replica is + // unavailable. + // + // UNKNOWN_TOPIC_ID is returned if using a topic ID and the ID does not + // exist. + ErrorCode int16 + + // Partition is a partition number for a topic. + Partition int32 + + // Leader is the broker leader for this partition. This will be -1 + // on leader / listener error. + Leader int32 + + // LeaderEpoch, proposed in KIP-320 and introduced in Kafka 2.1.0 is the + // epoch of the broker leader. + // + // This field has a default of -1. 
+ LeaderEpoch int32 // v7+ + + // Replicas returns all broker IDs containing replicas of this partition. + Replicas []int32 + + // ISR returns all broker IDs of in-sync replicas of this partition. + ISR []int32 + + // OfflineReplicas, proposed in KIP-112 and introduced in Kafka 1.0, + // returns all offline broker IDs that should be replicating this partition. + OfflineReplicas []int32 // v5+ + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v9+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to MetadataResponseTopicPartition. +func (v *MetadataResponseTopicPartition) Default() { + v.LeaderEpoch = -1 +} + +// NewMetadataResponseTopicPartition returns a default MetadataResponseTopicPartition +// This is a shortcut for creating a struct and calling Default yourself. +func NewMetadataResponseTopicPartition() MetadataResponseTopicPartition { + var v MetadataResponseTopicPartition + v.Default() + return v +} + +type MetadataResponseTopic struct { + // ErrorCode is any error for a topic in a metadata request. + // + // TOPIC_AUTHORIZATION_FAILED is returned if the client is not authorized + // to describe the topic, or if the metadata request specified topic auto + // creation, the topic did not exist, and the user lacks permission to create. + // + // UNKNOWN_TOPIC_OR_PARTITION is returned if a topic does not exist and + // the request did not specify autocreation. + // + // LEADER_NOT_AVAILABLE is returned if a new topic is created successfully + // (since there is no leader on an immediately new topic). + // + // There can be a myriad of other errors for unsuccessful topic creation. + ErrorCode int16 + + // Topic is the topic this metadata corresponds to. + Topic *string + + // The topic ID. + TopicID [16]byte // v10+ + + // IsInternal signifies whether this topic is a Kafka internal topic. + IsInternal bool // v1+ + + // Partitions contains metadata about partitions for a topic. + Partitions []MetadataResponseTopicPartition + + // AuthorizedOperations, proposed in KIP-430 and introduced in Kafka 2.3.0, + // is a bitfield (corresponding to AclOperation) containing which operations + // the client is allowed to perform on this topic. + // This is only returned if requested. + // + // This field has a default of -2147483648. + AuthorizedOperations int32 // v8+ + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v9+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to MetadataResponseTopic. +func (v *MetadataResponseTopic) Default() { + v.AuthorizedOperations = -2147483648 +} + +// NewMetadataResponseTopic returns a default MetadataResponseTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewMetadataResponseTopic() MetadataResponseTopic { + var v MetadataResponseTopic + v.Default() + return v +} + +// MetadataResponse is returned from a MetdataRequest. +type MetadataResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ThrottleMillis is how long of a throttle Kafka will apply to the client + // after this request. + // For Kafka < 2.0.0, the throttle is applied before issuing a response. + // For Kafka >= 2.0.0, the throttle is applied after issuing a response. + // + // This request switched at version 6. + ThrottleMillis int32 // v3+ + + // Brokers is a set of alive Kafka brokers. 
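+ //
+ // (Illustrative, not generated: resolving each partition's Leader ID to a
+ // broker address; resp is a hypothetical *MetadataResponse and the fmt
+ // package is assumed.)
+ //
+ //    addrs := make(map[int32]string, len(resp.Brokers))
+ //    for _, b := range resp.Brokers {
+ //        addrs[b.NodeID] = fmt.Sprintf("%s:%d", b.Host, b.Port)
+ //    }
+ //    for _, t := range resp.Topics {
+ //        for _, p := range t.Partitions {
+ //            leaderAddr := addrs[p.Leader] // empty if the leader is unknown (-1)
+ //            _ = leaderAddr
+ //        }
+ //    }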
+ Brokers []MetadataResponseBroker + + // ClusterID, proposed in KIP-78 and introduced in Kafka 0.10.1.0, is a + // unique string specifying the cluster that the replying Kafka belongs to. + ClusterID *string // v2+ + + // ControllerID is the ID of the controller broker (the admin broker). + // + // This field has a default of -1. + ControllerID int32 // v1+ + + // Topics contains metadata about each topic requested in the + // MetadataRequest. + Topics []MetadataResponseTopic + + // AuthorizedOperations is a bitfield containing which operations the client + // is allowed to perform on this cluster. + // + // This field has a default of -2147483648. + AuthorizedOperations int32 // v8-v10 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v9+ +} + +func (*MetadataResponse) Key() int16 { return 3 } +func (*MetadataResponse) MaxVersion() int16 { return 12 } +func (v *MetadataResponse) SetVersion(version int16) { v.Version = version } +func (v *MetadataResponse) GetVersion() int16 { return v.Version } +func (v *MetadataResponse) IsFlexible() bool { return v.Version >= 9 } +func (v *MetadataResponse) Throttle() (int32, bool) { return v.ThrottleMillis, v.Version >= 6 } +func (v *MetadataResponse) SetThrottle(throttleMillis int32) { v.ThrottleMillis = throttleMillis } +func (v *MetadataResponse) RequestKind() Request { return &MetadataRequest{Version: v.Version} } + +func (v *MetadataResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 9 + _ = isFlexible + if version >= 3 { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Brokers + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.NodeID + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Host + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Port + dst = kbin.AppendInt32(dst, v) + } + if version >= 1 { + v := v.Rack + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if version >= 2 { + v := v.ClusterID + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + if version >= 1 { + v := v.ControllerID + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.Topic + if version < 12 { + var vv string + if v != nil { + vv = *v + } + { + v := vv + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + } else { + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + } + if version >= 10 { + v := v.TopicID + dst = kbin.AppendUuid(dst, v) + } + if version >= 1 { + v := v.IsInternal + dst = kbin.AppendBool(dst, v) + } + { + v := v.Partitions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.ErrorCode + 
dst = kbin.AppendInt16(dst, v) + } + { + v := v.Partition + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Leader + dst = kbin.AppendInt32(dst, v) + } + if version >= 7 { + v := v.LeaderEpoch + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Replicas + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := v[i] + dst = kbin.AppendInt32(dst, v) + } + } + { + v := v.ISR + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := v[i] + dst = kbin.AppendInt32(dst, v) + } + } + if version >= 5 { + v := v.OfflineReplicas + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := v[i] + dst = kbin.AppendInt32(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if version >= 8 { + v := v.AuthorizedOperations + dst = kbin.AppendInt32(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if version >= 8 && version <= 10 { + v := v.AuthorizedOperations + dst = kbin.AppendInt32(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *MetadataResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *MetadataResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *MetadataResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 9 + _ = isFlexible + s := v + if version >= 3 { + v := b.Int32() + s.ThrottleMillis = v + } + { + v := s.Brokers + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]MetadataResponseBroker, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int32() + s.NodeID = v + } + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Host = v + } + { + v := b.Int32() + s.Port = v + } + if version >= 1 { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.Rack = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Brokers = v + } + if version >= 2 { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.ClusterID = v + } + if version >= 1 { + v := b.Int32() + s.ControllerID = v + } + { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]MetadataResponseTopic, l)...) 
+ } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int16() + s.ErrorCode = v + } + { + var v *string + if version < 12 { + var vv string + if isFlexible { + if unsafe { + vv = b.UnsafeCompactString() + } else { + vv = b.CompactString() + } + } else { + if unsafe { + vv = b.UnsafeString() + } else { + vv = b.String() + } + } + v = &vv + } else { + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + } + s.Topic = v + } + if version >= 10 { + v := b.Uuid() + s.TopicID = v + } + if version >= 1 { + v := b.Bool() + s.IsInternal = v + } + { + v := s.Partitions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]MetadataResponseTopicPartition, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int16() + s.ErrorCode = v + } + { + v := b.Int32() + s.Partition = v + } + { + v := b.Int32() + s.Leader = v + } + if version >= 7 { + v := b.Int32() + s.LeaderEpoch = v + } + { + v := s.Replicas + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]int32, l)...) + } + for i := int32(0); i < l; i++ { + v := b.Int32() + a[i] = v + } + v = a + s.Replicas = v + } + { + v := s.ISR + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]int32, l)...) + } + for i := int32(0); i < l; i++ { + v := b.Int32() + a[i] = v + } + v = a + s.ISR = v + } + if version >= 5 { + v := s.OfflineReplicas + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]int32, l)...) + } + for i := int32(0); i < l; i++ { + v := b.Int32() + a[i] = v + } + v = a + s.OfflineReplicas = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Partitions = v + } + if version >= 8 { + v := b.Int32() + s.AuthorizedOperations = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + if version >= 8 && version <= 10 { + v := b.Int32() + s.AuthorizedOperations = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrMetadataResponse returns a pointer to a default MetadataResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrMetadataResponse() *MetadataResponse { + var v MetadataResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to MetadataResponse. +func (v *MetadataResponse) Default() { + v.ControllerID = -1 + v.AuthorizedOperations = -2147483648 +} + +// NewMetadataResponse returns a default MetadataResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewMetadataResponse() MetadataResponse { + var v MetadataResponse + v.Default() + return v +} + +// LeaderAndISRRequestTopicPartition is a common struct that is used across +// different versions of LeaderAndISRRequest. 
+type LeaderAndISRRequestTopicPartition struct { + Topic string // v0-v1 + + Partition int32 + + ControllerEpoch int32 + + Leader int32 + + LeaderEpoch int32 + + ISR []int32 + + ZKVersion int32 + + Replicas []int32 + + AddingReplicas []int32 // v3+ + + RemovingReplicas []int32 // v3+ + + IsNew bool // v1+ + + LeaderRecoveryState int8 // v6+ + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v4+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to LeaderAndISRRequestTopicPartition. +func (v *LeaderAndISRRequestTopicPartition) Default() { +} + +// NewLeaderAndISRRequestTopicPartition returns a default LeaderAndISRRequestTopicPartition +// This is a shortcut for creating a struct and calling Default yourself. +func NewLeaderAndISRRequestTopicPartition() LeaderAndISRRequestTopicPartition { + var v LeaderAndISRRequestTopicPartition + v.Default() + return v +} + +// LeaderAndISRResponseTopicPartition is a common struct that is used across +// different versions of LeaderAndISRResponse. +type LeaderAndISRResponseTopicPartition struct { + Topic string // v0-v4 + + Partition int32 + + ErrorCode int16 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v4+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to LeaderAndISRResponseTopicPartition. +func (v *LeaderAndISRResponseTopicPartition) Default() { +} + +// NewLeaderAndISRResponseTopicPartition returns a default LeaderAndISRResponseTopicPartition +// This is a shortcut for creating a struct and calling Default yourself. +func NewLeaderAndISRResponseTopicPartition() LeaderAndISRResponseTopicPartition { + var v LeaderAndISRResponseTopicPartition + v.Default() + return v +} + +type LeaderAndISRRequestTopicState struct { + Topic string + + TopicID [16]byte // v5+ + + PartitionStates []LeaderAndISRRequestTopicPartition + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v4+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to LeaderAndISRRequestTopicState. +func (v *LeaderAndISRRequestTopicState) Default() { +} + +// NewLeaderAndISRRequestTopicState returns a default LeaderAndISRRequestTopicState +// This is a shortcut for creating a struct and calling Default yourself. +func NewLeaderAndISRRequestTopicState() LeaderAndISRRequestTopicState { + var v LeaderAndISRRequestTopicState + v.Default() + return v +} + +type LeaderAndISRRequestLiveLeader struct { + BrokerID int32 + + Host string + + Port int32 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v4+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to LeaderAndISRRequestLiveLeader. +func (v *LeaderAndISRRequestLiveLeader) Default() { +} + +// NewLeaderAndISRRequestLiveLeader returns a default LeaderAndISRRequestLiveLeader +// This is a shortcut for creating a struct and calling Default yourself. +func NewLeaderAndISRRequestLiveLeader() LeaderAndISRRequestLiveLeader { + var v LeaderAndISRRequestLiveLeader + v.Default() + return v +} + +// LeaderAndISRRequest is an advanced request that controller brokers use +// to broadcast state to other brokers. Manually using this request is a +// great way to break your cluster. 
+// +// As this is an advanced request and there is little reason to issue it as a +// client, this request is undocumented. +// +// Kafka 1.0 introduced version 1. Kafka 2.2 introduced version 2, proposed +// in KIP-380, which changed the layout of the struct to be more memory +// efficient. Kafka 2.4.0 introduced version 3 with KIP-455. +// Kafka 3.4 introduced version 7 with KIP-866. +type LeaderAndISRRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + ControllerID int32 + + // If KRaft controller id is used during migration. See KIP-866. + IsKRaftController bool // v7+ + + ControllerEpoch int32 + + // This field has a default of -1. + BrokerEpoch int64 // v2+ + + Type int8 // v5+ + + PartitionStates []LeaderAndISRRequestTopicPartition // v0-v1 + + TopicStates []LeaderAndISRRequestTopicState // v2+ + + LiveLeaders []LeaderAndISRRequestLiveLeader + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v4+ +} + +func (*LeaderAndISRRequest) Key() int16 { return 4 } +func (*LeaderAndISRRequest) MaxVersion() int16 { return 7 } +func (v *LeaderAndISRRequest) SetVersion(version int16) { v.Version = version } +func (v *LeaderAndISRRequest) GetVersion() int16 { return v.Version } +func (v *LeaderAndISRRequest) IsFlexible() bool { return v.Version >= 4 } +func (v *LeaderAndISRRequest) ResponseKind() Response { + r := &LeaderAndISRResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. +func (v *LeaderAndISRRequest) RequestWith(ctx context.Context, r Requestor) (*LeaderAndISRResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*LeaderAndISRResponse) + return resp, err +} + +func (v *LeaderAndISRRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 4 + _ = isFlexible + { + v := v.ControllerID + dst = kbin.AppendInt32(dst, v) + } + if version >= 7 { + v := v.IsKRaftController + dst = kbin.AppendBool(dst, v) + } + { + v := v.ControllerEpoch + dst = kbin.AppendInt32(dst, v) + } + if version >= 2 { + v := v.BrokerEpoch + dst = kbin.AppendInt64(dst, v) + } + if version >= 5 { + v := v.Type + dst = kbin.AppendInt8(dst, v) + } + if version >= 0 && version <= 1 { + v := v.PartitionStates + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + if version >= 0 && version <= 1 { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Partition + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ControllerEpoch + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Leader + dst = kbin.AppendInt32(dst, v) + } + { + v := v.LeaderEpoch + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ISR + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := v[i] + dst = kbin.AppendInt32(dst, v) + } + } + { + v := v.ZKVersion + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Replicas + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := v[i] + dst = 
kbin.AppendInt32(dst, v) + } + } + if version >= 3 { + v := v.AddingReplicas + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := v[i] + dst = kbin.AppendInt32(dst, v) + } + } + if version >= 3 { + v := v.RemovingReplicas + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := v[i] + dst = kbin.AppendInt32(dst, v) + } + } + if version >= 1 { + v := v.IsNew + dst = kbin.AppendBool(dst, v) + } + if version >= 6 { + v := v.LeaderRecoveryState + dst = kbin.AppendInt8(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if version >= 2 { + v := v.TopicStates + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + if version >= 5 { + v := v.TopicID + dst = kbin.AppendUuid(dst, v) + } + { + v := v.PartitionStates + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + if version >= 0 && version <= 1 { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Partition + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ControllerEpoch + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Leader + dst = kbin.AppendInt32(dst, v) + } + { + v := v.LeaderEpoch + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ISR + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := v[i] + dst = kbin.AppendInt32(dst, v) + } + } + { + v := v.ZKVersion + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Replicas + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := v[i] + dst = kbin.AppendInt32(dst, v) + } + } + if version >= 3 { + v := v.AddingReplicas + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := v[i] + dst = kbin.AppendInt32(dst, v) + } + } + if version >= 3 { + v := v.RemovingReplicas + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := v[i] + dst = kbin.AppendInt32(dst, v) + } + } + if version >= 1 { + v := v.IsNew + dst = kbin.AppendBool(dst, v) + } + if version >= 6 { + v := v.LeaderRecoveryState + dst = kbin.AppendInt8(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + { + v := v.LiveLeaders + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.BrokerID + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Host + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + 
} + { + v := v.Port + dst = kbin.AppendInt32(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *LeaderAndISRRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *LeaderAndISRRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *LeaderAndISRRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 4 + _ = isFlexible + s := v + { + v := b.Int32() + s.ControllerID = v + } + if version >= 7 { + v := b.Bool() + s.IsKRaftController = v + } + { + v := b.Int32() + s.ControllerEpoch = v + } + if version >= 2 { + v := b.Int64() + s.BrokerEpoch = v + } + if version >= 5 { + v := b.Int8() + s.Type = v + } + if version >= 0 && version <= 1 { + v := s.PartitionStates + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]LeaderAndISRRequestTopicPartition, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + if version >= 0 && version <= 1 { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + { + v := b.Int32() + s.Partition = v + } + { + v := b.Int32() + s.ControllerEpoch = v + } + { + v := b.Int32() + s.Leader = v + } + { + v := b.Int32() + s.LeaderEpoch = v + } + { + v := s.ISR + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]int32, l)...) + } + for i := int32(0); i < l; i++ { + v := b.Int32() + a[i] = v + } + v = a + s.ISR = v + } + { + v := b.Int32() + s.ZKVersion = v + } + { + v := s.Replicas + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]int32, l)...) + } + for i := int32(0); i < l; i++ { + v := b.Int32() + a[i] = v + } + v = a + s.Replicas = v + } + if version >= 3 { + v := s.AddingReplicas + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]int32, l)...) + } + for i := int32(0); i < l; i++ { + v := b.Int32() + a[i] = v + } + v = a + s.AddingReplicas = v + } + if version >= 3 { + v := s.RemovingReplicas + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]int32, l)...) 
+ } + for i := int32(0); i < l; i++ { + v := b.Int32() + a[i] = v + } + v = a + s.RemovingReplicas = v + } + if version >= 1 { + v := b.Bool() + s.IsNew = v + } + if version >= 6 { + v := b.Int8() + s.LeaderRecoveryState = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.PartitionStates = v + } + if version >= 2 { + v := s.TopicStates + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]LeaderAndISRRequestTopicState, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + if version >= 5 { + v := b.Uuid() + s.TopicID = v + } + { + v := s.PartitionStates + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]LeaderAndISRRequestTopicPartition, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + if version >= 0 && version <= 1 { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + { + v := b.Int32() + s.Partition = v + } + { + v := b.Int32() + s.ControllerEpoch = v + } + { + v := b.Int32() + s.Leader = v + } + { + v := b.Int32() + s.LeaderEpoch = v + } + { + v := s.ISR + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]int32, l)...) + } + for i := int32(0); i < l; i++ { + v := b.Int32() + a[i] = v + } + v = a + s.ISR = v + } + { + v := b.Int32() + s.ZKVersion = v + } + { + v := s.Replicas + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]int32, l)...) + } + for i := int32(0); i < l; i++ { + v := b.Int32() + a[i] = v + } + v = a + s.Replicas = v + } + if version >= 3 { + v := s.AddingReplicas + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]int32, l)...) + } + for i := int32(0); i < l; i++ { + v := b.Int32() + a[i] = v + } + v = a + s.AddingReplicas = v + } + if version >= 3 { + v := s.RemovingReplicas + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]int32, l)...) 
+ } + for i := int32(0); i < l; i++ { + v := b.Int32() + a[i] = v + } + v = a + s.RemovingReplicas = v + } + if version >= 1 { + v := b.Bool() + s.IsNew = v + } + if version >= 6 { + v := b.Int8() + s.LeaderRecoveryState = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.PartitionStates = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.TopicStates = v + } + { + v := s.LiveLeaders + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]LeaderAndISRRequestLiveLeader, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int32() + s.BrokerID = v + } + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Host = v + } + { + v := b.Int32() + s.Port = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.LiveLeaders = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrLeaderAndISRRequest returns a pointer to a default LeaderAndISRRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrLeaderAndISRRequest() *LeaderAndISRRequest { + var v LeaderAndISRRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to LeaderAndISRRequest. +func (v *LeaderAndISRRequest) Default() { + v.BrokerEpoch = -1 +} + +// NewLeaderAndISRRequest returns a default LeaderAndISRRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewLeaderAndISRRequest() LeaderAndISRRequest { + var v LeaderAndISRRequest + v.Default() + return v +} + +type LeaderAndISRResponseTopic struct { + TopicID [16]byte + + Partitions []LeaderAndISRResponseTopicPartition + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v4+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to LeaderAndISRResponseTopic. +func (v *LeaderAndISRResponseTopic) Default() { +} + +// NewLeaderAndISRResponseTopic returns a default LeaderAndISRResponseTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewLeaderAndISRResponseTopic() LeaderAndISRResponseTopic { + var v LeaderAndISRResponseTopic + v.Default() + return v +} + +// LeaderAndISRResponse is returned from a LeaderAndISRRequest. +type LeaderAndISRResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + ErrorCode int16 + + Partitions []LeaderAndISRResponseTopicPartition // v0-v4 + + Topics []LeaderAndISRResponseTopic // v5+ + + // UnknownTags are tags Kafka sent that we do not know the purpose of. 
+ UnknownTags Tags // v4+ +} + +func (*LeaderAndISRResponse) Key() int16 { return 4 } +func (*LeaderAndISRResponse) MaxVersion() int16 { return 7 } +func (v *LeaderAndISRResponse) SetVersion(version int16) { v.Version = version } +func (v *LeaderAndISRResponse) GetVersion() int16 { return v.Version } +func (v *LeaderAndISRResponse) IsFlexible() bool { return v.Version >= 4 } +func (v *LeaderAndISRResponse) RequestKind() Request { return &LeaderAndISRRequest{Version: v.Version} } + +func (v *LeaderAndISRResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 4 + _ = isFlexible + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + if version >= 0 && version <= 4 { + v := v.Partitions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + if version >= 0 && version <= 4 { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Partition + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if version >= 5 { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.TopicID + dst = kbin.AppendUuid(dst, v) + } + { + v := v.Partitions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + if version >= 0 && version <= 4 { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Partition + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *LeaderAndISRResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *LeaderAndISRResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *LeaderAndISRResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 4 + _ = isFlexible + s := v + { + v := b.Int16() + s.ErrorCode = v + } + if version >= 0 && version <= 4 { + v := s.Partitions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]LeaderAndISRResponseTopicPartition, l)...) 
+ } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + if version >= 0 && version <= 4 { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + { + v := b.Int32() + s.Partition = v + } + { + v := b.Int16() + s.ErrorCode = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Partitions = v + } + if version >= 5 { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]LeaderAndISRResponseTopic, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Uuid() + s.TopicID = v + } + { + v := s.Partitions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]LeaderAndISRResponseTopicPartition, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + if version >= 0 && version <= 4 { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + { + v := b.Int32() + s.Partition = v + } + { + v := b.Int16() + s.ErrorCode = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Partitions = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrLeaderAndISRResponse returns a pointer to a default LeaderAndISRResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrLeaderAndISRResponse() *LeaderAndISRResponse { + var v LeaderAndISRResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to LeaderAndISRResponse. +func (v *LeaderAndISRResponse) Default() { +} + +// NewLeaderAndISRResponse returns a default LeaderAndISRResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewLeaderAndISRResponse() LeaderAndISRResponse { + var v LeaderAndISRResponse + v.Default() + return v +} + +type StopReplicaRequestTopicPartitionState struct { + Partition int32 + + // This field has a default of -1. + LeaderEpoch int32 + + Delete bool + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to StopReplicaRequestTopicPartitionState. +func (v *StopReplicaRequestTopicPartitionState) Default() { + v.LeaderEpoch = -1 +} + +// NewStopReplicaRequestTopicPartitionState returns a default StopReplicaRequestTopicPartitionState +// This is a shortcut for creating a struct and calling Default yourself. 
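Since `AppendTo` emits only the message body and `readFrom` branches on the receiver's `Version`, a local round trip of the LeaderAndISRRequest above has to set the version on both sides. A minimal sketch with made-up partition state (not something to send to a live cluster, per the comment above):

```go
package main

import (
	"fmt"
	"log"

	"github.com/twmb/franz-go/pkg/kmsg"
)

func main() {
	req := kmsg.NewLeaderAndISRRequest()
	req.Version = 5 // v4+ uses the flexible (compact) encoding, so IsFlexible() is true
	req.ControllerID = 1

	state := kmsg.NewLeaderAndISRRequestTopicState()
	state.Topic = "example-topic" // placeholder
	part := kmsg.NewLeaderAndISRRequestTopicPartition()
	part.Partition = 0
	part.Leader = 1
	part.ISR = []int32{1, 2, 3}
	part.Replicas = []int32{1, 2, 3}
	state.PartitionStates = append(state.PartitionStates, part)
	req.TopicStates = append(req.TopicStates, state) // v2+ carries TopicStates rather than PartitionStates

	buf := req.AppendTo(nil) // body bytes only, no request header

	var decoded kmsg.LeaderAndISRRequest
	decoded.Version = req.Version // must match; readFrom branches on it
	if err := decoded.ReadFrom(buf); err != nil {
		log.Fatal(err)
	}
	fmt.Println(decoded.TopicStates[0].PartitionStates[0].ISR, decoded.IsFlexible())
}
```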
+func NewStopReplicaRequestTopicPartitionState() StopReplicaRequestTopicPartitionState { + var v StopReplicaRequestTopicPartitionState + v.Default() + return v +} + +type StopReplicaRequestTopic struct { + Topic string + + Partition int32 + + Partitions []int32 // v1-v2 + + PartitionStates []StopReplicaRequestTopicPartitionState // v3+ + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to StopReplicaRequestTopic. +func (v *StopReplicaRequestTopic) Default() { +} + +// NewStopReplicaRequestTopic returns a default StopReplicaRequestTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewStopReplicaRequestTopic() StopReplicaRequestTopic { + var v StopReplicaRequestTopic + v.Default() + return v +} + +// StopReplicaRequest is an advanced request that brokers use to stop replicas. +// +// As this is an advanced request and there is little reason to issue it as a +// client, this request is undocumented. +// +// Kafka 2.2 introduced version 1, proposed in KIP-380, which changed the +// layout of the struct to be more memory efficient. +// +// Kafka 2.6 introduced version 3, proposed in KIP-570, reorganizes partitions +// to be stored and adds the leader epoch and delete partition fields per partition. +// Kafka 3.4 introduced version 4 with KIP-866. +type StopReplicaRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + ControllerID int32 + + ControllerEpoch int32 + + // If KRaft controller id is used during migration. See KIP-866. + IsKRaftController bool // v4+ + + // This field has a default of -1. + BrokerEpoch int64 // v1+ + + DeletePartitions bool // v0-v2 + + Topics []StopReplicaRequestTopic + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +func (*StopReplicaRequest) Key() int16 { return 5 } +func (*StopReplicaRequest) MaxVersion() int16 { return 4 } +func (v *StopReplicaRequest) SetVersion(version int16) { v.Version = version } +func (v *StopReplicaRequest) GetVersion() int16 { return v.Version } +func (v *StopReplicaRequest) IsFlexible() bool { return v.Version >= 2 } +func (v *StopReplicaRequest) ResponseKind() Response { + r := &StopReplicaResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. 
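The version gates in StopReplicaRequest are easiest to see by building one: v0-v2 carry `Partitions` plus the request-level `DeletePartitions` flag, while v3+ moves the leader epoch and delete flag into per-partition `PartitionStates`. A hedged sketch with placeholder topic, epoch, and controller values, shown only to illustrate the field layout:

```go
package main

import (
	"fmt"

	"github.com/twmb/franz-go/pkg/kmsg"
)

func main() {
	req := kmsg.NewStopReplicaRequest()
	req.Version = 3 // flexible encoding starts at v2
	req.ControllerID = 1
	req.ControllerEpoch = 10 // placeholder

	topic := kmsg.NewStopReplicaRequestTopic()
	topic.Topic = "example-topic" // placeholder
	state := kmsg.NewStopReplicaRequestTopicPartitionState()
	state.Partition = 0
	state.LeaderEpoch = 42   // placeholder
	state.Delete = true      // v3+: per-partition delete flag replaces DeletePartitions
	topic.PartitionStates = append(topic.PartitionStates, state)
	req.Topics = append(req.Topics, topic)

	body := req.AppendTo(nil)
	fmt.Printf("encoded %d body bytes for key %d version %d\n", len(body), req.Key(), req.GetVersion())
}
```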
+func (v *StopReplicaRequest) RequestWith(ctx context.Context, r Requestor) (*StopReplicaResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*StopReplicaResponse) + return resp, err +} + +func (v *StopReplicaRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + { + v := v.ControllerID + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ControllerEpoch + dst = kbin.AppendInt32(dst, v) + } + if version >= 4 { + v := v.IsKRaftController + dst = kbin.AppendBool(dst, v) + } + if version >= 1 { + v := v.BrokerEpoch + dst = kbin.AppendInt64(dst, v) + } + if version >= 0 && version <= 2 { + v := v.DeletePartitions + dst = kbin.AppendBool(dst, v) + } + { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + if version >= 0 && version <= 0 { + v := v.Partition + dst = kbin.AppendInt32(dst, v) + } + if version >= 1 && version <= 2 { + v := v.Partitions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := v[i] + dst = kbin.AppendInt32(dst, v) + } + } + if version >= 3 { + v := v.PartitionStates + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Partition + dst = kbin.AppendInt32(dst, v) + } + { + v := v.LeaderEpoch + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Delete + dst = kbin.AppendBool(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *StopReplicaRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *StopReplicaRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *StopReplicaRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + s := v + { + v := b.Int32() + s.ControllerID = v + } + { + v := b.Int32() + s.ControllerEpoch = v + } + if version >= 4 { + v := b.Bool() + s.IsKRaftController = v + } + if version >= 1 { + v := b.Int64() + s.BrokerEpoch = v + } + if version >= 0 && version <= 2 { + v := b.Bool() + s.DeletePartitions = v + } + { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]StopReplicaRequestTopic, l)...) 
+ } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + if version >= 0 && version <= 0 { + v := b.Int32() + s.Partition = v + } + if version >= 1 && version <= 2 { + v := s.Partitions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]int32, l)...) + } + for i := int32(0); i < l; i++ { + v := b.Int32() + a[i] = v + } + v = a + s.Partitions = v + } + if version >= 3 { + v := s.PartitionStates + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]StopReplicaRequestTopicPartitionState, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int32() + s.Partition = v + } + { + v := b.Int32() + s.LeaderEpoch = v + } + { + v := b.Bool() + s.Delete = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.PartitionStates = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrStopReplicaRequest returns a pointer to a default StopReplicaRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrStopReplicaRequest() *StopReplicaRequest { + var v StopReplicaRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to StopReplicaRequest. +func (v *StopReplicaRequest) Default() { + v.BrokerEpoch = -1 +} + +// NewStopReplicaRequest returns a default StopReplicaRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewStopReplicaRequest() StopReplicaRequest { + var v StopReplicaRequest + v.Default() + return v +} + +type StopReplicaResponsePartition struct { + Topic string + + Partition int32 + + ErrorCode int16 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to StopReplicaResponsePartition. +func (v *StopReplicaResponsePartition) Default() { +} + +// NewStopReplicaResponsePartition returns a default StopReplicaResponsePartition +// This is a shortcut for creating a struct and calling Default yourself. +func NewStopReplicaResponsePartition() StopReplicaResponsePartition { + var v StopReplicaResponsePartition + v.Default() + return v +} + +// StopReplicasResponse is returned from a StopReplicasRequest. +type StopReplicaResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // Version 3 returns FENCED_LEADER_EPOCH if the leader is stale (KIP-570). + ErrorCode int16 + + Partitions []StopReplicaResponsePartition + + // UnknownTags are tags Kafka sent that we do not know the purpose of. 
+ UnknownTags Tags // v2+ +} + +func (*StopReplicaResponse) Key() int16 { return 5 } +func (*StopReplicaResponse) MaxVersion() int16 { return 4 } +func (v *StopReplicaResponse) SetVersion(version int16) { v.Version = version } +func (v *StopReplicaResponse) GetVersion() int16 { return v.Version } +func (v *StopReplicaResponse) IsFlexible() bool { return v.Version >= 2 } +func (v *StopReplicaResponse) RequestKind() Request { return &StopReplicaRequest{Version: v.Version} } + +func (v *StopReplicaResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.Partitions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Partition + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *StopReplicaResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *StopReplicaResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *StopReplicaResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + s := v + { + v := b.Int16() + s.ErrorCode = v + } + { + v := s.Partitions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]StopReplicaResponsePartition, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + { + v := b.Int32() + s.Partition = v + } + { + v := b.Int16() + s.ErrorCode = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Partitions = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrStopReplicaResponse returns a pointer to a default StopReplicaResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrStopReplicaResponse() *StopReplicaResponse { + var v StopReplicaResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to StopReplicaResponse. +func (v *StopReplicaResponse) Default() { +} + +// NewStopReplicaResponse returns a default StopReplicaResponse +// This is a shortcut for creating a struct and calling Default yourself. 
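Per-partition results in StopReplicaResponse are plain int16 error codes; `kerr.ErrorForCode` turns them into Go errors (nil for code 0). The response below is constructed by hand purely to show the mapping, using an arbitrary error code and a placeholder topic:

```go
package main

import (
	"fmt"

	"github.com/twmb/franz-go/pkg/kerr"
	"github.com/twmb/franz-go/pkg/kmsg"
)

func main() {
	resp := kmsg.NewStopReplicaResponse()
	p := kmsg.NewStopReplicaResponsePartition()
	p.Topic = "example-topic" // placeholder
	p.Partition = 0
	p.ErrorCode = 3 // UNKNOWN_TOPIC_OR_PARTITION, chosen arbitrarily for the example
	resp.Partitions = append(resp.Partitions, p)

	if err := kerr.ErrorForCode(resp.ErrorCode); err != nil {
		fmt.Println("top-level error:", err)
	}
	for _, p := range resp.Partitions {
		fmt.Printf("%s[%d]: %v\n", p.Topic, p.Partition, kerr.ErrorForCode(p.ErrorCode))
	}
}
```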
+func NewStopReplicaResponse() StopReplicaResponse { + var v StopReplicaResponse + v.Default() + return v +} + +type UpdateMetadataRequestTopicPartition struct { + Topic string // v0-v4 + + Partition int32 + + ControllerEpoch int32 + + Leader int32 + + LeaderEpoch int32 + + ISR []int32 + + ZKVersion int32 + + Replicas []int32 + + OfflineReplicas []int32 // v4+ + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v6+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to UpdateMetadataRequestTopicPartition. +func (v *UpdateMetadataRequestTopicPartition) Default() { +} + +// NewUpdateMetadataRequestTopicPartition returns a default UpdateMetadataRequestTopicPartition +// This is a shortcut for creating a struct and calling Default yourself. +func NewUpdateMetadataRequestTopicPartition() UpdateMetadataRequestTopicPartition { + var v UpdateMetadataRequestTopicPartition + v.Default() + return v +} + +type UpdateMetadataRequestTopicState struct { + Topic string + + TopicID [16]byte // v7+ + + PartitionStates []UpdateMetadataRequestTopicPartition + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v6+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to UpdateMetadataRequestTopicState. +func (v *UpdateMetadataRequestTopicState) Default() { +} + +// NewUpdateMetadataRequestTopicState returns a default UpdateMetadataRequestTopicState +// This is a shortcut for creating a struct and calling Default yourself. +func NewUpdateMetadataRequestTopicState() UpdateMetadataRequestTopicState { + var v UpdateMetadataRequestTopicState + v.Default() + return v +} + +type UpdateMetadataRequestLiveBrokerEndpoint struct { + Port int32 + + Host string + + ListenerName string // v3+ + + SecurityProtocol int16 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v6+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to UpdateMetadataRequestLiveBrokerEndpoint. +func (v *UpdateMetadataRequestLiveBrokerEndpoint) Default() { +} + +// NewUpdateMetadataRequestLiveBrokerEndpoint returns a default UpdateMetadataRequestLiveBrokerEndpoint +// This is a shortcut for creating a struct and calling Default yourself. +func NewUpdateMetadataRequestLiveBrokerEndpoint() UpdateMetadataRequestLiveBrokerEndpoint { + var v UpdateMetadataRequestLiveBrokerEndpoint + v.Default() + return v +} + +type UpdateMetadataRequestLiveBroker struct { + ID int32 + + Host string + + Port int32 + + Endpoints []UpdateMetadataRequestLiveBrokerEndpoint // v1+ + + Rack *string // v2+ + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v6+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to UpdateMetadataRequestLiveBroker. +func (v *UpdateMetadataRequestLiveBroker) Default() { +} + +// NewUpdateMetadataRequestLiveBroker returns a default UpdateMetadataRequestLiveBroker +// This is a shortcut for creating a struct and calling Default yourself. +func NewUpdateMetadataRequestLiveBroker() UpdateMetadataRequestLiveBroker { + var v UpdateMetadataRequestLiveBroker + v.Default() + return v +} + +// UpdateMetadataRequest is an advanced request that brokers use to +// issue metadata updates to each other. 
+// +// As this is an advanced request and there is little reason to issue it as a +// client, this request is undocumented. +// +// Version 1 changed the layout of the live brokers. +// +// Kafka 2.2 introduced version 5, proposed in KIP-380, which changed the +// layout of the struct to be more memory efficient. +// Kafka 3.4 introduced version 8 with KIP-866. +type UpdateMetadataRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + ControllerID int32 + + // If KRaft controller id is used during migration. See KIP-866. + IsKRaftController bool // v8+ + + ControllerEpoch int32 + + // This field has a default of -1. + BrokerEpoch int64 // v5+ + + PartitionStates []UpdateMetadataRequestTopicPartition // v0-v4 + + TopicStates []UpdateMetadataRequestTopicState // v5+ + + LiveBrokers []UpdateMetadataRequestLiveBroker + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v6+ +} + +func (*UpdateMetadataRequest) Key() int16 { return 6 } +func (*UpdateMetadataRequest) MaxVersion() int16 { return 8 } +func (v *UpdateMetadataRequest) SetVersion(version int16) { v.Version = version } +func (v *UpdateMetadataRequest) GetVersion() int16 { return v.Version } +func (v *UpdateMetadataRequest) IsFlexible() bool { return v.Version >= 6 } +func (v *UpdateMetadataRequest) ResponseKind() Response { + r := &UpdateMetadataResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. +func (v *UpdateMetadataRequest) RequestWith(ctx context.Context, r Requestor) (*UpdateMetadataResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*UpdateMetadataResponse) + return resp, err +} + +func (v *UpdateMetadataRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 6 + _ = isFlexible + { + v := v.ControllerID + dst = kbin.AppendInt32(dst, v) + } + if version >= 8 { + v := v.IsKRaftController + dst = kbin.AppendBool(dst, v) + } + { + v := v.ControllerEpoch + dst = kbin.AppendInt32(dst, v) + } + if version >= 5 { + v := v.BrokerEpoch + dst = kbin.AppendInt64(dst, v) + } + if version >= 0 && version <= 4 { + v := v.PartitionStates + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + if version >= 0 && version <= 4 { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Partition + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ControllerEpoch + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Leader + dst = kbin.AppendInt32(dst, v) + } + { + v := v.LeaderEpoch + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ISR + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := v[i] + dst = kbin.AppendInt32(dst, v) + } + } + { + v := v.ZKVersion + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Replicas + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := v[i] + dst = kbin.AppendInt32(dst, v) + } + } + if version >= 4 { + v := v.OfflineReplicas + if isFlexible { + 
dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := v[i] + dst = kbin.AppendInt32(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if version >= 5 { + v := v.TopicStates + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + if version >= 7 { + v := v.TopicID + dst = kbin.AppendUuid(dst, v) + } + { + v := v.PartitionStates + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + if version >= 0 && version <= 4 { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Partition + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ControllerEpoch + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Leader + dst = kbin.AppendInt32(dst, v) + } + { + v := v.LeaderEpoch + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ISR + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := v[i] + dst = kbin.AppendInt32(dst, v) + } + } + { + v := v.ZKVersion + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Replicas + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := v[i] + dst = kbin.AppendInt32(dst, v) + } + } + if version >= 4 { + v := v.OfflineReplicas + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := v[i] + dst = kbin.AppendInt32(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + { + v := v.LiveBrokers + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.ID + dst = kbin.AppendInt32(dst, v) + } + if version >= 0 && version <= 0 { + v := v.Host + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + if version >= 0 && version <= 0 { + v := v.Port + dst = kbin.AppendInt32(dst, v) + } + if version >= 1 { + v := v.Endpoints + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Port + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Host + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + if version >= 3 { + v := v.ListenerName + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.SecurityProtocol + dst = kbin.AppendInt16(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if version >= 2 { + v := v.Rack + if 
isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *UpdateMetadataRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *UpdateMetadataRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *UpdateMetadataRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 6 + _ = isFlexible + s := v + { + v := b.Int32() + s.ControllerID = v + } + if version >= 8 { + v := b.Bool() + s.IsKRaftController = v + } + { + v := b.Int32() + s.ControllerEpoch = v + } + if version >= 5 { + v := b.Int64() + s.BrokerEpoch = v + } + if version >= 0 && version <= 4 { + v := s.PartitionStates + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]UpdateMetadataRequestTopicPartition, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + if version >= 0 && version <= 4 { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + { + v := b.Int32() + s.Partition = v + } + { + v := b.Int32() + s.ControllerEpoch = v + } + { + v := b.Int32() + s.Leader = v + } + { + v := b.Int32() + s.LeaderEpoch = v + } + { + v := s.ISR + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]int32, l)...) + } + for i := int32(0); i < l; i++ { + v := b.Int32() + a[i] = v + } + v = a + s.ISR = v + } + { + v := b.Int32() + s.ZKVersion = v + } + { + v := s.Replicas + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]int32, l)...) + } + for i := int32(0); i < l; i++ { + v := b.Int32() + a[i] = v + } + v = a + s.Replicas = v + } + if version >= 4 { + v := s.OfflineReplicas + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]int32, l)...) + } + for i := int32(0); i < l; i++ { + v := b.Int32() + a[i] = v + } + v = a + s.OfflineReplicas = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.PartitionStates = v + } + if version >= 5 { + v := s.TopicStates + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]UpdateMetadataRequestTopicState, l)...) 
+ } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + if version >= 7 { + v := b.Uuid() + s.TopicID = v + } + { + v := s.PartitionStates + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]UpdateMetadataRequestTopicPartition, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + if version >= 0 && version <= 4 { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + { + v := b.Int32() + s.Partition = v + } + { + v := b.Int32() + s.ControllerEpoch = v + } + { + v := b.Int32() + s.Leader = v + } + { + v := b.Int32() + s.LeaderEpoch = v + } + { + v := s.ISR + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]int32, l)...) + } + for i := int32(0); i < l; i++ { + v := b.Int32() + a[i] = v + } + v = a + s.ISR = v + } + { + v := b.Int32() + s.ZKVersion = v + } + { + v := s.Replicas + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]int32, l)...) + } + for i := int32(0); i < l; i++ { + v := b.Int32() + a[i] = v + } + v = a + s.Replicas = v + } + if version >= 4 { + v := s.OfflineReplicas + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]int32, l)...) + } + for i := int32(0); i < l; i++ { + v := b.Int32() + a[i] = v + } + v = a + s.OfflineReplicas = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.PartitionStates = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.TopicStates = v + } + { + v := s.LiveBrokers + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]UpdateMetadataRequestLiveBroker, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int32() + s.ID = v + } + if version >= 0 && version <= 0 { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Host = v + } + if version >= 0 && version <= 0 { + v := b.Int32() + s.Port = v + } + if version >= 1 { + v := s.Endpoints + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]UpdateMetadataRequestLiveBrokerEndpoint, l)...) 
+ } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int32() + s.Port = v + } + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Host = v + } + if version >= 3 { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.ListenerName = v + } + { + v := b.Int16() + s.SecurityProtocol = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Endpoints = v + } + if version >= 2 { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.Rack = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.LiveBrokers = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrUpdateMetadataRequest returns a pointer to a default UpdateMetadataRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrUpdateMetadataRequest() *UpdateMetadataRequest { + var v UpdateMetadataRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to UpdateMetadataRequest. +func (v *UpdateMetadataRequest) Default() { + v.BrokerEpoch = -1 +} + +// NewUpdateMetadataRequest returns a default UpdateMetadataRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewUpdateMetadataRequest() UpdateMetadataRequest { + var v UpdateMetadataRequest + v.Default() + return v +} + +// UpdateMetadataResponses is returned from an UpdateMetadataRequest. +type UpdateMetadataResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + ErrorCode int16 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. 
+ UnknownTags Tags // v6+ +} + +func (*UpdateMetadataResponse) Key() int16 { return 6 } +func (*UpdateMetadataResponse) MaxVersion() int16 { return 8 } +func (v *UpdateMetadataResponse) SetVersion(version int16) { v.Version = version } +func (v *UpdateMetadataResponse) GetVersion() int16 { return v.Version } +func (v *UpdateMetadataResponse) IsFlexible() bool { return v.Version >= 6 } +func (v *UpdateMetadataResponse) RequestKind() Request { + return &UpdateMetadataRequest{Version: v.Version} +} + +func (v *UpdateMetadataResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 6 + _ = isFlexible + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *UpdateMetadataResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *UpdateMetadataResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *UpdateMetadataResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 6 + _ = isFlexible + s := v + { + v := b.Int16() + s.ErrorCode = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrUpdateMetadataResponse returns a pointer to a default UpdateMetadataResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrUpdateMetadataResponse() *UpdateMetadataResponse { + var v UpdateMetadataResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to UpdateMetadataResponse. +func (v *UpdateMetadataResponse) Default() { +} + +// NewUpdateMetadataResponse returns a default UpdateMetadataResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewUpdateMetadataResponse() UpdateMetadataResponse { + var v UpdateMetadataResponse + v.Default() + return v +} + +// ControlledShutdownRequest is an advanced request that can be used to +// sthudown a broker in a controlled manner. +// +// As this is an advanced request and there is little reason to issue it as a +// client, this request is undocumented. However, the minimal amount of fields +// here makes the usage rather obvious. +// +// Kafka 2.2.0 introduced version 2, proposed in KIP-380. +// +// Note that version 0 of this request uses a special encoding format +// where the request does not include the client ID. +type ControlledShutdownRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + BrokerID int32 + + // This field has a default of -1. + BrokerEpoch int64 // v2+ + + // UnknownTags are tags Kafka sent that we do not know the purpose of. 
+ UnknownTags Tags // v3+ +} + +func (*ControlledShutdownRequest) Key() int16 { return 7 } +func (*ControlledShutdownRequest) MaxVersion() int16 { return 3 } +func (v *ControlledShutdownRequest) SetVersion(version int16) { v.Version = version } +func (v *ControlledShutdownRequest) GetVersion() int16 { return v.Version } +func (v *ControlledShutdownRequest) IsFlexible() bool { return v.Version >= 3 } +func (v *ControlledShutdownRequest) ResponseKind() Response { + r := &ControlledShutdownResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. +func (v *ControlledShutdownRequest) RequestWith(ctx context.Context, r Requestor) (*ControlledShutdownResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*ControlledShutdownResponse) + return resp, err +} + +func (v *ControlledShutdownRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 3 + _ = isFlexible + { + v := v.BrokerID + dst = kbin.AppendInt32(dst, v) + } + if version >= 2 { + v := v.BrokerEpoch + dst = kbin.AppendInt64(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *ControlledShutdownRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *ControlledShutdownRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *ControlledShutdownRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 3 + _ = isFlexible + s := v + { + v := b.Int32() + s.BrokerID = v + } + if version >= 2 { + v := b.Int64() + s.BrokerEpoch = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrControlledShutdownRequest returns a pointer to a default ControlledShutdownRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrControlledShutdownRequest() *ControlledShutdownRequest { + var v ControlledShutdownRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to ControlledShutdownRequest. +func (v *ControlledShutdownRequest) Default() { + v.BrokerEpoch = -1 +} + +// NewControlledShutdownRequest returns a default ControlledShutdownRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewControlledShutdownRequest() ControlledShutdownRequest { + var v ControlledShutdownRequest + v.Default() + return v +} + +type ControlledShutdownResponsePartitionsRemaining struct { + Topic string + + Partition int32 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v3+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to ControlledShutdownResponsePartitionsRemaining. +func (v *ControlledShutdownResponsePartitionsRemaining) Default() { +} + +// NewControlledShutdownResponsePartitionsRemaining returns a default ControlledShutdownResponsePartitionsRemaining +// This is a shortcut for creating a struct and calling Default yourself. 
+func NewControlledShutdownResponsePartitionsRemaining() ControlledShutdownResponsePartitionsRemaining { + var v ControlledShutdownResponsePartitionsRemaining + v.Default() + return v +} + +// ControlledShutdownResponse is returned from a ControlledShutdownRequest. +type ControlledShutdownResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + ErrorCode int16 + + PartitionsRemaining []ControlledShutdownResponsePartitionsRemaining + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v3+ +} + +func (*ControlledShutdownResponse) Key() int16 { return 7 } +func (*ControlledShutdownResponse) MaxVersion() int16 { return 3 } +func (v *ControlledShutdownResponse) SetVersion(version int16) { v.Version = version } +func (v *ControlledShutdownResponse) GetVersion() int16 { return v.Version } +func (v *ControlledShutdownResponse) IsFlexible() bool { return v.Version >= 3 } +func (v *ControlledShutdownResponse) RequestKind() Request { + return &ControlledShutdownRequest{Version: v.Version} +} + +func (v *ControlledShutdownResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 3 + _ = isFlexible + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.PartitionsRemaining + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Partition + dst = kbin.AppendInt32(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *ControlledShutdownResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *ControlledShutdownResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *ControlledShutdownResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 3 + _ = isFlexible + s := v + { + v := b.Int16() + s.ErrorCode = v + } + { + v := s.PartitionsRemaining + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]ControlledShutdownResponsePartitionsRemaining, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + { + v := b.Int32() + s.Partition = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.PartitionsRemaining = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrControlledShutdownResponse returns a pointer to a default ControlledShutdownResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. 
+func NewPtrControlledShutdownResponse() *ControlledShutdownResponse { + var v ControlledShutdownResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to ControlledShutdownResponse. +func (v *ControlledShutdownResponse) Default() { +} + +// NewControlledShutdownResponse returns a default ControlledShutdownResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewControlledShutdownResponse() ControlledShutdownResponse { + var v ControlledShutdownResponse + v.Default() + return v +} + +type OffsetCommitRequestTopicPartition struct { + // Partition if a partition to commit offsets for. + Partition int32 + + // Offset is an offset to commit. + Offset int64 + + // Timestamp is the first iteration of tracking how long offset commits + // should persist in Kafka. This field only existed for v1. + // The expiration would be timestamp + offset.retention.minutes, or, if + // timestamp was zero, current time + offset.retention.minutes. + // + // This field has a default of -1. + Timestamp int64 // v1-v1 + + // LeaderEpoch, proposed in KIP-320 and introduced in Kafka 2.1.0, + // is the leader epoch of the record this request is committing. + // + // The initial leader epoch can be determined from a MetadataResponse. + // To skip log truncation checking, use -1. + // + // This field has a default of -1. + LeaderEpoch int32 // v6+ + + // Metadata is optional data to include with committing the offset. This + // can contain information such as which node is doing the committing, etc. + Metadata *string + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v8+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to OffsetCommitRequestTopicPartition. +func (v *OffsetCommitRequestTopicPartition) Default() { + v.Timestamp = -1 + v.LeaderEpoch = -1 +} + +// NewOffsetCommitRequestTopicPartition returns a default OffsetCommitRequestTopicPartition +// This is a shortcut for creating a struct and calling Default yourself. +func NewOffsetCommitRequestTopicPartition() OffsetCommitRequestTopicPartition { + var v OffsetCommitRequestTopicPartition + v.Default() + return v +} + +type OffsetCommitRequestTopic struct { + // Topic is a topic to commit offsets for. + Topic string + + // Partitions contains partitions in a topic for which to commit offsets. + Partitions []OffsetCommitRequestTopicPartition + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v8+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to OffsetCommitRequestTopic. +func (v *OffsetCommitRequestTopic) Default() { +} + +// NewOffsetCommitRequestTopic returns a default OffsetCommitRequestTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewOffsetCommitRequestTopic() OffsetCommitRequestTopic { + var v OffsetCommitRequestTopic + v.Default() + return v +} + +// OffsetCommitRequest commits offsets for consumed topics / partitions in +// a group. +type OffsetCommitRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // Group is the group this request is committing offsets to. + Group string + + // Generation being -1 and group being empty means the group is being used + // to store offsets only. 
No generation validation, no rebalancing. + // + // This field has a default of -1. + Generation int32 // v1+ + + // MemberID is the ID of the client issuing this request in the group. + MemberID string // v1+ + + // InstanceID is the instance ID of this member in the group (KIP-345). + InstanceID *string // v7+ + + // RetentionTimeMillis is how long this commit will persist in Kafka. + // + // This was introduced in v2, replacing an individual topic/partition's + // Timestamp from v1, and was removed in v5 with Kafka 2.1.0. + // + // This was removed because rarely committing consumers could have their + // offsets expired before committing, even though the consumer was still + // active. After restarting or rebalancing, the consumer would now not know + // the last committed offset and would have to start at the beginning or end, + // leading to duplicates or log loss. + // + // Post 2.1.0, if this field is empty, offsets are only deleted once the + // group is empty. Read KIP-211 for more details. + // + // This field has a default of -1. + RetentionTimeMillis int64 // v2-v4 + + // Topics is contains topics and partitions for which to commit offsets. + Topics []OffsetCommitRequestTopic + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v8+ +} + +func (*OffsetCommitRequest) Key() int16 { return 8 } +func (*OffsetCommitRequest) MaxVersion() int16 { return 9 } +func (v *OffsetCommitRequest) SetVersion(version int16) { v.Version = version } +func (v *OffsetCommitRequest) GetVersion() int16 { return v.Version } +func (v *OffsetCommitRequest) IsFlexible() bool { return v.Version >= 8 } +func (v *OffsetCommitRequest) IsGroupCoordinatorRequest() {} +func (v *OffsetCommitRequest) ResponseKind() Response { + r := &OffsetCommitResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. 
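Editor's aside, not part of the vendored file or this patch: RequestWith, defined next, issues the typed request through anything implementing kmsg.Requestor. The sketch below is a minimal, hedged example of how calling code elsewhere in the tree might commit offsets with this type, assuming a *kgo.Client from the vendored kgo package (which satisfies Requestor) and kerr.ErrorForCode from the vendored kerr package; the broker address, group, topic, and offset are placeholders.

```go
package main

import (
	"context"
	"fmt"

	"github.com/twmb/franz-go/pkg/kerr"
	"github.com/twmb/franz-go/pkg/kgo"
	"github.com/twmb/franz-go/pkg/kmsg"
)

func main() {
	// Placeholder seed broker; replace with a real address.
	cl, err := kgo.NewClient(kgo.SeedBrokers("localhost:9092"))
	if err != nil {
		panic(err)
	}
	defer cl.Close()

	// Build the request with the generated constructors so the documented
	// defaults (Generation=-1, RetentionTimeMillis=-1, LeaderEpoch=-1) are set.
	part := kmsg.NewOffsetCommitRequestTopicPartition()
	part.Partition = 0
	part.Offset = 42

	topic := kmsg.NewOffsetCommitRequestTopic()
	topic.Topic = "example-topic"
	topic.Partitions = append(topic.Partitions, part)

	req := kmsg.NewPtrOffsetCommitRequest()
	req.Group = "example-group"
	// Leaving Generation at -1 with an empty MemberID commits offsets without
	// group-generation validation, as the comment above describes.
	req.Topics = append(req.Topics, topic)

	// *kgo.Client implements kmsg.Requestor, so RequestWith returns the
	// concretely typed response.
	resp, err := req.RequestWith(context.Background(), cl)
	if err != nil {
		panic(err)
	}
	for _, t := range resp.Topics {
		for _, p := range t.Partitions {
			if err := kerr.ErrorForCode(p.ErrorCode); err != nil {
				fmt.Printf("commit failed for %s/%d: %v\n", t.Topic, p.Partition, err)
			}
		}
	}
}
```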
+func (v *OffsetCommitRequest) RequestWith(ctx context.Context, r Requestor) (*OffsetCommitResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*OffsetCommitResponse) + return resp, err +} + +func (v *OffsetCommitRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 8 + _ = isFlexible + { + v := v.Group + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + if version >= 1 { + v := v.Generation + dst = kbin.AppendInt32(dst, v) + } + if version >= 1 { + v := v.MemberID + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + if version >= 7 { + v := v.InstanceID + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + if version >= 2 && version <= 4 { + v := v.RetentionTimeMillis + dst = kbin.AppendInt64(dst, v) + } + { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Partitions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Partition + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Offset + dst = kbin.AppendInt64(dst, v) + } + if version >= 1 && version <= 1 { + v := v.Timestamp + dst = kbin.AppendInt64(dst, v) + } + if version >= 6 { + v := v.LeaderEpoch + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Metadata + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *OffsetCommitRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *OffsetCommitRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *OffsetCommitRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 8 + _ = isFlexible + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Group = v + } + if version >= 1 { + v := b.Int32() + s.Generation = v + } + if version >= 1 { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.MemberID = v + } + if version >= 7 { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.InstanceID = v + } + if version >= 2 && version 
<= 4 { + v := b.Int64() + s.RetentionTimeMillis = v + } + { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]OffsetCommitRequestTopic, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + { + v := s.Partitions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]OffsetCommitRequestTopicPartition, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int32() + s.Partition = v + } + { + v := b.Int64() + s.Offset = v + } + if version >= 1 && version <= 1 { + v := b.Int64() + s.Timestamp = v + } + if version >= 6 { + v := b.Int32() + s.LeaderEpoch = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.Metadata = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Partitions = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrOffsetCommitRequest returns a pointer to a default OffsetCommitRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrOffsetCommitRequest() *OffsetCommitRequest { + var v OffsetCommitRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to OffsetCommitRequest. +func (v *OffsetCommitRequest) Default() { + v.Generation = -1 + v.RetentionTimeMillis = -1 +} + +// NewOffsetCommitRequest returns a default OffsetCommitRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewOffsetCommitRequest() OffsetCommitRequest { + var v OffsetCommitRequest + v.Default() + return v +} + +type OffsetCommitResponseTopicPartition struct { + // Partition is the partition in a topic this array slot corresponds to. + Partition int32 + + // ErrorCode is the error for this partition response. + // + // GROUP_AUTHORIZATION_FAILED is returned if the client is not authorized + // for the group. + // + // TOPIC_AUTHORIZATION_FAILED is returned if the client is not authorized + // for the topic / partition. + // + // UNKNOWN_TOPIC_OR_PARTITION is returned if the topic / partition does + // not exist. + // + // OFFSET_METADATA_TOO_LARGE is returned if the request metadata is + // larger than the brokers offset.metadata.max.bytes. + // + // INVALID_GROUP_ID is returned in the requested group ID is invalid. + // + // COORDINATOR_NOT_AVAILABLE is returned if the coordinator is not available + // (due to the requested broker shutting down or it has not completed startup). + // + // COORDINATOR_LOAD_IN_PROGRESS is returned if the group is loading. + // + // NOT_COORDINATOR is returned if the requested broker is not the coordinator + // for the requested group. 
+ // + // ILLEGAL_GENERATION is returned if the request's generation ID is invalid. + // + // UNKNOWN_MEMBER_ID is returned if the group is dead or the group does not + // know of the request's member ID. + // + // REBALANCE_IN_PROGRESS is returned if the group is finishing a rebalance. + // + // INVALID_COMMIT_OFFSET_SIZE is returned if the offset commit results in + // a record batch that is too large (likely due to large metadata). + ErrorCode int16 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v8+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to OffsetCommitResponseTopicPartition. +func (v *OffsetCommitResponseTopicPartition) Default() { +} + +// NewOffsetCommitResponseTopicPartition returns a default OffsetCommitResponseTopicPartition +// This is a shortcut for creating a struct and calling Default yourself. +func NewOffsetCommitResponseTopicPartition() OffsetCommitResponseTopicPartition { + var v OffsetCommitResponseTopicPartition + v.Default() + return v +} + +type OffsetCommitResponseTopic struct { + // Topic is the topic this offset commit response corresponds to. + Topic string + + // Partitions contains responses for each requested partition in + // a topic. + Partitions []OffsetCommitResponseTopicPartition + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v8+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to OffsetCommitResponseTopic. +func (v *OffsetCommitResponseTopic) Default() { +} + +// NewOffsetCommitResponseTopic returns a default OffsetCommitResponseTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewOffsetCommitResponseTopic() OffsetCommitResponseTopic { + var v OffsetCommitResponseTopic + v.Default() + return v +} + +// OffsetCommitResponse is returned from an OffsetCommitRequest. +type OffsetCommitResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ThrottleMillis is how long of a throttle Kafka will apply to the client + // after this request. + // For Kafka < 2.0.0, the throttle is applied before issuing a response. + // For Kafka >= 2.0.0, the throttle is applied after issuing a response. + // + // This request switched at version 4. + ThrottleMillis int32 // v3+ + + // Topics contains responses for each topic / partition in the commit request. + Topics []OffsetCommitResponseTopic + + // UnknownTags are tags Kafka sent that we do not know the purpose of. 
+ UnknownTags Tags // v8+ +} + +func (*OffsetCommitResponse) Key() int16 { return 8 } +func (*OffsetCommitResponse) MaxVersion() int16 { return 9 } +func (v *OffsetCommitResponse) SetVersion(version int16) { v.Version = version } +func (v *OffsetCommitResponse) GetVersion() int16 { return v.Version } +func (v *OffsetCommitResponse) IsFlexible() bool { return v.Version >= 8 } +func (v *OffsetCommitResponse) Throttle() (int32, bool) { return v.ThrottleMillis, v.Version >= 4 } +func (v *OffsetCommitResponse) SetThrottle(throttleMillis int32) { v.ThrottleMillis = throttleMillis } +func (v *OffsetCommitResponse) RequestKind() Request { return &OffsetCommitRequest{Version: v.Version} } + +func (v *OffsetCommitResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 8 + _ = isFlexible + if version >= 3 { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Partitions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Partition + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *OffsetCommitResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *OffsetCommitResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *OffsetCommitResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 8 + _ = isFlexible + s := v + if version >= 3 { + v := b.Int32() + s.ThrottleMillis = v + } + { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]OffsetCommitResponseTopic, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + { + v := s.Partitions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]OffsetCommitResponseTopicPartition, l)...) 
+ } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int32() + s.Partition = v + } + { + v := b.Int16() + s.ErrorCode = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Partitions = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrOffsetCommitResponse returns a pointer to a default OffsetCommitResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrOffsetCommitResponse() *OffsetCommitResponse { + var v OffsetCommitResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to OffsetCommitResponse. +func (v *OffsetCommitResponse) Default() { +} + +// NewOffsetCommitResponse returns a default OffsetCommitResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewOffsetCommitResponse() OffsetCommitResponse { + var v OffsetCommitResponse + v.Default() + return v +} + +type OffsetFetchRequestTopic struct { + // Topic is a topic to fetch offsets for. + Topic string + + // Partitions in a list of partitions in a group to fetch offsets for. + Partitions []int32 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v6+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to OffsetFetchRequestTopic. +func (v *OffsetFetchRequestTopic) Default() { +} + +// NewOffsetFetchRequestTopic returns a default OffsetFetchRequestTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewOffsetFetchRequestTopic() OffsetFetchRequestTopic { + var v OffsetFetchRequestTopic + v.Default() + return v +} + +type OffsetFetchRequestGroupTopic struct { + Topic string + + Partitions []int32 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v6+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to OffsetFetchRequestGroupTopic. +func (v *OffsetFetchRequestGroupTopic) Default() { +} + +// NewOffsetFetchRequestGroupTopic returns a default OffsetFetchRequestGroupTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewOffsetFetchRequestGroupTopic() OffsetFetchRequestGroupTopic { + var v OffsetFetchRequestGroupTopic + v.Default() + return v +} + +type OffsetFetchRequestGroup struct { + Group string + + // The member ID assigned by the group coordinator if using the new consumer protocol (KIP-848). + MemberID *string // v9+ + + // The member epoch if using the new consumer protocol (KIP-848). + // + // This field has a default of -1. + MemberEpoch int32 // v9+ + + Topics []OffsetFetchRequestGroupTopic + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v6+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to OffsetFetchRequestGroup. +func (v *OffsetFetchRequestGroup) Default() { + v.MemberEpoch = -1 +} + +// NewOffsetFetchRequestGroup returns a default OffsetFetchRequestGroup +// This is a shortcut for creating a struct and calling Default yourself. 
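Editor's aside, not part of the vendored file: every message in this file follows the same generated encode/decode pattern seen in the AppendTo/ReadFrom pairs above. AppendTo serializes according to v.Version (switching to compact strings, nullable arrays, and tagged fields once the version is "flexible"), and ReadFrom decodes a buffer under that same version. A small round-trip sketch using only the kmsg API shown in this diff; the version number and field values are arbitrary.

```go
package main

import (
	"fmt"

	"github.com/twmb/franz-go/pkg/kmsg"
)

func main() {
	// Encode an OffsetCommitResponse at a flexible version (v8+ uses compact
	// encodings and tagged fields).
	part := kmsg.NewOffsetCommitResponseTopicPartition()
	part.Partition = 0

	topic := kmsg.NewOffsetCommitResponseTopic()
	topic.Topic = "example-topic"
	topic.Partitions = append(topic.Partitions, part)

	resp := kmsg.NewPtrOffsetCommitResponse()
	resp.Version = 8
	resp.Topics = append(resp.Topics, topic)

	wire := resp.AppendTo(nil) // serialize into a fresh byte slice

	// Decoding requires setting the same version first, since readFrom
	// branches on v.Version to pick the field layout.
	decoded := kmsg.NewPtrOffsetCommitResponse()
	decoded.Version = 8
	if err := decoded.ReadFrom(wire); err != nil {
		panic(err)
	}
	fmt.Println(decoded.Topics[0].Topic, decoded.Topics[0].Partitions[0].Partition)
}
```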
+func NewOffsetFetchRequestGroup() OffsetFetchRequestGroup { + var v OffsetFetchRequestGroup + v.Default() + return v +} + +// OffsetFetchRequest requests the most recent committed offsets for topic +// partitions in a group. +type OffsetFetchRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // Group is the group to fetch offsets for. + Group string // v0-v7 + + // Topics contains topics to fetch offets for. Version 2+ allows this to be + // null to return all topics the client is authorized to describe in the group. + Topics []OffsetFetchRequestTopic // v0-v7 + + // Groups, introduced in v8 (Kafka 3.0), allows for fetching offsets for + // multiple groups at a time. + // + // The fields here mirror the old top level fields on the request, thus they + // are left undocumented. Refer to the top level documentation if necessary. + Groups []OffsetFetchRequestGroup // v8+ + + // RequireStable signifies whether the broker should wait on returning + // unstable offsets, instead setting a retryable error on the relevant + // unstable partitions (UNSTABLE_OFFSET_COMMIT). See KIP-447 for more + // details. + RequireStable bool // v7+ + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v6+ +} + +func (*OffsetFetchRequest) Key() int16 { return 9 } +func (*OffsetFetchRequest) MaxVersion() int16 { return 9 } +func (v *OffsetFetchRequest) SetVersion(version int16) { v.Version = version } +func (v *OffsetFetchRequest) GetVersion() int16 { return v.Version } +func (v *OffsetFetchRequest) IsFlexible() bool { return v.Version >= 6 } +func (v *OffsetFetchRequest) IsGroupCoordinatorRequest() {} +func (v *OffsetFetchRequest) ResponseKind() Response { + r := &OffsetFetchResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. 
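Editor's aside, again a hedged sketch rather than anything this patch prescribes: the request can carry both the legacy single-group fields (encoded for v0-v7) and the batched Groups field (encoded for v8+), and the AppendTo below only serializes whichever set matches the negotiated version, so populating both is harmless. Assumes the same *kgo.Client and kerr helpers, plus the imports (context, fmt, kerr, kgo, kmsg), from the earlier OffsetCommit sketch.

```go
// fetchCommitted fetches all committed offsets for one group.
func fetchCommitted(ctx context.Context, cl *kgo.Client, group string) error {
	req := kmsg.NewPtrOffsetFetchRequest()

	// Legacy fields, encoded when the negotiated version is v0-v7.
	req.Group = group
	req.Topics = nil // v2+: null topics means "everything the group committed"

	// Batched form, encoded when the negotiated version is v8+ (Kafka 3.0+).
	g := kmsg.NewOffsetFetchRequestGroup()
	g.Group = group
	g.Topics = nil
	req.Groups = append(req.Groups, g)

	resp, err := req.RequestWith(ctx, cl)
	if err != nil {
		return err
	}

	// Results arrive in Topics for v0-v7 responses and in Groups for v8+.
	for _, t := range resp.Topics {
		for _, p := range t.Partitions {
			if err := kerr.ErrorForCode(p.ErrorCode); err != nil {
				return err
			}
			fmt.Printf("%s/%d committed at %d\n", t.Topic, p.Partition, p.Offset)
		}
	}
	for _, grp := range resp.Groups {
		if err := kerr.ErrorForCode(grp.ErrorCode); err != nil {
			return err
		}
		for _, t := range grp.Topics {
			for _, p := range t.Partitions {
				fmt.Printf("%s/%d committed at %d\n", t.Topic, p.Partition, p.Offset)
			}
		}
	}
	return nil
}
```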
+func (v *OffsetFetchRequest) RequestWith(ctx context.Context, r Requestor) (*OffsetFetchResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*OffsetFetchResponse) + return resp, err +} + +func (v *OffsetFetchRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 6 + _ = isFlexible + if version >= 0 && version <= 7 { + v := v.Group + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + if version >= 0 && version <= 7 { + v := v.Topics + if version >= 2 { + if isFlexible { + dst = kbin.AppendCompactNullableArrayLen(dst, len(v), v == nil) + } else { + dst = kbin.AppendNullableArrayLen(dst, len(v), v == nil) + } + } else { + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + } + for i := range v { + v := &v[i] + { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Partitions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := v[i] + dst = kbin.AppendInt32(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if version >= 8 { + v := v.Groups + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Group + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + if version >= 9 { + v := v.MemberID + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + if version >= 9 { + v := v.MemberEpoch + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactNullableArrayLen(dst, len(v), v == nil) + } else { + dst = kbin.AppendNullableArrayLen(dst, len(v), v == nil) + } + for i := range v { + v := &v[i] + { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Partitions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := v[i] + dst = kbin.AppendInt32(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if version >= 7 { + v := v.RequireStable + dst = kbin.AppendBool(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *OffsetFetchRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *OffsetFetchRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *OffsetFetchRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 6 + _ = isFlexible + s := v + if version >= 0 && version <= 7 { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = 
b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Group = v + } + if version >= 0 && version <= 7 { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if version < 2 || l == 0 { + a = []OffsetFetchRequestTopic{} + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]OffsetFetchRequestTopic, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + { + v := s.Partitions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]int32, l)...) + } + for i := int32(0); i < l; i++ { + v := b.Int32() + a[i] = v + } + v = a + s.Partitions = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + if version >= 8 { + v := s.Groups + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]OffsetFetchRequestGroup, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Group = v + } + if version >= 9 { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.MemberID = v + } + if version >= 9 { + v := b.Int32() + s.MemberEpoch = v + } + { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if version < 0 || l == 0 { + a = []OffsetFetchRequestGroupTopic{} + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]OffsetFetchRequestGroupTopic, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + { + v := s.Partitions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]int32, l)...) + } + for i := int32(0); i < l; i++ { + v := b.Int32() + a[i] = v + } + v = a + s.Partitions = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Groups = v + } + if version >= 7 { + v := b.Bool() + s.RequireStable = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrOffsetFetchRequest returns a pointer to a default OffsetFetchRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. 
+func NewPtrOffsetFetchRequest() *OffsetFetchRequest { + var v OffsetFetchRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to OffsetFetchRequest. +func (v *OffsetFetchRequest) Default() { +} + +// NewOffsetFetchRequest returns a default OffsetFetchRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewOffsetFetchRequest() OffsetFetchRequest { + var v OffsetFetchRequest + v.Default() + return v +} + +type OffsetFetchResponseTopicPartition struct { + // Partition is the partition in a topic this array slot corresponds to. + Partition int32 + + // Offset is the most recently committed offset for this topic partition + // in a group. + Offset int64 + + // LeaderEpoch is the leader epoch of the last consumed record. + // + // This was proposed in KIP-320 and introduced in Kafka 2.1.0 and allows + // clients to detect log truncation. See the KIP for more details. + // + // This field has a default of -1. + LeaderEpoch int32 // v5+ + + // Metadata is client provided metadata corresponding to the offset commit. + // This can be useful for adding who made the commit, etc. + Metadata *string + + // ErrorCode is the error for this partition response. + // + // GROUP_AUTHORIZATION_FAILED is returned if the client is not authorized + // to the group. + // + // INVALID_GROUP_ID is returned in the requested group ID is invalid. + // + // COORDINATOR_NOT_AVAILABLE is returned if the coordinator is not available + // (due to the requested broker shutting down or it has not completed startup). + // + // COORDINATOR_LOAD_IN_PROGRESS is returned if the group is loading. + // + // NOT_COORDINATOR is returned if the requested broker is not the coordinator + // for the requested group. + // + // UNKNOWN_TOPIC_OR_PARTITION is returned if the requested topic or partition + // is unknown. + // + // UNSTABLE_OFFSET_COMMIT is returned for v7+ if the request set RequireStable. + // See KIP-447 for more details. + ErrorCode int16 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v6+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to OffsetFetchResponseTopicPartition. +func (v *OffsetFetchResponseTopicPartition) Default() { + v.LeaderEpoch = -1 +} + +// NewOffsetFetchResponseTopicPartition returns a default OffsetFetchResponseTopicPartition +// This is a shortcut for creating a struct and calling Default yourself. +func NewOffsetFetchResponseTopicPartition() OffsetFetchResponseTopicPartition { + var v OffsetFetchResponseTopicPartition + v.Default() + return v +} + +type OffsetFetchResponseTopic struct { + // Topic is the topic this offset fetch response corresponds to. + Topic string + + // Partitions contains responses for each requested partition in + // a topic. + Partitions []OffsetFetchResponseTopicPartition + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v6+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to OffsetFetchResponseTopic. +func (v *OffsetFetchResponseTopic) Default() { +} + +// NewOffsetFetchResponseTopic returns a default OffsetFetchResponseTopic +// This is a shortcut for creating a struct and calling Default yourself. 
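Editor's aside: the ErrorCode comments above enumerate the codes a partition can come back with. A hedged sketch of how calling code might map them to Go errors, assuming the ErrorForCode and IsRetriable helpers and the named error values in the vendored pkg/kerr package (plus the errors and kmsg imports); it is an illustration, not part of the vendored file.

```go
// checkPartition decides what to do with one partition of an
// OffsetFetchResponse.
func checkPartition(p kmsg.OffsetFetchResponseTopicPartition) (retry bool, err error) {
	err = kerr.ErrorForCode(p.ErrorCode) // nil when ErrorCode == 0
	switch {
	case err == nil:
		return false, nil // committed offset in p.Offset is usable
	case errors.Is(err, kerr.NotCoordinator):
		return true, err // stale coordinator: rediscover it, then retry
	case kerr.IsRetriable(err):
		return true, err // e.g. COORDINATOR_LOAD_IN_PROGRESS: back off and retry
	default:
		return false, err // e.g. GROUP_AUTHORIZATION_FAILED: not recoverable
	}
}
```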
+func NewOffsetFetchResponseTopic() OffsetFetchResponseTopic { + var v OffsetFetchResponseTopic + v.Default() + return v +} + +type OffsetFetchResponseGroupTopicPartition struct { + Partition int32 + + Offset int64 + + // This field has a default of -1. + LeaderEpoch int32 + + Metadata *string + + ErrorCode int16 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v6+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to OffsetFetchResponseGroupTopicPartition. +func (v *OffsetFetchResponseGroupTopicPartition) Default() { + v.LeaderEpoch = -1 +} + +// NewOffsetFetchResponseGroupTopicPartition returns a default OffsetFetchResponseGroupTopicPartition +// This is a shortcut for creating a struct and calling Default yourself. +func NewOffsetFetchResponseGroupTopicPartition() OffsetFetchResponseGroupTopicPartition { + var v OffsetFetchResponseGroupTopicPartition + v.Default() + return v +} + +type OffsetFetchResponseGroupTopic struct { + Topic string + + Partitions []OffsetFetchResponseGroupTopicPartition + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v6+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to OffsetFetchResponseGroupTopic. +func (v *OffsetFetchResponseGroupTopic) Default() { +} + +// NewOffsetFetchResponseGroupTopic returns a default OffsetFetchResponseGroupTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewOffsetFetchResponseGroupTopic() OffsetFetchResponseGroupTopic { + var v OffsetFetchResponseGroupTopic + v.Default() + return v +} + +type OffsetFetchResponseGroup struct { + Group string + + Topics []OffsetFetchResponseGroupTopic + + ErrorCode int16 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v6+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to OffsetFetchResponseGroup. +func (v *OffsetFetchResponseGroup) Default() { +} + +// NewOffsetFetchResponseGroup returns a default OffsetFetchResponseGroup +// This is a shortcut for creating a struct and calling Default yourself. +func NewOffsetFetchResponseGroup() OffsetFetchResponseGroup { + var v OffsetFetchResponseGroup + v.Default() + return v +} + +// OffsetFetchResponse is returned from an OffsetFetchRequest. +type OffsetFetchResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ThrottleMillis is how long of a throttle Kafka will apply to the client + // after this request. + // For Kafka < 2.0.0, the throttle is applied before issuing a response. + // For Kafka >= 2.0.0, the throttle is applied after issuing a response. + // + // This request switched at version 4. + ThrottleMillis int32 // v3+ + + // Topics contains responses for each requested topic/partition. + Topics []OffsetFetchResponseTopic // v0-v7 + + // ErrorCode is a top level error code that applies to all topic/partitions. + // This will be any group error. + ErrorCode int16 // v2-v7 + + // Groups is the response for all groups. Each field mirrors the fields in the + // top level request, thus they are left undocumented. Refer to the top level + // documentation if necessary. + Groups []OffsetFetchResponseGroup // v8+ + + // UnknownTags are tags Kafka sent that we do not know the purpose of. 
+ UnknownTags Tags // v6+ +} + +func (*OffsetFetchResponse) Key() int16 { return 9 } +func (*OffsetFetchResponse) MaxVersion() int16 { return 9 } +func (v *OffsetFetchResponse) SetVersion(version int16) { v.Version = version } +func (v *OffsetFetchResponse) GetVersion() int16 { return v.Version } +func (v *OffsetFetchResponse) IsFlexible() bool { return v.Version >= 6 } +func (v *OffsetFetchResponse) Throttle() (int32, bool) { return v.ThrottleMillis, v.Version >= 4 } +func (v *OffsetFetchResponse) SetThrottle(throttleMillis int32) { v.ThrottleMillis = throttleMillis } +func (v *OffsetFetchResponse) RequestKind() Request { return &OffsetFetchRequest{Version: v.Version} } + +func (v *OffsetFetchResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 6 + _ = isFlexible + if version >= 3 { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + if version >= 0 && version <= 7 { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Partitions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Partition + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Offset + dst = kbin.AppendInt64(dst, v) + } + if version >= 5 { + v := v.LeaderEpoch + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Metadata + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if version >= 2 && version <= 7 { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + if version >= 8 { + v := v.Groups + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Group + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Partitions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Partition + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Offset + dst = kbin.AppendInt64(dst, v) + } + { + v := v.LeaderEpoch + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Metadata + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = 
kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *OffsetFetchResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *OffsetFetchResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *OffsetFetchResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 6 + _ = isFlexible + s := v + if version >= 3 { + v := b.Int32() + s.ThrottleMillis = v + } + if version >= 0 && version <= 7 { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]OffsetFetchResponseTopic, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + { + v := s.Partitions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]OffsetFetchResponseTopicPartition, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int32() + s.Partition = v + } + { + v := b.Int64() + s.Offset = v + } + if version >= 5 { + v := b.Int32() + s.LeaderEpoch = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.Metadata = v + } + { + v := b.Int16() + s.ErrorCode = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Partitions = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + if version >= 2 && version <= 7 { + v := b.Int16() + s.ErrorCode = v + } + if version >= 8 { + v := s.Groups + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]OffsetFetchResponseGroup, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Group = v + } + { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]OffsetFetchResponseGroupTopic, l)...) 
+ } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + { + v := s.Partitions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]OffsetFetchResponseGroupTopicPartition, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int32() + s.Partition = v + } + { + v := b.Int64() + s.Offset = v + } + { + v := b.Int32() + s.LeaderEpoch = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.Metadata = v + } + { + v := b.Int16() + s.ErrorCode = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Partitions = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + { + v := b.Int16() + s.ErrorCode = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Groups = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrOffsetFetchResponse returns a pointer to a default OffsetFetchResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrOffsetFetchResponse() *OffsetFetchResponse { + var v OffsetFetchResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to OffsetFetchResponse. +func (v *OffsetFetchResponse) Default() { +} + +// NewOffsetFetchResponse returns a default OffsetFetchResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewOffsetFetchResponse() OffsetFetchResponse { + var v OffsetFetchResponse + v.Default() + return v +} + +// FindCoordinatorRequest requests the coordinator for a group or transaction. +// +// This coordinator is different from the broker leader coordinator. This +// coordinator is the partition leader for the partition that is storing +// the group or transaction ID. +type FindCoordinatorRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // CoordinatorKey is the ID to use for finding the coordinator. For groups, + // this is the group name, for transactional producer, this is the + // transactional ID. + CoordinatorKey string // v0-v3 + + // CoordinatorType is the type that key is. Groups are type 0, + // transactional IDs are type 1. + CoordinatorType int8 // v1+ + + // CoordinatorKeys contains all keys to find the coordinator for. + CoordinatorKeys []string // v4+ + + // UnknownTags are tags Kafka sent that we do not know the purpose of. 
+ UnknownTags Tags // v3+ +} + +func (*FindCoordinatorRequest) Key() int16 { return 10 } +func (*FindCoordinatorRequest) MaxVersion() int16 { return 4 } +func (v *FindCoordinatorRequest) SetVersion(version int16) { v.Version = version } +func (v *FindCoordinatorRequest) GetVersion() int16 { return v.Version } +func (v *FindCoordinatorRequest) IsFlexible() bool { return v.Version >= 3 } +func (v *FindCoordinatorRequest) ResponseKind() Response { + r := &FindCoordinatorResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. +func (v *FindCoordinatorRequest) RequestWith(ctx context.Context, r Requestor) (*FindCoordinatorResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*FindCoordinatorResponse) + return resp, err +} + +func (v *FindCoordinatorRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 3 + _ = isFlexible + if version >= 0 && version <= 3 { + v := v.CoordinatorKey + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + if version >= 1 { + v := v.CoordinatorType + dst = kbin.AppendInt8(dst, v) + } + if version >= 4 { + v := v.CoordinatorKeys + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := v[i] + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *FindCoordinatorRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *FindCoordinatorRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *FindCoordinatorRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 3 + _ = isFlexible + s := v + if version >= 0 && version <= 3 { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.CoordinatorKey = v + } + if version >= 1 { + v := b.Int8() + s.CoordinatorType = v + } + if version >= 4 { + v := s.CoordinatorKeys + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]string, l)...) + } + for i := int32(0); i < l; i++ { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + a[i] = v + } + v = a + s.CoordinatorKeys = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrFindCoordinatorRequest returns a pointer to a default FindCoordinatorRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. 
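Editor's aside, illustrative only and not part of the vendored file: looking up a group's coordinator, populating both the single-key fields (v0-v3) and the batched CoordinatorKeys (v4+, KIP-699) so that whichever the negotiated version encodes is filled in. Same assumed *kgo.Client, kerr helpers, and imports as the OffsetCommit sketch.

```go
// findGroupCoordinator resolves which broker coordinates a group.
func findGroupCoordinator(ctx context.Context, cl *kgo.Client, group string) error {
	req := kmsg.NewPtrFindCoordinatorRequest()
	req.CoordinatorType = 0 // 0 = consumer group, 1 = transactional ID
	req.CoordinatorKey = group                               // used for v0-v3
	req.CoordinatorKeys = append(req.CoordinatorKeys, group) // used for v4+

	resp, err := req.RequestWith(ctx, cl)
	if err != nil {
		return err
	}

	// v4+ answers through Coordinators; older versions answer through the
	// top-level fields.
	if len(resp.Coordinators) > 0 {
		for _, c := range resp.Coordinators {
			if err := kerr.ErrorForCode(c.ErrorCode); err != nil {
				return err
			}
			fmt.Printf("coordinator for %q: node %d at %s:%d\n", c.Key, c.NodeID, c.Host, c.Port)
		}
		return nil
	}
	if err := kerr.ErrorForCode(resp.ErrorCode); err != nil {
		return err
	}
	fmt.Printf("coordinator for %q: node %d at %s:%d\n", group, resp.NodeID, resp.Host, resp.Port)
	return nil
}
```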
+func NewPtrFindCoordinatorRequest() *FindCoordinatorRequest { + var v FindCoordinatorRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to FindCoordinatorRequest. +func (v *FindCoordinatorRequest) Default() { +} + +// NewFindCoordinatorRequest returns a default FindCoordinatorRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewFindCoordinatorRequest() FindCoordinatorRequest { + var v FindCoordinatorRequest + v.Default() + return v +} + +type FindCoordinatorResponseCoordinator struct { + Key string + + NodeID int32 + + Host string + + Port int32 + + ErrorCode int16 + + ErrorMessage *string + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v3+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to FindCoordinatorResponseCoordinator. +func (v *FindCoordinatorResponseCoordinator) Default() { +} + +// NewFindCoordinatorResponseCoordinator returns a default FindCoordinatorResponseCoordinator +// This is a shortcut for creating a struct and calling Default yourself. +func NewFindCoordinatorResponseCoordinator() FindCoordinatorResponseCoordinator { + var v FindCoordinatorResponseCoordinator + v.Default() + return v +} + +// FindCoordinatorResponse is returned from a FindCoordinatorRequest. +type FindCoordinatorResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ThrottleMillis is how long of a throttle Kafka will apply to the client + // after this request. + // For Kafka < 2.0.0, the throttle is applied before issuing a response. + // For Kafka >= 2.0.0, the throttle is applied after issuing a response. + // + // This request switched at version 2. + ThrottleMillis int32 // v1+ + + // ErrorCode is the error returned for the request. + // + // GROUP_AUTHORIZATION_FAILED is returned if for a group ID request and the + // client is not authorized to describe groups. + // + // TRANSACTIONAL_ID_AUTHORIZATION_FAILED is returned for a transactional ID + // request and the client is not authorized to describe transactional IDs. + // + // INVALID_REQUEST is returned if not asking for a known type (group, + // or transaction). + // + // COORDINATOR_NOT_AVAILABLE is returned if the coordinator is not available + // for the requested ID, which would be if the group or transactional topic + // does not exist or the partition the requested key maps to is not available. + ErrorCode int16 // v0-v3 + + // ErrorMessage is an informative message if the request errored. + ErrorMessage *string // v1-v3 + + // NodeID is the broker ID of the coordinator. + NodeID int32 // v0-v3 + + // Host is the host of the coordinator. + Host string // v0-v3 + + // Port is the port of the coordinator. + Port int32 // v0-v3 + + // Coordinators, introduced for KIP-699, is the bulk response for + // coordinators. The fields in the struct exactly match the original fields + // in the FindCoordinatorResponse, thus they are left undocumented. + Coordinators []FindCoordinatorResponseCoordinator // v4+ + + // UnknownTags are tags Kafka sent that we do not know the purpose of. 
+ UnknownTags Tags // v3+ +} + +func (*FindCoordinatorResponse) Key() int16 { return 10 } +func (*FindCoordinatorResponse) MaxVersion() int16 { return 4 } +func (v *FindCoordinatorResponse) SetVersion(version int16) { v.Version = version } +func (v *FindCoordinatorResponse) GetVersion() int16 { return v.Version } +func (v *FindCoordinatorResponse) IsFlexible() bool { return v.Version >= 3 } +func (v *FindCoordinatorResponse) Throttle() (int32, bool) { return v.ThrottleMillis, v.Version >= 2 } +func (v *FindCoordinatorResponse) SetThrottle(throttleMillis int32) { + v.ThrottleMillis = throttleMillis +} + +func (v *FindCoordinatorResponse) RequestKind() Request { + return &FindCoordinatorRequest{Version: v.Version} +} + +func (v *FindCoordinatorResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 3 + _ = isFlexible + if version >= 1 { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + if version >= 0 && version <= 3 { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + if version >= 1 && version <= 3 { + v := v.ErrorMessage + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + if version >= 0 && version <= 3 { + v := v.NodeID + dst = kbin.AppendInt32(dst, v) + } + if version >= 0 && version <= 3 { + v := v.Host + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + if version >= 0 && version <= 3 { + v := v.Port + dst = kbin.AppendInt32(dst, v) + } + if version >= 4 { + v := v.Coordinators + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Key + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.NodeID + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Host + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Port + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.ErrorMessage + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *FindCoordinatorResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *FindCoordinatorResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *FindCoordinatorResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 3 + _ = isFlexible + s := v + if version >= 1 { + v := b.Int32() + s.ThrottleMillis = v + } + if version >= 0 && version <= 3 { + v := b.Int16() + s.ErrorCode = v + } + if version >= 1 && version <= 3 { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.ErrorMessage = v + } + if version >= 0 && version <= 3 { + v := b.Int32() + s.NodeID = v + } + 
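The FindCoordinator request and response documented above are plain structs; they are issued through anything satisfying the Requestor interface, such as the kgo.Client vendored in this PR, which exposes a matching Request method. A minimal sketch under stated assumptions: the seed broker address and group name are placeholders, and both the v0-v3 single-key fields and the v4+ CoordinatorKeys/Coordinators fields are filled and checked so the example works regardless of which version the client negotiates.

// Illustrative sketch only, not part of this patch: locate the coordinator
// for a consumer group by issuing a FindCoordinatorRequest directly.
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/twmb/franz-go/pkg/kerr"
	"github.com/twmb/franz-go/pkg/kgo"
	"github.com/twmb/franz-go/pkg/kmsg"
)

func main() {
	cl, err := kgo.NewClient(kgo.SeedBrokers("localhost:9092")) // placeholder broker
	if err != nil {
		log.Fatal(err)
	}
	defer cl.Close()

	req := kmsg.NewPtrFindCoordinatorRequest()
	req.CoordinatorType = 0                          // 0 = group, 1 = transactional ID
	req.CoordinatorKey = "example-group"             // used by versions 0-3
	req.CoordinatorKeys = []string{"example-group"}  // used by version 4+ (KIP-699)

	resp, err := req.RequestWith(context.Background(), cl)
	if err != nil {
		log.Fatal(err)
	}

	// Version 4+ answers in Coordinators; older versions use the top-level fields.
	if len(resp.Coordinators) > 0 {
		c := resp.Coordinators[0]
		if err := kerr.ErrorForCode(c.ErrorCode); err != nil {
			log.Fatal(err)
		}
		fmt.Printf("coordinator: node %d at %s:%d\n", c.NodeID, c.Host, c.Port)
		return
	}
	if err := kerr.ErrorForCode(resp.ErrorCode); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("coordinator: node %d at %s:%d\n", resp.NodeID, resp.Host, resp.Port)
}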
if version >= 0 && version <= 3 { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Host = v + } + if version >= 0 && version <= 3 { + v := b.Int32() + s.Port = v + } + if version >= 4 { + v := s.Coordinators + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]FindCoordinatorResponseCoordinator, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Key = v + } + { + v := b.Int32() + s.NodeID = v + } + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Host = v + } + { + v := b.Int32() + s.Port = v + } + { + v := b.Int16() + s.ErrorCode = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.ErrorMessage = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Coordinators = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrFindCoordinatorResponse returns a pointer to a default FindCoordinatorResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrFindCoordinatorResponse() *FindCoordinatorResponse { + var v FindCoordinatorResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to FindCoordinatorResponse. +func (v *FindCoordinatorResponse) Default() { +} + +// NewFindCoordinatorResponse returns a default FindCoordinatorResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewFindCoordinatorResponse() FindCoordinatorResponse { + var v FindCoordinatorResponse + v.Default() + return v +} + +type JoinGroupRequestProtocol struct { + // Name is a name of a protocol. This is arbitrary, but is used + // in the official client to agree on a partition balancing strategy. + // + // The official client uses range, roundrobin, or sticky (which was + // introduced in KIP-54). + Name string + + // Metadata is arbitrary information to pass along with this + // protocol name for this member. + // + // Note that while this is not documented in any protocol page, + // this is usually a serialized GroupMemberMetadata as described in + // https://cwiki.apache.org/confluence/display/KAFKA/Kafka+Client-side+Assignment+Proposal. + // + // The protocol metadata is where group members will communicate which + // topics they collectively as a group want to consume. + Metadata []byte + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v6+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to JoinGroupRequestProtocol. 
+func (v *JoinGroupRequestProtocol) Default() { +} + +// NewJoinGroupRequestProtocol returns a default JoinGroupRequestProtocol +// This is a shortcut for creating a struct and calling Default yourself. +func NewJoinGroupRequestProtocol() JoinGroupRequestProtocol { + var v JoinGroupRequestProtocol + v.Default() + return v +} + +// JoinGroupRequest issues a request to join a Kafka group. This will create a +// group if one does not exist. If joining an existing group, this may trigger +// a group rebalance. +// +// This will trigger a group rebalance if the request is from the group leader, +// or if the request is from a group member with different metadata, or if the +// request is with a new group member. +// +// Version 4 introduced replying to joins of existing groups with +// MEMBER_ID_REQUIRED, which requires re-issuing the join group with the +// returned member ID. See KIP-394 for more details. +// +// Version 5 introduced InstanceID, allowing for more "static" membership. +// See KIP-345 for more details. +type JoinGroupRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // Group is the group to join. + Group string + + // SessionTimeoutMillis is how long a member in the group can go between + // heartbeats. If a member does not send a heartbeat within this timeout, + // the broker will remove the member from the group and initiate a rebalance. + SessionTimeoutMillis int32 + + // RebalanceTimeoutMillis is how long the broker waits for members to join a group + // once a rebalance begins. Kafka waits for the longest rebalance of all + // members in the group. Member sessions are still alive; heartbeats will be + // replied to with REBALANCE_IN_PROGRESS. Those members must transition to + // joining within this rebalance timeout. Members that do not rejoin within + // this timeout will be removed from the group. Members must commit offsets + // within this timeout. + // + // The first join for a new group has a 3 second grace period for other + // members to join; this grace period is extended until the RebalanceTimeoutMillis + // is up or until 3 seconds lapse with no new members. + // + // This field has a default of -1. + RebalanceTimeoutMillis int32 // v1+ + + // MemberID is the member ID to join the group with. When joining a group for + // the first time, use the empty string. The response will contain the member + // ID that should be used going forward. + MemberID string + + // InstanceID is a user configured ID that is used for making a group + // member "static", allowing many rebalances to be avoided. + InstanceID *string // v5+ + + // ProtocolType is the "type" of protocol being used for the join group. + // The initial group creation sets the type; all additional members must + // have the same type or they will be rejected. + // + // This is completely arbitrary, but the Java client and everything else + // uses "consumer" as the protocol type. + ProtocolType string + + // Protocols contains arbitrary information that group members use + // for rebalancing. All group members must agree on at least one protocol + // name. + Protocols []JoinGroupRequestProtocol + + // Reason is an optional reason the member is joining (or rejoining) the + // group (KIP-800, Kafka 3.2+). + Reason *string // v8+ + + // UnknownTags are tags Kafka sent that we do not know the purpose of. 
+ UnknownTags Tags // v6+ +} + +func (*JoinGroupRequest) Key() int16 { return 11 } +func (*JoinGroupRequest) MaxVersion() int16 { return 9 } +func (v *JoinGroupRequest) SetVersion(version int16) { v.Version = version } +func (v *JoinGroupRequest) GetVersion() int16 { return v.Version } +func (v *JoinGroupRequest) IsFlexible() bool { return v.Version >= 6 } +func (v *JoinGroupRequest) IsGroupCoordinatorRequest() {} +func (v *JoinGroupRequest) ResponseKind() Response { + r := &JoinGroupResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. +func (v *JoinGroupRequest) RequestWith(ctx context.Context, r Requestor) (*JoinGroupResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*JoinGroupResponse) + return resp, err +} + +func (v *JoinGroupRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 6 + _ = isFlexible + { + v := v.Group + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.SessionTimeoutMillis + dst = kbin.AppendInt32(dst, v) + } + if version >= 1 { + v := v.RebalanceTimeoutMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.MemberID + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + if version >= 5 { + v := v.InstanceID + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + { + v := v.ProtocolType + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Protocols + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Name + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Metadata + if isFlexible { + dst = kbin.AppendCompactBytes(dst, v) + } else { + dst = kbin.AppendBytes(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if version >= 8 { + v := v.Reason + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *JoinGroupRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *JoinGroupRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *JoinGroupRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 6 + _ = isFlexible + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Group = v + } + { + v := b.Int32() + s.SessionTimeoutMillis = v + } + if version >= 1 { + v := b.Int32() + s.RebalanceTimeoutMillis = v + } + { + var v string + if unsafe { + if isFlexible { + v = 
b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.MemberID = v + } + if version >= 5 { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.InstanceID = v + } + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.ProtocolType = v + } + { + v := s.Protocols + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]JoinGroupRequestProtocol, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Name = v + } + { + var v []byte + if isFlexible { + v = b.CompactBytes() + } else { + v = b.Bytes() + } + s.Metadata = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Protocols = v + } + if version >= 8 { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.Reason = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrJoinGroupRequest returns a pointer to a default JoinGroupRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrJoinGroupRequest() *JoinGroupRequest { + var v JoinGroupRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to JoinGroupRequest. +func (v *JoinGroupRequest) Default() { + v.RebalanceTimeoutMillis = -1 +} + +// NewJoinGroupRequest returns a default JoinGroupRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewJoinGroupRequest() JoinGroupRequest { + var v JoinGroupRequest + v.Default() + return v +} + +type JoinGroupResponseMember struct { + // MemberID is a member in this group. + MemberID string + + // InstanceID is an instance ID of a member in this group (KIP-345). + InstanceID *string // v5+ + + // ProtocolMetadata is the metadata for this member for this protocol. + // This is usually of type GroupMemberMetadata. + ProtocolMetadata []byte + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v6+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to JoinGroupResponseMember. +func (v *JoinGroupResponseMember) Default() { +} + +// NewJoinGroupResponseMember returns a default JoinGroupResponseMember +// This is a shortcut for creating a struct and calling Default yourself. +func NewJoinGroupResponseMember() JoinGroupResponseMember { + var v JoinGroupResponseMember + v.Default() + return v +} + +// JoinGroupResponse is returned from a JoinGroupRequest. 
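The JoinGroupRequest comments above describe the KIP-394 member-ID dance: a first join of an existing group may be answered with MEMBER_ID_REQUIRED plus the member ID to re-join with. The following is a rough sketch of that flow using only the generated types, under stated assumptions: pkg/kgo's own consumer-group code normally does all of this internally, the package name, group name, protocol name, timeouts, and empty metadata bytes are placeholders, and the error values come from the pkg/kerr package vendored by this PR.

// Illustrative sketch only, not part of this patch.
package kmsgexample

import (
	"context"
	"errors"

	"github.com/twmb/franz-go/pkg/kerr"
	"github.com/twmb/franz-go/pkg/kgo"
	"github.com/twmb/franz-go/pkg/kmsg"
)

// joinOnce issues a JoinGroup and, if the broker replies MEMBER_ID_REQUIRED,
// re-issues the join with the member ID returned in the response.
func joinOnce(ctx context.Context, cl *kgo.Client) (*kmsg.JoinGroupResponse, error) {
	req := kmsg.NewPtrJoinGroupRequest()
	req.Group = "example-group"
	req.SessionTimeoutMillis = 45000
	req.RebalanceTimeoutMillis = 60000
	req.ProtocolType = "consumer"

	proto := kmsg.NewJoinGroupRequestProtocol()
	proto.Name = "range"
	proto.Metadata = nil // normally a serialized GroupMemberMetadata
	req.Protocols = append(req.Protocols, proto)

	resp, err := req.RequestWith(ctx, cl)
	if err != nil {
		return nil, err
	}
	if errors.Is(kerr.ErrorForCode(resp.ErrorCode), kerr.MemberIDRequired) {
		req.MemberID = resp.MemberID // KIP-394: re-join with the assigned member ID
		if resp, err = req.RequestWith(ctx, cl); err != nil {
			return nil, err
		}
	}
	return resp, kerr.ErrorForCode(resp.ErrorCode)
}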
+type JoinGroupResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ThrottleMillis is how long of a throttle Kafka will apply to the client + // after this request. + // For Kafka < 2.0.0, the throttle is applied before issuing a response. + // For Kafka >= 2.0.0, the throttle is applied after issuing a response. + // + // This request switched at version 3. + ThrottleMillis int32 // v2+ + + // ErrorCode is the error for the join group request. + // + // GROUP_AUTHORIZATION_FAILED is returned if the client is not authorized + // to the group (no read perms). + // + // INVALID_GROUP_ID is returned in the requested group ID is invalid. + // + // COORDINATOR_NOT_AVAILABLE is returned if the coordinator is not available + // (due to the requested broker shutting down or it has not completed startup). + // + // COORDINATOR_LOAD_IN_PROGRESS is returned if the group is loading. + // + // NOT_COORDINATOR is returned if the requested broker is not the coordinator + // for the requested group. + // + // INVALID_SESSION_TIMEOUT is returned if the requested SessionTimeout is + // not within the broker's group.{min,max}.session.timeout.ms. + // + // INCONSISTENT_GROUP_PROTOCOL is returned if the requested protocols are + // incompatible with the existing group member's protocols, or if the join + // was for a new group but contained no protocols. + // + // UNKNOWN_MEMBER_ID is returned is the requested group is dead (likely + // just migrated to another coordinator or the group is temporarily unstable), + // or if the request was for a new group but contained a non-empty member ID, + // or if the group does not have the requested member ID (and the client must + // do the new-join-group dance). + // + // MEMBER_ID_REQUIRED is returned on the initial join of an existing group. + // This error was proposed in KIP-394 and introduced in Kafka 2.2.0 to + // prevent flaky clients from continually triggering rebalances and prevent + // these clients from consuming RAM with metadata. If a client sees + // this error, it should re-issue the join with the MemberID in the response. + // Non-flaky clients will join with this new member ID, but flaky clients + // will not join quickly enough before the pending member ID is rotated out + // due to hitting the session.timeout.ms. + // + // GROUP_MAX_SIZE_REACHED is returned as of Kafka 2.2.0 if the group has + // reached a broker's group.max.size. + ErrorCode int16 + + // Generation is the current "generation" of this group. + // + // This field has a default of -1. + Generation int32 + + // ProtocolType is the "type" of protocol being used for this group. + ProtocolType *string // v7+ + + // Protocol is the agreed upon protocol name (i.e. "sticky", "range"). + // + // v7 of this response changed this field to be nullable. + Protocol *string + + // LeaderID is the leader member. + LeaderID string + + // True if the leader must skip running the assignment (KIP-814, Kafka 3.2+). + SkipAssignment bool // v9+ + + // MemberID is the member of the receiving client. + MemberID string + + // Members contains all other members of this group. Only the group leader + // receives the members. The leader is responsible for balancing subscribed + // topic partitions and replying appropriately in a SyncGroup request. + Members []JoinGroupResponseMember + + // UnknownTags are tags Kafka sent that we do not know the purpose of. 
+ UnknownTags Tags // v6+ +} + +func (*JoinGroupResponse) Key() int16 { return 11 } +func (*JoinGroupResponse) MaxVersion() int16 { return 9 } +func (v *JoinGroupResponse) SetVersion(version int16) { v.Version = version } +func (v *JoinGroupResponse) GetVersion() int16 { return v.Version } +func (v *JoinGroupResponse) IsFlexible() bool { return v.Version >= 6 } +func (v *JoinGroupResponse) Throttle() (int32, bool) { return v.ThrottleMillis, v.Version >= 3 } +func (v *JoinGroupResponse) SetThrottle(throttleMillis int32) { v.ThrottleMillis = throttleMillis } +func (v *JoinGroupResponse) RequestKind() Request { return &JoinGroupRequest{Version: v.Version} } + +func (v *JoinGroupResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 6 + _ = isFlexible + if version >= 2 { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.Generation + dst = kbin.AppendInt32(dst, v) + } + if version >= 7 { + v := v.ProtocolType + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + { + v := v.Protocol + if version < 7 { + var vv string + if v != nil { + vv = *v + } + { + v := vv + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + } else { + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + } + { + v := v.LeaderID + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + if version >= 9 { + v := v.SkipAssignment + dst = kbin.AppendBool(dst, v) + } + { + v := v.MemberID + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Members + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.MemberID + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + if version >= 5 { + v := v.InstanceID + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + { + v := v.ProtocolMetadata + if isFlexible { + dst = kbin.AppendCompactBytes(dst, v) + } else { + dst = kbin.AppendBytes(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *JoinGroupResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *JoinGroupResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *JoinGroupResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 6 + _ = isFlexible + s := v + if version >= 2 { + v := b.Int32() + s.ThrottleMillis = v + } + { + v := b.Int16() + s.ErrorCode = v + } + { + v := b.Int32() + s.Generation = v + } + if version >= 7 { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = 
b.NullableString() + } + } + s.ProtocolType = v + } + { + var v *string + if version < 7 { + var vv string + if isFlexible { + if unsafe { + vv = b.UnsafeCompactString() + } else { + vv = b.CompactString() + } + } else { + if unsafe { + vv = b.UnsafeString() + } else { + vv = b.String() + } + } + v = &vv + } else { + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + } + s.Protocol = v + } + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.LeaderID = v + } + if version >= 9 { + v := b.Bool() + s.SkipAssignment = v + } + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.MemberID = v + } + { + v := s.Members + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]JoinGroupResponseMember, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.MemberID = v + } + if version >= 5 { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.InstanceID = v + } + { + var v []byte + if isFlexible { + v = b.CompactBytes() + } else { + v = b.Bytes() + } + s.ProtocolMetadata = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Members = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrJoinGroupResponse returns a pointer to a default JoinGroupResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrJoinGroupResponse() *JoinGroupResponse { + var v JoinGroupResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to JoinGroupResponse. +func (v *JoinGroupResponse) Default() { + v.Generation = -1 +} + +// NewJoinGroupResponse returns a default JoinGroupResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewJoinGroupResponse() JoinGroupResponse { + var v JoinGroupResponse + v.Default() + return v +} + +// HeartbeatRequest issues a heartbeat for a member in a group, ensuring that +// Kafka does not expire the member from the group. +type HeartbeatRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // Group is the group ID this heartbeat is for. + Group string + + // Generation is the group generation this heartbeat is for. + Generation int32 + + // MemberID is the member ID this member is for. + MemberID string + + // InstanceID is the instance ID of this member in the group (KIP-345). 
+ InstanceID *string // v3+ + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v4+ +} + +func (*HeartbeatRequest) Key() int16 { return 12 } +func (*HeartbeatRequest) MaxVersion() int16 { return 4 } +func (v *HeartbeatRequest) SetVersion(version int16) { v.Version = version } +func (v *HeartbeatRequest) GetVersion() int16 { return v.Version } +func (v *HeartbeatRequest) IsFlexible() bool { return v.Version >= 4 } +func (v *HeartbeatRequest) IsGroupCoordinatorRequest() {} +func (v *HeartbeatRequest) ResponseKind() Response { + r := &HeartbeatResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. +func (v *HeartbeatRequest) RequestWith(ctx context.Context, r Requestor) (*HeartbeatResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*HeartbeatResponse) + return resp, err +} + +func (v *HeartbeatRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 4 + _ = isFlexible + { + v := v.Group + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Generation + dst = kbin.AppendInt32(dst, v) + } + { + v := v.MemberID + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + if version >= 3 { + v := v.InstanceID + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *HeartbeatRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *HeartbeatRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *HeartbeatRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 4 + _ = isFlexible + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Group = v + } + { + v := b.Int32() + s.Generation = v + } + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.MemberID = v + } + if version >= 3 { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.InstanceID = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrHeartbeatRequest returns a pointer to a default HeartbeatRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrHeartbeatRequest() *HeartbeatRequest { + var v HeartbeatRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to HeartbeatRequest. 
+func (v *HeartbeatRequest) Default() { +} + +// NewHeartbeatRequest returns a default HeartbeatRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewHeartbeatRequest() HeartbeatRequest { + var v HeartbeatRequest + v.Default() + return v +} + +// HeartbeatResponse is returned from a HeartbeatRequest. +type HeartbeatResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ThrottleMillis is how long of a throttle Kafka will apply to the client + // after this request. + // For Kafka < 2.0.0, the throttle is applied before issuing a response. + // For Kafka >= 2.0.0, the throttle is applied after issuing a response. + // + // This request switched at version 2. + ThrottleMillis int32 // v1+ + + // ErrorCode is the error for the heartbeat request. + // + // GROUP_AUTHORIZATION_FAILED is returned if the client is not authorized + // to the group (no read perms). + // + // INVALID_GROUP_ID is returned in the requested group ID is invalid. + // + // COORDINATOR_NOT_AVAILABLE is returned if the coordinator is not available + // (due to the requested broker shutting down or it has not completed startup). + // + // NOT_COORDINATOR is returned if the requested broker is not the coordinator + // for the requested group. + // + // UNKNOWN_MEMBER_ID is returned if the member ID is not a part of the group, + // or if the group is empty or dead. + // + // ILLEGAL_GENERATION is returned if the request's generation ID is invalid. + // + // REBALANCE_IN_PROGRESS is returned if the group is currently rebalancing. + ErrorCode int16 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v4+ +} + +func (*HeartbeatResponse) Key() int16 { return 12 } +func (*HeartbeatResponse) MaxVersion() int16 { return 4 } +func (v *HeartbeatResponse) SetVersion(version int16) { v.Version = version } +func (v *HeartbeatResponse) GetVersion() int16 { return v.Version } +func (v *HeartbeatResponse) IsFlexible() bool { return v.Version >= 4 } +func (v *HeartbeatResponse) Throttle() (int32, bool) { return v.ThrottleMillis, v.Version >= 2 } +func (v *HeartbeatResponse) SetThrottle(throttleMillis int32) { v.ThrottleMillis = throttleMillis } +func (v *HeartbeatResponse) RequestKind() Request { return &HeartbeatRequest{Version: v.Version} } + +func (v *HeartbeatResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 4 + _ = isFlexible + if version >= 1 { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *HeartbeatResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *HeartbeatResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *HeartbeatResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 4 + _ = isFlexible + s := v + if version >= 1 { + v := b.Int32() + s.ThrottleMillis = v + } + { + v := b.Int16() + s.ErrorCode = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrHeartbeatResponse returns a pointer to a default HeartbeatResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. 
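Tying the heartbeat request and response together, a heartbeat loop built on the raw messages looks roughly like the sketch below. pkg/kgo runs an equivalent loop itself when its group support is used, so this is illustration only; the interval, group, member ID, and generation are assumed to come from an earlier JoinGroup/SyncGroup exchange.

// Illustrative sketch only, not part of this patch.
package kmsgexample

import (
	"context"
	"errors"
	"time"

	"github.com/twmb/franz-go/pkg/kerr"
	"github.com/twmb/franz-go/pkg/kgo"
	"github.com/twmb/franz-go/pkg/kmsg"
)

// heartbeat sends a HeartbeatRequest every 3s until the group rebalances,
// an error is returned, or ctx is cancelled.
func heartbeat(ctx context.Context, cl *kgo.Client, group, memberID string, generation int32) error {
	ticker := time.NewTicker(3 * time.Second)
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-ticker.C:
		}
		req := kmsg.NewPtrHeartbeatRequest()
		req.Group = group
		req.Generation = generation
		req.MemberID = memberID

		resp, err := req.RequestWith(ctx, cl)
		if err != nil {
			return err
		}
		switch err := kerr.ErrorForCode(resp.ErrorCode); {
		case err == nil:
			continue
		case errors.Is(err, kerr.RebalanceInProgress):
			return err // the caller should rejoin the group
		default:
			return err
		}
	}
}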
+func NewPtrHeartbeatResponse() *HeartbeatResponse { + var v HeartbeatResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to HeartbeatResponse. +func (v *HeartbeatResponse) Default() { +} + +// NewHeartbeatResponse returns a default HeartbeatResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewHeartbeatResponse() HeartbeatResponse { + var v HeartbeatResponse + v.Default() + return v +} + +type LeaveGroupRequestMember struct { + MemberID string + + InstanceID *string + + // Reason is an optional reason why this member is leaving the group + // (KIP-800, Kafka 3.2+). + Reason *string // v5+ + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v4+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to LeaveGroupRequestMember. +func (v *LeaveGroupRequestMember) Default() { +} + +// NewLeaveGroupRequestMember returns a default LeaveGroupRequestMember +// This is a shortcut for creating a struct and calling Default yourself. +func NewLeaveGroupRequestMember() LeaveGroupRequestMember { + var v LeaveGroupRequestMember + v.Default() + return v +} + +// LeaveGroupRequest issues a request for a group member to leave the group, +// triggering a group rebalance. +// +// Version 3 changed removed MemberID and added a batch instance+member ID +// way of leaving a group. +type LeaveGroupRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // Group is the group to leave. + Group string + + // MemberID is the member that is leaving. + MemberID string // v0-v2 + + // Members are member and group instance IDs to cause to leave a group. + Members []LeaveGroupRequestMember // v3+ + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v4+ +} + +func (*LeaveGroupRequest) Key() int16 { return 13 } +func (*LeaveGroupRequest) MaxVersion() int16 { return 5 } +func (v *LeaveGroupRequest) SetVersion(version int16) { v.Version = version } +func (v *LeaveGroupRequest) GetVersion() int16 { return v.Version } +func (v *LeaveGroupRequest) IsFlexible() bool { return v.Version >= 4 } +func (v *LeaveGroupRequest) IsGroupCoordinatorRequest() {} +func (v *LeaveGroupRequest) ResponseKind() Response { + r := &LeaveGroupResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. 
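For completeness, leaving a group with the batched v3+ shape documented above can be sketched as follows; the reason string and nil instance ID are placeholders, kmsg.StringPtr is the package's helper for optional strings, and per-member error codes are checked alongside the top-level one.

// Illustrative sketch only, not part of this patch.
package kmsgexample

import (
	"context"

	"github.com/twmb/franz-go/pkg/kerr"
	"github.com/twmb/franz-go/pkg/kgo"
	"github.com/twmb/franz-go/pkg/kmsg"
)

// leaveGroup removes one member from the group using the v3+ Members field.
func leaveGroup(ctx context.Context, cl *kgo.Client, group, memberID string) error {
	member := kmsg.NewLeaveGroupRequestMember()
	member.MemberID = memberID
	member.InstanceID = nil                          // set for static membership (KIP-345)
	member.Reason = kmsg.StringPtr("shutting down")  // v5+, KIP-800

	req := kmsg.NewPtrLeaveGroupRequest()
	req.Group = group
	req.Members = append(req.Members, member)

	resp, err := req.RequestWith(ctx, cl)
	if err != nil {
		return err
	}
	if err := kerr.ErrorForCode(resp.ErrorCode); err != nil {
		return err
	}
	for _, m := range resp.Members {
		if err := kerr.ErrorForCode(m.ErrorCode); err != nil {
			return err
		}
	}
	return nil
}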
+func (v *LeaveGroupRequest) RequestWith(ctx context.Context, r Requestor) (*LeaveGroupResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*LeaveGroupResponse) + return resp, err +} + +func (v *LeaveGroupRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 4 + _ = isFlexible + { + v := v.Group + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + if version >= 0 && version <= 2 { + v := v.MemberID + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + if version >= 3 { + v := v.Members + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.MemberID + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.InstanceID + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + if version >= 5 { + v := v.Reason + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *LeaveGroupRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *LeaveGroupRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *LeaveGroupRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 4 + _ = isFlexible + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Group = v + } + if version >= 0 && version <= 2 { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.MemberID = v + } + if version >= 3 { + v := s.Members + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]LeaveGroupRequestMember, l)...) 
+ } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.MemberID = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.InstanceID = v + } + if version >= 5 { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.Reason = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Members = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrLeaveGroupRequest returns a pointer to a default LeaveGroupRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrLeaveGroupRequest() *LeaveGroupRequest { + var v LeaveGroupRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to LeaveGroupRequest. +func (v *LeaveGroupRequest) Default() { +} + +// NewLeaveGroupRequest returns a default LeaveGroupRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewLeaveGroupRequest() LeaveGroupRequest { + var v LeaveGroupRequest + v.Default() + return v +} + +type LeaveGroupResponseMember struct { + MemberID string + + InstanceID *string + + // An individual member's leave error code. + ErrorCode int16 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v4+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to LeaveGroupResponseMember. +func (v *LeaveGroupResponseMember) Default() { +} + +// NewLeaveGroupResponseMember returns a default LeaveGroupResponseMember +// This is a shortcut for creating a struct and calling Default yourself. +func NewLeaveGroupResponseMember() LeaveGroupResponseMember { + var v LeaveGroupResponseMember + v.Default() + return v +} + +// LeaveGroupResponse is returned from a LeaveGroupRequest. +type LeaveGroupResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ThrottleMillis is how long of a throttle Kafka will apply to the client + // after this request. + // For Kafka < 2.0.0, the throttle is applied before issuing a response. + // For Kafka >= 2.0.0, the throttle is applied after issuing a response. + // + // This request switched at version 2. + ThrottleMillis int32 // v1+ + + // ErrorCode is the error for the leave group request. + // + // GROUP_AUTHORIZATION_FAILED is returned if the client is not authorized + // to the group (no read perms). + // + // INVALID_GROUP_ID is returned in the requested group ID is invalid. + // + // COORDINATOR_NOT_AVAILABLE is returned if the coordinator is not available + // (due to the requested broker shutting down or it has not completed startup). + // + // COORDINATOR_LOAD_IN_PROGRESS is returned if the group is loading. + // + // NOT_COORDINATOR is returned if the requested broker is not the coordinator + // for the requested group. 
+ // + // UNKNOWN_MEMBER_ID is returned if the member ID is not a part of the group, + // or if the group is empty or dead. + ErrorCode int16 + + // Members are the list of members and group instance IDs that left the group. + Members []LeaveGroupResponseMember // v3+ + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v4+ +} + +func (*LeaveGroupResponse) Key() int16 { return 13 } +func (*LeaveGroupResponse) MaxVersion() int16 { return 5 } +func (v *LeaveGroupResponse) SetVersion(version int16) { v.Version = version } +func (v *LeaveGroupResponse) GetVersion() int16 { return v.Version } +func (v *LeaveGroupResponse) IsFlexible() bool { return v.Version >= 4 } +func (v *LeaveGroupResponse) Throttle() (int32, bool) { return v.ThrottleMillis, v.Version >= 2 } +func (v *LeaveGroupResponse) SetThrottle(throttleMillis int32) { v.ThrottleMillis = throttleMillis } +func (v *LeaveGroupResponse) RequestKind() Request { return &LeaveGroupRequest{Version: v.Version} } + +func (v *LeaveGroupResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 4 + _ = isFlexible + if version >= 1 { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + if version >= 3 { + v := v.Members + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.MemberID + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.InstanceID + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *LeaveGroupResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *LeaveGroupResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *LeaveGroupResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 4 + _ = isFlexible + s := v + if version >= 1 { + v := b.Int32() + s.ThrottleMillis = v + } + { + v := b.Int16() + s.ErrorCode = v + } + if version >= 3 { + v := s.Members + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]LeaveGroupResponseMember, l)...) 
+ } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.MemberID = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.InstanceID = v + } + { + v := b.Int16() + s.ErrorCode = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Members = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrLeaveGroupResponse returns a pointer to a default LeaveGroupResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrLeaveGroupResponse() *LeaveGroupResponse { + var v LeaveGroupResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to LeaveGroupResponse. +func (v *LeaveGroupResponse) Default() { +} + +// NewLeaveGroupResponse returns a default LeaveGroupResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewLeaveGroupResponse() LeaveGroupResponse { + var v LeaveGroupResponse + v.Default() + return v +} + +type SyncGroupRequestGroupAssignment struct { + // MemberID is the member this assignment is for. + MemberID string + + // MemberAssignment is the assignment for this member. This is typically + // of type GroupMemberAssignment. + MemberAssignment []byte + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v4+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to SyncGroupRequestGroupAssignment. +func (v *SyncGroupRequestGroupAssignment) Default() { +} + +// NewSyncGroupRequestGroupAssignment returns a default SyncGroupRequestGroupAssignment +// This is a shortcut for creating a struct and calling Default yourself. +func NewSyncGroupRequestGroupAssignment() SyncGroupRequestGroupAssignment { + var v SyncGroupRequestGroupAssignment + v.Default() + return v +} + +// SyncGroupRequest is issued by all group members after they receive a a +// response for JoinGroup. The group leader is responsible for sending member +// assignments with the request; all other members do not. +// +// Once the leader sends the group assignment, all members will be replied to. +type SyncGroupRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // Group is the group ID this sync group is for. + Group string + + // Generation is the group generation this sync is for. + Generation int32 + + // MemberID is the member ID this member is. + MemberID string + + // InstanceID is the instance ID of this member in the group (KIP-345). + InstanceID *string // v3+ + + // ProtocolType is the "type" of protocol being used for this group. + ProtocolType *string // v5+ + + // Protocol is the agreed upon protocol name (i.e. "sticky", "range"). + Protocol *string // v5+ + + // GroupAssignment, sent only from the group leader, is the topic partition + // assignment it has decided on for all members. 
+ GroupAssignment []SyncGroupRequestGroupAssignment + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v4+ +} + +func (*SyncGroupRequest) Key() int16 { return 14 } +func (*SyncGroupRequest) MaxVersion() int16 { return 5 } +func (v *SyncGroupRequest) SetVersion(version int16) { v.Version = version } +func (v *SyncGroupRequest) GetVersion() int16 { return v.Version } +func (v *SyncGroupRequest) IsFlexible() bool { return v.Version >= 4 } +func (v *SyncGroupRequest) IsGroupCoordinatorRequest() {} +func (v *SyncGroupRequest) ResponseKind() Response { + r := &SyncGroupResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. +func (v *SyncGroupRequest) RequestWith(ctx context.Context, r Requestor) (*SyncGroupResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*SyncGroupResponse) + return resp, err +} + +func (v *SyncGroupRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 4 + _ = isFlexible + { + v := v.Group + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Generation + dst = kbin.AppendInt32(dst, v) + } + { + v := v.MemberID + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + if version >= 3 { + v := v.InstanceID + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + if version >= 5 { + v := v.ProtocolType + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + if version >= 5 { + v := v.Protocol + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + { + v := v.GroupAssignment + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.MemberID + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.MemberAssignment + if isFlexible { + dst = kbin.AppendCompactBytes(dst, v) + } else { + dst = kbin.AppendBytes(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *SyncGroupRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *SyncGroupRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *SyncGroupRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 4 + _ = isFlexible + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Group = v + } + { + v := b.Int32() + s.Generation = v + } + { + var v string + if unsafe { + if isFlexible { + v = 
b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.MemberID = v + } + if version >= 3 { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.InstanceID = v + } + if version >= 5 { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.ProtocolType = v + } + if version >= 5 { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.Protocol = v + } + { + v := s.GroupAssignment + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]SyncGroupRequestGroupAssignment, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.MemberID = v + } + { + var v []byte + if isFlexible { + v = b.CompactBytes() + } else { + v = b.Bytes() + } + s.MemberAssignment = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.GroupAssignment = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrSyncGroupRequest returns a pointer to a default SyncGroupRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrSyncGroupRequest() *SyncGroupRequest { + var v SyncGroupRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to SyncGroupRequest. +func (v *SyncGroupRequest) Default() { +} + +// NewSyncGroupRequest returns a default SyncGroupRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewSyncGroupRequest() SyncGroupRequest { + var v SyncGroupRequest + v.Default() + return v +} + +// SyncGroupResponse is returned from a SyncGroupRequest. +type SyncGroupResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ThrottleMillis is how long of a throttle Kafka will apply to the client + // after this request. + // For Kafka < 2.0.0, the throttle is applied before issuing a response. + // For Kafka >= 2.0.0, the throttle is applied after issuing a response. + // + // This request switched at version 2. + ThrottleMillis int32 // v1+ + + // ErrorCode is the error for the sync group request. + // + // GROUP_AUTHORIZATION_FAILED is returned if the client is not authorized + // to the group (no read perms). + // + // INVALID_GROUP_ID is returned in the requested group ID is invalid. + // + // COORDINATOR_NOT_AVAILABLE is returned if the coordinator is not available. + // + // NOT_COORDINATOR is returned if the requested broker is not the coordinator + // for the requested group. 
+ // + // UNKNOWN_MEMBER_ID is returned if the member ID is not a part of the group, + // or if the group is empty or dead. + // + // ILLEGAL_GENERATION is returned if the request's generation ID is invalid. + // + // REBALANCE_IN_PROGRESS is returned if the group switched back to rebalancing. + // + // UNKNOWN_SERVER_ERROR is returned if the store of the group assignment + // resulted in a too large message. + ErrorCode int16 + + // ProtocolType is the "type" of protocol being used for this group. + ProtocolType *string // v5+ + + // Protocol is the agreed upon protocol name (i.e. "sticky", "range"). + Protocol *string // v5+ + + // MemberAssignment is the assignment for this member that the leader + // determined. + MemberAssignment []byte + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v4+ +} + +func (*SyncGroupResponse) Key() int16 { return 14 } +func (*SyncGroupResponse) MaxVersion() int16 { return 5 } +func (v *SyncGroupResponse) SetVersion(version int16) { v.Version = version } +func (v *SyncGroupResponse) GetVersion() int16 { return v.Version } +func (v *SyncGroupResponse) IsFlexible() bool { return v.Version >= 4 } +func (v *SyncGroupResponse) Throttle() (int32, bool) { return v.ThrottleMillis, v.Version >= 2 } +func (v *SyncGroupResponse) SetThrottle(throttleMillis int32) { v.ThrottleMillis = throttleMillis } +func (v *SyncGroupResponse) RequestKind() Request { return &SyncGroupRequest{Version: v.Version} } + +func (v *SyncGroupResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 4 + _ = isFlexible + if version >= 1 { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + if version >= 5 { + v := v.ProtocolType + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + if version >= 5 { + v := v.Protocol + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + { + v := v.MemberAssignment + if isFlexible { + dst = kbin.AppendCompactBytes(dst, v) + } else { + dst = kbin.AppendBytes(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *SyncGroupResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *SyncGroupResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *SyncGroupResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 4 + _ = isFlexible + s := v + if version >= 1 { + v := b.Int32() + s.ThrottleMillis = v + } + { + v := b.Int16() + s.ErrorCode = v + } + if version >= 5 { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.ProtocolType = v + } + if version >= 5 { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.Protocol = v + } + { + var v []byte + if isFlexible { + v = b.CompactBytes() + } else { + v = b.Bytes() + } + 
s.MemberAssignment = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrSyncGroupResponse returns a pointer to a default SyncGroupResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrSyncGroupResponse() *SyncGroupResponse { + var v SyncGroupResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to SyncGroupResponse. +func (v *SyncGroupResponse) Default() { +} + +// NewSyncGroupResponse returns a default SyncGroupResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewSyncGroupResponse() SyncGroupResponse { + var v SyncGroupResponse + v.Default() + return v +} + +// DescribeGroupsRequest requests metadata for group IDs. +type DescribeGroupsRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // Groups is an array of group IDs to request metadata for. + Groups []string + + // IncludeAuthorizedOperations, introduced in Kafka 2.3.0, specifies + // whether to include a bitfield of AclOperations this client can perform + // on the groups. See KIP-430 for more details. + IncludeAuthorizedOperations bool // v3+ + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v5+ +} + +func (*DescribeGroupsRequest) Key() int16 { return 15 } +func (*DescribeGroupsRequest) MaxVersion() int16 { return 5 } +func (v *DescribeGroupsRequest) SetVersion(version int16) { v.Version = version } +func (v *DescribeGroupsRequest) GetVersion() int16 { return v.Version } +func (v *DescribeGroupsRequest) IsFlexible() bool { return v.Version >= 5 } +func (v *DescribeGroupsRequest) IsGroupCoordinatorRequest() {} +func (v *DescribeGroupsRequest) ResponseKind() Response { + r := &DescribeGroupsResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. 
+func (v *DescribeGroupsRequest) RequestWith(ctx context.Context, r Requestor) (*DescribeGroupsResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*DescribeGroupsResponse) + return resp, err +} + +func (v *DescribeGroupsRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 5 + _ = isFlexible + { + v := v.Groups + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := v[i] + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + } + if version >= 3 { + v := v.IncludeAuthorizedOperations + dst = kbin.AppendBool(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *DescribeGroupsRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *DescribeGroupsRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *DescribeGroupsRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 5 + _ = isFlexible + s := v + { + v := s.Groups + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]string, l)...) + } + for i := int32(0); i < l; i++ { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + a[i] = v + } + v = a + s.Groups = v + } + if version >= 3 { + v := b.Bool() + s.IncludeAuthorizedOperations = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrDescribeGroupsRequest returns a pointer to a default DescribeGroupsRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrDescribeGroupsRequest() *DescribeGroupsRequest { + var v DescribeGroupsRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DescribeGroupsRequest. +func (v *DescribeGroupsRequest) Default() { +} + +// NewDescribeGroupsRequest returns a default DescribeGroupsRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewDescribeGroupsRequest() DescribeGroupsRequest { + var v DescribeGroupsRequest + v.Default() + return v +} + +type DescribeGroupsResponseGroupMember struct { + // MemberID is the member ID of a member in this group. + MemberID string + + // InstanceID is the instance ID of this member in the group (KIP-345). + InstanceID *string // v4+ + + // ClientID is the client ID used by this member. + ClientID string + + // ClientHost is the host this client is running on. + ClientHost string + + // ProtocolMetadata is the metadata this member included when joining + // the group. If using normal (Java-like) consumers, this will be of + // type GroupMemberMetadata. + ProtocolMetadata []byte + + // MemberAssignment is the assignment for this member in the group. + // If using normal (Java-like) consumers, this will be of type + // GroupMemberAssignment. + MemberAssignment []byte + + // UnknownTags are tags Kafka sent that we do not know the purpose of. 
+ UnknownTags Tags // v5+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DescribeGroupsResponseGroupMember. +func (v *DescribeGroupsResponseGroupMember) Default() { +} + +// NewDescribeGroupsResponseGroupMember returns a default DescribeGroupsResponseGroupMember +// This is a shortcut for creating a struct and calling Default yourself. +func NewDescribeGroupsResponseGroupMember() DescribeGroupsResponseGroupMember { + var v DescribeGroupsResponseGroupMember + v.Default() + return v +} + +type DescribeGroupsResponseGroup struct { + // ErrorCode is the error code for an individual group in a request. + // + // GROUP_AUTHORIZATION_FAILED is returned if the client is not authorized + // to describe a group. + // + // INVALID_GROUP_ID is returned if the requested group ID is invalid. + // + // COORDINATOR_NOT_AVAILABLE is returned if the coordinator for this + // group is not yet active. + // + // COORDINATOR_LOAD_IN_PROGRESS is returned if the group is loading. + // + // NOT_COORDINATOR is returned if the requested broker is not the + // coordinator for this group. + ErrorCode int16 + + // Group is the id of this group. + Group string + + // State is the state this group is in. + State string + + // ProtocolType is the "type" of protocol being used for this group. + ProtocolType string + + // Protocol is the agreed upon protocol for all members in this group. + Protocol string + + // Members contains members in this group. + Members []DescribeGroupsResponseGroupMember + + // AuthorizedOperations is a bitfield containing which operations the + // the client is allowed to perform on this group. + // This is only returned if requested. + // + // This field has a default of -2147483648. + AuthorizedOperations int32 // v3+ + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v5+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DescribeGroupsResponseGroup. +func (v *DescribeGroupsResponseGroup) Default() { + v.AuthorizedOperations = -2147483648 +} + +// NewDescribeGroupsResponseGroup returns a default DescribeGroupsResponseGroup +// This is a shortcut for creating a struct and calling Default yourself. +func NewDescribeGroupsResponseGroup() DescribeGroupsResponseGroup { + var v DescribeGroupsResponseGroup + v.Default() + return v +} + +// DescribeGroupsResponse is returned from a DescribeGroupsRequest. +type DescribeGroupsResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ThrottleMillis is how long of a throttle Kafka will apply to the client + // after this request. + // For Kafka < 2.0.0, the throttle is applied before issuing a response. + // For Kafka >= 2.0.0, the throttle is applied after issuing a response. + // + // This request switched at version 2. + ThrottleMillis int32 // v1+ + + // Groups is an array of group metadata. + Groups []DescribeGroupsResponseGroup + + // UnknownTags are tags Kafka sent that we do not know the purpose of. 
+ UnknownTags Tags // v5+ +} + +func (*DescribeGroupsResponse) Key() int16 { return 15 } +func (*DescribeGroupsResponse) MaxVersion() int16 { return 5 } +func (v *DescribeGroupsResponse) SetVersion(version int16) { v.Version = version } +func (v *DescribeGroupsResponse) GetVersion() int16 { return v.Version } +func (v *DescribeGroupsResponse) IsFlexible() bool { return v.Version >= 5 } +func (v *DescribeGroupsResponse) Throttle() (int32, bool) { return v.ThrottleMillis, v.Version >= 2 } +func (v *DescribeGroupsResponse) SetThrottle(throttleMillis int32) { v.ThrottleMillis = throttleMillis } +func (v *DescribeGroupsResponse) RequestKind() Request { + return &DescribeGroupsRequest{Version: v.Version} +} + +func (v *DescribeGroupsResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 5 + _ = isFlexible + if version >= 1 { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Groups + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.Group + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.State + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.ProtocolType + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Protocol + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Members + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.MemberID + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + if version >= 4 { + v := v.InstanceID + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + { + v := v.ClientID + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.ClientHost + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.ProtocolMetadata + if isFlexible { + dst = kbin.AppendCompactBytes(dst, v) + } else { + dst = kbin.AppendBytes(dst, v) + } + } + { + v := v.MemberAssignment + if isFlexible { + dst = kbin.AppendCompactBytes(dst, v) + } else { + dst = kbin.AppendBytes(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if version >= 3 { + v := v.AuthorizedOperations + dst = kbin.AppendInt32(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *DescribeGroupsResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *DescribeGroupsResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *DescribeGroupsResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} 
+ version := v.Version + _ = version + isFlexible := version >= 5 + _ = isFlexible + s := v + if version >= 1 { + v := b.Int32() + s.ThrottleMillis = v + } + { + v := s.Groups + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]DescribeGroupsResponseGroup, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int16() + s.ErrorCode = v + } + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Group = v + } + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.State = v + } + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.ProtocolType = v + } + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Protocol = v + } + { + v := s.Members + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]DescribeGroupsResponseGroupMember, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.MemberID = v + } + if version >= 4 { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.InstanceID = v + } + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.ClientID = v + } + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.ClientHost = v + } + { + var v []byte + if isFlexible { + v = b.CompactBytes() + } else { + v = b.Bytes() + } + s.ProtocolMetadata = v + } + { + var v []byte + if isFlexible { + v = b.CompactBytes() + } else { + v = b.Bytes() + } + s.MemberAssignment = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Members = v + } + if version >= 3 { + v := b.Int32() + s.AuthorizedOperations = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Groups = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrDescribeGroupsResponse returns a pointer to a default DescribeGroupsResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. 
+func NewPtrDescribeGroupsResponse() *DescribeGroupsResponse { + var v DescribeGroupsResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DescribeGroupsResponse. +func (v *DescribeGroupsResponse) Default() { +} + +// NewDescribeGroupsResponse returns a default DescribeGroupsResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewDescribeGroupsResponse() DescribeGroupsResponse { + var v DescribeGroupsResponse + v.Default() + return v +} + +// ListGroupsRequest issues a request to list all groups. +// +// To list all groups in a cluster, this must be issued to every broker. +type ListGroupsRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // StatesFilter, proposed in KIP-518 and introduced in Kafka 2.6.0, + // allows filtering groups by state, where a state is any of + // "Preparing", "PreparingRebalance", "CompletingRebalance", "Stable", + // "Dead", or "Empty". If empty, all groups are returned. + StatesFilter []string // v4+ + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v3+ +} + +func (*ListGroupsRequest) Key() int16 { return 16 } +func (*ListGroupsRequest) MaxVersion() int16 { return 4 } +func (v *ListGroupsRequest) SetVersion(version int16) { v.Version = version } +func (v *ListGroupsRequest) GetVersion() int16 { return v.Version } +func (v *ListGroupsRequest) IsFlexible() bool { return v.Version >= 3 } +func (v *ListGroupsRequest) ResponseKind() Response { + r := &ListGroupsResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. +func (v *ListGroupsRequest) RequestWith(ctx context.Context, r Requestor) (*ListGroupsResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*ListGroupsResponse) + return resp, err +} + +func (v *ListGroupsRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 3 + _ = isFlexible + if version >= 4 { + v := v.StatesFilter + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := v[i] + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *ListGroupsRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *ListGroupsRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *ListGroupsRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 3 + _ = isFlexible + s := v + if version >= 4 { + v := s.StatesFilter + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]string, l)...) 
+ } + for i := int32(0); i < l; i++ { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + a[i] = v + } + v = a + s.StatesFilter = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrListGroupsRequest returns a pointer to a default ListGroupsRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrListGroupsRequest() *ListGroupsRequest { + var v ListGroupsRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to ListGroupsRequest. +func (v *ListGroupsRequest) Default() { +} + +// NewListGroupsRequest returns a default ListGroupsRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewListGroupsRequest() ListGroupsRequest { + var v ListGroupsRequest + v.Default() + return v +} + +type ListGroupsResponseGroup struct { + // Group is a Kafka group. + Group string + + // ProtocolType is the protocol type in use by the group. + ProtocolType string + + // The group state. + GroupState string // v4+ + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v3+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to ListGroupsResponseGroup. +func (v *ListGroupsResponseGroup) Default() { +} + +// NewListGroupsResponseGroup returns a default ListGroupsResponseGroup +// This is a shortcut for creating a struct and calling Default yourself. +func NewListGroupsResponseGroup() ListGroupsResponseGroup { + var v ListGroupsResponseGroup + v.Default() + return v +} + +// ListGroupsResponse is returned from a ListGroupsRequest. +type ListGroupsResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ThrottleMillis is how long of a throttle Kafka will apply to the client + // after this request. + // For Kafka < 2.0.0, the throttle is applied before issuing a response. + // For Kafka >= 2.0.0, the throttle is applied after issuing a response. + // + // This request switched at version 2. + ThrottleMillis int32 // v1+ + + // ErrorCode is the error returned for the list groups request. + // + // COORDINATOR_NOT_AVAILABLE is returned if the coordinator is not yet active. + // + // COORDINATOR_LOAD_IN_PROGRESS is returned if the group manager is loading. + ErrorCode int16 + + // Groups is the list of groups Kafka knows of. + Groups []ListGroupsResponseGroup + + // UnknownTags are tags Kafka sent that we do not know the purpose of. 
+ UnknownTags Tags // v3+ +} + +func (*ListGroupsResponse) Key() int16 { return 16 } +func (*ListGroupsResponse) MaxVersion() int16 { return 4 } +func (v *ListGroupsResponse) SetVersion(version int16) { v.Version = version } +func (v *ListGroupsResponse) GetVersion() int16 { return v.Version } +func (v *ListGroupsResponse) IsFlexible() bool { return v.Version >= 3 } +func (v *ListGroupsResponse) Throttle() (int32, bool) { return v.ThrottleMillis, v.Version >= 2 } +func (v *ListGroupsResponse) SetThrottle(throttleMillis int32) { v.ThrottleMillis = throttleMillis } +func (v *ListGroupsResponse) RequestKind() Request { return &ListGroupsRequest{Version: v.Version} } + +func (v *ListGroupsResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 3 + _ = isFlexible + if version >= 1 { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.Groups + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Group + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.ProtocolType + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + if version >= 4 { + v := v.GroupState + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *ListGroupsResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *ListGroupsResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *ListGroupsResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 3 + _ = isFlexible + s := v + if version >= 1 { + v := b.Int32() + s.ThrottleMillis = v + } + { + v := b.Int16() + s.ErrorCode = v + } + { + v := s.Groups + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]ListGroupsResponseGroup, l)...) 
+ } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Group = v + } + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.ProtocolType = v + } + if version >= 4 { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.GroupState = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Groups = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrListGroupsResponse returns a pointer to a default ListGroupsResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrListGroupsResponse() *ListGroupsResponse { + var v ListGroupsResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to ListGroupsResponse. +func (v *ListGroupsResponse) Default() { +} + +// NewListGroupsResponse returns a default ListGroupsResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewListGroupsResponse() ListGroupsResponse { + var v ListGroupsResponse + v.Default() + return v +} + +// SASLHandshakeRequest begins the sasl authentication flow. Note that Kerberos +// GSSAPI authentication has its own unique flow. +type SASLHandshakeRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // Mechanism is the mechanism to use for the sasl handshake (e.g., "PLAIN"). + // + // For version 0, if this mechanism is supported, it is expected that the + // client immediately authenticates using this mechanism. Note that the + // only mechanism exclusive to v0 is PLAIN. + // + // For version 1, if the mechanism is supported, the next request to issue + // is SASLHandshakeRequest. + Mechanism string +} + +func (*SASLHandshakeRequest) Key() int16 { return 17 } +func (*SASLHandshakeRequest) MaxVersion() int16 { return 1 } +func (v *SASLHandshakeRequest) SetVersion(version int16) { v.Version = version } +func (v *SASLHandshakeRequest) GetVersion() int16 { return v.Version } +func (v *SASLHandshakeRequest) IsFlexible() bool { return false } +func (v *SASLHandshakeRequest) ResponseKind() Response { + r := &SASLHandshakeResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. 
+func (v *SASLHandshakeRequest) RequestWith(ctx context.Context, r Requestor) (*SASLHandshakeResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*SASLHandshakeResponse) + return resp, err +} + +func (v *SASLHandshakeRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + { + v := v.Mechanism + dst = kbin.AppendString(dst, v) + } + return dst +} + +func (v *SASLHandshakeRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *SASLHandshakeRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *SASLHandshakeRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + s := v + { + var v string + if unsafe { + v = b.UnsafeString() + } else { + v = b.String() + } + s.Mechanism = v + } + return b.Complete() +} + +// NewPtrSASLHandshakeRequest returns a pointer to a default SASLHandshakeRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrSASLHandshakeRequest() *SASLHandshakeRequest { + var v SASLHandshakeRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to SASLHandshakeRequest. +func (v *SASLHandshakeRequest) Default() { +} + +// NewSASLHandshakeRequest returns a default SASLHandshakeRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewSASLHandshakeRequest() SASLHandshakeRequest { + var v SASLHandshakeRequest + v.Default() + return v +} + +// SASLHandshakeResponse is returned for a SASLHandshakeRequest. +type SASLHandshakeResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ErrorCode is non-zero for ILLEGAL_SASL_STATE, meaning a sasl handshake + // is not expected at this point in the connection, or UNSUPPORTED_SASL_MECHANISM, + // meaning the requested mechanism is not supported. + ErrorCode int16 + + // SupportedMechanisms is the list of mechanisms supported if this request + // errored. + SupportedMechanisms []string +} + +func (*SASLHandshakeResponse) Key() int16 { return 17 } +func (*SASLHandshakeResponse) MaxVersion() int16 { return 1 } +func (v *SASLHandshakeResponse) SetVersion(version int16) { v.Version = version } +func (v *SASLHandshakeResponse) GetVersion() int16 { return v.Version } +func (v *SASLHandshakeResponse) IsFlexible() bool { return false } +func (v *SASLHandshakeResponse) RequestKind() Request { + return &SASLHandshakeRequest{Version: v.Version} +} + +func (v *SASLHandshakeResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.SupportedMechanisms + dst = kbin.AppendArrayLen(dst, len(v)) + for i := range v { + v := v[i] + dst = kbin.AppendString(dst, v) + } + } + return dst +} + +func (v *SASLHandshakeResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *SASLHandshakeResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *SASLHandshakeResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + s := v + { + v := b.Int16() + s.ErrorCode = v + } + { + v := s.SupportedMechanisms + a := v + var l int32 + l = b.ArrayLen() + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]string, l)...) 
+ } + for i := int32(0); i < l; i++ { + var v string + if unsafe { + v = b.UnsafeString() + } else { + v = b.String() + } + a[i] = v + } + v = a + s.SupportedMechanisms = v + } + return b.Complete() +} + +// NewPtrSASLHandshakeResponse returns a pointer to a default SASLHandshakeResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrSASLHandshakeResponse() *SASLHandshakeResponse { + var v SASLHandshakeResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to SASLHandshakeResponse. +func (v *SASLHandshakeResponse) Default() { +} + +// NewSASLHandshakeResponse returns a default SASLHandshakeResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewSASLHandshakeResponse() SASLHandshakeResponse { + var v SASLHandshakeResponse + v.Default() + return v +} + +// ApiVersionsRequest requests what API versions a Kafka broker supports. +// +// Note that the client does not know the version a broker supports before +// sending this request. +// +// Before Kafka 2.4.0, if the client used a version larger than the broker +// understands, the broker would reply with an UNSUPPORTED_VERSION error using +// the version 0 message format (i.e., 6 bytes long!). The client should retry +// with a lower version. +// +// After Kafka 2.4.0, if the client uses a version larger than the broker +// understands, the broker replies with UNSUPPORTED_VERSIONS using the version +// 0 message format but additionally includes the api versions the broker does +// support. +type ApiVersionsRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ClientSoftwareName, added for KIP-511 with Kafka 2.4.0, is the name of the + // client issuing this request. The broker can use this to enrich its own + // debugging information of which version of what clients are connected. + // + // If using v3, this field is required and must match the following pattern: + // + // [a-zA-Z0-9](?:[a-zA-Z0-9\\-.]*[a-zA-Z0-9])? + ClientSoftwareName string // v3+ + + // ClientSoftwareVersion is the version of the software name in the prior + // field. It must match the same regex (thus, this is also required). + ClientSoftwareVersion string // v3+ + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v3+ +} + +func (*ApiVersionsRequest) Key() int16 { return 18 } +func (*ApiVersionsRequest) MaxVersion() int16 { return 3 } +func (v *ApiVersionsRequest) SetVersion(version int16) { v.Version = version } +func (v *ApiVersionsRequest) GetVersion() int16 { return v.Version } +func (v *ApiVersionsRequest) IsFlexible() bool { return v.Version >= 3 } +func (v *ApiVersionsRequest) ResponseKind() Response { + r := &ApiVersionsResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. 
+func (v *ApiVersionsRequest) RequestWith(ctx context.Context, r Requestor) (*ApiVersionsResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*ApiVersionsResponse) + return resp, err +} + +func (v *ApiVersionsRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 3 + _ = isFlexible + if version >= 3 { + v := v.ClientSoftwareName + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + if version >= 3 { + v := v.ClientSoftwareVersion + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *ApiVersionsRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *ApiVersionsRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *ApiVersionsRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 3 + _ = isFlexible + s := v + if version >= 3 { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.ClientSoftwareName = v + } + if version >= 3 { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.ClientSoftwareVersion = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrApiVersionsRequest returns a pointer to a default ApiVersionsRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrApiVersionsRequest() *ApiVersionsRequest { + var v ApiVersionsRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to ApiVersionsRequest. +func (v *ApiVersionsRequest) Default() { +} + +// NewApiVersionsRequest returns a default ApiVersionsRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewApiVersionsRequest() ApiVersionsRequest { + var v ApiVersionsRequest + v.Default() + return v +} + +type ApiVersionsResponseApiKey struct { + // ApiKey is the key of a message request. + ApiKey int16 + + // MinVersion is the min version a broker supports for an API key. + MinVersion int16 + + // MaxVersion is the max version a broker supports for an API key. + MaxVersion int16 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v3+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to ApiVersionsResponseApiKey. +func (v *ApiVersionsResponseApiKey) Default() { +} + +// NewApiVersionsResponseApiKey returns a default ApiVersionsResponseApiKey +// This is a shortcut for creating a struct and calling Default yourself. +func NewApiVersionsResponseApiKey() ApiVersionsResponseApiKey { + var v ApiVersionsResponseApiKey + v.Default() + return v +} + +type ApiVersionsResponseSupportedFeature struct { + // The name of the feature. + Name string + + // The minimum supported version for the feature. 
+ MinVersion int16 + + // The maximum supported version for the feature. + MaxVersion int16 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v3+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to ApiVersionsResponseSupportedFeature. +func (v *ApiVersionsResponseSupportedFeature) Default() { +} + +// NewApiVersionsResponseSupportedFeature returns a default ApiVersionsResponseSupportedFeature +// This is a shortcut for creating a struct and calling Default yourself. +func NewApiVersionsResponseSupportedFeature() ApiVersionsResponseSupportedFeature { + var v ApiVersionsResponseSupportedFeature + v.Default() + return v +} + +type ApiVersionsResponseFinalizedFeature struct { + // The name of the feature. + Name string + + // The cluster-wide finalized max version level for the feature. + MaxVersionLevel int16 + + // The cluster-wide finalized min version level for the feature. + MinVersionLevel int16 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v3+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to ApiVersionsResponseFinalizedFeature. +func (v *ApiVersionsResponseFinalizedFeature) Default() { +} + +// NewApiVersionsResponseFinalizedFeature returns a default ApiVersionsResponseFinalizedFeature +// This is a shortcut for creating a struct and calling Default yourself. +func NewApiVersionsResponseFinalizedFeature() ApiVersionsResponseFinalizedFeature { + var v ApiVersionsResponseFinalizedFeature + v.Default() + return v +} + +// ApiVersionsResponse is returned from an ApiVersionsRequest. +type ApiVersionsResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ErrorCode is UNSUPPORTED_VERSION if the request was issued with a higher + // version than the broker supports. Before Kafka 2.4.0, if this error is + // returned, the rest of this struct will be empty. + // + // Starting in Kafka 2.4.0 (with version 3), even with an UNSUPPORTED_VERSION + // error, the broker still replies with the ApiKeys it supports. + ErrorCode int16 + + // ApiKeys is an array corresponding to API keys the broker supports + // and the range of supported versions for each key. + ApiKeys []ApiVersionsResponseApiKey + + // ThrottleMillis is how long of a throttle Kafka will apply to the client + // after this request. + // For Kafka < 2.0.0, the throttle is applied before issuing a response. + // For Kafka >= 2.0.0, the throttle is applied after issuing a response. + // + // This request switched at version 2. + ThrottleMillis int32 // v1+ + + // Features supported by the broker (see KIP-584). + SupportedFeatures []ApiVersionsResponseSupportedFeature // tag 0 + + // The monotonically increasing epoch for the finalized features information, + // where -1 indicates an unknown epoch. + // + // This field has a default of -1. + FinalizedFeaturesEpoch int64 // tag 1 + + // The list of cluster-wide finalized features (only valid if + // FinalizedFeaturesEpoch is >= 0). + FinalizedFeatures []ApiVersionsResponseFinalizedFeature // tag 2 + + // Set by a KRaft controller if the required configurations for ZK migration + // are present + ZkMigrationReady bool // tag 3 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. 
+ UnknownTags Tags // v3+ +} + +func (*ApiVersionsResponse) Key() int16 { return 18 } +func (*ApiVersionsResponse) MaxVersion() int16 { return 3 } +func (v *ApiVersionsResponse) SetVersion(version int16) { v.Version = version } +func (v *ApiVersionsResponse) GetVersion() int16 { return v.Version } +func (v *ApiVersionsResponse) IsFlexible() bool { return v.Version >= 3 } +func (v *ApiVersionsResponse) Throttle() (int32, bool) { return v.ThrottleMillis, v.Version >= 2 } +func (v *ApiVersionsResponse) SetThrottle(throttleMillis int32) { v.ThrottleMillis = throttleMillis } +func (v *ApiVersionsResponse) RequestKind() Request { return &ApiVersionsRequest{Version: v.Version} } + +func (v *ApiVersionsResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 3 + _ = isFlexible + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.ApiKeys + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.ApiKey + dst = kbin.AppendInt16(dst, v) + } + { + v := v.MinVersion + dst = kbin.AppendInt16(dst, v) + } + { + v := v.MaxVersion + dst = kbin.AppendInt16(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if version >= 1 { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + if isFlexible { + var toEncode []uint32 + if len(v.SupportedFeatures) > 0 { + toEncode = append(toEncode, 0) + } + if v.FinalizedFeaturesEpoch != -1 { + toEncode = append(toEncode, 1) + } + if len(v.FinalizedFeatures) > 0 { + toEncode = append(toEncode, 2) + } + if v.ZkMigrationReady != false { + toEncode = append(toEncode, 3) + } + dst = kbin.AppendUvarint(dst, uint32(len(toEncode)+v.UnknownTags.Len())) + for _, tag := range toEncode { + switch tag { + case 0: + { + v := v.SupportedFeatures + dst = kbin.AppendUvarint(dst, 0) + sized := false + lenAt := len(dst) + fSupportedFeatures: + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Name + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.MinVersion + dst = kbin.AppendInt16(dst, v) + } + { + v := v.MaxVersion + dst = kbin.AppendInt16(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + if !sized { + dst = kbin.AppendUvarint(dst[:lenAt], uint32(len(dst[lenAt:]))) + sized = true + goto fSupportedFeatures + } + } + case 1: + { + v := v.FinalizedFeaturesEpoch + dst = kbin.AppendUvarint(dst, 1) + dst = kbin.AppendUvarint(dst, 8) + dst = kbin.AppendInt64(dst, v) + } + case 2: + { + v := v.FinalizedFeatures + dst = kbin.AppendUvarint(dst, 2) + sized := false + lenAt := len(dst) + fFinalizedFeatures: + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Name + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.MaxVersionLevel + dst = kbin.AppendInt16(dst, v) + } + { + v := v.MinVersionLevel + dst = kbin.AppendInt16(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + if 
!sized { + dst = kbin.AppendUvarint(dst[:lenAt], uint32(len(dst[lenAt:]))) + sized = true + goto fFinalizedFeatures + } + } + case 3: + { + v := v.ZkMigrationReady + dst = kbin.AppendUvarint(dst, 3) + dst = kbin.AppendUvarint(dst, 1) + dst = kbin.AppendBool(dst, v) + } + } + } + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *ApiVersionsResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *ApiVersionsResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *ApiVersionsResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 3 + _ = isFlexible + s := v + { + v := b.Int16() + s.ErrorCode = v + } + { + v := s.ApiKeys + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]ApiVersionsResponseApiKey, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int16() + s.ApiKey = v + } + { + v := b.Int16() + s.MinVersion = v + } + { + v := b.Int16() + s.MaxVersion = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.ApiKeys = v + } + if version >= 1 { + v := b.Int32() + s.ThrottleMillis = v + } + if isFlexible { + for i := b.Uvarint(); i > 0; i-- { + switch key := b.Uvarint(); key { + default: + s.UnknownTags.Set(key, b.Span(int(b.Uvarint()))) + case 0: + b := kbin.Reader{Src: b.Span(int(b.Uvarint()))} + v := s.SupportedFeatures + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]ApiVersionsResponseSupportedFeature, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Name = v + } + { + v := b.Int16() + s.MinVersion = v + } + { + v := b.Int16() + s.MaxVersion = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.SupportedFeatures = v + if err := b.Complete(); err != nil { + return err + } + case 1: + b := kbin.Reader{Src: b.Span(int(b.Uvarint()))} + v := b.Int64() + s.FinalizedFeaturesEpoch = v + if err := b.Complete(); err != nil { + return err + } + case 2: + b := kbin.Reader{Src: b.Span(int(b.Uvarint()))} + v := s.FinalizedFeatures + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]ApiVersionsResponseFinalizedFeature, l)...) 
+ } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Name = v + } + { + v := b.Int16() + s.MaxVersionLevel = v + } + { + v := b.Int16() + s.MinVersionLevel = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.FinalizedFeatures = v + if err := b.Complete(); err != nil { + return err + } + case 3: + b := kbin.Reader{Src: b.Span(int(b.Uvarint()))} + v := b.Bool() + s.ZkMigrationReady = v + if err := b.Complete(); err != nil { + return err + } + } + } + } + return b.Complete() +} + +// NewPtrApiVersionsResponse returns a pointer to a default ApiVersionsResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrApiVersionsResponse() *ApiVersionsResponse { + var v ApiVersionsResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to ApiVersionsResponse. +func (v *ApiVersionsResponse) Default() { + v.FinalizedFeaturesEpoch = -1 +} + +// NewApiVersionsResponse returns a default ApiVersionsResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewApiVersionsResponse() ApiVersionsResponse { + var v ApiVersionsResponse + v.Default() + return v +} + +type CreateTopicsRequestTopicReplicaAssignment struct { + // Partition is a partition to create. + Partition int32 + + // Replicas are broker IDs the partition must exist on. + Replicas []int32 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v5+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to CreateTopicsRequestTopicReplicaAssignment. +func (v *CreateTopicsRequestTopicReplicaAssignment) Default() { +} + +// NewCreateTopicsRequestTopicReplicaAssignment returns a default CreateTopicsRequestTopicReplicaAssignment +// This is a shortcut for creating a struct and calling Default yourself. +func NewCreateTopicsRequestTopicReplicaAssignment() CreateTopicsRequestTopicReplicaAssignment { + var v CreateTopicsRequestTopicReplicaAssignment + v.Default() + return v +} + +type CreateTopicsRequestTopicConfig struct { + // Name is a topic level config key (e.g. segment.bytes). + Name string + + // Value is a topic level config value (e.g. 1073741824) + Value *string + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v5+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to CreateTopicsRequestTopicConfig. +func (v *CreateTopicsRequestTopicConfig) Default() { +} + +// NewCreateTopicsRequestTopicConfig returns a default CreateTopicsRequestTopicConfig +// This is a shortcut for creating a struct and calling Default yourself. +func NewCreateTopicsRequestTopicConfig() CreateTopicsRequestTopicConfig { + var v CreateTopicsRequestTopicConfig + v.Default() + return v +} + +type CreateTopicsRequestTopic struct { + // Topic is a topic to create. + Topic string + + // NumPartitions is how many partitions to give a topic. This must + // be -1 if specifying partitions manually (see ReplicaAssignment) + // or, starting v4+, to use the broker default partitions. 
+ NumPartitions int32 + + // ReplicationFactor is how many replicas every partition must have. + // This must be -1 if specifying partitions manually (see ReplicaAssignment) + // or, starting v4+, to use the broker default replication factor. + ReplicationFactor int16 + + // ReplicaAssignment is an array to manually dicate replicas and their + // partitions for a topic. If using this, both ReplicationFactor and + // NumPartitions must be -1. + ReplicaAssignment []CreateTopicsRequestTopicReplicaAssignment + + // Configs is an array of key value config pairs for a topic. + // These correspond to Kafka Topic-Level Configs: http://kafka.apache.org/documentation/#topicconfigs. + Configs []CreateTopicsRequestTopicConfig + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v5+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to CreateTopicsRequestTopic. +func (v *CreateTopicsRequestTopic) Default() { +} + +// NewCreateTopicsRequestTopic returns a default CreateTopicsRequestTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewCreateTopicsRequestTopic() CreateTopicsRequestTopic { + var v CreateTopicsRequestTopic + v.Default() + return v +} + +// CreateTopicsRequest creates Kafka topics. +// +// Version 4, introduced in Kafka 2.4.0, implies client support for +// creation defaults. See KIP-464. +// +// Version 5, also in 2.4.0, returns topic configs in the response (KIP-525). +type CreateTopicsRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // Topics is an array of topics to attempt to create. + Topics []CreateTopicsRequestTopic + + // TimeoutMillis is how long Kafka can wait before responding to this request. + // This field has no effect on Kafka's processing of the request; the request + // will continue to be processed if the timeout is reached. If the timeout is + // reached, Kafka will reply with a REQUEST_TIMED_OUT error. + // + // This field has a default of 60000. + TimeoutMillis int32 + + // ValidateOnly is makes this request a dry-run; everything is validated but + // no topics are actually created. + ValidateOnly bool // v1+ + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v5+ +} + +func (*CreateTopicsRequest) Key() int16 { return 19 } +func (*CreateTopicsRequest) MaxVersion() int16 { return 7 } +func (v *CreateTopicsRequest) SetVersion(version int16) { v.Version = version } +func (v *CreateTopicsRequest) GetVersion() int16 { return v.Version } +func (v *CreateTopicsRequest) IsFlexible() bool { return v.Version >= 5 } +func (v *CreateTopicsRequest) Timeout() int32 { return v.TimeoutMillis } +func (v *CreateTopicsRequest) SetTimeout(timeoutMillis int32) { v.TimeoutMillis = timeoutMillis } +func (v *CreateTopicsRequest) IsAdminRequest() {} +func (v *CreateTopicsRequest) ResponseKind() Response { + r := &CreateTopicsResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. 
+func (v *CreateTopicsRequest) RequestWith(ctx context.Context, r Requestor) (*CreateTopicsResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*CreateTopicsResponse) + return resp, err +} + +func (v *CreateTopicsRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 5 + _ = isFlexible + { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.NumPartitions + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ReplicationFactor + dst = kbin.AppendInt16(dst, v) + } + { + v := v.ReplicaAssignment + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Partition + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Replicas + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := v[i] + dst = kbin.AppendInt32(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + { + v := v.Configs + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Name + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Value + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + { + v := v.TimeoutMillis + dst = kbin.AppendInt32(dst, v) + } + if version >= 1 { + v := v.ValidateOnly + dst = kbin.AppendBool(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *CreateTopicsRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *CreateTopicsRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *CreateTopicsRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 5 + _ = isFlexible + s := v + { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]CreateTopicsRequestTopic, l)...) 
+ } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + { + v := b.Int32() + s.NumPartitions = v + } + { + v := b.Int16() + s.ReplicationFactor = v + } + { + v := s.ReplicaAssignment + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]CreateTopicsRequestTopicReplicaAssignment, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int32() + s.Partition = v + } + { + v := s.Replicas + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]int32, l)...) + } + for i := int32(0); i < l; i++ { + v := b.Int32() + a[i] = v + } + v = a + s.Replicas = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.ReplicaAssignment = v + } + { + v := s.Configs + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]CreateTopicsRequestTopicConfig, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Name = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.Value = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Configs = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + { + v := b.Int32() + s.TimeoutMillis = v + } + if version >= 1 { + v := b.Bool() + s.ValidateOnly = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrCreateTopicsRequest returns a pointer to a default CreateTopicsRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrCreateTopicsRequest() *CreateTopicsRequest { + var v CreateTopicsRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to CreateTopicsRequest. +func (v *CreateTopicsRequest) Default() { + v.TimeoutMillis = 60000 +} + +// NewCreateTopicsRequest returns a default CreateTopicsRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewCreateTopicsRequest() CreateTopicsRequest { + var v CreateTopicsRequest + v.Default() + return v +} + +type CreateTopicsResponseTopicConfig struct { + // Name is the configuration name (e.g. segment.bytes). + Name string + + // Value is the value for this config key. If the key is sensitive, + // the value will be null. + Value *string + + // ReadOnly signifies whether this is not a dynamic config option. + ReadOnly bool + + // Source is where this config entry is from. 
See the documentation + // on DescribeConfigsRequest's Source for more details. + // + // This field has a default of -1. + Source int8 + + // IsSensitive signifies whether this is a sensitive config key, which + // is either a password or an unknown type. + IsSensitive bool + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v5+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to CreateTopicsResponseTopicConfig. +func (v *CreateTopicsResponseTopicConfig) Default() { + v.Source = -1 +} + +// NewCreateTopicsResponseTopicConfig returns a default CreateTopicsResponseTopicConfig +// This is a shortcut for creating a struct and calling Default yourself. +func NewCreateTopicsResponseTopicConfig() CreateTopicsResponseTopicConfig { + var v CreateTopicsResponseTopicConfig + v.Default() + return v +} + +type CreateTopicsResponseTopic struct { + // Topic is the topic this response corresponds to. + Topic string + + // The unique topic ID. + TopicID [16]byte // v7+ + + // ErrorCode is the error code for an individual topic creation. + // + // NOT_CONTROLLER is returned if the request was not issued to a Kafka + // controller. + // + // TOPIC_AUTHORIZATION_FAILED is returned if the client is not authorized. + // + // INVALID_REQUEST is returned if the same topic occurred multiple times + // in the request. + // + // POLICY_VIOLATION is returned if the broker is using a + // create.topic.policy.class.name that returns a policy violation. + // + // INVALID_TOPIC_EXCEPTION if the topic collides with another topic when + // both topic's names' periods are replaced with underscores (e.g. + // topic.foo and topic_foo collide). + // + // TOPIC_ALREADY_EXISTS is returned if the topic already exists. + // + // INVALID_PARTITIONS is returned if the requested number of partitions is + // <= 0. + // + // INVALID_REPLICATION_FACTOR is returned if the requested replication + // factor is <= 0. + // + // INVALID_REPLICA_ASSIGNMENT is returned if not all partitions have the same + // number of replicas, or duplica replicas are assigned, or the partitions + // are not consecutive starting from 0. + // + // INVALID_CONFIG is returned if the requested topic config is invalid. + // to create a topic. + ErrorCode int16 + + // ErrorMessage is an informative message if the topic creation failed. + ErrorMessage *string // v1+ + + // ConfigErrorCode is non-zero if configs are unable to be returned. + // + // This is the first tagged field, introduced in version 5. As such, it is + // only possible to be present in v5+. + ConfigErrorCode int16 // tag 0 + + // NumPartitions is how many partitions were created for this topic. + // + // This field has a default of -1. + NumPartitions int32 // v5+ + + // ReplicationFactor is how many replicas every partition has for this topic. + // + // This field has a default of -1. + ReplicationFactor int16 // v5+ + + // Configs contains this topic's configuration. + Configs []CreateTopicsResponseTopicConfig // v5+ + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v5+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to CreateTopicsResponseTopic. 
+func (v *CreateTopicsResponseTopic) Default() { + v.NumPartitions = -1 + v.ReplicationFactor = -1 +} + +// NewCreateTopicsResponseTopic returns a default CreateTopicsResponseTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewCreateTopicsResponseTopic() CreateTopicsResponseTopic { + var v CreateTopicsResponseTopic + v.Default() + return v +} + +// CreateTopicsResponse is returned from a CreateTopicsRequest. +type CreateTopicsResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ThrottleMillis is how long of a throttle Kafka will apply to the client + // after this request. + // For Kafka < 2.0.0, the throttle is applied before issuing a response. + // For Kafka >= 2.0.0, the throttle is applied after issuing a response. + // + // This request switched at version 3. + ThrottleMillis int32 // v2+ + + // Topics contains responses to the requested topic creations. + Topics []CreateTopicsResponseTopic + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v5+ +} + +func (*CreateTopicsResponse) Key() int16 { return 19 } +func (*CreateTopicsResponse) MaxVersion() int16 { return 7 } +func (v *CreateTopicsResponse) SetVersion(version int16) { v.Version = version } +func (v *CreateTopicsResponse) GetVersion() int16 { return v.Version } +func (v *CreateTopicsResponse) IsFlexible() bool { return v.Version >= 5 } +func (v *CreateTopicsResponse) Throttle() (int32, bool) { return v.ThrottleMillis, v.Version >= 3 } +func (v *CreateTopicsResponse) SetThrottle(throttleMillis int32) { v.ThrottleMillis = throttleMillis } +func (v *CreateTopicsResponse) RequestKind() Request { return &CreateTopicsRequest{Version: v.Version} } + +func (v *CreateTopicsResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 5 + _ = isFlexible + if version >= 2 { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + if version >= 7 { + v := v.TopicID + dst = kbin.AppendUuid(dst, v) + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + if version >= 1 { + v := v.ErrorMessage + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + if version >= 5 { + v := v.NumPartitions + dst = kbin.AppendInt32(dst, v) + } + if version >= 5 { + v := v.ReplicationFactor + dst = kbin.AppendInt16(dst, v) + } + if version >= 5 { + v := v.Configs + if isFlexible { + dst = kbin.AppendCompactNullableArrayLen(dst, len(v), v == nil) + } else { + dst = kbin.AppendNullableArrayLen(dst, len(v), v == nil) + } + for i := range v { + v := &v[i] + { + v := v.Name + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Value + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + { + v := v.ReadOnly + dst = kbin.AppendBool(dst, v) + } + { + v := v.Source + dst = kbin.AppendInt8(dst, v) + } + { + v := v.IsSensitive + dst = kbin.AppendBool(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = 
v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + var toEncode []uint32 + if v.ConfigErrorCode != 0 { + toEncode = append(toEncode, 0) + } + dst = kbin.AppendUvarint(dst, uint32(len(toEncode)+v.UnknownTags.Len())) + for _, tag := range toEncode { + switch tag { + case 0: + { + v := v.ConfigErrorCode + dst = kbin.AppendUvarint(dst, 0) + dst = kbin.AppendUvarint(dst, 2) + dst = kbin.AppendInt16(dst, v) + } + } + } + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *CreateTopicsResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *CreateTopicsResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *CreateTopicsResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 5 + _ = isFlexible + s := v + if version >= 2 { + v := b.Int32() + s.ThrottleMillis = v + } + { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]CreateTopicsResponseTopic, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + if version >= 7 { + v := b.Uuid() + s.TopicID = v + } + { + v := b.Int16() + s.ErrorCode = v + } + if version >= 1 { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.ErrorMessage = v + } + if version >= 5 { + v := b.Int32() + s.NumPartitions = v + } + if version >= 5 { + v := b.Int16() + s.ReplicationFactor = v + } + if version >= 5 { + v := s.Configs + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if version < 0 || l == 0 { + a = []CreateTopicsResponseTopicConfig{} + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]CreateTopicsResponseTopicConfig, l)...) 
+ } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Name = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.Value = v + } + { + v := b.Bool() + s.ReadOnly = v + } + { + v := b.Int8() + s.Source = v + } + { + v := b.Bool() + s.IsSensitive = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Configs = v + } + if isFlexible { + for i := b.Uvarint(); i > 0; i-- { + switch key := b.Uvarint(); key { + default: + s.UnknownTags.Set(key, b.Span(int(b.Uvarint()))) + case 0: + b := kbin.Reader{Src: b.Span(int(b.Uvarint()))} + v := b.Int16() + s.ConfigErrorCode = v + if err := b.Complete(); err != nil { + return err + } + } + } + } + } + v = a + s.Topics = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrCreateTopicsResponse returns a pointer to a default CreateTopicsResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrCreateTopicsResponse() *CreateTopicsResponse { + var v CreateTopicsResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to CreateTopicsResponse. +func (v *CreateTopicsResponse) Default() { +} + +// NewCreateTopicsResponse returns a default CreateTopicsResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewCreateTopicsResponse() CreateTopicsResponse { + var v CreateTopicsResponse + v.Default() + return v +} + +type DeleteTopicsRequestTopic struct { + Topic *string + + TopicID [16]byte + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v4+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DeleteTopicsRequestTopic. +func (v *DeleteTopicsRequestTopic) Default() { +} + +// NewDeleteTopicsRequestTopic returns a default DeleteTopicsRequestTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewDeleteTopicsRequestTopic() DeleteTopicsRequestTopic { + var v DeleteTopicsRequestTopic + v.Default() + return v +} + +// DeleteTopicsRequest deletes Kafka topics. +type DeleteTopicsRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // Topics is an array of topics to delete. + TopicNames []string // v0-v5 + + // The name or topic ID of topics to delete. + Topics []DeleteTopicsRequestTopic // v6+ + + // TimeoutMillis is how long Kafka can wait before responding to this request. + // This field has no effect on Kafka's processing of the request; the request + // will continue to be processed if the timeout is reached. If the timeout is + // reached, Kafka will reply with a REQUEST_TIMED_OUT error. + // + // This field has a default of 15000. + TimeoutMillis int32 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. 
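The generated CreateTopics codec above is dense, so a brief usage sketch may help readers of this patch. This is a hypothetical example, not code from this PR: createTopic, the cl client variable, and the topic name are illustrative, and it assumes the vendored *kgo.Client satisfies the Requestor interface that RequestWith accepts.

package main

import (
	"context"
	"fmt"

	"github.com/twmb/franz-go/pkg/kerr"
	"github.com/twmb/franz-go/pkg/kgo"
	"github.com/twmb/franz-go/pkg/kmsg"
)

// createTopic is a hypothetical helper that issues a CreateTopicsRequest
// through a *kgo.Client (assumed to satisfy kmsg's Requestor interface).
func createTopic(ctx context.Context, cl *kgo.Client, name string) error {
	req := kmsg.NewPtrCreateTopicsRequest()
	t := kmsg.NewCreateTopicsRequestTopic()
	t.Topic = name
	t.NumPartitions = -1     // -1: broker default partition count (v4+), per the field docs
	t.ReplicationFactor = -1 // -1: broker default replication factor (v4+)
	req.Topics = append(req.Topics, t)

	resp, err := req.RequestWith(ctx, cl)
	if err != nil {
		return err // transport-level failure
	}
	for _, topic := range resp.Topics {
		// Per-topic failures (e.g. TOPIC_ALREADY_EXISTS) come back as error codes.
		if err := kerr.ErrorForCode(topic.ErrorCode); err != nil {
			return fmt.Errorf("create topic %q: %w", topic.Topic, err)
		}
	}
	return nil
}

Using -1 for both counts defers to the broker defaults allowed from v4+ (KIP-464), matching the field documentation above.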
+ UnknownTags Tags // v4+ +} + +func (*DeleteTopicsRequest) Key() int16 { return 20 } +func (*DeleteTopicsRequest) MaxVersion() int16 { return 6 } +func (v *DeleteTopicsRequest) SetVersion(version int16) { v.Version = version } +func (v *DeleteTopicsRequest) GetVersion() int16 { return v.Version } +func (v *DeleteTopicsRequest) IsFlexible() bool { return v.Version >= 4 } +func (v *DeleteTopicsRequest) Timeout() int32 { return v.TimeoutMillis } +func (v *DeleteTopicsRequest) SetTimeout(timeoutMillis int32) { v.TimeoutMillis = timeoutMillis } +func (v *DeleteTopicsRequest) IsAdminRequest() {} +func (v *DeleteTopicsRequest) ResponseKind() Response { + r := &DeleteTopicsResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. +func (v *DeleteTopicsRequest) RequestWith(ctx context.Context, r Requestor) (*DeleteTopicsResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*DeleteTopicsResponse) + return resp, err +} + +func (v *DeleteTopicsRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 4 + _ = isFlexible + if version >= 0 && version <= 5 { + v := v.TopicNames + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := v[i] + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + } + if version >= 6 { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + { + v := v.TopicID + dst = kbin.AppendUuid(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + { + v := v.TimeoutMillis + dst = kbin.AppendInt32(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *DeleteTopicsRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *DeleteTopicsRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *DeleteTopicsRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 4 + _ = isFlexible + s := v + if version >= 0 && version <= 5 { + v := s.TopicNames + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]string, l)...) 
+ } + for i := int32(0); i < l; i++ { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + a[i] = v + } + v = a + s.TopicNames = v + } + if version >= 6 { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]DeleteTopicsRequestTopic, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.Topic = v + } + { + v := b.Uuid() + s.TopicID = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + { + v := b.Int32() + s.TimeoutMillis = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrDeleteTopicsRequest returns a pointer to a default DeleteTopicsRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrDeleteTopicsRequest() *DeleteTopicsRequest { + var v DeleteTopicsRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DeleteTopicsRequest. +func (v *DeleteTopicsRequest) Default() { + v.TimeoutMillis = 15000 +} + +// NewDeleteTopicsRequest returns a default DeleteTopicsRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewDeleteTopicsRequest() DeleteTopicsRequest { + var v DeleteTopicsRequest + v.Default() + return v +} + +type DeleteTopicsResponseTopic struct { + // Topic is the topic requested for deletion. + Topic *string + + // The topic ID requested for deletion. + TopicID [16]byte // v6+ + + // ErrorCode is the error code returned for an individual topic in + // deletion request. + // + // TOPIC_AUTHORIZATION_FAILED is returned if the client is not authorized + // to delete a topic. + // + // UNKNOWN_TOPIC_OR_PARTITION is returned if the broker does not know of + // the topic. + // + // NOT_CONTROLLER is returned if the request was not issued to a Kafka + // controller. + // + // TOPIC_DELETION_DISABLED is returned for deletion requests version 3+ + // and brokers >= 2.1.0. INVALID_REQUEST is issued for request versions + // 0-2 against brokers >= 2.1.0. Otherwise, the request hangs until it + // times out. + // + // UNSUPPORTED_VERSION is returned when using topic IDs with a cluster + // that is not yet Kafka v2.8+. + // + // UNKNOWN_TOPIC_ID is returned when using topic IDs to a Kafka cluster + // v2.8+ and the topic ID is not found. + ErrorCode int16 + + // ErrorMessage is a message for an error. + ErrorMessage *string // v5+ + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v4+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DeleteTopicsResponseTopic. +func (v *DeleteTopicsResponseTopic) Default() { +} + +// NewDeleteTopicsResponseTopic returns a default DeleteTopicsResponseTopic +// This is a shortcut for creating a struct and calling Default yourself. 
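A similar hypothetical sketch for DeleteTopicsRequest (same imports as the CreateTopics sketch above; deleteTopic and cl are illustrative). It shows the split visible in AppendTo above: TopicNames serializes for request versions 0-5 and Topics for v6+, so populating both keeps the request valid whichever version the client negotiates.

// deleteTopic is a hypothetical helper; same imports as the sketch above.
func deleteTopic(ctx context.Context, cl *kgo.Client, name string) error {
	req := kmsg.NewPtrDeleteTopicsRequest()
	req.TopicNames = []string{name} // serialized only for request versions 0-5
	t := kmsg.NewDeleteTopicsRequestTopic()
	t.Topic = &name // serialized only for v6+ (a TopicID may be used instead)
	req.Topics = append(req.Topics, t)

	resp, err := req.RequestWith(ctx, cl)
	if err != nil {
		return err
	}
	for _, topic := range resp.Topics {
		if err := kerr.ErrorForCode(topic.ErrorCode); err != nil {
			return err
		}
	}
	return nil
}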
+func NewDeleteTopicsResponseTopic() DeleteTopicsResponseTopic { + var v DeleteTopicsResponseTopic + v.Default() + return v +} + +// DeleteTopicsResponse is returned from a DeleteTopicsRequest. +// Version 3 added the TOPIC_DELETION_DISABLED error proposed in KIP-322 +// and introduced in Kafka 2.1.0. Prior, the request timed out. +type DeleteTopicsResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ThrottleMillis is how long of a throttle Kafka will apply to the client + // after this request. + // For Kafka < 2.0.0, the throttle is applied before issuing a response. + // For Kafka >= 2.0.0, the throttle is applied after issuing a response. + // + // This request switched at version 2. + ThrottleMillis int32 // v1+ + + // Topics contains responses for each topic requested for deletion. + Topics []DeleteTopicsResponseTopic + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v4+ +} + +func (*DeleteTopicsResponse) Key() int16 { return 20 } +func (*DeleteTopicsResponse) MaxVersion() int16 { return 6 } +func (v *DeleteTopicsResponse) SetVersion(version int16) { v.Version = version } +func (v *DeleteTopicsResponse) GetVersion() int16 { return v.Version } +func (v *DeleteTopicsResponse) IsFlexible() bool { return v.Version >= 4 } +func (v *DeleteTopicsResponse) Throttle() (int32, bool) { return v.ThrottleMillis, v.Version >= 2 } +func (v *DeleteTopicsResponse) SetThrottle(throttleMillis int32) { v.ThrottleMillis = throttleMillis } +func (v *DeleteTopicsResponse) RequestKind() Request { return &DeleteTopicsRequest{Version: v.Version} } + +func (v *DeleteTopicsResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 4 + _ = isFlexible + if version >= 1 { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Topic + if version < 6 { + var vv string + if v != nil { + vv = *v + } + { + v := vv + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + } else { + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + } + if version >= 6 { + v := v.TopicID + dst = kbin.AppendUuid(dst, v) + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + if version >= 5 { + v := v.ErrorMessage + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *DeleteTopicsResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *DeleteTopicsResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *DeleteTopicsResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 4 + _ = isFlexible + s := v + if version >= 1 { + v := b.Int32() + s.ThrottleMillis = v + } + { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + 
} else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]DeleteTopicsResponseTopic, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v *string + if version < 6 { + var vv string + if isFlexible { + if unsafe { + vv = b.UnsafeCompactString() + } else { + vv = b.CompactString() + } + } else { + if unsafe { + vv = b.UnsafeString() + } else { + vv = b.String() + } + } + v = &vv + } else { + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + } + s.Topic = v + } + if version >= 6 { + v := b.Uuid() + s.TopicID = v + } + { + v := b.Int16() + s.ErrorCode = v + } + if version >= 5 { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.ErrorMessage = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrDeleteTopicsResponse returns a pointer to a default DeleteTopicsResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrDeleteTopicsResponse() *DeleteTopicsResponse { + var v DeleteTopicsResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DeleteTopicsResponse. +func (v *DeleteTopicsResponse) Default() { +} + +// NewDeleteTopicsResponse returns a default DeleteTopicsResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewDeleteTopicsResponse() DeleteTopicsResponse { + var v DeleteTopicsResponse + v.Default() + return v +} + +type DeleteRecordsRequestTopicPartition struct { + // Partition is a partition to delete records from. + Partition int32 + + // Offset is the offset to set the partition's low watermark (start + // offset) to. After a successful response, all records before this + // offset are considered deleted and are no longer readable. + // + // To delete all records, use -1, which is mapped to the partition's + // current high watermark. + Offset int64 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DeleteRecordsRequestTopicPartition. +func (v *DeleteRecordsRequestTopicPartition) Default() { +} + +// NewDeleteRecordsRequestTopicPartition returns a default DeleteRecordsRequestTopicPartition +// This is a shortcut for creating a struct and calling Default yourself. +func NewDeleteRecordsRequestTopicPartition() DeleteRecordsRequestTopicPartition { + var v DeleteRecordsRequestTopicPartition + v.Default() + return v +} + +type DeleteRecordsRequestTopic struct { + // Topic is a topic to delete records from. + Topic string + + // Partitions contains partitions to delete records from. + Partitions []DeleteRecordsRequestTopicPartition + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +// Default sets any default fields. 
Calling this allows for future compatibility +// if new fields are added to DeleteRecordsRequestTopic. +func (v *DeleteRecordsRequestTopic) Default() { +} + +// NewDeleteRecordsRequestTopic returns a default DeleteRecordsRequestTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewDeleteRecordsRequestTopic() DeleteRecordsRequestTopic { + var v DeleteRecordsRequestTopic + v.Default() + return v +} + +// DeleteRecordsRequest is an admin request to delete records from Kafka. +// This was added for KIP-107. +// +// To delete records, Kafka sets the LogStartOffset for partitions to +// the requested offset. All segments whose max partition is before the +// requested offset are deleted, and any records within the segment before +// the requested offset can no longer be read. +// +// This request must be issued to the correct brokers that own the partitions +// you intend to delete records for. +type DeleteRecordsRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // Topics contains topics for which to delete records from. + Topics []DeleteRecordsRequestTopic + + // TimeoutMillis is how long Kafka can wait before responding to this request. + // This field has no effect on Kafka's processing of the request; the request + // will continue to be processed if the timeout is reached. If the timeout is + // reached, Kafka will reply with a REQUEST_TIMED_OUT error. + // + // This field has a default of 15000. + TimeoutMillis int32 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +func (*DeleteRecordsRequest) Key() int16 { return 21 } +func (*DeleteRecordsRequest) MaxVersion() int16 { return 2 } +func (v *DeleteRecordsRequest) SetVersion(version int16) { v.Version = version } +func (v *DeleteRecordsRequest) GetVersion() int16 { return v.Version } +func (v *DeleteRecordsRequest) IsFlexible() bool { return v.Version >= 2 } +func (v *DeleteRecordsRequest) Timeout() int32 { return v.TimeoutMillis } +func (v *DeleteRecordsRequest) SetTimeout(timeoutMillis int32) { v.TimeoutMillis = timeoutMillis } +func (v *DeleteRecordsRequest) ResponseKind() Response { + r := &DeleteRecordsResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. 
+func (v *DeleteRecordsRequest) RequestWith(ctx context.Context, r Requestor) (*DeleteRecordsResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*DeleteRecordsResponse) + return resp, err +} + +func (v *DeleteRecordsRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Partitions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Partition + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Offset + dst = kbin.AppendInt64(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + { + v := v.TimeoutMillis + dst = kbin.AppendInt32(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *DeleteRecordsRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *DeleteRecordsRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *DeleteRecordsRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + s := v + { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]DeleteRecordsRequestTopic, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + { + v := s.Partitions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]DeleteRecordsRequestTopicPartition, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int32() + s.Partition = v + } + { + v := b.Int64() + s.Offset = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Partitions = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + { + v := b.Int32() + s.TimeoutMillis = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrDeleteRecordsRequest returns a pointer to a default DeleteRecordsRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrDeleteRecordsRequest() *DeleteRecordsRequest { + var v DeleteRecordsRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DeleteRecordsRequest. 
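Again a hypothetical sketch (same imports as above; truncateBefore and cl are illustrative) showing how the DeleteRecords types just defined can be driven. Per the field docs, an Offset of -1 maps to the partition's current high watermark, i.e. delete everything; per the request docs, the request must reach the brokers leading the partitions, and the generated comment suggests client.RequestSharded when per-broker results matter.

// truncateBefore is a hypothetical helper that advances a partition's log
// start offset, deleting all records before the given offset.
func truncateBefore(ctx context.Context, cl *kgo.Client, topic string, partition int32, offset int64) error {
	req := kmsg.NewPtrDeleteRecordsRequest()
	rt := kmsg.NewDeleteRecordsRequestTopic()
	rt.Topic = topic
	rp := kmsg.NewDeleteRecordsRequestTopicPartition()
	rp.Partition = partition
	rp.Offset = offset // -1 deletes up to the current high watermark
	rt.Partitions = append(rt.Partitions, rp)
	req.Topics = append(req.Topics, rt)

	resp, err := req.RequestWith(ctx, cl)
	if err != nil {
		return err
	}
	for _, t := range resp.Topics {
		for _, p := range t.Partitions {
			if err := kerr.ErrorForCode(p.ErrorCode); err != nil {
				return fmt.Errorf("delete records %s/%d: %w", t.Topic, p.Partition, err)
			}
		}
	}
	return nil
}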
+func (v *DeleteRecordsRequest) Default() { + v.TimeoutMillis = 15000 +} + +// NewDeleteRecordsRequest returns a default DeleteRecordsRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewDeleteRecordsRequest() DeleteRecordsRequest { + var v DeleteRecordsRequest + v.Default() + return v +} + +type DeleteRecordsResponseTopicPartition struct { + // Partition is the partition this response corresponds to. + Partition int32 + + // LowWatermark is the new earliest offset for this partition. + LowWatermark int64 + + // ErrorCode is the error code returned for a given partition in + // the delete request. + // + // TOPIC_AUTHORIZATION_FAILED is returned for all partitions if the + // client is not authorized to delete records. + // + // UNKNOWN_TOPIC_OR_PARTITION is returned for all partitions that + // the requested broker does not know of. + // + // NOT_LEADER_FOR_PARTITION is returned for partitions that the + // requested broker is not a leader of. + // + // OFFSET_OUT_OF_RANGE is returned if the requested offset is + // negative or higher than the current high watermark. + // + // POLICY_VIOLATION is returned if records cannot be deleted due to + // broker configuration. + // + // KAFKA_STORAGE_EXCEPTION is returned if the partition is in an + // offline log directory. + ErrorCode int16 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DeleteRecordsResponseTopicPartition. +func (v *DeleteRecordsResponseTopicPartition) Default() { +} + +// NewDeleteRecordsResponseTopicPartition returns a default DeleteRecordsResponseTopicPartition +// This is a shortcut for creating a struct and calling Default yourself. +func NewDeleteRecordsResponseTopicPartition() DeleteRecordsResponseTopicPartition { + var v DeleteRecordsResponseTopicPartition + v.Default() + return v +} + +type DeleteRecordsResponseTopic struct { + // Topic is the topic this response corresponds to. + Topic string + + // Partitions contains responses for each partition in a requested topic + // in the delete records request. + Partitions []DeleteRecordsResponseTopicPartition + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DeleteRecordsResponseTopic. +func (v *DeleteRecordsResponseTopic) Default() { +} + +// NewDeleteRecordsResponseTopic returns a default DeleteRecordsResponseTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewDeleteRecordsResponseTopic() DeleteRecordsResponseTopic { + var v DeleteRecordsResponseTopic + v.Default() + return v +} + +// DeleteRecordsResponse is returned from a DeleteRecordsRequest. +type DeleteRecordsResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ThrottleMillis is how long of a throttle Kafka will apply to the client + // after this request. + // For Kafka < 2.0.0, the throttle is applied before issuing a response. + // For Kafka >= 2.0.0, the throttle is applied after issuing a response. + // + // This request switched at version 1. + ThrottleMillis int32 + + // Topics contains responses for each topic in the delete records request. 
+ Topics []DeleteRecordsResponseTopic + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +func (*DeleteRecordsResponse) Key() int16 { return 21 } +func (*DeleteRecordsResponse) MaxVersion() int16 { return 2 } +func (v *DeleteRecordsResponse) SetVersion(version int16) { v.Version = version } +func (v *DeleteRecordsResponse) GetVersion() int16 { return v.Version } +func (v *DeleteRecordsResponse) IsFlexible() bool { return v.Version >= 2 } +func (v *DeleteRecordsResponse) Throttle() (int32, bool) { return v.ThrottleMillis, v.Version >= 1 } +func (v *DeleteRecordsResponse) SetThrottle(throttleMillis int32) { v.ThrottleMillis = throttleMillis } +func (v *DeleteRecordsResponse) RequestKind() Request { + return &DeleteRecordsRequest{Version: v.Version} +} + +func (v *DeleteRecordsResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Partitions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Partition + dst = kbin.AppendInt32(dst, v) + } + { + v := v.LowWatermark + dst = kbin.AppendInt64(dst, v) + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *DeleteRecordsResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *DeleteRecordsResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *DeleteRecordsResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + s := v + { + v := b.Int32() + s.ThrottleMillis = v + } + { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]DeleteRecordsResponseTopic, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + { + v := s.Partitions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]DeleteRecordsResponseTopicPartition, l)...) 
+ } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int32() + s.Partition = v + } + { + v := b.Int64() + s.LowWatermark = v + } + { + v := b.Int16() + s.ErrorCode = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Partitions = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrDeleteRecordsResponse returns a pointer to a default DeleteRecordsResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrDeleteRecordsResponse() *DeleteRecordsResponse { + var v DeleteRecordsResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DeleteRecordsResponse. +func (v *DeleteRecordsResponse) Default() { +} + +// NewDeleteRecordsResponse returns a default DeleteRecordsResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewDeleteRecordsResponse() DeleteRecordsResponse { + var v DeleteRecordsResponse + v.Default() + return v +} + +// InitProducerIDRequest initializes a producer ID for idempotent transactions, +// and if using transactions, a producer epoch. This is the first request +// necessary to begin idempotent producing or transactions. +// +// Note that you do not need to go to a txn coordinator if you are initializing +// a producer id without a transactional id. +type InitProducerIDRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // TransactionalID is the ID to use for transactions if using transactions. + TransactionalID *string + + // TransactionTimeoutMillis is how long a transaction is allowed before + // EndTxn is required. + // + // Note that this timeout only begins on the first AddPartitionsToTxn + // request. + TransactionTimeoutMillis int32 + + // ProducerID, added for KIP-360, is the current producer ID. This allows + // the client to potentially recover on UNKNOWN_PRODUCER_ID errors. + // + // This field has a default of -1. + ProducerID int64 // v3+ + + // The producer's current epoch. This will be checked against the producer + // epoch on the broker, and the request will return an error if they do not + // match. Also added for KIP-360. + // + // This field has a default of -1. + ProducerEpoch int16 // v3+ + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +func (*InitProducerIDRequest) Key() int16 { return 22 } +func (*InitProducerIDRequest) MaxVersion() int16 { return 4 } +func (v *InitProducerIDRequest) SetVersion(version int16) { v.Version = version } +func (v *InitProducerIDRequest) GetVersion() int16 { return v.Version } +func (v *InitProducerIDRequest) IsFlexible() bool { return v.Version >= 2 } +func (v *InitProducerIDRequest) IsTxnCoordinatorRequest() {} +func (v *InitProducerIDRequest) ResponseKind() Response { + r := &InitProducerIDResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. 
+func (v *InitProducerIDRequest) RequestWith(ctx context.Context, r Requestor) (*InitProducerIDResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*InitProducerIDResponse) + return resp, err +} + +func (v *InitProducerIDRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + { + v := v.TransactionalID + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + { + v := v.TransactionTimeoutMillis + dst = kbin.AppendInt32(dst, v) + } + if version >= 3 { + v := v.ProducerID + dst = kbin.AppendInt64(dst, v) + } + if version >= 3 { + v := v.ProducerEpoch + dst = kbin.AppendInt16(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *InitProducerIDRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *InitProducerIDRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *InitProducerIDRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + s := v + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.TransactionalID = v + } + { + v := b.Int32() + s.TransactionTimeoutMillis = v + } + if version >= 3 { + v := b.Int64() + s.ProducerID = v + } + if version >= 3 { + v := b.Int16() + s.ProducerEpoch = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrInitProducerIDRequest returns a pointer to a default InitProducerIDRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrInitProducerIDRequest() *InitProducerIDRequest { + var v InitProducerIDRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to InitProducerIDRequest. +func (v *InitProducerIDRequest) Default() { + v.ProducerID = -1 + v.ProducerEpoch = -1 +} + +// NewInitProducerIDRequest returns a default InitProducerIDRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewInitProducerIDRequest() InitProducerIDRequest { + var v InitProducerIDRequest + v.Default() + return v +} + +// InitProducerIDResponse is returned for an InitProducerIDRequest. +type InitProducerIDResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ThrottleMillis is how long of a throttle Kafka will apply to the client + // after this request. + // For Kafka < 2.0.0, the throttle is applied before issuing a response. + // For Kafka >= 2.0.0, the throttle is applied after issuing a response. + // + // This request switched at version 1. + ThrottleMillis int32 + + // CLUSTER_AUTHORIZATION_FAILED is returned when not using transactions if + // the client is not authorized for idempotent_write on cluster. + // + // TRANSACTIONAL_ID_AUTHORIZATION_FAILED is returned when using transactions + // if the client is not authorized to write on transactional_id. 
+ // + // INVALID_REQUEST is returned if using transactions and the transactional id + // is an empty, non-null string + // + // COORDINATOR_LOAD_IN_PROGRESS is returned if the coordinator for this + // transactional ID is still loading. + // + // NOT_COORDINATOR is returned if the broker is not the coordinator for + // this transactional ID. + // + // INVALID_TRANSACTION_TIMEOUT is returned if using transactions and the timeout + // is equal to over over transaction.max.timeout.ms or under 0. + // + // CONCURRENT_TRANSACTIONS is returned if there is an ongoing transaction + // that is completing at the time this init is called. + ErrorCode int16 + + // ProducerID is the next producer ID that Kafka generated. This ID is used + // to ensure repeated produce requests do not result in duplicate records. + // + // This field has a default of -1. + ProducerID int64 + + // ProducerEpoch is the producer epoch to use for transactions. + ProducerEpoch int16 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +func (*InitProducerIDResponse) Key() int16 { return 22 } +func (*InitProducerIDResponse) MaxVersion() int16 { return 4 } +func (v *InitProducerIDResponse) SetVersion(version int16) { v.Version = version } +func (v *InitProducerIDResponse) GetVersion() int16 { return v.Version } +func (v *InitProducerIDResponse) IsFlexible() bool { return v.Version >= 2 } +func (v *InitProducerIDResponse) Throttle() (int32, bool) { return v.ThrottleMillis, v.Version >= 1 } +func (v *InitProducerIDResponse) SetThrottle(throttleMillis int32) { v.ThrottleMillis = throttleMillis } +func (v *InitProducerIDResponse) RequestKind() Request { + return &InitProducerIDRequest{Version: v.Version} +} + +func (v *InitProducerIDResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.ProducerID + dst = kbin.AppendInt64(dst, v) + } + { + v := v.ProducerEpoch + dst = kbin.AppendInt16(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *InitProducerIDResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *InitProducerIDResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *InitProducerIDResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + s := v + { + v := b.Int32() + s.ThrottleMillis = v + } + { + v := b.Int16() + s.ErrorCode = v + } + { + v := b.Int64() + s.ProducerID = v + } + { + v := b.Int16() + s.ProducerEpoch = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrInitProducerIDResponse returns a pointer to a default InitProducerIDResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrInitProducerIDResponse() *InitProducerIDResponse { + var v InitProducerIDResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to InitProducerIDResponse. 
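One last hypothetical sketch (same imports as above; initIdempotentProducer and cl are illustrative). In practice the vendored kgo producer issues InitProducerID on its own, so this only shows the raw request/response shape. Leaving TransactionalID nil initializes a producer ID for idempotent writes without involving a transaction coordinator, as the request docs above note.

// initIdempotentProducer is a hypothetical helper returning the producer ID
// and epoch that Kafka assigns for idempotent (non-transactional) producing.
func initIdempotentProducer(ctx context.Context, cl *kgo.Client) (int64, int16, error) {
	req := kmsg.NewPtrInitProducerIDRequest()
	req.TransactionalID = nil // idempotent producing only; no transactions

	resp, err := req.RequestWith(ctx, cl)
	if err != nil {
		return 0, 0, err
	}
	if err := kerr.ErrorForCode(resp.ErrorCode); err != nil {
		return 0, 0, err
	}
	return resp.ProducerID, resp.ProducerEpoch, nil
}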
+func (v *InitProducerIDResponse) Default() { + v.ProducerID = -1 +} + +// NewInitProducerIDResponse returns a default InitProducerIDResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewInitProducerIDResponse() InitProducerIDResponse { + var v InitProducerIDResponse + v.Default() + return v +} + +type OffsetForLeaderEpochRequestTopicPartition struct { + // Partition is the number of a partition. + Partition int32 + + // CurrentLeaderEpoch, proposed in KIP-320 and introduced in Kafka 2.1.0, + // allows brokers to check if the client is fenced (has an out of date + // leader) or if the client is ahead of the broker. + // + // The initial leader epoch can be determined from a MetadataResponse. + // + // This field has a default of -1. + CurrentLeaderEpoch int32 // v2+ + + // LeaderEpoch is the epoch to fetch the end offset for. + LeaderEpoch int32 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v4+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to OffsetForLeaderEpochRequestTopicPartition. +func (v *OffsetForLeaderEpochRequestTopicPartition) Default() { + v.CurrentLeaderEpoch = -1 +} + +// NewOffsetForLeaderEpochRequestTopicPartition returns a default OffsetForLeaderEpochRequestTopicPartition +// This is a shortcut for creating a struct and calling Default yourself. +func NewOffsetForLeaderEpochRequestTopicPartition() OffsetForLeaderEpochRequestTopicPartition { + var v OffsetForLeaderEpochRequestTopicPartition + v.Default() + return v +} + +type OffsetForLeaderEpochRequestTopic struct { + // Topic is the name of a topic. + Topic string + + // Partitions are partitions within a topic to fetch leader epoch offsets for. + Partitions []OffsetForLeaderEpochRequestTopicPartition + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v4+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to OffsetForLeaderEpochRequestTopic. +func (v *OffsetForLeaderEpochRequestTopic) Default() { +} + +// NewOffsetForLeaderEpochRequestTopic returns a default OffsetForLeaderEpochRequestTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewOffsetForLeaderEpochRequestTopic() OffsetForLeaderEpochRequestTopic { + var v OffsetForLeaderEpochRequestTopic + v.Default() + return v +} + +// OffsetForLeaderEpochRequest requests log end offsets for partitions. +// +// Version 2, proposed in KIP-320 and introduced in Kafka 2.1.0, can be used by +// consumers to perform more accurate offset resetting in the case of data loss. +// +// In support of version 2, this requires DESCRIBE on TOPIC. +type OffsetForLeaderEpochRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ReplicaID, added in support of KIP-392, is the broker ID of the follower, + // or -1 if this request is from a consumer. + // + // This field has a default of -2. + ReplicaID int32 // v3+ + + // Topics are topics to fetch leader epoch offsets for. + Topics []OffsetForLeaderEpochRequestTopic + + // UnknownTags are tags Kafka sent that we do not know the purpose of. 
+ UnknownTags Tags // v4+ +} + +func (*OffsetForLeaderEpochRequest) Key() int16 { return 23 } +func (*OffsetForLeaderEpochRequest) MaxVersion() int16 { return 4 } +func (v *OffsetForLeaderEpochRequest) SetVersion(version int16) { v.Version = version } +func (v *OffsetForLeaderEpochRequest) GetVersion() int16 { return v.Version } +func (v *OffsetForLeaderEpochRequest) IsFlexible() bool { return v.Version >= 4 } +func (v *OffsetForLeaderEpochRequest) ResponseKind() Response { + r := &OffsetForLeaderEpochResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. +func (v *OffsetForLeaderEpochRequest) RequestWith(ctx context.Context, r Requestor) (*OffsetForLeaderEpochResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*OffsetForLeaderEpochResponse) + return resp, err +} + +func (v *OffsetForLeaderEpochRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 4 + _ = isFlexible + if version >= 3 { + v := v.ReplicaID + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Partitions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Partition + dst = kbin.AppendInt32(dst, v) + } + if version >= 2 { + v := v.CurrentLeaderEpoch + dst = kbin.AppendInt32(dst, v) + } + { + v := v.LeaderEpoch + dst = kbin.AppendInt32(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *OffsetForLeaderEpochRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *OffsetForLeaderEpochRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *OffsetForLeaderEpochRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 4 + _ = isFlexible + s := v + if version >= 3 { + v := b.Int32() + s.ReplicaID = v + } + { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]OffsetForLeaderEpochRequestTopic, l)...) 
+ } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + { + v := s.Partitions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]OffsetForLeaderEpochRequestTopicPartition, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int32() + s.Partition = v + } + if version >= 2 { + v := b.Int32() + s.CurrentLeaderEpoch = v + } + { + v := b.Int32() + s.LeaderEpoch = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Partitions = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrOffsetForLeaderEpochRequest returns a pointer to a default OffsetForLeaderEpochRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrOffsetForLeaderEpochRequest() *OffsetForLeaderEpochRequest { + var v OffsetForLeaderEpochRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to OffsetForLeaderEpochRequest. +func (v *OffsetForLeaderEpochRequest) Default() { + v.ReplicaID = -2 +} + +// NewOffsetForLeaderEpochRequest returns a default OffsetForLeaderEpochRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewOffsetForLeaderEpochRequest() OffsetForLeaderEpochRequest { + var v OffsetForLeaderEpochRequest + v.Default() + return v +} + +type OffsetForLeaderEpochResponseTopicPartition struct { + // ErrorCode is the error code returned on request failure. + // + // TOPIC_AUTHORIZATION_FAILED is returned if the client does not have + // the necessary permissions to issue this request. + // + // KAFKA_STORAGE_ERROR is returned if the partition is offline. + // + // NOT_LEADER_FOR_PARTITION is returned if the broker knows of the partition + // but does not own it. + // + // UNKNOWN_TOPIC_OR_PARTITION is returned if the broker does not know of the + // partition. + // + // FENCED_LEADER_EPOCH is returned if the client is using a current leader epoch + // older than the actual leader epoch. + // + // UNKNOWN_LEADER_EPOCH if returned if the client is using a current leader epoch + // that the actual leader does not know of. This could occur when the client + // has newer metadata than the broker when the broker just became the leader for + // a replica. + ErrorCode int16 + + // Partition is the partition this response is for. + Partition int32 + + // LeaderEpoch is similar to the requested leader epoch, but pairs with the + // next field. If the requested leader epoch is unknown, this is -1. If the + // requested epoch had no records produced during the requested epoch, this + // is the first prior epoch that had records. + // + // This field has a default of -1. + LeaderEpoch int32 // v1+ + + // EndOffset is either (1) just past the last recorded offset in the + // current partition if the broker leader has the same epoch as the + // leader epoch in the request, or (2) the beginning offset of the next + // epoch if the leader is past the requested epoch. 
The second scenario + // can be seen as equivalent to the first: the beginning offset of the + // next epoch is just past the final offset of the prior epoch. + // + // (2) allows consumers to detect data loss: if the consumer consumed + // past the end offset that is returned, then the consumer should reset + // to the returned offset and the consumer knows everything past the end + // offset was lost. + // + // With the prior field, consumers know that at this offset, the broker + // either has no more records (consumer is caught up), or the broker + // transitioned to a new epoch. + // + // This field has a default of -1. + EndOffset int64 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v4+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to OffsetForLeaderEpochResponseTopicPartition. +func (v *OffsetForLeaderEpochResponseTopicPartition) Default() { + v.LeaderEpoch = -1 + v.EndOffset = -1 +} + +// NewOffsetForLeaderEpochResponseTopicPartition returns a default OffsetForLeaderEpochResponseTopicPartition +// This is a shortcut for creating a struct and calling Default yourself. +func NewOffsetForLeaderEpochResponseTopicPartition() OffsetForLeaderEpochResponseTopicPartition { + var v OffsetForLeaderEpochResponseTopicPartition + v.Default() + return v +} + +type OffsetForLeaderEpochResponseTopic struct { + // Topic is the topic this response corresponds to. + Topic string + + // Partitions are responses to partitions in a topic in the request. + Partitions []OffsetForLeaderEpochResponseTopicPartition + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v4+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to OffsetForLeaderEpochResponseTopic. +func (v *OffsetForLeaderEpochResponseTopic) Default() { +} + +// NewOffsetForLeaderEpochResponseTopic returns a default OffsetForLeaderEpochResponseTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewOffsetForLeaderEpochResponseTopic() OffsetForLeaderEpochResponseTopic { + var v OffsetForLeaderEpochResponseTopic + v.Default() + return v +} + +// OffsetForLeaderEpochResponse is returned from an OffsetForLeaderEpochRequest. +type OffsetForLeaderEpochResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ThrottleMillis is how long of a throttle Kafka will apply to the client + // after responding to this request. + ThrottleMillis int32 // v2+ + + // Topics are responses to topics in the request. + Topics []OffsetForLeaderEpochResponseTopic + + // UnknownTags are tags Kafka sent that we do not know the purpose of. 
+ UnknownTags Tags // v4+ +} + +func (*OffsetForLeaderEpochResponse) Key() int16 { return 23 } +func (*OffsetForLeaderEpochResponse) MaxVersion() int16 { return 4 } +func (v *OffsetForLeaderEpochResponse) SetVersion(version int16) { v.Version = version } +func (v *OffsetForLeaderEpochResponse) GetVersion() int16 { return v.Version } +func (v *OffsetForLeaderEpochResponse) IsFlexible() bool { return v.Version >= 4 } +func (v *OffsetForLeaderEpochResponse) Throttle() (int32, bool) { + return v.ThrottleMillis, v.Version >= 0 +} + +func (v *OffsetForLeaderEpochResponse) SetThrottle(throttleMillis int32) { + v.ThrottleMillis = throttleMillis +} + +func (v *OffsetForLeaderEpochResponse) RequestKind() Request { + return &OffsetForLeaderEpochRequest{Version: v.Version} +} + +func (v *OffsetForLeaderEpochResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 4 + _ = isFlexible + if version >= 2 { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Partitions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.Partition + dst = kbin.AppendInt32(dst, v) + } + if version >= 1 { + v := v.LeaderEpoch + dst = kbin.AppendInt32(dst, v) + } + { + v := v.EndOffset + dst = kbin.AppendInt64(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *OffsetForLeaderEpochResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *OffsetForLeaderEpochResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *OffsetForLeaderEpochResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 4 + _ = isFlexible + s := v + if version >= 2 { + v := b.Int32() + s.ThrottleMillis = v + } + { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]OffsetForLeaderEpochResponseTopic, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + { + v := s.Partitions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]OffsetForLeaderEpochResponseTopicPartition, l)...) 
+ } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int16() + s.ErrorCode = v + } + { + v := b.Int32() + s.Partition = v + } + if version >= 1 { + v := b.Int32() + s.LeaderEpoch = v + } + { + v := b.Int64() + s.EndOffset = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Partitions = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrOffsetForLeaderEpochResponse returns a pointer to a default OffsetForLeaderEpochResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrOffsetForLeaderEpochResponse() *OffsetForLeaderEpochResponse { + var v OffsetForLeaderEpochResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to OffsetForLeaderEpochResponse. +func (v *OffsetForLeaderEpochResponse) Default() { +} + +// NewOffsetForLeaderEpochResponse returns a default OffsetForLeaderEpochResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewOffsetForLeaderEpochResponse() OffsetForLeaderEpochResponse { + var v OffsetForLeaderEpochResponse + v.Default() + return v +} + +type AddPartitionsToTxnRequestTopic struct { + // Topic is a topic name. + Topic string + + // Partitions are partitions within a topic to add as part of the producer + // side of a transaction. + Partitions []int32 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v3+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to AddPartitionsToTxnRequestTopic. +func (v *AddPartitionsToTxnRequestTopic) Default() { +} + +// NewAddPartitionsToTxnRequestTopic returns a default AddPartitionsToTxnRequestTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewAddPartitionsToTxnRequestTopic() AddPartitionsToTxnRequestTopic { + var v AddPartitionsToTxnRequestTopic + v.Default() + return v +} + +type AddPartitionsToTxnRequestTransactionTopic struct { + Topic string + + Partitions []int32 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v3+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to AddPartitionsToTxnRequestTransactionTopic. +func (v *AddPartitionsToTxnRequestTransactionTopic) Default() { +} + +// NewAddPartitionsToTxnRequestTransactionTopic returns a default AddPartitionsToTxnRequestTransactionTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewAddPartitionsToTxnRequestTransactionTopic() AddPartitionsToTxnRequestTransactionTopic { + var v AddPartitionsToTxnRequestTransactionTopic + v.Default() + return v +} + +type AddPartitionsToTxnRequestTransaction struct { + TransactionalID string + + ProducerID int64 + + ProducerEpoch int16 + + // VerifyOnly signifies if we want to check if the partition is in the + // transaction rather than add it. + VerifyOnly bool + + Topics []AddPartitionsToTxnRequestTransactionTopic + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v3+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to AddPartitionsToTxnRequestTransaction. 
+func (v *AddPartitionsToTxnRequestTransaction) Default() { +} + +// NewAddPartitionsToTxnRequestTransaction returns a default AddPartitionsToTxnRequestTransaction +// This is a shortcut for creating a struct and calling Default yourself. +func NewAddPartitionsToTxnRequestTransaction() AddPartitionsToTxnRequestTransaction { + var v AddPartitionsToTxnRequestTransaction + v.Default() + return v +} + +// AddPartitionsToTxnRequest begins the producer side of a transaction for all +// partitions in the request. Before producing any records to a partition in +// the transaction, that partition must have been added to the transaction with +// this request. +// +// Versions 3 and below are exclusively used by clients and versions 4 and +// above are used by brokers. +// +// Version 4 adds VerifyOnly field to check if partitions are already in +// transaction and adds support to batch multiple transactions. +type AddPartitionsToTxnRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // TransactionalID is the transactional ID to use for this request. + TransactionalID string // v0-v3 + + // ProducerID is the producer ID of the client for this transactional ID + // as received from InitProducerID. + ProducerID int64 // v0-v3 + + // ProducerEpoch is the producer epoch of the client for this transactional ID + // as received from InitProducerID. + ProducerEpoch int16 // v0-v3 + + // Topics are topics to add as part of the producer side of a transaction. + Topics []AddPartitionsToTxnRequestTopic // v0-v3 + + // The list of transactions to add partitions to, for v4+, for brokers only. + // The fields in this are batch broker requests that duplicate the above fields + // and thus are undocumented (except VerifyOnly, which is new). + Transactions []AddPartitionsToTxnRequestTransaction // v4+ + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v3+ +} + +func (*AddPartitionsToTxnRequest) Key() int16 { return 24 } +func (*AddPartitionsToTxnRequest) MaxVersion() int16 { return 4 } +func (v *AddPartitionsToTxnRequest) SetVersion(version int16) { v.Version = version } +func (v *AddPartitionsToTxnRequest) GetVersion() int16 { return v.Version } +func (v *AddPartitionsToTxnRequest) IsFlexible() bool { return v.Version >= 3 } +func (v *AddPartitionsToTxnRequest) IsTxnCoordinatorRequest() {} +func (v *AddPartitionsToTxnRequest) ResponseKind() Response { + r := &AddPartitionsToTxnResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. 
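// --- Editorial aside: illustrative sketch, not part of this patch. It shows the
// client-side (v0-v3) shape of the AddPartitionsToTxnRequest documented above. kgo's
// transactional producer issues this request internally before producing to a newly
// touched partition; it is built by hand here only to illustrate the field layout.
// Assumes the context/kgo/kmsg/kerr imports from the earlier sketch; the topic,
// partitions, and transactional ID are placeholders, and producerID/producerEpoch
// come from a prior InitProducerID exchange.

func addPartitionsToTxn(ctx context.Context, cl *kgo.Client, producerID int64, producerEpoch int16) error {
	req := kmsg.NewPtrAddPartitionsToTxnRequest()
	req.TransactionalID = "example-txn"
	req.ProducerID = producerID
	req.ProducerEpoch = producerEpoch

	topic := kmsg.NewAddPartitionsToTxnRequestTopic()
	topic.Topic = "example-topic"
	topic.Partitions = []int32{0, 1}
	req.Topics = append(req.Topics, topic)

	resp, err := req.RequestWith(ctx, cl)
	if err != nil {
		return err
	}
	// Per-partition error codes are reported under Topics/Partitions in the response.
	for _, t := range resp.Topics {
		for _, p := range t.Partitions {
			if err := kerr.ErrorForCode(p.ErrorCode); err != nil {
				return err
			}
		}
	}
	return nil
}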
+func (v *AddPartitionsToTxnRequest) RequestWith(ctx context.Context, r Requestor) (*AddPartitionsToTxnResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*AddPartitionsToTxnResponse) + return resp, err +} + +func (v *AddPartitionsToTxnRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 3 + _ = isFlexible + if version >= 0 && version <= 3 { + v := v.TransactionalID + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + if version >= 0 && version <= 3 { + v := v.ProducerID + dst = kbin.AppendInt64(dst, v) + } + if version >= 0 && version <= 3 { + v := v.ProducerEpoch + dst = kbin.AppendInt16(dst, v) + } + if version >= 0 && version <= 3 { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Partitions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := v[i] + dst = kbin.AppendInt32(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if version >= 4 { + v := v.Transactions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.TransactionalID + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.ProducerID + dst = kbin.AppendInt64(dst, v) + } + { + v := v.ProducerEpoch + dst = kbin.AppendInt16(dst, v) + } + { + v := v.VerifyOnly + dst = kbin.AppendBool(dst, v) + } + { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Partitions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := v[i] + dst = kbin.AppendInt32(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *AddPartitionsToTxnRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *AddPartitionsToTxnRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *AddPartitionsToTxnRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 3 + _ = isFlexible + s := v + if version >= 0 && version <= 3 { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + 
s.TransactionalID = v + } + if version >= 0 && version <= 3 { + v := b.Int64() + s.ProducerID = v + } + if version >= 0 && version <= 3 { + v := b.Int16() + s.ProducerEpoch = v + } + if version >= 0 && version <= 3 { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]AddPartitionsToTxnRequestTopic, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + { + v := s.Partitions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]int32, l)...) + } + for i := int32(0); i < l; i++ { + v := b.Int32() + a[i] = v + } + v = a + s.Partitions = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + if version >= 4 { + v := s.Transactions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]AddPartitionsToTxnRequestTransaction, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.TransactionalID = v + } + { + v := b.Int64() + s.ProducerID = v + } + { + v := b.Int16() + s.ProducerEpoch = v + } + { + v := b.Bool() + s.VerifyOnly = v + } + { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]AddPartitionsToTxnRequestTransactionTopic, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + { + v := s.Partitions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]int32, l)...) + } + for i := int32(0); i < l; i++ { + v := b.Int32() + a[i] = v + } + v = a + s.Partitions = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Transactions = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrAddPartitionsToTxnRequest returns a pointer to a default AddPartitionsToTxnRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrAddPartitionsToTxnRequest() *AddPartitionsToTxnRequest { + var v AddPartitionsToTxnRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to AddPartitionsToTxnRequest. 
+func (v *AddPartitionsToTxnRequest) Default() { +} + +// NewAddPartitionsToTxnRequest returns a default AddPartitionsToTxnRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewAddPartitionsToTxnRequest() AddPartitionsToTxnRequest { + var v AddPartitionsToTxnRequest + v.Default() + return v +} + +type AddPartitionsToTxnResponseTransactionTopicPartition struct { + Partition int32 + + ErrorCode int16 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v3+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to AddPartitionsToTxnResponseTransactionTopicPartition. +func (v *AddPartitionsToTxnResponseTransactionTopicPartition) Default() { +} + +// NewAddPartitionsToTxnResponseTransactionTopicPartition returns a default AddPartitionsToTxnResponseTransactionTopicPartition +// This is a shortcut for creating a struct and calling Default yourself. +func NewAddPartitionsToTxnResponseTransactionTopicPartition() AddPartitionsToTxnResponseTransactionTopicPartition { + var v AddPartitionsToTxnResponseTransactionTopicPartition + v.Default() + return v +} + +type AddPartitionsToTxnResponseTransactionTopic struct { + Topic string + + Partitions []AddPartitionsToTxnResponseTransactionTopicPartition + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v3+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to AddPartitionsToTxnResponseTransactionTopic. +func (v *AddPartitionsToTxnResponseTransactionTopic) Default() { +} + +// NewAddPartitionsToTxnResponseTransactionTopic returns a default AddPartitionsToTxnResponseTransactionTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewAddPartitionsToTxnResponseTransactionTopic() AddPartitionsToTxnResponseTransactionTopic { + var v AddPartitionsToTxnResponseTransactionTopic + v.Default() + return v +} + +type AddPartitionsToTxnResponseTransaction struct { + // The transactional id corresponding to the transaction. + TransactionalID string + + Topics []AddPartitionsToTxnResponseTransactionTopic + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v3+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to AddPartitionsToTxnResponseTransaction. +func (v *AddPartitionsToTxnResponseTransaction) Default() { +} + +// NewAddPartitionsToTxnResponseTransaction returns a default AddPartitionsToTxnResponseTransaction +// This is a shortcut for creating a struct and calling Default yourself. +func NewAddPartitionsToTxnResponseTransaction() AddPartitionsToTxnResponseTransaction { + var v AddPartitionsToTxnResponseTransaction + v.Default() + return v +} + +type AddPartitionsToTxnResponseTopicPartition struct { + // Partition is a partition being responded to. + Partition int32 + + // ErrorCode is any error for this topic/partition commit. + // + // TRANSACTIONAL_ID_AUTHORIZATION_FAILED is returned if the client is + // not authorized for write with transactional IDs with the requested + // transactional ID. + // + // TOPIC_AUTHORIZATION_FAILED is returned for all topics that the client + // is not authorized to write to. + // + // UNKNOWN_TOPIC_OR_PARTITION is returned for all topics or partitions + // that the broker does not know of. 
+ // + // OPERATION_NOT_ATTEMPTED is returned if any of the above errors occur + // for all partitions that did not have the above errors. + // + // INVALID_REQUEST is returned if the transactional ID is invalid. + // + // COORDINATOR_LOAD_IN_PROGRESS is returned if the coordinator for this + // transactional ID is still loading. + // + // NOT_COORDINATOR is returned if the broker is not the coordinator for + // this transactional ID. + // + // INVALID_PRODUCER_ID_MAPPING is returned if the produce request used + // a producer ID that is not tied to the transactional ID (i.e., mismatch + // from what was returned from InitProducerID). + // + // INVALID_PRODUCER_EPOCH is returned if the requested epoch does not match + // the broker epoch for this transactional ID. + // + // CONCURRENT_TRANSACTIONS is returned if there is an ongoing transaction for + // this transactional ID, if the producer ID and epoch matches the broker's. + ErrorCode int16 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v3+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to AddPartitionsToTxnResponseTopicPartition. +func (v *AddPartitionsToTxnResponseTopicPartition) Default() { +} + +// NewAddPartitionsToTxnResponseTopicPartition returns a default AddPartitionsToTxnResponseTopicPartition +// This is a shortcut for creating a struct and calling Default yourself. +func NewAddPartitionsToTxnResponseTopicPartition() AddPartitionsToTxnResponseTopicPartition { + var v AddPartitionsToTxnResponseTopicPartition + v.Default() + return v +} + +type AddPartitionsToTxnResponseTopic struct { + // Topic is a topic being responded to. + Topic string + + // Partitions are responses to partitions in the request. + Partitions []AddPartitionsToTxnResponseTopicPartition + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v3+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to AddPartitionsToTxnResponseTopic. +func (v *AddPartitionsToTxnResponseTopic) Default() { +} + +// NewAddPartitionsToTxnResponseTopic returns a default AddPartitionsToTxnResponseTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewAddPartitionsToTxnResponseTopic() AddPartitionsToTxnResponseTopic { + var v AddPartitionsToTxnResponseTopic + v.Default() + return v +} + +// AddPartitionsToTxnResponse is a response to an AddPartitionsToTxnRequest. +type AddPartitionsToTxnResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ThrottleMillis is how long of a throttle Kafka will apply to the client + // after this request. + // For Kafka < 2.0.0, the throttle is applied before issuing a response. + // For Kafka >= 2.0.0, the throttle is applied after issuing a response. + // + // This request switched at version 1. + ThrottleMillis int32 + + // The response top level error code. + ErrorCode int16 // v4+ + + // Results categorized by transactional ID, v4+ only, for brokers only. + // The fields duplicate v3 and below fields (except TransactionalID) and + // are left undocumented. + Transactions []AddPartitionsToTxnResponseTransaction // v4+ + + // Topics are responses to topics in the request. + Topics []AddPartitionsToTxnResponseTopic // v0-v3 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. 
+ UnknownTags Tags // v3+ +} + +func (*AddPartitionsToTxnResponse) Key() int16 { return 24 } +func (*AddPartitionsToTxnResponse) MaxVersion() int16 { return 4 } +func (v *AddPartitionsToTxnResponse) SetVersion(version int16) { v.Version = version } +func (v *AddPartitionsToTxnResponse) GetVersion() int16 { return v.Version } +func (v *AddPartitionsToTxnResponse) IsFlexible() bool { return v.Version >= 3 } +func (v *AddPartitionsToTxnResponse) Throttle() (int32, bool) { + return v.ThrottleMillis, v.Version >= 1 +} + +func (v *AddPartitionsToTxnResponse) SetThrottle(throttleMillis int32) { + v.ThrottleMillis = throttleMillis +} + +func (v *AddPartitionsToTxnResponse) RequestKind() Request { + return &AddPartitionsToTxnRequest{Version: v.Version} +} + +func (v *AddPartitionsToTxnResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 3 + _ = isFlexible + { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + if version >= 4 { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + if version >= 4 { + v := v.Transactions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.TransactionalID + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Partitions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Partition + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if version >= 0 && version <= 3 { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Partitions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Partition + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *AddPartitionsToTxnResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v 
*AddPartitionsToTxnResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *AddPartitionsToTxnResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 3 + _ = isFlexible + s := v + { + v := b.Int32() + s.ThrottleMillis = v + } + if version >= 4 { + v := b.Int16() + s.ErrorCode = v + } + if version >= 4 { + v := s.Transactions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]AddPartitionsToTxnResponseTransaction, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.TransactionalID = v + } + { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]AddPartitionsToTxnResponseTransactionTopic, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + { + v := s.Partitions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]AddPartitionsToTxnResponseTransactionTopicPartition, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int32() + s.Partition = v + } + { + v := b.Int16() + s.ErrorCode = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Partitions = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Transactions = v + } + if version >= 0 && version <= 3 { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]AddPartitionsToTxnResponseTopic, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + { + v := s.Partitions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]AddPartitionsToTxnResponseTopicPartition, l)...) 
+ } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int32() + s.Partition = v + } + { + v := b.Int16() + s.ErrorCode = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Partitions = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrAddPartitionsToTxnResponse returns a pointer to a default AddPartitionsToTxnResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrAddPartitionsToTxnResponse() *AddPartitionsToTxnResponse { + var v AddPartitionsToTxnResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to AddPartitionsToTxnResponse. +func (v *AddPartitionsToTxnResponse) Default() { +} + +// NewAddPartitionsToTxnResponse returns a default AddPartitionsToTxnResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewAddPartitionsToTxnResponse() AddPartitionsToTxnResponse { + var v AddPartitionsToTxnResponse + v.Default() + return v +} + +// AddOffsetsToTxnRequest is a request that ties produced records to what group +// is being consumed for the transaction. +// +// This request must be called before TxnOffsetCommitRequest. +// +// Internally, this request simply adds the __consumer_offsets topic as a +// partition for this transaction with AddPartitionsToTxn for the partition +// in that topic that contains the group. +type AddOffsetsToTxnRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // TransactionalID is the transactional ID to use for this request. + TransactionalID string + + // ProducerID is the producer ID of the client for this transactional ID + // as received from InitProducerID. + ProducerID int64 + + // ProducerEpoch is the producer epoch of the client for this transactional ID + // as received from InitProducerID. + ProducerEpoch int16 + + // Group is the group to tie this transaction to. + Group string + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v3+ +} + +func (*AddOffsetsToTxnRequest) Key() int16 { return 25 } +func (*AddOffsetsToTxnRequest) MaxVersion() int16 { return 3 } +func (v *AddOffsetsToTxnRequest) SetVersion(version int16) { v.Version = version } +func (v *AddOffsetsToTxnRequest) GetVersion() int16 { return v.Version } +func (v *AddOffsetsToTxnRequest) IsFlexible() bool { return v.Version >= 3 } +func (v *AddOffsetsToTxnRequest) IsTxnCoordinatorRequest() {} +func (v *AddOffsetsToTxnRequest) ResponseKind() Response { + r := &AddOffsetsToTxnResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. 
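// --- Editorial aside: illustrative sketch, not part of this patch. AddOffsetsToTxn,
// documented above, ties a consumer group to the transaction before TxnOffsetCommit;
// kgo performs this step internally in its exactly-once flow, so this hand-built
// version only illustrates the request shape. Assumes the context/kgo/kmsg/kerr
// imports from the earlier sketch; the group name and transactional ID are placeholders.

func addOffsetsToTxn(ctx context.Context, cl *kgo.Client, producerID int64, producerEpoch int16) error {
	req := kmsg.NewPtrAddOffsetsToTxnRequest()
	req.TransactionalID = "example-txn"
	req.ProducerID = producerID
	req.ProducerEpoch = producerEpoch
	req.Group = "example-group"

	resp, err := req.RequestWith(ctx, cl)
	if err != nil {
		return err
	}
	// A single top-level error code covers the whole request.
	return kerr.ErrorForCode(resp.ErrorCode)
}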
+func (v *AddOffsetsToTxnRequest) RequestWith(ctx context.Context, r Requestor) (*AddOffsetsToTxnResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*AddOffsetsToTxnResponse) + return resp, err +} + +func (v *AddOffsetsToTxnRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 3 + _ = isFlexible + { + v := v.TransactionalID + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.ProducerID + dst = kbin.AppendInt64(dst, v) + } + { + v := v.ProducerEpoch + dst = kbin.AppendInt16(dst, v) + } + { + v := v.Group + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *AddOffsetsToTxnRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *AddOffsetsToTxnRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *AddOffsetsToTxnRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 3 + _ = isFlexible + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.TransactionalID = v + } + { + v := b.Int64() + s.ProducerID = v + } + { + v := b.Int16() + s.ProducerEpoch = v + } + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Group = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrAddOffsetsToTxnRequest returns a pointer to a default AddOffsetsToTxnRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrAddOffsetsToTxnRequest() *AddOffsetsToTxnRequest { + var v AddOffsetsToTxnRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to AddOffsetsToTxnRequest. +func (v *AddOffsetsToTxnRequest) Default() { +} + +// NewAddOffsetsToTxnRequest returns a default AddOffsetsToTxnRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewAddOffsetsToTxnRequest() AddOffsetsToTxnRequest { + var v AddOffsetsToTxnRequest + v.Default() + return v +} + +// AddOffsetsToTxnResponse is a response to an AddOffsetsToTxnRequest. +type AddOffsetsToTxnResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ThrottleMillis is how long of a throttle Kafka will apply to the client + // after this request. + // For Kafka < 2.0.0, the throttle is applied before issuing a response. + // For Kafka >= 2.0.0, the throttle is applied after issuing a response. + // + // This request switched at version 1. + ThrottleMillis int32 + + // ErrorCode is any error for this topic/partition commit. + // + // TRANSACTIONAL_ID_AUTHORIZATION_FAILED is returned if the client is + // not authorized for write with transactional IDs with the requested + // transactional ID. 
+ // + // GROUP_AUTHORIZATION_FAILED is returned if the client is not authorized + // to read group with the requested group id. + // + // This also can return any error that AddPartitionsToTxn returns. + ErrorCode int16 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v3+ +} + +func (*AddOffsetsToTxnResponse) Key() int16 { return 25 } +func (*AddOffsetsToTxnResponse) MaxVersion() int16 { return 3 } +func (v *AddOffsetsToTxnResponse) SetVersion(version int16) { v.Version = version } +func (v *AddOffsetsToTxnResponse) GetVersion() int16 { return v.Version } +func (v *AddOffsetsToTxnResponse) IsFlexible() bool { return v.Version >= 3 } +func (v *AddOffsetsToTxnResponse) Throttle() (int32, bool) { return v.ThrottleMillis, v.Version >= 1 } +func (v *AddOffsetsToTxnResponse) SetThrottle(throttleMillis int32) { + v.ThrottleMillis = throttleMillis +} + +func (v *AddOffsetsToTxnResponse) RequestKind() Request { + return &AddOffsetsToTxnRequest{Version: v.Version} +} + +func (v *AddOffsetsToTxnResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 3 + _ = isFlexible + { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *AddOffsetsToTxnResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *AddOffsetsToTxnResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *AddOffsetsToTxnResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 3 + _ = isFlexible + s := v + { + v := b.Int32() + s.ThrottleMillis = v + } + { + v := b.Int16() + s.ErrorCode = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrAddOffsetsToTxnResponse returns a pointer to a default AddOffsetsToTxnResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrAddOffsetsToTxnResponse() *AddOffsetsToTxnResponse { + var v AddOffsetsToTxnResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to AddOffsetsToTxnResponse. +func (v *AddOffsetsToTxnResponse) Default() { +} + +// NewAddOffsetsToTxnResponse returns a default AddOffsetsToTxnResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewAddOffsetsToTxnResponse() AddOffsetsToTxnResponse { + var v AddOffsetsToTxnResponse + v.Default() + return v +} + +// EndTxnRequest ends a transaction. This should be called after +// TxnOffsetCommitRequest. +type EndTxnRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // TransactionalID is the transactional ID to use for this request. + TransactionalID string + + // ProducerID is the producer ID of the client for this transactional ID + // as received from InitProducerID. + ProducerID int64 + + // ProducerEpoch is the producer epoch of the client for this transactional ID + // as received from InitProducerID. + ProducerEpoch int16 + + // Commit is whether to commit this transaction: true for yes, false for abort. + Commit bool + + // UnknownTags are tags Kafka sent that we do not know the purpose of. 
+ UnknownTags Tags // v3+ +} + +func (*EndTxnRequest) Key() int16 { return 26 } +func (*EndTxnRequest) MaxVersion() int16 { return 3 } +func (v *EndTxnRequest) SetVersion(version int16) { v.Version = version } +func (v *EndTxnRequest) GetVersion() int16 { return v.Version } +func (v *EndTxnRequest) IsFlexible() bool { return v.Version >= 3 } +func (v *EndTxnRequest) IsTxnCoordinatorRequest() {} +func (v *EndTxnRequest) ResponseKind() Response { + r := &EndTxnResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. +func (v *EndTxnRequest) RequestWith(ctx context.Context, r Requestor) (*EndTxnResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*EndTxnResponse) + return resp, err +} + +func (v *EndTxnRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 3 + _ = isFlexible + { + v := v.TransactionalID + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.ProducerID + dst = kbin.AppendInt64(dst, v) + } + { + v := v.ProducerEpoch + dst = kbin.AppendInt16(dst, v) + } + { + v := v.Commit + dst = kbin.AppendBool(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *EndTxnRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *EndTxnRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *EndTxnRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 3 + _ = isFlexible + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.TransactionalID = v + } + { + v := b.Int64() + s.ProducerID = v + } + { + v := b.Int16() + s.ProducerEpoch = v + } + { + v := b.Bool() + s.Commit = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrEndTxnRequest returns a pointer to a default EndTxnRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrEndTxnRequest() *EndTxnRequest { + var v EndTxnRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to EndTxnRequest. +func (v *EndTxnRequest) Default() { +} + +// NewEndTxnRequest returns a default EndTxnRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewEndTxnRequest() EndTxnRequest { + var v EndTxnRequest + v.Default() + return v +} + +// EndTxnResponse is a response for an EndTxnRequest. +type EndTxnResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ThrottleMillis is how long of a throttle Kafka will apply to the client + // after this request. + // For Kafka < 2.0.0, the throttle is applied before issuing a response. + // For Kafka >= 2.0.0, the throttle is applied after issuing a response. + // + // This request switched at version 1. 
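// --- Editorial aside: illustrative sketch, not part of this patch. EndTxnRequest,
// defined above, commits or aborts the transaction after all produced data and any
// TxnOffsetCommit have been sent. Applications using kgo would normally rely on the
// client's transactional API (the TransactionalID option with BeginTransaction and
// EndTransaction) rather than sending this request by hand; the sketch below only
// shows the wire-level shape. Assumes the context/kgo/kmsg/kerr imports from the
// earlier sketch; all literal values are placeholders.

func endTxn(ctx context.Context, cl *kgo.Client, producerID int64, producerEpoch int16, commit bool) error {
	req := kmsg.NewPtrEndTxnRequest()
	req.TransactionalID = "example-txn"
	req.ProducerID = producerID
	req.ProducerEpoch = producerEpoch
	req.Commit = commit // true to commit, false to abort

	resp, err := req.RequestWith(ctx, cl)
	if err != nil {
		return err
	}
	return kerr.ErrorForCode(resp.ErrorCode)
}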
+ ThrottleMillis int32 + + // ErrorCode is any error for this topic/partition commit. + // + // TRANSACTIONAL_ID_AUTHORIZATION_FAILED is returned if the client is + // not authorized for write with transactional IDs with the requested + // transactional ID. + // + // INVALID_REQUEST is returned if the transactional ID is invalid. + // + // INVALID_PRODUCER_ID_MAPPING is returned if the produce request used + // a producer ID that is not tied to the transactional ID (i.e., mismatch + // from what was returned from InitProducerID). + // + // INVALID_PRODUCER_EPOCH is returned if the requested epoch does not match + // the broker epoch for this transactional ID. + // + // CONCURRENT_TRANSACTIONS is returned if there is an ongoing transaction for + // this transactional ID, if the producer ID and epoch matches the broker's. + // + // INVALID_TXN_STATE is returned if this request is attempted at the wrong + // time (given the order of how transaction requests should go). + ErrorCode int16 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v3+ +} + +func (*EndTxnResponse) Key() int16 { return 26 } +func (*EndTxnResponse) MaxVersion() int16 { return 3 } +func (v *EndTxnResponse) SetVersion(version int16) { v.Version = version } +func (v *EndTxnResponse) GetVersion() int16 { return v.Version } +func (v *EndTxnResponse) IsFlexible() bool { return v.Version >= 3 } +func (v *EndTxnResponse) Throttle() (int32, bool) { return v.ThrottleMillis, v.Version >= 1 } +func (v *EndTxnResponse) SetThrottle(throttleMillis int32) { v.ThrottleMillis = throttleMillis } +func (v *EndTxnResponse) RequestKind() Request { return &EndTxnRequest{Version: v.Version} } + +func (v *EndTxnResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 3 + _ = isFlexible + { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *EndTxnResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *EndTxnResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *EndTxnResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 3 + _ = isFlexible + s := v + { + v := b.Int32() + s.ThrottleMillis = v + } + { + v := b.Int16() + s.ErrorCode = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrEndTxnResponse returns a pointer to a default EndTxnResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrEndTxnResponse() *EndTxnResponse { + var v EndTxnResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to EndTxnResponse. +func (v *EndTxnResponse) Default() { +} + +// NewEndTxnResponse returns a default EndTxnResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewEndTxnResponse() EndTxnResponse { + var v EndTxnResponse + v.Default() + return v +} + +type WriteTxnMarkersRequestMarkerTopic struct { + // Topic is the name of the topic to write markers for. + Topic string + + // Partitions contains partitions to write markers for. 
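The generated EndTxn types above are plain structs that are normally driven for you by the kgo client's transaction machinery; when issued by hand they go through RequestWith with any Requestor, and *kgo.Client satisfies that interface. The following is a minimal sketch only, not part of this patch: the broker address, transactional ID, and producer ID/epoch are placeholders that would really come from your configuration and from InitProducerID.

    package main

    import (
        "context"
        "fmt"

        "github.com/twmb/franz-go/pkg/kerr"
        "github.com/twmb/franz-go/pkg/kgo"
        "github.com/twmb/franz-go/pkg/kmsg"
    )

    func main() {
        // *kgo.Client implements the Requestor interface that RequestWith expects.
        cl, err := kgo.NewClient(kgo.SeedBrokers("localhost:9092")) // placeholder broker
        if err != nil {
            panic(err)
        }
        defer cl.Close()

        req := kmsg.NewPtrEndTxnRequest()
        req.TransactionalID = "example-txn" // placeholder; tied to the producer's txn ID
        req.ProducerID = 123                // placeholder; returned by InitProducerID
        req.ProducerEpoch = 0               // placeholder; returned by InitProducerID
        req.Commit = true                   // true commits the transaction, false aborts it

        resp, err := req.RequestWith(context.Background(), cl)
        if err != nil {
            panic(err) // transport-level failure
        }
        // Protocol-level failures come back as an error code on the response.
        if err := kerr.ErrorForCode(resp.ErrorCode); err != nil {
            fmt.Println("EndTxn failed:", err)
        }
    }

In practice the kgo producer ends transactions itself; building kmsg requests directly like this is mainly useful for tooling and debugging.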
+ Partitions []int32 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v1+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to WriteTxnMarkersRequestMarkerTopic. +func (v *WriteTxnMarkersRequestMarkerTopic) Default() { +} + +// NewWriteTxnMarkersRequestMarkerTopic returns a default WriteTxnMarkersRequestMarkerTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewWriteTxnMarkersRequestMarkerTopic() WriteTxnMarkersRequestMarkerTopic { + var v WriteTxnMarkersRequestMarkerTopic + v.Default() + return v +} + +type WriteTxnMarkersRequestMarker struct { + // ProducerID is the current producer ID to use when writing a marker. + ProducerID int64 + + // ProducerEpoch is the current producer epoch to use when writing a + // marker. + ProducerEpoch int16 + + // Committed is true if this marker is for a committed transaction, + // otherwise false if this is for an aborted transaction. + Committed bool + + // Topics contains the topics we are writing markers for. + Topics []WriteTxnMarkersRequestMarkerTopic + + // CoordinatorEpoch is the current epoch of the transaction coordinator we + // are writing a marker to. This is used to detect fenced writers. + CoordinatorEpoch int32 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v1+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to WriteTxnMarkersRequestMarker. +func (v *WriteTxnMarkersRequestMarker) Default() { +} + +// NewWriteTxnMarkersRequestMarker returns a default WriteTxnMarkersRequestMarker +// This is a shortcut for creating a struct and calling Default yourself. +func NewWriteTxnMarkersRequestMarker() WriteTxnMarkersRequestMarker { + var v WriteTxnMarkersRequestMarker + v.Default() + return v +} + +// WriteTxnMarkersRequest is a broker-to-broker request that Kafka uses to +// finish transactions. +type WriteTxnMarkersRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // Markers contains transactional markers to be written. + Markers []WriteTxnMarkersRequestMarker + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v1+ +} + +func (*WriteTxnMarkersRequest) Key() int16 { return 27 } +func (*WriteTxnMarkersRequest) MaxVersion() int16 { return 1 } +func (v *WriteTxnMarkersRequest) SetVersion(version int16) { v.Version = version } +func (v *WriteTxnMarkersRequest) GetVersion() int16 { return v.Version } +func (v *WriteTxnMarkersRequest) IsFlexible() bool { return v.Version >= 1 } +func (v *WriteTxnMarkersRequest) ResponseKind() Response { + r := &WriteTxnMarkersResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. 
+func (v *WriteTxnMarkersRequest) RequestWith(ctx context.Context, r Requestor) (*WriteTxnMarkersResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*WriteTxnMarkersResponse) + return resp, err +} + +func (v *WriteTxnMarkersRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 1 + _ = isFlexible + { + v := v.Markers + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.ProducerID + dst = kbin.AppendInt64(dst, v) + } + { + v := v.ProducerEpoch + dst = kbin.AppendInt16(dst, v) + } + { + v := v.Committed + dst = kbin.AppendBool(dst, v) + } + { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Partitions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := v[i] + dst = kbin.AppendInt32(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + { + v := v.CoordinatorEpoch + dst = kbin.AppendInt32(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *WriteTxnMarkersRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *WriteTxnMarkersRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *WriteTxnMarkersRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 1 + _ = isFlexible + s := v + { + v := s.Markers + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]WriteTxnMarkersRequestMarker, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int64() + s.ProducerID = v + } + { + v := b.Int16() + s.ProducerEpoch = v + } + { + v := b.Bool() + s.Committed = v + } + { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]WriteTxnMarkersRequestMarkerTopic, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + { + v := s.Partitions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]int32, l)...) 
+ } + for i := int32(0); i < l; i++ { + v := b.Int32() + a[i] = v + } + v = a + s.Partitions = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + { + v := b.Int32() + s.CoordinatorEpoch = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Markers = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrWriteTxnMarkersRequest returns a pointer to a default WriteTxnMarkersRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrWriteTxnMarkersRequest() *WriteTxnMarkersRequest { + var v WriteTxnMarkersRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to WriteTxnMarkersRequest. +func (v *WriteTxnMarkersRequest) Default() { +} + +// NewWriteTxnMarkersRequest returns a default WriteTxnMarkersRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewWriteTxnMarkersRequest() WriteTxnMarkersRequest { + var v WriteTxnMarkersRequest + v.Default() + return v +} + +type WriteTxnMarkersResponseMarkerTopicPartition struct { + // Partition is the partition this result is for. + Partition int32 + + // ErrorCode is non-nil if writing the transansactional marker for this + // partition errored. + // + // CLUSTER_AUTHORIZATION_FAILED is returned if the user does not have + // CLUSTER_ACTION on CLUSTER. + // + // NOT_LEADER_OR_FOLLOWER is returned if the broker receiving this + // request is not the leader of the partition. + // + // UNKNOWN_TOPIC_OR_PARTITION is returned if the topic or partition is + // not known to exist. + // + // INVALID_PRODUCER_EPOCH is returned if the cluster epoch is provided + // and the provided epoch does not match. + ErrorCode int16 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v1+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to WriteTxnMarkersResponseMarkerTopicPartition. +func (v *WriteTxnMarkersResponseMarkerTopicPartition) Default() { +} + +// NewWriteTxnMarkersResponseMarkerTopicPartition returns a default WriteTxnMarkersResponseMarkerTopicPartition +// This is a shortcut for creating a struct and calling Default yourself. +func NewWriteTxnMarkersResponseMarkerTopicPartition() WriteTxnMarkersResponseMarkerTopicPartition { + var v WriteTxnMarkersResponseMarkerTopicPartition + v.Default() + return v +} + +type WriteTxnMarkersResponseMarkerTopic struct { + // Topic is the topic these results are for. + Topic string + + // Partitions contains per-partition results for the write markers + // request. + Partitions []WriteTxnMarkersResponseMarkerTopicPartition + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v1+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to WriteTxnMarkersResponseMarkerTopic. +func (v *WriteTxnMarkersResponseMarkerTopic) Default() { +} + +// NewWriteTxnMarkersResponseMarkerTopic returns a default WriteTxnMarkersResponseMarkerTopic +// This is a shortcut for creating a struct and calling Default yourself. 
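WriteTxnMarkers is a broker-to-broker request, so clients of this library should never need to send it; the short sketch below (with made-up IDs and a made-up topic name) only illustrates how the nested marker/topic/partition structures compose and how a request body is serialized.

    package main

    import (
        "fmt"

        "github.com/twmb/franz-go/pkg/kmsg"
    )

    func main() {
        topic := kmsg.NewWriteTxnMarkersRequestMarkerTopic()
        topic.Topic = "example-topic" // placeholder topic
        topic.Partitions = []int32{0, 1, 2}

        marker := kmsg.NewWriteTxnMarkersRequestMarker()
        marker.ProducerID = 123     // placeholder producer ID
        marker.ProducerEpoch = 0    // placeholder producer epoch
        marker.Committed = true     // a commit marker; false would mean abort
        marker.CoordinatorEpoch = 5 // placeholder coordinator epoch
        marker.Topics = []kmsg.WriteTxnMarkersRequestMarkerTopic{topic}

        req := kmsg.NewPtrWriteTxnMarkersRequest()
        req.Markers = []kmsg.WriteTxnMarkersRequestMarker{marker}

        // AppendTo encodes the request body using the wire logic shown above
        // (version 0 here, so the non-flexible encoding paths are taken).
        fmt.Printf("encoded %d body bytes\n", len(req.AppendTo(nil)))
    }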
+func NewWriteTxnMarkersResponseMarkerTopic() WriteTxnMarkersResponseMarkerTopic { + var v WriteTxnMarkersResponseMarkerTopic + v.Default() + return v +} + +type WriteTxnMarkersResponseMarker struct { + // ProducerID is the producer ID these results are for (from the input + // request). + ProducerID int64 + + // Topics contains the results for the write markers request. + Topics []WriteTxnMarkersResponseMarkerTopic + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v1+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to WriteTxnMarkersResponseMarker. +func (v *WriteTxnMarkersResponseMarker) Default() { +} + +// NewWriteTxnMarkersResponseMarker returns a default WriteTxnMarkersResponseMarker +// This is a shortcut for creating a struct and calling Default yourself. +func NewWriteTxnMarkersResponseMarker() WriteTxnMarkersResponseMarker { + var v WriteTxnMarkersResponseMarker + v.Default() + return v +} + +// WriteTxnMarkersResponse is a response to a WriteTxnMarkersRequest. +type WriteTxnMarkersResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // Markers contains results for writing transactional markers. + Markers []WriteTxnMarkersResponseMarker + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v1+ +} + +func (*WriteTxnMarkersResponse) Key() int16 { return 27 } +func (*WriteTxnMarkersResponse) MaxVersion() int16 { return 1 } +func (v *WriteTxnMarkersResponse) SetVersion(version int16) { v.Version = version } +func (v *WriteTxnMarkersResponse) GetVersion() int16 { return v.Version } +func (v *WriteTxnMarkersResponse) IsFlexible() bool { return v.Version >= 1 } +func (v *WriteTxnMarkersResponse) RequestKind() Request { + return &WriteTxnMarkersRequest{Version: v.Version} +} + +func (v *WriteTxnMarkersResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 1 + _ = isFlexible + { + v := v.Markers + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.ProducerID + dst = kbin.AppendInt64(dst, v) + } + { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Partitions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Partition + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *WriteTxnMarkersResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func 
(v *WriteTxnMarkersResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *WriteTxnMarkersResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 1 + _ = isFlexible + s := v + { + v := s.Markers + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]WriteTxnMarkersResponseMarker, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int64() + s.ProducerID = v + } + { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]WriteTxnMarkersResponseMarkerTopic, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + { + v := s.Partitions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]WriteTxnMarkersResponseMarkerTopicPartition, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int32() + s.Partition = v + } + { + v := b.Int16() + s.ErrorCode = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Partitions = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Markers = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrWriteTxnMarkersResponse returns a pointer to a default WriteTxnMarkersResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrWriteTxnMarkersResponse() *WriteTxnMarkersResponse { + var v WriteTxnMarkersResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to WriteTxnMarkersResponse. +func (v *WriteTxnMarkersResponse) Default() { +} + +// NewWriteTxnMarkersResponse returns a default WriteTxnMarkersResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewWriteTxnMarkersResponse() WriteTxnMarkersResponse { + var v WriteTxnMarkersResponse + v.Default() + return v +} + +type TxnOffsetCommitRequestTopicPartition struct { + // Partition is a partition to add for a pending commit. + Partition int32 + + // Offset is the offset within partition to commit once EndTxnRequest is + // called (with commit; abort obviously aborts). + Offset int64 + + // LeaderEpoch, proposed in KIP-320 and introduced in Kafka 2.1.0, + // allows brokers to check if the client is fenced (has an out of date + // leader) or is using an unknown leader. + // + // The initial leader epoch can be determined from a MetadataResponse. + // To skip log truncation checking, use -1. + // + // This field has a default of -1. + LeaderEpoch int32 // v2+ + + // Metadata is optional metadata the client wants to include with this + // commit. 
+ Metadata *string + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v3+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to TxnOffsetCommitRequestTopicPartition. +func (v *TxnOffsetCommitRequestTopicPartition) Default() { + v.LeaderEpoch = -1 +} + +// NewTxnOffsetCommitRequestTopicPartition returns a default TxnOffsetCommitRequestTopicPartition +// This is a shortcut for creating a struct and calling Default yourself. +func NewTxnOffsetCommitRequestTopicPartition() TxnOffsetCommitRequestTopicPartition { + var v TxnOffsetCommitRequestTopicPartition + v.Default() + return v +} + +type TxnOffsetCommitRequestTopic struct { + // Topic is a topic to add for a pending commit. + Topic string + + // Partitions are partitions to add for pending commits. + Partitions []TxnOffsetCommitRequestTopicPartition + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v3+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to TxnOffsetCommitRequestTopic. +func (v *TxnOffsetCommitRequestTopic) Default() { +} + +// NewTxnOffsetCommitRequestTopic returns a default TxnOffsetCommitRequestTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewTxnOffsetCommitRequestTopic() TxnOffsetCommitRequestTopic { + var v TxnOffsetCommitRequestTopic + v.Default() + return v +} + +// TxnOffsetCommitRequest sends offsets that are a part of this transaction +// to be committed once the transaction itself finishes. This effectively +// replaces OffsetCommitRequest for when using transactions. +type TxnOffsetCommitRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // TransactionalID is the transactional ID to use for this request. + TransactionalID string + + // Group is the group consumed in this transaction and to be used for + // committing. + Group string + + // ProducerID is the producer ID of the client for this transactional ID + // as received from InitProducerID. + ProducerID int64 + + // ProducerEpoch is the producer epoch of the client for this transactional ID + // as received from InitProducerID. + ProducerEpoch int16 + + // Generation is the group generation this transactional offset commit request is for. + // + // This field has a default of -1. + Generation int32 // v3+ + + // MemberID is the member ID this member is for. + MemberID string // v3+ + + // InstanceID is the instance ID of this member in the group (KIP-345, KIP-447). + InstanceID *string // v3+ + + // Topics are topics to add for pending commits. + Topics []TxnOffsetCommitRequestTopic + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v3+ +} + +func (*TxnOffsetCommitRequest) Key() int16 { return 28 } +func (*TxnOffsetCommitRequest) MaxVersion() int16 { return 3 } +func (v *TxnOffsetCommitRequest) SetVersion(version int16) { v.Version = version } +func (v *TxnOffsetCommitRequest) GetVersion() int16 { return v.Version } +func (v *TxnOffsetCommitRequest) IsFlexible() bool { return v.Version >= 3 } +func (v *TxnOffsetCommitRequest) IsGroupCoordinatorRequest() {} +func (v *TxnOffsetCommitRequest) ResponseKind() Response { + r := &TxnOffsetCommitResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. 
+// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. +func (v *TxnOffsetCommitRequest) RequestWith(ctx context.Context, r Requestor) (*TxnOffsetCommitResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*TxnOffsetCommitResponse) + return resp, err +} + +func (v *TxnOffsetCommitRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 3 + _ = isFlexible + { + v := v.TransactionalID + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Group + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.ProducerID + dst = kbin.AppendInt64(dst, v) + } + { + v := v.ProducerEpoch + dst = kbin.AppendInt16(dst, v) + } + if version >= 3 { + v := v.Generation + dst = kbin.AppendInt32(dst, v) + } + if version >= 3 { + v := v.MemberID + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + if version >= 3 { + v := v.InstanceID + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Partitions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Partition + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Offset + dst = kbin.AppendInt64(dst, v) + } + if version >= 2 { + v := v.LeaderEpoch + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Metadata + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *TxnOffsetCommitRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *TxnOffsetCommitRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *TxnOffsetCommitRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 3 + _ = isFlexible + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.TransactionalID = v + } + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Group = v + } + { + v := b.Int64() + s.ProducerID = v + } + { + v := b.Int16() + s.ProducerEpoch = v + } + if version >= 3 { 
+ v := b.Int32() + s.Generation = v + } + if version >= 3 { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.MemberID = v + } + if version >= 3 { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.InstanceID = v + } + { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]TxnOffsetCommitRequestTopic, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + { + v := s.Partitions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]TxnOffsetCommitRequestTopicPartition, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int32() + s.Partition = v + } + { + v := b.Int64() + s.Offset = v + } + if version >= 2 { + v := b.Int32() + s.LeaderEpoch = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.Metadata = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Partitions = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrTxnOffsetCommitRequest returns a pointer to a default TxnOffsetCommitRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrTxnOffsetCommitRequest() *TxnOffsetCommitRequest { + var v TxnOffsetCommitRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to TxnOffsetCommitRequest. +func (v *TxnOffsetCommitRequest) Default() { + v.Generation = -1 +} + +// NewTxnOffsetCommitRequest returns a default TxnOffsetCommitRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewTxnOffsetCommitRequest() TxnOffsetCommitRequest { + var v TxnOffsetCommitRequest + v.Default() + return v +} + +type TxnOffsetCommitResponseTopicPartition struct { + // Partition is the partition this response is for. + Partition int32 + + // ErrorCode is any error for this topic/partition commit. + // + // TRANSACTIONAL_ID_AUTHORIZATION_FAILED is returned if the client is + // not authorized for write with transactional IDs with the requested + // transactional ID. + // + // GROUP_AUTHORIZATION_FAILED is returned if the client is not authorized + // to read group with the requested group id. + // + // TOPIC_AUTHORIZATION_FAILED is returned for all topics that the client + // is not authorized to read. 
+ // + // UNKNOWN_TOPIC_OR_PARTITION is returned for all topics or partitions + // that the broker does not know of. + // + // INVALID_GROUP_ID is returned if the requested group does not exist. + // + // COORDINATOR_NOT_AVAILABLE is returned if the broker is not yet fully + // started or is shutting down, or if the group was just deleted or is + // migrating to another broker. + // + // COORDINATOR_LOAD_IN_PROGRESS is returned if the group is still loading. + // + // NOT_COORDINATOR is returned if the broker is not the coordinator for + // the group. + // + // FENCED_INSTANCE_ID is returned if the member is fenced (another newer + // transactional member is using the same instance ID). + // + // UNKNOWN_MEMBER_ID is returned if the consumer group does not know of + // this member. + // + // ILLEGAL_GENERATION is returned if the consumer group's generation is + // different than the requested generation. + // + // OFFSET_METADATA_TOO_LARGE is returned if the commit metadata is too + // large. + // + // REBALANCE_IN_PROGRESS is returned if the group is completing a rebalance. + ErrorCode int16 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v3+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to TxnOffsetCommitResponseTopicPartition. +func (v *TxnOffsetCommitResponseTopicPartition) Default() { +} + +// NewTxnOffsetCommitResponseTopicPartition returns a default TxnOffsetCommitResponseTopicPartition +// This is a shortcut for creating a struct and calling Default yourself. +func NewTxnOffsetCommitResponseTopicPartition() TxnOffsetCommitResponseTopicPartition { + var v TxnOffsetCommitResponseTopicPartition + v.Default() + return v +} + +type TxnOffsetCommitResponseTopic struct { + // Topic is the topic this response is for. + Topic string + + // Partitions contains responses to the partitions in this topic. + Partitions []TxnOffsetCommitResponseTopicPartition + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v3+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to TxnOffsetCommitResponseTopic. +func (v *TxnOffsetCommitResponseTopic) Default() { +} + +// NewTxnOffsetCommitResponseTopic returns a default TxnOffsetCommitResponseTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewTxnOffsetCommitResponseTopic() TxnOffsetCommitResponseTopic { + var v TxnOffsetCommitResponseTopic + v.Default() + return v +} + +// TxnOffsetCommitResponse is a response to a TxnOffsetCommitRequest. +type TxnOffsetCommitResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ThrottleMillis is how long of a throttle Kafka will apply to the client + // after this request. + // For Kafka < 2.0.0, the throttle is applied before issuing a response. + // For Kafka >= 2.0.0, the throttle is applied after issuing a response. + // + // This request switched at version 1. + ThrottleMillis int32 + + // Topics contains responses to the topics in the request. + Topics []TxnOffsetCommitResponseTopic + + // UnknownTags are tags Kafka sent that we do not know the purpose of. 
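To make the nesting of the TxnOffsetCommit types concrete, here is a minimal sketch that stages a single offset for commit inside a transaction. The group, topic, offset, IDs, and broker address are placeholders; in real use the kgo client fills these in and negotiates the request version for you.

    package main

    import (
        "context"
        "fmt"

        "github.com/twmb/franz-go/pkg/kerr"
        "github.com/twmb/franz-go/pkg/kgo"
        "github.com/twmb/franz-go/pkg/kmsg"
    )

    func main() {
        cl, err := kgo.NewClient(kgo.SeedBrokers("localhost:9092")) // placeholder broker
        if err != nil {
            panic(err)
        }
        defer cl.Close()

        part := kmsg.NewTxnOffsetCommitRequestTopicPartition()
        part.Partition = 0
        part.Offset = 42 // placeholder; committed only once EndTxn commits

        topic := kmsg.NewTxnOffsetCommitRequestTopic()
        topic.Topic = "example-topic" // placeholder topic
        topic.Partitions = []kmsg.TxnOffsetCommitRequestTopicPartition{part}

        req := kmsg.NewPtrTxnOffsetCommitRequest()
        req.TransactionalID = "example-txn" // placeholder transactional ID
        req.Group = "example-group"         // placeholder consumer group
        req.ProducerID = 123                // placeholder; from InitProducerID
        req.ProducerEpoch = 0               // placeholder; from InitProducerID
        req.Topics = []kmsg.TxnOffsetCommitRequestTopic{topic}

        resp, err := req.RequestWith(context.Background(), cl)
        if err != nil {
            panic(err)
        }
        // Errors are reported per partition, using the codes documented above.
        for _, t := range resp.Topics {
            for _, p := range t.Partitions {
                if err := kerr.ErrorForCode(p.ErrorCode); err != nil {
                    fmt.Printf("%s/%d: %v\n", t.Topic, p.Partition, err)
                }
            }
        }
    }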
+ UnknownTags Tags // v3+ +} + +func (*TxnOffsetCommitResponse) Key() int16 { return 28 } +func (*TxnOffsetCommitResponse) MaxVersion() int16 { return 3 } +func (v *TxnOffsetCommitResponse) SetVersion(version int16) { v.Version = version } +func (v *TxnOffsetCommitResponse) GetVersion() int16 { return v.Version } +func (v *TxnOffsetCommitResponse) IsFlexible() bool { return v.Version >= 3 } +func (v *TxnOffsetCommitResponse) Throttle() (int32, bool) { return v.ThrottleMillis, v.Version >= 1 } +func (v *TxnOffsetCommitResponse) SetThrottle(throttleMillis int32) { + v.ThrottleMillis = throttleMillis +} + +func (v *TxnOffsetCommitResponse) RequestKind() Request { + return &TxnOffsetCommitRequest{Version: v.Version} +} + +func (v *TxnOffsetCommitResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 3 + _ = isFlexible + { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Partitions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Partition + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *TxnOffsetCommitResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *TxnOffsetCommitResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *TxnOffsetCommitResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 3 + _ = isFlexible + s := v + { + v := b.Int32() + s.ThrottleMillis = v + } + { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]TxnOffsetCommitResponseTopic, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + { + v := s.Partitions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]TxnOffsetCommitResponseTopicPartition, l)...) 
+ } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int32() + s.Partition = v + } + { + v := b.Int16() + s.ErrorCode = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Partitions = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrTxnOffsetCommitResponse returns a pointer to a default TxnOffsetCommitResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrTxnOffsetCommitResponse() *TxnOffsetCommitResponse { + var v TxnOffsetCommitResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to TxnOffsetCommitResponse. +func (v *TxnOffsetCommitResponse) Default() { +} + +// NewTxnOffsetCommitResponse returns a default TxnOffsetCommitResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewTxnOffsetCommitResponse() TxnOffsetCommitResponse { + var v TxnOffsetCommitResponse + v.Default() + return v +} + +// DescribeACLsRequest describes ACLs. Describing ACLs works on a filter basis: +// anything that matches the filter is described. Note that there are two +// "types" of filters in this request: the resource filter and the entry +// filter, with entries corresponding to users. The first three fields form the +// resource filter, the last four the entry filter. +type DescribeACLsRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ResourceType is the type of resource to describe. + ResourceType ACLResourceType + + // ResourceName is the name to filter out. For the CLUSTER resource type, + // this must be "kafka-cluster". + ResourceName *string + + // ResourcePatternType is how ResourceName is understood. + // + // This field has a default of 3. + ResourcePatternType ACLResourcePatternType // v1+ + + // Principal is the user to filter for. In Kafka with the simple authorizor, + // all principals begin with "User:". Pluggable authorizors are allowed, but + // Kafka still expects principals to lead with a principal type ("User") and + // have a colon separating the principal name ("bob" in "User:bob"). + Principal *string + + // Host is a host to filter for. + Host *string + + // Operation is an operation to filter for. + // + // Note that READ, WRITE, DELETE, and ALTER imply DESCRIBE, and ALTER_CONFIGS + // implies DESCRIBE_CONFIGS. + Operation ACLOperation + + // PermissionType is the permission type to filter for. UNKNOWN is 0. + PermissionType ACLPermissionType + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +func (*DescribeACLsRequest) Key() int16 { return 29 } +func (*DescribeACLsRequest) MaxVersion() int16 { return 3 } +func (v *DescribeACLsRequest) SetVersion(version int16) { v.Version = version } +func (v *DescribeACLsRequest) GetVersion() int16 { return v.Version } +func (v *DescribeACLsRequest) IsFlexible() bool { return v.Version >= 2 } +func (v *DescribeACLsRequest) ResponseKind() Response { + r := &DescribeACLsResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. 
+// It is better to rely on client.RequestSharded than to rely on proper merging behavior. +func (v *DescribeACLsRequest) RequestWith(ctx context.Context, r Requestor) (*DescribeACLsResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*DescribeACLsResponse) + return resp, err +} + +func (v *DescribeACLsRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + { + v := v.ResourceType + { + v := int8(v) + dst = kbin.AppendInt8(dst, v) + } + } + { + v := v.ResourceName + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + if version >= 1 { + v := v.ResourcePatternType + { + v := int8(v) + dst = kbin.AppendInt8(dst, v) + } + } + { + v := v.Principal + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + { + v := v.Host + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + { + v := v.Operation + { + v := int8(v) + dst = kbin.AppendInt8(dst, v) + } + } + { + v := v.PermissionType + { + v := int8(v) + dst = kbin.AppendInt8(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *DescribeACLsRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *DescribeACLsRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *DescribeACLsRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + s := v + { + var t ACLResourceType + { + v := b.Int8() + t = ACLResourceType(v) + } + v := t + s.ResourceType = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.ResourceName = v + } + if version >= 1 { + var t ACLResourcePatternType + { + v := b.Int8() + t = ACLResourcePatternType(v) + } + v := t + s.ResourcePatternType = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.Principal = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.Host = v + } + { + var t ACLOperation + { + v := b.Int8() + t = ACLOperation(v) + } + v := t + s.Operation = v + } + { + var t ACLPermissionType + { + v := b.Int8() + t = ACLPermissionType(v) + } + v := t + s.PermissionType = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrDescribeACLsRequest returns a pointer to a default DescribeACLsRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrDescribeACLsRequest() *DescribeACLsRequest { + var v DescribeACLsRequest + v.Default() + return &v +} + +// Default sets any default fields. 
Calling this allows for future compatibility +// if new fields are added to DescribeACLsRequest. +func (v *DescribeACLsRequest) Default() { + v.ResourcePatternType = 3 +} + +// NewDescribeACLsRequest returns a default DescribeACLsRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewDescribeACLsRequest() DescribeACLsRequest { + var v DescribeACLsRequest + v.Default() + return v +} + +type DescribeACLsResponseResourceACL struct { + // Principal is who this ACL applies to. + Principal string + + // Host is on which host this ACL applies. + Host string + + // Operation is the operation being described. + Operation ACLOperation + + // PermissionType is the permission being described. + PermissionType ACLPermissionType + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DescribeACLsResponseResourceACL. +func (v *DescribeACLsResponseResourceACL) Default() { +} + +// NewDescribeACLsResponseResourceACL returns a default DescribeACLsResponseResourceACL +// This is a shortcut for creating a struct and calling Default yourself. +func NewDescribeACLsResponseResourceACL() DescribeACLsResponseResourceACL { + var v DescribeACLsResponseResourceACL + v.Default() + return v +} + +type DescribeACLsResponseResource struct { + // ResourceType is the resource type being described. + ResourceType ACLResourceType + + // ResourceName is the resource name being described. + ResourceName string + + // ResourcePatternType is the pattern type being described. + // + // This field has a default of 3. + ResourcePatternType ACLResourcePatternType // v1+ + + // ACLs contains users / entries being described. + ACLs []DescribeACLsResponseResourceACL + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DescribeACLsResponseResource. +func (v *DescribeACLsResponseResource) Default() { + v.ResourcePatternType = 3 +} + +// NewDescribeACLsResponseResource returns a default DescribeACLsResponseResource +// This is a shortcut for creating a struct and calling Default yourself. +func NewDescribeACLsResponseResource() DescribeACLsResponseResource { + var v DescribeACLsResponseResource + v.Default() + return v +} + +// DescribeACLsResponse is a response to a describe acls request. +type DescribeACLsResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ThrottleMillis is how long of a throttle Kafka will apply to the client + // after this request. + // For Kafka < 2.0.0, the throttle is applied before issuing a response. + // For Kafka >= 2.0.0, the throttle is applied after issuing a response. + // + // This request switched at version 1. + ThrottleMillis int32 + + // ErrorCode is the error code returned on request failure. + // + // SECURITY_DISABLED is returned if there is no authorizer configured on the + // broker. + // + // There can be other authorization failures. + ErrorCode int16 + + // ErrorMessage is a message for an error. + ErrorMessage *string + + // Resources are the describe resources. + Resources []DescribeACLsResponseResource + + // UnknownTags are tags Kafka sent that we do not know the purpose of. 
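Since DescribeACLs is driven entirely by its filter fields, a small sketch helps show which fields act as wildcards. The example below lists every LITERAL topic ACL by leaving ResourceName, Principal, and Host as nil (match anything). The ACL enum constants used here (ACLResourceTypeTopic, ACLOperationAny, and so on) are the generated kmsg enums defined elsewhere in this package, and the broker address is a placeholder.

    package main

    import (
        "context"
        "fmt"

        "github.com/twmb/franz-go/pkg/kerr"
        "github.com/twmb/franz-go/pkg/kgo"
        "github.com/twmb/franz-go/pkg/kmsg"
    )

    func main() {
        cl, err := kgo.NewClient(kgo.SeedBrokers("localhost:9092")) // placeholder broker
        if err != nil {
            panic(err)
        }
        defer cl.Close()

        req := kmsg.NewPtrDescribeACLsRequest()
        req.ResourceType = kmsg.ACLResourceTypeTopic                 // topic ACLs only
        req.ResourcePatternType = kmsg.ACLResourcePatternTypeLiteral // literal names
        req.Operation = kmsg.ACLOperationAny                         // any operation
        req.PermissionType = kmsg.ACLPermissionTypeAny               // allow and deny
        // ResourceName, Principal, and Host are left nil, i.e. match anything.

        resp, err := req.RequestWith(context.Background(), cl)
        if err != nil {
            panic(err)
        }
        if err := kerr.ErrorForCode(resp.ErrorCode); err != nil {
            panic(err) // e.g. SECURITY_DISABLED if no authorizer is configured
        }
        for _, res := range resp.Resources {
            for _, acl := range res.ACLs {
                fmt.Printf("%v %q: %v/%v for %s from %s\n",
                    res.ResourceType, res.ResourceName,
                    acl.Operation, acl.PermissionType, acl.Principal, acl.Host)
            }
        }
    }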
+ UnknownTags Tags // v2+ +} + +func (*DescribeACLsResponse) Key() int16 { return 29 } +func (*DescribeACLsResponse) MaxVersion() int16 { return 3 } +func (v *DescribeACLsResponse) SetVersion(version int16) { v.Version = version } +func (v *DescribeACLsResponse) GetVersion() int16 { return v.Version } +func (v *DescribeACLsResponse) IsFlexible() bool { return v.Version >= 2 } +func (v *DescribeACLsResponse) Throttle() (int32, bool) { return v.ThrottleMillis, v.Version >= 1 } +func (v *DescribeACLsResponse) SetThrottle(throttleMillis int32) { v.ThrottleMillis = throttleMillis } +func (v *DescribeACLsResponse) RequestKind() Request { return &DescribeACLsRequest{Version: v.Version} } + +func (v *DescribeACLsResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.ErrorMessage + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + { + v := v.Resources + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.ResourceType + { + v := int8(v) + dst = kbin.AppendInt8(dst, v) + } + } + { + v := v.ResourceName + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + if version >= 1 { + v := v.ResourcePatternType + { + v := int8(v) + dst = kbin.AppendInt8(dst, v) + } + } + { + v := v.ACLs + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Principal + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Host + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Operation + { + v := int8(v) + dst = kbin.AppendInt8(dst, v) + } + } + { + v := v.PermissionType + { + v := int8(v) + dst = kbin.AppendInt8(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *DescribeACLsResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *DescribeACLsResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *DescribeACLsResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + s := v + { + v := b.Int32() + s.ThrottleMillis = v + } + { + v := b.Int16() + s.ErrorCode = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.ErrorMessage = v + } + { + v := s.Resources + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return 
b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]DescribeACLsResponseResource, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var t ACLResourceType + { + v := b.Int8() + t = ACLResourceType(v) + } + v := t + s.ResourceType = v + } + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.ResourceName = v + } + if version >= 1 { + var t ACLResourcePatternType + { + v := b.Int8() + t = ACLResourcePatternType(v) + } + v := t + s.ResourcePatternType = v + } + { + v := s.ACLs + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]DescribeACLsResponseResourceACL, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Principal = v + } + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Host = v + } + { + var t ACLOperation + { + v := b.Int8() + t = ACLOperation(v) + } + v := t + s.Operation = v + } + { + var t ACLPermissionType + { + v := b.Int8() + t = ACLPermissionType(v) + } + v := t + s.PermissionType = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.ACLs = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Resources = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrDescribeACLsResponse returns a pointer to a default DescribeACLsResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrDescribeACLsResponse() *DescribeACLsResponse { + var v DescribeACLsResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DescribeACLsResponse. +func (v *DescribeACLsResponse) Default() { +} + +// NewDescribeACLsResponse returns a default DescribeACLsResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewDescribeACLsResponse() DescribeACLsResponse { + var v DescribeACLsResponse + v.Default() + return v +} + +type CreateACLsRequestCreation struct { + // ResourceType is the type of resource this acl entry will be on. + // It is invalid to use UNKNOWN or ANY. + ResourceType ACLResourceType + + // ResourceName is the name of the resource this acl entry will be on. + // For CLUSTER, this must be "kafka-cluster". + ResourceName string + + // ResourcePatternType is the pattern type to use for the resource name. + // This cannot be UNKNOWN or MATCH (i.e. this must be LITERAL or PREFIXED). + // The default for pre-Kafka 2.0.0 is effectively LITERAL. + // + // This field has a default of 3. + ResourcePatternType ACLResourcePatternType // v1+ + + // Principal is the user to apply this acl for. With the Kafka simple + // authorizer, this must begin with "User:". + Principal string + + // Host is the host address to use for this acl. 
Each host to allow + // the principal access from must be specified as a new creation. KIP-252 + // might solve this someday. The special wildcard host "*" allows all hosts. + Host string + + // Operation is the operation this acl is for. This must not be UNKNOWN or + // ANY. + Operation ACLOperation + + // PermissionType is the permission of this acl. This must be either ALLOW + // or DENY. + PermissionType ACLPermissionType + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to CreateACLsRequestCreation. +func (v *CreateACLsRequestCreation) Default() { + v.ResourcePatternType = 3 +} + +// NewCreateACLsRequestCreation returns a default CreateACLsRequestCreation +// This is a shortcut for creating a struct and calling Default yourself. +func NewCreateACLsRequestCreation() CreateACLsRequestCreation { + var v CreateACLsRequestCreation + v.Default() + return v +} + +// CreateACLsRequest creates acls. Creating acls can be done as a batch; each +// "creation" will be an acl entry. +// +// See the DescribeACLsRequest documentation for more descriptions of what +// valid values for the fields in this request are. +type CreateACLsRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + Creations []CreateACLsRequestCreation + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +func (*CreateACLsRequest) Key() int16 { return 30 } +func (*CreateACLsRequest) MaxVersion() int16 { return 3 } +func (v *CreateACLsRequest) SetVersion(version int16) { v.Version = version } +func (v *CreateACLsRequest) GetVersion() int16 { return v.Version } +func (v *CreateACLsRequest) IsFlexible() bool { return v.Version >= 2 } +func (v *CreateACLsRequest) ResponseKind() Response { + r := &CreateACLsResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. 
+func (v *CreateACLsRequest) RequestWith(ctx context.Context, r Requestor) (*CreateACLsResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*CreateACLsResponse) + return resp, err +} + +func (v *CreateACLsRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + { + v := v.Creations + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.ResourceType + { + v := int8(v) + dst = kbin.AppendInt8(dst, v) + } + } + { + v := v.ResourceName + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + if version >= 1 { + v := v.ResourcePatternType + { + v := int8(v) + dst = kbin.AppendInt8(dst, v) + } + } + { + v := v.Principal + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Host + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Operation + { + v := int8(v) + dst = kbin.AppendInt8(dst, v) + } + } + { + v := v.PermissionType + { + v := int8(v) + dst = kbin.AppendInt8(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *CreateACLsRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *CreateACLsRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *CreateACLsRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + s := v + { + v := s.Creations + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]CreateACLsRequestCreation, l)...) 
+ } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var t ACLResourceType + { + v := b.Int8() + t = ACLResourceType(v) + } + v := t + s.ResourceType = v + } + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.ResourceName = v + } + if version >= 1 { + var t ACLResourcePatternType + { + v := b.Int8() + t = ACLResourcePatternType(v) + } + v := t + s.ResourcePatternType = v + } + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Principal = v + } + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Host = v + } + { + var t ACLOperation + { + v := b.Int8() + t = ACLOperation(v) + } + v := t + s.Operation = v + } + { + var t ACLPermissionType + { + v := b.Int8() + t = ACLPermissionType(v) + } + v := t + s.PermissionType = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Creations = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrCreateACLsRequest returns a pointer to a default CreateACLsRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrCreateACLsRequest() *CreateACLsRequest { + var v CreateACLsRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to CreateACLsRequest. +func (v *CreateACLsRequest) Default() { +} + +// NewCreateACLsRequest returns a default CreateACLsRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewCreateACLsRequest() CreateACLsRequest { + var v CreateACLsRequest + v.Default() + return v +} + +type CreateACLsResponseResult struct { + // ErrorCode is an error for this particular creation (index wise). + ErrorCode int16 + + // ErrorMessage is a message for this error. + ErrorMessage *string + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to CreateACLsResponseResult. +func (v *CreateACLsResponseResult) Default() { +} + +// NewCreateACLsResponseResult returns a default CreateACLsResponseResult +// This is a shortcut for creating a struct and calling Default yourself. +func NewCreateACLsResponseResult() CreateACLsResponseResult { + var v CreateACLsResponseResult + v.Default() + return v +} + +// CreateACLsResponse is a response for a CreateACLsRequest. +type CreateACLsResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ThrottleMillis is how long of a throttle Kafka will apply to the client + // after this request. + // For Kafka < 2.0.0, the throttle is applied before issuing a response. + // For Kafka >= 2.0.0, the throttle is applied after issuing a response. + // + // This request switched at version 1. + ThrottleMillis int32 + + // Results contains responses to each creation request. 
+ Results []CreateACLsResponseResult + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +func (*CreateACLsResponse) Key() int16 { return 30 } +func (*CreateACLsResponse) MaxVersion() int16 { return 3 } +func (v *CreateACLsResponse) SetVersion(version int16) { v.Version = version } +func (v *CreateACLsResponse) GetVersion() int16 { return v.Version } +func (v *CreateACLsResponse) IsFlexible() bool { return v.Version >= 2 } +func (v *CreateACLsResponse) Throttle() (int32, bool) { return v.ThrottleMillis, v.Version >= 1 } +func (v *CreateACLsResponse) SetThrottle(throttleMillis int32) { v.ThrottleMillis = throttleMillis } +func (v *CreateACLsResponse) RequestKind() Request { return &CreateACLsRequest{Version: v.Version} } + +func (v *CreateACLsResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Results + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.ErrorMessage + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *CreateACLsResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *CreateACLsResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *CreateACLsResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + s := v + { + v := b.Int32() + s.ThrottleMillis = v + } + { + v := s.Results + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]CreateACLsResponseResult, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int16() + s.ErrorCode = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.ErrorMessage = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Results = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrCreateACLsResponse returns a pointer to a default CreateACLsResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrCreateACLsResponse() *CreateACLsResponse { + var v CreateACLsResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to CreateACLsResponse. +func (v *CreateACLsResponse) Default() { +} + +// NewCreateACLsResponse returns a default CreateACLsResponse +// This is a shortcut for creating a struct and calling Default yourself. 
+func NewCreateACLsResponse() CreateACLsResponse { + var v CreateACLsResponse + v.Default() + return v +} + +type DeleteACLsRequestFilter struct { + ResourceType ACLResourceType + + ResourceName *string + + // This field has a default of 3. + ResourcePatternType ACLResourcePatternType // v1+ + + Principal *string + + Host *string + + Operation ACLOperation + + PermissionType ACLPermissionType + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DeleteACLsRequestFilter. +func (v *DeleteACLsRequestFilter) Default() { + v.ResourcePatternType = 3 +} + +// NewDeleteACLsRequestFilter returns a default DeleteACLsRequestFilter +// This is a shortcut for creating a struct and calling Default yourself. +func NewDeleteACLsRequestFilter() DeleteACLsRequestFilter { + var v DeleteACLsRequestFilter + v.Default() + return v +} + +// DeleteACLsRequest deletes acls. This request works on filters the same way +// that DescribeACLsRequest does. See DescribeACLsRequest for documentation of +// the fields. +type DeleteACLsRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // Filters are filters for acls to delete. + Filters []DeleteACLsRequestFilter + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +func (*DeleteACLsRequest) Key() int16 { return 31 } +func (*DeleteACLsRequest) MaxVersion() int16 { return 3 } +func (v *DeleteACLsRequest) SetVersion(version int16) { v.Version = version } +func (v *DeleteACLsRequest) GetVersion() int16 { return v.Version } +func (v *DeleteACLsRequest) IsFlexible() bool { return v.Version >= 2 } +func (v *DeleteACLsRequest) ResponseKind() Response { + r := &DeleteACLsResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. 
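Because the delete filter fields are nullable, leaving a field nil matches anything. A minimal sketch of deleting all ACLs on one topic, under the same assumptions as the CreateACLs sketch above (kgo client, kmsg and kerr imports, placeholder names):

func deleteTopicACLs(ctx context.Context, cl *kgo.Client, topic string) error {
	filter := kmsg.NewDeleteACLsRequestFilter()
	filter.ResourceType = kmsg.ACLResourceType(2)               // TOPIC
	filter.ResourceName = &topic                                // nil would match any resource name
	filter.ResourcePatternType = kmsg.ACLResourcePatternType(3) // LITERAL
	filter.Operation = kmsg.ACLOperation(1)                     // ANY
	filter.PermissionType = kmsg.ACLPermissionType(1)           // ANY
	// Principal and Host stay nil, matching any principal and any host.

	req := kmsg.NewPtrDeleteACLsRequest()
	req.Filters = append(req.Filters, filter)

	resp, err := req.RequestWith(ctx, cl)
	if err != nil {
		return err
	}
	for _, res := range resp.Results {
		if err := kerr.ErrorForCode(res.ErrorCode); err != nil {
			return err
		}
		fmt.Printf("deleted %d acls for this filter\n", len(res.MatchingACLs))
	}
	return nil
}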
+func (v *DeleteACLsRequest) RequestWith(ctx context.Context, r Requestor) (*DeleteACLsResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*DeleteACLsResponse) + return resp, err +} + +func (v *DeleteACLsRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + { + v := v.Filters + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.ResourceType + { + v := int8(v) + dst = kbin.AppendInt8(dst, v) + } + } + { + v := v.ResourceName + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + if version >= 1 { + v := v.ResourcePatternType + { + v := int8(v) + dst = kbin.AppendInt8(dst, v) + } + } + { + v := v.Principal + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + { + v := v.Host + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + { + v := v.Operation + { + v := int8(v) + dst = kbin.AppendInt8(dst, v) + } + } + { + v := v.PermissionType + { + v := int8(v) + dst = kbin.AppendInt8(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *DeleteACLsRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *DeleteACLsRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *DeleteACLsRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + s := v + { + v := s.Filters + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]DeleteACLsRequestFilter, l)...) 
+ } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var t ACLResourceType + { + v := b.Int8() + t = ACLResourceType(v) + } + v := t + s.ResourceType = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.ResourceName = v + } + if version >= 1 { + var t ACLResourcePatternType + { + v := b.Int8() + t = ACLResourcePatternType(v) + } + v := t + s.ResourcePatternType = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.Principal = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.Host = v + } + { + var t ACLOperation + { + v := b.Int8() + t = ACLOperation(v) + } + v := t + s.Operation = v + } + { + var t ACLPermissionType + { + v := b.Int8() + t = ACLPermissionType(v) + } + v := t + s.PermissionType = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Filters = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrDeleteACLsRequest returns a pointer to a default DeleteACLsRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrDeleteACLsRequest() *DeleteACLsRequest { + var v DeleteACLsRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DeleteACLsRequest. +func (v *DeleteACLsRequest) Default() { +} + +// NewDeleteACLsRequest returns a default DeleteACLsRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewDeleteACLsRequest() DeleteACLsRequest { + var v DeleteACLsRequest + v.Default() + return v +} + +type DeleteACLsResponseResultMatchingACL struct { + // ErrorCode contains an error for this individual acl for this filter. + ErrorCode int16 + + // ErrorMessage is a message for this error. + ErrorMessage *string + + ResourceType ACLResourceType + + ResourceName string + + // This field has a default of 3. + ResourcePatternType ACLResourcePatternType // v1+ + + Principal string + + Host string + + Operation ACLOperation + + PermissionType ACLPermissionType + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DeleteACLsResponseResultMatchingACL. +func (v *DeleteACLsResponseResultMatchingACL) Default() { + v.ResourcePatternType = 3 +} + +// NewDeleteACLsResponseResultMatchingACL returns a default DeleteACLsResponseResultMatchingACL +// This is a shortcut for creating a struct and calling Default yourself. +func NewDeleteACLsResponseResultMatchingACL() DeleteACLsResponseResultMatchingACL { + var v DeleteACLsResponseResultMatchingACL + v.Default() + return v +} + +type DeleteACLsResponseResult struct { + // ErrorCode is the overall error code for this individual filter. + ErrorCode int16 + + // ErrorMessage is a message for this error. 
+ ErrorMessage *string + + // MatchingACLs contains all acls that were matched for this filter. + MatchingACLs []DeleteACLsResponseResultMatchingACL + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DeleteACLsResponseResult. +func (v *DeleteACLsResponseResult) Default() { +} + +// NewDeleteACLsResponseResult returns a default DeleteACLsResponseResult +// This is a shortcut for creating a struct and calling Default yourself. +func NewDeleteACLsResponseResult() DeleteACLsResponseResult { + var v DeleteACLsResponseResult + v.Default() + return v +} + +// DeleteACLsResponse is a response for a DeleteACLsRequest. +type DeleteACLsResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ThrottleMillis is how long of a throttle Kafka will apply to the client + // after this request. + // For Kafka < 2.0.0, the throttle is applied before issuing a response. + // For Kafka >= 2.0.0, the throttle is applied after issuing a response. + // + // This request switched at version 1. + ThrottleMillis int32 + + // Results contains a response to each requested filter. + Results []DeleteACLsResponseResult + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +func (*DeleteACLsResponse) Key() int16 { return 31 } +func (*DeleteACLsResponse) MaxVersion() int16 { return 3 } +func (v *DeleteACLsResponse) SetVersion(version int16) { v.Version = version } +func (v *DeleteACLsResponse) GetVersion() int16 { return v.Version } +func (v *DeleteACLsResponse) IsFlexible() bool { return v.Version >= 2 } +func (v *DeleteACLsResponse) Throttle() (int32, bool) { return v.ThrottleMillis, v.Version >= 1 } +func (v *DeleteACLsResponse) SetThrottle(throttleMillis int32) { v.ThrottleMillis = throttleMillis } +func (v *DeleteACLsResponse) RequestKind() Request { return &DeleteACLsRequest{Version: v.Version} } + +func (v *DeleteACLsResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Results + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.ErrorMessage + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + { + v := v.MatchingACLs + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.ErrorMessage + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + { + v := v.ResourceType + { + v := int8(v) + dst = kbin.AppendInt8(dst, v) + } + } + { + v := v.ResourceName + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + if version >= 1 { + v := v.ResourcePatternType + { + v := int8(v) + dst = kbin.AppendInt8(dst, v) + } + } + { + v := v.Principal + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Host 
+ if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Operation + { + v := int8(v) + dst = kbin.AppendInt8(dst, v) + } + } + { + v := v.PermissionType + { + v := int8(v) + dst = kbin.AppendInt8(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *DeleteACLsResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *DeleteACLsResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *DeleteACLsResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + s := v + { + v := b.Int32() + s.ThrottleMillis = v + } + { + v := s.Results + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]DeleteACLsResponseResult, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int16() + s.ErrorCode = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.ErrorMessage = v + } + { + v := s.MatchingACLs + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]DeleteACLsResponseResultMatchingACL, l)...) 
+ } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int16() + s.ErrorCode = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.ErrorMessage = v + } + { + var t ACLResourceType + { + v := b.Int8() + t = ACLResourceType(v) + } + v := t + s.ResourceType = v + } + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.ResourceName = v + } + if version >= 1 { + var t ACLResourcePatternType + { + v := b.Int8() + t = ACLResourcePatternType(v) + } + v := t + s.ResourcePatternType = v + } + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Principal = v + } + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Host = v + } + { + var t ACLOperation + { + v := b.Int8() + t = ACLOperation(v) + } + v := t + s.Operation = v + } + { + var t ACLPermissionType + { + v := b.Int8() + t = ACLPermissionType(v) + } + v := t + s.PermissionType = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.MatchingACLs = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Results = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrDeleteACLsResponse returns a pointer to a default DeleteACLsResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrDeleteACLsResponse() *DeleteACLsResponse { + var v DeleteACLsResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DeleteACLsResponse. +func (v *DeleteACLsResponse) Default() { +} + +// NewDeleteACLsResponse returns a default DeleteACLsResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewDeleteACLsResponse() DeleteACLsResponse { + var v DeleteACLsResponse + v.Default() + return v +} + +type DescribeConfigsRequestResource struct { + // ResourceType is an enum corresponding to the type of config to describe. + ResourceType ConfigResourceType + + // ResourceName is the name of config to describe. + // + // If the requested type is a topic, this corresponds to a topic name. + // + // If the requested type if a broker, this should either be empty or be + // the ID of the broker this request is issued to. If it is empty, this + // returns all broker configs, but only the dynamic configuration values. + // If a specific ID, this returns all broker config values. + ResourceName string + + // ConfigNames is a list of config entries to return. Null requests all. + ConfigNames []string + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v4+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DescribeConfigsRequestResource. 
+func (v *DescribeConfigsRequestResource) Default() { +} + +// NewDescribeConfigsRequestResource returns a default DescribeConfigsRequestResource +// This is a shortcut for creating a struct and calling Default yourself. +func NewDescribeConfigsRequestResource() DescribeConfigsRequestResource { + var v DescribeConfigsRequestResource + v.Default() + return v +} + +// DescribeConfigsRequest issues a request to describe configs that Kafka +// currently has. These are the key/value pairs that one uses to configure +// brokers and topics. +type DescribeConfigsRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // Resources is a list of resources to describe. + Resources []DescribeConfigsRequestResource + + // IncludeSynonyms signifies whether to return config entry synonyms for + // all config entries. + IncludeSynonyms bool // v1+ + + // IncludeDocumentation signifies whether to return documentation for + // config entries. + IncludeDocumentation bool // v3+ + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v4+ +} + +func (*DescribeConfigsRequest) Key() int16 { return 32 } +func (*DescribeConfigsRequest) MaxVersion() int16 { return 4 } +func (v *DescribeConfigsRequest) SetVersion(version int16) { v.Version = version } +func (v *DescribeConfigsRequest) GetVersion() int16 { return v.Version } +func (v *DescribeConfigsRequest) IsFlexible() bool { return v.Version >= 4 } +func (v *DescribeConfigsRequest) ResponseKind() Response { + r := &DescribeConfigsResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. 
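A sketch of describing every config entry for a single topic, leaving ConfigNames nil to request all entries; the client, topic name, and imports are the same assumptions as in the earlier sketches.

func describeTopicConfigs(ctx context.Context, cl *kgo.Client, topic string) error {
	res := kmsg.NewDescribeConfigsRequestResource()
	res.ResourceType = kmsg.ConfigResourceType(2) // TOPIC
	res.ResourceName = topic
	res.ConfigNames = nil // nil requests all config entries

	req := kmsg.NewPtrDescribeConfigsRequest()
	req.Resources = append(req.Resources, res)
	req.IncludeSynonyms = true // also return fallback values (v1+)

	resp, err := req.RequestWith(ctx, cl)
	if err != nil {
		return err
	}
	for _, r := range resp.Resources {
		if err := kerr.ErrorForCode(r.ErrorCode); err != nil {
			return err
		}
		for _, c := range r.Configs {
			val := "<sensitive or unset>" // sensitive values come back null
			if c.Value != nil {
				val = *c.Value
			}
			fmt.Printf("%s = %s (source %d)\n", c.Name, val, c.Source)
		}
	}
	return nil
}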
+func (v *DescribeConfigsRequest) RequestWith(ctx context.Context, r Requestor) (*DescribeConfigsResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*DescribeConfigsResponse) + return resp, err +} + +func (v *DescribeConfigsRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 4 + _ = isFlexible + { + v := v.Resources + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.ResourceType + { + v := int8(v) + dst = kbin.AppendInt8(dst, v) + } + } + { + v := v.ResourceName + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.ConfigNames + if isFlexible { + dst = kbin.AppendCompactNullableArrayLen(dst, len(v), v == nil) + } else { + dst = kbin.AppendNullableArrayLen(dst, len(v), v == nil) + } + for i := range v { + v := v[i] + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if version >= 1 { + v := v.IncludeSynonyms + dst = kbin.AppendBool(dst, v) + } + if version >= 3 { + v := v.IncludeDocumentation + dst = kbin.AppendBool(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *DescribeConfigsRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *DescribeConfigsRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *DescribeConfigsRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 4 + _ = isFlexible + s := v + { + v := s.Resources + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]DescribeConfigsRequestResource, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var t ConfigResourceType + { + v := b.Int8() + t = ConfigResourceType(v) + } + v := t + s.ResourceType = v + } + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.ResourceName = v + } + { + v := s.ConfigNames + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if version < 0 || l == 0 { + a = []string{} + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]string, l)...) 
+ } + for i := int32(0); i < l; i++ { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + a[i] = v + } + v = a + s.ConfigNames = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Resources = v + } + if version >= 1 { + v := b.Bool() + s.IncludeSynonyms = v + } + if version >= 3 { + v := b.Bool() + s.IncludeDocumentation = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrDescribeConfigsRequest returns a pointer to a default DescribeConfigsRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrDescribeConfigsRequest() *DescribeConfigsRequest { + var v DescribeConfigsRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DescribeConfigsRequest. +func (v *DescribeConfigsRequest) Default() { +} + +// NewDescribeConfigsRequest returns a default DescribeConfigsRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewDescribeConfigsRequest() DescribeConfigsRequest { + var v DescribeConfigsRequest + v.Default() + return v +} + +type DescribeConfigsResponseResourceConfigConfigSynonym struct { + Name string + + Value *string + + Source ConfigSource + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v4+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DescribeConfigsResponseResourceConfigConfigSynonym. +func (v *DescribeConfigsResponseResourceConfigConfigSynonym) Default() { +} + +// NewDescribeConfigsResponseResourceConfigConfigSynonym returns a default DescribeConfigsResponseResourceConfigConfigSynonym +// This is a shortcut for creating a struct and calling Default yourself. +func NewDescribeConfigsResponseResourceConfigConfigSynonym() DescribeConfigsResponseResourceConfigConfigSynonym { + var v DescribeConfigsResponseResourceConfigConfigSynonym + v.Default() + return v +} + +type DescribeConfigsResponseResourceConfig struct { + // Name is a key this entry corresponds to (e.g. segment.bytes). + Name string + + // Value is the value for this config key. If the key is sensitive, + // the value will be null. + Value *string + + // ReadOnly signifies whether this is not a dynamic config option. + // + // Note that this field is not always correct, and you may need to check + // whether the Source is any dynamic enum. See franz-go#91 for more details. + ReadOnly bool + + // IsDefault is whether this is a default config option. This has been + // replaced in favor of Source. + IsDefault bool + + // Source is where this config entry is from. + // + // This field has a default of -1. + Source ConfigSource // v1+ + + // IsSensitive signifies whether this is a sensitive config key, which + // is either a password or an unknown type. + IsSensitive bool + + // ConfigSynonyms contains fallback key/value pairs for this config + // entry, in order of preference. That is, if a config entry is both + // dynamically configured and has a default, the top level return will be + // the dynamic configuration, while its "synonym" will be the default. + ConfigSynonyms []DescribeConfigsResponseResourceConfigConfigSynonym // v1+ + + // ConfigType specifies the configuration data type. 
+ ConfigType ConfigType // v3+ + + // Documentation is optional documentation for the config entry. + Documentation *string // v3+ + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v4+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DescribeConfigsResponseResourceConfig. +func (v *DescribeConfigsResponseResourceConfig) Default() { + v.Source = -1 +} + +// NewDescribeConfigsResponseResourceConfig returns a default DescribeConfigsResponseResourceConfig +// This is a shortcut for creating a struct and calling Default yourself. +func NewDescribeConfigsResponseResourceConfig() DescribeConfigsResponseResourceConfig { + var v DescribeConfigsResponseResourceConfig + v.Default() + return v +} + +type DescribeConfigsResponseResource struct { + // ErrorCode is the error code returned for describing configs. + // + // INVALID_REQUEST is returned if asking to descibe an invalid resource + // type. + // + // CLUSTER_AUTHORIZATION_FAILED is returned if asking to describe broker + // configs but the client is not authorized to do so. + // + // TOPIC_AUTHORIZATION_FAILED is returned if asking to describe topic + // configs but the client is not authorized to do so. + // + // INVALID_TOPIC_EXCEPTION is returned if the requested topic was invalid. + // + // UNKNOWN_TOPIC_OR_PARTITION is returned if the broker does not know of + // the requested topic. + ErrorCode int16 + + // ErrorMessage is an informative message if the describe config failed. + ErrorMessage *string + + // ResourceType is the enum corresponding to the type of described config. + ResourceType ConfigResourceType + + // ResourceName is the name corresponding to the describe config request. + ResourceName string + + // Configs contains information about key/value config pairs for + // the requested resource. + Configs []DescribeConfigsResponseResourceConfig + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v4+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DescribeConfigsResponseResource. +func (v *DescribeConfigsResponseResource) Default() { +} + +// NewDescribeConfigsResponseResource returns a default DescribeConfigsResponseResource +// This is a shortcut for creating a struct and calling Default yourself. +func NewDescribeConfigsResponseResource() DescribeConfigsResponseResource { + var v DescribeConfigsResponseResource + v.Default() + return v +} + +// DescribeConfigsResponse is returned from a DescribeConfigsRequest. +type DescribeConfigsResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ThrottleMillis is how long of a throttle Kafka will apply to the client + // after this request. + // For Kafka < 2.0.0, the throttle is applied before issuing a response. + // For Kafka >= 2.0.0, the throttle is applied after issuing a response. + // + // This request switched at version 2. + ThrottleMillis int32 + + // Resources are responses for each resource in the describe config request. + Resources []DescribeConfigsResponseResource + + // UnknownTags are tags Kafka sent that we do not know the purpose of. 
+ UnknownTags Tags // v4+ +} + +func (*DescribeConfigsResponse) Key() int16 { return 32 } +func (*DescribeConfigsResponse) MaxVersion() int16 { return 4 } +func (v *DescribeConfigsResponse) SetVersion(version int16) { v.Version = version } +func (v *DescribeConfigsResponse) GetVersion() int16 { return v.Version } +func (v *DescribeConfigsResponse) IsFlexible() bool { return v.Version >= 4 } +func (v *DescribeConfigsResponse) Throttle() (int32, bool) { return v.ThrottleMillis, v.Version >= 2 } +func (v *DescribeConfigsResponse) SetThrottle(throttleMillis int32) { + v.ThrottleMillis = throttleMillis +} + +func (v *DescribeConfigsResponse) RequestKind() Request { + return &DescribeConfigsRequest{Version: v.Version} +} + +func (v *DescribeConfigsResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 4 + _ = isFlexible + { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Resources + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.ErrorMessage + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + { + v := v.ResourceType + { + v := int8(v) + dst = kbin.AppendInt8(dst, v) + } + } + { + v := v.ResourceName + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Configs + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Name + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Value + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + { + v := v.ReadOnly + dst = kbin.AppendBool(dst, v) + } + if version >= 0 && version <= 0 { + v := v.IsDefault + dst = kbin.AppendBool(dst, v) + } + if version >= 1 { + v := v.Source + { + v := int8(v) + dst = kbin.AppendInt8(dst, v) + } + } + { + v := v.IsSensitive + dst = kbin.AppendBool(dst, v) + } + if version >= 1 { + v := v.ConfigSynonyms + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Name + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Value + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + { + v := v.Source + { + v := int8(v) + dst = kbin.AppendInt8(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if version >= 3 { + v := v.ConfigType + { + v := int8(v) + dst = kbin.AppendInt8(dst, v) + } + } + if version >= 3 { + v := v.Documentation + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + 
dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *DescribeConfigsResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *DescribeConfigsResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *DescribeConfigsResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 4 + _ = isFlexible + s := v + { + v := b.Int32() + s.ThrottleMillis = v + } + { + v := s.Resources + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]DescribeConfigsResponseResource, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int16() + s.ErrorCode = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.ErrorMessage = v + } + { + var t ConfigResourceType + { + v := b.Int8() + t = ConfigResourceType(v) + } + v := t + s.ResourceType = v + } + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.ResourceName = v + } + { + v := s.Configs + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]DescribeConfigsResponseResourceConfig, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Name = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.Value = v + } + { + v := b.Bool() + s.ReadOnly = v + } + if version >= 0 && version <= 0 { + v := b.Bool() + s.IsDefault = v + } + if version >= 1 { + var t ConfigSource + { + v := b.Int8() + t = ConfigSource(v) + } + v := t + s.Source = v + } + { + v := b.Bool() + s.IsSensitive = v + } + if version >= 1 { + v := s.ConfigSynonyms + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]DescribeConfigsResponseResourceConfigConfigSynonym, l)...) 
+ } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Name = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.Value = v + } + { + var t ConfigSource + { + v := b.Int8() + t = ConfigSource(v) + } + v := t + s.Source = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.ConfigSynonyms = v + } + if version >= 3 { + var t ConfigType + { + v := b.Int8() + t = ConfigType(v) + } + v := t + s.ConfigType = v + } + if version >= 3 { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.Documentation = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Configs = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Resources = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrDescribeConfigsResponse returns a pointer to a default DescribeConfigsResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrDescribeConfigsResponse() *DescribeConfigsResponse { + var v DescribeConfigsResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DescribeConfigsResponse. +func (v *DescribeConfigsResponse) Default() { +} + +// NewDescribeConfigsResponse returns a default DescribeConfigsResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewDescribeConfigsResponse() DescribeConfigsResponse { + var v DescribeConfigsResponse + v.Default() + return v +} + +type AlterConfigsRequestResourceConfig struct { + // Name is a key to set (e.g. segment.bytes). + Name string + + // Value is a value to set for the key (e.g. 10). + Value *string + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to AlterConfigsRequestResourceConfig. +func (v *AlterConfigsRequestResourceConfig) Default() { +} + +// NewAlterConfigsRequestResourceConfig returns a default AlterConfigsRequestResourceConfig +// This is a shortcut for creating a struct and calling Default yourself. +func NewAlterConfigsRequestResourceConfig() AlterConfigsRequestResourceConfig { + var v AlterConfigsRequestResourceConfig + v.Default() + return v +} + +type AlterConfigsRequestResource struct { + // ResourceType is an enum corresponding to the type of config to alter. + // The only two valid values are 2 (for topic) and 4 (for broker). + ResourceType ConfigResourceType + + // ResourceName is the name of config to alter. + // + // If the requested type is a topic, this corresponds to a topic name. + // + // If the requested type if a broker, this should either be empty or be + // the ID of the broker this request is issued to. If it is empty, this + // updates all broker configs. 
If a specific ID, this updates just the + // broker. Using a specific ID also ensures that brokers reload config + // or secret files even if the file path has not changed. Lastly, password + // config options can only be defined on a per broker basis. + // + // If the type is broker logger, this must be a broker ID. + ResourceName string + + // Configs contains key/value config pairs to set on the resource. + Configs []AlterConfigsRequestResourceConfig + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to AlterConfigsRequestResource. +func (v *AlterConfigsRequestResource) Default() { +} + +// NewAlterConfigsRequestResource returns a default AlterConfigsRequestResource +// This is a shortcut for creating a struct and calling Default yourself. +func NewAlterConfigsRequestResource() AlterConfigsRequestResource { + var v AlterConfigsRequestResource + v.Default() + return v +} + +// AlterConfigsRequest issues a request to alter either topic or broker +// configs. +// +// Note that to alter configs, you must specify the whole config on every +// request. All existing non-static values will be removed. This means that +// to add one key/value to a config, you must describe the config and then +// issue an alter request with the current config with the new key value. +// This also means that dynamic sensitive values, which are not returned +// in describe configs, will be lost. +// +// To fix this problem, the AlterConfigs request / response was deprecated +// in Kafka 2.3.0 in favor of the new IncrementalAlterConfigs request / response. +// See KIP-339 for more details. +type AlterConfigsRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // Resources is an array of configs to alter. + Resources []AlterConfigsRequestResource + + // ValidateOnly validates the request but does not apply it. + ValidateOnly bool + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +func (*AlterConfigsRequest) Key() int16 { return 33 } +func (*AlterConfigsRequest) MaxVersion() int16 { return 2 } +func (v *AlterConfigsRequest) SetVersion(version int16) { v.Version = version } +func (v *AlterConfigsRequest) GetVersion() int16 { return v.Version } +func (v *AlterConfigsRequest) IsFlexible() bool { return v.Version >= 2 } +func (v *AlterConfigsRequest) ResponseKind() Response { + r := &AlterConfigsResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. 
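Because a non-incremental alter replaces the resource's entire dynamic config, the usual pattern is describe-then-alter, or a dry run with ValidateOnly; on Kafka 2.3.0+ the IncrementalAlterConfigs request mentioned above is the better choice. A minimal sketch that sets one topic config, accepting that any other dynamic overrides on the topic would be lost (same placeholder client and topic as before):

func setRetention(ctx context.Context, cl *kgo.Client, topic string) error {
	value := "86400000" // retention.ms of one day, expressed as a string
	cfg := kmsg.NewAlterConfigsRequestResourceConfig()
	cfg.Name = "retention.ms"
	cfg.Value = &value

	res := kmsg.NewAlterConfigsRequestResource()
	res.ResourceType = kmsg.ConfigResourceType(2) // TOPIC
	res.ResourceName = topic
	res.Configs = append(res.Configs, cfg)

	req := kmsg.NewPtrAlterConfigsRequest()
	req.Resources = append(req.Resources, res)
	req.ValidateOnly = false // set true to dry-run the change

	resp, err := req.RequestWith(ctx, cl)
	if err != nil {
		return err
	}
	for _, r := range resp.Resources {
		if err := kerr.ErrorForCode(r.ErrorCode); err != nil {
			return err
		}
	}
	return nil
}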
+func (v *AlterConfigsRequest) RequestWith(ctx context.Context, r Requestor) (*AlterConfigsResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*AlterConfigsResponse) + return resp, err +} + +func (v *AlterConfigsRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + { + v := v.Resources + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.ResourceType + { + v := int8(v) + dst = kbin.AppendInt8(dst, v) + } + } + { + v := v.ResourceName + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Configs + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Name + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Value + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + { + v := v.ValidateOnly + dst = kbin.AppendBool(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *AlterConfigsRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *AlterConfigsRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *AlterConfigsRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + s := v + { + v := s.Resources + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]AlterConfigsRequestResource, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var t ConfigResourceType + { + v := b.Int8() + t = ConfigResourceType(v) + } + v := t + s.ResourceType = v + } + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.ResourceName = v + } + { + v := s.Configs + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]AlterConfigsRequestResourceConfig, l)...) 
+ } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Name = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.Value = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Configs = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Resources = v + } + { + v := b.Bool() + s.ValidateOnly = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrAlterConfigsRequest returns a pointer to a default AlterConfigsRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrAlterConfigsRequest() *AlterConfigsRequest { + var v AlterConfigsRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to AlterConfigsRequest. +func (v *AlterConfigsRequest) Default() { +} + +// NewAlterConfigsRequest returns a default AlterConfigsRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewAlterConfigsRequest() AlterConfigsRequest { + var v AlterConfigsRequest + v.Default() + return v +} + +type AlterConfigsResponseResource struct { + // ErrorCode is the error code returned for altering configs. + // + // CLUSTER_AUTHORIZATION_FAILED is returned if asking to alter broker + // configs but the client is not authorized to do so. + // + // TOPIC_AUTHORIZATION_FAILED is returned if asking to alter topic + // configs but the client is not authorized to do so. + // + // INVALID_TOPIC_EXCEPTION is returned if the requested topic was invalid. + // + // UNKNOWN_TOPIC_OR_PARTITION is returned if the broker does not know of + // the requested topic. + // + // INVALID_REQUEST is returned if the requested config is invalid or if + // asking Kafka to alter an invalid resource. + ErrorCode int16 + + // ErrorMessage is an informative message if the alter config failed. + ErrorMessage *string + + // ResourceType is the enum corresponding to the type of altered config. + ResourceType ConfigResourceType + + // ResourceName is the name corresponding to the alter config request. + ResourceName string + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to AlterConfigsResponseResource. +func (v *AlterConfigsResponseResource) Default() { +} + +// NewAlterConfigsResponseResource returns a default AlterConfigsResponseResource +// This is a shortcut for creating a struct and calling Default yourself. +func NewAlterConfigsResponseResource() AlterConfigsResponseResource { + var v AlterConfigsResponseResource + v.Default() + return v +} + +// AlterConfigsResponse is returned from an AlterConfigsRequest. +type AlterConfigsResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ThrottleMillis is how long of a throttle Kafka will apply to the client + // after this request. 
+ // For Kafka < 2.0.0, the throttle is applied before issuing a response. + // For Kafka >= 2.0.0, the throttle is applied after issuing a response. + // + // This request switched at version 1. + ThrottleMillis int32 + + // Resources are responses for each resource in the alter request. + Resources []AlterConfigsResponseResource + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +func (*AlterConfigsResponse) Key() int16 { return 33 } +func (*AlterConfigsResponse) MaxVersion() int16 { return 2 } +func (v *AlterConfigsResponse) SetVersion(version int16) { v.Version = version } +func (v *AlterConfigsResponse) GetVersion() int16 { return v.Version } +func (v *AlterConfigsResponse) IsFlexible() bool { return v.Version >= 2 } +func (v *AlterConfigsResponse) Throttle() (int32, bool) { return v.ThrottleMillis, v.Version >= 1 } +func (v *AlterConfigsResponse) SetThrottle(throttleMillis int32) { v.ThrottleMillis = throttleMillis } +func (v *AlterConfigsResponse) RequestKind() Request { return &AlterConfigsRequest{Version: v.Version} } + +func (v *AlterConfigsResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Resources + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.ErrorMessage + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + { + v := v.ResourceType + { + v := int8(v) + dst = kbin.AppendInt8(dst, v) + } + } + { + v := v.ResourceName + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *AlterConfigsResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *AlterConfigsResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *AlterConfigsResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + s := v + { + v := b.Int32() + s.ThrottleMillis = v + } + { + v := s.Resources + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]AlterConfigsResponseResource, l)...) 
+ } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int16() + s.ErrorCode = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.ErrorMessage = v + } + { + var t ConfigResourceType + { + v := b.Int8() + t = ConfigResourceType(v) + } + v := t + s.ResourceType = v + } + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.ResourceName = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Resources = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrAlterConfigsResponse returns a pointer to a default AlterConfigsResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrAlterConfigsResponse() *AlterConfigsResponse { + var v AlterConfigsResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to AlterConfigsResponse. +func (v *AlterConfigsResponse) Default() { +} + +// NewAlterConfigsResponse returns a default AlterConfigsResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewAlterConfigsResponse() AlterConfigsResponse { + var v AlterConfigsResponse + v.Default() + return v +} + +type AlterReplicaLogDirsRequestDirTopic struct { + // Topic is a topic to move. + Topic string + + // Partitions contains partitions for the topic to move. + Partitions []int32 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to AlterReplicaLogDirsRequestDirTopic. +func (v *AlterReplicaLogDirsRequestDirTopic) Default() { +} + +// NewAlterReplicaLogDirsRequestDirTopic returns a default AlterReplicaLogDirsRequestDirTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewAlterReplicaLogDirsRequestDirTopic() AlterReplicaLogDirsRequestDirTopic { + var v AlterReplicaLogDirsRequestDirTopic + v.Default() + return v +} + +type AlterReplicaLogDirsRequestDir struct { + // Dir is an absolute path where everything listed below should + // end up. + Dir string + + // Topics contains topics to move to the above log directory. + Topics []AlterReplicaLogDirsRequestDirTopic + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to AlterReplicaLogDirsRequestDir. +func (v *AlterReplicaLogDirsRequestDir) Default() { +} + +// NewAlterReplicaLogDirsRequestDir returns a default AlterReplicaLogDirsRequestDir +// This is a shortcut for creating a struct and calling Default yourself. +func NewAlterReplicaLogDirsRequestDir() AlterReplicaLogDirsRequestDir { + var v AlterReplicaLogDirsRequestDir + v.Default() + return v +} + +// AlterReplicaLogDirsRequest requests for log directories to be moved +// within Kafka. +// +// This is primarily useful for moving directories between disks. 
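A minimal usage sketch (editorial note, not part of the vendored file): assuming the imports "context" and "github.com/twmb/franz-go/pkg/kmsg", an existing kmsg.Requestor such as a *kgo.Client, and hypothetical topic, partition, and directory values, a move using the request type defined below could be issued roughly like this:

func moveReplicaSketch(ctx context.Context, cl kmsg.Requestor) error {
	// Partitions of one topic to move (hypothetical values).
	t := kmsg.NewAlterReplicaLogDirsRequestDirTopic()
	t.Topic = "example-topic"
	t.Partitions = []int32{0}

	// Destination log directory on the broker (hypothetical absolute path).
	d := kmsg.NewAlterReplicaLogDirsRequestDir()
	d.Dir = "/data/kafka-logs-2"
	d.Topics = append(d.Topics, t)

	req := kmsg.NewPtrAlterReplicaLogDirsRequest()
	req.Dirs = append(req.Dirs, d)

	// RequestWith issues the request on any Requestor and returns the typed response.
	_, err := req.RequestWith(ctx, cl)
	return err
}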
+type AlterReplicaLogDirsRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // Dirs contains absolute paths of where you want things to end up. + Dirs []AlterReplicaLogDirsRequestDir + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +func (*AlterReplicaLogDirsRequest) Key() int16 { return 34 } +func (*AlterReplicaLogDirsRequest) MaxVersion() int16 { return 2 } +func (v *AlterReplicaLogDirsRequest) SetVersion(version int16) { v.Version = version } +func (v *AlterReplicaLogDirsRequest) GetVersion() int16 { return v.Version } +func (v *AlterReplicaLogDirsRequest) IsFlexible() bool { return v.Version >= 2 } +func (v *AlterReplicaLogDirsRequest) ResponseKind() Response { + r := &AlterReplicaLogDirsResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. +func (v *AlterReplicaLogDirsRequest) RequestWith(ctx context.Context, r Requestor) (*AlterReplicaLogDirsResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*AlterReplicaLogDirsResponse) + return resp, err +} + +func (v *AlterReplicaLogDirsRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + { + v := v.Dirs + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Dir + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Partitions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := v[i] + dst = kbin.AppendInt32(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *AlterReplicaLogDirsRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *AlterReplicaLogDirsRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *AlterReplicaLogDirsRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + s := v + { + v := s.Dirs + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]AlterReplicaLogDirsRequestDir, l)...) 
+ } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Dir = v + } + { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]AlterReplicaLogDirsRequestDirTopic, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + { + v := s.Partitions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]int32, l)...) + } + for i := int32(0); i < l; i++ { + v := b.Int32() + a[i] = v + } + v = a + s.Partitions = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Dirs = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrAlterReplicaLogDirsRequest returns a pointer to a default AlterReplicaLogDirsRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrAlterReplicaLogDirsRequest() *AlterReplicaLogDirsRequest { + var v AlterReplicaLogDirsRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to AlterReplicaLogDirsRequest. +func (v *AlterReplicaLogDirsRequest) Default() { +} + +// NewAlterReplicaLogDirsRequest returns a default AlterReplicaLogDirsRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewAlterReplicaLogDirsRequest() AlterReplicaLogDirsRequest { + var v AlterReplicaLogDirsRequest + v.Default() + return v +} + +type AlterReplicaLogDirsResponseTopicPartition struct { + // Partition is the partition this array slot corresponds to. + Partition int32 + + // CLUSTER_AUTHORIZATION_FAILED is returned if the client is not + // authorized to alter replica dirs. + // + // LOG_DIR_NOT_FOUND is returned when the requested log directory + // is not in the broker config. + // + // KAFKA_STORAGE_EXCEPTION is returned when destination directory or + // requested replica is offline. + // + // REPLICA_NOT_AVAILABLE is returned if the replica does not exist + // yet. + ErrorCode int16 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to AlterReplicaLogDirsResponseTopicPartition. +func (v *AlterReplicaLogDirsResponseTopicPartition) Default() { +} + +// NewAlterReplicaLogDirsResponseTopicPartition returns a default AlterReplicaLogDirsResponseTopicPartition +// This is a shortcut for creating a struct and calling Default yourself. 
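For illustration only (not part of the vendored file): the per-partition error codes in the response types defined around this point can be translated into Go errors with the kerr package that this change also vendors. A sketch, assuming resp is a *kmsg.AlterReplicaLogDirsResponse already obtained via RequestWith and that "fmt" and "github.com/twmb/franz-go/pkg/kerr" are imported:

for _, topic := range resp.Topics {
	for _, p := range topic.Partitions {
		// kerr.ErrorForCode returns nil for code 0 and a typed error otherwise.
		if err := kerr.ErrorForCode(p.ErrorCode); err != nil {
			fmt.Printf("moving %s[%d] failed: %v\n", topic.Topic, p.Partition, err)
		}
	}
}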
+func NewAlterReplicaLogDirsResponseTopicPartition() AlterReplicaLogDirsResponseTopicPartition { + var v AlterReplicaLogDirsResponseTopicPartition + v.Default() + return v +} + +type AlterReplicaLogDirsResponseTopic struct { + // Topic is the topic this array slot corresponds to. + Topic string + + // Partitions contains responses to each partition that was requested + // to move. + Partitions []AlterReplicaLogDirsResponseTopicPartition + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to AlterReplicaLogDirsResponseTopic. +func (v *AlterReplicaLogDirsResponseTopic) Default() { +} + +// NewAlterReplicaLogDirsResponseTopic returns a default AlterReplicaLogDirsResponseTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewAlterReplicaLogDirsResponseTopic() AlterReplicaLogDirsResponseTopic { + var v AlterReplicaLogDirsResponseTopic + v.Default() + return v +} + +// AlterReplicaLogDirsResponse is returned from an AlterReplicaLogDirsRequest. +type AlterReplicaLogDirsResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ThrottleMillis is how long of a throttle Kafka will apply to the client + // after this request. + // For Kafka < 2.0.0, the throttle is applied before issuing a response. + // For Kafka >= 2.0.0, the throttle is applied after issuing a response. + // + // This request switched at version 1. + ThrottleMillis int32 + + // Topics contains responses to each topic that had partitions requested + // for moving. + Topics []AlterReplicaLogDirsResponseTopic + + // UnknownTags are tags Kafka sent that we do not know the purpose of. 
+ UnknownTags Tags // v2+ +} + +func (*AlterReplicaLogDirsResponse) Key() int16 { return 34 } +func (*AlterReplicaLogDirsResponse) MaxVersion() int16 { return 2 } +func (v *AlterReplicaLogDirsResponse) SetVersion(version int16) { v.Version = version } +func (v *AlterReplicaLogDirsResponse) GetVersion() int16 { return v.Version } +func (v *AlterReplicaLogDirsResponse) IsFlexible() bool { return v.Version >= 2 } +func (v *AlterReplicaLogDirsResponse) Throttle() (int32, bool) { + return v.ThrottleMillis, v.Version >= 1 +} + +func (v *AlterReplicaLogDirsResponse) SetThrottle(throttleMillis int32) { + v.ThrottleMillis = throttleMillis +} + +func (v *AlterReplicaLogDirsResponse) RequestKind() Request { + return &AlterReplicaLogDirsRequest{Version: v.Version} +} + +func (v *AlterReplicaLogDirsResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Partitions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Partition + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *AlterReplicaLogDirsResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *AlterReplicaLogDirsResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *AlterReplicaLogDirsResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + s := v + { + v := b.Int32() + s.ThrottleMillis = v + } + { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]AlterReplicaLogDirsResponseTopic, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + { + v := s.Partitions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]AlterReplicaLogDirsResponseTopicPartition, l)...) 
+ } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int32() + s.Partition = v + } + { + v := b.Int16() + s.ErrorCode = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Partitions = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrAlterReplicaLogDirsResponse returns a pointer to a default AlterReplicaLogDirsResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrAlterReplicaLogDirsResponse() *AlterReplicaLogDirsResponse { + var v AlterReplicaLogDirsResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to AlterReplicaLogDirsResponse. +func (v *AlterReplicaLogDirsResponse) Default() { +} + +// NewAlterReplicaLogDirsResponse returns a default AlterReplicaLogDirsResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewAlterReplicaLogDirsResponse() AlterReplicaLogDirsResponse { + var v AlterReplicaLogDirsResponse + v.Default() + return v +} + +type DescribeLogDirsRequestTopic struct { + // Topic is a topic to describe the log dir of. + Topic string + + // Partitions contains topic partitions to describe the log dirs of. + Partitions []int32 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DescribeLogDirsRequestTopic. +func (v *DescribeLogDirsRequestTopic) Default() { +} + +// NewDescribeLogDirsRequestTopic returns a default DescribeLogDirsRequestTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewDescribeLogDirsRequestTopic() DescribeLogDirsRequestTopic { + var v DescribeLogDirsRequestTopic + v.Default() + return v +} + +// DescribeLogDirsRequest requests directory information for topic partitions. +// This request was added in support of KIP-113. +type DescribeLogDirsRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // Topics is an array of topics to describe the log dirs of. If this is + // null, the response includes all topics and all of their partitions. + Topics []DescribeLogDirsRequestTopic + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +func (*DescribeLogDirsRequest) Key() int16 { return 35 } +func (*DescribeLogDirsRequest) MaxVersion() int16 { return 4 } +func (v *DescribeLogDirsRequest) SetVersion(version int16) { v.Version = version } +func (v *DescribeLogDirsRequest) GetVersion() int16 { return v.Version } +func (v *DescribeLogDirsRequest) IsFlexible() bool { return v.Version >= 2 } +func (v *DescribeLogDirsRequest) ResponseKind() Response { + r := &DescribeLogDirsResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. 
+func (v *DescribeLogDirsRequest) RequestWith(ctx context.Context, r Requestor) (*DescribeLogDirsResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*DescribeLogDirsResponse) + return resp, err +} + +func (v *DescribeLogDirsRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactNullableArrayLen(dst, len(v), v == nil) + } else { + dst = kbin.AppendNullableArrayLen(dst, len(v), v == nil) + } + for i := range v { + v := &v[i] + { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Partitions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := v[i] + dst = kbin.AppendInt32(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *DescribeLogDirsRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *DescribeLogDirsRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *DescribeLogDirsRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + s := v + { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if version < 0 || l == 0 { + a = []DescribeLogDirsRequestTopic{} + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]DescribeLogDirsRequestTopic, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + { + v := s.Partitions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]int32, l)...) + } + for i := int32(0); i < l; i++ { + v := b.Int32() + a[i] = v + } + v = a + s.Partitions = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrDescribeLogDirsRequest returns a pointer to a default DescribeLogDirsRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrDescribeLogDirsRequest() *DescribeLogDirsRequest { + var v DescribeLogDirsRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DescribeLogDirsRequest. +func (v *DescribeLogDirsRequest) Default() { +} + +// NewDescribeLogDirsRequest returns a default DescribeLogDirsRequest +// This is a shortcut for creating a struct and calling Default yourself. 
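A usage sketch (editorial, not part of the generated file): leaving Topics nil asks the broker to describe every log directory it knows about. Assuming the imports "context", "fmt", and "github.com/twmb/franz-go/pkg/kmsg", plus a kmsg.Requestor such as a *kgo.Client:

func logDirSizesSketch(ctx context.Context, cl kmsg.Requestor) error {
	req := kmsg.NewPtrDescribeLogDirsRequest()
	req.Topics = nil // nil means "all topics and all of their partitions"

	resp, err := req.RequestWith(ctx, cl)
	if err != nil {
		return err
	}
	for _, dir := range resp.Dirs {
		var total int64
		for _, t := range dir.Topics {
			for _, p := range t.Partitions {
				total += p.Size // size of this partition's log segments, in bytes
			}
		}
		fmt.Printf("%s: %d bytes across returned partitions\n", dir.Dir, total)
	}
	return nil
}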
+func NewDescribeLogDirsRequest() DescribeLogDirsRequest { + var v DescribeLogDirsRequest + v.Default() + return v +} + +type DescribeLogDirsResponseDirTopicPartition struct { + // Partition is a partition ID. + Partition int32 + + // Size is the total size of the log sements of this partition, in bytes. + Size int64 + + // OffsetLag is how far behind the log end offset is compared to + // the partition's high watermark (if this is the current log for + // the partition) or compared to the current replica's log end + // offset (if this is the future log for the patition). + // + // The math is, + // + // if IsFuture, localLogEndOffset - futurelogEndOffset. + // + // otherwise, max(localHighWatermark - logEndOffset, 0). + OffsetLag int64 + + // IsFuture is true if this replica was created by an + // AlterReplicaLogDirsRequest and will replace the current log of the + // replica in the future. + IsFuture bool + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DescribeLogDirsResponseDirTopicPartition. +func (v *DescribeLogDirsResponseDirTopicPartition) Default() { +} + +// NewDescribeLogDirsResponseDirTopicPartition returns a default DescribeLogDirsResponseDirTopicPartition +// This is a shortcut for creating a struct and calling Default yourself. +func NewDescribeLogDirsResponseDirTopicPartition() DescribeLogDirsResponseDirTopicPartition { + var v DescribeLogDirsResponseDirTopicPartition + v.Default() + return v +} + +type DescribeLogDirsResponseDirTopic struct { + // Topic is the name of a Kafka topic. + Topic string + + // Partitions is the set of queried partitions for a topic that are + // within a log directory. + Partitions []DescribeLogDirsResponseDirTopicPartition + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DescribeLogDirsResponseDirTopic. +func (v *DescribeLogDirsResponseDirTopic) Default() { +} + +// NewDescribeLogDirsResponseDirTopic returns a default DescribeLogDirsResponseDirTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewDescribeLogDirsResponseDirTopic() DescribeLogDirsResponseDirTopic { + var v DescribeLogDirsResponseDirTopic + v.Default() + return v +} + +type DescribeLogDirsResponseDir struct { + // ErrorCode is the error code returned for describing log dirs. + // + // KAFKA_STORAGE_ERROR is returned if the log directory is offline. + ErrorCode int16 + + // Dir is the absolute path of a log directory. + Dir string + + // Topics is an array of topics within a log directory. + Topics []DescribeLogDirsResponseDirTopic + + // TotalBytes is the total size in bytes of the volume the log directory is + // in. + // + // This field has a default of -1. + TotalBytes int64 // v4+ + + // UsableBytes is the usable size in bytes of the volume the log directory + // is in. + // + // This field has a default of -1. + UsableBytes int64 // v4+ + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DescribeLogDirsResponseDir. 
+func (v *DescribeLogDirsResponseDir) Default() { + v.TotalBytes = -1 + v.UsableBytes = -1 +} + +// NewDescribeLogDirsResponseDir returns a default DescribeLogDirsResponseDir +// This is a shortcut for creating a struct and calling Default yourself. +func NewDescribeLogDirsResponseDir() DescribeLogDirsResponseDir { + var v DescribeLogDirsResponseDir + v.Default() + return v +} + +// DescribeLogDirsResponse is returned from a DescribeLogDirsRequest. +type DescribeLogDirsResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ThrottleMillis is how long of a throttle Kafka will apply to the client + // after this request. + // For Kafka < 2.0.0, the throttle is applied before issuing a response. + // For Kafka >= 2.0.0, the throttle is applied after issuing a response. + // + // This request switched at version 1. + ThrottleMillis int32 + + // The error code, or 0 if there was no error. + ErrorCode int16 // v3+ + + // Dirs pairs log directories with the topics and partitions that are + // stored in those directores. + Dirs []DescribeLogDirsResponseDir + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +func (*DescribeLogDirsResponse) Key() int16 { return 35 } +func (*DescribeLogDirsResponse) MaxVersion() int16 { return 4 } +func (v *DescribeLogDirsResponse) SetVersion(version int16) { v.Version = version } +func (v *DescribeLogDirsResponse) GetVersion() int16 { return v.Version } +func (v *DescribeLogDirsResponse) IsFlexible() bool { return v.Version >= 2 } +func (v *DescribeLogDirsResponse) Throttle() (int32, bool) { return v.ThrottleMillis, v.Version >= 1 } +func (v *DescribeLogDirsResponse) SetThrottle(throttleMillis int32) { + v.ThrottleMillis = throttleMillis +} + +func (v *DescribeLogDirsResponse) RequestKind() Request { + return &DescribeLogDirsRequest{Version: v.Version} +} + +func (v *DescribeLogDirsResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + if version >= 3 { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.Dirs + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.Dir + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Partitions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Partition + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Size + dst = kbin.AppendInt64(dst, v) + } + { + v := v.OffsetLag + dst = kbin.AppendInt64(dst, v) + } + { + v := v.IsFuture + dst = kbin.AppendBool(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + 
} + if version >= 4 { + v := v.TotalBytes + dst = kbin.AppendInt64(dst, v) + } + if version >= 4 { + v := v.UsableBytes + dst = kbin.AppendInt64(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *DescribeLogDirsResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *DescribeLogDirsResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *DescribeLogDirsResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + s := v + { + v := b.Int32() + s.ThrottleMillis = v + } + if version >= 3 { + v := b.Int16() + s.ErrorCode = v + } + { + v := s.Dirs + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]DescribeLogDirsResponseDir, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int16() + s.ErrorCode = v + } + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Dir = v + } + { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]DescribeLogDirsResponseDirTopic, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + { + v := s.Partitions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]DescribeLogDirsResponseDirTopicPartition, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int32() + s.Partition = v + } + { + v := b.Int64() + s.Size = v + } + { + v := b.Int64() + s.OffsetLag = v + } + { + v := b.Bool() + s.IsFuture = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Partitions = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + if version >= 4 { + v := b.Int64() + s.TotalBytes = v + } + if version >= 4 { + v := b.Int64() + s.UsableBytes = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Dirs = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrDescribeLogDirsResponse returns a pointer to a default DescribeLogDirsResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrDescribeLogDirsResponse() *DescribeLogDirsResponse { + var v DescribeLogDirsResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DescribeLogDirsResponse. 
+func (v *DescribeLogDirsResponse) Default() { +} + +// NewDescribeLogDirsResponse returns a default DescribeLogDirsResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewDescribeLogDirsResponse() DescribeLogDirsResponse { + var v DescribeLogDirsResponse + v.Default() + return v +} + +// SASLAuthenticate continues a sasl authentication flow. Prior to Kafka 1.0.0, +// authenticating with sasl involved sending raw blobs of data back and forth. +// After, those blobs are wrapped in a SASLAuthenticateRequest The benefit of +// this wrapping is that Kafka can indicate errors in the response, rather than +// just closing the connection. Additionally, the response allows for further +// extension fields. +type SASLAuthenticateRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // SASLAuthBytes contains bytes for a SASL client request. + SASLAuthBytes []byte + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +func (*SASLAuthenticateRequest) Key() int16 { return 36 } +func (*SASLAuthenticateRequest) MaxVersion() int16 { return 2 } +func (v *SASLAuthenticateRequest) SetVersion(version int16) { v.Version = version } +func (v *SASLAuthenticateRequest) GetVersion() int16 { return v.Version } +func (v *SASLAuthenticateRequest) IsFlexible() bool { return v.Version >= 2 } +func (v *SASLAuthenticateRequest) ResponseKind() Response { + r := &SASLAuthenticateResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. +func (v *SASLAuthenticateRequest) RequestWith(ctx context.Context, r Requestor) (*SASLAuthenticateResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*SASLAuthenticateResponse) + return resp, err +} + +func (v *SASLAuthenticateRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + { + v := v.SASLAuthBytes + if isFlexible { + dst = kbin.AppendCompactBytes(dst, v) + } else { + dst = kbin.AppendBytes(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *SASLAuthenticateRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *SASLAuthenticateRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *SASLAuthenticateRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + s := v + { + var v []byte + if isFlexible { + v = b.CompactBytes() + } else { + v = b.Bytes() + } + s.SASLAuthBytes = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrSASLAuthenticateRequest returns a pointer to a default SASLAuthenticateRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrSASLAuthenticateRequest() *SASLAuthenticateRequest { + var v SASLAuthenticateRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to SASLAuthenticateRequest. 
+func (v *SASLAuthenticateRequest) Default() { +} + +// NewSASLAuthenticateRequest returns a default SASLAuthenticateRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewSASLAuthenticateRequest() SASLAuthenticateRequest { + var v SASLAuthenticateRequest + v.Default() + return v +} + +// SASLAuthenticateResponse is returned for a SASLAuthenticateRequest. +type SASLAuthenticateResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ErrorCode is a potential error. + ErrorCode int16 + + // ErrorMessage can contain a message for an error. + ErrorMessage *string + + // SASLAuthBytes is the server challenge continuing SASL flow. + SASLAuthBytes []byte + + // SessionLifetimeMillis, added in Kafka 2.2.0, is how long the SASL + // authentication is valid for. This timeout is only enforced if the request + // was v1. After this timeout, Kafka expects the next bytes on the wire to + // begin reauthentication. Otherwise, Kafka closes the connection. + SessionLifetimeMillis int64 // v1+ + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +func (*SASLAuthenticateResponse) Key() int16 { return 36 } +func (*SASLAuthenticateResponse) MaxVersion() int16 { return 2 } +func (v *SASLAuthenticateResponse) SetVersion(version int16) { v.Version = version } +func (v *SASLAuthenticateResponse) GetVersion() int16 { return v.Version } +func (v *SASLAuthenticateResponse) IsFlexible() bool { return v.Version >= 2 } +func (v *SASLAuthenticateResponse) RequestKind() Request { + return &SASLAuthenticateRequest{Version: v.Version} +} + +func (v *SASLAuthenticateResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.ErrorMessage + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + { + v := v.SASLAuthBytes + if isFlexible { + dst = kbin.AppendCompactBytes(dst, v) + } else { + dst = kbin.AppendBytes(dst, v) + } + } + if version >= 1 { + v := v.SessionLifetimeMillis + dst = kbin.AppendInt64(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *SASLAuthenticateResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *SASLAuthenticateResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *SASLAuthenticateResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + s := v + { + v := b.Int16() + s.ErrorCode = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.ErrorMessage = v + } + { + var v []byte + if isFlexible { + v = b.CompactBytes() + } else { + v = b.Bytes() + } + s.SASLAuthBytes = v + } + if version >= 1 { + v := b.Int64() + s.SessionLifetimeMillis = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrSASLAuthenticateResponse returns a pointer to a default SASLAuthenticateResponse +// This is a shortcut for creating a new(struct) and 
calling Default yourself. +func NewPtrSASLAuthenticateResponse() *SASLAuthenticateResponse { + var v SASLAuthenticateResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to SASLAuthenticateResponse. +func (v *SASLAuthenticateResponse) Default() { +} + +// NewSASLAuthenticateResponse returns a default SASLAuthenticateResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewSASLAuthenticateResponse() SASLAuthenticateResponse { + var v SASLAuthenticateResponse + v.Default() + return v +} + +type CreatePartitionsRequestTopicAssignment struct { + // Replicas are replicas to assign a new partition to. + Replicas []int32 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to CreatePartitionsRequestTopicAssignment. +func (v *CreatePartitionsRequestTopicAssignment) Default() { +} + +// NewCreatePartitionsRequestTopicAssignment returns a default CreatePartitionsRequestTopicAssignment +// This is a shortcut for creating a struct and calling Default yourself. +func NewCreatePartitionsRequestTopicAssignment() CreatePartitionsRequestTopicAssignment { + var v CreatePartitionsRequestTopicAssignment + v.Default() + return v +} + +type CreatePartitionsRequestTopic struct { + // Topic is a topic for which to create additional partitions for. + Topic string + + // Count is the final count of partitions this topic must have after this + // request. This must be greater than the current number of partitions. + Count int32 + + // Assignment is a two-level array, the first corresponding to new + // partitions, the second contining broker IDs for where new partition + // replicas should live. + // + // The second level, the replicas, cannot have duplicate broker IDs (i.e. + // you cannot replicate a single partition twice on the same broker). + // Additionally, the number of replicas must match the current number of + // replicas per partition on the topic. + // + // The first level's length must be equal to the delta of Count and the + // current number of partitions. + Assignment []CreatePartitionsRequestTopicAssignment + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to CreatePartitionsRequestTopic. +func (v *CreatePartitionsRequestTopic) Default() { +} + +// NewCreatePartitionsRequestTopic returns a default CreatePartitionsRequestTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewCreatePartitionsRequestTopic() CreatePartitionsRequestTopic { + var v CreatePartitionsRequestTopic + v.Default() + return v +} + +// CreatePartitionsRequest creates additional partitions for topics. +type CreatePartitionsRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // Topics contains topics to create partitions for. + Topics []CreatePartitionsRequestTopic + + // TimeoutMillis is how long Kafka can wait before responding to this request. + // This field has no effect on Kafka's processing of the request; the request + // will continue to be processed if the timeout is reached. If the timeout is + // reached, Kafka will reply with a REQUEST_TIMED_OUT error. 
+ // + // This field has a default of 15000. + TimeoutMillis int32 + + // ValidateOnly is makes this request a dry-run; everything is validated but + // no partitions are actually created. + ValidateOnly bool + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +func (*CreatePartitionsRequest) Key() int16 { return 37 } +func (*CreatePartitionsRequest) MaxVersion() int16 { return 3 } +func (v *CreatePartitionsRequest) SetVersion(version int16) { v.Version = version } +func (v *CreatePartitionsRequest) GetVersion() int16 { return v.Version } +func (v *CreatePartitionsRequest) IsFlexible() bool { return v.Version >= 2 } +func (v *CreatePartitionsRequest) Timeout() int32 { return v.TimeoutMillis } +func (v *CreatePartitionsRequest) SetTimeout(timeoutMillis int32) { v.TimeoutMillis = timeoutMillis } +func (v *CreatePartitionsRequest) IsAdminRequest() {} +func (v *CreatePartitionsRequest) ResponseKind() Response { + r := &CreatePartitionsResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. +func (v *CreatePartitionsRequest) RequestWith(ctx context.Context, r Requestor) (*CreatePartitionsResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*CreatePartitionsResponse) + return resp, err +} + +func (v *CreatePartitionsRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Count + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Assignment + if isFlexible { + dst = kbin.AppendCompactNullableArrayLen(dst, len(v), v == nil) + } else { + dst = kbin.AppendNullableArrayLen(dst, len(v), v == nil) + } + for i := range v { + v := &v[i] + { + v := v.Replicas + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := v[i] + dst = kbin.AppendInt32(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + { + v := v.TimeoutMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ValidateOnly + dst = kbin.AppendBool(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *CreatePartitionsRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *CreatePartitionsRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *CreatePartitionsRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + s := v + { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { 
+ return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]CreatePartitionsRequestTopic, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + { + v := b.Int32() + s.Count = v + } + { + v := s.Assignment + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if version < 0 || l == 0 { + a = []CreatePartitionsRequestTopicAssignment{} + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]CreatePartitionsRequestTopicAssignment, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := s.Replicas + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]int32, l)...) + } + for i := int32(0); i < l; i++ { + v := b.Int32() + a[i] = v + } + v = a + s.Replicas = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Assignment = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + { + v := b.Int32() + s.TimeoutMillis = v + } + { + v := b.Bool() + s.ValidateOnly = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrCreatePartitionsRequest returns a pointer to a default CreatePartitionsRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrCreatePartitionsRequest() *CreatePartitionsRequest { + var v CreatePartitionsRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to CreatePartitionsRequest. +func (v *CreatePartitionsRequest) Default() { + v.TimeoutMillis = 15000 +} + +// NewCreatePartitionsRequest returns a default CreatePartitionsRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewCreatePartitionsRequest() CreatePartitionsRequest { + var v CreatePartitionsRequest + v.Default() + return v +} + +type CreatePartitionsResponseTopic struct { + // Topic is the topic that partitions were requested to be made for. + Topic string + + // ErrorCode is the error code returned for each topic in the request. + // + // NOT_CONTROLLER is returned if the request was not issued to a Kafka + // controller. + // + // TOPIC_AUTHORIZATION_FAILED is returned if the client is not authorized + // to create partitions for a topic. + // + // INVALID_REQUEST is returned for duplicate topics in the request. + // + // INVALID_TOPIC_EXCEPTION is returned if the topic is queued for deletion. + // + // REASSIGNMENT_IN_PROGRESS is returned if the request was issued while + // partitions were being reassigned. + // + // UNKNOWN_TOPIC_OR_PARTITION is returned if the broker does not know of + // the topic for which to create partitions. + // + // INVALID_PARTITIONS is returned if the request would drop the total + // count of partitions down, or if the request would not add any more + // partitions, or if the request uses unknown brokers, or if the request + // assigns a different number of brokers than the increase in the + // partition count. 
+ ErrorCode int16 + + // ErrorMessage is an informative message if the topic creation failed. + ErrorMessage *string + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to CreatePartitionsResponseTopic. +func (v *CreatePartitionsResponseTopic) Default() { +} + +// NewCreatePartitionsResponseTopic returns a default CreatePartitionsResponseTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewCreatePartitionsResponseTopic() CreatePartitionsResponseTopic { + var v CreatePartitionsResponseTopic + v.Default() + return v +} + +// CreatePartitionsResponse is returned from a CreatePartitionsRequest. +type CreatePartitionsResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ThrottleMillis is how long of a throttle Kafka will apply to the client + // after this request. + // For Kafka < 2.0.0, the throttle is applied before issuing a response. + // For Kafka >= 2.0.0, the throttle is applied after issuing a response. + // + // This request switched at version 1. + ThrottleMillis int32 + + // Topics is a response to each topic in the creation request. + Topics []CreatePartitionsResponseTopic + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +func (*CreatePartitionsResponse) Key() int16 { return 37 } +func (*CreatePartitionsResponse) MaxVersion() int16 { return 3 } +func (v *CreatePartitionsResponse) SetVersion(version int16) { v.Version = version } +func (v *CreatePartitionsResponse) GetVersion() int16 { return v.Version } +func (v *CreatePartitionsResponse) IsFlexible() bool { return v.Version >= 2 } +func (v *CreatePartitionsResponse) Throttle() (int32, bool) { return v.ThrottleMillis, v.Version >= 1 } +func (v *CreatePartitionsResponse) SetThrottle(throttleMillis int32) { + v.ThrottleMillis = throttleMillis +} + +func (v *CreatePartitionsResponse) RequestKind() Request { + return &CreatePartitionsRequest{Version: v.Version} +} + +func (v *CreatePartitionsResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.ErrorMessage + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *CreatePartitionsResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *CreatePartitionsResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *CreatePartitionsResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible 
:= version >= 2 + _ = isFlexible + s := v + { + v := b.Int32() + s.ThrottleMillis = v + } + { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]CreatePartitionsResponseTopic, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + { + v := b.Int16() + s.ErrorCode = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.ErrorMessage = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrCreatePartitionsResponse returns a pointer to a default CreatePartitionsResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrCreatePartitionsResponse() *CreatePartitionsResponse { + var v CreatePartitionsResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to CreatePartitionsResponse. +func (v *CreatePartitionsResponse) Default() { +} + +// NewCreatePartitionsResponse returns a default CreatePartitionsResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewCreatePartitionsResponse() CreatePartitionsResponse { + var v CreatePartitionsResponse + v.Default() + return v +} + +type CreateDelegationTokenRequestRenewer struct { + // PrincipalType is the "type" this principal is. This must be "User". + PrincipalType string + + // PrincipalName is the user name allowed to renew the returned token. + PrincipalName string + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to CreateDelegationTokenRequestRenewer. +func (v *CreateDelegationTokenRequestRenewer) Default() { +} + +// NewCreateDelegationTokenRequestRenewer returns a default CreateDelegationTokenRequestRenewer +// This is a shortcut for creating a struct and calling Default yourself. +func NewCreateDelegationTokenRequestRenewer() CreateDelegationTokenRequestRenewer { + var v CreateDelegationTokenRequestRenewer + v.Default() + return v +} + +// CreateDelegationTokenRequest issues a request to create a delegation token. +// +// Creating delegation tokens allows for an (ideally) quicker and easier method +// of enabling authorization for a wide array of clients. Rather than having to +// manage many passwords external to Kafka, you only need to manage a few +// accounts and use those to create delegation tokens per client. +// +// Note that delegation tokens inherit the same ACLs as the user creating the +// token. Thus, if you want to properly scope ACLs, you should not create +// delegation tokens with admin accounts. +// +// Delegation tokens live inside of Kafka and use SASL SCRAM-SHA-256 for +// authorization. 
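A sketch of issuing the request described above (editorial; the principal name is hypothetical): a renewer is optional, and a MaxLifetimeMillis of -1 defers to the broker's delegation.token.max.lifetime.ms. Assumes the imports "context", "github.com/twmb/franz-go/pkg/kerr", and "github.com/twmb/franz-go/pkg/kmsg", plus a kmsg.Requestor such as a *kgo.Client:

func createTokenSketch(ctx context.Context, cl kmsg.Requestor) error {
	renewer := kmsg.NewCreateDelegationTokenRequestRenewer()
	renewer.PrincipalType = "User" // must be "User"
	renewer.PrincipalName = "loki-ingester"

	req := kmsg.NewPtrCreateDelegationTokenRequest()
	req.Renewers = append(req.Renewers, renewer)
	req.MaxLifetimeMillis = -1 // use the broker's configured maximum

	resp, err := req.RequestWith(ctx, cl)
	if err != nil {
		return err
	}
	// Kafka-level failures are reported in the response's ErrorCode.
	return kerr.ErrorForCode(resp.ErrorCode)
}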
+type CreateDelegationTokenRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // The principal type of the owner of the token. If null, this defaults + // to the token request principal. + OwnerPrincipalType *string // v3+ + + // Principal name of the owner of the token. If null, this defaults to + // the token request principal. + OwnerPrincipalName *string // v3+ + + // Renewers is a list of who can renew this delegation token. If empty, the + // default is the principal (user) who created the token. + Renewers []CreateDelegationTokenRequestRenewer + + // MaxLifetimeMillis is how long this delegation token will be valid for. + // If -1, the default will be the server's delegation.token.max.lifetime.ms. + MaxLifetimeMillis int64 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +func (*CreateDelegationTokenRequest) Key() int16 { return 38 } +func (*CreateDelegationTokenRequest) MaxVersion() int16 { return 3 } +func (v *CreateDelegationTokenRequest) SetVersion(version int16) { v.Version = version } +func (v *CreateDelegationTokenRequest) GetVersion() int16 { return v.Version } +func (v *CreateDelegationTokenRequest) IsFlexible() bool { return v.Version >= 2 } +func (v *CreateDelegationTokenRequest) ResponseKind() Response { + r := &CreateDelegationTokenResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. +func (v *CreateDelegationTokenRequest) RequestWith(ctx context.Context, r Requestor) (*CreateDelegationTokenResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*CreateDelegationTokenResponse) + return resp, err +} + +func (v *CreateDelegationTokenRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + if version >= 3 { + v := v.OwnerPrincipalType + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + if version >= 3 { + v := v.OwnerPrincipalName + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + { + v := v.Renewers + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.PrincipalType + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.PrincipalName + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + { + v := v.MaxLifetimeMillis + dst = kbin.AppendInt64(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *CreateDelegationTokenRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *CreateDelegationTokenRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *CreateDelegationTokenRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := 
kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + s := v + if version >= 3 { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.OwnerPrincipalType = v + } + if version >= 3 { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.OwnerPrincipalName = v + } + { + v := s.Renewers + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]CreateDelegationTokenRequestRenewer, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.PrincipalType = v + } + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.PrincipalName = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Renewers = v + } + { + v := b.Int64() + s.MaxLifetimeMillis = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrCreateDelegationTokenRequest returns a pointer to a default CreateDelegationTokenRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrCreateDelegationTokenRequest() *CreateDelegationTokenRequest { + var v CreateDelegationTokenRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to CreateDelegationTokenRequest. +func (v *CreateDelegationTokenRequest) Default() { +} + +// NewCreateDelegationTokenRequest returns a default CreateDelegationTokenRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewCreateDelegationTokenRequest() CreateDelegationTokenRequest { + var v CreateDelegationTokenRequest + v.Default() + return v +} + +// CreateDelegationTokenResponse is a response to a CreateDelegationTokenRequest. +type CreateDelegationTokenResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ErrorCode is any error that caused the request to fail. + ErrorCode int16 + + // PrincipalType is the type of principal that granted this delegation token. + // This will always be "User" with the simple authorizer. + PrincipalType string + + // PrincipalName is the name of the principal that granted this delegation + // token. + PrincipalName string + + // The principal type of the requester of the token. + TokenRequesterPrincipalType string // v3+ + + // The principal name of the requester token. + TokenRequesterPrincipalName string // v3+ + + // IssueTimestamp is the millisecond timestamp this delegation token was + // issued. + IssueTimestamp int64 + + // ExpiryTimestamp is the millisecond timestamp this token will expire. 
The + // token can be renewed up to MaxTimestamp, past which point, it will be + // invalid. The Kafka default is 24h. + ExpiryTimestamp int64 + + // MaxTimestamp is the millisecond timestamp past which this token cannot + // be renewed. + MaxTimestamp int64 + + // TokenID is the ID of this token; this will be used as the username for + // scram authentication. + TokenID string + + // HMAC is the password of this token; this will be used as the password for + // scram authentication. + HMAC []byte + + // ThrottleMillis is how long of a throttle Kafka will apply to the client + // after this request. + // For Kafka < 2.0.0, the throttle is applied before issuing a response. + // For Kafka >= 2.0.0, the throttle is applied after issuing a response. + // + // This request switched at version 1. + ThrottleMillis int32 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +func (*CreateDelegationTokenResponse) Key() int16 { return 38 } +func (*CreateDelegationTokenResponse) MaxVersion() int16 { return 3 } +func (v *CreateDelegationTokenResponse) SetVersion(version int16) { v.Version = version } +func (v *CreateDelegationTokenResponse) GetVersion() int16 { return v.Version } +func (v *CreateDelegationTokenResponse) IsFlexible() bool { return v.Version >= 2 } +func (v *CreateDelegationTokenResponse) Throttle() (int32, bool) { + return v.ThrottleMillis, v.Version >= 1 +} + +func (v *CreateDelegationTokenResponse) SetThrottle(throttleMillis int32) { + v.ThrottleMillis = throttleMillis +} + +func (v *CreateDelegationTokenResponse) RequestKind() Request { + return &CreateDelegationTokenRequest{Version: v.Version} +} + +func (v *CreateDelegationTokenResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.PrincipalType + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.PrincipalName + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + if version >= 3 { + v := v.TokenRequesterPrincipalType + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + if version >= 3 { + v := v.TokenRequesterPrincipalName + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.IssueTimestamp + dst = kbin.AppendInt64(dst, v) + } + { + v := v.ExpiryTimestamp + dst = kbin.AppendInt64(dst, v) + } + { + v := v.MaxTimestamp + dst = kbin.AppendInt64(dst, v) + } + { + v := v.TokenID + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.HMAC + if isFlexible { + dst = kbin.AppendCompactBytes(dst, v) + } else { + dst = kbin.AppendBytes(dst, v) + } + } + { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *CreateDelegationTokenResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *CreateDelegationTokenResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *CreateDelegationTokenResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = 
version + isFlexible := version >= 2 + _ = isFlexible + s := v + { + v := b.Int16() + s.ErrorCode = v + } + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.PrincipalType = v + } + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.PrincipalName = v + } + if version >= 3 { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.TokenRequesterPrincipalType = v + } + if version >= 3 { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.TokenRequesterPrincipalName = v + } + { + v := b.Int64() + s.IssueTimestamp = v + } + { + v := b.Int64() + s.ExpiryTimestamp = v + } + { + v := b.Int64() + s.MaxTimestamp = v + } + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.TokenID = v + } + { + var v []byte + if isFlexible { + v = b.CompactBytes() + } else { + v = b.Bytes() + } + s.HMAC = v + } + { + v := b.Int32() + s.ThrottleMillis = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrCreateDelegationTokenResponse returns a pointer to a default CreateDelegationTokenResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrCreateDelegationTokenResponse() *CreateDelegationTokenResponse { + var v CreateDelegationTokenResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to CreateDelegationTokenResponse. +func (v *CreateDelegationTokenResponse) Default() { +} + +// NewCreateDelegationTokenResponse returns a default CreateDelegationTokenResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewCreateDelegationTokenResponse() CreateDelegationTokenResponse { + var v CreateDelegationTokenResponse + v.Default() + return v +} + +// RenewDelegationTokenRequest is a request to renew a delegation token that +// has not yet hit its max timestamp. Note that a client using a token cannot +// renew its own token. +type RenewDelegationTokenRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // HMAC is the HMAC of the token to be renewed. + HMAC []byte + + // RenewTimeMillis is how long to renew the token for. If -1, Kafka uses its + // delegation.token.max.lifetime.ms. + RenewTimeMillis int64 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. 
+ UnknownTags Tags // v2+ +} + +func (*RenewDelegationTokenRequest) Key() int16 { return 39 } +func (*RenewDelegationTokenRequest) MaxVersion() int16 { return 2 } +func (v *RenewDelegationTokenRequest) SetVersion(version int16) { v.Version = version } +func (v *RenewDelegationTokenRequest) GetVersion() int16 { return v.Version } +func (v *RenewDelegationTokenRequest) IsFlexible() bool { return v.Version >= 2 } +func (v *RenewDelegationTokenRequest) ResponseKind() Response { + r := &RenewDelegationTokenResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. +func (v *RenewDelegationTokenRequest) RequestWith(ctx context.Context, r Requestor) (*RenewDelegationTokenResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*RenewDelegationTokenResponse) + return resp, err +} + +func (v *RenewDelegationTokenRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + { + v := v.HMAC + if isFlexible { + dst = kbin.AppendCompactBytes(dst, v) + } else { + dst = kbin.AppendBytes(dst, v) + } + } + { + v := v.RenewTimeMillis + dst = kbin.AppendInt64(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *RenewDelegationTokenRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *RenewDelegationTokenRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *RenewDelegationTokenRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + s := v + { + var v []byte + if isFlexible { + v = b.CompactBytes() + } else { + v = b.Bytes() + } + s.HMAC = v + } + { + v := b.Int64() + s.RenewTimeMillis = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrRenewDelegationTokenRequest returns a pointer to a default RenewDelegationTokenRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrRenewDelegationTokenRequest() *RenewDelegationTokenRequest { + var v RenewDelegationTokenRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to RenewDelegationTokenRequest. +func (v *RenewDelegationTokenRequest) Default() { +} + +// NewRenewDelegationTokenRequest returns a default RenewDelegationTokenRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewRenewDelegationTokenRequest() RenewDelegationTokenRequest { + var v RenewDelegationTokenRequest + v.Default() + return v +} + +// RenewDelegationTokenResponse is a response to a RenewDelegationTokenRequest. +type RenewDelegationTokenResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ErrorCode is any error that caused the request to fail. + ErrorCode int16 + + // ExpiryTimestamp is the millisecond timestamp this token will expire. The + // token can be renewed up to MaxTimestamp, past which point, it will be + // invalid. The Kafka default is 24h. 
+ ExpiryTimestamp int64 + + // ThrottleMillis is how long of a throttle Kafka will apply to the client + // after this request. + // For Kafka < 2.0.0, the throttle is applied before issuing a response. + // For Kafka >= 2.0.0, the throttle is applied after issuing a response. + // + // This request switched at version 1. + ThrottleMillis int32 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +func (*RenewDelegationTokenResponse) Key() int16 { return 39 } +func (*RenewDelegationTokenResponse) MaxVersion() int16 { return 2 } +func (v *RenewDelegationTokenResponse) SetVersion(version int16) { v.Version = version } +func (v *RenewDelegationTokenResponse) GetVersion() int16 { return v.Version } +func (v *RenewDelegationTokenResponse) IsFlexible() bool { return v.Version >= 2 } +func (v *RenewDelegationTokenResponse) Throttle() (int32, bool) { + return v.ThrottleMillis, v.Version >= 1 +} + +func (v *RenewDelegationTokenResponse) SetThrottle(throttleMillis int32) { + v.ThrottleMillis = throttleMillis +} + +func (v *RenewDelegationTokenResponse) RequestKind() Request { + return &RenewDelegationTokenRequest{Version: v.Version} +} + +func (v *RenewDelegationTokenResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.ExpiryTimestamp + dst = kbin.AppendInt64(dst, v) + } + { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *RenewDelegationTokenResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *RenewDelegationTokenResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *RenewDelegationTokenResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + s := v + { + v := b.Int16() + s.ErrorCode = v + } + { + v := b.Int64() + s.ExpiryTimestamp = v + } + { + v := b.Int32() + s.ThrottleMillis = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrRenewDelegationTokenResponse returns a pointer to a default RenewDelegationTokenResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrRenewDelegationTokenResponse() *RenewDelegationTokenResponse { + var v RenewDelegationTokenResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to RenewDelegationTokenResponse. +func (v *RenewDelegationTokenResponse) Default() { +} + +// NewRenewDelegationTokenResponse returns a default RenewDelegationTokenResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewRenewDelegationTokenResponse() RenewDelegationTokenResponse { + var v RenewDelegationTokenResponse + v.Default() + return v +} + +// ExpireDelegationTokenRequest is a request to change the expiry timestamp +// of a delegation token. Note that a client using a token cannot expire its +// own token. +type ExpireDelegationTokenRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // HMAC is the HMAC of the token to change the expiry timestamp of. 
+ HMAC []byte + + // ExpiryPeriodMillis changes the delegation token's expiry timestamp to + // now + expiry time millis. This can be used to force tokens to expire + // quickly, or to allow tokens a grace period before expiry. You cannot + // add enough expiry that exceeds the original max timestamp. + ExpiryPeriodMillis int64 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +func (*ExpireDelegationTokenRequest) Key() int16 { return 40 } +func (*ExpireDelegationTokenRequest) MaxVersion() int16 { return 2 } +func (v *ExpireDelegationTokenRequest) SetVersion(version int16) { v.Version = version } +func (v *ExpireDelegationTokenRequest) GetVersion() int16 { return v.Version } +func (v *ExpireDelegationTokenRequest) IsFlexible() bool { return v.Version >= 2 } +func (v *ExpireDelegationTokenRequest) ResponseKind() Response { + r := &ExpireDelegationTokenResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. +func (v *ExpireDelegationTokenRequest) RequestWith(ctx context.Context, r Requestor) (*ExpireDelegationTokenResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*ExpireDelegationTokenResponse) + return resp, err +} + +func (v *ExpireDelegationTokenRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + { + v := v.HMAC + if isFlexible { + dst = kbin.AppendCompactBytes(dst, v) + } else { + dst = kbin.AppendBytes(dst, v) + } + } + { + v := v.ExpiryPeriodMillis + dst = kbin.AppendInt64(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *ExpireDelegationTokenRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *ExpireDelegationTokenRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *ExpireDelegationTokenRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + s := v + { + var v []byte + if isFlexible { + v = b.CompactBytes() + } else { + v = b.Bytes() + } + s.HMAC = v + } + { + v := b.Int64() + s.ExpiryPeriodMillis = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrExpireDelegationTokenRequest returns a pointer to a default ExpireDelegationTokenRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrExpireDelegationTokenRequest() *ExpireDelegationTokenRequest { + var v ExpireDelegationTokenRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to ExpireDelegationTokenRequest. +func (v *ExpireDelegationTokenRequest) Default() { +} + +// NewExpireDelegationTokenRequest returns a default ExpireDelegationTokenRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewExpireDelegationTokenRequest() ExpireDelegationTokenRequest { + var v ExpireDelegationTokenRequest + v.Default() + return v +} + +// ExpireDelegationTokenResponse is a response to an ExpireDelegationTokenRequest. 
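A similarly shaped sketch for expiring a token early: given the HMAC returned at creation, ExpiryPeriodMillis moves the expiry to now plus the given grace period, so zero asks the broker to expire it more or less immediately. The expireToken helper name and the passed-in client are assumptions for illustration.

package main

import (
	"context"

	"github.com/twmb/franz-go/pkg/kerr"
	"github.com/twmb/franz-go/pkg/kgo"
	"github.com/twmb/franz-go/pkg/kmsg"
)

// expireToken moves a delegation token's expiry to roughly "now" by requesting
// a zero grace period. hmac is the token's HMAC as returned at creation time.
func expireToken(ctx context.Context, cl *kgo.Client, hmac []byte) error {
	req := kmsg.NewPtrExpireDelegationTokenRequest()
	req.HMAC = hmac
	req.ExpiryPeriodMillis = 0 // expiry becomes now + 0ms

	resp, err := req.RequestWith(ctx, cl)
	if err != nil {
		return err // transport-level failure
	}
	// A non-zero ErrorCode maps to a typed error via the kerr package.
	return kerr.ErrorForCode(resp.ErrorCode)
}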
+type ExpireDelegationTokenResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ErrorCode is any error that caused the request to fail. + ErrorCode int16 + + // ExpiryTimestamp is the new timestamp at which the delegation token will + // expire. + ExpiryTimestamp int64 + + // ThrottleMillis is how long of a throttle Kafka will apply to the client + // after this request. + // For Kafka < 2.0.0, the throttle is applied before issuing a response. + // For Kafka >= 2.0.0, the throttle is applied after issuing a response. + // + // This request switched at version 1. + ThrottleMillis int32 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +func (*ExpireDelegationTokenResponse) Key() int16 { return 40 } +func (*ExpireDelegationTokenResponse) MaxVersion() int16 { return 2 } +func (v *ExpireDelegationTokenResponse) SetVersion(version int16) { v.Version = version } +func (v *ExpireDelegationTokenResponse) GetVersion() int16 { return v.Version } +func (v *ExpireDelegationTokenResponse) IsFlexible() bool { return v.Version >= 2 } +func (v *ExpireDelegationTokenResponse) Throttle() (int32, bool) { + return v.ThrottleMillis, v.Version >= 1 +} + +func (v *ExpireDelegationTokenResponse) SetThrottle(throttleMillis int32) { + v.ThrottleMillis = throttleMillis +} + +func (v *ExpireDelegationTokenResponse) RequestKind() Request { + return &ExpireDelegationTokenRequest{Version: v.Version} +} + +func (v *ExpireDelegationTokenResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.ExpiryTimestamp + dst = kbin.AppendInt64(dst, v) + } + { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *ExpireDelegationTokenResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *ExpireDelegationTokenResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *ExpireDelegationTokenResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + s := v + { + v := b.Int16() + s.ErrorCode = v + } + { + v := b.Int64() + s.ExpiryTimestamp = v + } + { + v := b.Int32() + s.ThrottleMillis = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrExpireDelegationTokenResponse returns a pointer to a default ExpireDelegationTokenResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrExpireDelegationTokenResponse() *ExpireDelegationTokenResponse { + var v ExpireDelegationTokenResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to ExpireDelegationTokenResponse. +func (v *ExpireDelegationTokenResponse) Default() { +} + +// NewExpireDelegationTokenResponse returns a default ExpireDelegationTokenResponse +// This is a shortcut for creating a struct and calling Default yourself. 
+func NewExpireDelegationTokenResponse() ExpireDelegationTokenResponse { + var v ExpireDelegationTokenResponse + v.Default() + return v +} + +type DescribeDelegationTokenRequestOwner struct { + // PrincipalType is a type to match to describe delegation tokens created + // with this principal. This would be "User" with the simple authorizer. + PrincipalType string + + // PrincipalName is the name to match to describe delegation tokens created + // with this principal. + PrincipalName string + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DescribeDelegationTokenRequestOwner. +func (v *DescribeDelegationTokenRequestOwner) Default() { +} + +// NewDescribeDelegationTokenRequestOwner returns a default DescribeDelegationTokenRequestOwner +// This is a shortcut for creating a struct and calling Default yourself. +func NewDescribeDelegationTokenRequestOwner() DescribeDelegationTokenRequestOwner { + var v DescribeDelegationTokenRequestOwner + v.Default() + return v +} + +// DescribeDelegationTokenRequest is a request to describe delegation tokens. +type DescribeDelegationTokenRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // Owners contains owners to describe delegation tokens for, or null for all. + // If non-null, only tokens created from a matching principal type, name + // combination are printed. + Owners []DescribeDelegationTokenRequestOwner + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +func (*DescribeDelegationTokenRequest) Key() int16 { return 41 } +func (*DescribeDelegationTokenRequest) MaxVersion() int16 { return 3 } +func (v *DescribeDelegationTokenRequest) SetVersion(version int16) { v.Version = version } +func (v *DescribeDelegationTokenRequest) GetVersion() int16 { return v.Version } +func (v *DescribeDelegationTokenRequest) IsFlexible() bool { return v.Version >= 2 } +func (v *DescribeDelegationTokenRequest) ResponseKind() Response { + r := &DescribeDelegationTokenResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. 
+func (v *DescribeDelegationTokenRequest) RequestWith(ctx context.Context, r Requestor) (*DescribeDelegationTokenResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*DescribeDelegationTokenResponse) + return resp, err +} + +func (v *DescribeDelegationTokenRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + { + v := v.Owners + if isFlexible { + dst = kbin.AppendCompactNullableArrayLen(dst, len(v), v == nil) + } else { + dst = kbin.AppendNullableArrayLen(dst, len(v), v == nil) + } + for i := range v { + v := &v[i] + { + v := v.PrincipalType + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.PrincipalName + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *DescribeDelegationTokenRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *DescribeDelegationTokenRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *DescribeDelegationTokenRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + s := v + { + v := s.Owners + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if version < 0 || l == 0 { + a = []DescribeDelegationTokenRequestOwner{} + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]DescribeDelegationTokenRequestOwner, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.PrincipalType = v + } + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.PrincipalName = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Owners = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrDescribeDelegationTokenRequest returns a pointer to a default DescribeDelegationTokenRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrDescribeDelegationTokenRequest() *DescribeDelegationTokenRequest { + var v DescribeDelegationTokenRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DescribeDelegationTokenRequest. +func (v *DescribeDelegationTokenRequest) Default() { +} + +// NewDescribeDelegationTokenRequest returns a default DescribeDelegationTokenRequest +// This is a shortcut for creating a struct and calling Default yourself. 
+func NewDescribeDelegationTokenRequest() DescribeDelegationTokenRequest { + var v DescribeDelegationTokenRequest + v.Default() + return v +} + +type DescribeDelegationTokenResponseTokenDetailRenewer struct { + PrincipalType string + + PrincipalName string + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DescribeDelegationTokenResponseTokenDetailRenewer. +func (v *DescribeDelegationTokenResponseTokenDetailRenewer) Default() { +} + +// NewDescribeDelegationTokenResponseTokenDetailRenewer returns a default DescribeDelegationTokenResponseTokenDetailRenewer +// This is a shortcut for creating a struct and calling Default yourself. +func NewDescribeDelegationTokenResponseTokenDetailRenewer() DescribeDelegationTokenResponseTokenDetailRenewer { + var v DescribeDelegationTokenResponseTokenDetailRenewer + v.Default() + return v +} + +type DescribeDelegationTokenResponseTokenDetail struct { + // PrincipalType is the principal type of who created this token. + PrincipalType string + + // PrincipalName is the principal name of who created this token. + PrincipalName string + + // The principal type of the requester of the token. + TokenRequesterPrincipalType string // v3+ + + // The principal name of the requester token. + TokenRequesterPrincipalName string // v3+ + + // IssueTimestamp is the millisecond timestamp of when this token was issued. + IssueTimestamp int64 + + // ExpiryTimestamp is the millisecond timestamp of when this token will expire. + ExpiryTimestamp int64 + + // MaxTimestamp is the millisecond timestamp past which whis token cannot + // be renewed. + MaxTimestamp int64 + + // TokenID is the ID (scram username) of this token. + TokenID string + + // HMAC is the password of this token. + HMAC []byte + + // Renewers is a list of users that can renew this token. + Renewers []DescribeDelegationTokenResponseTokenDetailRenewer + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DescribeDelegationTokenResponseTokenDetail. +func (v *DescribeDelegationTokenResponseTokenDetail) Default() { +} + +// NewDescribeDelegationTokenResponseTokenDetail returns a default DescribeDelegationTokenResponseTokenDetail +// This is a shortcut for creating a struct and calling Default yourself. +func NewDescribeDelegationTokenResponseTokenDetail() DescribeDelegationTokenResponseTokenDetail { + var v DescribeDelegationTokenResponseTokenDetail + v.Default() + return v +} + +// DescribeDelegationTokenResponsee is a response to a DescribeDelegationTokenRequest. +type DescribeDelegationTokenResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ErrorCode is any error that caused the request to fail. + ErrorCode int16 + + // TokenDetails shows information about each token created from any principal + // in the request. + TokenDetails []DescribeDelegationTokenResponseTokenDetail + + // ThrottleMillis is how long of a throttle Kafka will apply to the client + // after this request. + // For Kafka < 2.0.0, the throttle is applied before issuing a response. + // For Kafka >= 2.0.0, the throttle is applied after issuing a response. + // + // This request switched at version 1. 
+ ThrottleMillis int32 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +func (*DescribeDelegationTokenResponse) Key() int16 { return 41 } +func (*DescribeDelegationTokenResponse) MaxVersion() int16 { return 3 } +func (v *DescribeDelegationTokenResponse) SetVersion(version int16) { v.Version = version } +func (v *DescribeDelegationTokenResponse) GetVersion() int16 { return v.Version } +func (v *DescribeDelegationTokenResponse) IsFlexible() bool { return v.Version >= 2 } +func (v *DescribeDelegationTokenResponse) Throttle() (int32, bool) { + return v.ThrottleMillis, v.Version >= 1 +} + +func (v *DescribeDelegationTokenResponse) SetThrottle(throttleMillis int32) { + v.ThrottleMillis = throttleMillis +} + +func (v *DescribeDelegationTokenResponse) RequestKind() Request { + return &DescribeDelegationTokenRequest{Version: v.Version} +} + +func (v *DescribeDelegationTokenResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.TokenDetails + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.PrincipalType + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.PrincipalName + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + if version >= 3 { + v := v.TokenRequesterPrincipalType + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + if version >= 3 { + v := v.TokenRequesterPrincipalName + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.IssueTimestamp + dst = kbin.AppendInt64(dst, v) + } + { + v := v.ExpiryTimestamp + dst = kbin.AppendInt64(dst, v) + } + { + v := v.MaxTimestamp + dst = kbin.AppendInt64(dst, v) + } + { + v := v.TokenID + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.HMAC + if isFlexible { + dst = kbin.AppendCompactBytes(dst, v) + } else { + dst = kbin.AppendBytes(dst, v) + } + } + { + v := v.Renewers + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.PrincipalType + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.PrincipalName + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *DescribeDelegationTokenResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *DescribeDelegationTokenResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v 
*DescribeDelegationTokenResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + s := v + { + v := b.Int16() + s.ErrorCode = v + } + { + v := s.TokenDetails + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]DescribeDelegationTokenResponseTokenDetail, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.PrincipalType = v + } + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.PrincipalName = v + } + if version >= 3 { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.TokenRequesterPrincipalType = v + } + if version >= 3 { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.TokenRequesterPrincipalName = v + } + { + v := b.Int64() + s.IssueTimestamp = v + } + { + v := b.Int64() + s.ExpiryTimestamp = v + } + { + v := b.Int64() + s.MaxTimestamp = v + } + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.TokenID = v + } + { + var v []byte + if isFlexible { + v = b.CompactBytes() + } else { + v = b.Bytes() + } + s.HMAC = v + } + { + v := s.Renewers + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]DescribeDelegationTokenResponseTokenDetailRenewer, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.PrincipalType = v + } + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.PrincipalName = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Renewers = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.TokenDetails = v + } + { + v := b.Int32() + s.ThrottleMillis = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrDescribeDelegationTokenResponse returns a pointer to a default DescribeDelegationTokenResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. 
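To show the describe path end to end, a small sketch that lists every token visible to the caller; leaving Owners nil matches the "null for all" behavior documented on the request, and listTokens is just an illustrative helper name.

package main

import (
	"context"
	"fmt"

	"github.com/twmb/franz-go/pkg/kerr"
	"github.com/twmb/franz-go/pkg/kgo"
	"github.com/twmb/franz-go/pkg/kmsg"
)

// listTokens prints every delegation token the broker will show this client.
func listTokens(ctx context.Context, cl *kgo.Client) error {
	req := kmsg.NewPtrDescribeDelegationTokenRequest()
	req.Owners = nil // nil means "all owners"; add entries to filter by principal

	resp, err := req.RequestWith(ctx, cl)
	if err != nil {
		return err
	}
	if err := kerr.ErrorForCode(resp.ErrorCode); err != nil {
		return err
	}
	for _, t := range resp.TokenDetails {
		fmt.Printf("token %s owned by %s/%s expires %d (renewers: %d)\n",
			t.TokenID, t.PrincipalType, t.PrincipalName, t.ExpiryTimestamp, len(t.Renewers))
	}
	return nil
}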
+func NewPtrDescribeDelegationTokenResponse() *DescribeDelegationTokenResponse { + var v DescribeDelegationTokenResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DescribeDelegationTokenResponse. +func (v *DescribeDelegationTokenResponse) Default() { +} + +// NewDescribeDelegationTokenResponse returns a default DescribeDelegationTokenResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewDescribeDelegationTokenResponse() DescribeDelegationTokenResponse { + var v DescribeDelegationTokenResponse + v.Default() + return v +} + +// DeleteGroupsRequest deletes consumer groups. This request was added for +// Kafka 1.1.0 corresponding to the removal of RetentionTimeMillis from +// OffsetCommitRequest. See KIP-229 for more details. +type DeleteGroupsRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // Groups is a list of groups to delete. + Groups []string + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +func (*DeleteGroupsRequest) Key() int16 { return 42 } +func (*DeleteGroupsRequest) MaxVersion() int16 { return 2 } +func (v *DeleteGroupsRequest) SetVersion(version int16) { v.Version = version } +func (v *DeleteGroupsRequest) GetVersion() int16 { return v.Version } +func (v *DeleteGroupsRequest) IsFlexible() bool { return v.Version >= 2 } +func (v *DeleteGroupsRequest) IsGroupCoordinatorRequest() {} +func (v *DeleteGroupsRequest) ResponseKind() Response { + r := &DeleteGroupsResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. +func (v *DeleteGroupsRequest) RequestWith(ctx context.Context, r Requestor) (*DeleteGroupsResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*DeleteGroupsResponse) + return resp, err +} + +func (v *DeleteGroupsRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + { + v := v.Groups + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := v[i] + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *DeleteGroupsRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *DeleteGroupsRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *DeleteGroupsRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + s := v + { + v := s.Groups + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]string, l)...) 
+ } + for i := int32(0); i < l; i++ { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + a[i] = v + } + v = a + s.Groups = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrDeleteGroupsRequest returns a pointer to a default DeleteGroupsRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrDeleteGroupsRequest() *DeleteGroupsRequest { + var v DeleteGroupsRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DeleteGroupsRequest. +func (v *DeleteGroupsRequest) Default() { +} + +// NewDeleteGroupsRequest returns a default DeleteGroupsRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewDeleteGroupsRequest() DeleteGroupsRequest { + var v DeleteGroupsRequest + v.Default() + return v +} + +type DeleteGroupsResponseGroup struct { + // Group is a group ID requested for deletion. + Group string + + // ErrorCode is the error code returned for this group's deletion request. + // + // GROUP_AUTHORIZATION_FAILED is returned if the client is not authorized + // to delete a group. + // + // INVALID_GROUP_ID is returned if the requested group ID is invalid. + // + // COORDINATOR_NOT_AVAILABLE is returned if the coordinator for this + // group is not yet active. + // + // GROUP_ID_NOT_FOUND is returned if the group ID does not exist. + // + // NON_EMPTY_GROUP is returned if attempting to delete a group that is + // not in the empty state. + ErrorCode int16 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DeleteGroupsResponseGroup. +func (v *DeleteGroupsResponseGroup) Default() { +} + +// NewDeleteGroupsResponseGroup returns a default DeleteGroupsResponseGroup +// This is a shortcut for creating a struct and calling Default yourself. +func NewDeleteGroupsResponseGroup() DeleteGroupsResponseGroup { + var v DeleteGroupsResponseGroup + v.Default() + return v +} + +// DeleteGroupsResponse is returned from a DeleteGroupsRequest. +type DeleteGroupsResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ThrottleMillis is how long of a throttle Kafka will apply to the client + // after this request. + // For Kafka < 2.0.0, the throttle is applied before issuing a response. + // For Kafka >= 2.0.0, the throttle is applied after issuing a response. + // + // This request switched at version 1. + ThrottleMillis int32 + + // Groups are the responses to each group requested for deletion. + Groups []DeleteGroupsResponseGroup + + // UnknownTags are tags Kafka sent that we do not know the purpose of. 
+ UnknownTags Tags // v2+ +} + +func (*DeleteGroupsResponse) Key() int16 { return 42 } +func (*DeleteGroupsResponse) MaxVersion() int16 { return 2 } +func (v *DeleteGroupsResponse) SetVersion(version int16) { v.Version = version } +func (v *DeleteGroupsResponse) GetVersion() int16 { return v.Version } +func (v *DeleteGroupsResponse) IsFlexible() bool { return v.Version >= 2 } +func (v *DeleteGroupsResponse) Throttle() (int32, bool) { return v.ThrottleMillis, v.Version >= 1 } +func (v *DeleteGroupsResponse) SetThrottle(throttleMillis int32) { v.ThrottleMillis = throttleMillis } +func (v *DeleteGroupsResponse) RequestKind() Request { return &DeleteGroupsRequest{Version: v.Version} } + +func (v *DeleteGroupsResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Groups + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Group + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *DeleteGroupsResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *DeleteGroupsResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *DeleteGroupsResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + s := v + { + v := b.Int32() + s.ThrottleMillis = v + } + { + v := s.Groups + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]DeleteGroupsResponseGroup, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Group = v + } + { + v := b.Int16() + s.ErrorCode = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Groups = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrDeleteGroupsResponse returns a pointer to a default DeleteGroupsResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrDeleteGroupsResponse() *DeleteGroupsResponse { + var v DeleteGroupsResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DeleteGroupsResponse. +func (v *DeleteGroupsResponse) Default() { +} + +// NewDeleteGroupsResponse returns a default DeleteGroupsResponse +// This is a shortcut for creating a struct and calling Default yourself. 
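Since DeleteGroups reports success or failure per group rather than for the request as a whole, a caller usually folds the per-group error codes into its own result. A sketch, with deleteGroups as an illustrative helper and a *kgo.Client assumed as the Requestor:

package main

import (
	"context"

	"github.com/twmb/franz-go/pkg/kerr"
	"github.com/twmb/franz-go/pkg/kgo"
	"github.com/twmb/franz-go/pkg/kmsg"
)

// deleteGroups issues one DeleteGroupsRequest and reports each group's outcome.
func deleteGroups(ctx context.Context, cl *kgo.Client, groups ...string) (map[string]error, error) {
	req := kmsg.NewPtrDeleteGroupsRequest()
	req.Groups = groups

	resp, err := req.RequestWith(ctx, cl)
	if err != nil {
		return nil, err
	}
	results := make(map[string]error, len(resp.Groups))
	for _, g := range resp.Groups {
		// e.g. NON_EMPTY_GROUP if the group still has members, GROUP_ID_NOT_FOUND if unknown.
		results[g.Group] = kerr.ErrorForCode(g.ErrorCode)
	}
	return results, nil
}

For deletions spanning many coordinators, the comment on RequestWith above suggests relying on client.RequestSharded rather than on merged responses.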
+func NewDeleteGroupsResponse() DeleteGroupsResponse { + var v DeleteGroupsResponse + v.Default() + return v +} + +type ElectLeadersRequestTopic struct { + // Topic is a topic to trigger leader elections for (but only for the + // partitions below). + Topic string + + // Partitions is an array of partitions in a topic to trigger leader + // elections for. + Partitions []int32 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to ElectLeadersRequestTopic. +func (v *ElectLeadersRequestTopic) Default() { +} + +// NewElectLeadersRequestTopic returns a default ElectLeadersRequestTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewElectLeadersRequestTopic() ElectLeadersRequestTopic { + var v ElectLeadersRequestTopic + v.Default() + return v +} + +// ElectLeadersRequest begins a leader election for all given topic +// partitions. This request was added in Kafka 2.2.0 to replace the zookeeper +// only option of triggering leader elections before. See KIP-183 for more +// details. KIP-460 introduced the ElectionType field with Kafka 2.4.0. +type ElectLeadersRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ElectionType is the type of election to conduct. 0 elects the preferred + // replica, 1 elects the first live replica if there are no in-sync replicas + // (i.e., unclean leader election). + ElectionType int8 // v1+ + + // Topics is an array of topics and corresponding partitions to + // trigger leader elections for, or null for all. + Topics []ElectLeadersRequestTopic + + // TimeoutMillis is how long Kafka can wait before responding to this request. + // This field has no effect on Kafka's processing of the request; the request + // will continue to be processed if the timeout is reached. If the timeout is + // reached, Kafka will reply with a REQUEST_TIMED_OUT error. + // + // This field has a default of 60000. + TimeoutMillis int32 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +func (*ElectLeadersRequest) Key() int16 { return 43 } +func (*ElectLeadersRequest) MaxVersion() int16 { return 2 } +func (v *ElectLeadersRequest) SetVersion(version int16) { v.Version = version } +func (v *ElectLeadersRequest) GetVersion() int16 { return v.Version } +func (v *ElectLeadersRequest) IsFlexible() bool { return v.Version >= 2 } +func (v *ElectLeadersRequest) Timeout() int32 { return v.TimeoutMillis } +func (v *ElectLeadersRequest) SetTimeout(timeoutMillis int32) { v.TimeoutMillis = timeoutMillis } +func (v *ElectLeadersRequest) IsAdminRequest() {} +func (v *ElectLeadersRequest) ResponseKind() Response { + r := &ElectLeadersResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. 
+func (v *ElectLeadersRequest) RequestWith(ctx context.Context, r Requestor) (*ElectLeadersResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*ElectLeadersResponse) + return resp, err +} + +func (v *ElectLeadersRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + if version >= 1 { + v := v.ElectionType + dst = kbin.AppendInt8(dst, v) + } + { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactNullableArrayLen(dst, len(v), v == nil) + } else { + dst = kbin.AppendNullableArrayLen(dst, len(v), v == nil) + } + for i := range v { + v := &v[i] + { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Partitions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := v[i] + dst = kbin.AppendInt32(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + { + v := v.TimeoutMillis + dst = kbin.AppendInt32(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *ElectLeadersRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *ElectLeadersRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *ElectLeadersRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + s := v + if version >= 1 { + v := b.Int8() + s.ElectionType = v + } + { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if version < 0 || l == 0 { + a = []ElectLeadersRequestTopic{} + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]ElectLeadersRequestTopic, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + { + v := s.Partitions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]int32, l)...) + } + for i := int32(0); i < l; i++ { + v := b.Int32() + a[i] = v + } + v = a + s.Partitions = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + { + v := b.Int32() + s.TimeoutMillis = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrElectLeadersRequest returns a pointer to a default ElectLeadersRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrElectLeadersRequest() *ElectLeadersRequest { + var v ElectLeadersRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to ElectLeadersRequest. 
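Pulling the ElectLeadersRequest fields together, a sketch that triggers a preferred-replica election (ElectionType 0) for selected partitions of one topic; electPreferred is an illustrative helper, and the broker-side timeout is left at the 60000ms value that Default() applies.

package main

import (
	"context"
	"fmt"

	"github.com/twmb/franz-go/pkg/kerr"
	"github.com/twmb/franz-go/pkg/kgo"
	"github.com/twmb/franz-go/pkg/kmsg"
)

// electPreferred triggers a preferred-replica leader election for the given
// partitions of one topic and prints any per-partition failure.
func electPreferred(ctx context.Context, cl *kgo.Client, topic string, partitions ...int32) error {
	req := kmsg.NewPtrElectLeadersRequest() // Default() sets TimeoutMillis to 60000
	req.ElectionType = 0                    // 0 = preferred replica, 1 = unclean (v1+)
	t := kmsg.NewElectLeadersRequestTopic()
	t.Topic = topic
	t.Partitions = partitions
	req.Topics = append(req.Topics, t)

	resp, err := req.RequestWith(ctx, cl)
	if err != nil {
		return err
	}
	if err := kerr.ErrorForCode(resp.ErrorCode); err != nil {
		return err // top-level error, e.g. CLUSTER_AUTHORIZATION_FAILED (v1+)
	}
	for _, rt := range resp.Topics {
		for _, p := range rt.Partitions {
			if err := kerr.ErrorForCode(p.ErrorCode); err != nil {
				fmt.Printf("%s[%d]: %v\n", rt.Topic, p.Partition, err)
			}
		}
	}
	return nil
}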
+func (v *ElectLeadersRequest) Default() { + v.TimeoutMillis = 60000 +} + +// NewElectLeadersRequest returns a default ElectLeadersRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewElectLeadersRequest() ElectLeadersRequest { + var v ElectLeadersRequest + v.Default() + return v +} + +type ElectLeadersResponseTopicPartition struct { + // Partition is the partition for this result. + Partition int32 + + // ErrorCode is the error code returned for this topic/partition leader + // election. + // + // CLUSTER_AUTHORIZATION_FAILED is returned if the client is not + // authorized to trigger leader elections. + // + // NOT_CONTROLLER is returned if the request was not issued to a Kafka + // controller. + // + // UNKNOWN_TOPIC_OR_PARTITION is returned if the topic/partition does + // not exist on any broker in the cluster (this is slightly different + // from the usual meaning of a single broker not knowing of the topic + // partition). + // + // PREFERRED_LEADER_NOT_AVAILABLE is returned if the preferred leader + // could not be elected (for example, the preferred leader was not in + // the ISR). + ErrorCode int16 + + // ErrorMessage is an informative message if the leader election failed. + ErrorMessage *string + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to ElectLeadersResponseTopicPartition. +func (v *ElectLeadersResponseTopicPartition) Default() { +} + +// NewElectLeadersResponseTopicPartition returns a default ElectLeadersResponseTopicPartition +// This is a shortcut for creating a struct and calling Default yourself. +func NewElectLeadersResponseTopicPartition() ElectLeadersResponseTopicPartition { + var v ElectLeadersResponseTopicPartition + v.Default() + return v +} + +type ElectLeadersResponseTopic struct { + // Topic is topic for the given partition results below. + Topic string + + // Partitions contains election results for a topic's partitions. + Partitions []ElectLeadersResponseTopicPartition + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to ElectLeadersResponseTopic. +func (v *ElectLeadersResponseTopic) Default() { +} + +// NewElectLeadersResponseTopic returns a default ElectLeadersResponseTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewElectLeadersResponseTopic() ElectLeadersResponseTopic { + var v ElectLeadersResponseTopic + v.Default() + return v +} + +// ElectLeadersResponse is a response for an ElectLeadersRequest. +type ElectLeadersResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ThrottleMillis is how long of a throttle Kafka will apply to the client + // after responding to this request. + ThrottleMillis int32 + + // ErrorCode is any error that applies to all partitions. + // + // CLUSTER_AUTHORIZATION_FAILED is returned if the client is not + // authorized to reassign partitions. + ErrorCode int16 // v1+ + + // Topics contains leader election results for each requested topic. + Topics []ElectLeadersResponseTopic + + // UnknownTags are tags Kafka sent that we do not know the purpose of. 
+ UnknownTags Tags // v2+ +} + +func (*ElectLeadersResponse) Key() int16 { return 43 } +func (*ElectLeadersResponse) MaxVersion() int16 { return 2 } +func (v *ElectLeadersResponse) SetVersion(version int16) { v.Version = version } +func (v *ElectLeadersResponse) GetVersion() int16 { return v.Version } +func (v *ElectLeadersResponse) IsFlexible() bool { return v.Version >= 2 } +func (v *ElectLeadersResponse) Throttle() (int32, bool) { return v.ThrottleMillis, v.Version >= 0 } +func (v *ElectLeadersResponse) SetThrottle(throttleMillis int32) { v.ThrottleMillis = throttleMillis } +func (v *ElectLeadersResponse) RequestKind() Request { return &ElectLeadersRequest{Version: v.Version} } + +func (v *ElectLeadersResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + if version >= 1 { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Partitions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Partition + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.ErrorMessage + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *ElectLeadersResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *ElectLeadersResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *ElectLeadersResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + s := v + { + v := b.Int32() + s.ThrottleMillis = v + } + if version >= 1 { + v := b.Int16() + s.ErrorCode = v + } + { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]ElectLeadersResponseTopic, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + { + v := s.Partitions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]ElectLeadersResponseTopicPartition, l)...) 
+ } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int32() + s.Partition = v + } + { + v := b.Int16() + s.ErrorCode = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.ErrorMessage = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Partitions = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrElectLeadersResponse returns a pointer to a default ElectLeadersResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrElectLeadersResponse() *ElectLeadersResponse { + var v ElectLeadersResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to ElectLeadersResponse. +func (v *ElectLeadersResponse) Default() { +} + +// NewElectLeadersResponse returns a default ElectLeadersResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewElectLeadersResponse() ElectLeadersResponse { + var v ElectLeadersResponse + v.Default() + return v +} + +type IncrementalAlterConfigsRequestResourceConfig struct { + // Name is a key to modify (e.g. segment.bytes). + // + // For broker loggers, see KIP-412 section "Request/Response Overview" + // for details on how to change per logger log levels. + Name string + + // Op is the type of operation to perform for this config name. + // + // SET (0) is to set a configuration value; the value must not be null. + // + // DELETE (1) is to delete a configuration key. + // + // APPEND (2) is to add a value to the list of values for a key (if the + // key is for a list of values). + // + // SUBTRACT (3) is to remove a value from a list of values (if the key + // is for a list of values). + Op IncrementalAlterConfigOp + + // Value is a value to set for the key (e.g. 10). + Value *string + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v1+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to IncrementalAlterConfigsRequestResourceConfig. +func (v *IncrementalAlterConfigsRequestResourceConfig) Default() { +} + +// NewIncrementalAlterConfigsRequestResourceConfig returns a default IncrementalAlterConfigsRequestResourceConfig +// This is a shortcut for creating a struct and calling Default yourself. +func NewIncrementalAlterConfigsRequestResourceConfig() IncrementalAlterConfigsRequestResourceConfig { + var v IncrementalAlterConfigsRequestResourceConfig + v.Default() + return v +} + +type IncrementalAlterConfigsRequestResource struct { + // ResourceType is an enum corresponding to the type of config to alter. + ResourceType ConfigResourceType + + // ResourceName is the name of config to alter. + // + // If the requested type is a topic, this corresponds to a topic name. + // + // If the requested type if a broker, this should either be empty or be + // the ID of the broker this request is issued to. If it is empty, this + // updates all broker configs. If a specific ID, this updates just the + // broker. 
Using a specific ID also ensures that brokers reload config + // or secret files even if the file path has not changed. Lastly, password + // config options can only be defined on a per broker basis. + // + // If the type is broker logger, this must be a broker ID. + ResourceName string + + // Configs contains key/value config pairs to set on the resource. + Configs []IncrementalAlterConfigsRequestResourceConfig + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v1+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to IncrementalAlterConfigsRequestResource. +func (v *IncrementalAlterConfigsRequestResource) Default() { +} + +// NewIncrementalAlterConfigsRequestResource returns a default IncrementalAlterConfigsRequestResource +// This is a shortcut for creating a struct and calling Default yourself. +func NewIncrementalAlterConfigsRequestResource() IncrementalAlterConfigsRequestResource { + var v IncrementalAlterConfigsRequestResource + v.Default() + return v +} + +// IncrementalAlterConfigsRequest issues ar equest to alter either topic or +// broker configs. +// +// This API was added in Kafka 2.3.0 to replace AlterConfigs. The key benefit +// of this API is that consumers do not need to know the full config state +// to add or remove new config options. See KIP-339 for more details. +type IncrementalAlterConfigsRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // Resources is an array of configs to alter. + Resources []IncrementalAlterConfigsRequestResource + + // ValidateOnly validates the request but does not apply it. + ValidateOnly bool + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v1+ +} + +func (*IncrementalAlterConfigsRequest) Key() int16 { return 44 } +func (*IncrementalAlterConfigsRequest) MaxVersion() int16 { return 1 } +func (v *IncrementalAlterConfigsRequest) SetVersion(version int16) { v.Version = version } +func (v *IncrementalAlterConfigsRequest) GetVersion() int16 { return v.Version } +func (v *IncrementalAlterConfigsRequest) IsFlexible() bool { return v.Version >= 1 } +func (v *IncrementalAlterConfigsRequest) ResponseKind() Response { + r := &IncrementalAlterConfigsResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. 
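+//
+// Illustrative sketch only (not part of the generated definitions): assuming an
+// existing *kgo.Client named cl and a context ctx, incrementally setting one
+// topic config value (SET is op 0; 2 is the TOPIC resource type in the Kafka
+// protocol) could look like:
+//
+//	value := "86400000"
+//	cfg := kmsg.NewIncrementalAlterConfigsRequestResourceConfig()
+//	cfg.Name = "retention.ms" // example config key
+//	cfg.Op = 0                // SET
+//	cfg.Value = &value
+//	res := kmsg.NewIncrementalAlterConfigsRequestResource()
+//	res.ResourceType = 2 // TOPIC
+//	res.ResourceName = "example-topic"
+//	res.Configs = append(res.Configs, cfg)
+//	req := kmsg.NewPtrIncrementalAlterConfigsRequest()
+//	req.Resources = append(req.Resources, res)
+//	resp, err := req.RequestWith(ctx, cl)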
+func (v *IncrementalAlterConfigsRequest) RequestWith(ctx context.Context, r Requestor) (*IncrementalAlterConfigsResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*IncrementalAlterConfigsResponse) + return resp, err +} + +func (v *IncrementalAlterConfigsRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 1 + _ = isFlexible + { + v := v.Resources + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.ResourceType + { + v := int8(v) + dst = kbin.AppendInt8(dst, v) + } + } + { + v := v.ResourceName + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Configs + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Name + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Op + { + v := int8(v) + dst = kbin.AppendInt8(dst, v) + } + } + { + v := v.Value + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + { + v := v.ValidateOnly + dst = kbin.AppendBool(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *IncrementalAlterConfigsRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *IncrementalAlterConfigsRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *IncrementalAlterConfigsRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 1 + _ = isFlexible + s := v + { + v := s.Resources + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]IncrementalAlterConfigsRequestResource, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var t ConfigResourceType + { + v := b.Int8() + t = ConfigResourceType(v) + } + v := t + s.ResourceType = v + } + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.ResourceName = v + } + { + v := s.Configs + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]IncrementalAlterConfigsRequestResourceConfig, l)...) 
+ } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Name = v + } + { + var t IncrementalAlterConfigOp + { + v := b.Int8() + t = IncrementalAlterConfigOp(v) + } + v := t + s.Op = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.Value = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Configs = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Resources = v + } + { + v := b.Bool() + s.ValidateOnly = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrIncrementalAlterConfigsRequest returns a pointer to a default IncrementalAlterConfigsRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrIncrementalAlterConfigsRequest() *IncrementalAlterConfigsRequest { + var v IncrementalAlterConfigsRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to IncrementalAlterConfigsRequest. +func (v *IncrementalAlterConfigsRequest) Default() { +} + +// NewIncrementalAlterConfigsRequest returns a default IncrementalAlterConfigsRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewIncrementalAlterConfigsRequest() IncrementalAlterConfigsRequest { + var v IncrementalAlterConfigsRequest + v.Default() + return v +} + +type IncrementalAlterConfigsResponseResource struct { + // ErrorCode is the error code returned for incrementally altering configs. + // + // CLUSTER_AUTHORIZATION_FAILED is returned if asking to alter broker + // configs but the client is not authorized to do so. + // + // TOPIC_AUTHORIZATION_FAILED is returned if asking to alter topic + // configs but the client is not authorized to do so. + // + // INVALID_TOPIC_EXCEPTION is returned if the requested topic was invalid. + // + // UNKNOWN_TOPIC_OR_PARTITION is returned if the broker does not know of + // the requested topic. + // + // INVALID_REQUEST is returned if the requested config is invalid or if + // asking Kafka to alter an invalid resource. + ErrorCode int16 + + // ErrorMessage is an informative message if the incremental alter config failed. + ErrorMessage *string + + // ResourceType is the enum corresponding to the type of altered config. + ResourceType ConfigResourceType + + // ResourceName is the name corresponding to the incremental alter config + // request. + ResourceName string + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v1+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to IncrementalAlterConfigsResponseResource. +func (v *IncrementalAlterConfigsResponseResource) Default() { +} + +// NewIncrementalAlterConfigsResponseResource returns a default IncrementalAlterConfigsResponseResource +// This is a shortcut for creating a struct and calling Default yourself. 
+func NewIncrementalAlterConfigsResponseResource() IncrementalAlterConfigsResponseResource { + var v IncrementalAlterConfigsResponseResource + v.Default() + return v +} + +// IncrementalAlterConfigsResponse is returned from an IncrementalAlterConfigsRequest. +type IncrementalAlterConfigsResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ThrottleMillis is how long of a throttle Kafka will apply to the client + // after responding to this request. + ThrottleMillis int32 + + // Resources are responses for each resources in the alter request. + Resources []IncrementalAlterConfigsResponseResource + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v1+ +} + +func (*IncrementalAlterConfigsResponse) Key() int16 { return 44 } +func (*IncrementalAlterConfigsResponse) MaxVersion() int16 { return 1 } +func (v *IncrementalAlterConfigsResponse) SetVersion(version int16) { v.Version = version } +func (v *IncrementalAlterConfigsResponse) GetVersion() int16 { return v.Version } +func (v *IncrementalAlterConfigsResponse) IsFlexible() bool { return v.Version >= 1 } +func (v *IncrementalAlterConfigsResponse) Throttle() (int32, bool) { + return v.ThrottleMillis, v.Version >= 0 +} + +func (v *IncrementalAlterConfigsResponse) SetThrottle(throttleMillis int32) { + v.ThrottleMillis = throttleMillis +} + +func (v *IncrementalAlterConfigsResponse) RequestKind() Request { + return &IncrementalAlterConfigsRequest{Version: v.Version} +} + +func (v *IncrementalAlterConfigsResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 1 + _ = isFlexible + { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Resources + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.ErrorMessage + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + { + v := v.ResourceType + { + v := int8(v) + dst = kbin.AppendInt8(dst, v) + } + } + { + v := v.ResourceName + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *IncrementalAlterConfigsResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *IncrementalAlterConfigsResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *IncrementalAlterConfigsResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 1 + _ = isFlexible + s := v + { + v := b.Int32() + s.ThrottleMillis = v + } + { + v := s.Resources + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]IncrementalAlterConfigsResponseResource, l)...) 
+ } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int16() + s.ErrorCode = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.ErrorMessage = v + } + { + var t ConfigResourceType + { + v := b.Int8() + t = ConfigResourceType(v) + } + v := t + s.ResourceType = v + } + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.ResourceName = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Resources = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrIncrementalAlterConfigsResponse returns a pointer to a default IncrementalAlterConfigsResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrIncrementalAlterConfigsResponse() *IncrementalAlterConfigsResponse { + var v IncrementalAlterConfigsResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to IncrementalAlterConfigsResponse. +func (v *IncrementalAlterConfigsResponse) Default() { +} + +// NewIncrementalAlterConfigsResponse returns a default IncrementalAlterConfigsResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewIncrementalAlterConfigsResponse() IncrementalAlterConfigsResponse { + var v IncrementalAlterConfigsResponse + v.Default() + return v +} + +type AlterPartitionAssignmentsRequestTopicPartition struct { + // Partition is a partition to reassign. + Partition int32 + + // Replicas are replicas to place the partition on, or null to + // cancel a pending reassignment of this partition. + Replicas []int32 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to AlterPartitionAssignmentsRequestTopicPartition. +func (v *AlterPartitionAssignmentsRequestTopicPartition) Default() { +} + +// NewAlterPartitionAssignmentsRequestTopicPartition returns a default AlterPartitionAssignmentsRequestTopicPartition +// This is a shortcut for creating a struct and calling Default yourself. +func NewAlterPartitionAssignmentsRequestTopicPartition() AlterPartitionAssignmentsRequestTopicPartition { + var v AlterPartitionAssignmentsRequestTopicPartition + v.Default() + return v +} + +type AlterPartitionAssignmentsRequestTopic struct { + // Topic is a topic to reassign the partitions of. + Topic string + + // Partitions contains partitions to reassign. + Partitions []AlterPartitionAssignmentsRequestTopicPartition + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to AlterPartitionAssignmentsRequestTopic. +func (v *AlterPartitionAssignmentsRequestTopic) Default() { +} + +// NewAlterPartitionAssignmentsRequestTopic returns a default AlterPartitionAssignmentsRequestTopic +// This is a shortcut for creating a struct and calling Default yourself. 
+func NewAlterPartitionAssignmentsRequestTopic() AlterPartitionAssignmentsRequestTopic { + var v AlterPartitionAssignmentsRequestTopic + v.Default() + return v +} + +// AlterPartitionAssignmentsRequest, proposed in KIP-455 and implemented in +// Kafka 2.4.0, is a request to reassign partitions to certain brokers. +// +// ACL wise, this requires ALTER on CLUSTER. +type AlterPartitionAssignmentsRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // TimeoutMillis is how long Kafka can wait before responding to this request. + // This field has no effect on Kafka's processing of the request; the request + // will continue to be processed if the timeout is reached. If the timeout is + // reached, Kafka will reply with a REQUEST_TIMED_OUT error. + // + // This field has a default of 60000. + TimeoutMillis int32 + + // Topics are topics for which to reassign partitions of. + Topics []AlterPartitionAssignmentsRequestTopic + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +func (*AlterPartitionAssignmentsRequest) Key() int16 { return 45 } +func (*AlterPartitionAssignmentsRequest) MaxVersion() int16 { return 0 } +func (v *AlterPartitionAssignmentsRequest) SetVersion(version int16) { v.Version = version } +func (v *AlterPartitionAssignmentsRequest) GetVersion() int16 { return v.Version } +func (v *AlterPartitionAssignmentsRequest) IsFlexible() bool { return v.Version >= 0 } +func (v *AlterPartitionAssignmentsRequest) Timeout() int32 { return v.TimeoutMillis } +func (v *AlterPartitionAssignmentsRequest) SetTimeout(timeoutMillis int32) { + v.TimeoutMillis = timeoutMillis +} +func (v *AlterPartitionAssignmentsRequest) IsAdminRequest() {} +func (v *AlterPartitionAssignmentsRequest) ResponseKind() Response { + r := &AlterPartitionAssignmentsResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. 
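+//
+// Illustrative sketch only (not part of the generated definitions): assuming an
+// existing *kgo.Client named cl and a context ctx, reassigning one partition of
+// an example topic to an example replica set could look like:
+//
+//	p := kmsg.NewAlterPartitionAssignmentsRequestTopicPartition()
+//	p.Partition = 0
+//	p.Replicas = []int32{1, 2, 3} // hypothetical target broker IDs
+//	t := kmsg.NewAlterPartitionAssignmentsRequestTopic()
+//	t.Topic = "example-topic"
+//	t.Partitions = append(t.Partitions, p)
+//	req := kmsg.NewPtrAlterPartitionAssignmentsRequest()
+//	req.Topics = append(req.Topics, t)
+//	resp, err := req.RequestWith(ctx, cl)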
+func (v *AlterPartitionAssignmentsRequest) RequestWith(ctx context.Context, r Requestor) (*AlterPartitionAssignmentsResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*AlterPartitionAssignmentsResponse) + return resp, err +} + +func (v *AlterPartitionAssignmentsRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + { + v := v.TimeoutMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Partitions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Partition + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Replicas + if isFlexible { + dst = kbin.AppendCompactNullableArrayLen(dst, len(v), v == nil) + } else { + dst = kbin.AppendNullableArrayLen(dst, len(v), v == nil) + } + for i := range v { + v := v[i] + dst = kbin.AppendInt32(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *AlterPartitionAssignmentsRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *AlterPartitionAssignmentsRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *AlterPartitionAssignmentsRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + s := v + { + v := b.Int32() + s.TimeoutMillis = v + } + { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]AlterPartitionAssignmentsRequestTopic, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + { + v := s.Partitions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]AlterPartitionAssignmentsRequestTopicPartition, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int32() + s.Partition = v + } + { + v := s.Replicas + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if version < 0 || l == 0 { + a = []int32{} + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]int32, l)...) 
+ } + for i := int32(0); i < l; i++ { + v := b.Int32() + a[i] = v + } + v = a + s.Replicas = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Partitions = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrAlterPartitionAssignmentsRequest returns a pointer to a default AlterPartitionAssignmentsRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrAlterPartitionAssignmentsRequest() *AlterPartitionAssignmentsRequest { + var v AlterPartitionAssignmentsRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to AlterPartitionAssignmentsRequest. +func (v *AlterPartitionAssignmentsRequest) Default() { + v.TimeoutMillis = 60000 +} + +// NewAlterPartitionAssignmentsRequest returns a default AlterPartitionAssignmentsRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewAlterPartitionAssignmentsRequest() AlterPartitionAssignmentsRequest { + var v AlterPartitionAssignmentsRequest + v.Default() + return v +} + +type AlterPartitionAssignmentsResponseTopicPartition struct { + // Partition is the partition being responded to. + Partition int32 + + // ErrorCode is the error code returned for partition reassignments. + // + // REQUEST_TIMED_OUT is returned if the request timed out. + // + // NOT_CONTROLLER is returned if the request was not issued to a Kafka + // controller. + // + // CLUSTER_AUTHORIZATION_FAILED is returned if the client is not + // authorized to reassign partitions. + // + // NO_REASSIGNMENT_IN_PROGRESS is returned for partition reassignment + // cancellations when the partition was not being reassigned. + // + // UNKNOWN_TOPIC_OR_PARTITION is returned if the broker does not know of + // the requested topic or the topic is being deleted. + ErrorCode int16 + + // ErrorMessage is an informative message if the partition reassignment failed. + ErrorMessage *string + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to AlterPartitionAssignmentsResponseTopicPartition. +func (v *AlterPartitionAssignmentsResponseTopicPartition) Default() { +} + +// NewAlterPartitionAssignmentsResponseTopicPartition returns a default AlterPartitionAssignmentsResponseTopicPartition +// This is a shortcut for creating a struct and calling Default yourself. +func NewAlterPartitionAssignmentsResponseTopicPartition() AlterPartitionAssignmentsResponseTopicPartition { + var v AlterPartitionAssignmentsResponseTopicPartition + v.Default() + return v +} + +type AlterPartitionAssignmentsResponseTopic struct { + // Topic is the topic being responded to. + Topic string + + // Partitions contains responses for partitions. + Partitions []AlterPartitionAssignmentsResponseTopicPartition + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to AlterPartitionAssignmentsResponseTopic. 
+func (v *AlterPartitionAssignmentsResponseTopic) Default() { +} + +// NewAlterPartitionAssignmentsResponseTopic returns a default AlterPartitionAssignmentsResponseTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewAlterPartitionAssignmentsResponseTopic() AlterPartitionAssignmentsResponseTopic { + var v AlterPartitionAssignmentsResponseTopic + v.Default() + return v +} + +// AlterPartitionAssignmentsResponse is returned for an AlterPartitionAssignmentsRequest. +type AlterPartitionAssignmentsResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ThrottleMillis is how long of a throttle Kafka will apply to the client + // after responding to this request. + ThrottleMillis int32 + + // ErrorCode is any global (applied to all partitions) error code. + ErrorCode int16 + + // ErrorMessage is any global (applied to all partitions) error message. + ErrorMessage *string + + // Topics contains responses for each topic requested. + Topics []AlterPartitionAssignmentsResponseTopic + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +func (*AlterPartitionAssignmentsResponse) Key() int16 { return 45 } +func (*AlterPartitionAssignmentsResponse) MaxVersion() int16 { return 0 } +func (v *AlterPartitionAssignmentsResponse) SetVersion(version int16) { v.Version = version } +func (v *AlterPartitionAssignmentsResponse) GetVersion() int16 { return v.Version } +func (v *AlterPartitionAssignmentsResponse) IsFlexible() bool { return v.Version >= 0 } +func (v *AlterPartitionAssignmentsResponse) Throttle() (int32, bool) { + return v.ThrottleMillis, v.Version >= 0 +} + +func (v *AlterPartitionAssignmentsResponse) SetThrottle(throttleMillis int32) { + v.ThrottleMillis = throttleMillis +} + +func (v *AlterPartitionAssignmentsResponse) RequestKind() Request { + return &AlterPartitionAssignmentsRequest{Version: v.Version} +} + +func (v *AlterPartitionAssignmentsResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.ErrorMessage + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Partitions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Partition + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.ErrorMessage + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + 
return dst +} + +func (v *AlterPartitionAssignmentsResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *AlterPartitionAssignmentsResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *AlterPartitionAssignmentsResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + s := v + { + v := b.Int32() + s.ThrottleMillis = v + } + { + v := b.Int16() + s.ErrorCode = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.ErrorMessage = v + } + { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]AlterPartitionAssignmentsResponseTopic, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + { + v := s.Partitions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]AlterPartitionAssignmentsResponseTopicPartition, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int32() + s.Partition = v + } + { + v := b.Int16() + s.ErrorCode = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.ErrorMessage = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Partitions = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrAlterPartitionAssignmentsResponse returns a pointer to a default AlterPartitionAssignmentsResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrAlterPartitionAssignmentsResponse() *AlterPartitionAssignmentsResponse { + var v AlterPartitionAssignmentsResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to AlterPartitionAssignmentsResponse. +func (v *AlterPartitionAssignmentsResponse) Default() { +} + +// NewAlterPartitionAssignmentsResponse returns a default AlterPartitionAssignmentsResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewAlterPartitionAssignmentsResponse() AlterPartitionAssignmentsResponse { + var v AlterPartitionAssignmentsResponse + v.Default() + return v +} + +type ListPartitionReassignmentsRequestTopic struct { + // Topic is a topic to list in progress partition reassingments of. + Topic string + + // Partitions are partitions to list in progress reassignments of. + Partitions []int32 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. 
+ UnknownTags Tags +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to ListPartitionReassignmentsRequestTopic. +func (v *ListPartitionReassignmentsRequestTopic) Default() { +} + +// NewListPartitionReassignmentsRequestTopic returns a default ListPartitionReassignmentsRequestTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewListPartitionReassignmentsRequestTopic() ListPartitionReassignmentsRequestTopic { + var v ListPartitionReassignmentsRequestTopic + v.Default() + return v +} + +// ListPartitionReassignmentsRequest, proposed in KIP-455 and implemented in +// Kafka 2.4.0, is a request to list in progress partition reassignments. +// +// ACL wise, this requires DESCRIBE on CLUSTER. +type ListPartitionReassignmentsRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // TimeoutMillis is how long Kafka can wait before responding to this request. + // This field has no effect on Kafka's processing of the request; the request + // will continue to be processed if the timeout is reached. If the timeout is + // reached, Kafka will reply with a REQUEST_TIMED_OUT error. + // + // This field has a default of 60000. + TimeoutMillis int32 + + // Topics are topics to list in progress partition reassignments of, or null + // to list everything. + Topics []ListPartitionReassignmentsRequestTopic + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +func (*ListPartitionReassignmentsRequest) Key() int16 { return 46 } +func (*ListPartitionReassignmentsRequest) MaxVersion() int16 { return 0 } +func (v *ListPartitionReassignmentsRequest) SetVersion(version int16) { v.Version = version } +func (v *ListPartitionReassignmentsRequest) GetVersion() int16 { return v.Version } +func (v *ListPartitionReassignmentsRequest) IsFlexible() bool { return v.Version >= 0 } +func (v *ListPartitionReassignmentsRequest) Timeout() int32 { return v.TimeoutMillis } +func (v *ListPartitionReassignmentsRequest) SetTimeout(timeoutMillis int32) { + v.TimeoutMillis = timeoutMillis +} +func (v *ListPartitionReassignmentsRequest) IsAdminRequest() {} +func (v *ListPartitionReassignmentsRequest) ResponseKind() Response { + r := &ListPartitionReassignmentsResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. 
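+//
+// Illustrative sketch only (not part of the generated definitions): assuming an
+// existing *kgo.Client named cl and a context ctx, listing every in-progress
+// reassignment (leaving Topics nil means "list everything") could look like:
+//
+//	req := kmsg.NewPtrListPartitionReassignmentsRequest()
+//	resp, err := req.RequestWith(ctx, cl)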
+func (v *ListPartitionReassignmentsRequest) RequestWith(ctx context.Context, r Requestor) (*ListPartitionReassignmentsResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*ListPartitionReassignmentsResponse) + return resp, err +} + +func (v *ListPartitionReassignmentsRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + { + v := v.TimeoutMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactNullableArrayLen(dst, len(v), v == nil) + } else { + dst = kbin.AppendNullableArrayLen(dst, len(v), v == nil) + } + for i := range v { + v := &v[i] + { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Partitions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := v[i] + dst = kbin.AppendInt32(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *ListPartitionReassignmentsRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *ListPartitionReassignmentsRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *ListPartitionReassignmentsRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + s := v + { + v := b.Int32() + s.TimeoutMillis = v + } + { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if version < 0 || l == 0 { + a = []ListPartitionReassignmentsRequestTopic{} + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]ListPartitionReassignmentsRequestTopic, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + { + v := s.Partitions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]int32, l)...) + } + for i := int32(0); i < l; i++ { + v := b.Int32() + a[i] = v + } + v = a + s.Partitions = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrListPartitionReassignmentsRequest returns a pointer to a default ListPartitionReassignmentsRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrListPartitionReassignmentsRequest() *ListPartitionReassignmentsRequest { + var v ListPartitionReassignmentsRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to ListPartitionReassignmentsRequest. 
+func (v *ListPartitionReassignmentsRequest) Default() { + v.TimeoutMillis = 60000 +} + +// NewListPartitionReassignmentsRequest returns a default ListPartitionReassignmentsRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewListPartitionReassignmentsRequest() ListPartitionReassignmentsRequest { + var v ListPartitionReassignmentsRequest + v.Default() + return v +} + +type ListPartitionReassignmentsResponseTopicPartition struct { + // Partition is the partition being responded to. + Partition int32 + + // Replicas is the partition's current replicas. + Replicas []int32 + + // AddingReplicas are replicas currently being added to the partition. + AddingReplicas []int32 + + // RemovingReplicas are replicas currently being removed from the partition. + RemovingReplicas []int32 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to ListPartitionReassignmentsResponseTopicPartition. +func (v *ListPartitionReassignmentsResponseTopicPartition) Default() { +} + +// NewListPartitionReassignmentsResponseTopicPartition returns a default ListPartitionReassignmentsResponseTopicPartition +// This is a shortcut for creating a struct and calling Default yourself. +func NewListPartitionReassignmentsResponseTopicPartition() ListPartitionReassignmentsResponseTopicPartition { + var v ListPartitionReassignmentsResponseTopicPartition + v.Default() + return v +} + +type ListPartitionReassignmentsResponseTopic struct { + // Topic is the topic being responded to. + Topic string + + // Partitions contains responses for partitions. + Partitions []ListPartitionReassignmentsResponseTopicPartition + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to ListPartitionReassignmentsResponseTopic. +func (v *ListPartitionReassignmentsResponseTopic) Default() { +} + +// NewListPartitionReassignmentsResponseTopic returns a default ListPartitionReassignmentsResponseTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewListPartitionReassignmentsResponseTopic() ListPartitionReassignmentsResponseTopic { + var v ListPartitionReassignmentsResponseTopic + v.Default() + return v +} + +// ListPartitionReassignmentsResponse is returned for a ListPartitionReassignmentsRequest. +type ListPartitionReassignmentsResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ThrottleMillis is how long of a throttle Kafka will apply to the client + // after responding to this request. + ThrottleMillis int32 + + // ErrorCode is the error code returned for listing reassignments. + // + // REQUEST_TIMED_OUT is returned if the request timed out. + // + // NOT_CONTROLLER is returned if the request was not issued to a Kafka + // controller. + // + // CLUSTER_AUTHORIZATION_FAILED is returned if the client is not + // authorized to reassign partitions. + ErrorCode int16 + + // ErrorMessage is any global (applied to all partitions) error message. + ErrorMessage *string + + // Topics contains responses for each topic requested. + Topics []ListPartitionReassignmentsResponseTopic + + // UnknownTags are tags Kafka sent that we do not know the purpose of. 
+ UnknownTags Tags +} + +func (*ListPartitionReassignmentsResponse) Key() int16 { return 46 } +func (*ListPartitionReassignmentsResponse) MaxVersion() int16 { return 0 } +func (v *ListPartitionReassignmentsResponse) SetVersion(version int16) { v.Version = version } +func (v *ListPartitionReassignmentsResponse) GetVersion() int16 { return v.Version } +func (v *ListPartitionReassignmentsResponse) IsFlexible() bool { return v.Version >= 0 } +func (v *ListPartitionReassignmentsResponse) Throttle() (int32, bool) { + return v.ThrottleMillis, v.Version >= 0 +} + +func (v *ListPartitionReassignmentsResponse) SetThrottle(throttleMillis int32) { + v.ThrottleMillis = throttleMillis +} + +func (v *ListPartitionReassignmentsResponse) RequestKind() Request { + return &ListPartitionReassignmentsRequest{Version: v.Version} +} + +func (v *ListPartitionReassignmentsResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.ErrorMessage + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Partitions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Partition + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Replicas + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := v[i] + dst = kbin.AppendInt32(dst, v) + } + } + { + v := v.AddingReplicas + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := v[i] + dst = kbin.AppendInt32(dst, v) + } + } + { + v := v.RemovingReplicas + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := v[i] + dst = kbin.AppendInt32(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *ListPartitionReassignmentsResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *ListPartitionReassignmentsResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *ListPartitionReassignmentsResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + s := v + { + v := b.Int32() + s.ThrottleMillis = v + } + { + v := b.Int16() + s.ErrorCode = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = 
b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.ErrorMessage = v + } + { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]ListPartitionReassignmentsResponseTopic, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + { + v := s.Partitions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]ListPartitionReassignmentsResponseTopicPartition, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int32() + s.Partition = v + } + { + v := s.Replicas + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]int32, l)...) + } + for i := int32(0); i < l; i++ { + v := b.Int32() + a[i] = v + } + v = a + s.Replicas = v + } + { + v := s.AddingReplicas + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]int32, l)...) + } + for i := int32(0); i < l; i++ { + v := b.Int32() + a[i] = v + } + v = a + s.AddingReplicas = v + } + { + v := s.RemovingReplicas + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]int32, l)...) + } + for i := int32(0); i < l; i++ { + v := b.Int32() + a[i] = v + } + v = a + s.RemovingReplicas = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Partitions = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrListPartitionReassignmentsResponse returns a pointer to a default ListPartitionReassignmentsResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrListPartitionReassignmentsResponse() *ListPartitionReassignmentsResponse { + var v ListPartitionReassignmentsResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to ListPartitionReassignmentsResponse. +func (v *ListPartitionReassignmentsResponse) Default() { +} + +// NewListPartitionReassignmentsResponse returns a default ListPartitionReassignmentsResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewListPartitionReassignmentsResponse() ListPartitionReassignmentsResponse { + var v ListPartitionReassignmentsResponse + v.Default() + return v +} + +type OffsetDeleteRequestTopicPartition struct { + // Partition is a partition to delete offsets for. + Partition int32 +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to OffsetDeleteRequestTopicPartition. 
+func (v *OffsetDeleteRequestTopicPartition) Default() { +} + +// NewOffsetDeleteRequestTopicPartition returns a default OffsetDeleteRequestTopicPartition +// This is a shortcut for creating a struct and calling Default yourself. +func NewOffsetDeleteRequestTopicPartition() OffsetDeleteRequestTopicPartition { + var v OffsetDeleteRequestTopicPartition + v.Default() + return v +} + +type OffsetDeleteRequestTopic struct { + // Topic is a topic to delete offsets in. + Topic string + + // Partitions are partitions to delete offsets for. + Partitions []OffsetDeleteRequestTopicPartition +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to OffsetDeleteRequestTopic. +func (v *OffsetDeleteRequestTopic) Default() { +} + +// NewOffsetDeleteRequestTopic returns a default OffsetDeleteRequestTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewOffsetDeleteRequestTopic() OffsetDeleteRequestTopic { + var v OffsetDeleteRequestTopic + v.Default() + return v +} + +// OffsetDeleteRequest, proposed in KIP-496 and implemented in Kafka 2.4.0, is +// a request to delete group offsets. +// +// ACL wise, this requires DELETE on GROUP for the group and READ on TOPIC for +// each topic. +type OffsetDeleteRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // Group is the group to delete offsets in. + Group string + + // Topics are topics to delete offsets in. + Topics []OffsetDeleteRequestTopic +} + +func (*OffsetDeleteRequest) Key() int16 { return 47 } +func (*OffsetDeleteRequest) MaxVersion() int16 { return 0 } +func (v *OffsetDeleteRequest) SetVersion(version int16) { v.Version = version } +func (v *OffsetDeleteRequest) GetVersion() int16 { return v.Version } +func (v *OffsetDeleteRequest) IsFlexible() bool { return false } +func (v *OffsetDeleteRequest) IsGroupCoordinatorRequest() {} +func (v *OffsetDeleteRequest) ResponseKind() Response { + r := &OffsetDeleteResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. 
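
A minimal sketch of the RequestWith pattern documented above, applied to OffsetDeleteRequest. It assumes a *kgo.Client (which satisfies Requestor through its Request method) and a reachable broker; the broker address, group "example-group", topic "example-topic", and partition 0 are placeholder values, not anything defined by this patch.

package main

import (
	"context"
	"fmt"

	"github.com/twmb/franz-go/pkg/kerr"
	"github.com/twmb/franz-go/pkg/kgo"
	"github.com/twmb/franz-go/pkg/kmsg"
)

func main() {
	// A *kgo.Client implements the Requestor interface used by RequestWith.
	cl, err := kgo.NewClient(kgo.SeedBrokers("localhost:9092"))
	if err != nil {
		panic(err)
	}
	defer cl.Close()

	// Build the request with the generated New* constructors.
	part := kmsg.NewOffsetDeleteRequestTopicPartition()
	part.Partition = 0
	topic := kmsg.NewOffsetDeleteRequestTopic()
	topic.Topic = "example-topic"
	topic.Partitions = append(topic.Partitions, part)

	req := kmsg.NewPtrOffsetDeleteRequest()
	req.Group = "example-group"
	req.Topics = append(req.Topics, topic)

	// RequestWith issues the request on the client and type-asserts the response.
	resp, err := req.RequestWith(context.Background(), cl)
	if err != nil {
		panic(err)
	}
	// Group-level error, e.g. GROUP_AUTHORIZATION_FAILED.
	if err := kerr.ErrorForCode(resp.ErrorCode); err != nil {
		panic(err)
	}
	for _, t := range resp.Topics {
		for _, p := range t.Partitions {
			// Per-partition error codes map to named errors via pkg/kerr.
			if err := kerr.ErrorForCode(p.ErrorCode); err != nil {
				fmt.Printf("%s[%d]: %v\n", t.Topic, p.Partition, err)
			}
		}
	}
}
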
+func (v *OffsetDeleteRequest) RequestWith(ctx context.Context, r Requestor) (*OffsetDeleteResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*OffsetDeleteResponse) + return resp, err +} + +func (v *OffsetDeleteRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + { + v := v.Group + dst = kbin.AppendString(dst, v) + } + { + v := v.Topics + dst = kbin.AppendArrayLen(dst, len(v)) + for i := range v { + v := &v[i] + { + v := v.Topic + dst = kbin.AppendString(dst, v) + } + { + v := v.Partitions + dst = kbin.AppendArrayLen(dst, len(v)) + for i := range v { + v := &v[i] + { + v := v.Partition + dst = kbin.AppendInt32(dst, v) + } + } + } + } + } + return dst +} + +func (v *OffsetDeleteRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *OffsetDeleteRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *OffsetDeleteRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + s := v + { + var v string + if unsafe { + v = b.UnsafeString() + } else { + v = b.String() + } + s.Group = v + } + { + v := s.Topics + a := v + var l int32 + l = b.ArrayLen() + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]OffsetDeleteRequestTopic, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + v = b.UnsafeString() + } else { + v = b.String() + } + s.Topic = v + } + { + v := s.Partitions + a := v + var l int32 + l = b.ArrayLen() + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]OffsetDeleteRequestTopicPartition, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int32() + s.Partition = v + } + } + v = a + s.Partitions = v + } + } + v = a + s.Topics = v + } + return b.Complete() +} + +// NewPtrOffsetDeleteRequest returns a pointer to a default OffsetDeleteRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrOffsetDeleteRequest() *OffsetDeleteRequest { + var v OffsetDeleteRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to OffsetDeleteRequest. +func (v *OffsetDeleteRequest) Default() { +} + +// NewOffsetDeleteRequest returns a default OffsetDeleteRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewOffsetDeleteRequest() OffsetDeleteRequest { + var v OffsetDeleteRequest + v.Default() + return v +} + +type OffsetDeleteResponseTopicPartition struct { + // Partition is the partition being responded to. + Partition int32 + + // ErrorCode is any per partition error code. + // + // TOPIC_AUTHORIZATION_FAILED is returned if the client is not authorized + // for the topic / partition. + // + // UNKNOWN_TOPIC_OR_PARTITION is returned if the broker does not know of + // the requested topic. + // + // GROUP_SUBSCRIBED_TO_TOPIC is returned if the topic is still subscribed to. + ErrorCode int16 +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to OffsetDeleteResponseTopicPartition. +func (v *OffsetDeleteResponseTopicPartition) Default() { +} + +// NewOffsetDeleteResponseTopicPartition returns a default OffsetDeleteResponseTopicPartition +// This is a shortcut for creating a struct and calling Default yourself. 
+func NewOffsetDeleteResponseTopicPartition() OffsetDeleteResponseTopicPartition { + var v OffsetDeleteResponseTopicPartition + v.Default() + return v +} + +type OffsetDeleteResponseTopic struct { + // Topic is the topic being responded to. + Topic string + + // Partitions are partitions being responded to. + Partitions []OffsetDeleteResponseTopicPartition +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to OffsetDeleteResponseTopic. +func (v *OffsetDeleteResponseTopic) Default() { +} + +// NewOffsetDeleteResponseTopic returns a default OffsetDeleteResponseTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewOffsetDeleteResponseTopic() OffsetDeleteResponseTopic { + var v OffsetDeleteResponseTopic + v.Default() + return v +} + +// OffsetDeleteResponse is a response to an offset delete request. +type OffsetDeleteResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ErrorCode is any group wide error. + // + // GROUP_AUTHORIZATION_FAILED is returned if the client is not authorized + // for the group. + // + // INVALID_GROUP_ID is returned in the requested group ID is invalid. + // + // COORDINATOR_NOT_AVAILABLE is returned if the coordinator is not available. + // + // COORDINATOR_LOAD_IN_PROGRESS is returned if the group is loading. + // + // NOT_COORDINATOR is returned if the requested broker is not the coordinator + // for the requested group. + // + // GROUP_ID_NOT_FOUND is returned if the group ID does not exist. + ErrorCode int16 + + // ThrottleMillis is how long of a throttle Kafka will apply to the client + // after responding to this request. + ThrottleMillis int32 + + // Topics are responses to requested topics. 
+ Topics []OffsetDeleteResponseTopic +} + +func (*OffsetDeleteResponse) Key() int16 { return 47 } +func (*OffsetDeleteResponse) MaxVersion() int16 { return 0 } +func (v *OffsetDeleteResponse) SetVersion(version int16) { v.Version = version } +func (v *OffsetDeleteResponse) GetVersion() int16 { return v.Version } +func (v *OffsetDeleteResponse) IsFlexible() bool { return false } +func (v *OffsetDeleteResponse) Throttle() (int32, bool) { return v.ThrottleMillis, v.Version >= 0 } +func (v *OffsetDeleteResponse) SetThrottle(throttleMillis int32) { v.ThrottleMillis = throttleMillis } +func (v *OffsetDeleteResponse) RequestKind() Request { return &OffsetDeleteRequest{Version: v.Version} } + +func (v *OffsetDeleteResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Topics + dst = kbin.AppendArrayLen(dst, len(v)) + for i := range v { + v := &v[i] + { + v := v.Topic + dst = kbin.AppendString(dst, v) + } + { + v := v.Partitions + dst = kbin.AppendArrayLen(dst, len(v)) + for i := range v { + v := &v[i] + { + v := v.Partition + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + } + } + } + } + return dst +} + +func (v *OffsetDeleteResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *OffsetDeleteResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *OffsetDeleteResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + s := v + { + v := b.Int16() + s.ErrorCode = v + } + { + v := b.Int32() + s.ThrottleMillis = v + } + { + v := s.Topics + a := v + var l int32 + l = b.ArrayLen() + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]OffsetDeleteResponseTopic, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + v = b.UnsafeString() + } else { + v = b.String() + } + s.Topic = v + } + { + v := s.Partitions + a := v + var l int32 + l = b.ArrayLen() + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]OffsetDeleteResponseTopicPartition, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int32() + s.Partition = v + } + { + v := b.Int16() + s.ErrorCode = v + } + } + v = a + s.Partitions = v + } + } + v = a + s.Topics = v + } + return b.Complete() +} + +// NewPtrOffsetDeleteResponse returns a pointer to a default OffsetDeleteResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrOffsetDeleteResponse() *OffsetDeleteResponse { + var v OffsetDeleteResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to OffsetDeleteResponse. +func (v *OffsetDeleteResponse) Default() { +} + +// NewOffsetDeleteResponse returns a default OffsetDeleteResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewOffsetDeleteResponse() OffsetDeleteResponse { + var v OffsetDeleteResponse + v.Default() + return v +} + +type DescribeClientQuotasRequestComponent struct { + // EntityType is the entity component type that this filter component + // applies to; some possible values are "user" or "client-id". 
+ EntityType string + + // MatchType specifies how to match an entity, + // with 0 meaning match on the name exactly, + // 1 meaning match on the default name, + // and 2 meaning any specified name. + MatchType QuotasMatchType + + // Match is the string to match against, or null if unused for the given + // match type. + Match *string + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v1+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DescribeClientQuotasRequestComponent. +func (v *DescribeClientQuotasRequestComponent) Default() { +} + +// NewDescribeClientQuotasRequestComponent returns a default DescribeClientQuotasRequestComponent +// This is a shortcut for creating a struct and calling Default yourself. +func NewDescribeClientQuotasRequestComponent() DescribeClientQuotasRequestComponent { + var v DescribeClientQuotasRequestComponent + v.Default() + return v +} + +// DescribeClientQuotasRequest, proposed in KIP-546 and introduced with Kafka 2.6.0, +// provides a way to describe client quotas. +type DescribeClientQuotasRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // Components is a list of match filters to apply for describing quota entities. + Components []DescribeClientQuotasRequestComponent + + // Strict signifies whether matches are strict; if true, the response + // excludes entities with unspecified entity types. + Strict bool + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v1+ +} + +func (*DescribeClientQuotasRequest) Key() int16 { return 48 } +func (*DescribeClientQuotasRequest) MaxVersion() int16 { return 1 } +func (v *DescribeClientQuotasRequest) SetVersion(version int16) { v.Version = version } +func (v *DescribeClientQuotasRequest) GetVersion() int16 { return v.Version } +func (v *DescribeClientQuotasRequest) IsFlexible() bool { return v.Version >= 1 } +func (v *DescribeClientQuotasRequest) ResponseKind() Response { + r := &DescribeClientQuotasResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. 
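
In the same spirit, a minimal sketch of describing client quotas with the request above, assuming a *kgo.Client as the Requestor. The broker address and the "client-id" entity type are placeholders, and the numeric MatchType cast simply uses the values documented above (0 exact, 1 default, 2 any) rather than relying on any named constant.

package main

import (
	"context"
	"fmt"

	"github.com/twmb/franz-go/pkg/kerr"
	"github.com/twmb/franz-go/pkg/kgo"
	"github.com/twmb/franz-go/pkg/kmsg"
)

func main() {
	cl, err := kgo.NewClient(kgo.SeedBrokers("localhost:9092"))
	if err != nil {
		panic(err)
	}
	defer cl.Close()

	// One filter component: match any entity whose type is "client-id".
	comp := kmsg.NewDescribeClientQuotasRequestComponent()
	comp.EntityType = "client-id"
	comp.MatchType = kmsg.QuotasMatchType(2) // 2 = any specified name
	comp.Match = nil                         // unused for this match type

	req := kmsg.NewPtrDescribeClientQuotasRequest()
	req.Components = append(req.Components, comp)
	req.Strict = false

	resp, err := req.RequestWith(context.Background(), cl)
	if err != nil {
		panic(err)
	}
	if err := kerr.ErrorForCode(resp.ErrorCode); err != nil {
		panic(err)
	}
	for _, e := range resp.Entries {
		for _, ent := range e.Entity {
			name := "<default>" // a nil Name means the default entity
			if ent.Name != nil {
				name = *ent.Name
			}
			fmt.Printf("entity %s=%s ", ent.Type, name)
		}
		for _, v := range e.Values {
			fmt.Printf("%s=%v ", v.Key, v.Value)
		}
		fmt.Println()
	}
}
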
+func (v *DescribeClientQuotasRequest) RequestWith(ctx context.Context, r Requestor) (*DescribeClientQuotasResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*DescribeClientQuotasResponse) + return resp, err +} + +func (v *DescribeClientQuotasRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 1 + _ = isFlexible + { + v := v.Components + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.EntityType + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.MatchType + { + v := int8(v) + dst = kbin.AppendInt8(dst, v) + } + } + { + v := v.Match + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + { + v := v.Strict + dst = kbin.AppendBool(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *DescribeClientQuotasRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *DescribeClientQuotasRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *DescribeClientQuotasRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 1 + _ = isFlexible + s := v + { + v := s.Components + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]DescribeClientQuotasRequestComponent, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.EntityType = v + } + { + var t QuotasMatchType + { + v := b.Int8() + t = QuotasMatchType(v) + } + v := t + s.MatchType = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.Match = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Components = v + } + { + v := b.Bool() + s.Strict = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrDescribeClientQuotasRequest returns a pointer to a default DescribeClientQuotasRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrDescribeClientQuotasRequest() *DescribeClientQuotasRequest { + var v DescribeClientQuotasRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DescribeClientQuotasRequest. +func (v *DescribeClientQuotasRequest) Default() { +} + +// NewDescribeClientQuotasRequest returns a default DescribeClientQuotasRequest +// This is a shortcut for creating a struct and calling Default yourself. 
+func NewDescribeClientQuotasRequest() DescribeClientQuotasRequest { + var v DescribeClientQuotasRequest + v.Default() + return v +} + +type DescribeClientQuotasResponseEntryEntity struct { + // Type is the entity type. + Type string + + // Name is the entity name, or null if the default. + Name *string + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v1+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DescribeClientQuotasResponseEntryEntity. +func (v *DescribeClientQuotasResponseEntryEntity) Default() { +} + +// NewDescribeClientQuotasResponseEntryEntity returns a default DescribeClientQuotasResponseEntryEntity +// This is a shortcut for creating a struct and calling Default yourself. +func NewDescribeClientQuotasResponseEntryEntity() DescribeClientQuotasResponseEntryEntity { + var v DescribeClientQuotasResponseEntryEntity + v.Default() + return v +} + +type DescribeClientQuotasResponseEntryValue struct { + // Key is the quota configuration key. + Key string + + // Value is the quota configuration value. + Value float64 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v1+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DescribeClientQuotasResponseEntryValue. +func (v *DescribeClientQuotasResponseEntryValue) Default() { +} + +// NewDescribeClientQuotasResponseEntryValue returns a default DescribeClientQuotasResponseEntryValue +// This is a shortcut for creating a struct and calling Default yourself. +func NewDescribeClientQuotasResponseEntryValue() DescribeClientQuotasResponseEntryValue { + var v DescribeClientQuotasResponseEntryValue + v.Default() + return v +} + +type DescribeClientQuotasResponseEntry struct { + // Entity contains the quota entity components being described. + Entity []DescribeClientQuotasResponseEntryEntity + + // Values are quota values for the entity. + Values []DescribeClientQuotasResponseEntryValue + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v1+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DescribeClientQuotasResponseEntry. +func (v *DescribeClientQuotasResponseEntry) Default() { +} + +// NewDescribeClientQuotasResponseEntry returns a default DescribeClientQuotasResponseEntry +// This is a shortcut for creating a struct and calling Default yourself. +func NewDescribeClientQuotasResponseEntry() DescribeClientQuotasResponseEntry { + var v DescribeClientQuotasResponseEntry + v.Default() + return v +} + +// DescribeClientQuotasResponse is a response for a DescribeClientQuotasRequest. +type DescribeClientQuotasResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ThrottleMillis is how long of a throttle Kafka will apply to the client + // after responding to this request. + ThrottleMillis int32 + + // ErrorCode is any error for the request. + ErrorCode int16 + + // ErrorMessage is an error message for the request, or null if the request succeeded. + ErrorMessage *string + + // Entries contains entities that were matched. + Entries []DescribeClientQuotasResponseEntry + + // UnknownTags are tags Kafka sent that we do not know the purpose of. 
+ UnknownTags Tags // v1+ +} + +func (*DescribeClientQuotasResponse) Key() int16 { return 48 } +func (*DescribeClientQuotasResponse) MaxVersion() int16 { return 1 } +func (v *DescribeClientQuotasResponse) SetVersion(version int16) { v.Version = version } +func (v *DescribeClientQuotasResponse) GetVersion() int16 { return v.Version } +func (v *DescribeClientQuotasResponse) IsFlexible() bool { return v.Version >= 1 } +func (v *DescribeClientQuotasResponse) Throttle() (int32, bool) { + return v.ThrottleMillis, v.Version >= 0 +} + +func (v *DescribeClientQuotasResponse) SetThrottle(throttleMillis int32) { + v.ThrottleMillis = throttleMillis +} + +func (v *DescribeClientQuotasResponse) RequestKind() Request { + return &DescribeClientQuotasRequest{Version: v.Version} +} + +func (v *DescribeClientQuotasResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 1 + _ = isFlexible + { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.ErrorMessage + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + { + v := v.Entries + if isFlexible { + dst = kbin.AppendCompactNullableArrayLen(dst, len(v), v == nil) + } else { + dst = kbin.AppendNullableArrayLen(dst, len(v), v == nil) + } + for i := range v { + v := &v[i] + { + v := v.Entity + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Type + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Name + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + { + v := v.Values + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Key + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Value + dst = kbin.AppendFloat64(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *DescribeClientQuotasResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *DescribeClientQuotasResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *DescribeClientQuotasResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 1 + _ = isFlexible + s := v + { + v := b.Int32() + s.ThrottleMillis = v + } + { + v := b.Int16() + s.ErrorCode = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.ErrorMessage = v 
+ } + { + v := s.Entries + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if version < 0 || l == 0 { + a = []DescribeClientQuotasResponseEntry{} + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]DescribeClientQuotasResponseEntry, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := s.Entity + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]DescribeClientQuotasResponseEntryEntity, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Type = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.Name = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Entity = v + } + { + v := s.Values + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]DescribeClientQuotasResponseEntryValue, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Key = v + } + { + v := b.Float64() + s.Value = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Values = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Entries = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrDescribeClientQuotasResponse returns a pointer to a default DescribeClientQuotasResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrDescribeClientQuotasResponse() *DescribeClientQuotasResponse { + var v DescribeClientQuotasResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DescribeClientQuotasResponse. +func (v *DescribeClientQuotasResponse) Default() { +} + +// NewDescribeClientQuotasResponse returns a default DescribeClientQuotasResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewDescribeClientQuotasResponse() DescribeClientQuotasResponse { + var v DescribeClientQuotasResponse + v.Default() + return v +} + +type AlterClientQuotasRequestEntryEntity struct { + // Type is the entity component's type; e.g. "client-id", "user" or "ip". + Type string + + // Name is the name of the entity, or null for the default. + Name *string + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v1+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to AlterClientQuotasRequestEntryEntity. 
+func (v *AlterClientQuotasRequestEntryEntity) Default() { +} + +// NewAlterClientQuotasRequestEntryEntity returns a default AlterClientQuotasRequestEntryEntity +// This is a shortcut for creating a struct and calling Default yourself. +func NewAlterClientQuotasRequestEntryEntity() AlterClientQuotasRequestEntryEntity { + var v AlterClientQuotasRequestEntryEntity + v.Default() + return v +} + +type AlterClientQuotasRequestEntryOp struct { + // Key is the quota configuration key to alter. + Key string + + // Value is the value to set; ignored if remove is true. + Value float64 + + // Remove is whether the quota configuration value should be removed or set. + Remove bool + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v1+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to AlterClientQuotasRequestEntryOp. +func (v *AlterClientQuotasRequestEntryOp) Default() { +} + +// NewAlterClientQuotasRequestEntryOp returns a default AlterClientQuotasRequestEntryOp +// This is a shortcut for creating a struct and calling Default yourself. +func NewAlterClientQuotasRequestEntryOp() AlterClientQuotasRequestEntryOp { + var v AlterClientQuotasRequestEntryOp + v.Default() + return v +} + +type AlterClientQuotasRequestEntry struct { + // Entity contains the components of a quota entity to alter. + Entity []AlterClientQuotasRequestEntryEntity + + // Ops contains quota configuration entries to alter. + Ops []AlterClientQuotasRequestEntryOp + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v1+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to AlterClientQuotasRequestEntry. +func (v *AlterClientQuotasRequestEntry) Default() { +} + +// NewAlterClientQuotasRequestEntry returns a default AlterClientQuotasRequestEntry +// This is a shortcut for creating a struct and calling Default yourself. +func NewAlterClientQuotasRequestEntry() AlterClientQuotasRequestEntry { + var v AlterClientQuotasRequestEntry + v.Default() + return v +} + +// AlterClientQuotaRequest, proposed in KIP-546 and introduced with Kafka 2.6.0, +// provides a way to alter client quotas. +type AlterClientQuotasRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // Entries are quota configuration entries to alter. + Entries []AlterClientQuotasRequestEntry + + // ValidateOnly is makes this request a dry-run; the alteration is validated + // but not performed. + ValidateOnly bool + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v1+ +} + +func (*AlterClientQuotasRequest) Key() int16 { return 49 } +func (*AlterClientQuotasRequest) MaxVersion() int16 { return 1 } +func (v *AlterClientQuotasRequest) SetVersion(version int16) { v.Version = version } +func (v *AlterClientQuotasRequest) GetVersion() int16 { return v.Version } +func (v *AlterClientQuotasRequest) IsFlexible() bool { return v.Version >= 1 } +func (v *AlterClientQuotasRequest) ResponseKind() Response { + r := &AlterClientQuotasResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. 
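
A minimal dry-run sketch of AlterClientQuotasRequest as documented above, again assuming a *kgo.Client as the Requestor; the entity name and the producer_byte_rate quota key are placeholder choices.

package main

import (
	"context"
	"fmt"

	"github.com/twmb/franz-go/pkg/kerr"
	"github.com/twmb/franz-go/pkg/kgo"
	"github.com/twmb/franz-go/pkg/kmsg"
)

func main() {
	cl, err := kgo.NewClient(kgo.SeedBrokers("localhost:9092"))
	if err != nil {
		panic(err)
	}
	defer cl.Close()

	// Entity: the client-id "example-client" (a nil Name would mean the default entity).
	name := "example-client"
	ent := kmsg.NewAlterClientQuotasRequestEntryEntity()
	ent.Type = "client-id"
	ent.Name = &name

	// Op: set producer_byte_rate; Remove=true would delete the setting instead.
	op := kmsg.NewAlterClientQuotasRequestEntryOp()
	op.Key = "producer_byte_rate"
	op.Value = 1048576
	op.Remove = false

	entry := kmsg.NewAlterClientQuotasRequestEntry()
	entry.Entity = append(entry.Entity, ent)
	entry.Ops = append(entry.Ops, op)

	req := kmsg.NewPtrAlterClientQuotasRequest()
	req.Entries = append(req.Entries, entry)
	req.ValidateOnly = true // dry-run: validate but do not apply

	resp, err := req.RequestWith(context.Background(), cl)
	if err != nil {
		panic(err)
	}
	for _, e := range resp.Entries {
		if err := kerr.ErrorForCode(e.ErrorCode); err != nil {
			fmt.Println("alter failed:", err)
		}
	}
}
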
+func (v *AlterClientQuotasRequest) RequestWith(ctx context.Context, r Requestor) (*AlterClientQuotasResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*AlterClientQuotasResponse) + return resp, err +} + +func (v *AlterClientQuotasRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 1 + _ = isFlexible + { + v := v.Entries + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Entity + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Type + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Name + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + { + v := v.Ops + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Key + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Value + dst = kbin.AppendFloat64(dst, v) + } + { + v := v.Remove + dst = kbin.AppendBool(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + { + v := v.ValidateOnly + dst = kbin.AppendBool(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *AlterClientQuotasRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *AlterClientQuotasRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *AlterClientQuotasRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 1 + _ = isFlexible + s := v + { + v := s.Entries + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]AlterClientQuotasRequestEntry, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := s.Entity + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]AlterClientQuotasRequestEntryEntity, l)...) 
+ } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Type = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.Name = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Entity = v + } + { + v := s.Ops + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]AlterClientQuotasRequestEntryOp, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Key = v + } + { + v := b.Float64() + s.Value = v + } + { + v := b.Bool() + s.Remove = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Ops = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Entries = v + } + { + v := b.Bool() + s.ValidateOnly = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrAlterClientQuotasRequest returns a pointer to a default AlterClientQuotasRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrAlterClientQuotasRequest() *AlterClientQuotasRequest { + var v AlterClientQuotasRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to AlterClientQuotasRequest. +func (v *AlterClientQuotasRequest) Default() { +} + +// NewAlterClientQuotasRequest returns a default AlterClientQuotasRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewAlterClientQuotasRequest() AlterClientQuotasRequest { + var v AlterClientQuotasRequest + v.Default() + return v +} + +type AlterClientQuotasResponseEntryEntity struct { + // Type is the entity component's type; e.g. "client-id" or "user". + Type string + + // Name is the name of the entity, or null for the default. + Name *string + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v1+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to AlterClientQuotasResponseEntryEntity. +func (v *AlterClientQuotasResponseEntryEntity) Default() { +} + +// NewAlterClientQuotasResponseEntryEntity returns a default AlterClientQuotasResponseEntryEntity +// This is a shortcut for creating a struct and calling Default yourself. +func NewAlterClientQuotasResponseEntryEntity() AlterClientQuotasResponseEntryEntity { + var v AlterClientQuotasResponseEntryEntity + v.Default() + return v +} + +type AlterClientQuotasResponseEntry struct { + // ErrorCode is the error code for an alter on a matched entity. + ErrorCode int16 + + // ErrorMessage is an informative message if the alter on this entity failed. + ErrorMessage *string + + // Entity contains the components of a matched entity. 
+ Entity []AlterClientQuotasResponseEntryEntity + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v1+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to AlterClientQuotasResponseEntry. +func (v *AlterClientQuotasResponseEntry) Default() { +} + +// NewAlterClientQuotasResponseEntry returns a default AlterClientQuotasResponseEntry +// This is a shortcut for creating a struct and calling Default yourself. +func NewAlterClientQuotasResponseEntry() AlterClientQuotasResponseEntry { + var v AlterClientQuotasResponseEntry + v.Default() + return v +} + +// AlterClientQuotasResponse is a response to an AlterClientQuotasRequest. +type AlterClientQuotasResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ThrottleMillis is how long of a throttle Kafka will apply to the client + // after responding to this request. + ThrottleMillis int32 + + // Entries contains results for the alter request. + Entries []AlterClientQuotasResponseEntry + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v1+ +} + +func (*AlterClientQuotasResponse) Key() int16 { return 49 } +func (*AlterClientQuotasResponse) MaxVersion() int16 { return 1 } +func (v *AlterClientQuotasResponse) SetVersion(version int16) { v.Version = version } +func (v *AlterClientQuotasResponse) GetVersion() int16 { return v.Version } +func (v *AlterClientQuotasResponse) IsFlexible() bool { return v.Version >= 1 } +func (v *AlterClientQuotasResponse) Throttle() (int32, bool) { return v.ThrottleMillis, v.Version >= 0 } +func (v *AlterClientQuotasResponse) SetThrottle(throttleMillis int32) { + v.ThrottleMillis = throttleMillis +} + +func (v *AlterClientQuotasResponse) RequestKind() Request { + return &AlterClientQuotasRequest{Version: v.Version} +} + +func (v *AlterClientQuotasResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 1 + _ = isFlexible + { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Entries + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.ErrorMessage + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + { + v := v.Entity + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Type + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Name + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *AlterClientQuotasResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *AlterClientQuotasResponse) UnsafeReadFrom(src []byte) 
error { + return v.readFrom(src, true) +} + +func (v *AlterClientQuotasResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 1 + _ = isFlexible + s := v + { + v := b.Int32() + s.ThrottleMillis = v + } + { + v := s.Entries + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]AlterClientQuotasResponseEntry, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int16() + s.ErrorCode = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.ErrorMessage = v + } + { + v := s.Entity + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]AlterClientQuotasResponseEntryEntity, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Type = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.Name = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Entity = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Entries = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrAlterClientQuotasResponse returns a pointer to a default AlterClientQuotasResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrAlterClientQuotasResponse() *AlterClientQuotasResponse { + var v AlterClientQuotasResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to AlterClientQuotasResponse. +func (v *AlterClientQuotasResponse) Default() { +} + +// NewAlterClientQuotasResponse returns a default AlterClientQuotasResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewAlterClientQuotasResponse() AlterClientQuotasResponse { + var v AlterClientQuotasResponse + v.Default() + return v +} + +type DescribeUserSCRAMCredentialsRequestUser struct { + // The user name. + Name string + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DescribeUserSCRAMCredentialsRequestUser. +func (v *DescribeUserSCRAMCredentialsRequestUser) Default() { +} + +// NewDescribeUserSCRAMCredentialsRequestUser returns a default DescribeUserSCRAMCredentialsRequestUser +// This is a shortcut for creating a struct and calling Default yourself. 
+func NewDescribeUserSCRAMCredentialsRequestUser() DescribeUserSCRAMCredentialsRequestUser { + var v DescribeUserSCRAMCredentialsRequestUser + v.Default() + return v +} + +// DescribeUserSCRAMCredentialsRequest, proposed in KIP-554 and introduced +// with Kafka 2.7.0, describes user SCRAM credentials. +// +// This request was introduced as part of the overarching KIP-500 initiative, +// which is to remove Zookeeper as a dependency. +// +// This request requires DESCRIBE on CLUSTER. +type DescribeUserSCRAMCredentialsRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // The users to describe, or null to describe all. + Users []DescribeUserSCRAMCredentialsRequestUser + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +func (*DescribeUserSCRAMCredentialsRequest) Key() int16 { return 50 } +func (*DescribeUserSCRAMCredentialsRequest) MaxVersion() int16 { return 0 } +func (v *DescribeUserSCRAMCredentialsRequest) SetVersion(version int16) { v.Version = version } +func (v *DescribeUserSCRAMCredentialsRequest) GetVersion() int16 { return v.Version } +func (v *DescribeUserSCRAMCredentialsRequest) IsFlexible() bool { return v.Version >= 0 } +func (v *DescribeUserSCRAMCredentialsRequest) ResponseKind() Response { + r := &DescribeUserSCRAMCredentialsResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. +func (v *DescribeUserSCRAMCredentialsRequest) RequestWith(ctx context.Context, r Requestor) (*DescribeUserSCRAMCredentialsResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*DescribeUserSCRAMCredentialsResponse) + return resp, err +} + +func (v *DescribeUserSCRAMCredentialsRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + { + v := v.Users + if isFlexible { + dst = kbin.AppendCompactNullableArrayLen(dst, len(v), v == nil) + } else { + dst = kbin.AppendNullableArrayLen(dst, len(v), v == nil) + } + for i := range v { + v := &v[i] + { + v := v.Name + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *DescribeUserSCRAMCredentialsRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *DescribeUserSCRAMCredentialsRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *DescribeUserSCRAMCredentialsRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + s := v + { + v := s.Users + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if version < 0 || l == 0 { + a = []DescribeUserSCRAMCredentialsRequestUser{} + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]DescribeUserSCRAMCredentialsRequestUser, l)...) 
+ } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Name = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Users = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrDescribeUserSCRAMCredentialsRequest returns a pointer to a default DescribeUserSCRAMCredentialsRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrDescribeUserSCRAMCredentialsRequest() *DescribeUserSCRAMCredentialsRequest { + var v DescribeUserSCRAMCredentialsRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DescribeUserSCRAMCredentialsRequest. +func (v *DescribeUserSCRAMCredentialsRequest) Default() { +} + +// NewDescribeUserSCRAMCredentialsRequest returns a default DescribeUserSCRAMCredentialsRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewDescribeUserSCRAMCredentialsRequest() DescribeUserSCRAMCredentialsRequest { + var v DescribeUserSCRAMCredentialsRequest + v.Default() + return v +} + +type DescribeUserSCRAMCredentialsResponseResultCredentialInfo struct { + // The SCRAM mechanism for this user, where 0 is UNKNOWN, 1 is SCRAM-SHA-256, + // and 2 is SCRAM-SHA-512. + Mechanism int8 + + // The number of iterations used in the SCRAM credential. + Iterations int32 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DescribeUserSCRAMCredentialsResponseResultCredentialInfo. +func (v *DescribeUserSCRAMCredentialsResponseResultCredentialInfo) Default() { +} + +// NewDescribeUserSCRAMCredentialsResponseResultCredentialInfo returns a default DescribeUserSCRAMCredentialsResponseResultCredentialInfo +// This is a shortcut for creating a struct and calling Default yourself. +func NewDescribeUserSCRAMCredentialsResponseResultCredentialInfo() DescribeUserSCRAMCredentialsResponseResultCredentialInfo { + var v DescribeUserSCRAMCredentialsResponseResultCredentialInfo + v.Default() + return v +} + +type DescribeUserSCRAMCredentialsResponseResult struct { + // The name this result corresponds to. + User string + + // The user-level error code. + // + // RESOURCE_NOT_FOUND if the user does not exist or has no credentials. + // + // DUPLICATE_RESOURCE if the user is requested twice+. + ErrorCode int16 + + // The user-level error message, if any. + ErrorMessage *string + + // Information about the SCRAM credentials for this user. + CredentialInfos []DescribeUserSCRAMCredentialsResponseResultCredentialInfo + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DescribeUserSCRAMCredentialsResponseResult. +func (v *DescribeUserSCRAMCredentialsResponseResult) Default() { +} + +// NewDescribeUserSCRAMCredentialsResponseResult returns a default DescribeUserSCRAMCredentialsResponseResult +// This is a shortcut for creating a struct and calling Default yourself. 
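
A minimal sketch of the DescribeUserSCRAMCredentialsRequest described above, leaving Users nil to describe all users; it assumes a *kgo.Client as the Requestor and a placeholder broker address.

package main

import (
	"context"
	"fmt"

	"github.com/twmb/franz-go/pkg/kerr"
	"github.com/twmb/franz-go/pkg/kgo"
	"github.com/twmb/franz-go/pkg/kmsg"
)

func main() {
	cl, err := kgo.NewClient(kgo.SeedBrokers("localhost:9092"))
	if err != nil {
		panic(err)
	}
	defer cl.Close()

	// Users == nil means "describe all", per the field documentation above.
	req := kmsg.NewPtrDescribeUserSCRAMCredentialsRequest()

	resp, err := req.RequestWith(context.Background(), cl)
	if err != nil {
		panic(err)
	}
	// Request-level error, e.g. CLUSTER_AUTHORIZATION_FAILED without DESCRIBE on CLUSTER.
	if err := kerr.ErrorForCode(resp.ErrorCode); err != nil {
		panic(err)
	}
	for _, r := range resp.Results {
		if err := kerr.ErrorForCode(r.ErrorCode); err != nil {
			fmt.Printf("%s: %v\n", r.User, err)
			continue
		}
		for _, c := range r.CredentialInfos {
			// Mechanism: 1 = SCRAM-SHA-256, 2 = SCRAM-SHA-512, per the comments above.
			fmt.Printf("%s: mechanism=%d iterations=%d\n", r.User, c.Mechanism, c.Iterations)
		}
	}
}
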
+func NewDescribeUserSCRAMCredentialsResponseResult() DescribeUserSCRAMCredentialsResponseResult { + var v DescribeUserSCRAMCredentialsResponseResult + v.Default() + return v +} + +// DescribeUserSCRAMCredentialsResponse is a response for a +// DescribeUserSCRAMCredentialsRequest. +type DescribeUserSCRAMCredentialsResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ThrottleMillis is how long of a throttle Kafka will apply to the client + // after responding to this request. + ThrottleMillis int32 + + // The request-level error code. This is 0 except for auth or infra issues. + // + // CLUSTER_AUTHORIZATION_FAILED if you do not have DESCRIBE on CLUSTER. + ErrorCode int16 + + // The request-level error message, if any. + ErrorMessage *string + + // Results for descriptions, one per user. + Results []DescribeUserSCRAMCredentialsResponseResult + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +func (*DescribeUserSCRAMCredentialsResponse) Key() int16 { return 50 } +func (*DescribeUserSCRAMCredentialsResponse) MaxVersion() int16 { return 0 } +func (v *DescribeUserSCRAMCredentialsResponse) SetVersion(version int16) { v.Version = version } +func (v *DescribeUserSCRAMCredentialsResponse) GetVersion() int16 { return v.Version } +func (v *DescribeUserSCRAMCredentialsResponse) IsFlexible() bool { return v.Version >= 0 } +func (v *DescribeUserSCRAMCredentialsResponse) Throttle() (int32, bool) { + return v.ThrottleMillis, v.Version >= 0 +} + +func (v *DescribeUserSCRAMCredentialsResponse) SetThrottle(throttleMillis int32) { + v.ThrottleMillis = throttleMillis +} + +func (v *DescribeUserSCRAMCredentialsResponse) RequestKind() Request { + return &DescribeUserSCRAMCredentialsRequest{Version: v.Version} +} + +func (v *DescribeUserSCRAMCredentialsResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.ErrorMessage + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + { + v := v.Results + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.User + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.ErrorMessage + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + { + v := v.CredentialInfos + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Mechanism + dst = kbin.AppendInt8(dst, v) + } + { + v := v.Iterations + dst = kbin.AppendInt32(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *DescribeUserSCRAMCredentialsResponse) 
ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *DescribeUserSCRAMCredentialsResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *DescribeUserSCRAMCredentialsResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + s := v + { + v := b.Int32() + s.ThrottleMillis = v + } + { + v := b.Int16() + s.ErrorCode = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.ErrorMessage = v + } + { + v := s.Results + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]DescribeUserSCRAMCredentialsResponseResult, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.User = v + } + { + v := b.Int16() + s.ErrorCode = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.ErrorMessage = v + } + { + v := s.CredentialInfos + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]DescribeUserSCRAMCredentialsResponseResultCredentialInfo, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int8() + s.Mechanism = v + } + { + v := b.Int32() + s.Iterations = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.CredentialInfos = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Results = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrDescribeUserSCRAMCredentialsResponse returns a pointer to a default DescribeUserSCRAMCredentialsResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrDescribeUserSCRAMCredentialsResponse() *DescribeUserSCRAMCredentialsResponse { + var v DescribeUserSCRAMCredentialsResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DescribeUserSCRAMCredentialsResponse. +func (v *DescribeUserSCRAMCredentialsResponse) Default() { +} + +// NewDescribeUserSCRAMCredentialsResponse returns a default DescribeUserSCRAMCredentialsResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewDescribeUserSCRAMCredentialsResponse() DescribeUserSCRAMCredentialsResponse { + var v DescribeUserSCRAMCredentialsResponse + v.Default() + return v +} + +type AlterUserSCRAMCredentialsRequestDeletion struct { + // The user name to match for removal. + Name string + + // The mechanism for the user name to remove. + Mechanism int8 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. 
+ UnknownTags Tags +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to AlterUserSCRAMCredentialsRequestDeletion. +func (v *AlterUserSCRAMCredentialsRequestDeletion) Default() { +} + +// NewAlterUserSCRAMCredentialsRequestDeletion returns a default AlterUserSCRAMCredentialsRequestDeletion +// This is a shortcut for creating a struct and calling Default yourself. +func NewAlterUserSCRAMCredentialsRequestDeletion() AlterUserSCRAMCredentialsRequestDeletion { + var v AlterUserSCRAMCredentialsRequestDeletion + v.Default() + return v +} + +type AlterUserSCRAMCredentialsRequestUpsertion struct { + // The user name to use. + Name string + + // The mechanism to use for creating, where 1 is SCRAM-SHA-256 and 2 is + // SCRAM-SHA-512. + Mechanism int8 + + // The number of iterations to use. This must be more than the minimum for + // the mechanism and cannot be more than 16384. + Iterations int32 + + // A random salt generated by the client. + Salt []byte + + // The salted password to use. + SaltedPassword []byte + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to AlterUserSCRAMCredentialsRequestUpsertion. +func (v *AlterUserSCRAMCredentialsRequestUpsertion) Default() { +} + +// NewAlterUserSCRAMCredentialsRequestUpsertion returns a default AlterUserSCRAMCredentialsRequestUpsertion +// This is a shortcut for creating a struct and calling Default yourself. +func NewAlterUserSCRAMCredentialsRequestUpsertion() AlterUserSCRAMCredentialsRequestUpsertion { + var v AlterUserSCRAMCredentialsRequestUpsertion + v.Default() + return v +} + +// AlterUserSCRAMCredentialsRequest, proposed in KIP-554 and introduced +// with Kafka 2.7.0, alters or deletes user SCRAM credentials. +// +// This request was introduced as part of the overarching KIP-500 initiative, +// which is to remove Zookeeper as a dependency. +// +// This request requires ALTER on CLUSTER. +type AlterUserSCRAMCredentialsRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // The SCRAM credentials to remove. + Deletions []AlterUserSCRAMCredentialsRequestDeletion + + // The SCRAM credentials to update or insert. + Upsertions []AlterUserSCRAMCredentialsRequestUpsertion + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +func (*AlterUserSCRAMCredentialsRequest) Key() int16 { return 51 } +func (*AlterUserSCRAMCredentialsRequest) MaxVersion() int16 { return 0 } +func (v *AlterUserSCRAMCredentialsRequest) SetVersion(version int16) { v.Version = version } +func (v *AlterUserSCRAMCredentialsRequest) GetVersion() int16 { return v.Version } +func (v *AlterUserSCRAMCredentialsRequest) IsFlexible() bool { return v.Version >= 0 } +func (v *AlterUserSCRAMCredentialsRequest) IsAdminRequest() {} +func (v *AlterUserSCRAMCredentialsRequest) ResponseKind() Response { + r := &AlterUserSCRAMCredentialsResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. 
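As a minimal usage sketch of these generated SCRAM types (assuming a broker reachable at localhost:9092 and a `*kgo.Client`, which satisfies the `Requestor` interface expected by `RequestWith`), an upsert of a SCRAM-SHA-256 credential might look like the following. The user name, password, and salt are placeholders, and the salted password derivation shown (PBKDF2 with HMAC-SHA-256, per RFC 5802/7677) is an illustration rather than anything defined in this file.

```go
// Illustrative sketch only: upsert a SCRAM-SHA-256 credential with the
// generated kmsg types. Broker address, user, password, and salt are placeholders.
package main

import (
	"context"
	"crypto/sha256"
	"fmt"
	"log"

	"github.com/twmb/franz-go/pkg/kerr"
	"github.com/twmb/franz-go/pkg/kgo"
	"github.com/twmb/franz-go/pkg/kmsg"
	"golang.org/x/crypto/pbkdf2"
)

func main() {
	client, err := kgo.NewClient(kgo.SeedBrokers("localhost:9092")) // assumed broker
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	salt := []byte("random-client-salt") // normally crypto/rand
	iterations := 8192                   // must fall within the mechanism's allowed range
	// SCRAM salted password per RFC 5802: Hi(password, salt, i) == PBKDF2-HMAC.
	salted := pbkdf2.Key([]byte("alice-password"), salt, iterations, sha256.Size, sha256.New)

	up := kmsg.NewAlterUserSCRAMCredentialsRequestUpsertion()
	up.Name = "alice" // placeholder user
	up.Mechanism = 1  // 1 = SCRAM-SHA-256, 2 = SCRAM-SHA-512
	up.Iterations = int32(iterations)
	up.Salt = salt
	up.SaltedPassword = salted

	req := kmsg.NewPtrAlterUserSCRAMCredentialsRequest()
	req.Upsertions = append(req.Upsertions, up)

	// *kgo.Client implements the Requestor interface, so it can be passed directly.
	resp, err := req.RequestWith(context.Background(), client)
	if err != nil {
		log.Fatal(err)
	}
	for _, res := range resp.Results {
		if err := kerr.ErrorForCode(res.ErrorCode); err != nil {
			fmt.Printf("user %s: %v\n", res.User, err)
		}
	}
}
```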
+func (v *AlterUserSCRAMCredentialsRequest) RequestWith(ctx context.Context, r Requestor) (*AlterUserSCRAMCredentialsResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*AlterUserSCRAMCredentialsResponse) + return resp, err +} + +func (v *AlterUserSCRAMCredentialsRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + { + v := v.Deletions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Name + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Mechanism + dst = kbin.AppendInt8(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + { + v := v.Upsertions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Name + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Mechanism + dst = kbin.AppendInt8(dst, v) + } + { + v := v.Iterations + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Salt + if isFlexible { + dst = kbin.AppendCompactBytes(dst, v) + } else { + dst = kbin.AppendBytes(dst, v) + } + } + { + v := v.SaltedPassword + if isFlexible { + dst = kbin.AppendCompactBytes(dst, v) + } else { + dst = kbin.AppendBytes(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *AlterUserSCRAMCredentialsRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *AlterUserSCRAMCredentialsRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *AlterUserSCRAMCredentialsRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + s := v + { + v := s.Deletions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]AlterUserSCRAMCredentialsRequestDeletion, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Name = v + } + { + v := b.Int8() + s.Mechanism = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Deletions = v + } + { + v := s.Upsertions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]AlterUserSCRAMCredentialsRequestUpsertion, l)...) 
+ } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Name = v + } + { + v := b.Int8() + s.Mechanism = v + } + { + v := b.Int32() + s.Iterations = v + } + { + var v []byte + if isFlexible { + v = b.CompactBytes() + } else { + v = b.Bytes() + } + s.Salt = v + } + { + var v []byte + if isFlexible { + v = b.CompactBytes() + } else { + v = b.Bytes() + } + s.SaltedPassword = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Upsertions = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrAlterUserSCRAMCredentialsRequest returns a pointer to a default AlterUserSCRAMCredentialsRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrAlterUserSCRAMCredentialsRequest() *AlterUserSCRAMCredentialsRequest { + var v AlterUserSCRAMCredentialsRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to AlterUserSCRAMCredentialsRequest. +func (v *AlterUserSCRAMCredentialsRequest) Default() { +} + +// NewAlterUserSCRAMCredentialsRequest returns a default AlterUserSCRAMCredentialsRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewAlterUserSCRAMCredentialsRequest() AlterUserSCRAMCredentialsRequest { + var v AlterUserSCRAMCredentialsRequest + v.Default() + return v +} + +type AlterUserSCRAMCredentialsResponseResult struct { + // The name this result corresponds to. + User string + + // The user-level error code. + ErrorCode int16 + + // The user-level error message, if any. + ErrorMessage *string + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to AlterUserSCRAMCredentialsResponseResult. +func (v *AlterUserSCRAMCredentialsResponseResult) Default() { +} + +// NewAlterUserSCRAMCredentialsResponseResult returns a default AlterUserSCRAMCredentialsResponseResult +// This is a shortcut for creating a struct and calling Default yourself. +func NewAlterUserSCRAMCredentialsResponseResult() AlterUserSCRAMCredentialsResponseResult { + var v AlterUserSCRAMCredentialsResponseResult + v.Default() + return v +} + +// AlterUserSCRAMCredentialsResponse is a response for an +// AlterUserSCRAMCredentialsRequest. +type AlterUserSCRAMCredentialsResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ThrottleMillis is how long of a throttle Kafka will apply to the client + // after responding to this request. + ThrottleMillis int32 + + // The results for deletions and upsertions. + Results []AlterUserSCRAMCredentialsResponseResult + + // UnknownTags are tags Kafka sent that we do not know the purpose of. 
+ UnknownTags Tags +} + +func (*AlterUserSCRAMCredentialsResponse) Key() int16 { return 51 } +func (*AlterUserSCRAMCredentialsResponse) MaxVersion() int16 { return 0 } +func (v *AlterUserSCRAMCredentialsResponse) SetVersion(version int16) { v.Version = version } +func (v *AlterUserSCRAMCredentialsResponse) GetVersion() int16 { return v.Version } +func (v *AlterUserSCRAMCredentialsResponse) IsFlexible() bool { return v.Version >= 0 } +func (v *AlterUserSCRAMCredentialsResponse) Throttle() (int32, bool) { + return v.ThrottleMillis, v.Version >= 0 +} + +func (v *AlterUserSCRAMCredentialsResponse) SetThrottle(throttleMillis int32) { + v.ThrottleMillis = throttleMillis +} + +func (v *AlterUserSCRAMCredentialsResponse) RequestKind() Request { + return &AlterUserSCRAMCredentialsRequest{Version: v.Version} +} + +func (v *AlterUserSCRAMCredentialsResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Results + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.User + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.ErrorMessage + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *AlterUserSCRAMCredentialsResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *AlterUserSCRAMCredentialsResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *AlterUserSCRAMCredentialsResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + s := v + { + v := b.Int32() + s.ThrottleMillis = v + } + { + v := s.Results + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]AlterUserSCRAMCredentialsResponseResult, l)...) 
+ } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.User = v + } + { + v := b.Int16() + s.ErrorCode = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.ErrorMessage = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Results = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrAlterUserSCRAMCredentialsResponse returns a pointer to a default AlterUserSCRAMCredentialsResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrAlterUserSCRAMCredentialsResponse() *AlterUserSCRAMCredentialsResponse { + var v AlterUserSCRAMCredentialsResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to AlterUserSCRAMCredentialsResponse. +func (v *AlterUserSCRAMCredentialsResponse) Default() { +} + +// NewAlterUserSCRAMCredentialsResponse returns a default AlterUserSCRAMCredentialsResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewAlterUserSCRAMCredentialsResponse() AlterUserSCRAMCredentialsResponse { + var v AlterUserSCRAMCredentialsResponse + v.Default() + return v +} + +type VoteRequestTopicPartition struct { + Partition int32 + + // The bumped epoch of the candidate sending the request. + CandidateEpoch int32 + + // The ID of the voter sending the request. + CandidateID int32 + + // The epoch of the last record written to the metadata log. + LastOffsetEpoch int32 + + // The offset of the last record written to the metadata log. + LastOffset int64 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to VoteRequestTopicPartition. +func (v *VoteRequestTopicPartition) Default() { +} + +// NewVoteRequestTopicPartition returns a default VoteRequestTopicPartition +// This is a shortcut for creating a struct and calling Default yourself. +func NewVoteRequestTopicPartition() VoteRequestTopicPartition { + var v VoteRequestTopicPartition + v.Default() + return v +} + +type VoteRequestTopic struct { + Topic string + + Partitions []VoteRequestTopicPartition + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to VoteRequestTopic. +func (v *VoteRequestTopic) Default() { +} + +// NewVoteRequestTopic returns a default VoteRequestTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewVoteRequestTopic() VoteRequestTopic { + var v VoteRequestTopic + v.Default() + return v +} + +// Part of KIP-595 to replace Kafka's dependence on Zookeeper with a +// Kafka-only raft protocol, +// VoteRequest is used by voters to hold a leader election. +// +// Since this is relatively Kafka internal, most fields are left undocumented. 
+type VoteRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + ClusterID *string + + Topics []VoteRequestTopic + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +func (*VoteRequest) Key() int16 { return 52 } +func (*VoteRequest) MaxVersion() int16 { return 0 } +func (v *VoteRequest) SetVersion(version int16) { v.Version = version } +func (v *VoteRequest) GetVersion() int16 { return v.Version } +func (v *VoteRequest) IsFlexible() bool { return v.Version >= 0 } +func (v *VoteRequest) IsAdminRequest() {} +func (v *VoteRequest) ResponseKind() Response { + r := &VoteResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. +func (v *VoteRequest) RequestWith(ctx context.Context, r Requestor) (*VoteResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*VoteResponse) + return resp, err +} + +func (v *VoteRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + { + v := v.ClusterID + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Partitions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Partition + dst = kbin.AppendInt32(dst, v) + } + { + v := v.CandidateEpoch + dst = kbin.AppendInt32(dst, v) + } + { + v := v.CandidateID + dst = kbin.AppendInt32(dst, v) + } + { + v := v.LastOffsetEpoch + dst = kbin.AppendInt32(dst, v) + } + { + v := v.LastOffset + dst = kbin.AppendInt64(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *VoteRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *VoteRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *VoteRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + s := v + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.ClusterID = v + } + { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]VoteRequestTopic, l)...) 
+ } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + { + v := s.Partitions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]VoteRequestTopicPartition, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int32() + s.Partition = v + } + { + v := b.Int32() + s.CandidateEpoch = v + } + { + v := b.Int32() + s.CandidateID = v + } + { + v := b.Int32() + s.LastOffsetEpoch = v + } + { + v := b.Int64() + s.LastOffset = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Partitions = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrVoteRequest returns a pointer to a default VoteRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrVoteRequest() *VoteRequest { + var v VoteRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to VoteRequest. +func (v *VoteRequest) Default() { +} + +// NewVoteRequest returns a default VoteRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewVoteRequest() VoteRequest { + var v VoteRequest + v.Default() + return v +} + +type VoteResponseTopicPartition struct { + Partition int32 + + ErrorCode int16 + + // The ID of the current leader, or -1 if the leader is unknown. + LeaderID int32 + + // The latest known leader epoch. + LeaderEpoch int32 + + // Whether the vote was granted. + VoteGranted bool + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to VoteResponseTopicPartition. +func (v *VoteResponseTopicPartition) Default() { +} + +// NewVoteResponseTopicPartition returns a default VoteResponseTopicPartition +// This is a shortcut for creating a struct and calling Default yourself. +func NewVoteResponseTopicPartition() VoteResponseTopicPartition { + var v VoteResponseTopicPartition + v.Default() + return v +} + +type VoteResponseTopic struct { + Topic string + + Partitions []VoteResponseTopicPartition + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to VoteResponseTopic. +func (v *VoteResponseTopic) Default() { +} + +// NewVoteResponseTopic returns a default VoteResponseTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewVoteResponseTopic() VoteResponseTopic { + var v VoteResponseTopic + v.Default() + return v +} + +type VoteResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + ErrorCode int16 + + Topics []VoteResponseTopic + + // UnknownTags are tags Kafka sent that we do not know the purpose of. 
+ UnknownTags Tags +} + +func (*VoteResponse) Key() int16 { return 52 } +func (*VoteResponse) MaxVersion() int16 { return 0 } +func (v *VoteResponse) SetVersion(version int16) { v.Version = version } +func (v *VoteResponse) GetVersion() int16 { return v.Version } +func (v *VoteResponse) IsFlexible() bool { return v.Version >= 0 } +func (v *VoteResponse) RequestKind() Request { return &VoteRequest{Version: v.Version} } + +func (v *VoteResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Partitions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Partition + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.LeaderID + dst = kbin.AppendInt32(dst, v) + } + { + v := v.LeaderEpoch + dst = kbin.AppendInt32(dst, v) + } + { + v := v.VoteGranted + dst = kbin.AppendBool(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *VoteResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *VoteResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *VoteResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + s := v + { + v := b.Int16() + s.ErrorCode = v + } + { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]VoteResponseTopic, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + { + v := s.Partitions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]VoteResponseTopicPartition, l)...) 
+ } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int32() + s.Partition = v + } + { + v := b.Int16() + s.ErrorCode = v + } + { + v := b.Int32() + s.LeaderID = v + } + { + v := b.Int32() + s.LeaderEpoch = v + } + { + v := b.Bool() + s.VoteGranted = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Partitions = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrVoteResponse returns a pointer to a default VoteResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrVoteResponse() *VoteResponse { + var v VoteResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to VoteResponse. +func (v *VoteResponse) Default() { +} + +// NewVoteResponse returns a default VoteResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewVoteResponse() VoteResponse { + var v VoteResponse + v.Default() + return v +} + +type BeginQuorumEpochRequestTopicPartition struct { + Partition int32 + + // The ID of the newly elected leader. + LeaderID int32 + + // The epoch of the newly elected leader. + LeaderEpoch int32 +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to BeginQuorumEpochRequestTopicPartition. +func (v *BeginQuorumEpochRequestTopicPartition) Default() { +} + +// NewBeginQuorumEpochRequestTopicPartition returns a default BeginQuorumEpochRequestTopicPartition +// This is a shortcut for creating a struct and calling Default yourself. +func NewBeginQuorumEpochRequestTopicPartition() BeginQuorumEpochRequestTopicPartition { + var v BeginQuorumEpochRequestTopicPartition + v.Default() + return v +} + +type BeginQuorumEpochRequestTopic struct { + Topic string + + Partitions []BeginQuorumEpochRequestTopicPartition +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to BeginQuorumEpochRequestTopic. +func (v *BeginQuorumEpochRequestTopic) Default() { +} + +// NewBeginQuorumEpochRequestTopic returns a default BeginQuorumEpochRequestTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewBeginQuorumEpochRequestTopic() BeginQuorumEpochRequestTopic { + var v BeginQuorumEpochRequestTopic + v.Default() + return v +} + +// Part of KIP-595 to replace Kafka's dependence on Zookeeper with a +// Kafka-only raft protocol, +// BeginQuorumEpochRequest is sent by a leader (once it has enough votes) +// to all voters in the election. +// +// Since this is relatively Kafka internal, most fields are left undocumented. +type BeginQuorumEpochRequest struct { + // Version is the version of this message used with a Kafka broker. 
+ Version int16 + + ClusterID *string + + Topics []BeginQuorumEpochRequestTopic +} + +func (*BeginQuorumEpochRequest) Key() int16 { return 53 } +func (*BeginQuorumEpochRequest) MaxVersion() int16 { return 0 } +func (v *BeginQuorumEpochRequest) SetVersion(version int16) { v.Version = version } +func (v *BeginQuorumEpochRequest) GetVersion() int16 { return v.Version } +func (v *BeginQuorumEpochRequest) IsFlexible() bool { return false } +func (v *BeginQuorumEpochRequest) IsAdminRequest() {} +func (v *BeginQuorumEpochRequest) ResponseKind() Response { + r := &BeginQuorumEpochResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. +func (v *BeginQuorumEpochRequest) RequestWith(ctx context.Context, r Requestor) (*BeginQuorumEpochResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*BeginQuorumEpochResponse) + return resp, err +} + +func (v *BeginQuorumEpochRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + { + v := v.ClusterID + dst = kbin.AppendNullableString(dst, v) + } + { + v := v.Topics + dst = kbin.AppendArrayLen(dst, len(v)) + for i := range v { + v := &v[i] + { + v := v.Topic + dst = kbin.AppendString(dst, v) + } + { + v := v.Partitions + dst = kbin.AppendArrayLen(dst, len(v)) + for i := range v { + v := &v[i] + { + v := v.Partition + dst = kbin.AppendInt32(dst, v) + } + { + v := v.LeaderID + dst = kbin.AppendInt32(dst, v) + } + { + v := v.LeaderEpoch + dst = kbin.AppendInt32(dst, v) + } + } + } + } + } + return dst +} + +func (v *BeginQuorumEpochRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *BeginQuorumEpochRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *BeginQuorumEpochRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + s := v + { + var v *string + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + s.ClusterID = v + } + { + v := s.Topics + a := v + var l int32 + l = b.ArrayLen() + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]BeginQuorumEpochRequestTopic, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + v = b.UnsafeString() + } else { + v = b.String() + } + s.Topic = v + } + { + v := s.Partitions + a := v + var l int32 + l = b.ArrayLen() + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]BeginQuorumEpochRequestTopicPartition, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int32() + s.Partition = v + } + { + v := b.Int32() + s.LeaderID = v + } + { + v := b.Int32() + s.LeaderEpoch = v + } + } + v = a + s.Partitions = v + } + } + v = a + s.Topics = v + } + return b.Complete() +} + +// NewPtrBeginQuorumEpochRequest returns a pointer to a default BeginQuorumEpochRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrBeginQuorumEpochRequest() *BeginQuorumEpochRequest { + var v BeginQuorumEpochRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to BeginQuorumEpochRequest. 
+func (v *BeginQuorumEpochRequest) Default() { +} + +// NewBeginQuorumEpochRequest returns a default BeginQuorumEpochRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewBeginQuorumEpochRequest() BeginQuorumEpochRequest { + var v BeginQuorumEpochRequest + v.Default() + return v +} + +type BeginQuorumEpochResponseTopicPartition struct { + Partition int32 + + ErrorCode int16 + + // The ID of the current leader, or -1 if the leader is unknown. + LeaderID int32 + + // The latest known leader epoch. + LeaderEpoch int32 +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to BeginQuorumEpochResponseTopicPartition. +func (v *BeginQuorumEpochResponseTopicPartition) Default() { +} + +// NewBeginQuorumEpochResponseTopicPartition returns a default BeginQuorumEpochResponseTopicPartition +// This is a shortcut for creating a struct and calling Default yourself. +func NewBeginQuorumEpochResponseTopicPartition() BeginQuorumEpochResponseTopicPartition { + var v BeginQuorumEpochResponseTopicPartition + v.Default() + return v +} + +type BeginQuorumEpochResponseTopic struct { + Topic string + + Partitions []BeginQuorumEpochResponseTopicPartition +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to BeginQuorumEpochResponseTopic. +func (v *BeginQuorumEpochResponseTopic) Default() { +} + +// NewBeginQuorumEpochResponseTopic returns a default BeginQuorumEpochResponseTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewBeginQuorumEpochResponseTopic() BeginQuorumEpochResponseTopic { + var v BeginQuorumEpochResponseTopic + v.Default() + return v +} + +type BeginQuorumEpochResponse struct { + // Version is the version of this message used with a Kafka broker. 
+ Version int16 + + ErrorCode int16 + + Topics []BeginQuorumEpochResponseTopic +} + +func (*BeginQuorumEpochResponse) Key() int16 { return 53 } +func (*BeginQuorumEpochResponse) MaxVersion() int16 { return 0 } +func (v *BeginQuorumEpochResponse) SetVersion(version int16) { v.Version = version } +func (v *BeginQuorumEpochResponse) GetVersion() int16 { return v.Version } +func (v *BeginQuorumEpochResponse) IsFlexible() bool { return false } +func (v *BeginQuorumEpochResponse) RequestKind() Request { + return &BeginQuorumEpochRequest{Version: v.Version} +} + +func (v *BeginQuorumEpochResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.Topics + dst = kbin.AppendArrayLen(dst, len(v)) + for i := range v { + v := &v[i] + { + v := v.Topic + dst = kbin.AppendString(dst, v) + } + { + v := v.Partitions + dst = kbin.AppendArrayLen(dst, len(v)) + for i := range v { + v := &v[i] + { + v := v.Partition + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.LeaderID + dst = kbin.AppendInt32(dst, v) + } + { + v := v.LeaderEpoch + dst = kbin.AppendInt32(dst, v) + } + } + } + } + } + return dst +} + +func (v *BeginQuorumEpochResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *BeginQuorumEpochResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *BeginQuorumEpochResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + s := v + { + v := b.Int16() + s.ErrorCode = v + } + { + v := s.Topics + a := v + var l int32 + l = b.ArrayLen() + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]BeginQuorumEpochResponseTopic, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + v = b.UnsafeString() + } else { + v = b.String() + } + s.Topic = v + } + { + v := s.Partitions + a := v + var l int32 + l = b.ArrayLen() + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]BeginQuorumEpochResponseTopicPartition, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int32() + s.Partition = v + } + { + v := b.Int16() + s.ErrorCode = v + } + { + v := b.Int32() + s.LeaderID = v + } + { + v := b.Int32() + s.LeaderEpoch = v + } + } + v = a + s.Partitions = v + } + } + v = a + s.Topics = v + } + return b.Complete() +} + +// NewPtrBeginQuorumEpochResponse returns a pointer to a default BeginQuorumEpochResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrBeginQuorumEpochResponse() *BeginQuorumEpochResponse { + var v BeginQuorumEpochResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to BeginQuorumEpochResponse. +func (v *BeginQuorumEpochResponse) Default() { +} + +// NewBeginQuorumEpochResponse returns a default BeginQuorumEpochResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewBeginQuorumEpochResponse() BeginQuorumEpochResponse { + var v BeginQuorumEpochResponse + v.Default() + return v +} + +type EndQuorumEpochRequestTopicPartition struct { + Partition int32 + + // The current leader ID that is resigning. + LeaderID int32 + + // The current epoch. 
+ LeaderEpoch int32 + + // A sorted list of preferred successors to start the election. + PreferredSuccessors []int32 +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to EndQuorumEpochRequestTopicPartition. +func (v *EndQuorumEpochRequestTopicPartition) Default() { +} + +// NewEndQuorumEpochRequestTopicPartition returns a default EndQuorumEpochRequestTopicPartition +// This is a shortcut for creating a struct and calling Default yourself. +func NewEndQuorumEpochRequestTopicPartition() EndQuorumEpochRequestTopicPartition { + var v EndQuorumEpochRequestTopicPartition + v.Default() + return v +} + +type EndQuorumEpochRequestTopic struct { + Topic string + + Partitions []EndQuorumEpochRequestTopicPartition +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to EndQuorumEpochRequestTopic. +func (v *EndQuorumEpochRequestTopic) Default() { +} + +// NewEndQuorumEpochRequestTopic returns a default EndQuorumEpochRequestTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewEndQuorumEpochRequestTopic() EndQuorumEpochRequestTopic { + var v EndQuorumEpochRequestTopic + v.Default() + return v +} + +// Part of KIP-595 to replace Kafka's dependence on Zookeeper with a +// Kafka-only raft protocol, +// EndQuorumEpochRequest is sent by a leader to gracefully step down as leader +// (i.e. on shutdown). Stepping down begins a new election. +// +// Since this is relatively Kafka internal, most fields are left undocumented. +type EndQuorumEpochRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + ClusterID *string + + Topics []EndQuorumEpochRequestTopic +} + +func (*EndQuorumEpochRequest) Key() int16 { return 54 } +func (*EndQuorumEpochRequest) MaxVersion() int16 { return 0 } +func (v *EndQuorumEpochRequest) SetVersion(version int16) { v.Version = version } +func (v *EndQuorumEpochRequest) GetVersion() int16 { return v.Version } +func (v *EndQuorumEpochRequest) IsFlexible() bool { return false } +func (v *EndQuorumEpochRequest) IsAdminRequest() {} +func (v *EndQuorumEpochRequest) ResponseKind() Response { + r := &EndQuorumEpochResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. 
+func (v *EndQuorumEpochRequest) RequestWith(ctx context.Context, r Requestor) (*EndQuorumEpochResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*EndQuorumEpochResponse) + return resp, err +} + +func (v *EndQuorumEpochRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + { + v := v.ClusterID + dst = kbin.AppendNullableString(dst, v) + } + { + v := v.Topics + dst = kbin.AppendArrayLen(dst, len(v)) + for i := range v { + v := &v[i] + { + v := v.Topic + dst = kbin.AppendString(dst, v) + } + { + v := v.Partitions + dst = kbin.AppendArrayLen(dst, len(v)) + for i := range v { + v := &v[i] + { + v := v.Partition + dst = kbin.AppendInt32(dst, v) + } + { + v := v.LeaderID + dst = kbin.AppendInt32(dst, v) + } + { + v := v.LeaderEpoch + dst = kbin.AppendInt32(dst, v) + } + { + v := v.PreferredSuccessors + dst = kbin.AppendArrayLen(dst, len(v)) + for i := range v { + v := v[i] + dst = kbin.AppendInt32(dst, v) + } + } + } + } + } + } + return dst +} + +func (v *EndQuorumEpochRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *EndQuorumEpochRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *EndQuorumEpochRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + s := v + { + var v *string + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + s.ClusterID = v + } + { + v := s.Topics + a := v + var l int32 + l = b.ArrayLen() + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]EndQuorumEpochRequestTopic, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + v = b.UnsafeString() + } else { + v = b.String() + } + s.Topic = v + } + { + v := s.Partitions + a := v + var l int32 + l = b.ArrayLen() + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]EndQuorumEpochRequestTopicPartition, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int32() + s.Partition = v + } + { + v := b.Int32() + s.LeaderID = v + } + { + v := b.Int32() + s.LeaderEpoch = v + } + { + v := s.PreferredSuccessors + a := v + var l int32 + l = b.ArrayLen() + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]int32, l)...) + } + for i := int32(0); i < l; i++ { + v := b.Int32() + a[i] = v + } + v = a + s.PreferredSuccessors = v + } + } + v = a + s.Partitions = v + } + } + v = a + s.Topics = v + } + return b.Complete() +} + +// NewPtrEndQuorumEpochRequest returns a pointer to a default EndQuorumEpochRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrEndQuorumEpochRequest() *EndQuorumEpochRequest { + var v EndQuorumEpochRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to EndQuorumEpochRequest. +func (v *EndQuorumEpochRequest) Default() { +} + +// NewEndQuorumEpochRequest returns a default EndQuorumEpochRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewEndQuorumEpochRequest() EndQuorumEpochRequest { + var v EndQuorumEpochRequest + v.Default() + return v +} + +type EndQuorumEpochResponseTopicPartition struct { + Partition int32 + + ErrorCode int16 + + // The ID of the current leader, or -1 if the leader is unknown. 
+ LeaderID int32 + + // The latest known leader epoch. + LeaderEpoch int32 +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to EndQuorumEpochResponseTopicPartition. +func (v *EndQuorumEpochResponseTopicPartition) Default() { +} + +// NewEndQuorumEpochResponseTopicPartition returns a default EndQuorumEpochResponseTopicPartition +// This is a shortcut for creating a struct and calling Default yourself. +func NewEndQuorumEpochResponseTopicPartition() EndQuorumEpochResponseTopicPartition { + var v EndQuorumEpochResponseTopicPartition + v.Default() + return v +} + +type EndQuorumEpochResponseTopic struct { + Topic string + + Partitions []EndQuorumEpochResponseTopicPartition +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to EndQuorumEpochResponseTopic. +func (v *EndQuorumEpochResponseTopic) Default() { +} + +// NewEndQuorumEpochResponseTopic returns a default EndQuorumEpochResponseTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewEndQuorumEpochResponseTopic() EndQuorumEpochResponseTopic { + var v EndQuorumEpochResponseTopic + v.Default() + return v +} + +type EndQuorumEpochResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + ErrorCode int16 + + Topics []EndQuorumEpochResponseTopic +} + +func (*EndQuorumEpochResponse) Key() int16 { return 54 } +func (*EndQuorumEpochResponse) MaxVersion() int16 { return 0 } +func (v *EndQuorumEpochResponse) SetVersion(version int16) { v.Version = version } +func (v *EndQuorumEpochResponse) GetVersion() int16 { return v.Version } +func (v *EndQuorumEpochResponse) IsFlexible() bool { return false } +func (v *EndQuorumEpochResponse) RequestKind() Request { + return &EndQuorumEpochRequest{Version: v.Version} +} + +func (v *EndQuorumEpochResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.Topics + dst = kbin.AppendArrayLen(dst, len(v)) + for i := range v { + v := &v[i] + { + v := v.Topic + dst = kbin.AppendString(dst, v) + } + { + v := v.Partitions + dst = kbin.AppendArrayLen(dst, len(v)) + for i := range v { + v := &v[i] + { + v := v.Partition + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.LeaderID + dst = kbin.AppendInt32(dst, v) + } + { + v := v.LeaderEpoch + dst = kbin.AppendInt32(dst, v) + } + } + } + } + } + return dst +} + +func (v *EndQuorumEpochResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *EndQuorumEpochResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *EndQuorumEpochResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + s := v + { + v := b.Int16() + s.ErrorCode = v + } + { + v := s.Topics + a := v + var l int32 + l = b.ArrayLen() + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]EndQuorumEpochResponseTopic, l)...) 
+ } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + v = b.UnsafeString() + } else { + v = b.String() + } + s.Topic = v + } + { + v := s.Partitions + a := v + var l int32 + l = b.ArrayLen() + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]EndQuorumEpochResponseTopicPartition, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int32() + s.Partition = v + } + { + v := b.Int16() + s.ErrorCode = v + } + { + v := b.Int32() + s.LeaderID = v + } + { + v := b.Int32() + s.LeaderEpoch = v + } + } + v = a + s.Partitions = v + } + } + v = a + s.Topics = v + } + return b.Complete() +} + +// NewPtrEndQuorumEpochResponse returns a pointer to a default EndQuorumEpochResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrEndQuorumEpochResponse() *EndQuorumEpochResponse { + var v EndQuorumEpochResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to EndQuorumEpochResponse. +func (v *EndQuorumEpochResponse) Default() { +} + +// NewEndQuorumEpochResponse returns a default EndQuorumEpochResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewEndQuorumEpochResponse() EndQuorumEpochResponse { + var v EndQuorumEpochResponse + v.Default() + return v +} + +// A common struct used in DescribeQuorumResponse. +type DescribeQuorumResponseTopicPartitionReplicaState struct { + ReplicaID int32 + + // The last known log end offset of the follower, or -1 if it is unknown. + LogEndOffset int64 + + // The last known leader wall clock time when a follower fetched from the + // leader, or -1 for the current leader or if unknown for a voter. + // + // This field has a default of -1. + LastFetchTimestamp int64 // v1+ + + // The leader wall clock append time of the offset for which the follower + // made the most recent fetch request, or -1 for the current leader or if + // unknown for a voter. + // + // This field has a default of -1. + LastCaughtUpTimestamp int64 // v1+ + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DescribeQuorumResponseTopicPartitionReplicaState. +func (v *DescribeQuorumResponseTopicPartitionReplicaState) Default() { + v.LastFetchTimestamp = -1 + v.LastCaughtUpTimestamp = -1 +} + +// NewDescribeQuorumResponseTopicPartitionReplicaState returns a default DescribeQuorumResponseTopicPartitionReplicaState +// This is a shortcut for creating a struct and calling Default yourself. +func NewDescribeQuorumResponseTopicPartitionReplicaState() DescribeQuorumResponseTopicPartitionReplicaState { + var v DescribeQuorumResponseTopicPartitionReplicaState + v.Default() + return v +} + +type DescribeQuorumRequestTopicPartition struct { + Partition int32 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DescribeQuorumRequestTopicPartition. +func (v *DescribeQuorumRequestTopicPartition) Default() { +} + +// NewDescribeQuorumRequestTopicPartition returns a default DescribeQuorumRequestTopicPartition +// This is a shortcut for creating a struct and calling Default yourself. 
+func NewDescribeQuorumRequestTopicPartition() DescribeQuorumRequestTopicPartition { + var v DescribeQuorumRequestTopicPartition + v.Default() + return v +} + +type DescribeQuorumRequestTopic struct { + Topic string + + Partitions []DescribeQuorumRequestTopicPartition + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DescribeQuorumRequestTopic. +func (v *DescribeQuorumRequestTopic) Default() { +} + +// NewDescribeQuorumRequestTopic returns a default DescribeQuorumRequestTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewDescribeQuorumRequestTopic() DescribeQuorumRequestTopic { + var v DescribeQuorumRequestTopic + v.Default() + return v +} + +// Part of KIP-642 (and KIP-595) to replace Kafka's dependence on Zookeeper with a +// Kafka-only raft protocol, +// DescribeQuorumRequest is sent by a leader to describe the quorum. +type DescribeQuorumRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + Topics []DescribeQuorumRequestTopic + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +func (*DescribeQuorumRequest) Key() int16 { return 55 } +func (*DescribeQuorumRequest) MaxVersion() int16 { return 1 } +func (v *DescribeQuorumRequest) SetVersion(version int16) { v.Version = version } +func (v *DescribeQuorumRequest) GetVersion() int16 { return v.Version } +func (v *DescribeQuorumRequest) IsFlexible() bool { return v.Version >= 0 } +func (v *DescribeQuorumRequest) IsAdminRequest() {} +func (v *DescribeQuorumRequest) ResponseKind() Response { + r := &DescribeQuorumResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. 
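A DescribeQuorumRequest can be issued the same way through a `*kgo.Client`. In the sketch below, the target topic and partition ("__cluster_metadata", partition 0) are assumptions about a typical KRaft deployment, not something defined by these generated types, and the request may still need to reach the active controller to succeed.

```go
// Illustrative sketch only: describe the KRaft controller quorum with the
// generated kmsg types. Broker address and metadata topic name are assumptions.
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/twmb/franz-go/pkg/kgo"
	"github.com/twmb/franz-go/pkg/kmsg"
)

func main() {
	client, err := kgo.NewClient(kgo.SeedBrokers("localhost:9092")) // assumed broker
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	part := kmsg.NewDescribeQuorumRequestTopicPartition()
	part.Partition = 0
	topic := kmsg.NewDescribeQuorumRequestTopic()
	topic.Topic = "__cluster_metadata" // assumed KRaft metadata topic
	topic.Partitions = append(topic.Partitions, part)

	req := kmsg.NewPtrDescribeQuorumRequest()
	req.Topics = append(req.Topics, topic)

	resp, err := req.RequestWith(context.Background(), client)
	if err != nil {
		log.Fatal(err)
	}
	for _, t := range resp.Topics {
		for _, p := range t.Partitions {
			fmt.Printf("%s[%d]: leader=%d epoch=%d hwm=%d voters=%d observers=%d\n",
				t.Topic, p.Partition, p.LeaderID, p.LeaderEpoch,
				p.HighWatermark, len(p.CurrentVoters), len(p.Observers))
		}
	}
}
```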
+func (v *DescribeQuorumRequest) RequestWith(ctx context.Context, r Requestor) (*DescribeQuorumResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*DescribeQuorumResponse) + return resp, err +} + +func (v *DescribeQuorumRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Partitions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Partition + dst = kbin.AppendInt32(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *DescribeQuorumRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *DescribeQuorumRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *DescribeQuorumRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + s := v + { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]DescribeQuorumRequestTopic, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + { + v := s.Partitions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]DescribeQuorumRequestTopicPartition, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int32() + s.Partition = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Partitions = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrDescribeQuorumRequest returns a pointer to a default DescribeQuorumRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrDescribeQuorumRequest() *DescribeQuorumRequest { + var v DescribeQuorumRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DescribeQuorumRequest. +func (v *DescribeQuorumRequest) Default() { +} + +// NewDescribeQuorumRequest returns a default DescribeQuorumRequest +// This is a shortcut for creating a struct and calling Default yourself. 
+func NewDescribeQuorumRequest() DescribeQuorumRequest { + var v DescribeQuorumRequest + v.Default() + return v +} + +type DescribeQuorumResponseTopicPartition struct { + Partition int32 + + ErrorCode int16 + + // The ID of the current leader, or -1 if the leader is unknown. + LeaderID int32 + + // The latest known leader epoch. + LeaderEpoch int32 + + HighWatermark int64 + + CurrentVoters []DescribeQuorumResponseTopicPartitionReplicaState + + Observers []DescribeQuorumResponseTopicPartitionReplicaState + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DescribeQuorumResponseTopicPartition. +func (v *DescribeQuorumResponseTopicPartition) Default() { +} + +// NewDescribeQuorumResponseTopicPartition returns a default DescribeQuorumResponseTopicPartition +// This is a shortcut for creating a struct and calling Default yourself. +func NewDescribeQuorumResponseTopicPartition() DescribeQuorumResponseTopicPartition { + var v DescribeQuorumResponseTopicPartition + v.Default() + return v +} + +type DescribeQuorumResponseTopic struct { + Topic string + + Partitions []DescribeQuorumResponseTopicPartition + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DescribeQuorumResponseTopic. +func (v *DescribeQuorumResponseTopic) Default() { +} + +// NewDescribeQuorumResponseTopic returns a default DescribeQuorumResponseTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewDescribeQuorumResponseTopic() DescribeQuorumResponseTopic { + var v DescribeQuorumResponseTopic + v.Default() + return v +} + +type DescribeQuorumResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + ErrorCode int16 + + Topics []DescribeQuorumResponseTopic + + // UnknownTags are tags Kafka sent that we do not know the purpose of. 
+ UnknownTags Tags +} + +func (*DescribeQuorumResponse) Key() int16 { return 55 } +func (*DescribeQuorumResponse) MaxVersion() int16 { return 1 } +func (v *DescribeQuorumResponse) SetVersion(version int16) { v.Version = version } +func (v *DescribeQuorumResponse) GetVersion() int16 { return v.Version } +func (v *DescribeQuorumResponse) IsFlexible() bool { return v.Version >= 0 } +func (v *DescribeQuorumResponse) RequestKind() Request { + return &DescribeQuorumRequest{Version: v.Version} +} + +func (v *DescribeQuorumResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Partitions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Partition + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.LeaderID + dst = kbin.AppendInt32(dst, v) + } + { + v := v.LeaderEpoch + dst = kbin.AppendInt32(dst, v) + } + { + v := v.HighWatermark + dst = kbin.AppendInt64(dst, v) + } + { + v := v.CurrentVoters + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.ReplicaID + dst = kbin.AppendInt32(dst, v) + } + { + v := v.LogEndOffset + dst = kbin.AppendInt64(dst, v) + } + if version >= 1 { + v := v.LastFetchTimestamp + dst = kbin.AppendInt64(dst, v) + } + if version >= 1 { + v := v.LastCaughtUpTimestamp + dst = kbin.AppendInt64(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + { + v := v.Observers + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.ReplicaID + dst = kbin.AppendInt32(dst, v) + } + { + v := v.LogEndOffset + dst = kbin.AppendInt64(dst, v) + } + if version >= 1 { + v := v.LastFetchTimestamp + dst = kbin.AppendInt64(dst, v) + } + if version >= 1 { + v := v.LastCaughtUpTimestamp + dst = kbin.AppendInt64(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *DescribeQuorumResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *DescribeQuorumResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *DescribeQuorumResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + s := v + { + v := b.Int16() + 
s.ErrorCode = v + } + { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]DescribeQuorumResponseTopic, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + { + v := s.Partitions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]DescribeQuorumResponseTopicPartition, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int32() + s.Partition = v + } + { + v := b.Int16() + s.ErrorCode = v + } + { + v := b.Int32() + s.LeaderID = v + } + { + v := b.Int32() + s.LeaderEpoch = v + } + { + v := b.Int64() + s.HighWatermark = v + } + { + v := s.CurrentVoters + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]DescribeQuorumResponseTopicPartitionReplicaState, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int32() + s.ReplicaID = v + } + { + v := b.Int64() + s.LogEndOffset = v + } + if version >= 1 { + v := b.Int64() + s.LastFetchTimestamp = v + } + if version >= 1 { + v := b.Int64() + s.LastCaughtUpTimestamp = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.CurrentVoters = v + } + { + v := s.Observers + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]DescribeQuorumResponseTopicPartitionReplicaState, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int32() + s.ReplicaID = v + } + { + v := b.Int64() + s.LogEndOffset = v + } + if version >= 1 { + v := b.Int64() + s.LastFetchTimestamp = v + } + if version >= 1 { + v := b.Int64() + s.LastCaughtUpTimestamp = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Observers = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Partitions = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrDescribeQuorumResponse returns a pointer to a default DescribeQuorumResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrDescribeQuorumResponse() *DescribeQuorumResponse { + var v DescribeQuorumResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DescribeQuorumResponse. +func (v *DescribeQuorumResponse) Default() { +} + +// NewDescribeQuorumResponse returns a default DescribeQuorumResponse +// This is a shortcut for creating a struct and calling Default yourself. 
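// Illustrative sketch (not part of the vendored file): a minimal example of how the
// DescribeQuorum types above are typically driven. The request is built with the
// generated constructor plus plain struct literals, then issued through a Requestor;
// this assumes, as in franz-go, that a *kgo.Client (vendored in this patch) satisfies
// kmsg.Requestor. The broker address and the "__cluster_metadata" topic/partition are
// assumptions made purely for illustration.
package main

import (
	"context"
	"fmt"

	"github.com/twmb/franz-go/pkg/kgo"
	"github.com/twmb/franz-go/pkg/kmsg"
)

func main() {
	// Assumed seed broker address; replace with a real KRaft-mode broker.
	cl, err := kgo.NewClient(kgo.SeedBrokers("localhost:9092"))
	if err != nil {
		panic(err)
	}
	defer cl.Close()

	req := kmsg.NewPtrDescribeQuorumRequest()
	req.Topics = append(req.Topics, kmsg.DescribeQuorumRequestTopic{
		Topic:      "__cluster_metadata", // assumed KRaft metadata topic
		Partitions: []kmsg.DescribeQuorumRequestTopicPartition{{Partition: 0}},
	})

	// RequestWith hands the request to the client, which negotiates the version
	// and performs the AppendTo/ReadFrom wire encoding defined in this file.
	resp, err := req.RequestWith(context.Background(), cl)
	if err != nil {
		panic(err)
	}
	for _, t := range resp.Topics {
		for _, p := range t.Partitions {
			fmt.Println(t.Topic, p.Partition, "leader:", p.LeaderID, "epoch:", p.LeaderEpoch)
		}
	}
}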
+func NewDescribeQuorumResponse() DescribeQuorumResponse { + var v DescribeQuorumResponse + v.Default() + return v +} + +type AlterPartitionRequestTopicPartitionNewEpochISR struct { + // The broker ID . + BrokerID int32 + + // The broker's epoch; -1 if the epoch check is not supported. + // + // This field has a default of -1. + BrokerEpoch int32 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to AlterPartitionRequestTopicPartitionNewEpochISR. +func (v *AlterPartitionRequestTopicPartitionNewEpochISR) Default() { + v.BrokerEpoch = -1 +} + +// NewAlterPartitionRequestTopicPartitionNewEpochISR returns a default AlterPartitionRequestTopicPartitionNewEpochISR +// This is a shortcut for creating a struct and calling Default yourself. +func NewAlterPartitionRequestTopicPartitionNewEpochISR() AlterPartitionRequestTopicPartitionNewEpochISR { + var v AlterPartitionRequestTopicPartitionNewEpochISR + v.Default() + return v +} + +type AlterPartitionRequestTopicPartition struct { + Partition int32 + + // The leader epoch of this partition. + LeaderEpoch int32 + + // The ISR for this partition. + NewISR []int32 // v0-v2 + + NewEpochISR []AlterPartitionRequestTopicPartitionNewEpochISR // v3+ + + // 1 if the partition is recovering from unclean leader election; 0 otherwise + LeaderRecoveryState int8 // v1+ + + // The expected epoch of the partition which is being updated. + // For a legacy cluster, this is the ZkVersion in the LeaderAndISR request. + PartitionEpoch int32 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to AlterPartitionRequestTopicPartition. +func (v *AlterPartitionRequestTopicPartition) Default() { +} + +// NewAlterPartitionRequestTopicPartition returns a default AlterPartitionRequestTopicPartition +// This is a shortcut for creating a struct and calling Default yourself. +func NewAlterPartitionRequestTopicPartition() AlterPartitionRequestTopicPartition { + var v AlterPartitionRequestTopicPartition + v.Default() + return v +} + +type AlterPartitionRequestTopic struct { + Topic string // v0-v1 + + TopicID [16]byte // v2+ + + Partitions []AlterPartitionRequestTopicPartition + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to AlterPartitionRequestTopic. +func (v *AlterPartitionRequestTopic) Default() { +} + +// NewAlterPartitionRequestTopic returns a default AlterPartitionRequestTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewAlterPartitionRequestTopic() AlterPartitionRequestTopic { + var v AlterPartitionRequestTopic + v.Default() + return v +} + +// AlterPartitionRequest, proposed in KIP-497 and introduced in Kafka 2.7.0, +// is an admin request to modify ISR. +// +// Version 3 was added for KIP-903 and replaced NewISR. +type AlterPartitionRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // The ID of the requesting broker. + BrokerID int32 + + // The epoch of the requesting broker. + // + // This field has a default of -1. 
+ BrokerEpoch int64 + + Topics []AlterPartitionRequestTopic + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +func (*AlterPartitionRequest) Key() int16 { return 56 } +func (*AlterPartitionRequest) MaxVersion() int16 { return 3 } +func (v *AlterPartitionRequest) SetVersion(version int16) { v.Version = version } +func (v *AlterPartitionRequest) GetVersion() int16 { return v.Version } +func (v *AlterPartitionRequest) IsFlexible() bool { return v.Version >= 0 } +func (v *AlterPartitionRequest) IsAdminRequest() {} +func (v *AlterPartitionRequest) ResponseKind() Response { + r := &AlterPartitionResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. +func (v *AlterPartitionRequest) RequestWith(ctx context.Context, r Requestor) (*AlterPartitionResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*AlterPartitionResponse) + return resp, err +} + +func (v *AlterPartitionRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + { + v := v.BrokerID + dst = kbin.AppendInt32(dst, v) + } + { + v := v.BrokerEpoch + dst = kbin.AppendInt64(dst, v) + } + { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + if version >= 0 && version <= 1 { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + if version >= 2 { + v := v.TopicID + dst = kbin.AppendUuid(dst, v) + } + { + v := v.Partitions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Partition + dst = kbin.AppendInt32(dst, v) + } + { + v := v.LeaderEpoch + dst = kbin.AppendInt32(dst, v) + } + if version >= 0 && version <= 2 { + v := v.NewISR + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := v[i] + dst = kbin.AppendInt32(dst, v) + } + } + if version >= 3 { + v := v.NewEpochISR + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.BrokerID + dst = kbin.AppendInt32(dst, v) + } + { + v := v.BrokerEpoch + dst = kbin.AppendInt32(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if version >= 1 { + v := v.LeaderRecoveryState + dst = kbin.AppendInt8(dst, v) + } + { + v := v.PartitionEpoch + dst = kbin.AppendInt32(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *AlterPartitionRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *AlterPartitionRequest) UnsafeReadFrom(src 
[]byte) error { + return v.readFrom(src, true) +} + +func (v *AlterPartitionRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + s := v + { + v := b.Int32() + s.BrokerID = v + } + { + v := b.Int64() + s.BrokerEpoch = v + } + { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]AlterPartitionRequestTopic, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + if version >= 0 && version <= 1 { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + if version >= 2 { + v := b.Uuid() + s.TopicID = v + } + { + v := s.Partitions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]AlterPartitionRequestTopicPartition, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int32() + s.Partition = v + } + { + v := b.Int32() + s.LeaderEpoch = v + } + if version >= 0 && version <= 2 { + v := s.NewISR + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]int32, l)...) + } + for i := int32(0); i < l; i++ { + v := b.Int32() + a[i] = v + } + v = a + s.NewISR = v + } + if version >= 3 { + v := s.NewEpochISR + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]AlterPartitionRequestTopicPartitionNewEpochISR, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int32() + s.BrokerID = v + } + { + v := b.Int32() + s.BrokerEpoch = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.NewEpochISR = v + } + if version >= 1 { + v := b.Int8() + s.LeaderRecoveryState = v + } + { + v := b.Int32() + s.PartitionEpoch = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Partitions = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrAlterPartitionRequest returns a pointer to a default AlterPartitionRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrAlterPartitionRequest() *AlterPartitionRequest { + var v AlterPartitionRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to AlterPartitionRequest. +func (v *AlterPartitionRequest) Default() { + v.BrokerEpoch = -1 +} + +// NewAlterPartitionRequest returns a default AlterPartitionRequest +// This is a shortcut for creating a struct and calling Default yourself. 
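// Illustrative sketch (not part of the vendored file): AlterPartition is an
// inter-broker/controller request, so a client rarely sends it directly; this
// snippet only exercises the version gating shown above (v0-v1 carry Topic,
// v2+ carry TopicID, v3+ replace NewISR with NewEpochISR) by round-tripping
// through AppendTo/ReadFrom. Because readFrom consults v.Version, the decoder
// must have its version set before ReadFrom. All concrete values are placeholders.
package main

import (
	"fmt"

	"github.com/twmb/franz-go/pkg/kmsg"
)

func main() {
	req := kmsg.NewPtrAlterPartitionRequest() // Default() sets BrokerEpoch to -1
	req.SetVersion(3)
	req.BrokerID = 1
	req.Topics = append(req.Topics, kmsg.AlterPartitionRequestTopic{
		TopicID: [16]byte{0x01}, // v2+: topic addressed by UUID, not name
		Partitions: []kmsg.AlterPartitionRequestTopicPartition{{
			Partition:   0,
			LeaderEpoch: 5,
			// v3+ (KIP-903): ISR entries carry broker epochs.
			NewEpochISR: []kmsg.AlterPartitionRequestTopicPartitionNewEpochISR{
				{BrokerID: 1, BrokerEpoch: 42},
			},
			PartitionEpoch: 7,
		}},
	})

	wire := req.AppendTo(nil) // flexible (compact) encoding, since Version >= 0

	var back kmsg.AlterPartitionRequest
	back.SetVersion(req.GetVersion()) // decode with the same version
	if err := back.ReadFrom(wire); err != nil {
		panic(err)
	}
	fmt.Println(back.BrokerID, back.BrokerEpoch, len(back.Topics))
}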
+func NewAlterPartitionRequest() AlterPartitionRequest { + var v AlterPartitionRequest + v.Default() + return v +} + +type AlterPartitionResponseTopicPartition struct { + Partition int32 + + ErrorCode int16 + + // The broker ID of the leader. + LeaderID int32 + + // The leader epoch of this partition. + LeaderEpoch int32 + + // The in-sync replica ids. + ISR []int32 + + // 1 if the partition is recovering from unclean leader election; 0 otherwise + LeaderRecoveryState int8 // v1+ + + // The current epoch of the partition for KRaft controllers. + // The current ZK version for legacy controllers. + PartitionEpoch int32 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to AlterPartitionResponseTopicPartition. +func (v *AlterPartitionResponseTopicPartition) Default() { +} + +// NewAlterPartitionResponseTopicPartition returns a default AlterPartitionResponseTopicPartition +// This is a shortcut for creating a struct and calling Default yourself. +func NewAlterPartitionResponseTopicPartition() AlterPartitionResponseTopicPartition { + var v AlterPartitionResponseTopicPartition + v.Default() + return v +} + +type AlterPartitionResponseTopic struct { + Topic string // v0-v1 + + TopidID [16]byte // v2+ + + Partitions []AlterPartitionResponseTopicPartition + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to AlterPartitionResponseTopic. +func (v *AlterPartitionResponseTopic) Default() { +} + +// NewAlterPartitionResponseTopic returns a default AlterPartitionResponseTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewAlterPartitionResponseTopic() AlterPartitionResponseTopic { + var v AlterPartitionResponseTopic + v.Default() + return v +} + +type AlterPartitionResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ThrottleMillis is how long of a throttle Kafka will apply to the client + // after responding to this request. + ThrottleMillis int32 + + ErrorCode int16 + + Topics []AlterPartitionResponseTopic + + // UnknownTags are tags Kafka sent that we do not know the purpose of. 
+ UnknownTags Tags +} + +func (*AlterPartitionResponse) Key() int16 { return 56 } +func (*AlterPartitionResponse) MaxVersion() int16 { return 3 } +func (v *AlterPartitionResponse) SetVersion(version int16) { v.Version = version } +func (v *AlterPartitionResponse) GetVersion() int16 { return v.Version } +func (v *AlterPartitionResponse) IsFlexible() bool { return v.Version >= 0 } +func (v *AlterPartitionResponse) Throttle() (int32, bool) { return v.ThrottleMillis, v.Version >= 0 } +func (v *AlterPartitionResponse) SetThrottle(throttleMillis int32) { v.ThrottleMillis = throttleMillis } +func (v *AlterPartitionResponse) RequestKind() Request { + return &AlterPartitionRequest{Version: v.Version} +} + +func (v *AlterPartitionResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + if version >= 0 && version <= 1 { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + if version >= 2 { + v := v.TopidID + dst = kbin.AppendUuid(dst, v) + } + { + v := v.Partitions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Partition + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.LeaderID + dst = kbin.AppendInt32(dst, v) + } + { + v := v.LeaderEpoch + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ISR + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := v[i] + dst = kbin.AppendInt32(dst, v) + } + } + if version >= 1 { + v := v.LeaderRecoveryState + dst = kbin.AppendInt8(dst, v) + } + { + v := v.PartitionEpoch + dst = kbin.AppendInt32(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *AlterPartitionResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *AlterPartitionResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *AlterPartitionResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + s := v + { + v := b.Int32() + s.ThrottleMillis = v + } + { + v := b.Int16() + s.ErrorCode = v + } + { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]AlterPartitionResponseTopic, l)...) 
+ } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + if version >= 0 && version <= 1 { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + if version >= 2 { + v := b.Uuid() + s.TopidID = v + } + { + v := s.Partitions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]AlterPartitionResponseTopicPartition, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int32() + s.Partition = v + } + { + v := b.Int16() + s.ErrorCode = v + } + { + v := b.Int32() + s.LeaderID = v + } + { + v := b.Int32() + s.LeaderEpoch = v + } + { + v := s.ISR + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]int32, l)...) + } + for i := int32(0); i < l; i++ { + v := b.Int32() + a[i] = v + } + v = a + s.ISR = v + } + if version >= 1 { + v := b.Int8() + s.LeaderRecoveryState = v + } + { + v := b.Int32() + s.PartitionEpoch = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Partitions = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrAlterPartitionResponse returns a pointer to a default AlterPartitionResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrAlterPartitionResponse() *AlterPartitionResponse { + var v AlterPartitionResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to AlterPartitionResponse. +func (v *AlterPartitionResponse) Default() { +} + +// NewAlterPartitionResponse returns a default AlterPartitionResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewAlterPartitionResponse() AlterPartitionResponse { + var v AlterPartitionResponse + v.Default() + return v +} + +type UpdateFeaturesRequestFeatureUpdate struct { + // The name of the finalized feature to update. + Feature string + + // The new maximum version level for the finalized feature. A value >= 1 is + // valid. A value < 1, is special, and can be used to request the deletion + // of the finalized feature. + MaxVersionLevel int16 + + // When set to true, the finalized feature version level is allowed to be + // downgraded/deleted. The downgrade request will fail if the new maximum + // version level is a value that's not lower than the existing maximum + // finalized version level. + // + // Replaced in v1 with ValidateOnly. + AllowDowngrade bool + + // Determine which type of upgrade will be performed: 1 will perform an + // upgrade only (default), 2 is safe downgrades only (lossless), 3 is + // unsafe downgrades (lossy). + UpgradeType int8 // v1+ + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to UpdateFeaturesRequestFeatureUpdate. 
+func (v *UpdateFeaturesRequestFeatureUpdate) Default() { +} + +// NewUpdateFeaturesRequestFeatureUpdate returns a default UpdateFeaturesRequestFeatureUpdate +// This is a shortcut for creating a struct and calling Default yourself. +func NewUpdateFeaturesRequestFeatureUpdate() UpdateFeaturesRequestFeatureUpdate { + var v UpdateFeaturesRequestFeatureUpdate + v.Default() + return v +} + +// From KIP-584 and introduced in 2.7.0, this request updates broker-wide features. +type UpdateFeaturesRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // TimeoutMillis is how long Kafka can wait before responding to this request. + // This field has no effect on Kafka's processing of the request; the request + // will continue to be processed if the timeout is reached. If the timeout is + // reached, Kafka will reply with a REQUEST_TIMED_OUT error. + // + // This field has a default of 60000. + TimeoutMillis int32 + + // The list of updates to finalized features. + FeatureUpdates []UpdateFeaturesRequestFeatureUpdate + + // True if we should validate the request, but not perform the upgrade or + // downgrade. + ValidateOnly bool // v1+ + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +func (*UpdateFeaturesRequest) Key() int16 { return 57 } +func (*UpdateFeaturesRequest) MaxVersion() int16 { return 1 } +func (v *UpdateFeaturesRequest) SetVersion(version int16) { v.Version = version } +func (v *UpdateFeaturesRequest) GetVersion() int16 { return v.Version } +func (v *UpdateFeaturesRequest) IsFlexible() bool { return v.Version >= 0 } +func (v *UpdateFeaturesRequest) Timeout() int32 { return v.TimeoutMillis } +func (v *UpdateFeaturesRequest) SetTimeout(timeoutMillis int32) { v.TimeoutMillis = timeoutMillis } +func (v *UpdateFeaturesRequest) IsAdminRequest() {} +func (v *UpdateFeaturesRequest) ResponseKind() Response { + r := &UpdateFeaturesResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. 
+func (v *UpdateFeaturesRequest) RequestWith(ctx context.Context, r Requestor) (*UpdateFeaturesResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*UpdateFeaturesResponse) + return resp, err +} + +func (v *UpdateFeaturesRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + { + v := v.TimeoutMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.FeatureUpdates + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Feature + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.MaxVersionLevel + dst = kbin.AppendInt16(dst, v) + } + if version >= 0 && version <= 0 { + v := v.AllowDowngrade + dst = kbin.AppendBool(dst, v) + } + if version >= 1 { + v := v.UpgradeType + dst = kbin.AppendInt8(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if version >= 1 { + v := v.ValidateOnly + dst = kbin.AppendBool(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *UpdateFeaturesRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *UpdateFeaturesRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *UpdateFeaturesRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + s := v + { + v := b.Int32() + s.TimeoutMillis = v + } + { + v := s.FeatureUpdates + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]UpdateFeaturesRequestFeatureUpdate, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Feature = v + } + { + v := b.Int16() + s.MaxVersionLevel = v + } + if version >= 0 && version <= 0 { + v := b.Bool() + s.AllowDowngrade = v + } + if version >= 1 { + v := b.Int8() + s.UpgradeType = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.FeatureUpdates = v + } + if version >= 1 { + v := b.Bool() + s.ValidateOnly = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrUpdateFeaturesRequest returns a pointer to a default UpdateFeaturesRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrUpdateFeaturesRequest() *UpdateFeaturesRequest { + var v UpdateFeaturesRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to UpdateFeaturesRequest. +func (v *UpdateFeaturesRequest) Default() { + v.TimeoutMillis = 60000 +} + +// NewUpdateFeaturesRequest returns a default UpdateFeaturesRequest +// This is a shortcut for creating a struct and calling Default yourself. 
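// Illustrative sketch (not part of the vendored file): a validate-only feature
// bump using the UpdateFeatures types above, issued through a *kgo.Client and
// decoded with kerr.ErrorForCode from the vendored pkg/kerr (assuming that
// helper's usual signature). The broker address, feature name, and version
// level are assumptions for illustration only.
package main

import (
	"context"
	"fmt"

	"github.com/twmb/franz-go/pkg/kerr"
	"github.com/twmb/franz-go/pkg/kgo"
	"github.com/twmb/franz-go/pkg/kmsg"
)

func main() {
	cl, err := kgo.NewClient(kgo.SeedBrokers("localhost:9092")) // assumed address
	if err != nil {
		panic(err)
	}
	defer cl.Close()

	req := kmsg.NewPtrUpdateFeaturesRequest() // Default() sets TimeoutMillis to 60000
	req.ValidateOnly = true                   // v1+: dry-run, do not apply the upgrade
	req.FeatureUpdates = append(req.FeatureUpdates, kmsg.UpdateFeaturesRequestFeatureUpdate{
		Feature:         "metadata.version", // assumed feature name
		MaxVersionLevel: 7,                  // assumed target level
		UpgradeType:     1,                  // v1+: 1 = upgrade only
	})

	resp, err := req.RequestWith(context.Background(), cl)
	if err != nil {
		panic(err)
	}
	if topErr := kerr.ErrorForCode(resp.ErrorCode); topErr != nil {
		fmt.Println("top-level error:", topErr)
	}
	for _, r := range resp.Results {
		fmt.Println(r.Feature, kerr.ErrorForCode(r.ErrorCode))
	}
}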
+func NewUpdateFeaturesRequest() UpdateFeaturesRequest { + var v UpdateFeaturesRequest + v.Default() + return v +} + +type UpdateFeaturesResponseResult struct { + // The name of the finalized feature. + Feature string + + // The feature update error code, if any. + ErrorCode int16 + + // The feature update error, if any. + ErrorMessage *string + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to UpdateFeaturesResponseResult. +func (v *UpdateFeaturesResponseResult) Default() { +} + +// NewUpdateFeaturesResponseResult returns a default UpdateFeaturesResponseResult +// This is a shortcut for creating a struct and calling Default yourself. +func NewUpdateFeaturesResponseResult() UpdateFeaturesResponseResult { + var v UpdateFeaturesResponseResult + v.Default() + return v +} + +type UpdateFeaturesResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ThrottleMillis is how long of a throttle Kafka will apply to the client + // after responding to this request. + ThrottleMillis int32 + + // The top level error code, if any. + ErrorCode int16 + + // An informative message if the request errored, if any. + ErrorMessage *string + + // The results for each feature update request. + Results []UpdateFeaturesResponseResult + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +func (*UpdateFeaturesResponse) Key() int16 { return 57 } +func (*UpdateFeaturesResponse) MaxVersion() int16 { return 1 } +func (v *UpdateFeaturesResponse) SetVersion(version int16) { v.Version = version } +func (v *UpdateFeaturesResponse) GetVersion() int16 { return v.Version } +func (v *UpdateFeaturesResponse) IsFlexible() bool { return v.Version >= 0 } +func (v *UpdateFeaturesResponse) Throttle() (int32, bool) { return v.ThrottleMillis, v.Version >= 0 } +func (v *UpdateFeaturesResponse) SetThrottle(throttleMillis int32) { v.ThrottleMillis = throttleMillis } +func (v *UpdateFeaturesResponse) RequestKind() Request { + return &UpdateFeaturesRequest{Version: v.Version} +} + +func (v *UpdateFeaturesResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.ErrorMessage + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + { + v := v.Results + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Feature + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.ErrorMessage + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *UpdateFeaturesResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v 
*UpdateFeaturesResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *UpdateFeaturesResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + s := v + { + v := b.Int32() + s.ThrottleMillis = v + } + { + v := b.Int16() + s.ErrorCode = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.ErrorMessage = v + } + { + v := s.Results + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]UpdateFeaturesResponseResult, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Feature = v + } + { + v := b.Int16() + s.ErrorCode = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.ErrorMessage = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Results = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrUpdateFeaturesResponse returns a pointer to a default UpdateFeaturesResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrUpdateFeaturesResponse() *UpdateFeaturesResponse { + var v UpdateFeaturesResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to UpdateFeaturesResponse. +func (v *UpdateFeaturesResponse) Default() { +} + +// NewUpdateFeaturesResponse returns a default UpdateFeaturesResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewUpdateFeaturesResponse() UpdateFeaturesResponse { + var v UpdateFeaturesResponse + v.Default() + return v +} + +// Introduced for KIP-590, EnvelopeRequest is what brokers use to wrap an +// incoming request before forwarding it to another broker. +type EnvelopeRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // The embedded request header and data. + RequestData []byte + + // Value of the initial client principal when the request is redirected by a broker. + RequestPrincipal []byte + + // The original client's address in bytes. + ClientHostAddress []byte + + // UnknownTags are tags Kafka sent that we do not know the purpose of. 
+ UnknownTags Tags +} + +func (*EnvelopeRequest) Key() int16 { return 58 } +func (*EnvelopeRequest) MaxVersion() int16 { return 0 } +func (v *EnvelopeRequest) SetVersion(version int16) { v.Version = version } +func (v *EnvelopeRequest) GetVersion() int16 { return v.Version } +func (v *EnvelopeRequest) IsFlexible() bool { return v.Version >= 0 } +func (v *EnvelopeRequest) IsAdminRequest() {} +func (v *EnvelopeRequest) ResponseKind() Response { + r := &EnvelopeResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. +func (v *EnvelopeRequest) RequestWith(ctx context.Context, r Requestor) (*EnvelopeResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*EnvelopeResponse) + return resp, err +} + +func (v *EnvelopeRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + { + v := v.RequestData + if isFlexible { + dst = kbin.AppendCompactBytes(dst, v) + } else { + dst = kbin.AppendBytes(dst, v) + } + } + { + v := v.RequestPrincipal + if isFlexible { + dst = kbin.AppendCompactNullableBytes(dst, v) + } else { + dst = kbin.AppendNullableBytes(dst, v) + } + } + { + v := v.ClientHostAddress + if isFlexible { + dst = kbin.AppendCompactBytes(dst, v) + } else { + dst = kbin.AppendBytes(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *EnvelopeRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *EnvelopeRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *EnvelopeRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + s := v + { + var v []byte + if isFlexible { + v = b.CompactBytes() + } else { + v = b.Bytes() + } + s.RequestData = v + } + { + var v []byte + if isFlexible { + v = b.CompactNullableBytes() + } else { + v = b.NullableBytes() + } + s.RequestPrincipal = v + } + { + var v []byte + if isFlexible { + v = b.CompactBytes() + } else { + v = b.Bytes() + } + s.ClientHostAddress = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrEnvelopeRequest returns a pointer to a default EnvelopeRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrEnvelopeRequest() *EnvelopeRequest { + var v EnvelopeRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to EnvelopeRequest. +func (v *EnvelopeRequest) Default() { +} + +// NewEnvelopeRequest returns a default EnvelopeRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewEnvelopeRequest() EnvelopeRequest { + var v EnvelopeRequest + v.Default() + return v +} + +type EnvelopeResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // The embedded response header and data. + ResponseData []byte + + // The error code, or 0 if there was no error. + // + // NOT_CONTROLLER is returned when the request is not sent to the controller. 
+ // + // CLUSTER_AUTHORIZATION_FAILED is returned if inter-broker authorization failed. + ErrorCode int16 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +func (*EnvelopeResponse) Key() int16 { return 58 } +func (*EnvelopeResponse) MaxVersion() int16 { return 0 } +func (v *EnvelopeResponse) SetVersion(version int16) { v.Version = version } +func (v *EnvelopeResponse) GetVersion() int16 { return v.Version } +func (v *EnvelopeResponse) IsFlexible() bool { return v.Version >= 0 } +func (v *EnvelopeResponse) RequestKind() Request { return &EnvelopeRequest{Version: v.Version} } + +func (v *EnvelopeResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + { + v := v.ResponseData + if isFlexible { + dst = kbin.AppendCompactNullableBytes(dst, v) + } else { + dst = kbin.AppendNullableBytes(dst, v) + } + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *EnvelopeResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *EnvelopeResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *EnvelopeResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + s := v + { + var v []byte + if isFlexible { + v = b.CompactNullableBytes() + } else { + v = b.NullableBytes() + } + s.ResponseData = v + } + { + v := b.Int16() + s.ErrorCode = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrEnvelopeResponse returns a pointer to a default EnvelopeResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrEnvelopeResponse() *EnvelopeResponse { + var v EnvelopeResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to EnvelopeResponse. +func (v *EnvelopeResponse) Default() { +} + +// NewEnvelopeResponse returns a default EnvelopeResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewEnvelopeResponse() EnvelopeResponse { + var v EnvelopeResponse + v.Default() + return v +} + +type FetchSnapshotRequestTopicPartitionSnapshotID struct { + EndOffset int64 + + Epoch int32 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to FetchSnapshotRequestTopicPartitionSnapshotID. +func (v *FetchSnapshotRequestTopicPartitionSnapshotID) Default() { +} + +// NewFetchSnapshotRequestTopicPartitionSnapshotID returns a default FetchSnapshotRequestTopicPartitionSnapshotID +// This is a shortcut for creating a struct and calling Default yourself. +func NewFetchSnapshotRequestTopicPartitionSnapshotID() FetchSnapshotRequestTopicPartitionSnapshotID { + var v FetchSnapshotRequestTopicPartitionSnapshotID + v.Default() + return v +} + +type FetchSnapshotRequestTopicPartition struct { + // The partition to fetch. + Partition int32 + + // The current leader epoch of the partition, or -1 for an unknown leader epoch. + CurrentLeaderEpoch int32 + + // The snapshot end offset and epoch to fetch. 
+ SnapshotID FetchSnapshotRequestTopicPartitionSnapshotID + + // The byte position within the snapshot to start fetching from. + Position int64 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to FetchSnapshotRequestTopicPartition. +func (v *FetchSnapshotRequestTopicPartition) Default() { + { + v := &v.SnapshotID + _ = v + } +} + +// NewFetchSnapshotRequestTopicPartition returns a default FetchSnapshotRequestTopicPartition +// This is a shortcut for creating a struct and calling Default yourself. +func NewFetchSnapshotRequestTopicPartition() FetchSnapshotRequestTopicPartition { + var v FetchSnapshotRequestTopicPartition + v.Default() + return v +} + +type FetchSnapshotRequestTopic struct { + // The name of the topic to fetch. + Topic string + + // The partitions to fetch. + Partitions []FetchSnapshotRequestTopicPartition + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to FetchSnapshotRequestTopic. +func (v *FetchSnapshotRequestTopic) Default() { +} + +// NewFetchSnapshotRequestTopic returns a default FetchSnapshotRequestTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewFetchSnapshotRequestTopic() FetchSnapshotRequestTopic { + var v FetchSnapshotRequestTopic + v.Default() + return v +} + +// Introduced for KIP-630, FetchSnapshotRequest is a part of the inter-Kafka +// raft protocol to remove the dependency on Zookeeper. +type FetchSnapshotRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // The ClusterID if known, this is used to validate metadata fetches prior to + // broker registration. + ClusterID *string // tag 0 + + // The broker ID of the follower. + // + // This field has a default of -1. + ReplicaID int32 + + // The maximum bytes to fetch from all of the snapshots. + // + // This field has a default of 0x7fffffff. + MaxBytes int32 + + // The topics to fetch. + Topics []FetchSnapshotRequestTopic + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +func (*FetchSnapshotRequest) Key() int16 { return 59 } +func (*FetchSnapshotRequest) MaxVersion() int16 { return 0 } +func (v *FetchSnapshotRequest) SetVersion(version int16) { v.Version = version } +func (v *FetchSnapshotRequest) GetVersion() int16 { return v.Version } +func (v *FetchSnapshotRequest) IsFlexible() bool { return v.Version >= 0 } +func (v *FetchSnapshotRequest) ResponseKind() Response { + r := &FetchSnapshotResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. 
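// Illustrative sketch (not part of the vendored file): building a
// FetchSnapshotRequest and round-tripping it through the AppendTo/ReadFrom
// pair defined below, mainly to show the tagged ClusterID field (tag 0) and
// the defaults Default() applies (ReplicaID -1, MaxBytes 0x7fffffff). The
// cluster ID and "__cluster_metadata" topic are placeholders for illustration.
package main

import (
	"fmt"

	"github.com/twmb/franz-go/pkg/kmsg"
)

func main() {
	req := kmsg.NewPtrFetchSnapshotRequest() // ReplicaID = -1, MaxBytes = 2147483647
	clusterID := "example-cluster-id"        // assumed cluster ID
	req.ClusterID = &clusterID               // serialized as tagged field 0
	req.Topics = append(req.Topics, kmsg.FetchSnapshotRequestTopic{
		Topic: "__cluster_metadata", // assumed KRaft metadata topic
		Partitions: []kmsg.FetchSnapshotRequestTopicPartition{{
			Partition:          0,
			CurrentLeaderEpoch: -1, // unknown leader epoch
			SnapshotID: kmsg.FetchSnapshotRequestTopicPartitionSnapshotID{
				EndOffset: 100,
				Epoch:     2,
			},
			Position: 0,
		}},
	})

	wire := req.AppendTo(nil)

	var back kmsg.FetchSnapshotRequest
	back.SetVersion(req.GetVersion()) // readFrom consults v.Version
	if err := back.ReadFrom(wire); err != nil {
		panic(err)
	}
	fmt.Println(*back.ClusterID, back.MaxBytes, len(back.Topics))
}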
+func (v *FetchSnapshotRequest) RequestWith(ctx context.Context, r Requestor) (*FetchSnapshotResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*FetchSnapshotResponse) + return resp, err +} + +func (v *FetchSnapshotRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + { + v := v.ReplicaID + dst = kbin.AppendInt32(dst, v) + } + { + v := v.MaxBytes + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Partitions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Partition + dst = kbin.AppendInt32(dst, v) + } + { + v := v.CurrentLeaderEpoch + dst = kbin.AppendInt32(dst, v) + } + { + v := &v.SnapshotID + { + v := v.EndOffset + dst = kbin.AppendInt64(dst, v) + } + { + v := v.Epoch + dst = kbin.AppendInt32(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + { + v := v.Position + dst = kbin.AppendInt64(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + var toEncode []uint32 + if v.ClusterID != nil { + toEncode = append(toEncode, 0) + } + dst = kbin.AppendUvarint(dst, uint32(len(toEncode)+v.UnknownTags.Len())) + for _, tag := range toEncode { + switch tag { + case 0: + { + v := v.ClusterID + dst = kbin.AppendUvarint(dst, 0) + sized := false + lenAt := len(dst) + fClusterID: + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + if !sized { + dst = kbin.AppendUvarint(dst[:lenAt], uint32(len(dst[lenAt:]))) + sized = true + goto fClusterID + } + } + } + } + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *FetchSnapshotRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *FetchSnapshotRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *FetchSnapshotRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + s := v + { + v := b.Int32() + s.ReplicaID = v + } + { + v := b.Int32() + s.MaxBytes = v + } + { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]FetchSnapshotRequestTopic, l)...) 
+ } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + { + v := s.Partitions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]FetchSnapshotRequestTopicPartition, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int32() + s.Partition = v + } + { + v := b.Int32() + s.CurrentLeaderEpoch = v + } + { + v := &s.SnapshotID + v.Default() + s := v + { + v := b.Int64() + s.EndOffset = v + } + { + v := b.Int32() + s.Epoch = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + { + v := b.Int64() + s.Position = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Partitions = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + if isFlexible { + for i := b.Uvarint(); i > 0; i-- { + switch key := b.Uvarint(); key { + default: + s.UnknownTags.Set(key, b.Span(int(b.Uvarint()))) + case 0: + b := kbin.Reader{Src: b.Span(int(b.Uvarint()))} + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.ClusterID = v + if err := b.Complete(); err != nil { + return err + } + } + } + } + return b.Complete() +} + +// NewPtrFetchSnapshotRequest returns a pointer to a default FetchSnapshotRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrFetchSnapshotRequest() *FetchSnapshotRequest { + var v FetchSnapshotRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to FetchSnapshotRequest. +func (v *FetchSnapshotRequest) Default() { + v.ReplicaID = -1 + v.MaxBytes = 2147483647 +} + +// NewFetchSnapshotRequest returns a default FetchSnapshotRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewFetchSnapshotRequest() FetchSnapshotRequest { + var v FetchSnapshotRequest + v.Default() + return v +} + +type FetchSnapshotResponseTopicPartitionSnapshotID struct { + EndOffset int64 + + Epoch int32 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to FetchSnapshotResponseTopicPartitionSnapshotID. +func (v *FetchSnapshotResponseTopicPartitionSnapshotID) Default() { +} + +// NewFetchSnapshotResponseTopicPartitionSnapshotID returns a default FetchSnapshotResponseTopicPartitionSnapshotID +// This is a shortcut for creating a struct and calling Default yourself. +func NewFetchSnapshotResponseTopicPartitionSnapshotID() FetchSnapshotResponseTopicPartitionSnapshotID { + var v FetchSnapshotResponseTopicPartitionSnapshotID + v.Default() + return v +} + +type FetchSnapshotResponseTopicPartitionCurrentLeader struct { + LeaderID int32 + + LeaderEpoch int32 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +// Default sets any default fields. 
Calling this allows for future compatibility +// if new fields are added to FetchSnapshotResponseTopicPartitionCurrentLeader. +func (v *FetchSnapshotResponseTopicPartitionCurrentLeader) Default() { +} + +// NewFetchSnapshotResponseTopicPartitionCurrentLeader returns a default FetchSnapshotResponseTopicPartitionCurrentLeader +// This is a shortcut for creating a struct and calling Default yourself. +func NewFetchSnapshotResponseTopicPartitionCurrentLeader() FetchSnapshotResponseTopicPartitionCurrentLeader { + var v FetchSnapshotResponseTopicPartitionCurrentLeader + v.Default() + return v +} + +type FetchSnapshotResponseTopicPartition struct { + // The partition. + Partition int32 + + // An error code, or 0 if there was no fetch error. + ErrorCode int16 + + // The snapshot end offset and epoch to fetch. + SnapshotID FetchSnapshotResponseTopicPartitionSnapshotID + + // The ID of the current leader (or -1 if unknown) and the latest known + // leader epoch. + CurrentLeader FetchSnapshotResponseTopicPartitionCurrentLeader // tag 0 + + // The total size of the snapshot. + Size int64 + + // The starting byte position within the snapshot included in the Bytes + // field. + Position int64 + + // Snapshot data. + Bytes []byte + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to FetchSnapshotResponseTopicPartition. +func (v *FetchSnapshotResponseTopicPartition) Default() { + { + v := &v.SnapshotID + _ = v + } + { + v := &v.CurrentLeader + _ = v + } +} + +// NewFetchSnapshotResponseTopicPartition returns a default FetchSnapshotResponseTopicPartition +// This is a shortcut for creating a struct and calling Default yourself. +func NewFetchSnapshotResponseTopicPartition() FetchSnapshotResponseTopicPartition { + var v FetchSnapshotResponseTopicPartition + v.Default() + return v +} + +type FetchSnapshotResponseTopic struct { + // The name of the topic to fetch. + Topic string + + // The partitions to fetch. + Partitions []FetchSnapshotResponseTopicPartition + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to FetchSnapshotResponseTopic. +func (v *FetchSnapshotResponseTopic) Default() { +} + +// NewFetchSnapshotResponseTopic returns a default FetchSnapshotResponseTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewFetchSnapshotResponseTopic() FetchSnapshotResponseTopic { + var v FetchSnapshotResponseTopic + v.Default() + return v +} + +// FetchSnapshotResponse is a response for a FetchSnapshotRequest. +type FetchSnapshotResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ThrottleMillis is how long of a throttle Kafka will apply to the client + // after responding to this request. + ThrottleMillis int32 + + // The top level response error code. + ErrorCode int16 + + // The topics to fetch. + Topics []FetchSnapshotResponseTopic + + // UnknownTags are tags Kafka sent that we do not know the purpose of. 
+ UnknownTags Tags +} + +func (*FetchSnapshotResponse) Key() int16 { return 59 } +func (*FetchSnapshotResponse) MaxVersion() int16 { return 0 } +func (v *FetchSnapshotResponse) SetVersion(version int16) { v.Version = version } +func (v *FetchSnapshotResponse) GetVersion() int16 { return v.Version } +func (v *FetchSnapshotResponse) IsFlexible() bool { return v.Version >= 0 } +func (v *FetchSnapshotResponse) Throttle() (int32, bool) { return v.ThrottleMillis, v.Version >= 0 } +func (v *FetchSnapshotResponse) SetThrottle(throttleMillis int32) { v.ThrottleMillis = throttleMillis } +func (v *FetchSnapshotResponse) RequestKind() Request { + return &FetchSnapshotRequest{Version: v.Version} +} + +func (v *FetchSnapshotResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Partitions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Partition + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := &v.SnapshotID + { + v := v.EndOffset + dst = kbin.AppendInt64(dst, v) + } + { + v := v.Epoch + dst = kbin.AppendInt32(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + { + v := v.Size + dst = kbin.AppendInt64(dst, v) + } + { + v := v.Position + dst = kbin.AppendInt64(dst, v) + } + { + v := v.Bytes + if isFlexible { + dst = kbin.AppendCompactBytes(dst, v) + } else { + dst = kbin.AppendBytes(dst, v) + } + } + if isFlexible { + var toEncode []uint32 + if !reflect.DeepEqual(v.CurrentLeader, (func() FetchSnapshotResponseTopicPartitionCurrentLeader { + var v FetchSnapshotResponseTopicPartitionCurrentLeader + v.Default() + return v + })()) { + toEncode = append(toEncode, 0) + } + dst = kbin.AppendUvarint(dst, uint32(len(toEncode)+v.UnknownTags.Len())) + for _, tag := range toEncode { + switch tag { + case 0: + { + v := v.CurrentLeader + dst = kbin.AppendUvarint(dst, 0) + sized := false + lenAt := len(dst) + fCurrentLeader: + { + v := v.LeaderID + dst = kbin.AppendInt32(dst, v) + } + { + v := v.LeaderEpoch + dst = kbin.AppendInt32(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + if !sized { + dst = kbin.AppendUvarint(dst[:lenAt], uint32(len(dst[lenAt:]))) + sized = true + goto fCurrentLeader + } + } + } + } + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *FetchSnapshotResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *FetchSnapshotResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *FetchSnapshotResponse) readFrom(src 
[]byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + s := v + { + v := b.Int32() + s.ThrottleMillis = v + } + { + v := b.Int16() + s.ErrorCode = v + } + { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]FetchSnapshotResponseTopic, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + { + v := s.Partitions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]FetchSnapshotResponseTopicPartition, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int32() + s.Partition = v + } + { + v := b.Int16() + s.ErrorCode = v + } + { + v := &s.SnapshotID + v.Default() + s := v + { + v := b.Int64() + s.EndOffset = v + } + { + v := b.Int32() + s.Epoch = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + { + v := b.Int64() + s.Size = v + } + { + v := b.Int64() + s.Position = v + } + { + var v []byte + if isFlexible { + v = b.CompactBytes() + } else { + v = b.Bytes() + } + s.Bytes = v + } + if isFlexible { + for i := b.Uvarint(); i > 0; i-- { + switch key := b.Uvarint(); key { + default: + s.UnknownTags.Set(key, b.Span(int(b.Uvarint()))) + case 0: + b := kbin.Reader{Src: b.Span(int(b.Uvarint()))} + v := &s.CurrentLeader + v.Default() + s := v + { + v := b.Int32() + s.LeaderID = v + } + { + v := b.Int32() + s.LeaderEpoch = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + if err := b.Complete(); err != nil { + return err + } + } + } + } + } + v = a + s.Partitions = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrFetchSnapshotResponse returns a pointer to a default FetchSnapshotResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrFetchSnapshotResponse() *FetchSnapshotResponse { + var v FetchSnapshotResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to FetchSnapshotResponse. +func (v *FetchSnapshotResponse) Default() { +} + +// NewFetchSnapshotResponse returns a default FetchSnapshotResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewFetchSnapshotResponse() FetchSnapshotResponse { + var v FetchSnapshotResponse + v.Default() + return v +} + +// Introduced for KIP-700, DescribeClusterRequest is effectively an "admin" +// type metadata request for information that producers or consumers do not +// need to care about. +type DescribeClusterRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // Whether to include cluster authorized operations. This requires DESCRIBE + // on CLUSTER. + IncludeClusterAuthorizedOperations bool + + // The endpoint type to describe. 1=brokers, 2=controllers. 
+ // + // This field has a default of 1. + EndpointType int8 // v1+ + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +func (*DescribeClusterRequest) Key() int16 { return 60 } +func (*DescribeClusterRequest) MaxVersion() int16 { return 1 } +func (v *DescribeClusterRequest) SetVersion(version int16) { v.Version = version } +func (v *DescribeClusterRequest) GetVersion() int16 { return v.Version } +func (v *DescribeClusterRequest) IsFlexible() bool { return v.Version >= 0 } +func (v *DescribeClusterRequest) ResponseKind() Response { + r := &DescribeClusterResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. +func (v *DescribeClusterRequest) RequestWith(ctx context.Context, r Requestor) (*DescribeClusterResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*DescribeClusterResponse) + return resp, err +} + +func (v *DescribeClusterRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + { + v := v.IncludeClusterAuthorizedOperations + dst = kbin.AppendBool(dst, v) + } + if version >= 1 { + v := v.EndpointType + dst = kbin.AppendInt8(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *DescribeClusterRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *DescribeClusterRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *DescribeClusterRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + s := v + { + v := b.Bool() + s.IncludeClusterAuthorizedOperations = v + } + if version >= 1 { + v := b.Int8() + s.EndpointType = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrDescribeClusterRequest returns a pointer to a default DescribeClusterRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrDescribeClusterRequest() *DescribeClusterRequest { + var v DescribeClusterRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DescribeClusterRequest. +func (v *DescribeClusterRequest) Default() { + v.EndpointType = 1 +} + +// NewDescribeClusterRequest returns a default DescribeClusterRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewDescribeClusterRequest() DescribeClusterRequest { + var v DescribeClusterRequest + v.Default() + return v +} + +type DescribeClusterResponseBroker struct { + // NodeID is the node ID of a Kafka broker. + NodeID int32 + + // Host is the hostname of a Kafka broker. + Host string + + // Port is the port of a Kafka broker. + Port int32 + + // Rack is the rack this Kafka broker is in, if any. + Rack *string + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DescribeClusterResponseBroker. 
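DescribeCluster is the kind of admin request a client can issue directly through the generated RequestWith helper above; a *kgo.Client can be passed as the Requestor because it exposes a matching Request method. A minimal sketch, assuming a reachable broker at localhost:9092 (the address is illustrative and not part of this patch):

package main

import (
	"context"
	"fmt"

	"github.com/twmb/franz-go/pkg/kerr"
	"github.com/twmb/franz-go/pkg/kgo"
	"github.com/twmb/franz-go/pkg/kmsg"
)

func main() {
	cl, err := kgo.NewClient(kgo.SeedBrokers("localhost:9092"))
	if err != nil {
		panic(err)
	}
	defer cl.Close()

	// The constructor applies the defaults, including EndpointType = 1 (brokers).
	req := kmsg.NewPtrDescribeClusterRequest()

	resp, err := req.RequestWith(context.Background(), cl)
	if err != nil {
		panic(err)
	}
	// Top-level error codes map to typed errors via pkg/kerr.
	if err := kerr.ErrorForCode(resp.ErrorCode); err != nil {
		panic(err)
	}

	fmt.Println("cluster:", resp.ClusterID, "controller:", resp.ControllerID)
	for _, b := range resp.Brokers {
		fmt.Printf("broker %d at %s:%d\n", b.NodeID, b.Host, b.Port)
	}
}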
+func (v *DescribeClusterResponseBroker) Default() { +} + +// NewDescribeClusterResponseBroker returns a default DescribeClusterResponseBroker +// This is a shortcut for creating a struct and calling Default yourself. +func NewDescribeClusterResponseBroker() DescribeClusterResponseBroker { + var v DescribeClusterResponseBroker + v.Default() + return v +} + +// DescribeClusterResponse is a response to a DescribeClusterRequest. +type DescribeClusterResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ThrottleMillis is how long of a throttle Kafka will apply to the client + // after responding to this request. + ThrottleMillis int32 + + // The top level response error code. + ErrorCode int16 + + // The top level error message, if any. + ErrorMessage *string + + // The endpoint type that was described. 1=brokers, 2=controllers. + // + // This field has a default of 1. + EndpointType int8 // v1+ + + // The cluster ID that responding broker belongs to. + ClusterID string + + // The ID of the controller broker. + // + // This field has a default of -1. + ControllerID int32 + + // Brokers is a set of alive Kafka brokers (this mirrors MetadataResponse.Brokers). + Brokers []DescribeClusterResponseBroker + + // 32-bit bitfield to represent authorized operations for this cluster. + // + // This field has a default of -2147483648. + ClusterAuthorizedOperations int32 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +func (*DescribeClusterResponse) Key() int16 { return 60 } +func (*DescribeClusterResponse) MaxVersion() int16 { return 1 } +func (v *DescribeClusterResponse) SetVersion(version int16) { v.Version = version } +func (v *DescribeClusterResponse) GetVersion() int16 { return v.Version } +func (v *DescribeClusterResponse) IsFlexible() bool { return v.Version >= 0 } +func (v *DescribeClusterResponse) Throttle() (int32, bool) { return v.ThrottleMillis, v.Version >= 0 } +func (v *DescribeClusterResponse) SetThrottle(throttleMillis int32) { + v.ThrottleMillis = throttleMillis +} + +func (v *DescribeClusterResponse) RequestKind() Request { + return &DescribeClusterRequest{Version: v.Version} +} + +func (v *DescribeClusterResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.ErrorMessage + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + if version >= 1 { + v := v.EndpointType + dst = kbin.AppendInt8(dst, v) + } + { + v := v.ClusterID + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.ControllerID + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Brokers + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.NodeID + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Host + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Port + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Rack + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 
0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + { + v := v.ClusterAuthorizedOperations + dst = kbin.AppendInt32(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *DescribeClusterResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *DescribeClusterResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *DescribeClusterResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + s := v + { + v := b.Int32() + s.ThrottleMillis = v + } + { + v := b.Int16() + s.ErrorCode = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.ErrorMessage = v + } + if version >= 1 { + v := b.Int8() + s.EndpointType = v + } + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.ClusterID = v + } + { + v := b.Int32() + s.ControllerID = v + } + { + v := s.Brokers + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]DescribeClusterResponseBroker, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int32() + s.NodeID = v + } + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Host = v + } + { + v := b.Int32() + s.Port = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.Rack = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Brokers = v + } + { + v := b.Int32() + s.ClusterAuthorizedOperations = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrDescribeClusterResponse returns a pointer to a default DescribeClusterResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrDescribeClusterResponse() *DescribeClusterResponse { + var v DescribeClusterResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DescribeClusterResponse. +func (v *DescribeClusterResponse) Default() { + v.EndpointType = 1 + v.ControllerID = -1 + v.ClusterAuthorizedOperations = -2147483648 +} + +// NewDescribeClusterResponse returns a default DescribeClusterResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewDescribeClusterResponse() DescribeClusterResponse { + var v DescribeClusterResponse + v.Default() + return v +} + +type DescribeProducersRequestTopic struct { + Topic string + + // The partitions to list producers for for the given topic. 
+ Partitions []int32 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DescribeProducersRequestTopic. +func (v *DescribeProducersRequestTopic) Default() { +} + +// NewDescribeProducersRequestTopic returns a default DescribeProducersRequestTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewDescribeProducersRequestTopic() DescribeProducersRequestTopic { + var v DescribeProducersRequestTopic + v.Default() + return v +} + +// Introduced for KIP-664, DescribeProducersRequest allows for introspecting +// the state of the transaction coordinator. This request can be used to detect +// hanging transactions or other EOS-related problems. +// +// This request allows for describing the state of the active +// idempotent/transactional producers. +type DescribeProducersRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // The topics to describe producers for. + Topics []DescribeProducersRequestTopic + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +func (*DescribeProducersRequest) Key() int16 { return 61 } +func (*DescribeProducersRequest) MaxVersion() int16 { return 0 } +func (v *DescribeProducersRequest) SetVersion(version int16) { v.Version = version } +func (v *DescribeProducersRequest) GetVersion() int16 { return v.Version } +func (v *DescribeProducersRequest) IsFlexible() bool { return v.Version >= 0 } +func (v *DescribeProducersRequest) ResponseKind() Response { + r := &DescribeProducersResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. 
+func (v *DescribeProducersRequest) RequestWith(ctx context.Context, r Requestor) (*DescribeProducersResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*DescribeProducersResponse) + return resp, err +} + +func (v *DescribeProducersRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Partitions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := v[i] + dst = kbin.AppendInt32(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *DescribeProducersRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *DescribeProducersRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *DescribeProducersRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + s := v + { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]DescribeProducersRequestTopic, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + { + v := s.Partitions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]int32, l)...) + } + for i := int32(0); i < l; i++ { + v := b.Int32() + a[i] = v + } + v = a + s.Partitions = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrDescribeProducersRequest returns a pointer to a default DescribeProducersRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrDescribeProducersRequest() *DescribeProducersRequest { + var v DescribeProducersRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DescribeProducersRequest. +func (v *DescribeProducersRequest) Default() { +} + +// NewDescribeProducersRequest returns a default DescribeProducersRequest +// This is a shortcut for creating a struct and calling Default yourself. 
+func NewDescribeProducersRequest() DescribeProducersRequest { + var v DescribeProducersRequest + v.Default() + return v +} + +type DescribeProducersResponseTopicPartitionActiveProducer struct { + ProducerID int64 + + ProducerEpoch int32 + + // The last sequence produced. + // + // This field has a default of -1. + LastSequence int32 + + // The last timestamp produced. + // + // This field has a default of -1. + LastTimestamp int64 + + // The epoch of the transactional coordinator for this last produce. + CoordinatorEpoch int32 + + // The first offset of the transaction. + // + // This field has a default of -1. + CurrentTxnStartOffset int64 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DescribeProducersResponseTopicPartitionActiveProducer. +func (v *DescribeProducersResponseTopicPartitionActiveProducer) Default() { + v.LastSequence = -1 + v.LastTimestamp = -1 + v.CurrentTxnStartOffset = -1 +} + +// NewDescribeProducersResponseTopicPartitionActiveProducer returns a default DescribeProducersResponseTopicPartitionActiveProducer +// This is a shortcut for creating a struct and calling Default yourself. +func NewDescribeProducersResponseTopicPartitionActiveProducer() DescribeProducersResponseTopicPartitionActiveProducer { + var v DescribeProducersResponseTopicPartitionActiveProducer + v.Default() + return v +} + +type DescribeProducersResponseTopicPartition struct { + Partition int32 + + // The partition error code, or 0 if there was no error. + // + // NOT_LEADER_OR_FOLLOWER is returned if the broker receiving this request + // is not the leader of the partition. + // + // TOPIC_AUTHORIZATION_FAILED is returned if the user does not have Describe + // permissions on the topic. + // + // UNKNOWN_TOPIC_OR_PARTITION is returned if the partition is not known to exist. + // + // Other errors may be returned corresponding to the partition being offline, etc. + ErrorCode int16 + + // The partition error message, which may be null if no additional details are available. + ErrorMessage *string + + // The current idempotent or transactional producers producing to this partition, + // and the metadata related to their produce requests. + ActiveProducers []DescribeProducersResponseTopicPartitionActiveProducer + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DescribeProducersResponseTopicPartition. +func (v *DescribeProducersResponseTopicPartition) Default() { +} + +// NewDescribeProducersResponseTopicPartition returns a default DescribeProducersResponseTopicPartition +// This is a shortcut for creating a struct and calling Default yourself. +func NewDescribeProducersResponseTopicPartition() DescribeProducersResponseTopicPartition { + var v DescribeProducersResponseTopicPartition + v.Default() + return v +} + +type DescribeProducersResponseTopic struct { + Topic string + + Partitions []DescribeProducersResponseTopicPartition + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DescribeProducersResponseTopic. 
+func (v *DescribeProducersResponseTopic) Default() { +} + +// NewDescribeProducersResponseTopic returns a default DescribeProducersResponseTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewDescribeProducersResponseTopic() DescribeProducersResponseTopic { + var v DescribeProducersResponseTopic + v.Default() + return v +} + +// DescribeProducersResponse is a response to a DescribeProducersRequest. +type DescribeProducersResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ThrottleMillis is how long of a throttle Kafka will apply to the client + // after responding to this request. + ThrottleMillis int32 + + Topics []DescribeProducersResponseTopic + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +func (*DescribeProducersResponse) Key() int16 { return 61 } +func (*DescribeProducersResponse) MaxVersion() int16 { return 0 } +func (v *DescribeProducersResponse) SetVersion(version int16) { v.Version = version } +func (v *DescribeProducersResponse) GetVersion() int16 { return v.Version } +func (v *DescribeProducersResponse) IsFlexible() bool { return v.Version >= 0 } +func (v *DescribeProducersResponse) Throttle() (int32, bool) { return v.ThrottleMillis, v.Version >= 0 } +func (v *DescribeProducersResponse) SetThrottle(throttleMillis int32) { + v.ThrottleMillis = throttleMillis +} + +func (v *DescribeProducersResponse) RequestKind() Request { + return &DescribeProducersRequest{Version: v.Version} +} + +func (v *DescribeProducersResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Partitions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Partition + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.ErrorMessage + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + { + v := v.ActiveProducers + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.ProducerID + dst = kbin.AppendInt64(dst, v) + } + { + v := v.ProducerEpoch + dst = kbin.AppendInt32(dst, v) + } + { + v := v.LastSequence + dst = kbin.AppendInt32(dst, v) + } + { + v := v.LastTimestamp + dst = kbin.AppendInt64(dst, v) + } + { + v := v.CoordinatorEpoch + dst = kbin.AppendInt32(dst, v) + } + { + v := v.CurrentTxnStartOffset + dst = kbin.AppendInt64(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = 
kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *DescribeProducersResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *DescribeProducersResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *DescribeProducersResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + s := v + { + v := b.Int32() + s.ThrottleMillis = v + } + { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]DescribeProducersResponseTopic, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + { + v := s.Partitions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]DescribeProducersResponseTopicPartition, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int32() + s.Partition = v + } + { + v := b.Int16() + s.ErrorCode = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.ErrorMessage = v + } + { + v := s.ActiveProducers + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]DescribeProducersResponseTopicPartitionActiveProducer, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int64() + s.ProducerID = v + } + { + v := b.Int32() + s.ProducerEpoch = v + } + { + v := b.Int32() + s.LastSequence = v + } + { + v := b.Int64() + s.LastTimestamp = v + } + { + v := b.Int32() + s.CoordinatorEpoch = v + } + { + v := b.Int64() + s.CurrentTxnStartOffset = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.ActiveProducers = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Partitions = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrDescribeProducersResponse returns a pointer to a default DescribeProducersResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrDescribeProducersResponse() *DescribeProducersResponse { + var v DescribeProducersResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DescribeProducersResponse. +func (v *DescribeProducersResponse) Default() { +} + +// NewDescribeProducersResponse returns a default DescribeProducersResponse +// This is a shortcut for creating a struct and calling Default yourself. 
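Since DescribeProducers exists to spot hanging transactions, a typical use is to ask a topic/partition for its active producers and inspect CurrentTxnStartOffset (which defaults to -1 when no transaction is open). The generated comment above recommends client.RequestSharded when per-broker results matter; this minimal sketch keeps to the merged RequestWith path and assumes a reachable broker and a topic named "example" (both illustrative):

package main

import (
	"context"
	"fmt"

	"github.com/twmb/franz-go/pkg/kerr"
	"github.com/twmb/franz-go/pkg/kgo"
	"github.com/twmb/franz-go/pkg/kmsg"
)

func main() {
	cl, err := kgo.NewClient(kgo.SeedBrokers("localhost:9092"))
	if err != nil {
		panic(err)
	}
	defer cl.Close()

	reqTopic := kmsg.NewDescribeProducersRequestTopic()
	reqTopic.Topic = "example"
	reqTopic.Partitions = []int32{0}

	req := kmsg.NewPtrDescribeProducersRequest()
	req.Topics = append(req.Topics, reqTopic)

	resp, err := req.RequestWith(context.Background(), cl)
	if err != nil {
		panic(err)
	}
	for _, t := range resp.Topics {
		for _, p := range t.Partitions {
			if err := kerr.ErrorForCode(p.ErrorCode); err != nil {
				fmt.Printf("%s[%d]: %v\n", t.Topic, p.Partition, err)
				continue
			}
			for _, ap := range p.ActiveProducers {
				fmt.Printf("%s[%d]: producer %d epoch %d txn start %d\n",
					t.Topic, p.Partition, ap.ProducerID, ap.ProducerEpoch, ap.CurrentTxnStartOffset)
			}
		}
	}
}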
+func NewDescribeProducersResponse() DescribeProducersResponse { + var v DescribeProducersResponse + v.Default() + return v +} + +type BrokerRegistrationRequestListener struct { + // The name of this endpoint. + Name string + + // The hostname. + Host string + + // The port. + Port uint16 + + // The security protocol. + SecurityProtocol int16 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to BrokerRegistrationRequestListener. +func (v *BrokerRegistrationRequestListener) Default() { +} + +// NewBrokerRegistrationRequestListener returns a default BrokerRegistrationRequestListener +// This is a shortcut for creating a struct and calling Default yourself. +func NewBrokerRegistrationRequestListener() BrokerRegistrationRequestListener { + var v BrokerRegistrationRequestListener + v.Default() + return v +} + +type BrokerRegistrationRequestFeature struct { + // The name of the feature. + Name string + + // The minimum supported feature level. + MinSupportedVersion int16 + + // The maximum supported feature level. + MaxSupportedVersion int16 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to BrokerRegistrationRequestFeature. +func (v *BrokerRegistrationRequestFeature) Default() { +} + +// NewBrokerRegistrationRequestFeature returns a default BrokerRegistrationRequestFeature +// This is a shortcut for creating a struct and calling Default yourself. +func NewBrokerRegistrationRequestFeature() BrokerRegistrationRequestFeature { + var v BrokerRegistrationRequestFeature + v.Default() + return v +} + +// For KIP-500 / KIP-631, BrokerRegistrationRequest is an internal +// broker-to-broker only request. +type BrokerRegistrationRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // The broker ID. + BrokerID int32 + + // The cluster ID of the broker process. + ClusterID string + + // The incarnation ID of the broker process. + IncarnationID [16]byte + + // The listeners for this broker. + Listeners []BrokerRegistrationRequestListener + + // Features on this broker. + Features []BrokerRegistrationRequestFeature + + // The rack that this broker is in, if any. + Rack *string + + // If the required configurations for ZK migration are present, this value is + // set to true. + IsMigratingZkBroker bool // v1+ + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +func (*BrokerRegistrationRequest) Key() int16 { return 62 } +func (*BrokerRegistrationRequest) MaxVersion() int16 { return 1 } +func (v *BrokerRegistrationRequest) SetVersion(version int16) { v.Version = version } +func (v *BrokerRegistrationRequest) GetVersion() int16 { return v.Version } +func (v *BrokerRegistrationRequest) IsFlexible() bool { return v.Version >= 0 } +func (v *BrokerRegistrationRequest) ResponseKind() Response { + r := &BrokerRegistrationResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. 
+func (v *BrokerRegistrationRequest) RequestWith(ctx context.Context, r Requestor) (*BrokerRegistrationResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*BrokerRegistrationResponse) + return resp, err +} + +func (v *BrokerRegistrationRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + { + v := v.BrokerID + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ClusterID + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.IncarnationID + dst = kbin.AppendUuid(dst, v) + } + { + v := v.Listeners + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Name + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Host + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Port + dst = kbin.AppendUint16(dst, v) + } + { + v := v.SecurityProtocol + dst = kbin.AppendInt16(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + { + v := v.Features + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Name + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.MinSupportedVersion + dst = kbin.AppendInt16(dst, v) + } + { + v := v.MaxSupportedVersion + dst = kbin.AppendInt16(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + { + v := v.Rack + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + if version >= 1 { + v := v.IsMigratingZkBroker + dst = kbin.AppendBool(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *BrokerRegistrationRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *BrokerRegistrationRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *BrokerRegistrationRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + s := v + { + v := b.Int32() + s.BrokerID = v + } + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.ClusterID = v + } + { + v := b.Uuid() + s.IncarnationID = v + } + { + v := s.Listeners + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]BrokerRegistrationRequestListener, l)...) 
+ } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Name = v + } + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Host = v + } + { + v := b.Uint16() + s.Port = v + } + { + v := b.Int16() + s.SecurityProtocol = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Listeners = v + } + { + v := s.Features + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]BrokerRegistrationRequestFeature, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Name = v + } + { + v := b.Int16() + s.MinSupportedVersion = v + } + { + v := b.Int16() + s.MaxSupportedVersion = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Features = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.Rack = v + } + if version >= 1 { + v := b.Bool() + s.IsMigratingZkBroker = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrBrokerRegistrationRequest returns a pointer to a default BrokerRegistrationRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrBrokerRegistrationRequest() *BrokerRegistrationRequest { + var v BrokerRegistrationRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to BrokerRegistrationRequest. +func (v *BrokerRegistrationRequest) Default() { +} + +// NewBrokerRegistrationRequest returns a default BrokerRegistrationRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewBrokerRegistrationRequest() BrokerRegistrationRequest { + var v BrokerRegistrationRequest + v.Default() + return v +} + +// BrokerRegistrationResponse is a response to a BrokerRegistrationRequest. +type BrokerRegistrationResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ThrottleMillis is how long of a throttle Kafka will apply to the client + // after responding to this request. + ThrottleMillis int32 + + // Any error code, or 0. + ErrorCode int16 + + // The broker's assigned epoch, or -1 if none was assigned. + // + // This field has a default of -1. + BrokerEpoch int64 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. 
+ UnknownTags Tags +} + +func (*BrokerRegistrationResponse) Key() int16 { return 62 } +func (*BrokerRegistrationResponse) MaxVersion() int16 { return 1 } +func (v *BrokerRegistrationResponse) SetVersion(version int16) { v.Version = version } +func (v *BrokerRegistrationResponse) GetVersion() int16 { return v.Version } +func (v *BrokerRegistrationResponse) IsFlexible() bool { return v.Version >= 0 } +func (v *BrokerRegistrationResponse) Throttle() (int32, bool) { + return v.ThrottleMillis, v.Version >= 0 +} + +func (v *BrokerRegistrationResponse) SetThrottle(throttleMillis int32) { + v.ThrottleMillis = throttleMillis +} + +func (v *BrokerRegistrationResponse) RequestKind() Request { + return &BrokerRegistrationRequest{Version: v.Version} +} + +func (v *BrokerRegistrationResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.BrokerEpoch + dst = kbin.AppendInt64(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *BrokerRegistrationResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *BrokerRegistrationResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *BrokerRegistrationResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + s := v + { + v := b.Int32() + s.ThrottleMillis = v + } + { + v := b.Int16() + s.ErrorCode = v + } + { + v := b.Int64() + s.BrokerEpoch = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrBrokerRegistrationResponse returns a pointer to a default BrokerRegistrationResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrBrokerRegistrationResponse() *BrokerRegistrationResponse { + var v BrokerRegistrationResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to BrokerRegistrationResponse. +func (v *BrokerRegistrationResponse) Default() { + v.BrokerEpoch = -1 +} + +// NewBrokerRegistrationResponse returns a default BrokerRegistrationResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewBrokerRegistrationResponse() BrokerRegistrationResponse { + var v BrokerRegistrationResponse + v.Default() + return v +} + +// For KIP-500 / KIP-631, BrokerHeartbeatRequest is an internal +// broker-to-broker only request. +type BrokerHeartbeatRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // The broker ID. + BrokerID int32 + + // The broker's epoch. + // + // This field has a default of -1. + BrokerEpoch int64 + + // The highest metadata offset that the broker has reached. + CurrentMetadataOffset int64 + + // True if the broker wants to be fenced. + WantFence bool + + // True if the broker wants to be shutdown. + WantShutdown bool + + // UnknownTags are tags Kafka sent that we do not know the purpose of. 
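BrokerRegistration (key 62) is an internal broker-to-broker request, so clients normally never send it, but it is a convenient place to see how version-gated fields behave: IsMigratingZkBroker is only written and read at v1+. A small encode/decode sketch using only the generated helpers above (the broker ID, cluster ID, and hostname are illustrative, not part of this patch):

package main

import (
	"fmt"

	"github.com/twmb/franz-go/pkg/kmsg"
)

func main() {
	req := kmsg.NewPtrBrokerRegistrationRequest()
	req.Version = 1 // v1 is required for IsMigratingZkBroker to be encoded
	req.BrokerID = 7
	req.ClusterID = "test-cluster"
	req.IsMigratingZkBroker = true

	listener := kmsg.NewBrokerRegistrationRequestListener()
	listener.Name = "PLAINTEXT"
	listener.Host = "broker-7.example.internal"
	listener.Port = 9092
	req.Listeners = append(req.Listeners, listener)

	wire := req.AppendTo(nil)

	// ReadFrom decodes according to the Version already set on the
	// destination struct, so it must match the version used to encode.
	var decoded kmsg.BrokerRegistrationRequest
	decoded.Version = 1
	if err := decoded.ReadFrom(wire); err != nil {
		panic(err)
	}
	fmt.Println(decoded.BrokerID, decoded.IsMigratingZkBroker) // 7 true
}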
+ UnknownTags Tags +} + +func (*BrokerHeartbeatRequest) Key() int16 { return 63 } +func (*BrokerHeartbeatRequest) MaxVersion() int16 { return 0 } +func (v *BrokerHeartbeatRequest) SetVersion(version int16) { v.Version = version } +func (v *BrokerHeartbeatRequest) GetVersion() int16 { return v.Version } +func (v *BrokerHeartbeatRequest) IsFlexible() bool { return v.Version >= 0 } +func (v *BrokerHeartbeatRequest) ResponseKind() Response { + r := &BrokerHeartbeatResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. +func (v *BrokerHeartbeatRequest) RequestWith(ctx context.Context, r Requestor) (*BrokerHeartbeatResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*BrokerHeartbeatResponse) + return resp, err +} + +func (v *BrokerHeartbeatRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + { + v := v.BrokerID + dst = kbin.AppendInt32(dst, v) + } + { + v := v.BrokerEpoch + dst = kbin.AppendInt64(dst, v) + } + { + v := v.CurrentMetadataOffset + dst = kbin.AppendInt64(dst, v) + } + { + v := v.WantFence + dst = kbin.AppendBool(dst, v) + } + { + v := v.WantShutdown + dst = kbin.AppendBool(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *BrokerHeartbeatRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *BrokerHeartbeatRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *BrokerHeartbeatRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + s := v + { + v := b.Int32() + s.BrokerID = v + } + { + v := b.Int64() + s.BrokerEpoch = v + } + { + v := b.Int64() + s.CurrentMetadataOffset = v + } + { + v := b.Bool() + s.WantFence = v + } + { + v := b.Bool() + s.WantShutdown = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrBrokerHeartbeatRequest returns a pointer to a default BrokerHeartbeatRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrBrokerHeartbeatRequest() *BrokerHeartbeatRequest { + var v BrokerHeartbeatRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to BrokerHeartbeatRequest. +func (v *BrokerHeartbeatRequest) Default() { + v.BrokerEpoch = -1 +} + +// NewBrokerHeartbeatRequest returns a default BrokerHeartbeatRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewBrokerHeartbeatRequest() BrokerHeartbeatRequest { + var v BrokerHeartbeatRequest + v.Default() + return v +} + +// BrokerHeartbeatResponse is a response to a BrokerHeartbeatRequest. +type BrokerHeartbeatResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ThrottleMillis is how long of a throttle Kafka will apply to the client + // after responding to this request. + ThrottleMillis int32 + + // Any error code, or 0. + ErrorCode int16 + + // True if the broker has approximately caught up with the latest metadata. 
+ IsCaughtUp bool + + // True if the broker is fenced. + // + // This field has a default of true. + IsFenced bool + + // True if the broker should proceed with its shutdown. + ShouldShutdown bool + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +func (*BrokerHeartbeatResponse) Key() int16 { return 63 } +func (*BrokerHeartbeatResponse) MaxVersion() int16 { return 0 } +func (v *BrokerHeartbeatResponse) SetVersion(version int16) { v.Version = version } +func (v *BrokerHeartbeatResponse) GetVersion() int16 { return v.Version } +func (v *BrokerHeartbeatResponse) IsFlexible() bool { return v.Version >= 0 } +func (v *BrokerHeartbeatResponse) Throttle() (int32, bool) { return v.ThrottleMillis, v.Version >= 0 } +func (v *BrokerHeartbeatResponse) SetThrottle(throttleMillis int32) { + v.ThrottleMillis = throttleMillis +} + +func (v *BrokerHeartbeatResponse) RequestKind() Request { + return &BrokerHeartbeatRequest{Version: v.Version} +} + +func (v *BrokerHeartbeatResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.IsCaughtUp + dst = kbin.AppendBool(dst, v) + } + { + v := v.IsFenced + dst = kbin.AppendBool(dst, v) + } + { + v := v.ShouldShutdown + dst = kbin.AppendBool(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *BrokerHeartbeatResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *BrokerHeartbeatResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *BrokerHeartbeatResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + s := v + { + v := b.Int32() + s.ThrottleMillis = v + } + { + v := b.Int16() + s.ErrorCode = v + } + { + v := b.Bool() + s.IsCaughtUp = v + } + { + v := b.Bool() + s.IsFenced = v + } + { + v := b.Bool() + s.ShouldShutdown = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrBrokerHeartbeatResponse returns a pointer to a default BrokerHeartbeatResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrBrokerHeartbeatResponse() *BrokerHeartbeatResponse { + var v BrokerHeartbeatResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to BrokerHeartbeatResponse. +func (v *BrokerHeartbeatResponse) Default() { + v.IsFenced = true +} + +// NewBrokerHeartbeatResponse returns a default BrokerHeartbeatResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewBrokerHeartbeatResponse() BrokerHeartbeatResponse { + var v BrokerHeartbeatResponse + v.Default() + return v +} + +// For KIP-500 / KIP-631, UnregisterBrokerRequest is an admin request to +// remove registration of a broker from the cluster. +type UnregisterBrokerRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // The broker ID to unregister. + BrokerID int32 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. 
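The Default methods are more than boilerplate: they apply the non-zero defaults called out in the comments (BrokerEpoch -1 on the heartbeat request, IsFenced true on the response), which is why the generated constructors are preferable to plain struct literals. A tiny sketch illustrating just that, using only the constructors defined above:

package main

import (
	"fmt"

	"github.com/twmb/franz-go/pkg/kmsg"
)

func main() {
	// Constructors call Default, so the documented defaults are applied.
	req := kmsg.NewBrokerHeartbeatRequest()
	resp := kmsg.NewBrokerHeartbeatResponse()
	fmt.Println(req.BrokerEpoch, resp.IsFenced) // -1 true

	// A zero-value literal silently loses those defaults.
	var bare kmsg.BrokerHeartbeatRequest
	fmt.Println(bare.BrokerEpoch) // 0
}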
+ UnknownTags Tags +} + +func (*UnregisterBrokerRequest) Key() int16 { return 64 } +func (*UnregisterBrokerRequest) MaxVersion() int16 { return 0 } +func (v *UnregisterBrokerRequest) SetVersion(version int16) { v.Version = version } +func (v *UnregisterBrokerRequest) GetVersion() int16 { return v.Version } +func (v *UnregisterBrokerRequest) IsFlexible() bool { return v.Version >= 0 } +func (v *UnregisterBrokerRequest) ResponseKind() Response { + r := &UnregisterBrokerResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. +func (v *UnregisterBrokerRequest) RequestWith(ctx context.Context, r Requestor) (*UnregisterBrokerResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*UnregisterBrokerResponse) + return resp, err +} + +func (v *UnregisterBrokerRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + { + v := v.BrokerID + dst = kbin.AppendInt32(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *UnregisterBrokerRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *UnregisterBrokerRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *UnregisterBrokerRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + s := v + { + v := b.Int32() + s.BrokerID = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrUnregisterBrokerRequest returns a pointer to a default UnregisterBrokerRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrUnregisterBrokerRequest() *UnregisterBrokerRequest { + var v UnregisterBrokerRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to UnregisterBrokerRequest. +func (v *UnregisterBrokerRequest) Default() { +} + +// NewUnregisterBrokerRequest returns a default UnregisterBrokerRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewUnregisterBrokerRequest() UnregisterBrokerRequest { + var v UnregisterBrokerRequest + v.Default() + return v +} + +// UnregisterBrokerResponse is a response to a UnregisterBrokerRequest. +type UnregisterBrokerResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ThrottleMillis is how long of a throttle Kafka will apply to the client + // after responding to this request. + ThrottleMillis int32 + + // Any error code, or 0. + ErrorCode int16 + + // The error message, if any. + ErrorMessage *string + + // UnknownTags are tags Kafka sent that we do not know the purpose of. 
+ UnknownTags Tags +} + +func (*UnregisterBrokerResponse) Key() int16 { return 64 } +func (*UnregisterBrokerResponse) MaxVersion() int16 { return 0 } +func (v *UnregisterBrokerResponse) SetVersion(version int16) { v.Version = version } +func (v *UnregisterBrokerResponse) GetVersion() int16 { return v.Version } +func (v *UnregisterBrokerResponse) IsFlexible() bool { return v.Version >= 0 } +func (v *UnregisterBrokerResponse) Throttle() (int32, bool) { return v.ThrottleMillis, v.Version >= 0 } +func (v *UnregisterBrokerResponse) SetThrottle(throttleMillis int32) { + v.ThrottleMillis = throttleMillis +} + +func (v *UnregisterBrokerResponse) RequestKind() Request { + return &UnregisterBrokerRequest{Version: v.Version} +} + +func (v *UnregisterBrokerResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.ErrorMessage + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *UnregisterBrokerResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *UnregisterBrokerResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *UnregisterBrokerResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + s := v + { + v := b.Int32() + s.ThrottleMillis = v + } + { + v := b.Int16() + s.ErrorCode = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.ErrorMessage = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrUnregisterBrokerResponse returns a pointer to a default UnregisterBrokerResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrUnregisterBrokerResponse() *UnregisterBrokerResponse { + var v UnregisterBrokerResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to UnregisterBrokerResponse. +func (v *UnregisterBrokerResponse) Default() { +} + +// NewUnregisterBrokerResponse returns a default UnregisterBrokerResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewUnregisterBrokerResponse() UnregisterBrokerResponse { + var v UnregisterBrokerResponse + v.Default() + return v +} + +// For KIP-664, DescribeTransactionsRequest describes the state of transactions. +type DescribeTransactionsRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // Array of transactionalIds to include in describe results. If empty, then + // no results will be returned. + TransactionalIDs []string + + // UnknownTags are tags Kafka sent that we do not know the purpose of. 
+ UnknownTags Tags +} + +func (*DescribeTransactionsRequest) Key() int16 { return 65 } +func (*DescribeTransactionsRequest) MaxVersion() int16 { return 0 } +func (v *DescribeTransactionsRequest) SetVersion(version int16) { v.Version = version } +func (v *DescribeTransactionsRequest) GetVersion() int16 { return v.Version } +func (v *DescribeTransactionsRequest) IsFlexible() bool { return v.Version >= 0 } +func (v *DescribeTransactionsRequest) ResponseKind() Response { + r := &DescribeTransactionsResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. +func (v *DescribeTransactionsRequest) RequestWith(ctx context.Context, r Requestor) (*DescribeTransactionsResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*DescribeTransactionsResponse) + return resp, err +} + +func (v *DescribeTransactionsRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + { + v := v.TransactionalIDs + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := v[i] + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *DescribeTransactionsRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *DescribeTransactionsRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *DescribeTransactionsRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + s := v + { + v := s.TransactionalIDs + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]string, l)...) + } + for i := int32(0); i < l; i++ { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + a[i] = v + } + v = a + s.TransactionalIDs = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrDescribeTransactionsRequest returns a pointer to a default DescribeTransactionsRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrDescribeTransactionsRequest() *DescribeTransactionsRequest { + var v DescribeTransactionsRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DescribeTransactionsRequest. +func (v *DescribeTransactionsRequest) Default() { +} + +// NewDescribeTransactionsRequest returns a default DescribeTransactionsRequest +// This is a shortcut for creating a struct and calling Default yourself. 
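// A minimal caller-side sketch of the DescribeTransactions request/response
// pair defined here. It is illustrative only: it assumes an existing
// context.Context and a Requestor implementation (for example, a *kgo.Client
// from this module), and the transactional ID below is a hypothetical
// placeholder.
//
//	req := kmsg.NewPtrDescribeTransactionsRequest()
//	req.TransactionalIDs = []string{"example-txn"} // hypothetical ID
//	resp, err := req.RequestWith(ctx, client)      // client implements Requestor
//	if err != nil {
//		return err // transport-level failure
//	}
//	for _, state := range resp.TransactionStates {
//		// Per-transaction errors are reported in state.ErrorCode;
//		// state.State, state.ProducerID, and state.Topics describe the transaction.
//		_ = state
//	}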
+func NewDescribeTransactionsRequest() DescribeTransactionsRequest {
+	var v DescribeTransactionsRequest
+	v.Default()
+	return v
+}
+
+type DescribeTransactionsResponseTransactionStateTopic struct {
+	Topic string
+
+	Partitions []int32
+
+	// UnknownTags are tags Kafka sent that we do not know the purpose of.
+	UnknownTags Tags
+}
+
+// Default sets any default fields. Calling this allows for future compatibility
+// if new fields are added to DescribeTransactionsResponseTransactionStateTopic.
+func (v *DescribeTransactionsResponseTransactionStateTopic) Default() {
+}
+
+// NewDescribeTransactionsResponseTransactionStateTopic returns a default DescribeTransactionsResponseTransactionStateTopic
+// This is a shortcut for creating a struct and calling Default yourself.
+func NewDescribeTransactionsResponseTransactionStateTopic() DescribeTransactionsResponseTransactionStateTopic {
+	var v DescribeTransactionsResponseTransactionStateTopic
+	v.Default()
+	return v
+}
+
+type DescribeTransactionsResponseTransactionState struct {
+	// A potential error code for describing this transaction.
+	//
+	// NOT_COORDINATOR is returned if the broker receiving this transactional
+	// ID does not own the ID.
+	//
+	// COORDINATOR_LOAD_IN_PROGRESS is returned if the coordinator is loading.
+	//
+	// COORDINATOR_NOT_AVAILABLE is returned if the coordinator is being shut down.
+	//
+	// TRANSACTIONAL_ID_NOT_FOUND is returned if the transactional ID could not be found.
+	//
+	// TRANSACTIONAL_ID_AUTHORIZATION_FAILED is returned if the user does not have
+	// Describe permissions on the transactional ID.
+	ErrorCode int16
+
+	// TransactionalID is the transactional ID this record is for.
+	TransactionalID string
+
+	// State is the state the transaction is in.
+	State string
+
+	// TimeoutMillis is the timeout of this transaction in milliseconds.
+	TimeoutMillis int32
+
+	// StartTimestamp is the timestamp in millis of when this transaction started.
+	StartTimestamp int64
+
+	// ProducerID is the ID in use by the transactional ID.
+	ProducerID int64
+
+	// ProducerEpoch is the epoch associated with the producer ID.
+	ProducerEpoch int16
+
+	// The set of partitions included in the current transaction (if active).
+	// When a transaction is preparing to commit or abort, this will include
+	// only partitions which do not have markers.
+	//
+	// This does not include topics the user is not authorized to describe.
+	Topics []DescribeTransactionsResponseTransactionStateTopic
+
+	// UnknownTags are tags Kafka sent that we do not know the purpose of.
+	UnknownTags Tags
+}
+
+// Default sets any default fields. Calling this allows for future compatibility
+// if new fields are added to DescribeTransactionsResponseTransactionState.
+func (v *DescribeTransactionsResponseTransactionState) Default() {
+}
+
+// NewDescribeTransactionsResponseTransactionState returns a default DescribeTransactionsResponseTransactionState
+// This is a shortcut for creating a struct and calling Default yourself.
+func NewDescribeTransactionsResponseTransactionState() DescribeTransactionsResponseTransactionState {
+	var v DescribeTransactionsResponseTransactionState
+	v.Default()
+	return v
+}
+
+// DescribeTransactionsResponse is a response to a DescribeTransactionsRequest.
+type DescribeTransactionsResponse struct {
+	// Version is the version of this message used with a Kafka broker.
+	Version int16
+
+	// ThrottleMillis is how long of a throttle Kafka will apply to the client
+	// after responding to this request.
+ ThrottleMillis int32 + + TransactionStates []DescribeTransactionsResponseTransactionState + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +func (*DescribeTransactionsResponse) Key() int16 { return 65 } +func (*DescribeTransactionsResponse) MaxVersion() int16 { return 0 } +func (v *DescribeTransactionsResponse) SetVersion(version int16) { v.Version = version } +func (v *DescribeTransactionsResponse) GetVersion() int16 { return v.Version } +func (v *DescribeTransactionsResponse) IsFlexible() bool { return v.Version >= 0 } +func (v *DescribeTransactionsResponse) Throttle() (int32, bool) { + return v.ThrottleMillis, v.Version >= 0 +} + +func (v *DescribeTransactionsResponse) SetThrottle(throttleMillis int32) { + v.ThrottleMillis = throttleMillis +} + +func (v *DescribeTransactionsResponse) RequestKind() Request { + return &DescribeTransactionsRequest{Version: v.Version} +} + +func (v *DescribeTransactionsResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.TransactionStates + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.TransactionalID + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.State + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.TimeoutMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.StartTimestamp + dst = kbin.AppendInt64(dst, v) + } + { + v := v.ProducerID + dst = kbin.AppendInt64(dst, v) + } + { + v := v.ProducerEpoch + dst = kbin.AppendInt16(dst, v) + } + { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Partitions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := v[i] + dst = kbin.AppendInt32(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *DescribeTransactionsResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *DescribeTransactionsResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *DescribeTransactionsResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + s := v + { + v := b.Int32() + s.ThrottleMillis = v + } + { + v := s.TransactionStates + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, 
make([]DescribeTransactionsResponseTransactionState, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int16() + s.ErrorCode = v + } + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.TransactionalID = v + } + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.State = v + } + { + v := b.Int32() + s.TimeoutMillis = v + } + { + v := b.Int64() + s.StartTimestamp = v + } + { + v := b.Int64() + s.ProducerID = v + } + { + v := b.Int16() + s.ProducerEpoch = v + } + { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]DescribeTransactionsResponseTransactionStateTopic, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + { + v := s.Partitions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]int32, l)...) + } + for i := int32(0); i < l; i++ { + v := b.Int32() + a[i] = v + } + v = a + s.Partitions = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.TransactionStates = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrDescribeTransactionsResponse returns a pointer to a default DescribeTransactionsResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrDescribeTransactionsResponse() *DescribeTransactionsResponse { + var v DescribeTransactionsResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DescribeTransactionsResponse. +func (v *DescribeTransactionsResponse) Default() { +} + +// NewDescribeTransactionsResponse returns a default DescribeTransactionsResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewDescribeTransactionsResponse() DescribeTransactionsResponse { + var v DescribeTransactionsResponse + v.Default() + return v +} + +// For KIP-664, ListTransactionsRequest lists transactions. +type ListTransactionsRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // The transaction states to filter by: if empty, all transactions are + // returned; if non-empty, then only transactions matching one of the + // filtered states will be returned. + // + // For a list of valid states, see the TransactionState enum. 
+ StateFilters []string + + // The producer IDs to filter by: if empty, all transactions will be + // returned; if non-empty, only transactions which match one of the filtered + // producer IDs will be returned + ProducerIDFilters []int64 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +func (*ListTransactionsRequest) Key() int16 { return 66 } +func (*ListTransactionsRequest) MaxVersion() int16 { return 0 } +func (v *ListTransactionsRequest) SetVersion(version int16) { v.Version = version } +func (v *ListTransactionsRequest) GetVersion() int16 { return v.Version } +func (v *ListTransactionsRequest) IsFlexible() bool { return v.Version >= 0 } +func (v *ListTransactionsRequest) ResponseKind() Response { + r := &ListTransactionsResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. +func (v *ListTransactionsRequest) RequestWith(ctx context.Context, r Requestor) (*ListTransactionsResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*ListTransactionsResponse) + return resp, err +} + +func (v *ListTransactionsRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + { + v := v.StateFilters + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := v[i] + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + } + { + v := v.ProducerIDFilters + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := v[i] + dst = kbin.AppendInt64(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *ListTransactionsRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *ListTransactionsRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *ListTransactionsRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + s := v + { + v := s.StateFilters + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]string, l)...) + } + for i := int32(0); i < l; i++ { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + a[i] = v + } + v = a + s.StateFilters = v + } + { + v := s.ProducerIDFilters + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]int64, l)...) 
+ } + for i := int32(0); i < l; i++ { + v := b.Int64() + a[i] = v + } + v = a + s.ProducerIDFilters = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrListTransactionsRequest returns a pointer to a default ListTransactionsRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrListTransactionsRequest() *ListTransactionsRequest { + var v ListTransactionsRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to ListTransactionsRequest. +func (v *ListTransactionsRequest) Default() { +} + +// NewListTransactionsRequest returns a default ListTransactionsRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewListTransactionsRequest() ListTransactionsRequest { + var v ListTransactionsRequest + v.Default() + return v +} + +type ListTransactionsResponseTransactionState struct { + // The transactional ID being used. + TransactionalID string + + // The producer ID of the producer. + ProducerID int64 + + // The current transaction state of the producer. + TransactionState string + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to ListTransactionsResponseTransactionState. +func (v *ListTransactionsResponseTransactionState) Default() { +} + +// NewListTransactionsResponseTransactionState returns a default ListTransactionsResponseTransactionState +// This is a shortcut for creating a struct and calling Default yourself. +func NewListTransactionsResponseTransactionState() ListTransactionsResponseTransactionState { + var v ListTransactionsResponseTransactionState + v.Default() + return v +} + +// ListTransactionsResponse is a response to a ListTransactionsRequest. +type ListTransactionsResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ThrottleMillis is how long of a throttle Kafka will apply to the client + // after responding to this request. + ThrottleMillis int32 + + // A potential error code for the listing, + // + // COORDINATOR_LOAD_IN_PROGRESS is returned if the coordinator is loading. + // + // COORDINATOR_NOT_AVAILABLE is returned if the coordinator receiving this + // request is shutting down. + ErrorCode int16 + + // Set of state filters provided in the request which were unknown to the + // transaction coordinator. + UnknownStateFilters []string + + // TransactionStates contains all transactions that were matched for listing + // in the request. The response elides transactions that the user does not have + // permission to describe (DESCRIBE on TRANSACTIONAL_ID for the transaction). + TransactionStates []ListTransactionsResponseTransactionState + + // UnknownTags are tags Kafka sent that we do not know the purpose of. 
+ UnknownTags Tags +} + +func (*ListTransactionsResponse) Key() int16 { return 66 } +func (*ListTransactionsResponse) MaxVersion() int16 { return 0 } +func (v *ListTransactionsResponse) SetVersion(version int16) { v.Version = version } +func (v *ListTransactionsResponse) GetVersion() int16 { return v.Version } +func (v *ListTransactionsResponse) IsFlexible() bool { return v.Version >= 0 } +func (v *ListTransactionsResponse) Throttle() (int32, bool) { return v.ThrottleMillis, v.Version >= 0 } +func (v *ListTransactionsResponse) SetThrottle(throttleMillis int32) { + v.ThrottleMillis = throttleMillis +} + +func (v *ListTransactionsResponse) RequestKind() Request { + return &ListTransactionsRequest{Version: v.Version} +} + +func (v *ListTransactionsResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.UnknownStateFilters + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := v[i] + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + } + { + v := v.TransactionStates + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.TransactionalID + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.ProducerID + dst = kbin.AppendInt64(dst, v) + } + { + v := v.TransactionState + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *ListTransactionsResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *ListTransactionsResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *ListTransactionsResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + s := v + { + v := b.Int32() + s.ThrottleMillis = v + } + { + v := b.Int16() + s.ErrorCode = v + } + { + v := s.UnknownStateFilters + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]string, l)...) + } + for i := int32(0); i < l; i++ { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + a[i] = v + } + v = a + s.UnknownStateFilters = v + } + { + v := s.TransactionStates + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]ListTransactionsResponseTransactionState, l)...) 
+ } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.TransactionalID = v + } + { + v := b.Int64() + s.ProducerID = v + } + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.TransactionState = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.TransactionStates = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrListTransactionsResponse returns a pointer to a default ListTransactionsResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrListTransactionsResponse() *ListTransactionsResponse { + var v ListTransactionsResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to ListTransactionsResponse. +func (v *ListTransactionsResponse) Default() { +} + +// NewListTransactionsResponse returns a default ListTransactionsResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewListTransactionsResponse() ListTransactionsResponse { + var v ListTransactionsResponse + v.Default() + return v +} + +// For KIP-730, AllocateProducerIDsRequest is a broker-to-broker request that +// requests a block of producer IDs from the controller broker. This is more +// specifically introduced for raft, but allows for one more request to avoid +// zookeeper in the non-raft world as well. +type AllocateProducerIDsRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // The ID of the requesting broker. + BrokerID int32 + + // The epoch of the requesting broker. + // + // This field has a default of -1. + BrokerEpoch int64 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +func (*AllocateProducerIDsRequest) Key() int16 { return 67 } +func (*AllocateProducerIDsRequest) MaxVersion() int16 { return 0 } +func (v *AllocateProducerIDsRequest) SetVersion(version int16) { v.Version = version } +func (v *AllocateProducerIDsRequest) GetVersion() int16 { return v.Version } +func (v *AllocateProducerIDsRequest) IsFlexible() bool { return v.Version >= 0 } +func (v *AllocateProducerIDsRequest) ResponseKind() Response { + r := &AllocateProducerIDsResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. 
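// A small round-trip sketch for the AppendTo/ReadFrom pair generated for these
// types. It is illustrative only: AppendTo serializes just the message fields
// (no Kafka size prefix or request header), and ReadFrom parses those bytes
// back after Default() resets the struct. The broker ID below is hypothetical.
//
//	req := kmsg.NewPtrAllocateProducerIDsRequest()
//	req.BrokerID = 3          // hypothetical broker ID
//	wire := req.AppendTo(nil) // serialized message body
//
//	parsed := kmsg.NewPtrAllocateProducerIDsRequest()
//	parsed.SetVersion(req.GetVersion())
//	if err := parsed.ReadFrom(wire); err != nil {
//		return err
//	}
//	// parsed.BrokerID == 3; parsed.BrokerEpoch keeps its default of -1.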
+func (v *AllocateProducerIDsRequest) RequestWith(ctx context.Context, r Requestor) (*AllocateProducerIDsResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*AllocateProducerIDsResponse) + return resp, err +} + +func (v *AllocateProducerIDsRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + { + v := v.BrokerID + dst = kbin.AppendInt32(dst, v) + } + { + v := v.BrokerEpoch + dst = kbin.AppendInt64(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *AllocateProducerIDsRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *AllocateProducerIDsRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *AllocateProducerIDsRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + s := v + { + v := b.Int32() + s.BrokerID = v + } + { + v := b.Int64() + s.BrokerEpoch = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrAllocateProducerIDsRequest returns a pointer to a default AllocateProducerIDsRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrAllocateProducerIDsRequest() *AllocateProducerIDsRequest { + var v AllocateProducerIDsRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to AllocateProducerIDsRequest. +func (v *AllocateProducerIDsRequest) Default() { + v.BrokerEpoch = -1 +} + +// NewAllocateProducerIDsRequest returns a default AllocateProducerIDsRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewAllocateProducerIDsRequest() AllocateProducerIDsRequest { + var v AllocateProducerIDsRequest + v.Default() + return v +} + +// AllocateProducerIDsResponse is a response to an AllocateProducerIDsRequest. +type AllocateProducerIDsResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ThrottleMillis is how long of a throttle Kafka will apply to the client + // after responding to this request. + ThrottleMillis int32 + + // An error code, if any. + ErrorCode int16 + + // The first producer ID in this range, inclusive. + ProducerIDStart int64 + + // The number of producer IDs in this range. + ProducerIDLen int32 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. 
+ UnknownTags Tags +} + +func (*AllocateProducerIDsResponse) Key() int16 { return 67 } +func (*AllocateProducerIDsResponse) MaxVersion() int16 { return 0 } +func (v *AllocateProducerIDsResponse) SetVersion(version int16) { v.Version = version } +func (v *AllocateProducerIDsResponse) GetVersion() int16 { return v.Version } +func (v *AllocateProducerIDsResponse) IsFlexible() bool { return v.Version >= 0 } +func (v *AllocateProducerIDsResponse) Throttle() (int32, bool) { + return v.ThrottleMillis, v.Version >= 0 +} + +func (v *AllocateProducerIDsResponse) SetThrottle(throttleMillis int32) { + v.ThrottleMillis = throttleMillis +} + +func (v *AllocateProducerIDsResponse) RequestKind() Request { + return &AllocateProducerIDsRequest{Version: v.Version} +} + +func (v *AllocateProducerIDsResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.ProducerIDStart + dst = kbin.AppendInt64(dst, v) + } + { + v := v.ProducerIDLen + dst = kbin.AppendInt32(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *AllocateProducerIDsResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *AllocateProducerIDsResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *AllocateProducerIDsResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + s := v + { + v := b.Int32() + s.ThrottleMillis = v + } + { + v := b.Int16() + s.ErrorCode = v + } + { + v := b.Int64() + s.ProducerIDStart = v + } + { + v := b.Int32() + s.ProducerIDLen = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrAllocateProducerIDsResponse returns a pointer to a default AllocateProducerIDsResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrAllocateProducerIDsResponse() *AllocateProducerIDsResponse { + var v AllocateProducerIDsResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to AllocateProducerIDsResponse. +func (v *AllocateProducerIDsResponse) Default() { +} + +// NewAllocateProducerIDsResponse returns a default AllocateProducerIDsResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewAllocateProducerIDsResponse() AllocateProducerIDsResponse { + var v AllocateProducerIDsResponse + v.Default() + return v +} + +type ConsumerGroupHeartbeatRequestTopic struct { + // The topic ID. + TopicID [16]byte + + // The partitions. + Partitions []int32 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to ConsumerGroupHeartbeatRequestTopic. +func (v *ConsumerGroupHeartbeatRequestTopic) Default() { +} + +// NewConsumerGroupHeartbeatRequestTopic returns a default ConsumerGroupHeartbeatRequestTopic +// This is a shortcut for creating a struct and calling Default yourself. 
+func NewConsumerGroupHeartbeatRequestTopic() ConsumerGroupHeartbeatRequestTopic { + var v ConsumerGroupHeartbeatRequestTopic + v.Default() + return v +} + +// ConsumerGroupHeartbeat is a part of KIP-848; there are a lot of details +// to this request so documentation is left to the KIP itself. +type ConsumerGroupHeartbeatRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // The group ID. + Group string + + // The member ID generated by the coordinator. This must be kept during + // the entire lifetime of the member. + MemberID string + + // The current member epoch; 0 to join the group, -1 to leave, -2 to + // indicate that the static member will rejoin. + MemberEpoch int32 + + // Instance ID of the member; null if not provided or if unchanging. + InstanceID *string + + // The rack ID of the member; null if not provided or if unchanging. + RackID *string + + // RebalanceTimeoutMillis is how long the coordinator will wait on a member + // to revoke its partitions. -1 if unchanging. + // + // This field has a default of -1. + RebalanceTimeoutMillis int32 + + // Subscribed topics; null if unchanging. + SubscribedTopicNames []string + + // The server side assignor to use; null if unchanging. + ServerAssignor *string + + // Topic partitions owned by the member; null if unchanging. + Topics []ConsumerGroupHeartbeatRequestTopic + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +func (*ConsumerGroupHeartbeatRequest) Key() int16 { return 68 } +func (*ConsumerGroupHeartbeatRequest) MaxVersion() int16 { return 0 } +func (v *ConsumerGroupHeartbeatRequest) SetVersion(version int16) { v.Version = version } +func (v *ConsumerGroupHeartbeatRequest) GetVersion() int16 { return v.Version } +func (v *ConsumerGroupHeartbeatRequest) IsFlexible() bool { return v.Version >= 0 } +func (v *ConsumerGroupHeartbeatRequest) ResponseKind() Response { + r := &ConsumerGroupHeartbeatResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. 
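// A joining-member sketch for the KIP-848 heartbeat documented above: epoch 0
// joins the group, the coordinator assigns MemberID and MemberEpoch in the
// response, and later heartbeats echo those values back. Illustrative only;
// the group name and topic are hypothetical, and ctx/client are assumed to
// exist (client being any Requestor, for example a *kgo.Client).
//
//	req := kmsg.NewPtrConsumerGroupHeartbeatRequest()
//	req.Group = "example-group"                    // hypothetical group
//	req.MemberEpoch = 0                            // 0 => join
//	req.SubscribedTopicNames = []string{"example"} // hypothetical topic
//	resp, err := req.RequestWith(ctx, client)
//	if err != nil {
//		return err
//	}
//	// Keep resp.MemberID and resp.MemberEpoch for subsequent heartbeats;
//	// resp.HeartbeatIntervalMillis says how often to send them, and
//	// resp.Assignment (if non-nil) lists the assigned topic partitions.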
+func (v *ConsumerGroupHeartbeatRequest) RequestWith(ctx context.Context, r Requestor) (*ConsumerGroupHeartbeatResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*ConsumerGroupHeartbeatResponse) + return resp, err +} + +func (v *ConsumerGroupHeartbeatRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + { + v := v.Group + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.MemberID + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.MemberEpoch + dst = kbin.AppendInt32(dst, v) + } + { + v := v.InstanceID + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + { + v := v.RackID + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + { + v := v.RebalanceTimeoutMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.SubscribedTopicNames + if isFlexible { + dst = kbin.AppendCompactNullableArrayLen(dst, len(v), v == nil) + } else { + dst = kbin.AppendNullableArrayLen(dst, len(v), v == nil) + } + for i := range v { + v := v[i] + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + } + { + v := v.ServerAssignor + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactNullableArrayLen(dst, len(v), v == nil) + } else { + dst = kbin.AppendNullableArrayLen(dst, len(v), v == nil) + } + for i := range v { + v := &v[i] + { + v := v.TopicID + dst = kbin.AppendUuid(dst, v) + } + { + v := v.Partitions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := v[i] + dst = kbin.AppendInt32(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *ConsumerGroupHeartbeatRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *ConsumerGroupHeartbeatRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *ConsumerGroupHeartbeatRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Group = v + } + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.MemberID = v + } + { + v := b.Int32() + s.MemberEpoch = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.InstanceID = v + } + { 
+ var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.RackID = v + } + { + v := b.Int32() + s.RebalanceTimeoutMillis = v + } + { + v := s.SubscribedTopicNames + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if version < 0 || l == 0 { + a = []string{} + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]string, l)...) + } + for i := int32(0); i < l; i++ { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + a[i] = v + } + v = a + s.SubscribedTopicNames = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.ServerAssignor = v + } + { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if version < 0 || l == 0 { + a = []ConsumerGroupHeartbeatRequestTopic{} + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]ConsumerGroupHeartbeatRequestTopic, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Uuid() + s.TopicID = v + } + { + v := s.Partitions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]int32, l)...) + } + for i := int32(0); i < l; i++ { + v := b.Int32() + a[i] = v + } + v = a + s.Partitions = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrConsumerGroupHeartbeatRequest returns a pointer to a default ConsumerGroupHeartbeatRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrConsumerGroupHeartbeatRequest() *ConsumerGroupHeartbeatRequest { + var v ConsumerGroupHeartbeatRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to ConsumerGroupHeartbeatRequest. +func (v *ConsumerGroupHeartbeatRequest) Default() { + v.RebalanceTimeoutMillis = -1 +} + +// NewConsumerGroupHeartbeatRequest returns a default ConsumerGroupHeartbeatRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewConsumerGroupHeartbeatRequest() ConsumerGroupHeartbeatRequest { + var v ConsumerGroupHeartbeatRequest + v.Default() + return v +} + +type ConsumerGroupHeartbeatResponseAssignmentTopic struct { + TopicID [16]byte + + Partitions []int32 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to ConsumerGroupHeartbeatResponseAssignmentTopic. 
+func (v *ConsumerGroupHeartbeatResponseAssignmentTopic) Default() { +} + +// NewConsumerGroupHeartbeatResponseAssignmentTopic returns a default ConsumerGroupHeartbeatResponseAssignmentTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewConsumerGroupHeartbeatResponseAssignmentTopic() ConsumerGroupHeartbeatResponseAssignmentTopic { + var v ConsumerGroupHeartbeatResponseAssignmentTopic + v.Default() + return v +} + +type ConsumerGroupHeartbeatResponseAssignment struct { + // The topics partitions that can be used immediately. + Topics []ConsumerGroupHeartbeatResponseAssignmentTopic + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to ConsumerGroupHeartbeatResponseAssignment. +func (v *ConsumerGroupHeartbeatResponseAssignment) Default() { +} + +// NewConsumerGroupHeartbeatResponseAssignment returns a default ConsumerGroupHeartbeatResponseAssignment +// This is a shortcut for creating a struct and calling Default yourself. +func NewConsumerGroupHeartbeatResponseAssignment() ConsumerGroupHeartbeatResponseAssignment { + var v ConsumerGroupHeartbeatResponseAssignment + v.Default() + return v +} + +// ConsumerGroupHeartbeatResponse is returned from a ConsumerGroupHeartbeatRequest. +type ConsumerGroupHeartbeatResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ThrottleMillis is how long of a throttle Kafka will apply to the client + // after responding to this request. + ThrottleMillis int32 + + // ErrorCode is the error for this response. + // + // Supported errors: + // - GROUP_AUTHORIZATION_FAILED (version 0+) + // - NOT_COORDINATOR (version 0+) + // - COORDINATOR_NOT_AVAILABLE (version 0+) + // - COORDINATOR_LOAD_IN_PROGRESS (version 0+) + // - INVALID_REQUEST (version 0+) + // - UNKNOWN_MEMBER_ID (version 0+) + // - FENCED_MEMBER_EPOCH (version 0+) + // - UNSUPPORTED_ASSIGNOR (version 0+) + // - UNRELEASED_INSTANCE_ID (version 0+) + // - GROUP_MAX_SIZE_REACHED (version 0+) + ErrorCode int16 + + // A supplementary message if this errored. + ErrorMessage *string + + // The member ID generated by the coordinator; provided when joining + // with MemberEpoch=0. + MemberID *string + + // The member epoch. + MemberEpoch int32 + + // The heartbeat interval, in milliseconds. + HeartbeatIntervalMillis int32 + + // The assignment; null if not provided. + Assignment *ConsumerGroupHeartbeatResponseAssignment + + // UnknownTags are tags Kafka sent that we do not know the purpose of. 
+ UnknownTags Tags +} + +func (*ConsumerGroupHeartbeatResponse) Key() int16 { return 68 } +func (*ConsumerGroupHeartbeatResponse) MaxVersion() int16 { return 0 } +func (v *ConsumerGroupHeartbeatResponse) SetVersion(version int16) { v.Version = version } +func (v *ConsumerGroupHeartbeatResponse) GetVersion() int16 { return v.Version } +func (v *ConsumerGroupHeartbeatResponse) IsFlexible() bool { return v.Version >= 0 } +func (v *ConsumerGroupHeartbeatResponse) Throttle() (int32, bool) { + return v.ThrottleMillis, v.Version >= 0 +} + +func (v *ConsumerGroupHeartbeatResponse) SetThrottle(throttleMillis int32) { + v.ThrottleMillis = throttleMillis +} + +func (v *ConsumerGroupHeartbeatResponse) RequestKind() Request { + return &ConsumerGroupHeartbeatRequest{Version: v.Version} +} + +func (v *ConsumerGroupHeartbeatResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.ErrorMessage + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + { + v := v.MemberID + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + { + v := v.MemberEpoch + dst = kbin.AppendInt32(dst, v) + } + { + v := v.HeartbeatIntervalMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Assignment + if v == nil { + dst = append(dst, 255) + } else { + dst = append(dst, 1) + { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.TopicID + dst = kbin.AppendUuid(dst, v) + } + { + v := v.Partitions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := v[i] + dst = kbin.AppendInt32(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *ConsumerGroupHeartbeatResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *ConsumerGroupHeartbeatResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *ConsumerGroupHeartbeatResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + s := v + { + v := b.Int32() + s.ThrottleMillis = v + } + { + v := b.Int16() + s.ErrorCode = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.ErrorMessage = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.MemberID = v + } + { + v := b.Int32() + s.MemberEpoch = v 
+ } + { + v := b.Int32() + s.HeartbeatIntervalMillis = v + } + { + if present := b.Int8(); present != -1 && b.Ok() { + s.Assignment = new(ConsumerGroupHeartbeatResponseAssignment) + v := s.Assignment + v.Default() + s := v + { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]ConsumerGroupHeartbeatResponseAssignmentTopic, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Uuid() + s.TopicID = v + } + { + v := s.Partitions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]int32, l)...) + } + for i := int32(0); i < l; i++ { + v := b.Int32() + a[i] = v + } + v = a + s.Partitions = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrConsumerGroupHeartbeatResponse returns a pointer to a default ConsumerGroupHeartbeatResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrConsumerGroupHeartbeatResponse() *ConsumerGroupHeartbeatResponse { + var v ConsumerGroupHeartbeatResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to ConsumerGroupHeartbeatResponse. +func (v *ConsumerGroupHeartbeatResponse) Default() { + { + v := &v.Assignment + _ = v + } +} + +// NewConsumerGroupHeartbeatResponse returns a default ConsumerGroupHeartbeatResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewConsumerGroupHeartbeatResponse() ConsumerGroupHeartbeatResponse { + var v ConsumerGroupHeartbeatResponse + v.Default() + return v +} + +// RequestForKey returns the request corresponding to the given request key +// or nil if the key is unknown. 
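// A decoding sketch for RequestForKey, as documented above: given the api key
// and version from a parsed request header, build the matching request struct
// and read the body into it. Illustrative only; key, version, and body are
// assumed to come from the caller.
//
//	req := kmsg.RequestForKey(key) // nil if the key is unknown
//	if req == nil {
//		return fmt.Errorf("unknown api key %d", key)
//	}
//	req.SetVersion(version)
//	if err := req.ReadFrom(body); err != nil {
//		return err
//	}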
+func RequestForKey(key int16) Request { + switch key { + default: + return nil + case 0: + return NewPtrProduceRequest() + case 1: + return NewPtrFetchRequest() + case 2: + return NewPtrListOffsetsRequest() + case 3: + return NewPtrMetadataRequest() + case 4: + return NewPtrLeaderAndISRRequest() + case 5: + return NewPtrStopReplicaRequest() + case 6: + return NewPtrUpdateMetadataRequest() + case 7: + return NewPtrControlledShutdownRequest() + case 8: + return NewPtrOffsetCommitRequest() + case 9: + return NewPtrOffsetFetchRequest() + case 10: + return NewPtrFindCoordinatorRequest() + case 11: + return NewPtrJoinGroupRequest() + case 12: + return NewPtrHeartbeatRequest() + case 13: + return NewPtrLeaveGroupRequest() + case 14: + return NewPtrSyncGroupRequest() + case 15: + return NewPtrDescribeGroupsRequest() + case 16: + return NewPtrListGroupsRequest() + case 17: + return NewPtrSASLHandshakeRequest() + case 18: + return NewPtrApiVersionsRequest() + case 19: + return NewPtrCreateTopicsRequest() + case 20: + return NewPtrDeleteTopicsRequest() + case 21: + return NewPtrDeleteRecordsRequest() + case 22: + return NewPtrInitProducerIDRequest() + case 23: + return NewPtrOffsetForLeaderEpochRequest() + case 24: + return NewPtrAddPartitionsToTxnRequest() + case 25: + return NewPtrAddOffsetsToTxnRequest() + case 26: + return NewPtrEndTxnRequest() + case 27: + return NewPtrWriteTxnMarkersRequest() + case 28: + return NewPtrTxnOffsetCommitRequest() + case 29: + return NewPtrDescribeACLsRequest() + case 30: + return NewPtrCreateACLsRequest() + case 31: + return NewPtrDeleteACLsRequest() + case 32: + return NewPtrDescribeConfigsRequest() + case 33: + return NewPtrAlterConfigsRequest() + case 34: + return NewPtrAlterReplicaLogDirsRequest() + case 35: + return NewPtrDescribeLogDirsRequest() + case 36: + return NewPtrSASLAuthenticateRequest() + case 37: + return NewPtrCreatePartitionsRequest() + case 38: + return NewPtrCreateDelegationTokenRequest() + case 39: + return NewPtrRenewDelegationTokenRequest() + case 40: + return NewPtrExpireDelegationTokenRequest() + case 41: + return NewPtrDescribeDelegationTokenRequest() + case 42: + return NewPtrDeleteGroupsRequest() + case 43: + return NewPtrElectLeadersRequest() + case 44: + return NewPtrIncrementalAlterConfigsRequest() + case 45: + return NewPtrAlterPartitionAssignmentsRequest() + case 46: + return NewPtrListPartitionReassignmentsRequest() + case 47: + return NewPtrOffsetDeleteRequest() + case 48: + return NewPtrDescribeClientQuotasRequest() + case 49: + return NewPtrAlterClientQuotasRequest() + case 50: + return NewPtrDescribeUserSCRAMCredentialsRequest() + case 51: + return NewPtrAlterUserSCRAMCredentialsRequest() + case 52: + return NewPtrVoteRequest() + case 53: + return NewPtrBeginQuorumEpochRequest() + case 54: + return NewPtrEndQuorumEpochRequest() + case 55: + return NewPtrDescribeQuorumRequest() + case 56: + return NewPtrAlterPartitionRequest() + case 57: + return NewPtrUpdateFeaturesRequest() + case 58: + return NewPtrEnvelopeRequest() + case 59: + return NewPtrFetchSnapshotRequest() + case 60: + return NewPtrDescribeClusterRequest() + case 61: + return NewPtrDescribeProducersRequest() + case 62: + return NewPtrBrokerRegistrationRequest() + case 63: + return NewPtrBrokerHeartbeatRequest() + case 64: + return NewPtrUnregisterBrokerRequest() + case 65: + return NewPtrDescribeTransactionsRequest() + case 66: + return NewPtrListTransactionsRequest() + case 67: + return NewPtrAllocateProducerIDsRequest() + case 68: + return 
NewPtrConsumerGroupHeartbeatRequest() + } +} + +// ResponseForKey returns the response corresponding to the given request key +// or nil if the key is unknown. +func ResponseForKey(key int16) Response { + switch key { + default: + return nil + case 0: + return NewPtrProduceResponse() + case 1: + return NewPtrFetchResponse() + case 2: + return NewPtrListOffsetsResponse() + case 3: + return NewPtrMetadataResponse() + case 4: + return NewPtrLeaderAndISRResponse() + case 5: + return NewPtrStopReplicaResponse() + case 6: + return NewPtrUpdateMetadataResponse() + case 7: + return NewPtrControlledShutdownResponse() + case 8: + return NewPtrOffsetCommitResponse() + case 9: + return NewPtrOffsetFetchResponse() + case 10: + return NewPtrFindCoordinatorResponse() + case 11: + return NewPtrJoinGroupResponse() + case 12: + return NewPtrHeartbeatResponse() + case 13: + return NewPtrLeaveGroupResponse() + case 14: + return NewPtrSyncGroupResponse() + case 15: + return NewPtrDescribeGroupsResponse() + case 16: + return NewPtrListGroupsResponse() + case 17: + return NewPtrSASLHandshakeResponse() + case 18: + return NewPtrApiVersionsResponse() + case 19: + return NewPtrCreateTopicsResponse() + case 20: + return NewPtrDeleteTopicsResponse() + case 21: + return NewPtrDeleteRecordsResponse() + case 22: + return NewPtrInitProducerIDResponse() + case 23: + return NewPtrOffsetForLeaderEpochResponse() + case 24: + return NewPtrAddPartitionsToTxnResponse() + case 25: + return NewPtrAddOffsetsToTxnResponse() + case 26: + return NewPtrEndTxnResponse() + case 27: + return NewPtrWriteTxnMarkersResponse() + case 28: + return NewPtrTxnOffsetCommitResponse() + case 29: + return NewPtrDescribeACLsResponse() + case 30: + return NewPtrCreateACLsResponse() + case 31: + return NewPtrDeleteACLsResponse() + case 32: + return NewPtrDescribeConfigsResponse() + case 33: + return NewPtrAlterConfigsResponse() + case 34: + return NewPtrAlterReplicaLogDirsResponse() + case 35: + return NewPtrDescribeLogDirsResponse() + case 36: + return NewPtrSASLAuthenticateResponse() + case 37: + return NewPtrCreatePartitionsResponse() + case 38: + return NewPtrCreateDelegationTokenResponse() + case 39: + return NewPtrRenewDelegationTokenResponse() + case 40: + return NewPtrExpireDelegationTokenResponse() + case 41: + return NewPtrDescribeDelegationTokenResponse() + case 42: + return NewPtrDeleteGroupsResponse() + case 43: + return NewPtrElectLeadersResponse() + case 44: + return NewPtrIncrementalAlterConfigsResponse() + case 45: + return NewPtrAlterPartitionAssignmentsResponse() + case 46: + return NewPtrListPartitionReassignmentsResponse() + case 47: + return NewPtrOffsetDeleteResponse() + case 48: + return NewPtrDescribeClientQuotasResponse() + case 49: + return NewPtrAlterClientQuotasResponse() + case 50: + return NewPtrDescribeUserSCRAMCredentialsResponse() + case 51: + return NewPtrAlterUserSCRAMCredentialsResponse() + case 52: + return NewPtrVoteResponse() + case 53: + return NewPtrBeginQuorumEpochResponse() + case 54: + return NewPtrEndQuorumEpochResponse() + case 55: + return NewPtrDescribeQuorumResponse() + case 56: + return NewPtrAlterPartitionResponse() + case 57: + return NewPtrUpdateFeaturesResponse() + case 58: + return NewPtrEnvelopeResponse() + case 59: + return NewPtrFetchSnapshotResponse() + case 60: + return NewPtrDescribeClusterResponse() + case 61: + return NewPtrDescribeProducersResponse() + case 62: + return NewPtrBrokerRegistrationResponse() + case 63: + return NewPtrBrokerHeartbeatResponse() + case 64: + return 
NewPtrUnregisterBrokerResponse() + case 65: + return NewPtrDescribeTransactionsResponse() + case 66: + return NewPtrListTransactionsResponse() + case 67: + return NewPtrAllocateProducerIDsResponse() + case 68: + return NewPtrConsumerGroupHeartbeatResponse() + } +} + +// NameForKey returns the name (e.g., "Fetch") corresponding to a given request key +// or "" if the key is unknown. +func NameForKey(key int16) string { + switch key { + default: + return "Unknown" + case 0: + return "Produce" + case 1: + return "Fetch" + case 2: + return "ListOffsets" + case 3: + return "Metadata" + case 4: + return "LeaderAndISR" + case 5: + return "StopReplica" + case 6: + return "UpdateMetadata" + case 7: + return "ControlledShutdown" + case 8: + return "OffsetCommit" + case 9: + return "OffsetFetch" + case 10: + return "FindCoordinator" + case 11: + return "JoinGroup" + case 12: + return "Heartbeat" + case 13: + return "LeaveGroup" + case 14: + return "SyncGroup" + case 15: + return "DescribeGroups" + case 16: + return "ListGroups" + case 17: + return "SASLHandshake" + case 18: + return "ApiVersions" + case 19: + return "CreateTopics" + case 20: + return "DeleteTopics" + case 21: + return "DeleteRecords" + case 22: + return "InitProducerID" + case 23: + return "OffsetForLeaderEpoch" + case 24: + return "AddPartitionsToTxn" + case 25: + return "AddOffsetsToTxn" + case 26: + return "EndTxn" + case 27: + return "WriteTxnMarkers" + case 28: + return "TxnOffsetCommit" + case 29: + return "DescribeACLs" + case 30: + return "CreateACLs" + case 31: + return "DeleteACLs" + case 32: + return "DescribeConfigs" + case 33: + return "AlterConfigs" + case 34: + return "AlterReplicaLogDirs" + case 35: + return "DescribeLogDirs" + case 36: + return "SASLAuthenticate" + case 37: + return "CreatePartitions" + case 38: + return "CreateDelegationToken" + case 39: + return "RenewDelegationToken" + case 40: + return "ExpireDelegationToken" + case 41: + return "DescribeDelegationToken" + case 42: + return "DeleteGroups" + case 43: + return "ElectLeaders" + case 44: + return "IncrementalAlterConfigs" + case 45: + return "AlterPartitionAssignments" + case 46: + return "ListPartitionReassignments" + case 47: + return "OffsetDelete" + case 48: + return "DescribeClientQuotas" + case 49: + return "AlterClientQuotas" + case 50: + return "DescribeUserSCRAMCredentials" + case 51: + return "AlterUserSCRAMCredentials" + case 52: + return "Vote" + case 53: + return "BeginQuorumEpoch" + case 54: + return "EndQuorumEpoch" + case 55: + return "DescribeQuorum" + case 56: + return "AlterPartition" + case 57: + return "UpdateFeatures" + case 58: + return "Envelope" + case 59: + return "FetchSnapshot" + case 60: + return "DescribeCluster" + case 61: + return "DescribeProducers" + case 62: + return "BrokerRegistration" + case 63: + return "BrokerHeartbeat" + case 64: + return "UnregisterBroker" + case 65: + return "DescribeTransactions" + case 66: + return "ListTransactions" + case 67: + return "AllocateProducerIDs" + case 68: + return "ConsumerGroupHeartbeat" + } +} + +// Key is a typed representation of a request key, with helper functions. 
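Taken together, RequestForKey, ResponseForKey, NameForKey, and the Key constants that follow give a full two-way mapping between numeric protocol keys and their typed request/response structs. A minimal usage sketch, assuming the kmsg import path used throughout this patch:

package main

import (
	"fmt"

	"github.com/twmb/franz-go/pkg/kmsg"
)

func main() {
	k := kmsg.ConsumerGroupHeartbeat       // Key 68, defined below
	fmt.Println(k.Name())                  // "ConsumerGroupHeartbeat"
	req := k.Request()                     // *kmsg.ConsumerGroupHeartbeatRequest, Default() already applied
	resp := kmsg.ResponseForKey(k.Int16()) // *kmsg.ConsumerGroupHeartbeatResponse
	fmt.Printf("%T %T\n", req, resp)
	fmt.Println(kmsg.RequestForKey(999))   // <nil>: unknown keys return nil
}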
+type Key int16 + +const ( + Produce Key = 0 + Fetch Key = 1 + ListOffsets Key = 2 + Metadata Key = 3 + LeaderAndISR Key = 4 + StopReplica Key = 5 + UpdateMetadata Key = 6 + ControlledShutdown Key = 7 + OffsetCommit Key = 8 + OffsetFetch Key = 9 + FindCoordinator Key = 10 + JoinGroup Key = 11 + Heartbeat Key = 12 + LeaveGroup Key = 13 + SyncGroup Key = 14 + DescribeGroups Key = 15 + ListGroups Key = 16 + SASLHandshake Key = 17 + ApiVersions Key = 18 + CreateTopics Key = 19 + DeleteTopics Key = 20 + DeleteRecords Key = 21 + InitProducerID Key = 22 + OffsetForLeaderEpoch Key = 23 + AddPartitionsToTxn Key = 24 + AddOffsetsToTxn Key = 25 + EndTxn Key = 26 + WriteTxnMarkers Key = 27 + TxnOffsetCommit Key = 28 + DescribeACLs Key = 29 + CreateACLs Key = 30 + DeleteACLs Key = 31 + DescribeConfigs Key = 32 + AlterConfigs Key = 33 + AlterReplicaLogDirs Key = 34 + DescribeLogDirs Key = 35 + SASLAuthenticate Key = 36 + CreatePartitions Key = 37 + CreateDelegationToken Key = 38 + RenewDelegationToken Key = 39 + ExpireDelegationToken Key = 40 + DescribeDelegationToken Key = 41 + DeleteGroups Key = 42 + ElectLeaders Key = 43 + IncrementalAlterConfigs Key = 44 + AlterPartitionAssignments Key = 45 + ListPartitionReassignments Key = 46 + OffsetDelete Key = 47 + DescribeClientQuotas Key = 48 + AlterClientQuotas Key = 49 + DescribeUserSCRAMCredentials Key = 50 + AlterUserSCRAMCredentials Key = 51 + Vote Key = 52 + BeginQuorumEpoch Key = 53 + EndQuorumEpoch Key = 54 + DescribeQuorum Key = 55 + AlterPartition Key = 56 + UpdateFeatures Key = 57 + Envelope Key = 58 + FetchSnapshot Key = 59 + DescribeCluster Key = 60 + DescribeProducers Key = 61 + BrokerRegistration Key = 62 + BrokerHeartbeat Key = 63 + UnregisterBroker Key = 64 + DescribeTransactions Key = 65 + ListTransactions Key = 66 + AllocateProducerIDs Key = 67 + ConsumerGroupHeartbeat Key = 68 +) + +// Name returns the name for this key. +func (k Key) Name() string { return NameForKey(int16(k)) } + +// Request returns a new request for this key if the key is known. +func (k Key) Request() Request { return RequestForKey(int16(k)) } + +// Response returns a new response for this key if the key is known. +func (k Key) Response() Response { return ResponseForKey(int16(k)) } + +// Int16 is an alias for int16(k). +func (k Key) Int16() int16 { return int16(k) } + +// A type of config. +// +// Possible values and their meanings: +// +// * 2 (TOPIC) +// +// * 4 (BROKER) +// +// * 8 (BROKER_LOGGER) +type ConfigResourceType int8 + +func (v ConfigResourceType) String() string { + switch v { + default: + return "UNKNOWN" + case 2: + return "TOPIC" + case 4: + return "BROKER" + case 8: + return "BROKER_LOGGER" + } +} + +func ConfigResourceTypeStrings() []string { + return []string{ + "TOPIC", + "BROKER", + "BROKER_LOGGER", + } +} + +// ParseConfigResourceType normalizes the input s and returns +// the value represented by the string. +// +// Normalizing works by stripping all dots, underscores, and dashes, +// trimming spaces, and lowercasing. 
+func ParseConfigResourceType(s string) (ConfigResourceType, error) { + switch strnorm(s) { + case "topic": + return 2, nil + case "broker": + return 4, nil + case "brokerlogger": + return 8, nil + default: + return 0, fmt.Errorf("ConfigResourceType: unable to parse %q", s) + } +} + +const ( + ConfigResourceTypeUnknown ConfigResourceType = 0 + ConfigResourceTypeTopic ConfigResourceType = 2 + ConfigResourceTypeBroker ConfigResourceType = 4 + ConfigResourceTypeBrokerLogger ConfigResourceType = 8 +) + +// MarshalText implements encoding.TextMarshaler. +func (e ConfigResourceType) MarshalText() (text []byte, err error) { + return []byte(e.String()), nil +} + +// UnmarshalText implements encoding.TextUnmarshaler. +func (e *ConfigResourceType) UnmarshalText(text []byte) error { + v, err := ParseConfigResourceType(string(text)) + *e = v + return err +} + +// Where a config entry is from. If there are no config synonyms, +// the source is DEFAULT_CONFIG. +// +// Possible values and their meanings: +// +// * 1 (DYNAMIC_TOPIC_CONFIG) +// Dynamic topic config for a specific topic. +// +// * 2 (DYNAMIC_BROKER_CONFIG) +// Dynamic broker config for a specific broker. +// +// * 3 (DYNAMIC_DEFAULT_BROKER_CONFIG) +// Dynamic broker config used as the default for all brokers in a cluster. +// +// * 4 (STATIC_BROKER_CONFIG) +// Static broker config provided at start up. +// +// * 5 (DEFAULT_CONFIG) +// Built-in default configuration for those that have defaults. +// +// * 6 (DYNAMIC_BROKER_LOGGER_CONFIG) +// Broker logger; see KIP-412. +type ConfigSource int8 + +func (v ConfigSource) String() string { + switch v { + default: + return "UNKNOWN" + case 1: + return "DYNAMIC_TOPIC_CONFIG" + case 2: + return "DYNAMIC_BROKER_CONFIG" + case 3: + return "DYNAMIC_DEFAULT_BROKER_CONFIG" + case 4: + return "STATIC_BROKER_CONFIG" + case 5: + return "DEFAULT_CONFIG" + case 6: + return "DYNAMIC_BROKER_LOGGER_CONFIG" + } +} + +func ConfigSourceStrings() []string { + return []string{ + "DYNAMIC_TOPIC_CONFIG", + "DYNAMIC_BROKER_CONFIG", + "DYNAMIC_DEFAULT_BROKER_CONFIG", + "STATIC_BROKER_CONFIG", + "DEFAULT_CONFIG", + "DYNAMIC_BROKER_LOGGER_CONFIG", + } +} + +// ParseConfigSource normalizes the input s and returns +// the value represented by the string. +// +// Normalizing works by stripping all dots, underscores, and dashes, +// trimming spaces, and lowercasing. +func ParseConfigSource(s string) (ConfigSource, error) { + switch strnorm(s) { + case "dynamictopicconfig": + return 1, nil + case "dynamicbrokerconfig": + return 2, nil + case "dynamicdefaultbrokerconfig": + return 3, nil + case "staticbrokerconfig": + return 4, nil + case "defaultconfig": + return 5, nil + case "dynamicbrokerloggerconfig": + return 6, nil + default: + return 0, fmt.Errorf("ConfigSource: unable to parse %q", s) + } +} + +const ( + ConfigSourceUnknown ConfigSource = 0 + ConfigSourceDynamicTopicConfig ConfigSource = 1 + ConfigSourceDynamicBrokerConfig ConfigSource = 2 + ConfigSourceDynamicDefaultBrokerConfig ConfigSource = 3 + ConfigSourceStaticBrokerConfig ConfigSource = 4 + ConfigSourceDefaultConfig ConfigSource = 5 + ConfigSourceDynamicBrokerLoggerConfig ConfigSource = 6 +) + +// MarshalText implements encoding.TextMarshaler. +func (e ConfigSource) MarshalText() (text []byte, err error) { + return []byte(e.String()), nil +} + +// UnmarshalText implements encoding.TextUnmarshaler. +func (e *ConfigSource) UnmarshalText(text []byte) error { + v, err := ParseConfigSource(string(text)) + *e = v + return err +} + +// A configuration data type. 
+// +// Possible values and their meanings: +// +// * 1 (BOOLEAN) +// +// * 2 (STRING) +// +// * 3 (INT) +// +// * 4 (SHORT) +// +// * 5 (LONG) +// +// * 6 (DOUBLE) +// +// * 7 (LIST) +// +// * 8 (CLASS) +// +// * 9 (PASSWORD) +type ConfigType int8 + +func (v ConfigType) String() string { + switch v { + default: + return "UNKNOWN" + case 1: + return "BOOLEAN" + case 2: + return "STRING" + case 3: + return "INT" + case 4: + return "SHORT" + case 5: + return "LONG" + case 6: + return "DOUBLE" + case 7: + return "LIST" + case 8: + return "CLASS" + case 9: + return "PASSWORD" + } +} + +func ConfigTypeStrings() []string { + return []string{ + "BOOLEAN", + "STRING", + "INT", + "SHORT", + "LONG", + "DOUBLE", + "LIST", + "CLASS", + "PASSWORD", + } +} + +// ParseConfigType normalizes the input s and returns +// the value represented by the string. +// +// Normalizing works by stripping all dots, underscores, and dashes, +// trimming spaces, and lowercasing. +func ParseConfigType(s string) (ConfigType, error) { + switch strnorm(s) { + case "boolean": + return 1, nil + case "string": + return 2, nil + case "int": + return 3, nil + case "short": + return 4, nil + case "long": + return 5, nil + case "double": + return 6, nil + case "list": + return 7, nil + case "class": + return 8, nil + case "password": + return 9, nil + default: + return 0, fmt.Errorf("ConfigType: unable to parse %q", s) + } +} + +const ( + ConfigTypeUnknown ConfigType = 0 + ConfigTypeBoolean ConfigType = 1 + ConfigTypeString ConfigType = 2 + ConfigTypeInt ConfigType = 3 + ConfigTypeShort ConfigType = 4 + ConfigTypeLong ConfigType = 5 + ConfigTypeDouble ConfigType = 6 + ConfigTypeList ConfigType = 7 + ConfigTypeClass ConfigType = 8 + ConfigTypePassword ConfigType = 9 +) + +// MarshalText implements encoding.TextMarshaler. +func (e ConfigType) MarshalText() (text []byte, err error) { + return []byte(e.String()), nil +} + +// UnmarshalText implements encoding.TextUnmarshaler. +func (e *ConfigType) UnmarshalText(text []byte) error { + v, err := ParseConfigType(string(text)) + *e = v + return err +} + +// An incremental configuration operation. +// +// Possible values and their meanings: +// +// * 0 (SET) +// +// * 1 (DELETE) +// +// * 2 (APPEND) +// +// * 3 (SUBTRACT) +type IncrementalAlterConfigOp int8 + +func (v IncrementalAlterConfigOp) String() string { + switch v { + default: + return "UNKNOWN" + case 0: + return "SET" + case 1: + return "DELETE" + case 2: + return "APPEND" + case 3: + return "SUBTRACT" + } +} + +func IncrementalAlterConfigOpStrings() []string { + return []string{ + "SET", + "DELETE", + "APPEND", + "SUBTRACT", + } +} + +// ParseIncrementalAlterConfigOp normalizes the input s and returns +// the value represented by the string. +// +// Normalizing works by stripping all dots, underscores, and dashes, +// trimming spaces, and lowercasing. +func ParseIncrementalAlterConfigOp(s string) (IncrementalAlterConfigOp, error) { + switch strnorm(s) { + case "set": + return 0, nil + case "delete": + return 1, nil + case "append": + return 2, nil + case "subtract": + return 3, nil + default: + return 0, fmt.Errorf("IncrementalAlterConfigOp: unable to parse %q", s) + } +} + +const ( + IncrementalAlterConfigOpSet IncrementalAlterConfigOp = 0 + IncrementalAlterConfigOpDelete IncrementalAlterConfigOp = 1 + IncrementalAlterConfigOpAppend IncrementalAlterConfigOp = 2 + IncrementalAlterConfigOpSubtract IncrementalAlterConfigOp = 3 +) + +// MarshalText implements encoding.TextMarshaler. 
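Each generated enum in this file follows the same shape: a String method, a Parse helper whose normalization strips dots, underscores, and dashes before lowercasing, typed constants, and text (un)marshalers. A short sketch using two of the types defined above (imports of fmt and kmsg assumed):

rt, err := kmsg.ParseConfigResourceType("BROKER_LOGGER") // normalizes to "brokerlogger"
op, _ := kmsg.ParseIncrementalAlterConfigOp("set")
fmt.Println(rt, op, err) // BROKER_LOGGER SET <nil>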
+func (e IncrementalAlterConfigOp) MarshalText() (text []byte, err error) { + return []byte(e.String()), nil +} + +// UnmarshalText implements encoding.TextUnmarshaler. +func (e *IncrementalAlterConfigOp) UnmarshalText(text []byte) error { + v, err := ParseIncrementalAlterConfigOp(string(text)) + *e = v + return err +} + +// ACLResourceType is a type of resource to use for ACLs. +// +// Possible values and their meanings: +// +// * 1 (ANY) +// +// * 2 (TOPIC) +// +// * 3 (GROUP) +// +// * 4 (CLUSTER) +// +// * 5 (TRANSACTIONAL_ID) +// +// * 6 (DELEGATION_TOKEN) +// +// * 7 (USER) +type ACLResourceType int8 + +func (v ACLResourceType) String() string { + switch v { + default: + return "UNKNOWN" + case 1: + return "ANY" + case 2: + return "TOPIC" + case 3: + return "GROUP" + case 4: + return "CLUSTER" + case 5: + return "TRANSACTIONAL_ID" + case 6: + return "DELEGATION_TOKEN" + case 7: + return "USER" + } +} + +func ACLResourceTypeStrings() []string { + return []string{ + "ANY", + "TOPIC", + "GROUP", + "CLUSTER", + "TRANSACTIONAL_ID", + "DELEGATION_TOKEN", + "USER", + } +} + +// ParseACLResourceType normalizes the input s and returns +// the value represented by the string. +// +// Normalizing works by stripping all dots, underscores, and dashes, +// trimming spaces, and lowercasing. +func ParseACLResourceType(s string) (ACLResourceType, error) { + switch strnorm(s) { + case "any": + return 1, nil + case "topic": + return 2, nil + case "group": + return 3, nil + case "cluster": + return 4, nil + case "transactionalid": + return 5, nil + case "delegationtoken": + return 6, nil + case "user": + return 7, nil + default: + return 0, fmt.Errorf("ACLResourceType: unable to parse %q", s) + } +} + +const ( + ACLResourceTypeUnknown ACLResourceType = 0 + ACLResourceTypeAny ACLResourceType = 1 + ACLResourceTypeTopic ACLResourceType = 2 + ACLResourceTypeGroup ACLResourceType = 3 + ACLResourceTypeCluster ACLResourceType = 4 + ACLResourceTypeTransactionalId ACLResourceType = 5 + ACLResourceTypeDelegationToken ACLResourceType = 6 + ACLResourceTypeUser ACLResourceType = 7 +) + +// MarshalText implements encoding.TextMarshaler. +func (e ACLResourceType) MarshalText() (text []byte, err error) { + return []byte(e.String()), nil +} + +// UnmarshalText implements encoding.TextUnmarshaler. +func (e *ACLResourceType) UnmarshalText(text []byte) error { + v, err := ParseACLResourceType(string(text)) + *e = v + return err +} + +// ACLResourcePatternType is how an acl's ResourceName is understood. +// +// This field was added with Kafka 2.0.0 for KIP-290. +// +// Possible values and their meanings: +// +// * 1 (ANY) +// Matches anything. +// +// * 2 (MATCH) +// Performs pattern matching; i.e., a literal match, or a prefix match, or wildcard. +// +// * 3 (LITERAL) +// The name must be an exact match. +// +// * 4 (PREFIXED) +// The name must have our requested name as a prefix (that is, "foo" will match on "foobar"). +type ACLResourcePatternType int8 + +func (v ACLResourcePatternType) String() string { + switch v { + default: + return "UNKNOWN" + case 1: + return "ANY" + case 2: + return "MATCH" + case 3: + return "LITERAL" + case 4: + return "PREFIXED" + } +} + +func ACLResourcePatternTypeStrings() []string { + return []string{ + "ANY", + "MATCH", + "LITERAL", + "PREFIXED", + } +} + +// ParseACLResourcePatternType normalizes the input s and returns +// the value represented by the string. +// +// Normalizing works by stripping all dots, underscores, and dashes, +// trimming spaces, and lowercasing. 
+func ParseACLResourcePatternType(s string) (ACLResourcePatternType, error) { + switch strnorm(s) { + case "any": + return 1, nil + case "match": + return 2, nil + case "literal": + return 3, nil + case "prefixed": + return 4, nil + default: + return 0, fmt.Errorf("ACLResourcePatternType: unable to parse %q", s) + } +} + +const ( + ACLResourcePatternTypeUnknown ACLResourcePatternType = 0 + ACLResourcePatternTypeAny ACLResourcePatternType = 1 + ACLResourcePatternTypeMatch ACLResourcePatternType = 2 + ACLResourcePatternTypeLiteral ACLResourcePatternType = 3 + ACLResourcePatternTypePrefixed ACLResourcePatternType = 4 +) + +// MarshalText implements encoding.TextMarshaler. +func (e ACLResourcePatternType) MarshalText() (text []byte, err error) { + return []byte(e.String()), nil +} + +// UnmarshalText implements encoding.TextUnmarshaler. +func (e *ACLResourcePatternType) UnmarshalText(text []byte) error { + v, err := ParseACLResourcePatternType(string(text)) + *e = v + return err +} + +// An ACL permission type. +// +// Possible values and their meanings: +// +// * 1 (ANY) +// Any permission. +// +// * 2 (DENY) +// Any deny permission. +// +// * 3 (ALLOW) +// Any allow permission. +type ACLPermissionType int8 + +func (v ACLPermissionType) String() string { + switch v { + default: + return "UNKNOWN" + case 1: + return "ANY" + case 2: + return "DENY" + case 3: + return "ALLOW" + } +} + +func ACLPermissionTypeStrings() []string { + return []string{ + "ANY", + "DENY", + "ALLOW", + } +} + +// ParseACLPermissionType normalizes the input s and returns +// the value represented by the string. +// +// Normalizing works by stripping all dots, underscores, and dashes, +// trimming spaces, and lowercasing. +func ParseACLPermissionType(s string) (ACLPermissionType, error) { + switch strnorm(s) { + case "any": + return 1, nil + case "deny": + return 2, nil + case "allow": + return 3, nil + default: + return 0, fmt.Errorf("ACLPermissionType: unable to parse %q", s) + } +} + +const ( + ACLPermissionTypeUnknown ACLPermissionType = 0 + ACLPermissionTypeAny ACLPermissionType = 1 + ACLPermissionTypeDeny ACLPermissionType = 2 + ACLPermissionTypeAllow ACLPermissionType = 3 +) + +// MarshalText implements encoding.TextMarshaler. +func (e ACLPermissionType) MarshalText() (text []byte, err error) { + return []byte(e.String()), nil +} + +// UnmarshalText implements encoding.TextUnmarshaler. +func (e *ACLPermissionType) UnmarshalText(text []byte) error { + v, err := ParseACLPermissionType(string(text)) + *e = v + return err +} + +// An ACL operation. +// +// Possible values and their meanings: +// +// * 1 (ANY) +// Matches anything. +// +// * 2 (ALL) +// Matches anything granted all permissions. 
+// +// * 3 (READ) +// +// * 4 (WRITE) +// +// * 5 (CREATE) +// +// * 6 (DELETE) +// +// * 7 (ALTER) +// +// * 8 (DESCRIBE) +// +// * 9 (CLUSTER_ACTION) +// +// * 10 (DESCRIBE_CONFIGS) +// +// * 11 (ALTER_CONFIGS) +// +// * 12 (IDEMPOTENT_WRITE) +// +// * 13 (CREATE_TOKENS) +// +// * 14 (DESCRIBE_TOKENS) +type ACLOperation int8 + +func (v ACLOperation) String() string { + switch v { + default: + return "UNKNOWN" + case 1: + return "ANY" + case 2: + return "ALL" + case 3: + return "READ" + case 4: + return "WRITE" + case 5: + return "CREATE" + case 6: + return "DELETE" + case 7: + return "ALTER" + case 8: + return "DESCRIBE" + case 9: + return "CLUSTER_ACTION" + case 10: + return "DESCRIBE_CONFIGS" + case 11: + return "ALTER_CONFIGS" + case 12: + return "IDEMPOTENT_WRITE" + case 13: + return "CREATE_TOKENS" + case 14: + return "DESCRIBE_TOKENS" + } +} + +func ACLOperationStrings() []string { + return []string{ + "ANY", + "ALL", + "READ", + "WRITE", + "CREATE", + "DELETE", + "ALTER", + "DESCRIBE", + "CLUSTER_ACTION", + "DESCRIBE_CONFIGS", + "ALTER_CONFIGS", + "IDEMPOTENT_WRITE", + "CREATE_TOKENS", + "DESCRIBE_TOKENS", + } +} + +// ParseACLOperation normalizes the input s and returns +// the value represented by the string. +// +// Normalizing works by stripping all dots, underscores, and dashes, +// trimming spaces, and lowercasing. +func ParseACLOperation(s string) (ACLOperation, error) { + switch strnorm(s) { + case "any": + return 1, nil + case "all": + return 2, nil + case "read": + return 3, nil + case "write": + return 4, nil + case "create": + return 5, nil + case "delete": + return 6, nil + case "alter": + return 7, nil + case "describe": + return 8, nil + case "clusteraction": + return 9, nil + case "describeconfigs": + return 10, nil + case "alterconfigs": + return 11, nil + case "idempotentwrite": + return 12, nil + case "createtokens": + return 13, nil + case "describetokens": + return 14, nil + default: + return 0, fmt.Errorf("ACLOperation: unable to parse %q", s) + } +} + +const ( + ACLOperationUnknown ACLOperation = 0 + ACLOperationAny ACLOperation = 1 + ACLOperationAll ACLOperation = 2 + ACLOperationRead ACLOperation = 3 + ACLOperationWrite ACLOperation = 4 + ACLOperationCreate ACLOperation = 5 + ACLOperationDelete ACLOperation = 6 + ACLOperationAlter ACLOperation = 7 + ACLOperationDescribe ACLOperation = 8 + ACLOperationClusterAction ACLOperation = 9 + ACLOperationDescribeConfigs ACLOperation = 10 + ACLOperationAlterConfigs ACLOperation = 11 + ACLOperationIdempotentWrite ACLOperation = 12 + ACLOperationCreateTokens ACLOperation = 13 + ACLOperationDescribeTokens ACLOperation = 14 +) + +// MarshalText implements encoding.TextMarshaler. +func (e ACLOperation) MarshalText() (text []byte, err error) { + return []byte(e.String()), nil +} + +// UnmarshalText implements encoding.TextUnmarshaler. +func (e *ACLOperation) UnmarshalText(text []byte) error { + v, err := ParseACLOperation(string(text)) + *e = v + return err +} + +// TransactionState is the state of a transaction. 
+// +// Possible values and their meanings: +// +// * 0 (Empty) +// +// * 1 (Ongoing) +// +// * 2 (PrepareCommit) +// +// * 3 (PrepareAbort) +// +// * 4 (CompleteCommit) +// +// * 5 (CompleteAbort) +// +// * 6 (Dead) +// +// * 7 (PrepareEpochFence) +type TransactionState int8 + +func (v TransactionState) String() string { + switch v { + default: + return "Unknown" + case 0: + return "Empty" + case 1: + return "Ongoing" + case 2: + return "PrepareCommit" + case 3: + return "PrepareAbort" + case 4: + return "CompleteCommit" + case 5: + return "CompleteAbort" + case 6: + return "Dead" + case 7: + return "PrepareEpochFence" + } +} + +func TransactionStateStrings() []string { + return []string{ + "Empty", + "Ongoing", + "PrepareCommit", + "PrepareAbort", + "CompleteCommit", + "CompleteAbort", + "Dead", + "PrepareEpochFence", + } +} + +// ParseTransactionState normalizes the input s and returns +// the value represented by the string. +// +// Normalizing works by stripping all dots, underscores, and dashes, +// trimming spaces, and lowercasing. +func ParseTransactionState(s string) (TransactionState, error) { + switch strnorm(s) { + case "empty": + return 0, nil + case "ongoing": + return 1, nil + case "preparecommit": + return 2, nil + case "prepareabort": + return 3, nil + case "completecommit": + return 4, nil + case "completeabort": + return 5, nil + case "dead": + return 6, nil + case "prepareepochfence": + return 7, nil + default: + return 0, fmt.Errorf("TransactionState: unable to parse %q", s) + } +} + +const ( + TransactionStateEmpty TransactionState = 0 + TransactionStateOngoing TransactionState = 1 + TransactionStatePrepareCommit TransactionState = 2 + TransactionStatePrepareAbort TransactionState = 3 + TransactionStateCompleteCommit TransactionState = 4 + TransactionStateCompleteAbort TransactionState = 5 + TransactionStateDead TransactionState = 6 + TransactionStatePrepareEpochFence TransactionState = 7 +) + +// MarshalText implements encoding.TextMarshaler. +func (e TransactionState) MarshalText() (text []byte, err error) { + return []byte(e.String()), nil +} + +// UnmarshalText implements encoding.TextUnmarshaler. +func (e *TransactionState) UnmarshalText(text []byte) error { + v, err := ParseTransactionState(string(text)) + *e = v + return err +} + +// QuotasMatchType specifies how to match a Quota entity as part of the DescribeClientQuotasRequestComponent. +// +// Possible values and their meanings: +// +// * 0 (EXACT) +// Matches all quotas for the given EntityType with names equal to the Match field. +// +// * 1 (DEFAULT) +// Matches the default for the given EntityType. +// +// * 2 (ANY) +// Matches all named quotas and default quotas for the given EntityType. +type QuotasMatchType int8 + +func (v QuotasMatchType) String() string { + switch v { + default: + return "UNKNOWN" + case 0: + return "EXACT" + case 1: + return "DEFAULT" + case 2: + return "ANY" + } +} + +func QuotasMatchTypeStrings() []string { + return []string{ + "EXACT", + "DEFAULT", + "ANY", + } +} + +// ParseQuotasMatchType normalizes the input s and returns +// the value represented by the string. +// +// Normalizing works by stripping all dots, underscores, and dashes, +// trimming spaces, and lowercasing. 
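Because each of these enums implements encoding.TextMarshaler and encoding.TextUnmarshaler, they round-trip through encoding/json without extra glue. An illustrative sketch with TransactionState (encoding/json, fmt, and kmsg imports assumed):

var st kmsg.TransactionState
_ = json.Unmarshal([]byte(`"prepare_commit"`), &st) // UnmarshalText -> ParseTransactionState
fmt.Println(st)                                     // PrepareCommit
out, _ := json.Marshal(kmsg.TransactionStateOngoing)
fmt.Println(string(out))                            // "Ongoing"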
+func ParseQuotasMatchType(s string) (QuotasMatchType, error) { + switch strnorm(s) { + case "exact": + return 0, nil + case "default": + return 1, nil + case "any": + return 2, nil + default: + return 0, fmt.Errorf("QuotasMatchType: unable to parse %q", s) + } +} + +const ( + QuotasMatchTypeExact QuotasMatchType = 0 + QuotasMatchTypeDefault QuotasMatchType = 1 + QuotasMatchTypeAny QuotasMatchType = 2 +) + +// MarshalText implements encoding.TextMarshaler. +func (e QuotasMatchType) MarshalText() (text []byte, err error) { + return []byte(e.String()), nil +} + +// UnmarshalText implements encoding.TextUnmarshaler. +func (e *QuotasMatchType) UnmarshalText(text []byte) error { + v, err := ParseQuotasMatchType(string(text)) + *e = v + return err +} + +// Possible values and their meanings: +// +// * 0 (ABORT) +// +// * 1 (COMMIT) +// +// * 2 (QUORUM_REASSIGNMENT) +// +// * 3 (LEADER_CHANGE) +type ControlRecordKeyType int8 + +func (v ControlRecordKeyType) String() string { + switch v { + default: + return "UNKNOWN" + case 0: + return "ABORT" + case 1: + return "COMMIT" + case 2: + return "QUORUM_REASSIGNMENT" + case 3: + return "LEADER_CHANGE" + } +} + +func ControlRecordKeyTypeStrings() []string { + return []string{ + "ABORT", + "COMMIT", + "QUORUM_REASSIGNMENT", + "LEADER_CHANGE", + } +} + +// ParseControlRecordKeyType normalizes the input s and returns +// the value represented by the string. +// +// Normalizing works by stripping all dots, underscores, and dashes, +// trimming spaces, and lowercasing. +func ParseControlRecordKeyType(s string) (ControlRecordKeyType, error) { + switch strnorm(s) { + case "abort": + return 0, nil + case "commit": + return 1, nil + case "quorumreassignment": + return 2, nil + case "leaderchange": + return 3, nil + default: + return 0, fmt.Errorf("ControlRecordKeyType: unable to parse %q", s) + } +} + +const ( + ControlRecordKeyTypeAbort ControlRecordKeyType = 0 + ControlRecordKeyTypeCommit ControlRecordKeyType = 1 + ControlRecordKeyTypeQuorumReassignment ControlRecordKeyType = 2 + ControlRecordKeyTypeLeaderChange ControlRecordKeyType = 3 +) + +// MarshalText implements encoding.TextMarshaler. +func (e ControlRecordKeyType) MarshalText() (text []byte, err error) { + return []byte(e.String()), nil +} + +// UnmarshalText implements encoding.TextUnmarshaler. +func (e *ControlRecordKeyType) UnmarshalText(text []byte) error { + v, err := ParseControlRecordKeyType(string(text)) + *e = v + return err +} + +func strnorm(s string) string { + s = strings.ReplaceAll(s, ".", "") + s = strings.ReplaceAll(s, "_", "") + s = strings.ReplaceAll(s, "-", "") + s = strings.TrimSpace(s) + s = strings.ToLower(s) + return s +} diff --git a/vendor/github.com/twmb/franz-go/pkg/kmsg/internal/kbin/primitives.go b/vendor/github.com/twmb/franz-go/pkg/kmsg/internal/kbin/primitives.go new file mode 100644 index 000000000000..2c5990d06a21 --- /dev/null +++ b/vendor/github.com/twmb/franz-go/pkg/kmsg/internal/kbin/primitives.go @@ -0,0 +1,850 @@ +// Package kbin contains Kafka primitive reading and writing functions. +package kbin + +import ( + "encoding/binary" + "errors" + "math" + "math/bits" + "reflect" + "unsafe" +) + +// This file contains primitive type encoding and decoding. +// +// The Reader helper can be used even when content runs out +// or an error is hit; all other number requests will return +// zero so a decode will basically no-op. + +// ErrNotEnoughData is returned when a type could not fully decode +// from a slice because the slice did not have enough data. 
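The same primitives are also vendored as the importable github.com/twmb/franz-go/pkg/kbin package earlier in this patch; the copy below is internal to kmsg. Assuming the importable variant exposes the identical API shown here, the "keep decoding, check at the end" discipline described above looks like this in practice:

r := kbin.Reader{Src: []byte{0x00, 0x01}}
fmt.Println(r.Int16())    // 1
fmt.Println(r.Int32())    // 0: ran out of data, the reader is now invalidated
fmt.Println(r.Ok())       // false
fmt.Println(r.Complete()) // response did not contain enough data to be valid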
+var ErrNotEnoughData = errors.New("response did not contain enough data to be valid") + +// AppendBool appends 1 for true or 0 for false to dst. +func AppendBool(dst []byte, v bool) []byte { + if v { + return append(dst, 1) + } + return append(dst, 0) +} + +// AppendInt8 appends an int8 to dst. +func AppendInt8(dst []byte, i int8) []byte { + return append(dst, byte(i)) +} + +// AppendInt16 appends a big endian int16 to dst. +func AppendInt16(dst []byte, i int16) []byte { + return AppendUint16(dst, uint16(i)) +} + +// AppendUint16 appends a big endian uint16 to dst. +func AppendUint16(dst []byte, u uint16) []byte { + return append(dst, byte(u>>8), byte(u)) +} + +// AppendInt32 appends a big endian int32 to dst. +func AppendInt32(dst []byte, i int32) []byte { + return AppendUint32(dst, uint32(i)) +} + +// AppendInt64 appends a big endian int64 to dst. +func AppendInt64(dst []byte, i int64) []byte { + return appendUint64(dst, uint64(i)) +} + +// AppendFloat64 appends a big endian float64 to dst. +func AppendFloat64(dst []byte, f float64) []byte { + return appendUint64(dst, math.Float64bits(f)) +} + +// AppendUuid appends the 16 uuid bytes to dst. +func AppendUuid(dst []byte, uuid [16]byte) []byte { + return append(dst, uuid[:]...) +} + +func appendUint64(dst []byte, u uint64) []byte { + return append(dst, byte(u>>56), byte(u>>48), byte(u>>40), byte(u>>32), + byte(u>>24), byte(u>>16), byte(u>>8), byte(u)) +} + +// AppendUint32 appends a big endian uint32 to dst. +func AppendUint32(dst []byte, u uint32) []byte { + return append(dst, byte(u>>24), byte(u>>16), byte(u>>8), byte(u)) +} + +// uvarintLens could only be length 65, but using 256 allows bounds check +// elimination on lookup. +const uvarintLens = "\x01\x01\x01\x01\x01\x01\x01\x01\x02\x02\x02\x02\x02\x02\x02\x03\x03\x03\x03\x03\x03\x03\x04\x04\x04\x04\x04\x04\x04\x05\x05\x05\x05\x05\x05\x05\x06\x06\x06\x06\x06\x06\x06\x07\x07\x07\x07\x07\x07\x07\x08\x08\x08\x08\x08\x08\x08\x09\x09\x09\x09\x09\x09\x09\x0a\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" + +// VarintLen returns how long i would be if it were varint encoded. +func VarintLen(i int32) int { + u := uint32(i)<<1 ^ uint32(i>>31) + return UvarintLen(u) +} + +// UvarintLen returns how long u would be if it were uvarint encoded. +func UvarintLen(u uint32) int { + return int(uvarintLens[byte(bits.Len32(u))]) +} + +func uvarlongLen(u uint64) int { + return int(uvarintLens[byte(bits.Len64(u))]) +} + +// Varint is a loop unrolled 32 bit varint decoder. The return semantics +// are the same as binary.Varint, with the added benefit that overflows +// in 5 byte encodings are handled rather than left to the user. +func Varint(in []byte) (int32, int) { + x, n := Uvarint(in) + return int32((x >> 1) ^ -(x & 1)), n +} + +// Uvarint is a loop unrolled 32 bit uvarint decoder. 
The return semantics +// are the same as binary.Uvarint, with the added benefit that overflows +// in 5 byte encodings are handled rather than left to the user. +func Uvarint(in []byte) (uint32, int) { + var x uint32 + var overflow int + + if len(in) < 1 { + goto fail + } + + x = uint32(in[0] & 0x7f) + if in[0]&0x80 == 0 { + return x, 1 + } else if len(in) < 2 { + goto fail + } + + x |= uint32(in[1]&0x7f) << 7 + if in[1]&0x80 == 0 { + return x, 2 + } else if len(in) < 3 { + goto fail + } + + x |= uint32(in[2]&0x7f) << 14 + if in[2]&0x80 == 0 { + return x, 3 + } else if len(in) < 4 { + goto fail + } + + x |= uint32(in[3]&0x7f) << 21 + if in[3]&0x80 == 0 { + return x, 4 + } else if len(in) < 5 { + goto fail + } + + x |= uint32(in[4]) << 28 + if in[4] <= 0x0f { + return x, 5 + } + + overflow = -5 + +fail: + return 0, overflow +} + +// Varlong is a loop unrolled 64 bit varint decoder. The return semantics +// are the same as binary.Varint, with the added benefit that overflows +// in 10 byte encodings are handled rather than left to the user. +func Varlong(in []byte) (int64, int) { + x, n := uvarlong(in) + return int64((x >> 1) ^ -(x & 1)), n +} + +func uvarlong(in []byte) (uint64, int) { + var x uint64 + var overflow int + + if len(in) < 1 { + goto fail + } + + x = uint64(in[0] & 0x7f) + if in[0]&0x80 == 0 { + return x, 1 + } else if len(in) < 2 { + goto fail + } + + x |= uint64(in[1]&0x7f) << 7 + if in[1]&0x80 == 0 { + return x, 2 + } else if len(in) < 3 { + goto fail + } + + x |= uint64(in[2]&0x7f) << 14 + if in[2]&0x80 == 0 { + return x, 3 + } else if len(in) < 4 { + goto fail + } + + x |= uint64(in[3]&0x7f) << 21 + if in[3]&0x80 == 0 { + return x, 4 + } else if len(in) < 5 { + goto fail + } + + x |= uint64(in[4]&0x7f) << 28 + if in[4]&0x80 == 0 { + return x, 5 + } else if len(in) < 6 { + goto fail + } + + x |= uint64(in[5]&0x7f) << 35 + if in[5]&0x80 == 0 { + return x, 6 + } else if len(in) < 7 { + goto fail + } + + x |= uint64(in[6]&0x7f) << 42 + if in[6]&0x80 == 0 { + return x, 7 + } else if len(in) < 8 { + goto fail + } + + x |= uint64(in[7]&0x7f) << 49 + if in[7]&0x80 == 0 { + return x, 8 + } else if len(in) < 9 { + goto fail + } + + x |= uint64(in[8]&0x7f) << 56 + if in[8]&0x80 == 0 { + return x, 9 + } else if len(in) < 10 { + goto fail + } + + x |= uint64(in[9]) << 63 + if in[9] <= 0x01 { + return x, 10 + } + + overflow = -10 + +fail: + return 0, overflow +} + +// AppendVarint appends a varint encoded i to dst. +func AppendVarint(dst []byte, i int32) []byte { + return AppendUvarint(dst, uint32(i)<<1^uint32(i>>31)) +} + +// AppendUvarint appends a uvarint encoded u to dst. +func AppendUvarint(dst []byte, u uint32) []byte { + switch UvarintLen(u) { + case 5: + return append(dst, + byte(u&0x7f|0x80), + byte((u>>7)&0x7f|0x80), + byte((u>>14)&0x7f|0x80), + byte((u>>21)&0x7f|0x80), + byte(u>>28)) + case 4: + return append(dst, + byte(u&0x7f|0x80), + byte((u>>7)&0x7f|0x80), + byte((u>>14)&0x7f|0x80), + byte(u>>21)) + case 3: + return append(dst, + byte(u&0x7f|0x80), + byte((u>>7)&0x7f|0x80), + byte(u>>14)) + case 2: + return append(dst, + byte(u&0x7f|0x80), + byte(u>>7)) + case 1: + return append(dst, byte(u)) + } + return dst +} + +// AppendVarlong appends a varint encoded i to dst. 
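The Append/decode pairs are symmetric: AppendVarint zigzag-encodes before writing, and Varint undoes both steps, returning the value plus the number of bytes consumed (or a non-positive count on overflow or truncation). A small sketch, with fmt and the importable kbin package assumed:

buf := kbin.AppendVarint(nil, -3) // zigzag: -3 -> uvarint 5 -> one byte 0x05
v, n := kbin.Varint(buf)
fmt.Println(v, n)                // -3 1
fmt.Println(kbin.VarintLen(300)) // 2: 300 zigzags to 600, which needs two 7-bit groups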
+func AppendVarlong(dst []byte, i int64) []byte { + return appendUvarlong(dst, uint64(i)<<1^uint64(i>>63)) +} + +func appendUvarlong(dst []byte, u uint64) []byte { + switch uvarlongLen(u) { + case 10: + return append(dst, + byte(u&0x7f|0x80), + byte((u>>7)&0x7f|0x80), + byte((u>>14)&0x7f|0x80), + byte((u>>21)&0x7f|0x80), + byte((u>>28)&0x7f|0x80), + byte((u>>35)&0x7f|0x80), + byte((u>>42)&0x7f|0x80), + byte((u>>49)&0x7f|0x80), + byte((u>>56)&0x7f|0x80), + byte(u>>63)) + case 9: + return append(dst, + byte(u&0x7f|0x80), + byte((u>>7)&0x7f|0x80), + byte((u>>14)&0x7f|0x80), + byte((u>>21)&0x7f|0x80), + byte((u>>28)&0x7f|0x80), + byte((u>>35)&0x7f|0x80), + byte((u>>42)&0x7f|0x80), + byte((u>>49)&0x7f|0x80), + byte(u>>56)) + case 8: + return append(dst, + byte(u&0x7f|0x80), + byte((u>>7)&0x7f|0x80), + byte((u>>14)&0x7f|0x80), + byte((u>>21)&0x7f|0x80), + byte((u>>28)&0x7f|0x80), + byte((u>>35)&0x7f|0x80), + byte((u>>42)&0x7f|0x80), + byte(u>>49)) + case 7: + return append(dst, + byte(u&0x7f|0x80), + byte((u>>7)&0x7f|0x80), + byte((u>>14)&0x7f|0x80), + byte((u>>21)&0x7f|0x80), + byte((u>>28)&0x7f|0x80), + byte((u>>35)&0x7f|0x80), + byte(u>>42)) + case 6: + return append(dst, + byte(u&0x7f|0x80), + byte((u>>7)&0x7f|0x80), + byte((u>>14)&0x7f|0x80), + byte((u>>21)&0x7f|0x80), + byte((u>>28)&0x7f|0x80), + byte(u>>35)) + case 5: + return append(dst, + byte(u&0x7f|0x80), + byte((u>>7)&0x7f|0x80), + byte((u>>14)&0x7f|0x80), + byte((u>>21)&0x7f|0x80), + byte(u>>28)) + case 4: + return append(dst, + byte(u&0x7f|0x80), + byte((u>>7)&0x7f|0x80), + byte((u>>14)&0x7f|0x80), + byte(u>>21)) + case 3: + return append(dst, + byte(u&0x7f|0x80), + byte((u>>7)&0x7f|0x80), + byte(u>>14)) + case 2: + return append(dst, + byte(u&0x7f|0x80), + byte(u>>7)) + case 1: + return append(dst, byte(u)) + } + return dst +} + +// AppendString appends a string to dst prefixed with its int16 length. +func AppendString(dst []byte, s string) []byte { + dst = AppendInt16(dst, int16(len(s))) + return append(dst, s...) +} + +// AppendCompactString appends a string to dst prefixed with its uvarint length +// starting at 1; 0 is reserved for null, which compact strings are not +// (nullable compact ones are!). Thus, the length is the decoded uvarint - 1. +// +// For KIP-482. +func AppendCompactString(dst []byte, s string) []byte { + dst = AppendUvarint(dst, 1+uint32(len(s))) + return append(dst, s...) +} + +// AppendNullableString appends potentially nil string to dst prefixed with its +// int16 length or int16(-1) if nil. +func AppendNullableString(dst []byte, s *string) []byte { + if s == nil { + return AppendInt16(dst, -1) + } + return AppendString(dst, *s) +} + +// AppendCompactNullableString appends a potentially nil string to dst with its +// uvarint length starting at 1, with 0 indicating null. Thus, the length is +// the decoded uvarint - 1. +// +// For KIP-482. +func AppendCompactNullableString(dst []byte, s *string) []byte { + if s == nil { + return AppendUvarint(dst, 0) + } + return AppendCompactString(dst, *s) +} + +// AppendBytes appends bytes to dst prefixed with its int32 length. +func AppendBytes(dst, b []byte) []byte { + dst = AppendInt32(dst, int32(len(b))) + return append(dst, b...) +} + +// AppendCompactBytes appends bytes to dst prefixed with a its uvarint length +// starting at 1; 0 is reserved for null, which compact bytes are not (nullable +// compact ones are!). Thus, the length is the decoded uvarint - 1. +// +// For KIP-482. 
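The compact (KIP-482) variants encode length+1 as a uvarint so that 0 can stand for null in the nullable forms. For example, with the same assumed imports:

b := kbin.AppendCompactString(nil, "hi")     // length+1 = 3, then the bytes
b = kbin.AppendCompactNullableString(b, nil) // a lone 0x00 encodes null
fmt.Printf("% x\n", b)                       // 03 68 69 00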
+func AppendCompactBytes(dst, b []byte) []byte { + dst = AppendUvarint(dst, 1+uint32(len(b))) + return append(dst, b...) +} + +// AppendNullableBytes appends a potentially nil slice to dst prefixed with its +// int32 length or int32(-1) if nil. +func AppendNullableBytes(dst, b []byte) []byte { + if b == nil { + return AppendInt32(dst, -1) + } + return AppendBytes(dst, b) +} + +// AppendCompactNullableBytes appends a potentially nil slice to dst with its +// uvarint length starting at 1, with 0 indicating null. Thus, the length is +// the decoded uvarint - 1. +// +// For KIP-482. +func AppendCompactNullableBytes(dst, b []byte) []byte { + if b == nil { + return AppendUvarint(dst, 0) + } + return AppendCompactBytes(dst, b) +} + +// AppendVarintString appends a string to dst prefixed with its length encoded +// as a varint. +func AppendVarintString(dst []byte, s string) []byte { + dst = AppendVarint(dst, int32(len(s))) + return append(dst, s...) +} + +// AppendVarintBytes appends a slice to dst prefixed with its length encoded as +// a varint. +func AppendVarintBytes(dst, b []byte) []byte { + if b == nil { + return AppendVarint(dst, -1) + } + dst = AppendVarint(dst, int32(len(b))) + return append(dst, b...) +} + +// AppendArrayLen appends the length of an array as an int32 to dst. +func AppendArrayLen(dst []byte, l int) []byte { + return AppendInt32(dst, int32(l)) +} + +// AppendCompactArrayLen appends the length of an array as a uvarint to dst +// as the length + 1. +// +// For KIP-482. +func AppendCompactArrayLen(dst []byte, l int) []byte { + return AppendUvarint(dst, 1+uint32(l)) +} + +// AppendNullableArrayLen appends the length of an array as an int32 to dst, +// or -1 if isNil is true. +func AppendNullableArrayLen(dst []byte, l int, isNil bool) []byte { + if isNil { + return AppendInt32(dst, -1) + } + return AppendInt32(dst, int32(l)) +} + +// AppendCompactNullableArrayLen appends the length of an array as a uvarint to +// dst as the length + 1; if isNil is true, this appends 0 as a uvarint. +// +// For KIP-482. +func AppendCompactNullableArrayLen(dst []byte, l int, isNil bool) []byte { + if isNil { + return AppendUvarint(dst, 0) + } + return AppendUvarint(dst, 1+uint32(l)) +} + +// Reader is used to decode Kafka messages. +// +// For all functions on Reader, if the reader has been invalidated, functions +// return defaults (false, 0, nil, ""). Use Complete to detect if the reader +// was invalidated or if the reader has remaining data. +type Reader struct { + Src []byte + bad bool +} + +// Bool returns a bool from the reader. +func (b *Reader) Bool() bool { + if len(b.Src) < 1 { + b.bad = true + b.Src = nil + return false + } + t := b.Src[0] != 0 // if '0', false + b.Src = b.Src[1:] + return t +} + +// Int8 returns an int8 from the reader. +func (b *Reader) Int8() int8 { + if len(b.Src) < 1 { + b.bad = true + b.Src = nil + return 0 + } + r := b.Src[0] + b.Src = b.Src[1:] + return int8(r) +} + +// Int16 returns an int16 from the reader. +func (b *Reader) Int16() int16 { + if len(b.Src) < 2 { + b.bad = true + b.Src = nil + return 0 + } + r := int16(binary.BigEndian.Uint16(b.Src)) + b.Src = b.Src[2:] + return r +} + +// Uint16 returns an uint16 from the reader. +func (b *Reader) Uint16() uint16 { + if len(b.Src) < 2 { + b.bad = true + b.Src = nil + return 0 + } + r := binary.BigEndian.Uint16(b.Src) + b.Src = b.Src[2:] + return r +} + +// Int32 returns an int32 from the reader. 
+func (b *Reader) Int32() int32 { + if len(b.Src) < 4 { + b.bad = true + b.Src = nil + return 0 + } + r := int32(binary.BigEndian.Uint32(b.Src)) + b.Src = b.Src[4:] + return r +} + +// Int64 returns an int64 from the reader. +func (b *Reader) Int64() int64 { + return int64(b.readUint64()) +} + +// Uuid returns a uuid from the reader. +func (b *Reader) Uuid() [16]byte { + var r [16]byte + copy(r[:], b.Span(16)) + return r +} + +// Float64 returns a float64 from the reader. +func (b *Reader) Float64() float64 { + return math.Float64frombits(b.readUint64()) +} + +func (b *Reader) readUint64() uint64 { + if len(b.Src) < 8 { + b.bad = true + b.Src = nil + return 0 + } + r := binary.BigEndian.Uint64(b.Src) + b.Src = b.Src[8:] + return r +} + +// Uint32 returns a uint32 from the reader. +func (b *Reader) Uint32() uint32 { + if len(b.Src) < 4 { + b.bad = true + b.Src = nil + return 0 + } + r := binary.BigEndian.Uint32(b.Src) + b.Src = b.Src[4:] + return r +} + +// Varint returns a varint int32 from the reader. +func (b *Reader) Varint() int32 { + val, n := Varint(b.Src) + if n <= 0 { + b.bad = true + b.Src = nil + return 0 + } + b.Src = b.Src[n:] + return val +} + +// Varlong returns a varlong int64 from the reader. +func (b *Reader) Varlong() int64 { + val, n := Varlong(b.Src) + if n <= 0 { + b.bad = true + b.Src = nil + return 0 + } + b.Src = b.Src[n:] + return val +} + +// Uvarint returns a uvarint encoded uint32 from the reader. +func (b *Reader) Uvarint() uint32 { + val, n := Uvarint(b.Src) + if n <= 0 { + b.bad = true + b.Src = nil + return 0 + } + b.Src = b.Src[n:] + return val +} + +// Span returns l bytes from the reader. +func (b *Reader) Span(l int) []byte { + if len(b.Src) < l || l < 0 { + b.bad = true + b.Src = nil + return nil + } + r := b.Src[:l:l] + b.Src = b.Src[l:] + return r +} + +// UnsafeString returns a Kafka string from the reader without allocating using +// the unsafe package. This must be used with care; note the string holds a +// reference to the original slice. +func (b *Reader) UnsafeString() string { + l := b.Int16() + return UnsafeString(b.Span(int(l))) +} + +// String returns a Kafka string from the reader. +func (b *Reader) String() string { + l := b.Int16() + return string(b.Span(int(l))) +} + +// UnsafeCompactString returns a Kafka compact string from the reader without +// allocating using the unsafe package. This must be used with care; note the +// string holds a reference to the original slice. +func (b *Reader) UnsafeCompactString() string { + l := int(b.Uvarint()) - 1 + return UnsafeString(b.Span(l)) +} + +// CompactString returns a Kafka compact string from the reader. +func (b *Reader) CompactString() string { + l := int(b.Uvarint()) - 1 + return string(b.Span(l)) +} + +// UnsafeNullableString returns a Kafka nullable string from the reader without +// allocating using the unsafe package. This must be used with care; note the +// string holds a reference to the original slice. +func (b *Reader) UnsafeNullableString() *string { + l := b.Int16() + if l < 0 { + return nil + } + s := UnsafeString(b.Span(int(l))) + return &s +} + +// NullableString returns a Kafka nullable string from the reader. +func (b *Reader) NullableString() *string { + l := b.Int16() + if l < 0 { + return nil + } + s := string(b.Span(int(l))) + return &s +} + +// UnsafeCompactNullableString returns a Kafka compact nullable string from the +// reader without allocating using the unsafe package. This must be used with +// care; note the string holds a reference to the original slice. 
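The Unsafe* readers skip the string copy at the cost of aliasing the source buffer, which is exactly the hazard the comments above warn about. A sketch of the failure mode (fmt and the importable kbin package assumed):

src := []byte{0x00, 0x03, 'f', 'o', 'o'}
r := kbin.Reader{Src: src}
s := r.UnsafeString() // "foo", but s shares memory with src
src[2] = 'b'
fmt.Println(s) // "boo": mutating the source mutated the string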
+func (b *Reader) UnsafeCompactNullableString() *string { + l := int(b.Uvarint()) - 1 + if l < 0 { + return nil + } + s := UnsafeString(b.Span(l)) + return &s +} + +// CompactNullableString returns a Kafka compact nullable string from the +// reader. +func (b *Reader) CompactNullableString() *string { + l := int(b.Uvarint()) - 1 + if l < 0 { + return nil + } + s := string(b.Span(l)) + return &s +} + +// Bytes returns a Kafka byte array from the reader. +// +// This never returns nil. +func (b *Reader) Bytes() []byte { + l := b.Int32() + // This is not to spec, but it is not clearly documented and Microsoft + // EventHubs fails here. -1 means null, which should throw an + // exception. EventHubs uses -1 to mean "does not exist" on some + // non-nullable fields. + // + // Until EventHubs is fixed, we return an empty byte slice for null. + if l == -1 { + return []byte{} + } + return b.Span(int(l)) +} + +// CompactBytes returns a Kafka compact byte array from the reader. +// +// This never returns nil. +func (b *Reader) CompactBytes() []byte { + l := int(b.Uvarint()) - 1 + if l == -1 { // same as above: -1 should not be allowed here + return []byte{} + } + return b.Span(l) +} + +// NullableBytes returns a Kafka nullable byte array from the reader, returning +// nil as appropriate. +func (b *Reader) NullableBytes() []byte { + l := b.Int32() + if l < 0 { + return nil + } + r := b.Span(int(l)) + return r +} + +// CompactNullableBytes returns a Kafka compact nullable byte array from the +// reader, returning nil as appropriate. +func (b *Reader) CompactNullableBytes() []byte { + l := int(b.Uvarint()) - 1 + if l < 0 { + return nil + } + r := b.Span(l) + return r +} + +// ArrayLen returns a Kafka array length from the reader. +func (b *Reader) ArrayLen() int32 { + r := b.Int32() + // The min size of a Kafka type is a byte, so if we do not have + // at least the array length of bytes left, it is bad. + if len(b.Src) < int(r) { + b.bad = true + b.Src = nil + return 0 + } + return r +} + +// VarintArrayLen returns a Kafka array length from the reader. +func (b *Reader) VarintArrayLen() int32 { + r := b.Varint() + // The min size of a Kafka type is a byte, so if we do not have + // at least the array length of bytes left, it is bad. + if len(b.Src) < int(r) { + b.bad = true + b.Src = nil + return 0 + } + return r +} + +// CompactArrayLen returns a Kafka compact array length from the reader. +func (b *Reader) CompactArrayLen() int32 { + r := int32(b.Uvarint()) - 1 + // The min size of a Kafka type is a byte, so if we do not have + // at least the array length of bytes left, it is bad. + if len(b.Src) < int(r) { + b.bad = true + b.Src = nil + return 0 + } + return r +} + +// VarintBytes returns a Kafka encoded varint array from the reader, returning +// nil as appropriate. +func (b *Reader) VarintBytes() []byte { + l := b.Varint() + if l < 0 { + return nil + } + return b.Span(int(l)) +} + +// UnsafeVarintString returns a Kafka encoded varint string from the reader +// without allocating using the unsafe package. This must be used with care; +// note the string holds a reference to the original slice. +func (b *Reader) UnsafeVarintString() string { + return UnsafeString(b.VarintBytes()) +} + +// VarintString returns a Kafka encoded varint string from the reader. +func (b *Reader) VarintString() string { + return string(b.VarintBytes()) +} + +// Complete returns ErrNotEnoughData if the source ran out while decoding. 
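Note the asymmetry between the nullable and non-nullable byte readers when a length of -1 appears on the wire: Bytes and CompactBytes deliberately return an empty slice (the EventHubs workaround described below), while the Nullable variants report nil. For instance:

r := kbin.Reader{Src: kbin.AppendInt32(nil, -1)}
fmt.Println(r.Bytes() == nil) // false: an empty, non-nil slice
r2 := kbin.Reader{Src: kbin.AppendInt32(nil, -1)}
fmt.Println(r2.NullableBytes() == nil) // true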
+func (b *Reader) Complete() error { + if b.bad { + return ErrNotEnoughData + } + return nil +} + +// Ok returns true if the reader is still ok. +func (b *Reader) Ok() bool { + return !b.bad +} + +// UnsafeString returns the slice as a string using unsafe rule (6). +func UnsafeString(slice []byte) string { + var str string + strhdr := (*reflect.StringHeader)(unsafe.Pointer(&str)) //nolint:gosec // known way to convert slice to string + strhdr.Data = ((*reflect.SliceHeader)(unsafe.Pointer(&slice))).Data //nolint:gosec // known way to convert slice to string + strhdr.Len = len(slice) + return str +} diff --git a/vendor/github.com/twmb/franz-go/pkg/kmsg/record.go b/vendor/github.com/twmb/franz-go/pkg/kmsg/record.go new file mode 100644 index 000000000000..86499fd79660 --- /dev/null +++ b/vendor/github.com/twmb/franz-go/pkg/kmsg/record.go @@ -0,0 +1,174 @@ +package kmsg + +import "github.com/twmb/franz-go/pkg/kmsg/internal/kbin" + +// A Record is a Kafka v0.11.0.0 record. It corresponds to an individual +// message as it is written on the wire. +type Record struct { + // Length is the length of this record on the wire of everything that + // follows this field. It is an int32 encoded as a varint. + Length int32 + + // Attributes are record level attributes. This field currently is unused. + Attributes int8 + + // TimestampDelta is the millisecond delta of this record's timestamp + // from the record's RecordBatch's FirstTimestamp. + // + // NOTE: this is actually an int64 but we cannot change the type for + // backwards compatibility. Use TimestampDelta64. + TimestampDelta int32 + TimestampDelta64 int64 + + // OffsetDelta is the delta of this record's offset from the record's + // RecordBatch's FirstOffset. + // + // For producing, this is usually equal to the index of the record in + // the record batch. + OffsetDelta int32 + + // Key is an blob of data for a record. + // + // Key's are usually used for hashing the record to specific Kafka partitions. + Key []byte + + // Value is a blob of data. This field is the main "message" portion of a + // record. + Value []byte + + // Headers are optional user provided metadata for records. Unlike normal + // arrays, the number of headers is encoded as a varint. 
+ Headers []Header +} + +func (v *Record) AppendTo(dst []byte) []byte { + { + v := v.Length + dst = kbin.AppendVarint(dst, v) + } + { + v := v.Attributes + dst = kbin.AppendInt8(dst, v) + } + { + d := v.TimestampDelta64 + if d == 0 { + d = int64(v.TimestampDelta) + } + dst = kbin.AppendVarlong(dst, d) + } + { + v := v.OffsetDelta + dst = kbin.AppendVarint(dst, v) + } + { + v := v.Key + dst = kbin.AppendVarintBytes(dst, v) + } + { + v := v.Value + dst = kbin.AppendVarintBytes(dst, v) + } + { + v := v.Headers + dst = kbin.AppendVarint(dst, int32(len(v))) + for i := range v { + v := &v[i] + { + v := v.Key + dst = kbin.AppendVarintString(dst, v) + } + { + v := v.Value + dst = kbin.AppendVarintBytes(dst, v) + } + } + } + return dst +} + +func (v *Record) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *Record) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *Record) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + s := v + { + v := b.Varint() + s.Length = v + } + { + v := b.Int8() + s.Attributes = v + } + { + v := b.Varlong() + s.TimestampDelta64 = v + s.TimestampDelta = int32(v) + } + { + v := b.Varint() + s.OffsetDelta = v + } + { + v := b.VarintBytes() + s.Key = v + } + { + v := b.VarintBytes() + s.Value = v + } + { + v := s.Headers + a := v + var l int32 + l = b.VarintArrayLen() + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]Header, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + v = b.UnsafeVarintString() + } else { + v = b.VarintString() + } + s.Key = v + } + { + v := b.VarintBytes() + s.Value = v + } + } + v = a + s.Headers = v + } + return b.Complete() +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to Record. +func (v *Record) Default() { +} + +// NewRecord returns a default Record +// This is a shortcut for creating a struct and calling Default yourself. +func NewRecord() Record { + var v Record + v.Default() + return v +} diff --git a/vendor/github.com/twmb/franz-go/pkg/kversion/kversion.go b/vendor/github.com/twmb/franz-go/pkg/kversion/kversion.go new file mode 100644 index 000000000000..3081c346f5aa --- /dev/null +++ b/vendor/github.com/twmb/franz-go/pkg/kversion/kversion.go @@ -0,0 +1,1166 @@ +// Package kversion specifies versions for Kafka request keys. +// +// Kafka technically has internal broker versions that bump multiple times per +// release. This package only defines releases and tip. +package kversion + +import ( + "bytes" + "fmt" + "regexp" + "sync" + "text/tabwriter" + + "github.com/twmb/franz-go/pkg/kmsg" +) + +// Versions is a list of versions, with each item corresponding to a Kafka key +// and each item's value corresponding to the max version supported. +// +// Minimum versions are not currently tracked because all keys have a minimum +// version of zero. The internals of a Versions may change in the future to +// support minimum versions; the outward facing API of Versions should not +// change to support this. +// +// As well, supported features may be added in the future. +type Versions struct { + // If any version is -1, then it is left out in that version. + // This was first done in version 2.7.0, where Kafka added support + // for 52, 53, 54, 55, but it was not a part of the 2.7.0 release, + // so ApiVersionsResponse goes from 51 to 56. 
+ k2v []int16 +} + +var ( + reFromString *regexp.Regexp + reFromStringOnce sync.Once +) + +var versions = []struct { + name string + v *Versions +}{ + {"v0.8.0", V0_8_0()}, + {"v0.8.1", V0_8_1()}, + {"v0.8.2", V0_8_2()}, + {"v0.9.0", V0_9_0()}, + {"v0.10.0", V0_10_0()}, + {"v0.10.1", V0_10_1()}, + {"v0.10.2", V0_10_2()}, + {"v0.11.0", V0_11_0()}, + {"v1.0", V1_0_0()}, + {"v1.1", V1_1_0()}, + {"v2.0", V2_0_0()}, + {"v2.1", V2_1_0()}, + {"v2.2", V2_2_0()}, + {"v2.3", V2_3_0()}, + {"v2.4", V2_4_0()}, + {"v2.5", V2_5_0()}, + {"v2.6", V2_6_0()}, + {"v2.7", V2_7_0()}, + {"v2.8", V2_8_0()}, + {"v3.0", V3_0_0()}, + {"v3.1", V3_1_0()}, + {"v3.2", V3_2_0()}, + {"v3.3", V3_3_0()}, + {"v3.4", V3_4_0()}, + {"v3.5", V3_5_0()}, + {"v3.6", V3_6_0()}, + {"v3.7", V3_7_0()}, +} + +// VersionStrings returns all recognized versions, minus any patch, that can be +// used as input to FromString. +func VersionStrings() []string { + var vs []string + for _, v := range versions { + vs = append(vs, v.name) + } + return vs +} + +// FromString returns a Versions from v. +// The expected input is: +// - for v0, v0.#.# or v0.#.#.# +// - for v1, v1.# or v1.#.# +// +// The "v" is optional. +func FromString(v string) *Versions { + reFromStringOnce.Do(func() { + // 0: entire string + // 1: v1+ match, minus patch + // 2: v0 match, minus subpatch + reFromString = regexp.MustCompile(`^(?:(v?[1-9]+\.\d+)(?:\.\d+)?|(v?0\.\d+\.\d+)(?:\.\d+)?)$`) + }) + m := reFromString.FindStringSubmatch(v) + if m == nil { + return nil + } + v = m[1] + if m[2] != "" { + v = m[2] + } + withv := "v" + v + for _, v2 := range versions { + if v2.name == v || v2.name == withv { + return v2.v + } + } + return nil +} + +// FromApiVersionsResponse returns a Versions from a kmsg.ApiVersionsResponse. +func FromApiVersionsResponse(r *kmsg.ApiVersionsResponse) *Versions { + var v Versions + for _, key := range r.ApiKeys { + v.SetMaxKeyVersion(key.ApiKey, key.MaxVersion) + } + return &v +} + +// HasKey returns true if the versions contains the given key. +func (vs *Versions) HasKey(k int16) bool { + _, has := vs.LookupMaxKeyVersion(k) + return has +} + +// LookupMaxKeyVersion returns the version for the given key and whether the +// key exists. If the key does not exist, this returns (-1, false). +func (vs *Versions) LookupMaxKeyVersion(k int16) (int16, bool) { + if k < 0 { + return -1, false + } + if int(k) >= len(vs.k2v) { + return -1, false + } + version := vs.k2v[k] + if version < 0 { + return -1, false + } + return version, true +} + +// SetMaxKeyVersion sets the max version for the given key. +// +// Setting a version to -1 unsets the key. +// +// Versions are backed by a slice; if the slice is not long enough, it is +// extended to fit the key. +func (vs *Versions) SetMaxKeyVersion(k, v int16) { + if v < 0 { + v = -1 + } + // If the version is < 0, we are unsetting a version. If we are + // unsetting a version that is more than the amount of keys we already + // have, we have no reason to unset. + if k < 0 || v < 0 && int(k) >= len(vs.k2v)+1 { + return + } + needLen := int(k) + 1 + for len(vs.k2v) < needLen { + vs.k2v = append(vs.k2v, -1) + } + vs.k2v[k] = v +} + +// Equal returns whether two versions are equal. +func (vs *Versions) Equal(other *Versions) bool { + // We allow the version slices to be of different lengths, so long as + // the versions for keys in one and not the other are -1. + // + // Basically, all non-negative-one keys must be equal. 
+ long, short := vs.k2v, other.k2v + if len(short) > len(long) { + long, short = short, long + } + for i, v := range short { + if v != long[i] { + return false + } + } + for _, v := range long[len(short):] { + if v >= 0 { + return false + } + } + return true +} + +// EachMaxKeyVersion calls fn for each key and max version +func (vs *Versions) EachMaxKeyVersion(fn func(k, v int16)) { + for k, v := range vs.k2v { + if v >= 0 { + fn(int16(k), v) + } + } +} + +// VersionGuessOpt is an option to change how version guessing is done. +type VersionGuessOpt interface { + apply(*guessCfg) +} + +type guessOpt struct{ fn func(*guessCfg) } + +func (opt guessOpt) apply(cfg *guessCfg) { opt.fn(cfg) } + +// SkipKeys skips the given keys while guessing versions. +func SkipKeys(keys ...int16) VersionGuessOpt { + return guessOpt{func(cfg *guessCfg) { cfg.skipKeys = keys }} +} + +// TryRaftBroker changes from guessing the version for a classical ZooKeeper +// based broker to guessing for a raft based broker (v2.8+). +// +// Note that with raft, there can be a TryRaftController attempt as well. +func TryRaftBroker() VersionGuessOpt { + return guessOpt{func(cfg *guessCfg) { cfg.listener = rBroker }} +} + +// TryRaftController changes from guessing the version for a classical +// ZooKeeper based broker to guessing for a raft based controller broker +// (v2.8+). +// +// Note that with raft, there can be a TryRaftBroker attempt as well. Odds are +// that if you are an end user speaking to a raft based Kafka cluster, you are +// speaking to a raft broker. The controller is specifically for broker to +// broker communication. +func TryRaftController() VersionGuessOpt { + return guessOpt{func(cfg *guessCfg) { cfg.listener = rController }} +} + +type guessCfg struct { + skipKeys []int16 + listener listener +} + +// VersionGuess attempts to guess which version of Kafka these versions belong +// to. If an exact match can be determined, this returns a string in the format +// v0.#.# or v#.# (depending on whether Kafka is pre-1.0 or post). For +// example, v0.8.0 or v2.7. +// +// Patch numbers are not included in the guess as it is not possible to +// determine the Kafka patch version being used as a client. +// +// If the version is determined to be higher than kversion knows of or is tip, +// this package returns "at least v#.#". +// +// Custom versions, or in-between versions, are detected and return slightly +// more verbose strings. +// +// Options can be specified to change how version guessing is performed, for +// example, certain keys can be skipped, or the guessing can try evaluating the +// versions as Raft broker based versions. +// +// Internally, this function tries guessing the version against both KRaft and +// Kafka APIs. The more exact match is returned. +func (vs *Versions) VersionGuess(opts ...VersionGuessOpt) string { + standard := vs.versionGuess(opts...) + raftBroker := vs.versionGuess(append(opts, TryRaftBroker())...) + raftController := vs.versionGuess(append(opts, TryRaftController())...) + + // If any of these are exact, return the exact guess. + for _, g := range []guess{ + standard, + raftBroker, + raftController, + } { + if g.how == guessExact { + return g.String() + } + } + + // If any are atLeast, that means it is newer than we can guess and we + // return the highest version. + for _, g := range []guess{ + standard, + raftBroker, + raftController, + } { + if g.how == guessAtLeast { + return g.String() + } + } + + // This is a custom version. 
We could do some advanced logic to try to + // return highest of all three guesses, but that may be inaccurate: + // KRaft may detect a higher guess because not all requests exist in + // KRaft. Instead, we just return our standard guess. + return standard.String() +} + +type guess struct { + v1 string + v2 string // for between + how int8 +} + +const ( + guessExact = iota + guessAtLeast + guessCustomUnknown + guessCustomAtLeast + guessBetween + guessNotEven +) + +func (g guess) String() string { + switch g.how { + case guessExact: + return g.v1 + case guessAtLeast: + return "at least " + g.v1 + case guessCustomUnknown: + return "unknown custom version" + case guessCustomAtLeast: + return "unknown custom version at least " + g.v1 + case guessBetween: + return "between " + g.v1 + " and " + g.v2 + case guessNotEven: + return "not even " + g.v1 + } + return g.v1 +} + +func (vs *Versions) versionGuess(opts ...VersionGuessOpt) guess { + cfg := guessCfg{ + listener: zkBroker, + // Envelope was added in 2.7 for kraft and zkBroker in 3.4; we + // need to skip it for 2.7 through 3.4 otherwise the version + // detection fails. We can just skip it generally since there + // are enough differentiating factors that accurately detecting + // envelope doesn't matter. + // + // TODO: add introduced-version to differentiate some specific + // keys. + skipKeys: []int16{4, 5, 6, 7, 27, 52, 53, 54, 55, 56, 57, 58, 59, 62, 63, 64, 67}, + } + for _, opt := range opts { + opt.apply(&cfg) + } + + skip := make(map[int16]bool, len(cfg.skipKeys)) + for _, k := range cfg.skipKeys { + skip[k] = true + } + + var last string + cmp := make(map[int16]int16, len(maxTip)) + cmpskip := make(map[int16]int16) + for _, comparison := range []struct { + cmp listenerKeys + name string + }{ + {max080, "v0.8.0"}, + {max081, "v0.8.1"}, + {max082, "v0.8.2"}, + {max090, "v0.9.0"}, + {max0100, "v0.10.0"}, + {max0101, "v0.10.1"}, + {max0102, "v0.10.2"}, + {max0110, "v0.11.0"}, + {max100, "v1.0"}, + {max110, "v1.1"}, + {max200, "v2.0"}, + {max210, "v2.1"}, + {max220, "v2.2"}, + {max230, "v2.3"}, + {max240, "v2.4"}, + {max250, "v2.5"}, + {max260, "v2.6"}, + {max270, "v2.7"}, + {max280, "v2.8"}, + {max300, "v3.0"}, + {max310, "v3.1"}, + {max320, "v3.2"}, + {max330, "v3.3"}, + {max340, "v3.4"}, + {max350, "v3.5"}, + {max360, "v3.6"}, + {max370, "v3.7"}, + } { + for k, v := range comparison.cmp.filter(cfg.listener) { + if v == -1 { + continue + } + k16 := int16(k) + if skip[k16] { + cmpskip[k16] = v + } else { + cmp[k16] = v + } + } + + var under, equal, over bool + + for k, v := range vs.k2v { + k16 := int16(k) + if skip[k16] { + skipv, ok := cmpskip[k16] + if v == -1 || !ok { + continue + } + cmp[k16] = skipv + } + cmpv, has := cmp[k16] + if has { + // If our version for this key is less than the + // comparison versions, then we are less than what we + // are comparing. + if v < cmpv { + under = true + } else if v > cmpv { + // Similarly, if our version is more, then we + // are over what we are comparing. + over = true + } else { + equal = true + } + delete(cmp, k16) + } else if v >= 0 { + // If what we are comparing to does not even have this + // key **and** our version is larger non-zero, then our + // version is larger than what we are comparing to. + // + // We can have a negative version if a key was manually + // unset. + over = true + } + // If the version is < 0, the key is unset. + } + + // If our versions did not clear out what we are comparing against, we + // do not have all keys that we need for this version. 
+ if len(cmp) > 0 { + under = true + } + + current := comparison.name + switch { + case under && over: + // Regardless of equal being true or not, this is a custom version. + if last != "" { + return guess{v1: last, how: guessCustomAtLeast} + } + return guess{v1: last, how: guessCustomUnknown} + + case under: + // Regardless of equal being true or not, we have not yet hit + // this version. + if last != "" { + return guess{v1: last, v2: current, how: guessBetween} + } + return guess{v1: current, how: guessNotEven} + + case over: + // Regardless of equal being true or not, we try again. + last = current + + case equal: + return guess{v1: current, how: guessExact} + } + // At least one of under, equal, or over must be true, so there + // is no default case. + } + + return guess{v1: last, how: guessAtLeast} +} + +// String returns a string representation of the versions; the format may +// change. +func (vs *Versions) String() string { + var buf bytes.Buffer + w := tabwriter.NewWriter(&buf, 0, 0, 2, ' ', 0) + for k, v := range vs.k2v { + if v < 0 { + continue + } + name := kmsg.NameForKey(int16(k)) + if name == "" { + name = "Unknown" + } + fmt.Fprintf(w, "%s\t%d\n", name, v) + } + w.Flush() + return buf.String() +} + +// Stable is a shortcut for the latest _released_ Kafka versions. +// +// This is the default version used in kgo to avoid breaking tip changes. +func Stable() *Versions { return zkBrokerOf(maxStable) } + +// Tip is the latest defined Kafka key versions; this may be slightly out of date. +func Tip() *Versions { return zkBrokerOf(maxTip) } + +func V0_8_0() *Versions { return zkBrokerOf(max080) } +func V0_8_1() *Versions { return zkBrokerOf(max081) } +func V0_8_2() *Versions { return zkBrokerOf(max082) } +func V0_9_0() *Versions { return zkBrokerOf(max090) } +func V0_10_0() *Versions { return zkBrokerOf(max0100) } +func V0_10_1() *Versions { return zkBrokerOf(max0101) } +func V0_10_2() *Versions { return zkBrokerOf(max0102) } +func V0_11_0() *Versions { return zkBrokerOf(max0110) } +func V1_0_0() *Versions { return zkBrokerOf(max100) } +func V1_1_0() *Versions { return zkBrokerOf(max110) } +func V2_0_0() *Versions { return zkBrokerOf(max200) } +func V2_1_0() *Versions { return zkBrokerOf(max210) } +func V2_2_0() *Versions { return zkBrokerOf(max220) } +func V2_3_0() *Versions { return zkBrokerOf(max230) } +func V2_4_0() *Versions { return zkBrokerOf(max240) } +func V2_5_0() *Versions { return zkBrokerOf(max250) } +func V2_6_0() *Versions { return zkBrokerOf(max260) } +func V2_7_0() *Versions { return zkBrokerOf(max270) } +func V2_8_0() *Versions { return zkBrokerOf(max280) } +func V3_0_0() *Versions { return zkBrokerOf(max300) } +func V3_1_0() *Versions { return zkBrokerOf(max310) } +func V3_2_0() *Versions { return zkBrokerOf(max320) } +func V3_3_0() *Versions { return zkBrokerOf(max330) } +func V3_4_0() *Versions { return zkBrokerOf(max340) } +func V3_5_0() *Versions { return zkBrokerOf(max350) } +func V3_6_0() *Versions { return zkBrokerOf(max360) } +func V3_7_0() *Versions { return zkBrokerOf(max370) } + +func zkBrokerOf(lks listenerKeys) *Versions { + return &Versions{lks.filter(zkBroker)} +} + +type listener uint8 + +func (l listener) has(target listener) bool { + return l&target != 0 +} + +const ( + zkBroker listener = 1 << iota + rBroker + rController +) + +type listenerKey struct { + listener listener + version int16 +} + +type listenerKeys []listenerKey + +func (lks listenerKeys) filter(listener listener) []int16 { + r := make([]int16, 0, len(lks)) + for _, lk := range 
lks { + if lk.listener.has(listener) { + r = append(r, lk.version) + } else { + r = append(r, -1) + } + } + return r +} + +// All requests before KRaft started being introduced support the zkBroker, but +// KRaft changed that. Kafka commit 698319b8e2c1f6cb574f339eede6f2a5b1919b55 +// added which listeners support which API keys. +func k(listeners ...listener) listenerKey { + var k listenerKey + for _, listener := range listeners { + k.listener |= listener + } + return k +} + +func (l *listenerKey) inc() { + l.version++ +} + +// For the comments below, appends are annotated with the key being introduced, +// while incs are annotated with the version the inc results in. + +func nextMax(prev listenerKeys, do func(listenerKeys) listenerKeys) listenerKeys { + return do(append(listenerKeys(nil), prev...)) +} + +var max080 = nextMax(nil, func(listenerKeys) listenerKeys { + return listenerKeys{ + k(zkBroker, rBroker), // 0 produce + k(zkBroker, rBroker, rController), // 1 fetch + k(zkBroker, rBroker), // 2 list offset + k(zkBroker, rBroker), // 3 metadata + k(zkBroker), // 4 leader and isr + k(zkBroker), // 5 stop replica + k(zkBroker), // 6 update metadata, actually not supported for a bit + k(zkBroker, rController), // 7 controlled shutdown, actually not supported for a bit + } +}) + +var max081 = nextMax(max080, func(v listenerKeys) listenerKeys { + return append(v, + k(zkBroker, rBroker), // 8 offset commit KAFKA-965 db37ed0054 + k(zkBroker, rBroker), // 9 offset fetch (same) + ) +}) + +var max082 = nextMax(max081, func(v listenerKeys) listenerKeys { + v[8].inc() // 1 offset commit KAFKA-1462 + v[9].inc() // 1 offset fetch KAFKA-1841 161b1aa16e I think? + return append(v, + k(zkBroker, rBroker), // 10 find coordinator KAFKA-1012 a670537aa3 + k(zkBroker, rBroker), // 11 join group (same) + k(zkBroker, rBroker), // 12 heartbeat (same) + ) +}) + +var max090 = nextMax(max082, func(v listenerKeys) listenerKeys { + v[0].inc() // 1 produce KAFKA-2136 436b7ddc38; KAFKA-2083 ?? 
KIP-13 + v[1].inc() // 1 fetch (same) + v[6].inc() // 1 update metadata KAFKA-2411 d02ca36ca1 + v[7].inc() // 1 controlled shutdown (same) + v[8].inc() // 2 offset commit KAFKA-1634 + return append(v, + k(zkBroker, rBroker), // 13 leave group KAFKA-2397 636e14a991 + k(zkBroker, rBroker), // 14 sync group KAFKA-2464 86eb74d923 + k(zkBroker, rBroker), // 15 describe groups KAFKA-2687 596c203af1 + k(zkBroker, rBroker), // 16 list groups KAFKA-2687 596c203af1 + ) +}) + +var max0100 = nextMax(max090, func(v listenerKeys) listenerKeys { + v[0].inc() // 2 produce KAFKA-3025 45c8195fa1 KIP-31 KIP-32 + v[1].inc() // 2 fetch (same) + v[3].inc() // 1 metadata KAFKA-3306 33d745e2dc + v[6].inc() // 2 update metadata KAFKA-1215 951e30adc6 + return append(v, + k(zkBroker, rBroker, rController), // 17 sasl handshake KAFKA-3149 5b375d7bf9 + k(zkBroker, rBroker, rController), // 18 api versions KAFKA-3307 8407dac6ee + ) +}) + +var max0101 = nextMax(max0100, func(v listenerKeys) listenerKeys { + v[1].inc() // 3 fetch KAFKA-2063 d04b0998c0 KIP-74 + v[2].inc() // 1 list offset KAFKA-4148 eaaa433fc9 KIP-79 + v[3].inc() // 2 metadata KAFKA-4093 ecc1fb10fa KIP-78 + v[11].inc() // 1 join group KAFKA-3888 40b1dd3f49 KIP-62 + return append(v, + k(zkBroker, rBroker, rController), // 19 create topics KAFKA-2945 fc47b9fa6b + k(zkBroker, rBroker, rController), // 20 delete topics KAFKA-2946 539633ba0e + ) +}) + +var max0102 = nextMax(max0101, func(v listenerKeys) listenerKeys { + v[6].inc() // 3 update metadata KAFKA-4565 d25671884b KIP-103 + v[19].inc() // 1 create topics KAFKA-4591 da57bc27e7 KIP-108 + return v +}) + +var max0110 = nextMax(max0102, func(v listenerKeys) listenerKeys { + v[0].inc() // 3 produce KAFKA-4816 5bd06f1d54 KIP-98 + v[1].inc() // 4 fetch (same) + v[1].inc() // 5 fetch KAFKA-4586 8b05ad406d KIP-107 + v[3].inc() // 4 metadata KAFKA-5291 7311dcbc53 (3 below) + v[9].inc() // 2 offset fetch KAFKA-3853 c2d9b95f36 KIP-98 + v[10].inc() // 1 find coordinator KAFKA-5043 d0e7c6b930 KIP-98 + v = append(v, + k(zkBroker, rBroker), // 21 delete records KAFKA-4586 see above + k(zkBroker, rBroker), // 22 init producer id KAFKA-4817 bdf4cba047 KIP-98 (raft added in KAFKA-12620 e97cff2702b6ba836c7925caa36ab18066a7c95d KIP-730) + k(zkBroker, rBroker), // 23 offset for leader epoch KAFKA-1211 0baea2ac13 KIP-101 + + k(zkBroker, rBroker), // 24 add partitions to txn KAFKA-4990 865d82af2c KIP-98 (raft 3.0 6e857c531f14d07d5b05f174e6063a124c917324) + k(zkBroker, rBroker), // 25 add offsets to txn (same, same raft) + k(zkBroker, rBroker), // 26 end txn (same, same raft) + k(zkBroker, rBroker), // 27 write txn markers (same) + k(zkBroker, rBroker), // 28 txn offset commit (same, same raft) + + // raft broker / controller added in 5b0c58ed53c420e93957369516f34346580dac95 + k(zkBroker, rBroker, rController), // 29 describe acls KAFKA-3266 9815e18fef KIP-140 + k(zkBroker, rBroker, rController), // 30 create acls (same) + k(zkBroker, rBroker, rController), // 31 delete acls (same) + + k(zkBroker, rBroker), // 32 describe configs KAFKA-3267 972b754536 KIP-133 + k(zkBroker, rBroker, rController), // 33 alter configs (same) (raft broker 3.0 6e857c531f14d07d5b05f174e6063a124c917324, controller 273d66479dbee2398b09e478ffaf996498d1ab34) + ) + + // KAFKA-4954 0104b657a1 KIP-124 + v[2].inc() // 2 list offset (reused in e71dce89c0 KIP-98) + v[3].inc() // 3 metadata + v[8].inc() // 3 offset commit + v[9].inc() // 3 offset fetch + v[11].inc() // 2 join group + v[12].inc() // 1 heartbeat + v[13].inc() // 1 leave group + v[14].inc() // 1 
sync group + v[15].inc() // 1 describe groups + v[16].inc() // 1 list group + v[18].inc() // 1 api versions + v[19].inc() // 2 create topics + v[20].inc() // 1 delete topics + + return v +}) + +var max100 = nextMax(max0110, func(v listenerKeys) listenerKeys { + v[0].inc() // 4 produce KAFKA-4763 fc93fb4b61 KIP-112 + v[1].inc() // 6 fetch (same) + v[3].inc() // 5 metadata (same) + v[4].inc() // 1 leader and isr (same) + v[6].inc() // 4 update metadata (same) + + v[0].inc() // 5 produce KAFKA-5793 94692288be + v[17].inc() // 1 sasl handshake KAFKA-4764 8fca432223 KIP-152 + + return append(v, + k(zkBroker, rBroker), // 34 alter replica log dirs KAFKA-5694 adefc8ea07 KIP-113 + k(zkBroker, rBroker), // 35 describe log dirs (same) + k(zkBroker, rBroker, rController), // 36 sasl authenticate KAFKA-4764 (see above) + k(zkBroker, rBroker, rController), // 37 create partitions KAFKA-5856 5f6393f9b1 KIP-195 (raft 3.0 6e857c531f14d07d5b05f174e6063a124c917324) + ) +}) + +var max110 = nextMax(max100, func(v listenerKeys) listenerKeys { + v = append(v, + k(zkBroker), // 38 create delegation token KAFKA-4541 27a8d0f9e7 under KAFKA-1696 KIP-48 + k(zkBroker), // 39 renew delegation token (same) + k(zkBroker), // 40 expire delegation token (same) + k(zkBroker), // 41 describe delegation token (same) + k(zkBroker, rBroker), // 42 delete groups KAFKA-6275 1ed6da7cc8 KIP-229 + ) + + v[1].inc() // 7 fetch KAFKA-6254 7fe1c2b3d3 KIP-227 + v[32].inc() // 1 describe configs KAFKA-6241 b814a16b96 KIP-226 + + return v +}) + +var max200 = nextMax(max110, func(v listenerKeys) listenerKeys { + v[0].inc() // 6 produce KAFKA-6028 1facab387f KIP-219 + v[1].inc() // 8 fetch (same) + v[2].inc() // 3 list offset (same) + v[3].inc() // 6 metadata (same) + v[8].inc() // 4 offset commit (same) + v[9].inc() // 4 offset fetch (same) + v[10].inc() // 2 find coordinator (same) + v[11].inc() // 3 join group (same) + v[12].inc() // 2 heartbeat (same) + v[13].inc() // 2 leave group (same) + v[14].inc() // 2 sync group (same) + v[15].inc() // 2 describe groups (same) + v[16].inc() // 2 list group (same) + v[18].inc() // 2 api versions (same) + v[19].inc() // 3 create topics (same) + v[20].inc() // 2 delete topics (same) + v[21].inc() // 1 delete records (same) + v[22].inc() // 1 init producer id (same) + v[24].inc() // 1 add partitions to txn (same) + v[25].inc() // 1 add offsets to txn (same) + v[26].inc() // 1 end txn (same) + v[28].inc() // 1 txn offset commit (same) + // 29, 30, 31 bumped below, but also had throttle changes + v[32].inc() // 2 describe configs (same) + v[33].inc() // 1 alter configs (same) + v[34].inc() // 1 alter replica log dirs (same) + v[35].inc() // 1 describe log dirs (same) + v[37].inc() // 1 create partitions (same) + v[38].inc() // 1 create delegation token (same) + v[39].inc() // 1 renew delegation token (same) + v[40].inc() // 1 expire delegation token (same) + v[41].inc() // 1 describe delegation token (same) + v[42].inc() // 1 delete groups (same) + + v[29].inc() // 1 describe acls KAFKA-6841 b3aa655a70 KIP-290 + v[30].inc() // 1 create acls (same) + v[31].inc() // 1 delete acls (same) + + v[23].inc() // 1 offset for leader epoch KAFKA-6361 9679c44d2b KIP-279 + return v +}) + +var max210 = nextMax(max200, func(v listenerKeys) listenerKeys { + v[8].inc() // 5 offset commit KAFKA-4682 418a91b5d4 KIP-211 + + v[20].inc() // 3 delete topics KAFKA-5975 04770916a7 KIP-322 + + v[1].inc() // 9 fetch KAFKA-7333 05ba5aa008 KIP-320 + v[2].inc() // 4 list offset (same) + v[3].inc() // 7 metadata (same) + v[8].inc() 
// 6 offset commit (same) + v[9].inc() // 5 offset fetch (same) + v[23].inc() // 2 offset for leader epoch (same, also in Kafka PR #5635 79ad9026a6) + v[28].inc() // 2 txn offset commit (same) + + v[0].inc() // 7 produce KAFKA-4514 741cb761c5 KIP-110 + v[1].inc() // 10 fetch (same) + return v +}) + +var max220 = nextMax(max210, func(v listenerKeys) listenerKeys { + v[2].inc() // 5 list offset KAFKA-2334 152292994e KIP-207 + v[11].inc() // 4 join group KAFKA-7824 9a9310d074 KIP-394 + v[36].inc() // 1 sasl authenticate KAFKA-7352 e8a3bc7425 KIP-368 + + v[4].inc() // 2 leader and isr KAFKA-7235 2155c6d54b KIP-380 + v[5].inc() // 1 stop replica (same) + v[6].inc() // 5 update metadata (same) + v[7].inc() // 2 controlled shutdown (same) + + return append(v, + k(zkBroker, rBroker, rController), // 43 elect preferred leaders KAFKA-5692 269b65279c KIP-183 (raft 3.0 6e857c531f14d07d5b05f174e6063a124c917324) + ) +}) + +var max230 = nextMax(max220, func(v listenerKeys) listenerKeys { + v[3].inc() // 8 metadata KAFKA-7922 a42f16f980 KIP-430 + v[15].inc() // 3 describe groups KAFKA-7922 f11fa5ef40 KIP-430 + + v[1].inc() // 11 fetch KAFKA-8365 e2847e8603 KIP-392 + v[23].inc() // 3 offset for leader epoch (same) + + v[11].inc() // 5 join group KAFKA-7862 0f995ba6be KIP-345 + v[8].inc() // 7 offset commit KAFKA-8225 9fa331b811 KIP-345 + v[12].inc() // 3 heartbeat (same) + v[14].inc() // 3 sync group (same) + + return append(v, + k(zkBroker, rBroker, rController), // 44 incremental alter configs KAFKA-7466 3b1524c5df KIP-339 + ) +}) + +var max240 = nextMax(max230, func(v listenerKeys) listenerKeys { + v[4].inc() // 3 leader and isr KAFKA-8345 81900d0ba0 KIP-455 + v[15].inc() // 4 describe groups KAFKA-8538 f8db022b08 KIP-345 + v[19].inc() // 4 create topics KAFKA-8305 8e161580b8 KIP-464 + v[43].inc() // 1 elect preferred leaders KAFKA-8286 121308cc7a KIP-460 + v = append(v, + // raft added in e07de97a4ce730a2755db7eeacb9b3e1f69a12c8 for the following two + k(zkBroker, rBroker, rController), // 45 alter partition reassignments KAFKA-8345 81900d0ba0 KIP-455 + k(zkBroker, rBroker, rController), // 46 list partition reassignments (same) + + k(zkBroker, rBroker), // 47 offset delete KAFKA-8730 e24d0e22ab KIP-496 + ) + + v[13].inc() // 3 leave group KAFKA-8221 74c90f46c3 KIP-345 + + // introducing flexible versions; 24 were bumped + v[3].inc() // 9 metadata KAFKA-8885 apache/kafka#7325 KIP-482 + v[4].inc() // 4 leader and isr (same) + v[5].inc() // 2 stop replica (same) + v[6].inc() // 6 update metadata (same) + v[7].inc() // 3 controlled shutdown (same) + v[8].inc() // 8 offset commit (same) + v[9].inc() // 6 offset fetch (same) + v[10].inc() // 3 find coordinator (same) + v[11].inc() // 6 join group (same) + v[12].inc() // 4 heartbeat (same) + v[13].inc() // 4 leave group (same) + v[14].inc() // 4 sync group (same) + v[15].inc() // 5 describe groups (same) + v[16].inc() // 3 list group (same) + v[18].inc() // 3 api versions (same, also KIP-511 [non-flexible fields added]) + v[19].inc() // 5 create topics (same) + v[20].inc() // 4 delete topics (same) + v[22].inc() // 2 init producer id (same) + v[38].inc() // 2 create delegation token (same) + v[42].inc() // 2 delete groups (same) + v[43].inc() // 2 elect preferred leaders (same) + v[44].inc() // 1 incremental alter configs (same) + // also 45, 46; not bumped since in same release + + // Create topics (19) was bumped up to 5 in KAFKA-8907 5d0052fe00 + // KIP-525, then 6 in the above bump, then back down to 5 once the + // tagged PR was merged (KAFKA-8932 
1f1179ea64 for the bump down). + + v[0].inc() // 8 produce KAFKA-8729 f6f24c4700 KIP-467 + + return v +}) + +var max250 = nextMax(max240, func(v listenerKeys) listenerKeys { + v[22].inc() // 3 init producer id KAFKA-8710 fecb977b25 KIP-360 + v[9].inc() // 7 offset fetch KAFKA-9346 6da70f9b95 KIP-447 + + // more flexible versions, KAFKA-9420 0a2569e2b99 KIP-482 + // 6 bumped, then sasl handshake reverted later in 1a8dcffe4 + v[36].inc() // 2 sasl authenticate + v[37].inc() // 2 create partitions + v[39].inc() // 2 renew delegation token + v[40].inc() // 2 expire delegation token + v[41].inc() // 2 describe delegation token + + v[28].inc() // 3 txn offset commit KAFKA-9365 ed7c071e07f KIP-447 + + v[29].inc() // 2 describe acls KAFKA-9026 40b35178e5 KIP-482 (for flexible versions) + v[30].inc() // 2 create acls KAFKA-9027 738e14edb KIP-482 (flexible) + v[31].inc() // 2 delete acls KAFKA-9028 738e14edb KIP-482 (flexible) + + v[11].inc() // 7 join group KAFKA-9437 96c4ce480 KIP-559 + v[14].inc() // 5 sync group (same) + + return v +}) + +var max260 = nextMax(max250, func(v listenerKeys) listenerKeys { + v[21].inc() // 2 delete records KAFKA-8768 f869e33ab KIP-482 (opportunistic bump for flexible versions) + v[35].inc() // 2 describe log dirs KAFKA-9435 4f1e8331ff9 KIP-482 (same) + + v = append(v, + k(zkBroker, rBroker), // 48 describe client quotas KAFKA-7740 227a7322b KIP-546 (raft in 5964401bf9aab611bd4a072941bd1c927e044258) + k(zkBroker, rBroker, rController), // 49 alter client quotas (same) + ) + + v[5].inc() // 3 stop replica KAFKA-9539 7c7d55dbd KIP-570 + + v[16].inc() // 4 list group KAFKA-9130 fe948d39e KIP-518 + v[32].inc() // 3 describe configs KAFKA-9494 af3b8b50f2 KIP-569 + + return v +}) + +var max270 = nextMax(max260, func(v listenerKeys) listenerKeys { + // KAFKA-10163 a5ffd1ca44c KIP-599 + v[37].inc() // 3 create partitions + v[19].inc() // 6 create topics (same) + v[20].inc() // 5 delete topics (same) + + // KAFKA-9911 b937ec7567 KIP-588 + v[22].inc() // 4 init producer id + v[24].inc() // 2 add partitions to txn + v[25].inc() // 2 add offsets to txn + v[26].inc() // 2 end txn + + v = append(v, + k(zkBroker, rBroker, rController), // 50 describe user scram creds, KAFKA-10259 e8524ccd8fca0caac79b844d87e98e9c055f76fb KIP-554; 38c409cf33c kraft + k(zkBroker, rBroker, rController), // 51 alter user scram creds, same + ) + + // KAFKA-10435 634c9175054cc69d10b6da22ea1e95edff6a4747 KIP-595 + // This opted in fetch request to flexible versions. + // + // KAFKA-10487: further change in aa5263fba903c85812c0c31443f7d49ee371e9db + v[1].inc() // 12 fetch + + // KAFKA-10492 b7c8490cf47b0c18253d6a776b2b35c76c71c65d KIP-595 + // + // These are the first requests that are raft only. 
+ v = append(v, + k(rController), // 52 vote + k(rController), // 53 begin quorum epoch + k(rController), // 54 end quorum epoch + k(rBroker, rController), // 55 describe quorum + ) + + // KAFKA-8836 57de67db22eb373f92ec5dd449d317ed2bc8b8d1 KIP-497 + v = append(v, + k(zkBroker, rController), // 56 alter isr + ) + + // KAFKA-10028 fb4f297207ef62f71e4a6d2d0dac75752933043d KIP-584 + return append(v, + k(zkBroker, rBroker, rController), // 57 update features (rbroker 3.0 6e857c531f14d07d5b05f174e6063a124c917324; rcontroller 3.2 55ff5d360381af370fe5b3a215831beac49571a4 KIP-778 KAFKA-13823) + ) +}) + +var max280 = nextMax(max270, func(v listenerKeys) listenerKeys { + // KAFKA-10181 KAFKA-10181 KIP-590 + v = append(v, + k(zkBroker, rController), // 58 envelope, controller first, zk in KAFKA-14446 8b045dcbf6b89e1a9594ff95642d4882765e4b0d KIP-866 Kafka 3.4 + ) + + // KAFKA-10729 85f94d50271c952c3e9ee49c4fc814c0da411618 KIP-482 + // (flexible bumps) + v[0].inc() // 9 produce + v[2].inc() // 6 list offsets + v[23].inc() // 4 offset for leader epoch + v[24].inc() // 3 add partitions to txn + v[25].inc() // 3 add offsets to txn + v[26].inc() // 3 end txn + v[27].inc() // 1 write txn markers + v[32].inc() // 4 describe configs + v[33].inc() // 2 alter configs + v[34].inc() // 2 alter replica log dirs + v[48].inc() // 1 describe client quotas + v[49].inc() // 1 alter client quotas + + // KAFKA-10547 5c921afa4a593478f7d1c49e5db9d787558d0d5e KIP-516 + v[3].inc() // 10 metadata + v[6].inc() // 7 update metadata + + // KAFKA-10545 1dd1e7f945d7a8c1dc177223cd88800680f1ff46 KIP-516 + v[4].inc() // 5 leader and isr + + // KAFKA-10427 2023aed59d863278a6302e03066d387f994f085c KIP-630 + v = append(v, + k(rController), // 59 fetch snapshot + ) + + // KAFKA-12204 / KAFKA-10851 302eee63c479fd4b955c44f1058a5e5d111acb57 KIP-700 + v = append(v, + k(zkBroker, rBroker, rController), // 60 describe cluster; rController in KAFKA-15396 41b695b6e30baa4243d9ca4f359b833e17ed0e77 KIP-919 + ) + + // KAFKA-12212 7a1d1d9a69a241efd68e572badee999229b3942f KIP-700 + v[3].inc() // 11 metadata + + // KAFKA-10764 4f588f7ca2a1c5e8dd845863da81425ac69bac92 KIP-516 + v[19].inc() // 7 create topics + v[20].inc() // 6 delete topics + + // KAFKA-12238 e9edf104866822d9e6c3b637ffbf338767b5bf27 KIP-664 + v = append(v, + k(zkBroker, rBroker), // 61 describe producers + ) + + // KAFKA-12248 a022072df3c8175950c03263d2bbf2e3ea7a7a5d KIP-500 + // (commit mentions KIP-500, these are actually described in KIP-631) + // Broker registration was later updated in d9bb2ef596343da9402bff4903b129cff1f7c22b + v = append(v, + k(rController), // 62 broker registration + k(rController), // 63 broker heartbeat + ) + + // KAFKA-12249 3f36f9a7ca153a9d221f6bedeb7d1503aa18eff1 KIP-500 / KIP-631 + // Renamed from Decommission to Unregister in 06dce721ec0185d49fac37775dbf191d0e80e687 + v = append(v, + // kraft broker added in 7143267f71ca0c14957d8560fbc42a5f8aac564d + k(rBroker, rController), // 64 unregister broker + ) + return v +}) + +var max300 = nextMax(max280, func(v listenerKeys) listenerKeys { + // KAFKA-12267 3f09fb97b6943c0612488dfa8e5eab8078fd7ca0 KIP-664 + v = append(v, + k(zkBroker, rBroker), // 65 describe transactions + ) + + // KAFKA-12369 3708a7c6c1ecf1304f091dda1e79ae53ba2df489 KIP-664 + v = append(v, + k(zkBroker, rBroker), // 66 list transactions + ) + + // KAFKA-12620 72d108274c98dca44514007254552481c731c958 KIP-730 + // raft broker added in e97cff2702b6ba836c7925caa36ab18066a7c95d + v = append(v, + k(zkBroker, rController), // 67 allocate producer ids 
+ ) + + // KAFKA-12541 bd72ef1bf1e40feb3bc17349a385b479fa5fa530 KIP-734 + v[2].inc() // 7 list offsets + + // KAFKA-12663 f5d5f654db359af077088685e29fbe5ea69616cf KIP-699 + v[10].inc() // 4 find coordinator + + // KAFKA-12234 e00c0f3719ad0803620752159ef8315d668735d6 KIP-709 + v[9].inc() // 8 offset fetch + + return v +}) + +var max310 = nextMax(max300, func(v listenerKeys) listenerKeys { + // KAFKA-10580 2b8aff58b575c199ee8372e5689420c9d77357a5 KIP-516 + v[1].inc() // 13 fetch + + // KAFKA-10744 1d22b0d70686aef5689b775ea2ea7610a37f3e8c KIP-516 + v[3].inc() // 12 metadata + + return v +}) + +var max320 = nextMax(max310, func(v listenerKeys) listenerKeys { + // KAFKA-13495 69645f1fe5103adb00de6fa43152e7df989f3aea KIP-800 + v[11].inc() // 8 join group + + // KAFKA-13496 bf609694f83931990ce63e0123f811e6475820c5 KIP-800 + v[13].inc() // 5 leave group + + // KAFKA-13527 31fca1611a6780e8a8aa3ac21618135201718e32 KIP-784 + v[35].inc() // 3 describe log dirs + + // KAFKA-13435 c8fbe26f3bd3a7c018e7619deba002ee454208b9 KIP-814 + v[11].inc() // 9 join group + + // KAFKA-13587 52621613fd386203773ba93903abd50b46fa093a KIP-704 + v[4].inc() // 6 leader and isr + v[56].inc() // 1 alter isr => alter partition + + return v +}) + +var max330 = nextMax(max320, func(v listenerKeys) listenerKeys { + // KAFKA-13823 55ff5d360381af370fe5b3a215831beac49571a4 KIP-778 + v[57].inc() // 1 update features + + // KAFKA-13958 4fcfd9ddc4a8da3d4cfbb69268c06763352e29a9 KIP-827 + v[35].inc() // 4 describe log dirs + + // KAFKA-841 f83d95d9a28 KIP-841 + v[56].inc() // 2 alter partition + + // KAFKA-13888 a126e3a622f KIP-836 + v[55].inc() // 1 describe quorum + + // KAFKA-6945 d65d8867983 KIP-373 + v[29].inc() // 3 describe acls + v[30].inc() // 3 create acls + v[31].inc() // 3 delete acls + v[38].inc() // 3 create delegation token + v[41].inc() // 3 describe delegation token + + return v +}) + +var max340 = nextMax(max330, func(v listenerKeys) listenerKeys { + // KAFKA-14304 7b7e40a536a79cebf35cc278b9375c8352d342b9 KIP-866 + // KAFKA-14448 67c72596afe58363eceeb32084c5c04637a33831 added BrokerRegistration + // KAFKA-14493 db490707606855c265bc938e1b236070e0e2eba5 changed BrokerRegistration + // KAFKA-14304 0bb05d8679b684ad8fbb2eb40dfc00066186a75a changed BrokerRegistration back to a bool... 
+ // 5b521031edea8ea7cbcca7dc24a58429423740ff added tag to ApiVersions + v[4].inc() // 7 leader and isr + v[5].inc() // 4 stop replica + v[6].inc() // 8 update metadata + v[62].inc() // 1 broker registration + return v +}) + +var max350 = nextMax(max340, func(v listenerKeys) listenerKeys { + // KAFKA-13369 7146ac57ba9ddd035dac992b9f188a8e7677c08d KIP-405 + v[1].inc() // 14 fetch + v[2].inc() // 8 list offsets + + v[1].inc() // 15 fetch // KAFKA-14617 79b5f7f1ce2 KIP-903 + v[56].inc() // 3 alter partition // KAFKA-14617 8c88cdb7186b1d594f991eb324356dcfcabdf18a KIP-903 + return v +}) + +var max360 = nextMax(max350, func(v listenerKeys) listenerKeys { + // KAFKA-14402 29a1a16668d76a1cc04ec9e39ea13026f2dce1de KIP-890 + // Later commit swapped to stable + v[24].inc() // 4 add partitions to txn + return v +}) + +var max370 = nextMax(max360, func(v listenerKeys) listenerKeys { + // KAFKA-15661 c8f687ac1505456cb568de2b60df235eb1ceb5f0 KIP-951 + v[0].inc() // 10 produce + v[1].inc() // 16 fetch + + // 7826d5fc8ab695a5ad927338469ddc01b435a298 KIP-848 + // (change introduced in 3.6 but was marked unstable and not visible) + v[8].inc() // 9 offset commit + // KAFKA-14499 7054625c45dc6edb3c07271fe4a6c24b4638424f KIP-848 (and prior) + v[9].inc() // 9 offset fetch + + // KAFKA-15368 41b695b6e30baa4243d9ca4f359b833e17ed0e77 KIP-919 + // (added rController as well, see above) + v[60].inc() // 1 describe cluster + + // KAFKA-14391 3be7f7d611d0786f2f98159d5c7492b0d94a2bb7 KIP-848 + // as well as some patches following + v = append(v, + k(zkBroker, rBroker), // 68 consumer group heartbeat + ) + + return v +}) + +var ( + maxStable = max370 + maxTip = nextMax(maxStable, func(v listenerKeys) listenerKeys { + return v + }) +) diff --git a/vendor/github.com/twmb/franz-go/pkg/sasl/sasl.go b/vendor/github.com/twmb/franz-go/pkg/sasl/sasl.go new file mode 100644 index 000000000000..dd85a02a188d --- /dev/null +++ b/vendor/github.com/twmb/franz-go/pkg/sasl/sasl.go @@ -0,0 +1,41 @@ +// Package sasl specifies interfaces that any SASL authentication must provide +// to interop with Kafka SASL. +package sasl + +import "context" + +// Session is an authentication session. +type Session interface { + // Challenge is called with a server response. This must return + // if the authentication is done, or, if not, the next message + // to send. If the authentication is done, this can return an + // additional last message to be written (for which we will not + // read a response). + // + // Returning an error stops the authentication flow. + Challenge([]byte) (bool, []byte, error) +} + +// Mechanism authenticates with SASL. +type Mechanism interface { + // Name is the name of this SASL authentication mechanism. + Name() string + + // Authenticate initializes an authentication session to the provided + // host:port. If the mechanism is a client-first authentication + // mechanism, this also returns the first message to write. + // + // If initializing a session fails, this can return an error to stop + // the authentication flow. + // + // The provided context can be used through the duration of the session. + Authenticate(ctx context.Context, host string) (Session, []byte, error) +} + +// ClosingMechanism is an optional interface for SASL mechanisms. Implementing +// this interface signals that the mechanism should be closed if it will never +// be used again. +type ClosingMechanism interface { + // Close permanently closes a mechanism. 
+ Close() +} diff --git a/vendor/github.com/twmb/franz-go/plugin/kprom/LICENSE b/vendor/github.com/twmb/franz-go/plugin/kprom/LICENSE new file mode 100644 index 000000000000..36e18034325d --- /dev/null +++ b/vendor/github.com/twmb/franz-go/plugin/kprom/LICENSE @@ -0,0 +1,24 @@ +Copyright 2020, Travis Bischel. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of the library nor the + names of its contributors may be used to endorse or promote products + derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/twmb/franz-go/plugin/kprom/README.md b/vendor/github.com/twmb/franz-go/plugin/kprom/README.md new file mode 100644 index 000000000000..5c0db3b04475 --- /dev/null +++ b/vendor/github.com/twmb/franz-go/plugin/kprom/README.md @@ -0,0 +1,42 @@ +kprom +=== + +kprom is a plug-in package to provide prometheus +[metrics](https://pkg.go.dev/github.com/prometheus/client_golang/prometheus) +through a +[`kgo.Hook`](https://pkg.go.dev/github.com/twmb/franz-go/pkg/kgo#Hook). + +This package tracks the following metrics under the following names, all +metrics being counter vecs: + +```go +#{ns}_connects_total{node_id="#{node}"} +#{ns}_connect_errors_total{node_id="#{node}"} +#{ns}_write_errors_total{node_id="#{node}"} +#{ns}_write_bytes_total{node_id="#{node}"} +#{ns}_read_errors_total{node_id="#{node}"} +#{ns}_read_bytes_total{node_id="#{node}"} +#{ns}_produce_bytes_total{node_id="#{node}",topic="#{topic}"} +#{ns}_fetch_bytes_total{node_id="#{node}",topic="#{topic}"} +#{ns}_buffered_produce_records_total +#{ns}_buffered_fetch_records_total +``` + +The above metrics can be expanded considerably with options in this package, +allowing timings, uncompressed and compressed bytes, and different labels. + +Note that seed brokers use broker IDs prefixed with "seed_", with the number +corresponding to which seed it is. + +To use, + +```go +metrics := kprom.NewMetrics("namespace") +cl, err := kgo.NewClient( + kgo.WithHooks(metrics), + // ...other opts +) +``` + +You can use your own prometheus registry, as well as a few other options. +See the package [documentation](https://pkg.go.dev/github.com/twmb/franz-go/plugin/kprom) for more info! 
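The options and hooks defined in the next two vendored files (`config.go` and `kprom.go`) come together roughly as sketched below: `kprom.NewMetrics` builds the hook from any options, `kgo.WithHooks` registers it on the client, and `Metrics.Handler` exposes the plugin's registry over HTTP. This is a minimal illustration rather than part of the patch; the namespace, broker address, and the `kgo.SeedBrokers` option are assumed for the example.

```go
// Illustrative only: wire kprom metrics into a kgo client and serve them.
package main

import (
	"net/http"

	"github.com/twmb/franz-go/pkg/kgo"
	"github.com/twmb/franz-go/plugin/kprom"
)

func main() {
	// Enable a couple of the opt-in histograms alongside the default counters.
	m := kprom.NewMetrics("example_namespace",
		kprom.Histograms(kprom.ReadTime, kprom.WriteTime),
	)

	cl, err := kgo.NewClient(
		kgo.SeedBrokers("localhost:9092"), // assumed broker address
		kgo.WithHooks(m),                  // kprom implements the kgo hook interfaces
	)
	if err != nil {
		panic(err)
	}
	defer cl.Close()

	// Metrics.Handler serves the plugin's internal prometheus registry.
	http.Handle("/metrics", m.Handler())
	_ = http.ListenAndServe(":8080", nil)
}
```

With `WithClientLabel()` every series additionally carries the client_id, and `Registry(...)` can point the plugin at an existing registry instead of the one it creates for itself.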
diff --git a/vendor/github.com/twmb/franz-go/plugin/kprom/config.go b/vendor/github.com/twmb/franz-go/plugin/kprom/config.go
new file mode 100644
index 000000000000..d907bebe5228
--- /dev/null
+++ b/vendor/github.com/twmb/franz-go/plugin/kprom/config.go
@@ -0,0 +1,233 @@
+package kprom
+
+import (
+ "github.com/prometheus/client_golang/prometheus"
+ "github.com/prometheus/client_golang/prometheus/promhttp"
+)
+
+type cfg struct {
+ namespace string
+ subsystem string
+
+ reg prometheus.Registerer
+ gatherer prometheus.Gatherer
+
+ withClientLabel bool
+ histograms map[Histogram][]float64
+ defBuckets []float64
+ fetchProduceOpts fetchProduceOpts
+
+ handlerOpts promhttp.HandlerOpts
+ goCollectors bool
+}
+
+func newCfg(namespace string, opts ...Opt) cfg {
+ regGatherer := RegistererGatherer(prometheus.NewRegistry())
+ cfg := cfg{
+ namespace: namespace,
+ reg: regGatherer,
+ gatherer: regGatherer,
+
+ defBuckets: DefBuckets,
+ fetchProduceOpts: fetchProduceOpts{
+ uncompressedBytes: true,
+ labels: []string{"node_id", "topic"},
+ },
+ }
+
+ for _, opt := range opts {
+ opt.apply(&cfg)
+ }
+
+ if cfg.goCollectors {
+ cfg.reg.MustRegister(prometheus.NewProcessCollector(prometheus.ProcessCollectorOpts{}))
+ cfg.reg.MustRegister(prometheus.NewGoCollector())
+ }
+
+ return cfg
+}
+
+// Opt is an option to configure Metrics.
+type Opt interface {
+ apply(*cfg)
+}
+
+type opt struct{ fn func(*cfg) }
+
+func (o opt) apply(c *cfg) { o.fn(c) }
+
+type RegistererGatherer interface {
+ prometheus.Registerer
+ prometheus.Gatherer
+}
+
+// Registry sets the registerer and gatherer to add metrics to, rather than a
+// new registry. Use this option if you want to configure both Gatherer and
+// Registerer with the same object.
+func Registry(rg RegistererGatherer) Opt {
+ return opt{func(c *cfg) {
+ c.reg = rg
+ c.gatherer = rg
+ }}
+}
+
+// Registerer sets the registerer to add metrics to, rather than a new registry.
+func Registerer(reg prometheus.Registerer) Opt {
+ return opt{func(c *cfg) { c.reg = reg }}
+}
+
+// Gatherer sets the gatherer to gather metrics from, rather than a new registry.
+func Gatherer(gatherer prometheus.Gatherer) Opt {
+ return opt{func(c *cfg) { c.gatherer = gatherer }}
+}
+
+// GoCollectors adds the prometheus.NewProcessCollector and
+// prometheus.NewGoCollector collectors to the Metrics' registry.
+func GoCollectors() Opt {
+ return opt{func(c *cfg) { c.goCollectors = true }}
+}
+
+// HandlerOpts sets handler options to use if you wish to use the
+// Metrics.Handler function.
+//
+// This is only useful if you both (a) do not want to provide your own registry
+// and (b) want to override the default handler options.
+func HandlerOpts(opts promhttp.HandlerOpts) Opt {
+ return opt{func(c *cfg) { c.handlerOpts = opts }}
+}
+
+// WithClientLabel adds a "client_id" label to all metrics.
+func WithClientLabel() Opt {
+ return opt{func(c *cfg) { c.withClientLabel = true }}
+}
+
+// Subsystem sets the subsystem for the kprom metrics, overriding the default
+// empty string.
+func Subsystem(ss string) Opt {
+ return opt{func(c *cfg) { c.subsystem = ss }}
+}
+
+// Buckets sets the buckets to be used with Histograms, overriding the default
+// of [kprom.DefBuckets]. If custom buckets per histogram are needed,
+// HistogramOpts can be used.
+func Buckets(buckets []float64) Opt {
+ return opt{func(c *cfg) { c.defBuckets = buckets }}
+}
+
+// DefBuckets are the default Histogram buckets. The default buckets are
+// tailored to broadly measure the kafka timings (in seconds).
+var DefBuckets = []float64{0.001, 0.002, 0.004, 0.008, 0.016, 0.032, 0.064, 0.128, 0.256, 0.512, 1.024, 2.048}
+
+// A Histogram is an identifier for a kprom histogram that can be enabled.
+type Histogram uint8
+
+const (
+ ReadWait Histogram = iota // Enables {ns}_{ss}_read_wait_seconds.
+ ReadTime // Enables {ns}_{ss}_read_time_seconds.
+ WriteWait // Enables {ns}_{ss}_write_wait_seconds.
+ WriteTime // Enables {ns}_{ss}_write_time_seconds.
+ RequestDurationE2E // Enables {ns}_{ss}_request_duration_e2e_seconds.
+ RequestThrottled // Enables {ns}_{ss}_request_throttled_seconds.
+)
+
+// HistogramOpts allows histograms to be enabled with custom buckets.
+type HistogramOpts struct {
+ Enable Histogram
+ Buckets []float64
+}
+
+// HistogramsFromOpts allows the user full control of what histograms to enable
+// and define buckets to be used with each histogram.
+//
+// metrics, _ := kprom.NewMetrics(
+// kprom.HistogramsFromOpts(
+// kprom.HistogramOpts{
+// Enable: kprom.ReadWait,
+// Buckets: prometheus.LinearBuckets(10, 10, 8),
+// },
+// kprom.HistogramOpts{
+// Enable: kprom.ReadTime,
+// // kprom default buckets will be used
+// },
+// ),
+// )
+func HistogramsFromOpts(hs ...HistogramOpts) Opt {
+ return opt{func(c *cfg) {
+ c.histograms = make(map[Histogram][]float64)
+ for _, h := range hs {
+ c.histograms[h.Enable] = h.Buckets
+ }
+ }}
+}
+
+// Histograms sets the histograms to be enabled for kprom, overriding the
+// default of disabling all histograms.
+//
+// metrics, _ := kprom.NewMetrics(
+// kprom.Histograms(
+// kprom.RequestDurationE2E,
+// ),
+// )
+func Histograms(hs ...Histogram) Opt {
+ hos := make([]HistogramOpts, 0)
+ for _, h := range hs {
+ hos = append(hos, HistogramOpts{Enable: h})
+ }
+ return HistogramsFromOpts(hos...)
+}
+
+// A Detail is a label that can be set on fetch/produce metrics.
+type Detail uint8
+
+const (
+ ByNode Detail = iota // Include label "node_id" for fetch and produce metrics.
+ ByTopic // Include label "topic" for fetch and produce metrics.
+ Batches // Report number of fetched and produced batches.
+ Records // Report the number of fetched and produced records.
+ CompressedBytes // Report the number of fetched and produced compressed bytes.
+ UncompressedBytes // Report the number of fetched and produced uncompressed bytes.
+ ConsistentNaming // Renames {fetch,produce}_bytes_total to {fetch,produce}_uncompressed_bytes_total, making the names consistent with the CompressedBytes detail.
+)
+
+type fetchProduceOpts struct {
+ labels []string
+ batches bool
+ records bool
+ compressedBytes bool
+ uncompressedBytes bool
+ consistentNaming bool
+}
+
+// FetchAndProduceDetail determines details for fetch/produce metrics,
+// overriding the default of (UncompressedBytes, ByTopic, ByNode).
+func FetchAndProduceDetail(details ...Detail) Opt { + return opt{ + func(c *cfg) { + labelsDeduped := make(map[Detail]string) + c.fetchProduceOpts = fetchProduceOpts{} + for _, l := range details { + switch l { + case ByTopic: + labelsDeduped[ByTopic] = "topic" + case ByNode: + labelsDeduped[ByNode] = "node_id" + case Batches: + c.fetchProduceOpts.batches = true + case Records: + c.fetchProduceOpts.records = true + case UncompressedBytes: + c.fetchProduceOpts.uncompressedBytes = true + case CompressedBytes: + c.fetchProduceOpts.compressedBytes = true + case ConsistentNaming: + c.fetchProduceOpts.consistentNaming = true + } + } + var labels []string + for _, l := range labelsDeduped { + labels = append(labels, l) + } + c.fetchProduceOpts.labels = labels + }, + } +} diff --git a/vendor/github.com/twmb/franz-go/plugin/kprom/kprom.go b/vendor/github.com/twmb/franz-go/plugin/kprom/kprom.go new file mode 100644 index 000000000000..9d6a47004872 --- /dev/null +++ b/vendor/github.com/twmb/franz-go/plugin/kprom/kprom.go @@ -0,0 +1,510 @@ +// Package kprom provides prometheus plug-in metrics for a kgo client. +// +// This package tracks the following metrics under the following names, +// all metrics being counter vecs: +// +// #{ns}_connects_total{node_id="#{node}"} +// #{ns}_connect_errors_total{node_id="#{node}"} +// #{ns}_write_errors_total{node_id="#{node}"} +// #{ns}_write_bytes_total{node_id="#{node}"} +// #{ns}_read_errors_total{node_id="#{node}"} +// #{ns}_read_bytes_total{node_id="#{node}"} +// #{ns}_produce_bytes_total{node_id="#{node}",topic="#{topic}"} +// #{ns}_fetch_bytes_total{node_id="#{node}",topic="#{topic}"} +// #{ns}_buffered_produce_records_total +// #{ns}_buffered_fetch_records_total +// +// The above metrics can be expanded considerably with options in this package, +// allowing timings, uncompressed and compressed bytes, and different labels. +// +// This can be used in a client like so: +// +// m := kprom.NewMetrics("my_namespace") +// cl, err := kgo.NewClient( +// kgo.WithHooks(m), +// // ...other opts +// ) +// +// More examples are linked in the main project readme: https://github.com/twmb/franz-go/#metrics--logging +// +// By default, metrics are installed under the a new prometheus registry, but +// this can be overridden with the Registry option. +// +// Note that seed brokers use broker IDs prefixed with "seed_", with the number +// corresponding to which seed it is. 
+package kprom + +import ( + "net" + "net/http" + "time" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + "github.com/prometheus/client_golang/prometheus/promhttp" + + "github.com/twmb/franz-go/pkg/kgo" +) + +var ( // interface checks to ensure we implement the hooks properly + _ kgo.HookBrokerConnect = new(Metrics) + _ kgo.HookBrokerDisconnect = new(Metrics) + _ kgo.HookBrokerWrite = new(Metrics) + _ kgo.HookBrokerRead = new(Metrics) + _ kgo.HookProduceBatchWritten = new(Metrics) + _ kgo.HookFetchBatchRead = new(Metrics) + _ kgo.HookBrokerE2E = new(Metrics) + _ kgo.HookBrokerThrottle = new(Metrics) + _ kgo.HookNewClient = new(Metrics) + _ kgo.HookClientClosed = new(Metrics) +) + +// Metrics provides prometheus metrics +type Metrics struct { + cfg cfg + + // Connection + connConnectsTotal *prometheus.CounterVec + connConnectErrorsTotal *prometheus.CounterVec + connDisconnectsTotal *prometheus.CounterVec + + // Write + writeBytesTotal *prometheus.CounterVec + writeErrorsTotal *prometheus.CounterVec + writeWaitSeconds *prometheus.HistogramVec + writeTimeSeconds *prometheus.HistogramVec + + // Read + readBytesTotal *prometheus.CounterVec + readErrorsTotal *prometheus.CounterVec + readWaitSeconds *prometheus.HistogramVec + readTimeSeconds *prometheus.HistogramVec + + // Request E2E & Throttle + requestDurationE2ESeconds *prometheus.HistogramVec + requestThrottledSeconds *prometheus.HistogramVec + + // Produce + produceCompressedBytes *prometheus.CounterVec + produceUncompressedBytes *prometheus.CounterVec + produceBatchesTotal *prometheus.CounterVec + produceRecordsTotal *prometheus.CounterVec + + // Fetch + fetchCompressedBytes *prometheus.CounterVec + fetchUncompressedBytes *prometheus.CounterVec + fetchBatchesTotal *prometheus.CounterVec + fetchRecordsTotal *prometheus.CounterVec + + // Buffered + bufferedFetchRecords prometheus.GaugeFunc + bufferedProduceRecords prometheus.GaugeFunc +} + +// NewMetrics returns a new Metrics that adds prometheus metrics to the +// registry under the given namespace. +func NewMetrics(namespace string, opts ...Opt) *Metrics { + return &Metrics{cfg: newCfg(namespace, opts...)} +} + +// Registry returns the prometheus registry that metrics were added to. +// +// This is useful if you want the Metrics type to create its own registry for +// you to add additional metrics to. +func (m *Metrics) Registry() prometheus.Registerer { + return m.cfg.reg +} + +// Handler returns an http.Handler providing prometheus metrics. +func (m *Metrics) Handler() http.Handler { + return promhttp.HandlerFor(m.cfg.gatherer, m.cfg.handlerOpts) +} + +// OnNewClient implements the HookNewClient interface for metrics +// gathering. 
+// This method is meant to be called by the hook system and not by the user +func (m *Metrics) OnNewClient(client *kgo.Client) { + var ( + factory = promauto.With(m.cfg.reg) + namespace = m.cfg.namespace + subsystem = m.cfg.subsystem + constLabels prometheus.Labels + ) + if m.cfg.withClientLabel { + constLabels = make(prometheus.Labels) + constLabels["client_id"] = client.OptValue(kgo.ClientID).(string) + } + + // returns Hist buckets if set, otherwise defBucket + getHistogramBuckets := func(h Histogram) []float64 { + if buckets, ok := m.cfg.histograms[h]; ok && len(buckets) != 0 { + return buckets + } + return m.cfg.defBuckets + } + + // Connection + + m.connConnectsTotal = factory.NewCounterVec(prometheus.CounterOpts{ + Namespace: namespace, + Subsystem: subsystem, + ConstLabels: constLabels, + Name: "connects_total", + Help: "Total number of connections opened", + }, []string{"node_id"}) + + m.connConnectErrorsTotal = factory.NewCounterVec(prometheus.CounterOpts{ + Namespace: namespace, + Subsystem: subsystem, + ConstLabels: constLabels, + Name: "connect_errors_total", + Help: "Total number of connection errors", + }, []string{"node_id"}) + + m.connDisconnectsTotal = factory.NewCounterVec(prometheus.CounterOpts{ + Namespace: namespace, + Subsystem: subsystem, + ConstLabels: constLabels, + Name: "disconnects_total", + Help: "Total number of connections closed", + }, []string{"node_id"}) + + // Write + + m.writeBytesTotal = factory.NewCounterVec(prometheus.CounterOpts{ + Namespace: namespace, + Subsystem: subsystem, + ConstLabels: constLabels, + Name: "write_bytes_total", + Help: "Total number of bytes written", + }, []string{"node_id"}) + + m.writeErrorsTotal = factory.NewCounterVec(prometheus.CounterOpts{ + Namespace: namespace, + Subsystem: subsystem, + ConstLabels: constLabels, + Name: "write_errors_total", + Help: "Total number of write errors", + }, []string{"node_id"}) + + m.writeWaitSeconds = factory.NewHistogramVec(prometheus.HistogramOpts{ + Namespace: namespace, + Subsystem: subsystem, + ConstLabels: constLabels, + Name: "write_wait_seconds", + Help: "Time spent waiting to write to Kafka", + Buckets: getHistogramBuckets(WriteWait), + }, []string{"node_id"}) + + m.writeTimeSeconds = factory.NewHistogramVec(prometheus.HistogramOpts{ + Namespace: namespace, + Subsystem: subsystem, + ConstLabels: constLabels, + Name: "write_time_seconds", + Help: "Time spent writing to Kafka", + Buckets: getHistogramBuckets(WriteTime), + }, []string{"node_id"}) + + // Read + + m.readBytesTotal = factory.NewCounterVec(prometheus.CounterOpts{ + Namespace: namespace, + Subsystem: subsystem, + ConstLabels: constLabels, + Name: "read_bytes_total", + Help: "Total number of bytes read", + }, []string{"node_id"}) + + m.readErrorsTotal = factory.NewCounterVec(prometheus.CounterOpts{ + Namespace: namespace, + Subsystem: subsystem, + ConstLabels: constLabels, + Name: "read_errors_total", + Help: "Total number of read errors", + }, []string{"node_id"}) + + m.readWaitSeconds = factory.NewHistogramVec(prometheus.HistogramOpts{ + Namespace: namespace, + Subsystem: subsystem, + ConstLabels: constLabels, + Name: "read_wait_seconds", + Help: "Time spent waiting to read from Kafka", + Buckets: getHistogramBuckets(ReadWait), + }, []string{"node_id"}) + + m.readTimeSeconds = factory.NewHistogramVec(prometheus.HistogramOpts{ + Namespace: namespace, + Subsystem: subsystem, + ConstLabels: constLabels, + Name: "read_time_seconds", + Help: "Time spent reading from Kafka", + Buckets: getHistogramBuckets(ReadTime), + }, 
[]string{"node_id"}) + + // Request E2E duration & Throttle + + m.requestDurationE2ESeconds = factory.NewHistogramVec(prometheus.HistogramOpts{ + Namespace: namespace, + Subsystem: subsystem, + ConstLabels: constLabels, + Name: "request_duration_e2e_seconds", + Help: "Time from the start of when a request is written to the end of when the response for that request was fully read", + Buckets: getHistogramBuckets(RequestDurationE2E), + }, []string{"node_id"}) + + m.requestThrottledSeconds = factory.NewHistogramVec(prometheus.HistogramOpts{ + Namespace: namespace, + Subsystem: subsystem, + ConstLabels: constLabels, + Name: "request_throttled_seconds", + Help: "Time the request was throttled", + Buckets: getHistogramBuckets(RequestThrottled), + }, []string{"node_id"}) + + // Produce + + m.produceCompressedBytes = factory.NewCounterVec(prometheus.CounterOpts{ + Namespace: namespace, + Subsystem: subsystem, + ConstLabels: constLabels, + Name: "produce_compressed_bytes_total", + Help: "Total number of compressed bytes produced", + }, m.cfg.fetchProduceOpts.labels) + + produceUncompressedBytesName := "produce_bytes_total" + if m.cfg.fetchProduceOpts.consistentNaming { + produceUncompressedBytesName = "produce_uncompressed_bytes_total" + } + m.produceUncompressedBytes = factory.NewCounterVec(prometheus.CounterOpts{ + Namespace: namespace, + Subsystem: subsystem, + ConstLabels: constLabels, + Name: produceUncompressedBytesName, + Help: "Total number of uncompressed bytes produced", + }, m.cfg.fetchProduceOpts.labels) + + m.produceBatchesTotal = factory.NewCounterVec(prometheus.CounterOpts{ + Namespace: namespace, + Subsystem: subsystem, + ConstLabels: constLabels, + Name: "produce_batches_total", + Help: "Total number of batches produced", + }, m.cfg.fetchProduceOpts.labels) + + m.produceRecordsTotal = factory.NewCounterVec(prometheus.CounterOpts{ + Namespace: namespace, + Subsystem: subsystem, + ConstLabels: constLabels, + Name: "produce_records_total", + Help: "Total number of records produced", + }, m.cfg.fetchProduceOpts.labels) + + // Fetch + + m.fetchCompressedBytes = factory.NewCounterVec(prometheus.CounterOpts{ + Namespace: namespace, + Subsystem: subsystem, + ConstLabels: constLabels, + Name: "fetch_compressed_bytes_total", + Help: "Total number of compressed bytes fetched", + }, m.cfg.fetchProduceOpts.labels) + + fetchUncompressedBytesName := "fetch_bytes_total" + if m.cfg.fetchProduceOpts.consistentNaming { + fetchUncompressedBytesName = "fetch_uncompressed_bytes_total" + } + m.fetchUncompressedBytes = factory.NewCounterVec(prometheus.CounterOpts{ + Namespace: namespace, + Subsystem: subsystem, + ConstLabels: constLabels, + Name: fetchUncompressedBytesName, + Help: "Total number of uncompressed bytes fetched", + }, m.cfg.fetchProduceOpts.labels) + + m.fetchBatchesTotal = factory.NewCounterVec(prometheus.CounterOpts{ + Namespace: namespace, + Subsystem: subsystem, + ConstLabels: constLabels, + Name: "fetch_batches_total", + Help: "Total number of batches fetched", + }, m.cfg.fetchProduceOpts.labels) + + m.fetchRecordsTotal = factory.NewCounterVec(prometheus.CounterOpts{ + Namespace: namespace, + Subsystem: subsystem, + ConstLabels: constLabels, + Name: "fetch_records_total", + Help: "Total number of records fetched", + }, m.cfg.fetchProduceOpts.labels) + + // Buffers + + m.bufferedProduceRecords = factory.NewGaugeFunc( + prometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: subsystem, + ConstLabels: constLabels, + Name: "buffered_produce_records_total", + Help: "Total number of 
records buffered within the client ready to be produced", + }, + func() float64 { return float64(client.BufferedProduceRecords()) }, + ) + + m.bufferedFetchRecords = factory.NewGaugeFunc( + prometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: subsystem, + ConstLabels: constLabels, + Name: "buffered_fetch_records_total", + Help: "Total number of records buffered within the client ready to be consumed", + }, + func() float64 { return float64(client.BufferedFetchRecords()) }, + ) +} + +// OnClientClosed will unregister kprom metrics from kprom registerer +func (m *Metrics) OnClientClosed(*kgo.Client) { + _ = m.cfg.reg.Unregister(m.connConnectsTotal) + _ = m.cfg.reg.Unregister(m.connConnectErrorsTotal) + _ = m.cfg.reg.Unregister(m.connDisconnectsTotal) + _ = m.cfg.reg.Unregister(m.writeBytesTotal) + _ = m.cfg.reg.Unregister(m.writeErrorsTotal) + _ = m.cfg.reg.Unregister(m.writeWaitSeconds) + _ = m.cfg.reg.Unregister(m.writeTimeSeconds) + _ = m.cfg.reg.Unregister(m.readBytesTotal) + _ = m.cfg.reg.Unregister(m.readErrorsTotal) + _ = m.cfg.reg.Unregister(m.readWaitSeconds) + _ = m.cfg.reg.Unregister(m.readTimeSeconds) + _ = m.cfg.reg.Unregister(m.requestDurationE2ESeconds) + _ = m.cfg.reg.Unregister(m.requestThrottledSeconds) + _ = m.cfg.reg.Unregister(m.produceCompressedBytes) + _ = m.cfg.reg.Unregister(m.produceUncompressedBytes) + _ = m.cfg.reg.Unregister(m.produceBatchesTotal) + _ = m.cfg.reg.Unregister(m.produceRecordsTotal) + _ = m.cfg.reg.Unregister(m.fetchCompressedBytes) + _ = m.cfg.reg.Unregister(m.fetchUncompressedBytes) + _ = m.cfg.reg.Unregister(m.fetchBatchesTotal) + _ = m.cfg.reg.Unregister(m.fetchRecordsTotal) + _ = m.cfg.reg.Unregister(m.bufferedFetchRecords) + _ = m.cfg.reg.Unregister(m.bufferedProduceRecords) +} + +// OnBrokerConnect implements the HookBrokerConnect interface for metrics +// gathering. +// This method is meant to be called by the hook system and not by the user +func (m *Metrics) OnBrokerConnect(meta kgo.BrokerMetadata, _ time.Duration, _ net.Conn, err error) { + nodeId := kgo.NodeName(meta.NodeID) + if err != nil { + m.connConnectErrorsTotal.WithLabelValues(nodeId).Inc() + return + } + m.connConnectsTotal.WithLabelValues(nodeId).Inc() +} + +// OnBrokerDisconnect implements the HookBrokerDisconnect interface for metrics +// gathering. +// This method is meant to be called by the hook system and not by the user +func (m *Metrics) OnBrokerDisconnect(meta kgo.BrokerMetadata, _ net.Conn) { + nodeId := kgo.NodeName(meta.NodeID) + m.connDisconnectsTotal.WithLabelValues(nodeId).Inc() +} + +// OnBrokerThrottle implements the HookBrokerThrottle interface for metrics +// gathering. +// This method is meant to be called by the hook system and not by the user +func (m *Metrics) OnBrokerThrottle(meta kgo.BrokerMetadata, throttleInterval time.Duration, _ bool) { + if _, ok := m.cfg.histograms[RequestThrottled]; ok { + nodeId := kgo.NodeName(meta.NodeID) + m.requestThrottledSeconds.WithLabelValues(nodeId).Observe(throttleInterval.Seconds()) + } +} + +// OnProduceBatchWritten implements the HookProduceBatchWritten interface for +// metrics gathering. 
+// This method is meant to be called by the hook system and not by the user +func (m *Metrics) OnProduceBatchWritten(meta kgo.BrokerMetadata, topic string, _ int32, metrics kgo.ProduceBatchMetrics) { + labels := m.fetchProducerLabels(kgo.NodeName(meta.NodeID), topic) + if m.cfg.fetchProduceOpts.uncompressedBytes { + m.produceUncompressedBytes.With(labels).Add(float64(metrics.UncompressedBytes)) + } + if m.cfg.fetchProduceOpts.compressedBytes { + m.produceCompressedBytes.With(labels).Add(float64(metrics.CompressedBytes)) + } + if m.cfg.fetchProduceOpts.batches { + m.produceBatchesTotal.With(labels).Inc() + } + if m.cfg.fetchProduceOpts.records { + m.produceRecordsTotal.With(labels).Add(float64(metrics.NumRecords)) + } +} + +// OnFetchBatchRead implements the HookFetchBatchRead interface for metrics +// gathering. +// This method is meant to be called by the hook system and not by the user +func (m *Metrics) OnFetchBatchRead(meta kgo.BrokerMetadata, topic string, _ int32, metrics kgo.FetchBatchMetrics) { + labels := m.fetchProducerLabels(kgo.NodeName(meta.NodeID), topic) + if m.cfg.fetchProduceOpts.uncompressedBytes { + m.fetchUncompressedBytes.With(labels).Add(float64(metrics.UncompressedBytes)) + } + if m.cfg.fetchProduceOpts.compressedBytes { + m.fetchCompressedBytes.With(labels).Add(float64(metrics.CompressedBytes)) + } + if m.cfg.fetchProduceOpts.batches { + m.fetchBatchesTotal.With(labels).Inc() + } + if m.cfg.fetchProduceOpts.records { + m.fetchRecordsTotal.With(labels).Add(float64(metrics.NumRecords)) + } +} + +// // Nop hook for compat, logic moved to OnBrokerE2E +func (m *Metrics) OnBrokerRead(meta kgo.BrokerMetadata, _ int16, bytesRead int, _, _ time.Duration, err error) { +} + +// Nop hook for compat, logic moved to OnBrokerE2E +func (m *Metrics) OnBrokerWrite(meta kgo.BrokerMetadata, _ int16, bytesWritten int, _, _ time.Duration, err error) { +} + +// OnBrokerE2E implements the HookBrokerE2E interface for metrics gathering +// This method is meant to be called by the hook system and not by the user +func (m *Metrics) OnBrokerE2E(meta kgo.BrokerMetadata, _ int16, e2e kgo.BrokerE2E) { + nodeId := kgo.NodeName(meta.NodeID) + if e2e.WriteErr != nil { + m.writeErrorsTotal.WithLabelValues(nodeId).Inc() + return + } + m.writeBytesTotal.WithLabelValues(nodeId).Add(float64(e2e.BytesWritten)) + if _, ok := m.cfg.histograms[WriteWait]; ok { + m.writeWaitSeconds.WithLabelValues(nodeId).Observe(e2e.WriteWait.Seconds()) + } + if _, ok := m.cfg.histograms[WriteTime]; ok { + m.writeTimeSeconds.WithLabelValues(nodeId).Observe(e2e.TimeToWrite.Seconds()) + } + if e2e.ReadErr != nil { + m.readErrorsTotal.WithLabelValues(nodeId).Inc() + return + } + m.readBytesTotal.WithLabelValues(nodeId).Add(float64(e2e.BytesRead)) + if _, ok := m.cfg.histograms[ReadWait]; ok { + m.readWaitSeconds.WithLabelValues(nodeId).Observe(e2e.ReadWait.Seconds()) + } + if _, ok := m.cfg.histograms[ReadTime]; ok { + m.readTimeSeconds.WithLabelValues(nodeId).Observe(e2e.TimeToRead.Seconds()) + } + if _, ok := m.cfg.histograms[RequestDurationE2E]; ok { + m.requestDurationE2ESeconds.WithLabelValues(nodeId).Observe(e2e.DurationE2E().Seconds()) + } +} + +func (m *Metrics) fetchProducerLabels(nodeId, topic string) prometheus.Labels { + labels := make(prometheus.Labels, 2) + for _, l := range m.cfg.fetchProduceOpts.labels { + switch l { + case "topic": + labels[l] = topic + case "node_id": + labels[l] = nodeId + } + } + return labels +} diff --git a/vendor/modules.txt b/vendor/modules.txt index b68697db9182..b5df1af5a9ea 100644 --- 
a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -1366,7 +1366,7 @@ github.com/oschwald/geoip2-golang
 # github.com/oschwald/maxminddb-golang v1.13.0
 ## explicit; go 1.21
 github.com/oschwald/maxminddb-golang
-# github.com/pierrec/lz4/v4 v4.1.18
+# github.com/pierrec/lz4/v4 v4.1.21
 ## explicit; go 1.14
 github.com/pierrec/lz4/v4
 github.com/pierrec/lz4/v4/internal/lz4block
@@ -1602,6 +1602,21 @@ github.com/tklauser/go-sysconf
 # github.com/tklauser/numcpus v0.6.1
 ## explicit; go 1.13
 github.com/tklauser/numcpus
+# github.com/twmb/franz-go v1.17.1
+## explicit; go 1.21
+github.com/twmb/franz-go/pkg/kbin
+github.com/twmb/franz-go/pkg/kerr
+github.com/twmb/franz-go/pkg/kgo
+github.com/twmb/franz-go/pkg/kgo/internal/sticky
+github.com/twmb/franz-go/pkg/kversion
+github.com/twmb/franz-go/pkg/sasl
+# github.com/twmb/franz-go/pkg/kmsg v1.8.0
+## explicit; go 1.19
+github.com/twmb/franz-go/pkg/kmsg
+github.com/twmb/franz-go/pkg/kmsg/internal/kbin
+# github.com/twmb/franz-go/plugin/kprom v1.1.0
+## explicit; go 1.18
+github.com/twmb/franz-go/plugin/kprom
 # github.com/uber/jaeger-client-go v2.30.0+incompatible
 ## explicit
 github.com/uber/jaeger-client-go
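Note on the vendored kprom plugin: its Metrics value only does something once it is handed to a franz-go client as a hook, at which point OnNewClient registers the counters and histograms defined above and the other hooks populate them. The sketch below shows that wiring in isolation; it is not taken from this patch. The metric namespace, broker address, and listen port are placeholder values, and Loki's own Kafka components would presumably register these metrics on Loki's Prometheus registry rather than the plugin's default one.

// Sketch only: constructs a kgo client with the kprom hooks from the
// vendored plugin above. All literal values are placeholders.
package main

import (
	"log"
	"net/http"

	"github.com/twmb/franz-go/pkg/kgo"
	"github.com/twmb/franz-go/plugin/kprom"
)

func main() {
	// NewMetrics only stores the config; the counters and histograms in
	// kprom.go are created when OnNewClient fires inside kgo.NewClient.
	metrics := kprom.NewMetrics("example_kafka") // placeholder namespace

	// WithHooks hands the Metrics value to the client so franz-go can call
	// OnBrokerConnect, OnBrokerE2E, OnProduceBatchWritten, and so on.
	client, err := kgo.NewClient(
		kgo.SeedBrokers("localhost:9092"), // placeholder broker
		kgo.WithHooks(metrics),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// With no options, kprom registers on a registry it creates itself;
	// Handler() exposes that registry over HTTP.
	http.Handle("/metrics", metrics.Handler())
	log.Fatal(http.ListenAndServe(":8080", nil)) // placeholder port
}

If the metrics should land on an existing registry instead, the plugin's config options (see the vendored config.go in this patch) can be passed as additional arguments to NewMetrics; OnClientClosed then unregisters everything again when the client shuts down.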