---
version: "3"
services:
  zookeeper1:
    image: confluentinc/cp-zookeeper:5.5.1
    hostname: zookeeper1
    environment:
      ZOOKEEPER_SERVER_ID: 1
      ZOOKEEPER_CLIENT_PORT: 2181
      ZOOKEEPER_TICK_TIME: 2000
      ZOOKEEPER_SERVERS: zookeeper1:2888:3888
      KAFKA_JMX_PORT: 9999
      KAFKA_JMX_HOSTNAME: localhost
    ports:
      - 2181:2181
  kafka1:
    image: confluentinc/cp-kafka:5.5.1
    hostname: kafka1
    depends_on:
      - zookeeper1
    environment:
      KAFKA_BROKER_ID: 1
      KAFKA_ZOOKEEPER_CONNECT: zookeeper1:2181
      KAFKA_LISTENERS: PLAINTEXT://kafka1:19091,EXTERNAL://kafka1:9091
      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,EXTERNAL:PLAINTEXT
      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka1:19091,EXTERNAL://localhost:9091
      KAFKA_INTER_BROKER_LISTENER_NAME: PLAINTEXT
      KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0
      KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
      KAFKA_JMX_PORT: 9999
      KAFKA_JMX_HOSTNAME: kafka1
      KAFKA_BROKER_RACK: "0"
      KAFKA_AUTO_CREATE_TOPICS_ENABLE: "false"
    ports:
      - 9091:9091
      - 19091:19091
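  # kafka1 exposes two listeners: PLAINTEXT on kafka1:19091 for other containers
  # on the compose network (e.g. the Connect worker) and EXTERNAL advertised as
  # localhost:9091 for clients on the Docker host. A quick smoke test from the
  # host, assuming the Confluent CLI tools are on the PATH and "demo" is just an
  # example topic name (auto topic creation is disabled above):
  #   kafka-topics --bootstrap-server localhost:9091 --create --topic demo --partitions 1 --replication-factor 1
  #   kafka-console-producer --bootstrap-server localhost:9091 --topic demo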
  minio1:
    image: minio/minio
    hostname: minio1
    volumes:
      - minio1-data:/export
    ports:
      - 9001:9000
    environment:
      MINIO_ACCESS_KEY: AKIAIOSFODNN7EXAMPLE
      MINIO_SECRET_KEY: wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY
    command: server /export --compat
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:9000/minio/health/live"]
      interval: 30s
      timeout: 20s
      retries: 3
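  # MinIO stands in for S3 so the S3 sink/source connectors below can run fully
  # locally; its S3 API is published on the host as localhost:9001. A connector
  # needs an existing bucket; a sketch using the AWS CLI from the host
  # ("kafka-backup" is just an example bucket name):
  #   AWS_ACCESS_KEY_ID=AKIAIOSFODNN7EXAMPLE \
  #   AWS_SECRET_ACCESS_KEY=wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY \
  #   AWS_DEFAULT_REGION=us-east-1 \
  #   aws --endpoint-url http://localhost:9001 s3 mb s3://kafka-backup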
  schema-registry:
    image: confluentinc/cp-schema-registry:5.5.1
    ports:
      - "8081:8081"
    depends_on:
      - zookeeper1
      - kafka1
    environment:
      SCHEMA_REGISTRY_HOST_NAME: schema-registry
      SCHEMA_REGISTRY_KAFKASTORE_CONNECTION_URL: zookeeper1:2181
      # the "none" compatibility level makes testing easier; do not use in prod
      SCHEMA_REGISTRY_AVRO_COMPATIBILITY_LEVEL: none
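  # the registry's REST API is published on the host; a quick check once it is up:
  #   curl http://localhost:8081/subjects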
  connect1:
    image: confluentinc/cp-kafka-connect:5.5.1
    depends_on:
      - zookeeper1
      - kafka1
      - schema-registry
    ports:
      - 8083:8083
    environment:
      CONNECT_BOOTSTRAP_SERVERS: "PLAINTEXT://kafka1:19091"
      CONNECT_REST_PORT: 8083
      CONNECT_GROUP_ID: kafka-connect
      CONNECT_CONFIG_STORAGE_TOPIC: _connect-configs
      CONNECT_OFFSET_STORAGE_TOPIC: _connect-offsets
      CONNECT_STATUS_STORAGE_TOPIC: _connect-status
      # allow connectors to override client configs, e.g. to name their consumer groups
      CONNECT_CONNECTOR_CLIENT_CONFIG_OVERRIDE_POLICY: "All"
      CONNECT_KEY_CONVERTER: io.confluent.connect.avro.AvroConverter
      CONNECT_KEY_CONVERTER_SCHEMA_REGISTRY_URL: "http://schema-registry:8081"
      CONNECT_VALUE_CONVERTER: io.confluent.connect.avro.AvroConverter
      CONNECT_VALUE_CONVERTER_SCHEMA_REGISTRY_URL: "http://schema-registry:8081"
      # internal converter configs have been deprecated
      # CONNECT_INTERNAL_KEY_CONVERTER: "org.apache.kafka.connect.json.JsonConverter"
      # CONNECT_INTERNAL_VALUE_CONVERTER: "org.apache.kafka.connect.json.JsonConverter"
      CONNECT_REST_ADVERTISED_HOST_NAME: "connect1"
      CONNECT_LOG4J_ROOT_LOGLEVEL: "INFO"
      CONNECT_LOG4J_LOGGERS: "org.apache.kafka.connect.runtime.rest=INFO,org.reflections=ERROR,io.confluent.connect=TRACE"
      CONNECT_CONFIG_STORAGE_REPLICATION_FACTOR: "1"
      CONNECT_OFFSET_STORAGE_REPLICATION_FACTOR: "1"
      CONNECT_STATUS_STORAGE_REPLICATION_FACTOR: "1"
      CONNECT_PLUGIN_PATH: /usr/share/java,/usr/share/confluent-hub-components
      AWS_ACCESS_KEY_ID: AKIAIOSFODNN7EXAMPLE
      AWS_SECRET_KEY: wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY
    # in the command section, any $ is escaped as $$ to avoid the error
    # 'Invalid interpolation format for "command" option'
    command:
      - bash
      - -c
      - |
        echo "Installing connectors"
        confluent-hub install confluentinc/kafka-connect-s3:5.5.1 --no-prompt
        # needed for tombstone handling
        confluent-hub install confluentinc/connect-transforms:latest --no-prompt
        confluent-hub install confluentinc/kafka-connect-s3-source:1.3.1 --no-prompt
        #
        echo "Launching Kafka Connect worker"
        /etc/confluent/docker/run &
        #
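        # copy the locally built s3backup jar (mounted below from ./target/scala-2.13)
        # into the S3 connector's lib directory so the connector can load its classes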
        cp /etc/kafka/s3backup_2.13-0.1.0-SNAPSHOT.jar /usr/share/confluent-hub-components/confluentinc-kafka-connect-s3/lib
        sleep infinity
    volumes:
      - ./target/scala-2.13:/etc/kafka
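  # Connectors are managed through the worker's REST API on localhost:8083 once
  # the container logs show the worker has started. A minimal sketch for the S3
  # sink, assuming the example "kafka-backup" bucket and "demo" topic from the
  # comments above; the connector name "s3-sink-demo" is likewise only an example:
  #   curl -X POST -H "Content-Type: application/json" http://localhost:8083/connectors -d '{
  #     "name": "s3-sink-demo",
  #     "config": {
  #       "connector.class": "io.confluent.connect.s3.S3SinkConnector",
  #       "topics": "demo",
  #       "s3.bucket.name": "kafka-backup",
  #       "s3.region": "us-east-1",
  #       "store.url": "http://minio1:9000",
  #       "storage.class": "io.confluent.connect.s3.storage.S3Storage",
  #       "format.class": "io.confluent.connect.s3.format.avro.AvroFormat",
  #       "flush.size": "3"
  #     }
  #   }'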
volumes:
  minio1-data: