Spring Boot JDBC microservices cannot connect to MySQL in docker - spring-boot

I have two Spring Boot microservices that need to connect to a MySQL service, which is itself deployed in Docker. I'm using Docker Compose, but for some reason they cannot reach the database. I am using the service name in the connection string, but the connection still fails.
Here is the docker-compose.yml file:
version: "3.8"
services:
mysqldb:
container_name: mysqldb
image: mysql:8
restart: unless-stopped
env_file: ./.env
environment:
- MYSQL_ROOT_PASSWORD=eryrty45fgd3DE
- MYSQL_DATABASE=bank_db
- MYSQL_USER=java_user
- MYSQL_PASSWORD=dd0953
ports:
- 3307:3306
volumes:
- db:/var/lib/mysql
- ./scripts/schema.sql:/docker-entrypoint-initdb.d/1.sql
accounts-service:
container_name: accounts-service
depends_on:
- mysqldb
build:
context: ./accounts-service
dockerfile: Dockerfile
image: accounts-service:latest
restart: on-failure
env_file: ./.env
ports:
- 8000:8000
environment:
- MYSQLDB_URL=jdbc:mysql://mysqldb:3306/bank_db?useSSL=false
- MYSQLDB_USER=java_user
- MYSQL_PASSWORD=dd0953
- NOTIFY_SERVICE_URL=http://notify-service:8002
volumes:
- servs:/tmp
stdin_open: true
tty: true
transactions-service:
container_name: transactions-service
depends_on:
- mysqldb
- accounts-service
- notify-service
build:
context: ./transaction-service
dockerfile: Dockerfile
image: transactions-service:latest
restart: on-failure
env_file: ./.env
ports:
- 8001:8001
environment:
- MYSQLDB_URL=jdbc:mysql://mysqldb:3306/bank_db?useSSL=false
- MYSQLDB_USER=java_user
- MYSQL_PASSWORD=dd0953
- NOTIFY_SERVICE_URL=http://notify-service:8002
- ACCOUNT_SERVICE_URL=http://accounts-service:8000
volumes:
- servs:/tmp
stdin_open: true
tty: true
notify-service:
container_name: notify-service
build:
context: ./notification-service
dockerfile: Dockerfile
image: notify-service:latest
restart: on-failure
ports:
- 8002:8002
volumes:
- servs:/tmp
stdin_open: true
tty: true
volumes:
db:
servs:
Here is my connection string:
spring.datasource.url:jdbc:mysql://mysqldb:3306/bank_db?useSSL=false
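Since docker-compose already injects MYSQLDB_URL, MYSQLDB_USER and MYSQL_PASSWORD into the container, the datasource can be wired to those variables rather than hard-coded. A minimal sketch, assuming the settings live in an application.yml and that the variable names match the compose file exactly (the property shown above hard-codes the URL instead):
# application.yml sketch (assumption): resolve the datasource from the
# environment variables that docker-compose injects into the container
spring:
  datasource:
    url: ${MYSQLDB_URL}
    username: ${MYSQLDB_USER}
    password: ${MYSQL_PASSWORD}
    driver-class-name: com.mysql.cj.jdbc.Driver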
The stacktrace:
Caused by: java.net.ConnectException: Connection refused
accounts-service | at java.base/sun.nio.ch.Net.pollConnect(Native Method) ~[na:na]
accounts-service | at java.base/sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[na:na]
accounts-service | at java.base/sun.nio.ch.NioSocketImpl.timedFinishConnect(NioSocketImpl.java:542) ~[na:na]
accounts-service | at java.base/sun.nio.ch.NioSocketImpl.connect(NioSocketImpl.java:597) ~[na:na]
accounts-service | at java.base/java.net.SocksSocketImpl.connect(SocksSocketImpl.java:327) ~[na:na]
accounts-service | at java.base/java.net.Socket.connect(Socket.java:633) ~[na:na]
accounts-service | at com.mysql.cj.protocol.StandardSocketFactory.connect(StandardSocketFactory.java:156) ~[mysql-connector-java-8.0.28.jar!/:8.0.28]
accounts-service | at com.mysql.cj.protocol.a.NativeSocketConnection.connect(NativeSocketConnection.java:63) ~[mysql-connector-java-8.0.28.jar!/:8.0.28]
accounts-service | ... 127 common frames omitted
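A "Connection refused" at startup often just means accounts-service resolved mysqldb but nothing was accepting connections on 3306 yet: depends_on only waits for the mysqldb container to start, not for MySQL inside it to finish initializing. A sketch of a healthcheck-gated startup, assuming Docker Compose v2 (which honors the long depends_on form even with a 3.8 file) and that mysqladmin is available in the mysql:8 image:
services:
  mysqldb:
    # existing settings unchanged; only the healthcheck is added
    healthcheck:
      test: ["CMD-SHELL", "mysqladmin ping -h 127.0.0.1 -uroot -p$$MYSQL_ROOT_PASSWORD"]
      interval: 10s
      timeout: 5s
      retries: 10
  accounts-service:
    depends_on:
      mysqldb:
        condition: service_healthy   # start only after MySQL answers the ping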

Related

What is the syntax error in my docker-compose.yml and how do I fix it?

I am using IntelliJ IDEA 2022.1.3 (Ultimate Edition).
My application.properties file:
spring.datasource.url=jdbc:mysql://127.0.0.1:3306/scheduler
spring.datasource.username=root
spring.datasource.password=123456a#
spring.datasource.driver-class-name=com.mysql.cj.jdbc.Driver
spring.jpa.properties.hibernate.dialect= org.hibernate.dialect.MySQL8Dialect
# spring.jpa.database-platform=org.hibernate.dialect.MySQL8Dialect
# spring.jpa.database-platform=org.hibernate.dialect.MySQL8Dialect
# org.hibernate.dialect.MySQLInnoDBDialect
spring.batch.job.names=job
spring.batch.jdbc.initialize-schema=embedded
spring.batch.jdbc.isolation-level-for-create=default
spring.batch.jdbc.platform=mysql
spring.batch.jdbc.schema=classpath:/schema.sql
spring.batch.jdbc.table-prefix=
spring.batch.job.enabled=true
My docker-compose.yml file:
version: "3.8"
services:
mysqldb:
image: mysql:latest
restart: unless-stopped
env_file: ./.env
environment:
- MYSQL_ROOT_PASSWORD=$MYSQLDB_ROOT_PASSWORD
- MYSQL_DATABASE=$MYSQLDB_DATABASE
ports:
- $MYSQLDB_LOCAL_PORT:$MYSQLDB_DOCKER_PORT
volumes:
- db:/var/lib/mysql
app:
depends_on:
- mysqldb
build: ./scheduler
restart: on-failure
env_file: ./.env
ports:
- $SPRING_LOCAL_PORT:$SPRING_DOCKER_PORT
environment:
SPRING_APPLICATION_JSON: '{
"spring.datasource.url" : "jdbc:mysql://mysqldb:$MYSQLDB_DOCKER_PORT/$MYSQLDB_DATABASE?useSSL=false",
"spring.datasource.username" : "$MYSQLDB_USER",
"spring.datasource.password" : "$MYSQLDB_ROOT_PASSWORD",
"spring.jpa.properties.hibernate.dialect" : "org.hibernate.dialect.MySQL8Dialect",
"spring.jpa.hibernate.ddl-auto" : "update"
}'
volumes:
- .m2:/root/.m2
stdin_open: true
tty: true
volumes:
db:
I get a syntax error. How do I fix this error?
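Without the exact parser message it is hard to say which line is at fault, but the multi-line SPRING_APPLICATION_JSON string is the most quoting- and indentation-sensitive part of this file. One way to sidestep it entirely is to pass individual SPRING_DATASOURCE_* variables, which Spring Boot's relaxed binding maps onto spring.datasource.*; a sketch using the same .env names as above:
  app:
    depends_on:
      - mysqldb
    build: ./scheduler
    restart: on-failure
    env_file: ./.env
    ports:
      - $SPRING_LOCAL_PORT:$SPRING_DOCKER_PORT
    environment:
      # relaxed binding: SPRING_DATASOURCE_URL -> spring.datasource.url, etc.
      SPRING_DATASOURCE_URL: jdbc:mysql://mysqldb:$MYSQLDB_DOCKER_PORT/$MYSQLDB_DATABASE?useSSL=false
      SPRING_DATASOURCE_USERNAME: $MYSQLDB_USER
      SPRING_DATASOURCE_PASSWORD: $MYSQLDB_ROOT_PASSWORD
      SPRING_JPA_HIBERNATE_DDL_AUTO: update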

Port mapping using a gateway and microservices in Spring Boot

Maybe I do not fully understand how port mapping works with docker-compose and a very simple gateway. The whole project works fine except for the gateway.
My application.yml is the following:
spring:
  application:
    name: api-gateway
  cloud:
    gateway:
      routes:
        - id: usermanager
          uri: http://usermanager:1111
          predicates:
            - Path=/user/
        - id: catalogmanager
          uri: http://catalogmanager:2222
          predicates:
            - Path=/catalog/
        - id: paymanager
          uri: http://paymanager:3333
          predicates:
            - Path=/payment/**
While the docker-compose is:
version: '3.4'
x-common-variables: &common-variables
  DATASOURCE_USER: ${DB_USER}
  DATASOURCE_PASSWORD: ${DB_PASSWORD}
  DATASOURCE_PORT: ${DB_PORT}
services:
  apigateway:
    image: api-gateway
    ports:
      - "8080:8080"
  paymysqldb:
    container_name: paymysqldb
    image: mysql
    ports:
      - "3313:3306"
    environment:
      - MYSQL_DATABASE=${DB_DATABASE_PAY}
      - MYSQL_USER=${DB_USER}
      - MYSQL_PASSWORD=${DB_PASSWORD}
      - MYSQL_ROOT_PASSWORD=${DB_ROOT_PASSWORD}
    volumes:
      - paystorage:/var/lib/mysql
  usermysqldb:
    container_name: usermysqldb
    image: mysql
    ports:
      - "3311:3306"
    environment:
      - MYSQL_DATABASE=${DB_DATABASE_USER}
      - MYSQL_USER=${DB_USER}
      - MYSQL_PASSWORD=${DB_PASSWORD}
      - MYSQL_ROOT_PASSWORD=${DB_ROOT_PASSWORD}
    volumes:
      - userstorage:/var/lib/mysql
  catalogmysqldb:
    container_name: catalogmysqldb
    image: mysql
    ports:
      - "3312:3306"
    environment:
      - MYSQL_DATABASE=${DB_DATABASE_CATALOG}
      - MYSQL_USER=${DB_USER}
      - MYSQL_PASSWORD=${DB_PASSWORD}
      - MYSQL_ROOT_PASSWORD=${DB_ROOT_PASSWORD}
    volumes:
      - catalogstorage:/var/lib/mysql
  paymanager:
    container_name: paymanager
    image: arausa/payimage
    build:
      context: .
      dockerfile: MicroServices/PaymentManager/Dockerfile
    depends_on:
      - paymysqldb
    ports:
      - "3333:3333"
    restart: always
    environment:
      <<: *common-variables
      PM_DATASOURCE_HOST: ${DB_HOST_PAY}
      PM_DATASOURCE_NAME: ${DB_DATABASE_PAY}
  usermanager:
    container_name: usermanager
    image: arausa/userimage
    build:
      context: .
      dockerfile: MicroServices/UserManager/Dockerfile
    depends_on:
      - usermysqldb
    ports:
      - "1111:1111"
    restart: always
    environment:
      <<: *common-variables
      UM_DATASOURCE_HOST: ${DB_HOST_USER}
      UM_DATASOURCE_NAME: ${DB_DATABASE_USER}
  catalogmanager:
    container_name: catalogmanager
    image: arausa/catalogimage
    build:
      context: .
      dockerfile: MicroServices/CatalogManager/Dockerfile
    depends_on:
      - catalogmysqldb
    ports:
      - "2222:2222"
    restart: always
    environment:
      <<: *common-variables
      CM_DATASOURCE_HOST: ${DB_HOST_CATALOG}
      CM_DATASOURCE_NAME: ${DB_DATABASE_CATALOG}
  # Kafka uses ZooKeeper to keep track of the brokers, the network topology and synchronization info
  zookeeper:
    image: wurstmeister/zookeeper
  # identifies the Kafka broker
  kafka:
    image: wurstmeister/kafka
    ports:
      - "9092:9092" # default port for the Kafka broker
    environment:
      KAFKA_ADVERTISED_HOST_NAME: kafka
      KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181 # tells Kafka where ZooKeeper is running
volumes:
  userstorage:
  catalogstorage:
  paystorage:
The error returned on a call like http://localhost:8080/user/getAllUser is the following:
ERROR 1 --- [or-http-epoll-3] a.w.r.e.AbstractErrorWebExceptionHandler : [4b1c3833-1] 500 Server Error for HTTP GET "/user/getAllUsers"
apigateway_1 |
apigateway_1 | io.netty.channel.AbstractChannel$AnnotatedConnectException: finishConnect(..) failed: Connection refused: usermanager/172.24.0.9:1111
apigateway_1 | Suppressed: reactor.core.publisher.FluxOnAssembly$OnAssemblyException:
apigateway_1 | Error has been observed at the following site(s):
apigateway_1 | *__checkpoint ⇢ org.springframework.cloud.gateway.filter.WeightCalculatorWebFilter [DefaultWebFilterChain]
apigateway_1 | *__checkpoint ⇢ HTTP GET "/user/getAllUsers" [ExceptionHandlingWebHandler]
apigateway_1 | Original Stack Trace:
apigateway_1 | Caused by: java.net.ConnectException: finishConnect(..) failed: Connection refused
apigateway_1 | at io.netty.channel.unix.Errors.newConnectException0(Errors.java:155) ~[netty-transport-native-unix-common-4.1.74.Final.jar!/:4.1.74.Final]
apigateway_1 | at io.netty.channel.unix.Errors.handleConnectErrno(Errors.java:128) ~[netty-transport-native-unix-common-4.1.74.Final.jar!/:4.1.74.Final]
apigateway_1 | at io.netty.channel.unix.Socket.finishConnect(Socket.java:320) ~[netty-transport-native-unix-common-4.1.74.Final.jar!/:4.1.74.Final]
I'm trying to create a simple gateway so that requests made to localhost:8080 are routed to the different microservices. Can someone explain to me whether something is configured incorrectly?
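The "Connection refused: usermanager/172.24.0.9:1111" suggests the gateway resolved the service name but nothing was listening on port 1111 in that container at the time of the call. As a reference point, here is a sketch of the route configuration with consistent /** predicates, assuming each downstream service's server.port really matches the routed port (e.g. 1111 for usermanager) and all services share the same Compose network:
spring:
  cloud:
    gateway:
      routes:
        - id: usermanager
          uri: http://usermanager:1111
          predicates:
            - Path=/user/**        # ** also matches sub-paths such as /user/getAllUsers
        - id: catalogmanager
          uri: http://catalogmanager:2222
          predicates:
            - Path=/catalog/**
        - id: paymanager
          uri: http://paymanager:3333
          predicates:
            - Path=/payment/**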

Run docker-compose without access to environment variables

Is it possible to run a docker-compose.yml file without having access to the environment variables (on Windows)? I have Docker Desktop installed successfully. Below is the docker-compose.yml.
Best,
---
version: '3.8'
services:
  zookeeper:
    image: confluentinc/cp-zookeeper:5.5.1
    hostname: zookeeper
    container_name: zookeeper
    ports:
      - "2181:2181"
    environment:
      ZOOKEEPER_CLIENT_PORT: 2181
      ZOOKEEPER_TICK_TIME: 2000
  broker:
    image: confluentinc/cp-server:5.5.1
    hostname: broker
    container_name: broker
    depends_on:
      - zookeeper
    ports:
      - "9092:9092"
      - "9101:9101"
    environment:
      KAFKA_BROKER_ID: 1
      KAFKA_ZOOKEEPER_CONNECT: 'zookeeper:2181'
      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT
      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://broker:29092,PLAINTEXT_HOST://localhost:9092
      KAFKA_METRIC_REPORTERS: io.confluent.metrics.reporter.ConfluentMetricsReporter
      KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
      KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0
      KAFKA_CONFLUENT_LICENSE_TOPIC_REPLICATION_FACTOR: 1
      KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1
      KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1
      KAFKA_JMX_PORT: 9101
      CONFLUENT_METRICS_REPORTER_BOOTSTRAP_SERVERS: broker:29092
      CONFLUENT_METRICS_REPORTER_ZOOKEEPER_CONNECT: zookeeper:2181
      CONFLUENT_METRICS_REPORTER_TOPIC_REPLICAS: 1
      CONFLUENT_METRICS_ENABLE: 'true'
      CONFLUENT_SUPPORT_CUSTOMER_ID: 'anonymous'
  schema-registry:
    image: confluentinc/cp-schema-registry:5.5.1
    hostname: schema-registry
    container_name: schema-registry
    depends_on:
      - zookeeper
      - broker
    ports:
      - "8081:8081"
    environment:
      SCHEMA_REGISTRY_HOST_NAME: schema-registry
      SCHEMA_REGISTRY_KAFKASTORE_CONNECTION_URL: 'zookeeper:2181'
  connect:
    image: cnfldemos/cp-server-connect-datagen:0.3.2-5.5.0
    hostname: connect
    container_name: connect
    depends_on:
      - zookeeper
      - broker
      - schema-registry
    ports:
      - "8083:8083"
    environment:
      CONNECT_BOOTSTRAP_SERVERS: 'broker:29092'
      CONNECT_REST_ADVERTISED_HOST_NAME: connect
      CONNECT_REST_PORT: 8083
      CONNECT_GROUP_ID: compose-connect-group
      CONNECT_CONFIG_STORAGE_TOPIC: docker-connect-configs
      CONNECT_CONFIG_STORAGE_REPLICATION_FACTOR: 1
      CONNECT_OFFSET_FLUSH_INTERVAL_MS: 10000
      CONNECT_OFFSET_STORAGE_TOPIC: docker-connect-offsets
      CONNECT_OFFSET_STORAGE_REPLICATION_FACTOR: 1
      CONNECT_STATUS_STORAGE_TOPIC: docker-connect-status
      CONNECT_STATUS_STORAGE_REPLICATION_FACTOR: 1
      CONNECT_KEY_CONVERTER: org.apache.kafka.connect.storage.StringConverter
      CONNECT_VALUE_CONVERTER: io.confluent.connect.avro.AvroConverter
      CONNECT_VALUE_CONVERTER_SCHEMA_REGISTRY_URL: http://schema-registry:8081
      CONNECT_INTERNAL_KEY_CONVERTER: "org.apache.kafka.connect.json.JsonConverter"
      CONNECT_INTERNAL_VALUE_CONVERTER: "org.apache.kafka.connect.json.JsonConverter"
      CONNECT_ZOOKEEPER_CONNECT: 'zookeeper:2181'
      # CLASSPATH required due to CC-2422
      CLASSPATH: /usr/share/java/monitoring-interceptors/monitoring-interceptors-5.5.1.jar
      CONNECT_PRODUCER_INTERCEPTOR_CLASSES: "io.confluent.monitoring.clients.interceptor.MonitoringProducerInterceptor"
      CONNECT_CONSUMER_INTERCEPTOR_CLASSES: "io.confluent.monitoring.clients.interceptor.MonitoringConsumerInterceptor"
      CONNECT_PLUGIN_PATH: "/usr/share/java,/usr/share/confluent-hub-components"
      CONNECT_LOG4J_LOGGERS: org.apache.zookeeper=ERROR,org.I0Itec.zkclient=ERROR,org.reflections=ERROR
  control-center:
    image: confluentinc/cp-enterprise-control-center:5.5.1
    hostname: control-center
    container_name: control-center
    depends_on:
      - zookeeper
      - broker
      - schema-registry
      - connect
      - ksqldb-server
    ports:
      - "9021:9021"
    environment:
      CONTROL_CENTER_BOOTSTRAP_SERVERS: 'broker:29092'
      CONTROL_CENTER_ZOOKEEPER_CONNECT: 'zookeeper:2181'
      CONTROL_CENTER_CONNECT_CLUSTER: 'connect:8083'
      CONTROL_CENTER_KSQL_KSQLDB1_URL: "http://ksqldb-server:8088"
      CONTROL_CENTER_KSQL_KSQLDB1_ADVERTISED_URL: "http://localhost:8088"
      CONTROL_CENTER_SCHEMA_REGISTRY_URL: "http://schema-registry:8081"
      CONTROL_CENTER_REPLICATION_FACTOR: 1
      CONTROL_CENTER_INTERNAL_TOPICS_PARTITIONS: 1
      CONTROL_CENTER_MONITORING_INTERCEPTOR_TOPIC_PARTITIONS: 1
      CONFLUENT_METRICS_TOPIC_REPLICATION: 1
      PORT: 9021
  ksqldb-server:
    image: confluentinc/cp-ksqldb-server:5.5.1
    hostname: ksqldb-server
    container_name: ksqldb-server
    depends_on:
      - broker
      - connect
    ports:
      - "8088:8088"
    environment:
      KSQL_CONFIG_DIR: "/etc/ksql"
      KSQL_BOOTSTRAP_SERVERS: "broker:29092"
      KSQL_HOST_NAME: ksqldb-server
      KSQL_LISTENERS: "http://0.0.0.0:8088"
      KSQL_CACHE_MAX_BYTES_BUFFERING: 0
      KSQL_KSQL_SCHEMA_REGISTRY_URL: "http://schema-registry:8081"
      KSQL_PRODUCER_INTERCEPTOR_CLASSES: "io.confluent.monitoring.clients.interceptor.MonitoringProducerInterceptor"
      KSQL_CONSUMER_INTERCEPTOR_CLASSES: "io.confluent.monitoring.clients.interceptor.MonitoringConsumerInterceptor"
      KSQL_KSQL_CONNECT_URL: "http://connect:8083"
  ksqldb-cli:
    image: confluentinc/cp-ksqldb-cli:5.5.1
    container_name: ksqldb-cli
    depends_on:
      - broker
      - connect
      - ksqldb-server
    entrypoint: /bin/sh
    tty: true
  ksql-datagen:
    image: confluentinc/ksqldb-examples:5.5.1
    hostname: ksql-datagen
    container_name: ksql-datagen
    depends_on:
      - ksqldb-server
      - broker
      - schema-registry
      - connect
    command: "bash -c 'echo Waiting for Kafka to be ready... &&
              cub kafka-ready -b broker:29092 1 40 &&
              echo Waiting for Confluent Schema Registry to be ready... &&
              cub sr-ready schema-registry 8081 40 &&
              echo Waiting a few seconds for topic creation to finish... &&
              sleep 11 &&
              tail -f /dev/null'"
    environment:
      KSQL_CONFIG_DIR: "/etc/ksql"
      STREAMS_BOOTSTRAP_SERVERS: broker:29092
      STREAMS_SCHEMA_REGISTRY_HOST: schema-registry
      STREAMS_SCHEMA_REGISTRY_PORT: 8081
  rest-proxy:
    image: confluentinc/cp-kafka-rest:5.5.1
    depends_on:
      - zookeeper
      - broker
      - schema-registry
    ports:
      - 8082:8082
    hostname: rest-proxy
    container_name: rest-proxy
    environment:
      KAFKA_REST_HOST_NAME: rest-proxy
      KAFKA_REST_BOOTSTRAP_SERVERS: 'broker:29092'
      KAFKA_REST_LISTENERS: "http://0.0.0.0:8082"
      KAFKA_REST_SCHEMA_REGISTRY_URL: 'http://schema-registry:8081'
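For reference, this particular file interpolates no ${VAR} values from the host, so it should start as-is without any environment variables being set. For files that do, Compose supports inline defaults so they still run on a machine where nothing is exported; a small sketch (the service and variable names here are only illustrative):
services:
  mysqldb:
    image: mysql:8
    environment:
      # ${VAR:-default} takes the host value when it is set, otherwise the literal default
      - MYSQL_ROOT_PASSWORD=${MYSQLDB_ROOT_PASSWORD:-changeme}
    ports:
      - "${MYSQLDB_LOCAL_PORT:-3307}:3306"
Alternatively, values can be kept in a .env file next to the compose file, which Compose reads automatically.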

Why does the connect-datagen container of my confluent kafka docker-compose disconnect after a couple of minutes?

I am trying to run a bare-minimum Confluent Community example on Docker for Windows (Toolbox) using the example given here:
https://docs.confluent.io/current/quickstart/cos-docker-quickstart.html
It seems like all the components start, but only the connect container fails and does not work.
This is my docker-compose.yml:
version: '2'
services:
  zookeeper:
    image: confluentinc/cp-zookeeper:6.1.1
    hostname: zookeeper
    container_name: zookeeper
    ports:
      - "2181:2181"
    environment:
      ZOOKEEPER_CLIENT_PORT: 2181
      ZOOKEEPER_TICK_TIME: 2000
  broker:
    image: confluentinc/cp-server:6.1.1
    hostname: broker
    container_name: broker
    depends_on:
      - zookeeper
    ports:
      - "9092:9092"
      - "9101:9101"
    environment:
      KAFKA_BROKER_ID: 1
      KAFKA_ZOOKEEPER_CONNECT: 'zookeeper:2181'
      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT
      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://broker:29092,PLAINTEXT_HOST://localhost:9092
      KAFKA_METRIC_REPORTERS: io.confluent.metrics.reporter.ConfluentMetricsReporter
      KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
      KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0
      KAFKA_CONFLUENT_LICENSE_TOPIC_REPLICATION_FACTOR: 1
      KAFKA_CONFLUENT_BALANCER_TOPIC_REPLICATION_FACTOR: 1
      KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1
      KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1
      KAFKA_JMX_PORT: 9101
      KAFKA_JMX_HOSTNAME: localhost
      KAFKA_CONFLUENT_SCHEMA_REGISTRY_URL: http://schema-registry:8081
      CONFLUENT_METRICS_REPORTER_BOOTSTRAP_SERVERS: broker:29092
      CONFLUENT_METRICS_REPORTER_TOPIC_REPLICAS: 1
      CONFLUENT_METRICS_ENABLE: 'true'
      CONFLUENT_SUPPORT_CUSTOMER_ID: 'anonymous'
  schema-registry:
    image: confluentinc/cp-schema-registry:6.1.1
    hostname: schema-registry
    container_name: schema-registry
    depends_on:
      - broker
    ports:
      - "8081:8081"
    environment:
      SCHEMA_REGISTRY_HOST_NAME: schema-registry
      SCHEMA_REGISTRY_KAFKASTORE_BOOTSTRAP_SERVERS: 'broker:29092'
      SCHEMA_REGISTRY_LISTENERS: http://0.0.0.0:8081
  connect:
    image: cnfldemos/cp-server-connect-datagen:0.4.0-6.1.0
    hostname: connect
    container_name: connect
    depends_on:
      - broker
      - schema-registry
    ports:
      - "8083:8083"
    environment:
      CONNECT_BOOTSTRAP_SERVERS: 'broker:29092'
      CONNECT_REST_ADVERTISED_HOST_NAME: connect
      CONNECT_REST_PORT: 8083
      CONNECT_GROUP_ID: compose-connect-group
      CONNECT_CONFIG_STORAGE_TOPIC: docker-connect-configs
      CONNECT_CONFIG_STORAGE_REPLICATION_FACTOR: 1
      CONNECT_OFFSET_FLUSH_INTERVAL_MS: 10000
      CONNECT_OFFSET_STORAGE_TOPIC: docker-connect-offsets
      CONNECT_OFFSET_STORAGE_REPLICATION_FACTOR: 1
      CONNECT_STATUS_STORAGE_TOPIC: docker-connect-status
      CONNECT_STATUS_STORAGE_REPLICATION_FACTOR: 1
      CONNECT_KEY_CONVERTER: org.apache.kafka.connect.storage.StringConverter
      CONNECT_VALUE_CONVERTER: io.confluent.connect.avro.AvroConverter
      CONNECT_VALUE_CONVERTER_SCHEMA_REGISTRY_URL: http://schema-registry:8081
      # CLASSPATH required due to CC-2422
      CLASSPATH: /usr/share/java/monitoring-interceptors/monitoring-interceptors-6.1.1.jar
      CONNECT_PRODUCER_INTERCEPTOR_CLASSES: "io.confluent.monitoring.clients.interceptor.MonitoringProducerInterceptor"
      CONNECT_CONSUMER_INTERCEPTOR_CLASSES: "io.confluent.monitoring.clients.interceptor.MonitoringConsumerInterceptor"
      CONNECT_PLUGIN_PATH: "/usr/share/java,/usr/share/confluent-hub-components"
      CONNECT_LOG4J_LOGGERS: org.apache.zookeeper=ERROR,org.I0Itec.zkclient=ERROR,org.reflections=ERROR
  control-center:
    image: confluentinc/cp-enterprise-control-center:6.1.1
    hostname: control-center
    container_name: control-center
    depends_on:
      - broker
      - schema-registry
      - connect
      - ksqldb-server
    ports:
      - "9021:9021"
    environment:
      CONTROL_CENTER_BOOTSTRAP_SERVERS: 'broker:29092'
      CONTROL_CENTER_CONNECT_CLUSTER: 'connect:8083'
      CONTROL_CENTER_KSQL_KSQLDB1_URL: "http://ksqldb-server:8088"
      CONTROL_CENTER_KSQL_KSQLDB1_ADVERTISED_URL: "http://localhost:8088"
      CONTROL_CENTER_SCHEMA_REGISTRY_URL: "http://schema-registry:8081"
      CONTROL_CENTER_REPLICATION_FACTOR: 1
      CONTROL_CENTER_INTERNAL_TOPICS_PARTITIONS: 1
      CONTROL_CENTER_MONITORING_INTERCEPTOR_TOPIC_PARTITIONS: 1
      CONFLUENT_METRICS_TOPIC_REPLICATION: 1
      PORT: 9021
  ksqldb-server:
    image: confluentinc/cp-ksqldb-server:6.1.1
    hostname: ksqldb-server
    container_name: ksqldb-server
    depends_on:
      - broker
      - connect
    ports:
      - "8088:8088"
    environment:
      KSQL_CONFIG_DIR: "/etc/ksql"
      KSQL_BOOTSTRAP_SERVERS: "broker:29092"
      KSQL_HOST_NAME: ksqldb-server
      KSQL_LISTENERS: "http://0.0.0.0:8088"
      KSQL_CACHE_MAX_BYTES_BUFFERING: 0
      KSQL_KSQL_SCHEMA_REGISTRY_URL: "http://schema-registry:8081"
      KSQL_PRODUCER_INTERCEPTOR_CLASSES: "io.confluent.monitoring.clients.interceptor.MonitoringProducerInterceptor"
      KSQL_CONSUMER_INTERCEPTOR_CLASSES: "io.confluent.monitoring.clients.interceptor.MonitoringConsumerInterceptor"
      KSQL_KSQL_CONNECT_URL: "http://connect:8083"
      KSQL_KSQL_LOGGING_PROCESSING_TOPIC_REPLICATION_FACTOR: 1
      KSQL_KSQL_LOGGING_PROCESSING_TOPIC_AUTO_CREATE: 'true'
      KSQL_KSQL_LOGGING_PROCESSING_STREAM_AUTO_CREATE: 'true'
  ksqldb-cli:
    image: confluentinc/cp-ksqldb-cli:6.1.1
    container_name: ksqldb-cli
    depends_on:
      - broker
      - connect
      - ksqldb-server
    entrypoint: /bin/sh
    tty: true
  ksql-datagen:
    image: confluentinc/ksqldb-examples:6.1.1
    hostname: ksql-datagen
    container_name: ksql-datagen
    depends_on:
      - ksqldb-server
      - broker
      - schema-registry
      - connect
    command: "bash -c 'echo Waiting for Kafka to be ready... && \
              cub kafka-ready -b broker:29092 1 40 && \
              echo Waiting for Confluent Schema Registry to be ready... && \
              cub sr-ready schema-registry 8081 40 && \
              echo Waiting a few seconds for topic creation to finish... && \
              sleep 11 && \
              tail -f /dev/null'"
    environment:
      KSQL_CONFIG_DIR: "/etc/ksql"
      STREAMS_BOOTSTRAP_SERVERS: broker:29092
      STREAMS_SCHEMA_REGISTRY_HOST: schema-registry
      STREAMS_SCHEMA_REGISTRY_PORT: 8081
  rest-proxy:
    image: confluentinc/cp-kafka-rest:6.1.1
    depends_on:
      - broker
      - schema-registry
    ports:
      - 8082:8082
    hostname: rest-proxy
    container_name: rest-proxy
    environment:
      KAFKA_REST_HOST_NAME: rest-proxy
      KAFKA_REST_BOOTSTRAP_SERVERS: 'broker:29092'
      KAFKA_REST_LISTENERS: "http://0.0.0.0:8082"
      KAFKA_REST_SCHEMA_REGISTRY_URL: 'http://schema-registry:8081'
Can you help me?
I had the same problem and found a solution.
We should focus on the Docker container status "exited 137", which means the container ran out of memory. Therefore you must allocate at least 6 GB of memory to Docker.
You can also check the Prerequisites section of the documentation:
https://docs.confluent.io/platform/current/quickstart/cos-docker-quickstart.html
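To make the memory behaviour explicit rather than relying only on the Docker Desktop VM setting (which is configured in Docker Desktop's own settings), the version '2' file format used above also accepts a per-container cap; a container that exceeds it is OOM-killed and reported with exit code 137. A sketch (the 2g figure is only an example):
services:
  connect:
    image: cnfldemos/cp-server-connect-datagen:0.4.0-6.1.0
    # container is OOM-killed (exit code 137) if it exceeds this limit
    mem_limit: 2g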

Can't connect my spring-boot application running in a docker container to the cassandra container

I tried pinging the cassandra container from the spring-boot container and it works fine. But while bringing up my application I am getting an error like:
NoHostAvailableException: All host(s) tried for query failed (tried:
cassandra/172.21.0.3:9042
(com.datastax.driver.core.exceptions.TransportException:
[cassandra/172.21.0.3:9042] Cannot connect))
docker-compose.yml
version: "2"
services:
zookeeper:
image: confluentinc/cp-zookeeper:5.3.1
hostname: zookeeper
container_name: zookeeper
ports:
- "2181:2181"
environment:
ZOOKEEPER_CLIENT_PORT: 2181
ZOOKEEPER_TICK_TIME: 2000
volumes:
- ./create_topic.sh:/create_topics.sh
broker:
image: confluentinc/cp-enterprise-kafka:5.3.1
hostname: broker
container_name: broker
depends_on:
- zookeeper
ports:
- "29092:29092"
- "9092:9092"
environment:
KAFKA_BROKER_ID: 1
KAFKA_ZOOKEEPER_CONNECT: "zookeeper:2181"
KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT
KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://broker:29092,PLAINTEXT_HOST://localhost:9092
KAFKA_METRIC_REPORTERS: io.confluent.metrics.reporter.ConfluentMetricsReporter
KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0
CONFLUENT_METRICS_REPORTER_BOOTSTRAP_SERVERS: broker:29092
CONFLUENT_METRICS_REPORTER_ZOOKEEPER_CONNECT: zookeeper:2181
CONFLUENT_METRICS_REPORTER_TOPIC_REPLICAS: 1
CONFLUENT_METRICS_ENABLE: "true"
CONFLUENT_SUPPORT_CUSTOMER_ID: "anonymous"
control-center:
image: confluentinc/cp-enterprise-control-center:5.3.1
hostname: control-center
container_name: control-center
depends_on:
- zookeeper
- broker
ports:
- "9021:9021"
environment:
CONTROL_CENTER_BOOTSTRAP_SERVERS: "broker:29092"
CONTROL_CENTER_ZOOKEEPER_CONNECT: "zookeeper:2181"
CONTROL_CENTER_REPLICATION_FACTOR: 1
CONTROL_CENTER_INTERNAL_TOPICS_PARTITIONS: 1
CONTROL_CENTER_MONITORING_INTERCEPTOR_TOPIC_PARTITIONS: 1
CONFLUENT_METRICS_TOPIC_REPLICATION: 1
PORT: 9021
cassandra:
image: ************/cassandra_goldendata_baseimage:1.0.0
container_name: cassandra
ports:
- "9042:9042"
item-service:
image: item-service
depends_on:
- zookeeper
- broker
- cassandra
container_name: item-servicecontainer
ports:
- "4545:8080"
