What do you do when one minio node has a full disk? - minio

I have a cluster of 8 MinIO nodes. Each node has a 1.6 to 1.9 TB drive in it. Node 6 has less than a megabyte of free space, while the rest have around 200 GB to 1 TB of free space. Is there any way to trigger a rebalancing of the used resources?
MinIO is run through Docker; here is the stack definition:
version: '3.7'
services:
  minio1:
    image: minio/minio:RELEASE.2021-03-17T02-33-02Z
    network_mode: "host"
    hostname: minio1
    volumes:
      - /opt/iqdata:/data
    ports:
      - "9001:9000"
    command: server http://minio{1...8}/data
    secrets:
      - secret_key
      - access_key
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:9000/minio/health/live"]
      interval: 30s
      timeout: 20s
      retries: 3
    deploy:
      replicas: 1
      restart_policy:
        delay: 10s
      placement:
        constraints:
          - node.labels.minio1==true
  minio2:
    image: minio/minio:RELEASE.2021-03-17T02-33-02Z
    network_mode: "host"
    hostname: minio2
    volumes:
      - /opt/iqdata:/data
    ports:
      - "9002:9000"
    command: server http://minio{1...8}/data
    secrets:
      - secret_key
      - access_key
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:9000/minio/health/live"]
      interval: 30s
      timeout: 20s
      retries: 3
    deploy:
      replicas: 1
      restart_policy:
        delay: 10s
      placement:
        constraints:
          - node.labels.minio2==true
  minio3:
    image: minio/minio:RELEASE.2021-03-17T02-33-02Z
    network_mode: "host"
    hostname: minio3
    volumes:
      - /opt/iqdata:/data
    ports:
      - "9003:9000"
    command: server http://minio{1...8}/data
    secrets:
      - secret_key
      - access_key
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:9000/minio/health/live"]
      interval: 30s
      timeout: 20s
      retries: 3
    deploy:
      replicas: 1
      restart_policy:
        delay: 10s
      placement:
        constraints:
          - node.labels.minio3==true
  minio4:
    image: minio/minio:RELEASE.2021-03-17T02-33-02Z
    hostname: minio4
    volumes:
      - /opt/iqdata:/data
    ports:
      - "9004:9000"
    command: server http://minio{1...8}/data
    secrets:
      - secret_key
      - access_key
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:9000/minio/health/live"]
      interval: 30s
      timeout: 20s
      retries: 3
    deploy:
      replicas: 1
      restart_policy:
        delay: 10s
      placement:
        constraints:
          - node.labels.minio4==true
  minio5:
    image: minio/minio:RELEASE.2021-03-17T02-33-02Z
    network_mode: "host"
    hostname: minio5
    volumes:
      - /opt/iqdata:/data
    ports:
      - "9005:9000"
    command: server http://minio{1...8}/data
    secrets:
      - secret_key
      - access_key
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:9000/minio/health/live"]
      interval: 30s
      timeout: 20s
      retries: 3
    deploy:
      replicas: 1
      restart_policy:
        delay: 10s
      placement:
        constraints:
          - node.labels.minio5==true
  minio6:
    image: minio/minio:RELEASE.2021-03-17T02-33-02Z
    network_mode: "host"
    hostname: minio6
    volumes:
      - /opt/iqdata:/data
    ports:
      - "9006:9000"
    command: server http://minio{1...8}/data
    secrets:
      - secret_key
      - access_key
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:9000/minio/health/live"]
      interval: 30s
      timeout: 20s
      retries: 3
    deploy:
      replicas: 1
      restart_policy:
        delay: 10s
      placement:
        constraints:
          - node.labels.minio6==true
  minio7:
    image: minio/minio:RELEASE.2021-03-17T02-33-02Z
    network_mode: "host"
    hostname: minio7
    volumes:
      - /opt/iqdata:/data
    ports:
      - "9007:9000"
    command: server http://minio{1...8}/data
    secrets:
      - secret_key
      - access_key
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:9000/minio/health/live"]
      interval: 30s
      timeout: 20s
      retries: 3
    deploy:
      replicas: 1
      restart_policy:
        delay: 10s
      placement:
        constraints:
          - node.labels.minio7==true
  minio8:
    image: minio/minio:RELEASE.2021-03-17T02-33-02Z
    network_mode: "host"
    hostname: minio8
    volumes:
      - /opt/iqdata:/data
    ports:
      - "9008:9000"
    command: server http://minio{1...8}/data
    secrets:
      - secret_key
      - access_key
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:9000/minio/health/live"]
      interval: 30s
      timeout: 20s
      retries: 3
    deploy:
      replicas: 1
      restart_policy:
        delay: 10s
      placement:
        constraints:
          - node.labels.minio8==true
secrets:
  secret_key:
    external: true
  access_key:
    external: true

MinIO does not perform any rebalancing. It supports expanding a cluster (by adding a new pool of disks) if you are running out of disk space - https://docs.min.io/docs/distributed-minio-quickstart-guide.html.
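For reference, expansion on newer MinIO releases means restarting every server with the original pool plus a new pool of drives; new objects then land on the pool with more free space, but existing objects are not moved. A minimal sketch, assuming eight additional hosts named minio9 through minio16 with the same /data mount (hypothetical names, follow the linked docs for the exact procedure):
# Hypothetical expanded server command, used on every node.
minio server http://minio{1...8}/data http://minio{9...16}/data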
However, your situation seems to be an anomaly: likely a bug in the application (perhaps creating too many versions of the same object), or perhaps a bug in MinIO. It should not happen, and without more information it's hard to determine the problem.
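To narrow down what is eating the space on node 6, the mc client can report per-server drive usage and list object versions. A hedged sketch, assuming a recent mc release, an alias named myminio, and a bucket name of your own:
# Register the cluster under an alias (credentials are placeholders).
mc alias set myminio http://minio1:9000 ACCESS_KEY SECRET_KEY
# Per-server and per-drive usage summary.
mc admin info myminio
# Check whether some prefix has an unexpected number of object versions.
mc ls --versions myminio/somebucket | head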
Please know you can find us at https://slack.min.io/ 24/7/365. If you have commercial questions, please reach out to us at hello@min.io or via our Ask an Expert chat at https://min.io/pricing?action=talk-to-us.

Related

Change docker-compose.yml from linux version to windows

I wrote a docker-compose.yml that worked fine on Linux, but I have to port it to Windows. The main problem is the path: when I replace /<path> with c:/<path>, I get this error:
Error response from daemon: invalid mount config for type "volume":
invalid mount path: 'C://' mount path must be absolute
Here is the original code from Linux:
docker-compose.yml
version: '2.5.1'
networks:
  selenoid:
    external:
      name: selenoid
services:
  selenoid:
    networks:
      selenoid: null
    image: 'aerokube/selenoid:latest'
    container_name: 'selenoid'
    volumes:
      - '/home/rolf/.aerokube/selenoid:/etc/selenoid'
      - '/var/run/docker.sock:/var/run/docker.sock'
    command: ['-conf', '/etc/selenoid/browsers.json', '-container-network', 'selenoid']
    ports:
      - '4444:4444'
  mysql_db:
    networks:
      selenoid: null
    image: 'percona:latest'
    container_name: 'mysql_db'
    environment:
      MYSQL_ROOT_PASSWORD: admin
      MYSQL_DATABASE: DB_MYAPP
      MYSQL_USER: test_qa
      MYSQL_PASSWORD: qa_test
    ports:
      - '3306:3306'
    volumes:
      - '/home/rolf/final_project/mysql/myapp_db:/docker-entrypoint-initdb.d'
    healthcheck:
      test: ['CMD', 'mysql', '-uroot', '-padmin', '-h0.0.0.0', '-P3306']
      timeout: 2s
      retries: 15
  mock:
    networks:
      selenoid: null
    image: 'vk_api:latest'
    container_name: 'mock'
    ports:
      - '9000:9000'
    healthcheck:
      test: ['CMD', 'curl', '-f', 'http://0.0.0.0:9000/status']
      timeout: 2s
      retries: 15
  myapp:
    networks:
      selenoid: null
    image: 'myapp'
    container_name: 'myapp'
    ports:
      - '9999:9999'
    links:
      - 'mock:mock'
      - 'mysql_db:mysql_db'
    volumes:
      - /home/ilia/final_project:/config_dir
    entrypoint: "/app/myapp --config=/config_dir/myapp.conf"
    depends_on:
      selenoid:
        condition: service_started
      mysql_db:
        condition: service_healthy
      mock:
        condition: service_healthy
I tried to write -v //c/<path> like I saw in some Stack Overflow questions, but that didn't work for me.
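For reference, the accepted path forms differ between Docker Desktop and Docker Toolbox, and the same forms apply to the volumes: entries in compose. A sketch of quick tests with hypothetical paths:
# Docker Desktop usually accepts a drive-letter path:
docker run --rm -v "C:\Users\rolf\.aerokube\selenoid:/etc/selenoid" alpine ls /etc/selenoid
# Docker Toolbox (boot2docker VM) usually expects the //c/... form, and only
# folders shared into the VM (C:\Users by default) can be mounted:
docker run --rm -v //c/Users/rolf/.aerokube/selenoid:/etc/selenoid alpine ls /etc/selenoid
# docker-compose on Windows may also need:
#   set COMPOSE_CONVERT_WINDOWS_PATHS=1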

Elasticsearch pods failing at readiness probe

Elasticsearch pod is not becoming active.
logging-es-data-master-ilmz5zyt-3-deploy 1/1 Running 0 5m
logging-es-data-master-ilmz5zyt-3-qxkml 1/2 Running 0 5m
and the events are:
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal Scheduled 5m default-scheduler Successfully assigned logging-es-data-master-ilmz5zyt-3-qxkml to digi-srv-pp-01
Normal Pulled 5m kubelet, digi-srv-pp-01 Container image "docker.io/openshift/origin-logging-elasticsearch:v3.10" already present on machine
Normal Created 5m kubelet, digi-srv-pp-01 Created container
Normal Started 5m kubelet, digi-srv-pp-01 Started container
Normal Pulled 5m kubelet, digi-srv-pp-01 Container image "docker.io/openshift/oauth-proxy:v1.0.0" already present on machine
Normal Created 5m kubelet, digi-srv-pp-01 Created container
Normal Started 5m kubelet, digi-srv-pp-01 Started container
Warning Unhealthy 13s (x55 over 4m) kubelet, digi-srv-pp-01 Readiness probe failed: Elasticsearch node is not ready to accept HTTP requests yet [response code: 000]
The deployment config is:
# oc export dc/logging-es-data-master-ilmz5zyt -o yaml
Command "export" is deprecated, use the oc get --export
apiVersion: v1
kind: DeploymentConfig
metadata:
creationTimestamp: null
generation: 5
labels:
component: es
deployment: logging-es-data-master-ilmz5zyt
logging-infra: elasticsearch
provider: openshift
name: logging-es-data-master-ilmz5zyt
spec:
replicas: 1
revisionHistoryLimit: 0
selector:
component: es
deployment: logging-es-data-master-ilmz5zyt
logging-infra: elasticsearch
provider: openshift
strategy:
activeDeadlineSeconds: 21600
recreateParams:
timeoutSeconds: 600
resources: {}
type: Recreate
template:
metadata:
creationTimestamp: null
labels:
component: es
deployment: logging-es-data-master-ilmz5zyt
logging-infra: elasticsearch
provider: openshift
name: logging-es-data-master-ilmz5zyt
spec:
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- podAffinityTerm:
labelSelector:
matchExpressions:
- key: logging-infra
operator: In
values:
- elasticsearch
topologyKey: kubernetes.io/hostname
weight: 100
containers:
- env:
- name: DC_NAME
value: logging-es-data-master-ilmz5zyt
- name: NAMESPACE
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
- name: KUBERNETES_TRUST_CERTIFICATES
value: "true"
- name: SERVICE_DNS
value: logging-es-cluster
- name: CLUSTER_NAME
value: logging-es
- name: INSTANCE_RAM
value: 12Gi
- name: HEAP_DUMP_LOCATION
value: /elasticsearch/persistent/heapdump.hprof
- name: NODE_QUORUM
value: "1"
- name: RECOVER_EXPECTED_NODES
value: "1"
- name: RECOVER_AFTER_TIME
value: 5m
- name: READINESS_PROBE_TIMEOUT
value: "30"
- name: POD_LABEL
value: component=es
- name: IS_MASTER
value: "true"
- name: HAS_DATA
value: "true"
- name: PROMETHEUS_USER
value: system:serviceaccount:openshift-metrics:prometheus
image: docker.io/openshift/origin-logging-elasticsearch:v3.10
imagePullPolicy: IfNotPresent
name: elasticsearch
ports:
- containerPort: 9200
name: restapi
protocol: TCP
- containerPort: 9300
name: cluster
protocol: TCP
readinessProbe:
exec:
command:
- /usr/share/java/elasticsearch/probe/readiness.sh
failureThreshold: 3
initialDelaySeconds: 30
periodSeconds: 5
successThreshold: 1
timeoutSeconds: 120
resources:
limits:
memory: 12Gi
requests:
cpu: "1"
memory: 12Gi
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
volumeMounts:
- mountPath: /etc/elasticsearch/secret
name: elasticsearch
readOnly: true
- mountPath: /usr/share/java/elasticsearch/config
name: elasticsearch-config
readOnly: true
- mountPath: /elasticsearch/persistent
name: elasticsearch-storage
- args:
- --upstream-ca=/etc/elasticsearch/secret/admin-ca
- --https-address=:4443
- -provider=openshift
- -client-id=system:serviceaccount:openshift-logging:aggregated-logging-elasticsearch
- -client-secret-file=/var/run/secrets/kubernetes.io/serviceaccount/token
- -cookie-secret=endzaVczSWMzb0NoNlVtVw==
- -basic-auth-password=NXd9xTjg4npjIM0E
- -upstream=https://localhost:9200
- '-openshift-sar={"namespace": "openshift-logging", "verb": "view", "resource":
"prometheus", "group": "metrics.openshift.io"}'
- '-openshift-delegate-urls={"/": {"resource": "prometheus", "verb": "view",
"group": "metrics.openshift.io", "namespace": "openshift-logging"}}'
- --tls-cert=/etc/tls/private/tls.crt
- --tls-key=/etc/tls/private/tls.key
- -pass-access-token
- -pass-user-headers
image: docker.io/openshift/oauth-proxy:v1.0.0
imagePullPolicy: IfNotPresent
name: proxy
ports:
- containerPort: 4443
name: proxy
protocol: TCP
resources:
limits:
memory: 64Mi
requests:
cpu: 100m
memory: 64Mi
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
volumeMounts:
- mountPath: /etc/tls/private
name: proxy-tls
readOnly: true
- mountPath: /etc/elasticsearch/secret
name: elasticsearch
readOnly: true
dnsPolicy: ClusterFirst
nodeSelector:
node-role.kubernetes.io/compute: "true"
restartPolicy: Always
schedulerName: default-scheduler
securityContext:
supplementalGroups:
- 65534
serviceAccount: aggregated-logging-elasticsearch
serviceAccountName: aggregated-logging-elasticsearch
terminationGracePeriodSeconds: 30
volumes:
- name: proxy-tls
secret:
defaultMode: 420
secretName: prometheus-tls
- name: elasticsearch
secret:
defaultMode: 420
secretName: logging-elasticsearch
- configMap:
defaultMode: 420
name: logging-elasticsearch
name: elasticsearch-config
- name: elasticsearch-storage
persistentVolumeClaim:
claimName: logging-es-0
test: false
triggers: []
status:
availableReplicas: 0
latestVersion: 0
observedGeneration: 0
replicas: 0
unavailableReplicas: 0
updatedReplicas: 0
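When a readiness probe fails repeatedly with response code 000, Elasticsearch is not answering on port 9200 at all, so running the probe by hand and reading the container logs usually shows the underlying error. A hedged sketch using the pod and container names from the output above:
# Run the same readiness script the probe uses and print its exit code.
oc exec logging-es-data-master-ilmz5zyt-3-qxkml -c elasticsearch -- \
  /usr/share/java/elasticsearch/probe/readiness.sh; echo "exit=$?"
# Look for bootstrap, heap, or disk-watermark errors in the Elasticsearch logs.
oc logs logging-es-data-master-ilmz5zyt-3-qxkml -c elasticsearch --tail=100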

Run docker-compose without access to environment variables

Is it possible to run a docker-compose.yml file without having access to the environment variables (on Windows)? I have Docker Desktop installed successfully. Below is the docker-compose.yml; a short sketch of supplying variables from a file follows it.
---
version: '3.8'
services:
zookeeper:
image: confluentinc/cp-zookeeper:5.5.1
hostname: zookeeper
container_name: zookeeper
ports:
- "2181:2181"
environment:
ZOOKEEPER_CLIENT_PORT: 2181
ZOOKEEPER_TICK_TIME: 2000
broker:
image: confluentinc/cp-server:5.5.1
hostname: broker
container_name: broker
depends_on:
- zookeeper
ports:
- "9092:9092"
- "9101:9101"
environment:
KAFKA_BROKER_ID: 1
KAFKA_ZOOKEEPER_CONNECT: 'zookeeper:2181'
KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT
KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://broker:29092,PLAINTEXT_HOST://localhost:9092
KAFKA_METRIC_REPORTERS: io.confluent.metrics.reporter.ConfluentMetricsReporter
KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0
KAFKA_CONFLUENT_LICENSE_TOPIC_REPLICATION_FACTOR: 1
KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1
KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1
KAFKA_JMX_PORT: 9101
CONFLUENT_METRICS_REPORTER_BOOTSTRAP_SERVERS: broker:29092
CONFLUENT_METRICS_REPORTER_ZOOKEEPER_CONNECT: zookeeper:2181
CONFLUENT_METRICS_REPORTER_TOPIC_REPLICAS: 1
CONFLUENT_METRICS_ENABLE: 'true'
CONFLUENT_SUPPORT_CUSTOMER_ID: 'anonymous'
schema-registry:
image: confluentinc/cp-schema-registry:5.5.1
hostname: schema-registry
container_name: schema-registry
depends_on:
- zookeeper
- broker
ports:
- "8081:8081"
environment:
SCHEMA_REGISTRY_HOST_NAME: schema-registry
SCHEMA_REGISTRY_KAFKASTORE_CONNECTION_URL: 'zookeeper:2181'
connect:
image: cnfldemos/cp-server-connect-datagen:0.3.2-5.5.0
hostname: connect
container_name: connect
depends_on:
- zookeeper
- broker
- schema-registry
ports:
- "8083:8083"
environment:
CONNECT_BOOTSTRAP_SERVERS: 'broker:29092'
CONNECT_REST_ADVERTISED_HOST_NAME: connect
CONNECT_REST_PORT: 8083
CONNECT_GROUP_ID: compose-connect-group
CONNECT_CONFIG_STORAGE_TOPIC: docker-connect-configs
CONNECT_CONFIG_STORAGE_REPLICATION_FACTOR: 1
CONNECT_OFFSET_FLUSH_INTERVAL_MS: 10000
CONNECT_OFFSET_STORAGE_TOPIC: docker-connect-offsets
CONNECT_OFFSET_STORAGE_REPLICATION_FACTOR: 1
CONNECT_STATUS_STORAGE_TOPIC: docker-connect-status
CONNECT_STATUS_STORAGE_REPLICATION_FACTOR: 1
CONNECT_KEY_CONVERTER: org.apache.kafka.connect.storage.StringConverter
CONNECT_VALUE_CONVERTER: io.confluent.connect.avro.AvroConverter
CONNECT_VALUE_CONVERTER_SCHEMA_REGISTRY_URL: http://schema-registry:8081
CONNECT_INTERNAL_KEY_CONVERTER: "org.apache.kafka.connect.json.JsonConverter"
CONNECT_INTERNAL_VALUE_CONVERTER: "org.apache.kafka.connect.json.JsonConverter"
CONNECT_ZOOKEEPER_CONNECT: 'zookeeper:2181'
# CLASSPATH required due to CC-2422
CLASSPATH: /usr/share/java/monitoring-interceptors/monitoring-interceptors-5.5.1.jar
CONNECT_PRODUCER_INTERCEPTOR_CLASSES: "io.confluent.monitoring.clients.interceptor.MonitoringProducerInterceptor"
CONNECT_CONSUMER_INTERCEPTOR_CLASSES: "io.confluent.monitoring.clients.interceptor.MonitoringConsumerInterceptor"
CONNECT_PLUGIN_PATH: "/usr/share/java,/usr/share/confluent-hub-components"
CONNECT_LOG4J_LOGGERS: org.apache.zookeeper=ERROR,org.I0Itec.zkclient=ERROR,org.reflections=ERROR
control-center:
image: confluentinc/cp-enterprise-control-center:5.5.1
hostname: control-center
container_name: control-center
depends_on:
- zookeeper
- broker
- schema-registry
- connect
- ksqldb-server
ports:
- "9021:9021"
environment:
CONTROL_CENTER_BOOTSTRAP_SERVERS: 'broker:29092'
CONTROL_CENTER_ZOOKEEPER_CONNECT: 'zookeeper:2181'
CONTROL_CENTER_CONNECT_CLUSTER: 'connect:8083'
CONTROL_CENTER_KSQL_KSQLDB1_URL: "http://ksqldb-server:8088"
CONTROL_CENTER_KSQL_KSQLDB1_ADVERTISED_URL: "http://localhost:8088"
CONTROL_CENTER_SCHEMA_REGISTRY_URL: "http://schema-registry:8081"
CONTROL_CENTER_REPLICATION_FACTOR: 1
CONTROL_CENTER_INTERNAL_TOPICS_PARTITIONS: 1
CONTROL_CENTER_MONITORING_INTERCEPTOR_TOPIC_PARTITIONS: 1
CONFLUENT_METRICS_TOPIC_REPLICATION: 1
PORT: 9021
ksqldb-server:
image: confluentinc/cp-ksqldb-server:5.5.1
hostname: ksqldb-server
container_name: ksqldb-server
depends_on:
- broker
- connect
ports:
- "8088:8088"
environment:
KSQL_CONFIG_DIR: "/etc/ksql"
KSQL_BOOTSTRAP_SERVERS: "broker:29092"
KSQL_HOST_NAME: ksqldb-server
KSQL_LISTENERS: "http://0.0.0.0:8088"
KSQL_CACHE_MAX_BYTES_BUFFERING: 0
KSQL_KSQL_SCHEMA_REGISTRY_URL: "http://schema-registry:8081"
KSQL_PRODUCER_INTERCEPTOR_CLASSES: "io.confluent.monitoring.clients.interceptor.MonitoringProducerInterceptor"
KSQL_CONSUMER_INTERCEPTOR_CLASSES: "io.confluent.monitoring.clients.interceptor.MonitoringConsumerInterceptor"
KSQL_KSQL_CONNECT_URL: "http://connect:8083"
ksqldb-cli:
image: confluentinc/cp-ksqldb-cli:5.5.1
container_name: ksqldb-cli
depends_on:
- broker
- connect
- ksqldb-server
entrypoint: /bin/sh
tty: true
ksql-datagen:
image: confluentinc/ksqldb-examples:5.5.1
hostname: ksql-datagen
container_name: ksql-datagen
depends_on:
- ksqldb-server
- broker
- schema-registry
- connect
command: "bash -c 'echo Waiting for Kafka to be ready... &&
cub kafka-ready -b broker:29092 1 40 &&
echo Waiting for Confluent Schema Registry to be ready... &&
cub sr-ready schema-registry 8081 40 &&
echo Waiting a few seconds for topic creation to finish... &&
sleep 11 &&
tail -f /dev/null'"
environment:
KSQL_CONFIG_DIR: "/etc/ksql"
STREAMS_BOOTSTRAP_SERVERS: broker:29092
STREAMS_SCHEMA_REGISTRY_HOST: schema-registry
STREAMS_SCHEMA_REGISTRY_PORT: 8081
rest-proxy:
image: confluentinc/cp-kafka-rest:5.5.1
depends_on:
- zookeeper
- broker
- schema-registry
ports:
- 8082:8082
hostname: rest-proxy
container_name: rest-proxy
environment:
KAFKA_REST_HOST_NAME: rest-proxy
KAFKA_REST_BOOTSTRAP_SERVERS: 'broker:29092'
KAFKA_REST_LISTENERS: "http://0.0.0.0:8082"
KAFKA_REST_SCHEMA_REGISTRY_URL: 'http://schema-registry:8081'
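This particular file does not appear to reference any ${VAR} placeholders, so it should start without host environment variables. When a compose file does need them, the values can be supplied from a file instead of the shell; a hedged sketch with an example file name and variable:
# Put the values in a file instead of exporting them in the shell...
echo SOME_TAG=5.5.1 > my-vars.env
# ...and point docker-compose at it (supported since docker-compose 1.25).
docker-compose --env-file my-vars.env up -d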

Why does the connect-datagen container of my confluent kafka docker-compose disconnect after a couple of minutes?

I am trying to run a bare-minimum Confluent Community example on Docker for Windows (Toolbox) using the example given here:
https://docs.confluent.io/current/quickstart/cos-docker-quickstart.html
It seems like all the components get started, but connect fails and does not work.
This is my docker-compose.yml:
version: '2'
services:
zookeeper:
image: confluentinc/cp-zookeeper:6.1.1
hostname: zookeeper
container_name: zookeeper
ports:
- "2181:2181"
environment:
ZOOKEEPER_CLIENT_PORT: 2181
ZOOKEEPER_TICK_TIME: 2000
broker:
image: confluentinc/cp-server:6.1.1
hostname: broker
container_name: broker
depends_on:
- zookeeper
ports:
- "9092:9092"
- "9101:9101"
environment:
KAFKA_BROKER_ID: 1
KAFKA_ZOOKEEPER_CONNECT: 'zookeeper:2181'
KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT
KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://broker:29092,PLAINTEXT_HOST://localhost:9092
KAFKA_METRIC_REPORTERS: io.confluent.metrics.reporter.ConfluentMetricsReporter
KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0
KAFKA_CONFLUENT_LICENSE_TOPIC_REPLICATION_FACTOR: 1
KAFKA_CONFLUENT_BALANCER_TOPIC_REPLICATION_FACTOR: 1
KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1
KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1
KAFKA_JMX_PORT: 9101
KAFKA_JMX_HOSTNAME: localhost
KAFKA_CONFLUENT_SCHEMA_REGISTRY_URL: http://schema-registry:8081
CONFLUENT_METRICS_REPORTER_BOOTSTRAP_SERVERS: broker:29092
CONFLUENT_METRICS_REPORTER_TOPIC_REPLICAS: 1
CONFLUENT_METRICS_ENABLE: 'true'
CONFLUENT_SUPPORT_CUSTOMER_ID: 'anonymous'
schema-registry:
image: confluentinc/cp-schema-registry:6.1.1
hostname: schema-registry
container_name: schema-registry
depends_on:
- broker
ports:
- "8081:8081"
environment:
SCHEMA_REGISTRY_HOST_NAME: schema-registry
SCHEMA_REGISTRY_KAFKASTORE_BOOTSTRAP_SERVERS: 'broker:29092'
SCHEMA_REGISTRY_LISTENERS: http://0.0.0.0:8081
connect:
image: cnfldemos/cp-server-connect-datagen:0.4.0-6.1.0
hostname: connect
container_name: connect
depends_on:
- broker
- schema-registry
ports:
- "8083:8083"
environment:
CONNECT_BOOTSTRAP_SERVERS: 'broker:29092'
CONNECT_REST_ADVERTISED_HOST_NAME: connect
CONNECT_REST_PORT: 8083
CONNECT_GROUP_ID: compose-connect-group
CONNECT_CONFIG_STORAGE_TOPIC: docker-connect-configs
CONNECT_CONFIG_STORAGE_REPLICATION_FACTOR: 1
CONNECT_OFFSET_FLUSH_INTERVAL_MS: 10000
CONNECT_OFFSET_STORAGE_TOPIC: docker-connect-offsets
CONNECT_OFFSET_STORAGE_REPLICATION_FACTOR: 1
CONNECT_STATUS_STORAGE_TOPIC: docker-connect-status
CONNECT_STATUS_STORAGE_REPLICATION_FACTOR: 1
CONNECT_KEY_CONVERTER: org.apache.kafka.connect.storage.StringConverter
CONNECT_VALUE_CONVERTER: io.confluent.connect.avro.AvroConverter
CONNECT_VALUE_CONVERTER_SCHEMA_REGISTRY_URL: http://schema-registry:8081
# CLASSPATH required due to CC-2422
CLASSPATH: /usr/share/java/monitoring-interceptors/monitoring-interceptors-6.1.1.jar
CONNECT_PRODUCER_INTERCEPTOR_CLASSES: "io.confluent.monitoring.clients.interceptor.MonitoringProducerInterceptor"
CONNECT_CONSUMER_INTERCEPTOR_CLASSES: "io.confluent.monitoring.clients.interceptor.MonitoringConsumerInterceptor"
CONNECT_PLUGIN_PATH: "/usr/share/java,/usr/share/confluent-hub-components"
CONNECT_LOG4J_LOGGERS: org.apache.zookeeper=ERROR,org.I0Itec.zkclient=ERROR,org.reflections=ERROR
control-center:
image: confluentinc/cp-enterprise-control-center:6.1.1
hostname: control-center
container_name: control-center
depends_on:
- broker
- schema-registry
- connect
- ksqldb-server
ports:
- "9021:9021"
environment:
CONTROL_CENTER_BOOTSTRAP_SERVERS: 'broker:29092'
CONTROL_CENTER_CONNECT_CLUSTER: 'connect:8083'
CONTROL_CENTER_KSQL_KSQLDB1_URL: "http://ksqldb-server:8088"
CONTROL_CENTER_KSQL_KSQLDB1_ADVERTISED_URL: "http://localhost:8088"
CONTROL_CENTER_SCHEMA_REGISTRY_URL: "http://schema-registry:8081"
CONTROL_CENTER_REPLICATION_FACTOR: 1
CONTROL_CENTER_INTERNAL_TOPICS_PARTITIONS: 1
CONTROL_CENTER_MONITORING_INTERCEPTOR_TOPIC_PARTITIONS: 1
CONFLUENT_METRICS_TOPIC_REPLICATION: 1
PORT: 9021
ksqldb-server:
image: confluentinc/cp-ksqldb-server:6.1.1
hostname: ksqldb-server
container_name: ksqldb-server
depends_on:
- broker
- connect
ports:
- "8088:8088"
environment:
KSQL_CONFIG_DIR: "/etc/ksql"
KSQL_BOOTSTRAP_SERVERS: "broker:29092"
KSQL_HOST_NAME: ksqldb-server
KSQL_LISTENERS: "http://0.0.0.0:8088"
KSQL_CACHE_MAX_BYTES_BUFFERING: 0
KSQL_KSQL_SCHEMA_REGISTRY_URL: "http://schema-registry:8081"
KSQL_PRODUCER_INTERCEPTOR_CLASSES: "io.confluent.monitoring.clients.interceptor.MonitoringProducerInterceptor"
KSQL_CONSUMER_INTERCEPTOR_CLASSES: "io.confluent.monitoring.clients.interceptor.MonitoringConsumerInterceptor"
KSQL_KSQL_CONNECT_URL: "http://connect:8083"
KSQL_KSQL_LOGGING_PROCESSING_TOPIC_REPLICATION_FACTOR: 1
KSQL_KSQL_LOGGING_PROCESSING_TOPIC_AUTO_CREATE: 'true'
KSQL_KSQL_LOGGING_PROCESSING_STREAM_AUTO_CREATE: 'true'
ksqldb-cli:
image: confluentinc/cp-ksqldb-cli:6.1.1
container_name: ksqldb-cli
depends_on:
- broker
- connect
- ksqldb-server
entrypoint: /bin/sh
tty: true
ksql-datagen:
image: confluentinc/ksqldb-examples:6.1.1
hostname: ksql-datagen
container_name: ksql-datagen
depends_on:
- ksqldb-server
- broker
- schema-registry
- connect
command: "bash -c 'echo Waiting for Kafka to be ready... && \
cub kafka-ready -b broker:29092 1 40 && \
echo Waiting for Confluent Schema Registry to be ready... && \
cub sr-ready schema-registry 8081 40 && \
echo Waiting a few seconds for topic creation to finish... && \
sleep 11 && \
tail -f /dev/null'"
environment:
KSQL_CONFIG_DIR: "/etc/ksql"
STREAMS_BOOTSTRAP_SERVERS: broker:29092
STREAMS_SCHEMA_REGISTRY_HOST: schema-registry
STREAMS_SCHEMA_REGISTRY_PORT: 8081
rest-proxy:
image: confluentinc/cp-kafka-rest:6.1.1
depends_on:
- broker
- schema-registry
ports:
- 8082:8082
hostname: rest-proxy
container_name: rest-proxy
environment:
KAFKA_REST_HOST_NAME: rest-proxy
KAFKA_REST_BOOTSTRAP_SERVERS: 'broker:29092'
KAFKA_REST_LISTENERS: "http://0.0.0.0:8082"
KAFKA_REST_SCHEMA_REGISTRY_URL: 'http://schema-registry:8081'
Can you help me?
I had the same problem and found a solution.
Focus on the Docker container status "exited 137": it means the container ran out of memory, so you must allocate at least 6 GB of memory for Docker.
You can also check the Prerequisites section of the documentation:
https://docs.confluent.io/platform/current/quickstart/cos-docker-quickstart.html
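A quick way to confirm the out-of-memory theory is to check the exit status of the failed container and the memory available to the Docker VM. A hedged sketch, using the connect container name from the compose file above:
# 137 = 128 + SIGKILL; on Docker this usually means the container was killed for memory.
docker inspect connect --format '{{.State.ExitCode}} OOMKilled={{.State.OOMKilled}}'
# Total memory the Docker daemon / VM can hand out.
docker info --format '{{.MemTotal}}'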

RASA: Sender_id always returned as “default”

I'm using docker-compose to deploy Rasa. I faced an issue: when I try to retrieve the sender_id in custom actions, it is always returned as "default".
Please find my docker-compose below:
version: "3.4"
x-database-credentials: &database-credentials
DB_HOST: "db"
DB_PORT: "5432"
DB_USER: "${DB_USER:-admin}"
DB_PASSWORD: "${DB_PASSWORD}"
DB_LOGIN_DB: "${DB_LOGIN_DB:-rasa}"
x-rabbitmq-credentials: &rabbitmq-credentials
RABBITMQ_HOST: "rabbit"
RABBITMQ_USERNAME: "user"
RABBITMQ_PASSWORD: ${RABBITMQ_PASSWORD}
x-redis-credentials: &redis-credentials
REDIS_HOST: "redis"
REDIS_PORT: "6379"
REDIS_PASSWORD: ${REDIS_PASSWORD}
REDIS_DB: "1"
x-duckling-credentials: &duckling-credentials
RASA_DUCKLING_HTTP_URL: "http://duckling:8000"
services:
rasa-production:
restart: always
image: "rasa/rasa:${RASA_VERSION}-full"
ports:
- "5006:5005"
volumes:
- ./:/app
command:
- run
- --cors
- "*"
environment:
<<: *database-credentials
<<: *redis-credentials
<<: *rabbitmq-credentials
DB_DATABASE: "${DB_DATABASE:-rasa}"
RABBITMQ_QUEUE: "rasa_production_events"
RASA_TELEMETRY_ENABLED: ${RASA_TELEMETRY_ENABLED:-true}
depends_on:
- app
- rabbit
- redis
- db
app:
restart: always
build: actions/.
volumes:
- ./actions:/app/actions
expose:
- "5055"
environment:
SERVICE_BASE_URL: "${SERVICE_BASE_URL}"
RASA_SDK_VERSION: "${RASA_SDK_VERSION}"
depends_on:
- redis
scheduler:
restart: always
build: scheduler/.
environment:
SERVICE_BASE_URL: "${SERVICE_BASE_URL}"
duckling:
restart: always
image: "rasa/duckling:0.1.6.3"
expose:
- "8000"
command: ["duckling-example-exe", "--no-access-log", "--no-error-log"]
# revers_proxy:
# image: nginx
# ports:
# - 80:80
# - 443:443
# volumes:
# - ./config/nginx/:/etc/nginx/conf.d/
# depends_on:
# - rasa-production
# - app
mongo:
image: mongo:4.2.0
ports:
- 27017:27017
# revers_proxy:
# image: nginx
# ports:
# - 5006:5006
# volumes:
# - ./config/nginx/defaul.conf:/etc/nginx/conf.d/default.conf
# depends_on:
# - rasa-production
# - app
redis:
restart: always
image: "bitnami/redis:6.0.8"
environment:
ALLOW_EMPTY_PASSWORD: "yes"
REDIS_PASSWORD: ${REDIS_PASSWORD}
expose:
- "6379"
redisapp:
restart: always
image: "bitnami/redis:6.0.8"
environment:
ALLOW_EMPTY_PASSWORD: "yes"
expose:
- "6379"
rabbit:
restart: always
image: "bitnami/rabbitmq:3.8.9"
environment:
RABBITMQ_HOST: "rabbit"
RABBITMQ_USERNAME: "user"
RABBITMQ_PASSWORD: ${RABBITMQ_PASSWORD}
RABBITMQ_DISK_FREE_LIMIT: "{mem_relative, 0.1}"
expose:
- "5672"
db:
restart: always
image: "bitnami/postgresql:11.9.0"
expose:
- "5432"
environment:
POSTGRESQL_USERNAME: "${DB_USER:-admin}"
POSTGRESQL_PASSWORD: "${DB_PASSWORD}"
POSTGRESQL_DATABASE: "${DB_DATABASE:-rasa}"
volumes:
- ./db:/bitnami/postgresql
The endpoints.yml file:
tracker_store:
  type: sql
  dialect: "postgresql"
  url: ${DB_HOST}
  port: ${DB_PORT}
  username: ${DB_USER}
  password: ${DB_PASSWORD}
  db: ${DB_DATABASE}
  login_db: ${DB_LOGIN_DB}
lock_store:
  type: "redis"
  url: ${REDIS_HOST}
  port: ${REDIS_PORT}
  password: ${REDIS_PASSWORD}
  db: ${REDIS_DB}
event_broker:
  type: "pika"
  url: ${RABBITMQ_HOST}
  username: ${RABBITMQ_USERNAME}
  password: ${RABBITMQ_PASSWORD}
  queue: rasa_production_events
Rasa Version : 2.1.0
Rasa SDK Version : 2.1.1
Rasa X Version : None
Python Version : 3.8.5
Operating System : Linux-5.4.0-48-generic-x86_64-with-glibc2.29
Python Path : /usr/bin/python3
Any help to overcome this issue would be appreciated.
This looks like the bug described in this issue: https://github.com/RasaHQ/rasa/issues/7338
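For what it's worth, the sender_id comes from the input channel, so posting to the REST webhook with an explicit sender is a quick way to check whether the channel or the tracker store is dropping it. A hedged sketch (port 5006 is the published port from the compose file above; the sender value is an example):
# The REST channel uses the "sender" field as the conversation/sender id.
curl -s -X POST http://localhost:5006/webhooks/rest/webhook \
  -H 'Content-Type: application/json' \
  -d '{"sender": "test-user-123", "message": "hello"}'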
