I am following reindex documentation here: https://www.elastic.co/guide/en/cloud/current/ec-migrate-data.html
I have two Elasticsearch clusters running on localhost:
192.168.0.100:9200
{
"name" : "fses01",
"cluster_name" : "fs-es-cluster",
"cluster_uuid" : "DqVDBBafRaO9UAJPKuc_xQ",
"version" : {
"number" : "7.14.0",
"build_flavor" : "default",
"build_type" : "docker",
"build_hash" : "dd5a0a2acaa2045ff9624f3729fc8a6f40835aa1",
"build_date" : "2021-07-29T20:49:32.864135063Z",
"build_snapshot" : false,
"lucene_version" : "8.9.0",
"minimum_wire_compatibility_version" : "6.8.0",
"minimum_index_compatibility_version" : "6.0.0-beta1"
},
"tagline" : "You Know, for Search"
}
With the following index:
health status index uuid pri rep docs.count docs.deleted store.size pri.store.size
green open formshare_records NpEFzsGvTEmivnYjJhwHJg 5 1 51343 0 7.8mb 3.9mb
192.168.0.100:9201
{
"name" : "es01",
"cluster_name" : "es-docker-cluster",
"cluster_uuid" : "bvsRXLwZRtKuWIrIuKx0hg",
"version" : {
"number" : "7.14.2",
"build_flavor" : "default",
"build_type" : "docker",
"build_hash" : "6bc13727ce758c0e943c3c21653b3da82f627f75",
"build_date" : "2021-09-15T10:18:09.722761972Z",
"build_snapshot" : false,
"lucene_version" : "8.9.0",
"minimum_wire_compatibility_version" : "6.8.0",
"minimum_index_compatibility_version" : "6.0.0-beta1"
},
"tagline" : "You Know, for Search"
}
With the same index, but empty:
health status index uuid pri rep docs.count docs.deleted store.size pri.store.size
green open formshare_records uxvizIAIS82YBHSsvpq4-Q 5 1 0 0 2kb 1kb
I am doing a reindex using curl with the following:
curl -X POST "192.168.0.100:9201/_reindex" -H 'Content-Type: application/json' -d'
{
"source": {
"remote": {
"host": "http://192.168.0.100:9200"
},
"index": "formshare_records",
"query": {
"match_all": {}
}
},
"dest": {
"index": "formshare_records"
}
}
'
But zero documents get transferred:
{"took":34,"timed_out":false,"total":0,"updated":0,"created":0,"deleted":0,"batches":0,"version_conflicts":0,"noops":0,"retries":{"bulk":0,"search":0},"throttled_millis":0,"requests_per_second":-1.0,"throttled_until_millis":0,"failures":[]}
The index definition is the same on both sides:
{"settings": {
"index": {
"number_of_shards": 5,
"number_of_replicas": 1
}
},
"mappings": {
"properties": {
"project_id": {"type": "keyword"},
"form_id": {"type": "keyword"},
"schema": {"type": "keyword"},
"table": {"type": "keyword"}
}
}}
Both of them run under Docker:
192.168.0.100:9200
version: '3'
services:
fses01:
image: docker.elastic.co/elasticsearch/elasticsearch:7.14.0
container_name: fses01
environment:
- node.name=fses01
- cluster.name=fs-es-cluster
- discovery.seed_hosts=fses02
- cluster.initial_master_nodes=fses01,fses02
- bootstrap.memory_lock=true
- xpack.security.enabled=false
- "ES_JAVA_OPTS=-Xms2048m -Xmx2048m"
- cluster.max_shards_per_node=20000
- script.max_compilations_rate=10000/1m
ulimits:
memlock:
soft: -1
hard: -1
volumes:
- /opt/elasticsearch-docker/data:/usr/share/elasticsearch/data
ports:
- 9200:9200
networks:
- esnet
fses02:
image: docker.elastic.co/elasticsearch/elasticsearch:7.14.0
container_name: fses02
environment:
- node.name=fses02
- cluster.name=fs-es-cluster
- discovery.seed_hosts=fses01
- cluster.initial_master_nodes=fses01,fses02
- bootstrap.memory_lock=true
- xpack.security.enabled=false
- "ES_JAVA_OPTS=-Xms2048m -Xmx2048m"
- cluster.max_shards_per_node=20000
- script.max_compilations_rate=10000/1m
ulimits:
memlock:
soft: -1
hard: -1
volumes:
- /opt/elasticsearch-docker/data2:/usr/share/elasticsearch/data
networks:
- esnet
kib01:
image: docker.elastic.co/kibana/kibana:7.14.0
container_name: kib01
ports:
- 5601:5601
environment:
ELASTICSEARCH_URL: http://fses01:9200
ELASTICSEARCH_HOSTS: '["http://fses01:9200","http://fses02:9200"]'
networks:
- esnet
networks:
esnet:
192.168.0.100:9201
version: '3.7'
services:
es01:
image: docker.elastic.co/elasticsearch/elasticsearch:7.14.2
container_name: es01
environment:
- node.name=es01
- cluster.name=es-docker-cluster
- discovery.seed_hosts=es02,es03
- cluster.initial_master_nodes=es01,es02,es03
- bootstrap.memory_lock=true
- "ES_JAVA_OPTS=-Xms512m -Xmx512m"
- reindex.remote.whitelist=192.168.0.100:9200
ulimits:
memlock:
soft: -1
hard: -1
volumes:
- /opt/elasticsearch710/data:/usr/share/elasticsearch/data
ports:
- 9201:9200
networks:
- elastic
es02:
image: docker.elastic.co/elasticsearch/elasticsearch:7.14.2
container_name: es02
environment:
- node.name=es02
- cluster.name=es-docker-cluster
- discovery.seed_hosts=es01,es03
- cluster.initial_master_nodes=es01,es02,es03
- bootstrap.memory_lock=true
- "ES_JAVA_OPTS=-Xms512m -Xmx512m"
ulimits:
memlock:
soft: -1
hard: -1
volumes:
- /opt/elasticsearch710/data2:/usr/share/elasticsearch/data
networks:
- elastic
es03:
image: docker.elastic.co/elasticsearch/elasticsearch:7.14.2
container_name: es03
environment:
- node.name=es03
- cluster.name=es-docker-cluster
- discovery.seed_hosts=es01,es02
- cluster.initial_master_nodes=es01,es02,es03
- bootstrap.memory_lock=true
- "ES_JAVA_OPTS=-Xms512m -Xmx512m"
ulimits:
memlock:
soft: -1
hard: -1
volumes:
- /opt/elasticsearch710/data3:/usr/share/elasticsearch/data
networks:
- elastic
networks:
elastic:
driver: bridge
The container behind 9201 can see the index on 9200:
sudo docker exec -it es01 /bin/bash
[root@943159977b17 elasticsearch]# curl -X GET "192.168.0.100:9200/_cat/indices/formshare_records?v&s=index&pretty"
health status index uuid pri rep docs.count docs.deleted store.size pri.store.size
green open formshare_records NpEFzsGvTEmivnYjJhwHJg 5 1 51343 0 7.8mb 3.9mb
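(To double-check that the reindex.remote.whitelist setting is active on the destination cluster, one can query the nodes API; a quick sketch, where filter_path only trims the response:)
curl "192.168.0.100:9201/_nodes?filter_path=nodes.*.settings.reindex&pretty"
Running the reindex again gives the same result: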
curl -X POST "192.168.0.100:9201/_reindex" -H 'Content-Type: application/json' -d'
{
"source": {
"remote": {
"host": "http://192.168.0.100:9200"
},
"index": "formshare_records",
"query": {
"match_all": {}
}
},
"dest": {
"index": "formshare_records"
}
}
'
{"took":18,"timed_out":false,"total":0,"updated":0,"created":0,"deleted":0,"batches":0,"version_conflicts":0,"noops":0,"retries":{"bulk":0,"search":0},"throttled_millis":0,"requests_per_second":-1.0,"throttled_until_millis":0,"failures":[]}
Any idea why the reindex does not work? I tried without the query but got the same result!
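(For anyone reproducing this: a direct search against the source index from the destination host should return hits; a quick sanity check using the standard search API, nothing specific to reindex:)
curl "192.168.0.100:9200/formshare_records/_search?size=1&pretty"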
Related
I'm having some issues trying to connect a producer container to a Kafka container.
I will have 3 different projects, each running in a Docker container on the same machine:
kafka server
producer
consumer
At the moment, my Kafka server is running fine, and I have just built a producer from which I'm trying to send a message (just a basic test).
I'm getting this error:
kafka:9092/bootstrap: Connect to ipv4#172.28.0.3:9092 failed: Connection refused
I checked multiple posts and answers but I'm a bit lost; I read that the containers need to be on the same network, so I set that up, but I can't figure out what else I'm missing.
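As a basic reachability check (a sketch: it assumes the public edenhill/kcat image and uses the external network name shown further below), listing broker metadata from a throwaway container on the shared network:
docker run --rm --network server_kafka edenhill/kcat:1.7.1 -b kafka:9092 -L
If this fails too, the problem is in the network/listener setup rather than the producer code.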
Here is my Kafka server docker compose :
version: '3'
services:
laravel.test:
build:
context: ./vendor/laravel/sail/runtimes/8.1
dockerfile: Dockerfile
args:
WWWGROUP: '${WWWGROUP}'
image: sail-8.1/app
extra_hosts:
- 'host.docker.internal:host-gateway'
ports:
- '${APP_PORT:-80}:80'
environment:
WWWUSER: '${WWWUSER}'
LARAVEL_SAIL: 1
XDEBUG_MODE: '${SAIL_XDEBUG_MODE:-off}'
XDEBUG_CONFIG: '${SAIL_XDEBUG_CONFIG:-client_host=host.docker.internal}'
volumes:
- '.:/var/www/html'
networks:
- sail
depends_on:
- pgsql
pgsql:
image: 'postgres:13'
ports:
- '${FORWARD_DB_PORT:-5432}:5432'
environment:
PGPASSWORD: '${DB_PASSWORD:-secret}'
POSTGRES_DB: '${DB_DATABASE}'
POSTGRES_USER: '${DB_USERNAME}'
POSTGRES_PASSWORD: '${DB_PASSWORD:-secret}'
volumes:
- 'sail-pgsql:/var/lib/postgresql/data'
networks:
- sail
healthcheck:
test:
[
"CMD",
"pg_isready",
"-q",
"-d",
"${DB_DATABASE}",
"-U",
"${DB_USERNAME}"
]
retries: 3
timeout: 5s
zookeeper:
image: confluentinc/cp-zookeeper:latest
environment:
ZOOKEEPER_CLIENT_PORT: 2181
ZOOKEEPER_TICK_TIME: 2000
ports:
- 22181:2181
kafka:
image: confluentinc/cp-kafka:latest
depends_on:
- zookeeper
ports:
- 29092:29092
environment:
KAFKA_BROKER_ID: 1
KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
KAFKA_ADVERTISED_LISTENERS: LISTENER_INTERNAL://kafka:9092,LISTENER_EXTERNAL://localhost:29092
KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: LISTENER_INTERNAL:PLAINTEXT,LISTENER_EXTERNAL:PLAINTEXT
KAFKA_INTER_BROKER_LISTENER_NAME: LISTENER_INTERNAL
KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
networks:
- kafka
networks:
sail:
driver: bridge
kafka:
driver: bridge
volumes:
sail-pgsql:
driver: local
Here is my producer:
version: '3'
services:
laravel.test:
build:
context: ./vendor/laravel/sail/runtimes/8.1
dockerfile: Dockerfile
args:
WWWGROUP: '${WWWGROUP}'
image: sail-8.1/app
extra_hosts:
- 'host.docker.internal:host-gateway'
ports:
- '${APP_PORT:-80}:80'
environment:
WWWUSER: '${WWWUSER}'
LARAVEL_SAIL: 1
XDEBUG_MODE: '${SAIL_XDEBUG_MODE:-off}'
XDEBUG_CONFIG: '${SAIL_XDEBUG_CONFIG:-client_host=host.docker.internal}'
volumes:
- '.:/var/www/html'
networks:
- sail
- proxy
depends_on:
- pgsql
pgsql:
image: 'postgres:13'
ports:
- '${FORWARD_DB_PORT:-5432}:5432'
environment:
PGPASSWORD: '${DB_PASSWORD:-secret}'
POSTGRES_DB: '${DB_DATABASE}'
POSTGRES_USER: '${DB_USERNAME}'
POSTGRES_PASSWORD: '${DB_PASSWORD:-secret}'
volumes:
- 'sail-pgsql:/var/lib/postgresql/data'
networks:
- sail
healthcheck:
test: ["CMD", "pg_isready", "-q", "-d", "${DB_DATABASE}", "-U", "${DB_USERNAME}"]
retries: 3
timeout: 5s
networks:
sail:
driver: bridge
proxy:
external:
name: server_kafka
volumes:
sail-pgsql:
driver: local
When I inspect the network, I see both of my containers:
docker network inspect server_kafka
[
{
"Name": "server_kafka",
"Id": "6bc8ed3f604da554eeead58dca06ba8dd926673ae9683095de18a56b45bbd70f",
"Created": "2022-02-21T11:55:31.022965034+01:00",
"Scope": "local",
"Driver": "bridge",
"EnableIPv6": false,
"IPAM": {
"Driver": "default",
"Options": null,
"Config": [
{
"Subnet": "172.28.0.0/16",
"Gateway": "172.28.0.1"
}
]
},
"Internal": false,
"Attachable": true,
"Ingress": false,
"ConfigFrom": {
"Network": ""
},
"ConfigOnly": false,
"Containers": {
"0b4c79952626cc79e757681137523ad918b802945d82df501f7f1ad1141ca841": {
"Name": "microservice-1_laravel.test_1",
"EndpointID": "99398ddf104b8f8ce478a611e6885fba8960fd956c59938cca4fbecc8d7d21c5",
"MacAddress": "02:42:ac:1c:00:02",
"IPv4Address": "172.28.0.2/16",
"IPv6Address": ""
},
"c1bf90faa56f61432ad99e11408ea50bfeaf5fcdeca0bfd9791bf28cb9fea835": {
"Name": "server_kafka_1",
"EndpointID": "4af2450208622da0550d935332c5451c8c53a07ff10a120fb519f30cfbd157cb",
"MacAddress": "02:42:ac:1c:00:03",
"IPv4Address": "172.28.0.3/16",
"IPv6Address": ""
}
},
"Options": {},
"Labels": {
"com.docker.compose.network": "kafka",
"com.docker.compose.project": "server",
"com.docker.compose.version": "1.29.2"
}
}
]
The doc above is nice.
In my case, I was missing the network for zookeeper in my docker-compose:
networks:
- kafka
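For completeness, a sketch of the corrected zookeeper service (identical to the one in the compose file above, just with the network attached):
zookeeper:
  image: confluentinc/cp-zookeeper:latest
  environment:
    ZOOKEEPER_CLIENT_PORT: 2181
    ZOOKEEPER_TICK_TIME: 2000
  ports:
    - 22181:2181
  networks:
    - kafka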
I'm working on Presto on Spark, with Elasticsearch as the data source. I'm not able to run queries using Presto.
elasticsearch.properties:
elasticsearch.ignore-publish-address=true
elasticsearch.default-schema-name=default
elasticsearch.host=localhost
connector.name=elasticsearch
elasticsearch.port=2900
docker-compose.yaml
elasticsearch:
image: docker.elastic.co/elasticsearch/elasticsearch:7.6.1
container_name: elasticsearch
environment:
- xpack.security.enabled=false
- discovery.type=single-node
- network.host=0.0.0.0
ports:
- '9200:9200'
networks:
- pqp-net
networks:
pqp-net:
driver: bridge
I'm getting the error below:
c.f.p.e.client.ElasticsearchClient - Error refreshing nodes
com.facebook.presto.spi.PrestoException: Connection refused
However, I am able to fetch the details of Elasticsearch:
http://localhost:9200
{ "name" : "ab751e0dd0ad", "cluster_name" : "docker-cluster", "cluster_uuid" : "3T66bOexSGOo6Pwtt2Ul4Q", "version" : {
"number" : "7.6.1",
"build_flavor" : "default",
"build_type" : "docker",
"build_hash" : "aa751e09be0a5072e8570670309b1f12348f023b",
"build_date" : "2020-02-29T00:15:25.529771Z",
"build_snapshot" : false,
"lucene_version" : "8.4.0",
"minimum_wire_compatibility_version" : "6.8.0",
"minimum_index_compatibility_version" : "6.0.0-beta1" }, "tagline" : "You Know, for Search" }
If anyone has faced the same issue, please help.
Thanks in advance.
Resolved: I had an issue with the port number mentioned in my application. The catalog file pointed Presto at port 2900 while the container exposes Elasticsearch on 9200; after correcting the port I was able to connect to Elasticsearch.
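For anyone hitting the same error: the compose file maps 9200:9200 while elasticsearch.properties points at 2900, so a corrected catalog file would presumably look like this (same settings as above, only the port changed):
connector.name=elasticsearch
elasticsearch.host=localhost
elasticsearch.port=9200
elasticsearch.default-schema-name=default
elasticsearch.ignore-publish-address=true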
I've been running my ECK (Elastic Cloud on Kubernetes) cluster for a couple of weeks with no issues. However, 3 days ago Filebeat stopped being able to connect to my ES service. All pods are up and running (Elastic, Beats and Kibana).
Also, shelling into the Filebeat pods and connecting to the Elasticsearch service works just fine:
curl -k -u "user:$PASSWORD" https://quickstart-es-http.quickstart.svc:9200
{
"name" : "aegis-es-default-4",
"cluster_name" : "quickstart",
"cluster_uuid" : "",
"version" : {
"number" : "7.14.0",
"build_flavor" : "default",
"build_type" : "docker",
"build_hash" : "",
"build_date" : "",
"build_snapshot" : false,
"lucene_version" : "8.9.0",
"minimum_wire_compatibility_version" : "6.8.0",
"minimum_index_compatibility_version" : "6.0.0-beta1"
},
"tagline" : "You Know, for Search"
}
Yet the Filebeat pod logs are producing the error below:
ERROR
[publisher_pipeline_output] pipeline/output.go:154
Failed to connect to backoff(elasticsearch(https://quickstart-es-http.quickstart.svc:9200)):
Connection marked as failed because the onConnect callback failed: could not connect to a compatible version of Elasticsearch:
503 Service Unavailable:
{
"error": {
"root_cause": [
{ "type": "master_not_discovered_exception", "reason": null }
],
"type": "master_not_discovered_exception",
"reason": null
},
"status": 503
}
I haven't made any changes, so I think it's a case of authentication or SSL certificates needing to be updated?
My Filebeat config looks like this:
apiVersion: beat.k8s.elastic.co/v1beta1
kind: Beat
metadata:
name: quickstart
namespace: quickstart
spec:
type: filebeat
version: 7.14.0
elasticsearchRef:
name: quickstart
config:
filebeat:
modules:
- module: gcp
audit:
enabled: true
var.project_id: project_id
var.topic: topic_name
var.subcription: sub_name
var.credentials_file: /usr/certs/credentials_file
var.keep_original_message: false
vpcflow:
enabled: true
var.project_id: project_id
var.topic: topic_name
var.subscription_name: sub_name
var.credentials_file: /usr/certs/credentials_file
firewall:
enabled: true
var.project_id: project_id
var.topic: topic_name
var.subscription_name: sub_name
var.credentials_file: /usr/certs/credentials_file
daemonSet:
podTemplate:
spec:
serviceAccountName: filebeat
automountServiceAccountToken: true
dnsPolicy: ClusterFirstWithHostNet
hostNetwork: true
securityContext:
runAsUser: 0
containers:
- name: filebeat
volumeMounts:
- name: varlogcontainers
mountPath: /var/log/containers
- name: varlogpods
mountPath: /var/log/pods
- name: varlibdockercontainers
mountPath: /var/lib/docker/containers
- name: credentials
mountPath: /usr/certs
readOnly: true
volumes:
- name: varlogcontainers
hostPath:
path: /var/log/containers
- name: varlogpods
hostPath:
path: /var/log/pods
- name: varlibdockercontainers
hostPath:
path: /var/lib/docker/containers
- name: credentials
secret:
defaultMode: 420
items:
secretName: elastic-service-account
And it was working just fine; I haven't made any changes to this config that would make it lose access.
I did a little more digging and found that there weren't enough resources available to assign a master node.
I saw this when I ran GET /_cat/master, which returned the same 503 "no master" error. I added a new node pool and the cluster started running normally.
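In case it helps anyone else, these are the kinds of checks that surface the problem (a sketch; the service name, namespace, and credentials are the ones from the question):
# Ask the cluster which node is master (here it returned the same 503 master_not_discovered_exception)
curl -k -u "user:$PASSWORD" https://quickstart-es-http.quickstart.svc:9200/_cat/master
# Look for Elasticsearch pods stuck in Pending because of insufficient resources
kubectl get pods -n quickstart
kubectl describe pod <pod-name> -n quickstart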
In Elastic, you can create roles. For the same index, I would like one role that can see certain fields and another role for which those fields are hidden.
For that, I found 'field_security' in the docs:
https://www.elastic.co/guide/en/elastic-stack-overview/7.3/field-level-security.html
Currently I use Elastic + Kibana version 7.3.1 in a Docker container.
My request to create the role is:
POST /_security/role/myNewRole
{
"cluster": ["all"],
"indices": [
{
"names": [ "twitter" ],
"privileges": ["all"],
"field_security" : {
"grant" : [ "user", "password" ]
}
}
]
}
And the response is:
{
"error": {
"root_cause": [
{
"type": "security_exception",
"reason": "current license is non-compliant for [field and document level security]",
"license.expired.feature": "field and document level security"
}
],
"type": "security_exception",
"reason": "current license is non-compliant for [field and document level security]",
"license.expired.feature": "field and document level security"
},
"status": 403
}
I checked the license with GET /_license, and the response is:
{
"license" : {
"status" : "active",
"uid" : "864f625a-fc7a-41de-91f3-c4a64e045a55",
"type" : "basic",
"issue_date" : "2019-09-10T10:04:38.150Z",
"issue_date_in_millis" : 1568109878150,
"max_nodes" : 1000,
"issued_to" : "docker-cluster",
"issuer" : "elasticsearch",
"start_date_in_millis" : -1
}
}
My docker-compose file:
version: '3'
services:
elasticsearch:
image: docker.elastic.co/elasticsearch/elasticsearch:7.3.1
environment:
- cluster.name=docker-cluster
- bootstrap.memory_lock=true
- ELASTIC_PASSWORD=toto
- "ES_JAVA_OPTS=-Xms512m -Xmx512m"
- "discovery.type=single-node"
- "xpack.security.enabled=true"
- "xpack.security.dls_fls.enabled=true"
ulimits:
memlock:
soft: -1
hard: -1
ports:
- "9200:9200"
networks:
- net
volumes:
- esdata1:/usr/share/elasticsearch/data
kibana:
image: docker.elastic.co/kibana/kibana:7.3.1
environment:
- ELASTICSEARCH_USERNAME=elastic
- ELASTICSEARCH_PASSWORD=toto
ports:
- "5601:5601"
networks:
- net
volumes:
esdata1:
driver: local
networks:
net:
How can I fix this licensing problem?
Thanks
Even though basic security features are free with a Basic license, "field and document level security" is only available at the Platinum subscription level... and to Elastic Cloud users.
So the simplest and least costly way of getting this feature is to subscribe to Elastic Cloud.
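As a stopgap on a Basic license, the same role can be created without the field_security block; it will not hide any fields, but it avoids the licensing error (a sketch of the stripped-down request):
POST /_security/role/myNewRole
{
  "cluster": ["all"],
  "indices": [
    {
      "names": [ "twitter" ],
      "privileges": ["all"]
    }
  ]
}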
I have just started to learn about the ELK stack. I am referring to this site
https://www.elastic.co/guide/en/elastic-stack-get-started/6.4/get-started-elastic-stack.html
for installing the ELK stack on my system. I have a problem when I try to start Kibana on my Windows system; I get the following error:
log [13:36:52.255] [warning][admin][elasticsearch] Unable to revive connection: http://localhost:9200/
log [13:36:52.277] [warning][admin][elasticsearch] No living connections
log [13:36:52.279] [warning][task_manager] PollError No Living connections
log [13:36:53.810] [warning][admin][elasticsearch] Unable to revive connection: http://localhost:9200/
log [13:36:53.836] [warning][admin][elasticsearch] No living connections
log [13:36:56.456] [warning][admin][elasticsearch] Unable to revive connection: http://localhost:9200/
log [13:36:56.457] [warning][admin][elasticsearch] No living connections
log [13:36:56.458] [warning][task_manager] PollError No Living connections
log [13:36:57.348] [warning][admin][elasticsearch] Unable to revive connection: http://localhost:9200/
log [13:36:57.349] [warning][admin][elasticsearch] No living connections
I think it is having a problem establishing a connection to Elasticsearch. But the Elasticsearch instance seems to have started successfully. When I run
./bin/elasticsearch.bat
I get the following results:
[2019-09-01T18:34:11,594][INFO ][o.e.h.AbstractHttpServerTransport] [DESKTOP-TD85D7S] publish_address {192.168.0.101:9200}, bound_addresses {192.168.99.1:9200}, {192.168.56.1:9200}, {192.168.0.101:9200}
[2019-09-01T18:34:11,595][INFO ][o.e.n.Node ] [DESKTOP-TD85D7S] started
Your Elasticsearch log shows it bound to 192.168.0.101 (among other addresses) but not to localhost, which is why Kibana cannot reach http://localhost:9200. In your kibana.yml configuration file, you need to change the following line:
elasticsearch.hosts: ["http://localhost:9200"]
to
elasticsearch.hosts: ["http://192.168.0.101:9200"]
Note: Elasticsearch 7.4.0, Kibana 7.4.0; status: working.
I am using a docker-compose.yml file to run Elasticsearch and Kibana on localhost. Port 9200 is already used by another service, so I have mapped 9201:9200 (9201 on localhost to 9200 inside the container).
In the Kibana environment variables we set the Elasticsearch host and port (this must be the container port), e.g. ELASTICSEARCH_HOSTS=http://elasticsearch:9200
File: docker-compose.yml
version: '3.7'
services:
# Elasticsearch
elasticsearch:
image: docker.elastic.co/elasticsearch/elasticsearch:7.4.0
container_name: elasticsearch
environment:
- xpack.security.enabled=false
- discovery.type=single-node
ulimits:
memlock:
soft: -1
hard: -1
nofile:
soft: 65536
hard: 65536
cap_add:
- IPC_LOCK
volumes:
- elasticsearch-data:/usr/share/elasticsearch/data
ports:
- 9201:9200
- 9300:9300
# Kibana
kibana:
container_name: kibana
image: docker.elastic.co/kibana/kibana:7.4.0
environment:
- ELASTICSEARCH_HOSTS=http://elasticsearch:9200
ports:
- 5601:5601
depends_on:
- elasticsearch
volumes:
elasticsearch-data:
driver: local
Elasticsearch is running at http://localhost:9201; you should get something similar to:
{
"name" : "d0bb78764b7e",
"cluster_name" : "docker-cluster",
"cluster_uuid" : "Djch5nbnSWC-EqYawp2Cng",
"version" : {
"number" : "7.4.0",
"build_flavor" : "default",
"build_type" : "docker",
"build_hash" : "22e1767283e61a198cb4db791ea66e3f11ab9910",
"build_date" : "2019-09-27T08:36:48.569419Z",
"build_snapshot" : false,
"lucene_version" : "8.2.0",
"minimum_wire_compatibility_version" : "6.8.0",
"minimum_index_compatibility_version" : "6.0.0-beta1"
},
"tagline" : "You Know, for Search"
}
Kibana is running at http://localhost:5601; open it in the browser.
Note: if your Docker is running on a server other than your local machine, replace localhost with that server's host.
I found the error in a log file: /var/log/elasticsearch/my-instance.log
[2022-07-25T15:59:44,049][ERROR][o.e.b.ElasticsearchUncaughtExceptionHandler]
[nextcloud] uncaught exception in thread [main]
org.elasticsearch.bootstrap.StartupException: ElasticsearchException[failed to bind service];
nested: AccessDeniedException[/var/lib/elasticsearch/nodes];
You have to set the setgid bit (s) on the folder /var/lib/elasticsearch/nodes:
# mkdir /var/lib/elasticsearch/nodes
# chown elasticsearch:elasticsearch /var/lib/elasticsearch/nodes
# chmod g+s /var/lib/elasticsearch/nodes
# ls -ltr /var/lib/elasticsearch/nodes
drwxr-sr-x 5 elasticsearch elasticsearch 4096 25 juil. 16:42 0/
You can then query localhost on port 9200:
# curl http://localhost:9200
{
"name" : "nextcloud",
"cluster_name" : "my-instance",
"cluster_uuid" : "040...V3TA",
"version" : {
"number" : "7.14.1",
"build_flavor" : "default",
"build_type" : "deb",
"build_hash" : "66b...331e",
"build_date" : "2021-08-26T09:01:05.390870785Z",
"build_snapshot" : false,
"lucene_version" : "8.9.0",
"minimum_wire_compatibility_version" : "6.8.0",
"minimum_index_compatibility_version" : "6.0.0-beta1"
},
"tagline" : "You Know, for Search"
}
My environment: Debian 11.
I installed Elasticsearch by hand, by downloading the package elasticsearch-7.14.1-amd64.deb:
wget https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-7.14.1-amd64.deb
Hope it helps.