Couldn't find any Elasticsearch data - spring-boot

When I start Kibana and look in the Management tab, it displays the message:
Couldn't find any Elasticsearch data
You'll need to index some data into Elasticsearch before you can create an index pattern.
This is the data displayed by my Elasticsearch cluster in the browser:
http://localhost:9200/_cat/indices?v
health status index uuid pri rep docs.count docs.deleted store.size pri.store.size
green open .kibana-event-log-7.9.3-000001 JBL1C589TZWBEIjhe63f1w 1 0 3 0 16.2kb 16.2kb
green open .apm-custom-link nqIKOV7rR8OhzG4Y6UntSA 1 0 0 0 208b 208b
green open .kibana_task_manager_1 3RbGcb5nTrelAfjr8cQ8Gg 1 0 6 38 150.3kb 150.3kb
green open .apm-agent-configuration llIcAZAGTWanNghptfymVQ 1 0 0 0 208b 208b
green open .kibana_1 tMQMj0UdRd-sCZPb631Y5g 1 0 23 9 10.4mb 10.4mb
I cannot see a Logstash index, even though Logstash is running on port 9600:
http://localhost:9600/
{"host":"DESKTOP","version":"7.9.3","http_address":"127.0.0.1:9600","id":"b92c8d86-6159-4821-9ace-01bd5328f6af","name":"DESKTOP-MTG14LM","ephemeral_id":"4332a47b-ad63-4e02-a02e-5c233d7a3773","status":"green","snapshot":false,"pipeline":{"workers":4,"batch_size":125,"batch_delay":50},"build_date":"2020-10-16T12:25:47Z","build_sha":"d296f0087bdce367c37596241d5a1f00c9279193","build_snapshot":false}
logstash-sample.conf
input {
  file {
    type => "syslog"
    path => ["D:\Spring Boot Project\demo-gradle\another-log.log"]
  }
}
output {
  stdout {
    codec => rubydebug
  }
  elasticsearch {
    hosts => ["http://localhost:9200"]
    index => "%{[@metadata][beat]}-%{[@metadata][version]}-%{+YYYY.MM.dd}"
    #user => "elastic"
    #password => "changeme"
  }
}
Kindly support me in resolving this issue.
http://localhost:9600/_node/stats/?pretty
{
"host" : "DESKTOP",
"version" : "7.9.3",
"http_address" : "127.0.0.1:9600",
"id" : "b92c8d86-6159-4821-9ace-01bd5328f6af",
"name" : "DESKTOP",
"ephemeral_id" : "15e38fba-b37a-4b7d-9e58-6a89e2082799",
"status" : "green",
"snapshot" : false,
"pipeline" : {
"workers" : 4,
"batch_size" : 125,
"batch_delay" : 50
},
"jvm" : {
"threads" : {
"count" : 30,
"peak_count" : 32
},
"mem" : {
"heap_used_percent" : 24,
"heap_committed_in_bytes" : 1038876672,
"heap_max_in_bytes" : 1038876672,
"heap_used_in_bytes" : 255060960,
"non_heap_used_in_bytes" : 174032152,
"non_heap_committed_in_bytes" : 196833280,
"pools" : {
"old" : {
"max_in_bytes" : 724828160,
"peak_max_in_bytes" : 724828160,
"used_in_bytes" : 125286040,
"committed_in_bytes" : 724828160,
"peak_used_in_bytes" : 226688920
},
"young" : {
"max_in_bytes" : 279183360,
"peak_max_in_bytes" : 279183360,
"used_in_bytes" : 102941904,
"committed_in_bytes" : 279183360,
"peak_used_in_bytes" : 279183360
},
"survivor" : {
"max_in_bytes" : 34865152,
"peak_max_in_bytes" : 34865152,
"used_in_bytes" : 26833016,
"committed_in_bytes" : 34865152,
"peak_used_in_bytes" : 34865144
}
}
},
"gc" : {
"collectors" : {
"old" : {
"collection_time_in_millis" : 713,
"collection_count" : 4
},
"young" : {
"collection_time_in_millis" : 501,
"collection_count" : 8
}
}
},
"uptime_in_millis" : 815971
},
"process" : {
"open_file_descriptors" : -1,
"peak_open_file_descriptors" : -1,
"max_file_descriptors" : -1,
"mem" : {
"total_virtual_in_bytes" : -1
},
"cpu" : {
"total_in_millis" : -1,
"percent" : -3,
"load_average" : null
}
},
"events" : {
"in" : 0,
"filtered" : 0,
"out" : 0,
"duration_in_millis" : 0,
"queue_push_duration_in_millis" : 0
},
"pipelines" : {
"main" : {
"events" : {
"queue_push_duration_in_millis" : 0,
"out" : 0,
"duration_in_millis" : 0,
"in" : 0,
"filtered" : 0
},
"plugins" : {
"inputs" : [ {
"id" : "09ae4aa0701a92b926aee6c9c0abef34b22fe75695ed89371fb40e0ce5666067",
"name" : "file",
"events" : {
"queue_push_duration_in_millis" : 0,
"out" : 0
}
} ],
"codecs" : [ {
"id" : "plain_09312af1-ced8-4a87-8be0-7425fe846651",
"name" : "plain",
"encode" : {
"writes_in" : 0,
"duration_in_millis" : 0
},
"decode" : {
"out" : 0,
"writes_in" : 0,
"duration_in_millis" : 0
}
}, {
"id" : "rubydebug_88397be3-dcbe-4553-a788-aa3d4474e141",
"name" : "rubydebug",
"encode" : {
"writes_in" : 0,
"duration_in_millis" : 3
},
"decode" : {
"out" : 0,
"writes_in" : 0,
"duration_in_millis" : 0
}
}, {
"id" : "plain_497bb40b-2eab-4852-a002-e2c7ee4d7ab3",
"name" : "plain",
"encode" : {
"writes_in" : 0,
"duration_in_millis" : 0
},
"decode" : {
"out" : 0,
"writes_in" : 0,
"duration_in_millis" : 0
}
} ],
"filters" : [ ],
"outputs" : [ {
"id" : "e48f703a97c1645df3afa1d1b8937faffe8a408694f8a6ba5be6bb23bed53001",
"name" : "stdout",
"events" : {
"out" : 0,
"in" : 0,
"duration_in_millis" : 33
}
}, {
"id" : "ad540803354821020198353da7d7314b73309c07babecea3df737a197017449a",
"name" : "elasticsearch",
"events" : {
"out" : 0,
"in" : 0,
"duration_in_millis" : 4
}
} ]
},
"reloads" : {
"failures" : 0,
"successes" : 0,
"last_success_timestamp" : null,
"last_error" : null,
"last_failure_timestamp" : null
},
"queue" : {
"type" : "memory",
"events_count" : 0,
"queue_size_in_bytes" : 0,
"max_queue_size_in_bytes" : 0
},
"hash" : "661080585b2691f01bac24b363c27f0cfc03a009fbb302424abe96cc1ae50fb5",
"ephemeral_id" : "faf3face-77dc-455f-8632-1ff2e1ebdd7c"
}
},
"reloads" : {
"failures" : 0,
"successes" : 0
},
"os" : { },
"queue" : {
"events_count" : 0
}
bin/logstash --log.level debug
[2020-11-16T21:59:20,627][DEBUG][logstash.runner ] monitoring.elasticsearch.hosts: ["http://localhost:9200"]
[2020-11-16T21:59:20,630][DEBUG][logstash.runner ] monitoring.collection.interval: #<LogStash::Util::TimeValue:0xa362681 @duration=10, @time_unit=:second>
[2020-11-16T21:59:20,635][DEBUG][logstash.runner ] monitoring.collection.timeout_interval: #<LogStash::Util::TimeValue:0x228ca300 @duration=10, @time_unit=:minute>
[2020-11-16T21:59:20,637][DEBUG][logstash.runner ] monitoring.elasticsearch.username: "logstash_system"
[2020-11-16T21:59:20,639][DEBUG][logstash.runner ] monitoring.elasticsearch.ssl.verification_mode: "certificate"
[2020-11-16T21:59:20,640][DEBUG][logstash.runner ] monitoring.elasticsearch.sniffing: false
[2020-11-16T21:59:20,641][DEBUG][logstash.runner ] monitoring.collection.pipeline.details.enabled: true
[2020-11-16T21:59:20,643][DEBUG][logstash.runner ] monitoring.collection.config.enabled: true
[2020-11-16T21:59:20,644][DEBUG][logstash.runner ] node.uuid: ""
[2020-11-16T21:59:20,645][DEBUG][logstash.runner ] --------------- Logstash Settings -------------------
[2020-11-16T21:59:20,711][DEBUG][logstash.config.source.multilocal] Reading pipeline configurations from YAML {:location=>"D:/ELK stack/logstash/config/pipelines.yml"}
ERROR: Pipelines YAML file is empty. Location: D:/ELK stack/logstash/config/pipelines.yml
usage:
bin/logstash -f CONFIG_PATH [-t] [-r] [] [-w COUNT] [-l LOG]
bin/logstash --modules MODULE_NAME [-M "MODULE_NAME.var.PLUGIN_TYPE.PLUGIN_NAME.VARIABLE_NAME=VALUE"] [-t] [-w COUNT] [-l LOG]
bin/logstash -e CONFIG_STR [-t] [--log.level fatal|error|warn|info|debug|trace] [-w COUNT] [-l LOG]
bin/logstash -i SHELL [--log.level fatal|error|warn|info|debug|trace]
bin/logstash -V [--log.level fatal|error|warn|info|debug|trace]
bin/logstash --help
[2020-11-16T21:59:20,755][ERROR][org.logstash.Logstash ] java.lang.IllegalStateException: Logstash stopped processing because of an error: (SystemExit) exit
bin/logstash --log.level debug -f config/logstash-sample.conf
[2020-11-16T22:11:31,227][DEBUG][filewatch.sincedbcollection][main][09ae4aa0701a92b926aee6c9c0abef34b22fe75695ed89371fb40e0ce5666067] writing sincedb (delta since last write = 15)
[2020-11-16T22:11:32,314][DEBUG][logstash.instrument.periodicpoller.cgroup] One or more required cgroup files or directories not found: /proc/self/cgroup, /sys/fs/cgroup/cpuacct, /sys/fs/cgroup/cpu
[2020-11-16T22:11:32,678][DEBUG][logstash.instrument.periodicpoller.jvm] collector name {:name=>"ParNew"}
[2020-11-16T22:11:32,679][DEBUG][logstash.instrument.periodicpoller.jvm] collector name {:name=>"ConcurrentMarkSweep"}
[2020-11-16T22:11:34,964][DEBUG][org.logstash.execution.PeriodicFlush][main] Pushing flush onto pipeline.
[2020-11-16T22:11:37,330][DEBUG][logstash.instrument.periodicpoller.cgroup] One or more required cgroup files or directories not found: /proc/self/cgroup, /sys/fs/cgroup/cpuacct, /sys/fs/cgroup/cpu
[2020-11-16T22:11:37,691][DEBUG][logstash.instrument.periodicpoller.jvm] collector name {:name=>"ParNew"}
[2020-11-16T22:11:37,692][DEBUG][logstash.instrument.periodicpoller.jvm] collector name {:name=>"ConcurrentMarkSweep"}
[2020-11-16T22:11:39,964][DEBUG][org.logstash.execution.PeriodicFlush][main] Pushing flush onto pipeline.
[2020-11-16T22:11:42,336][DEBUG][logstash.instrument.periodicpoller.cgroup] One or more required cgroup files or directories not found: /proc/self/cgroup, /sys/fs/cgroup/cpuacct, /sys/fs/cgroup/cpu
[2020-11-16T22:11:42,697][DEBUG][logstash.instrument.periodicpoller.jvm] collector name {:name=>"ParNew"}
[2020-11-16T22:11:42,697][DEBUG][logstash.instrument.periodicpoller.jvm] collector name {:name=>"ConcurrentMarkSweep"}
[2020-11-16T22:11:44,960][DEBUG][org.logstash.execution.PeriodicFlush][main] Pushing flush onto pipeline.

Your file input is reading from the end of the file. Try modifying your input like this (i.e. read from the beginning):
input {
  file {
    type => "syslog"
    path => ["D:/Spring Boot Project/demo-gradle/another-log.log"]
    start_position => "beginning"
  }
}
And also make sure to remove this file:
D:/ELK stack/logstash/data/plugins/inputs/file/.sincedb_f8a2243b9184e26704b40adf1d7ef6af
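If you'd rather not delete the sincedb file by hand on every run, a minimal variant for local testing (assuming Windows, hence NUL instead of /dev/null) is to disable position tracking altogether:
input {
  file {
    type => "syslog"
    path => ["D:/Spring Boot Project/demo-gradle/another-log.log"]
    start_position => "beginning"
    # NUL is the Windows equivalent of /dev/null; Logstash then never persists
    # its read position, so the whole file is re-read on every restart
    sincedb_path => "NUL"
  }
}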

Thank you Val for your answer. In my case, Windows is being used as the OS, so the backslashes in the log path had to be replaced with forward slashes.
logstash-sample.conf
input {
  file {
    path => ["D:/Spring Boot Project/demo-gradle/another-log.log"]
    start_position => "beginning"
  }
}
output {
  stdout {
    codec => rubydebug
  }
  elasticsearch {
    hosts => ["http://localhost:9200"]
  }
}
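Once Logstash is restarted with this configuration, you can confirm that events are actually reaching Elasticsearch by listing the indices again (the exact index name depends on the Logstash version and ILM settings; with the default elasticsearch output it starts with logstash-):
curl -XGET "http://localhost:9200/_cat/indices/logstash-*?v"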

Related

Csv file load through logstash to elasticsearch not working

I am trying to load a CSV file from a Linux system through Logstash (Docker-based) with the conf file below.
./logstash/pipeline/logstash_csv_report.conf
input {
  file {
    path => "/home/user/elk/logstash/report-file.csv"
    start_position => "beginning"
    sincedb_path => "/dev/null"
  }
}
filter {
  csv {
    separator => ","
    columns => ["start_time", "date", "requester", "full-name", "id", "config", "status"]
  }
}
output {
  elasticsearch {
    action => "index"
    hosts => "http://elasticsearch:9200"
    index => "project-info"
  }
  stdout {}
}
I do not know why my CSV file is not getting uploaded into Elasticsearch. The last few lines of my Logstash Docker logs are shown below; I don't see any errors in Logstash.
logstash | [2021-01-18T04:12:36,076][INFO ][logstash.javapipeline ][main] Pipeline Java execution initialization time {"seconds"=>1.1}
logstash | [2021-01-18T04:12:36,213][INFO ][logstash.javapipeline ][main] Pipeline started {"pipeline.id"=>"main"}
logstash | [2021-01-18T04:12:36,280][INFO ][filewatch.observingtail ][main][497c9eb0da97efa19ad20783321e7bf30eb302262f92ac565b074e3ad91ea72d] START, creating Discoverer, Watch with file and sincedb collections
logstash | [2021-01-18T04:12:36,282][INFO ][logstash.agent ] Pipelines running {:count=>2, :running_pipelines=>[:".monitoring-logstash", :main], :non_running_pipelines=>[]}
logstash | [2021-01-18T04:12:36,474][INFO ][logstash.agent ] Successfully started Logstash API endpoint {:port=>9600}
My docker-compose file is as follows.
version: '3.7'
services:
  elasticsearch:
    image: docker.elastic.co/elasticsearch/elasticsearch:7.10.1
    container_name: elasticsearch
    restart: unless-stopped
    environment:
      - node.name=elasticsearch
      - discovery.seed_hosts=elasticsearch
      - cluster.initial_master_nodes=elasticsearch
      - cluster.name=docker-cluster
      - bootstrap.memory_lock=true
      - "ES_JAVA_OPTS=-Xms1g -Xmx1g"
    ulimits:
      memlock:
        soft: -1
        hard: -1
    ports:
      - '9200:9200'
      - '9300:9300'
    volumes:
      - './elasticsearch:/usr/share/elasticsearch/data'
    networks:
      - elk
  kibana:
    image: docker.elastic.co/kibana/kibana:7.10.1
    container_name: kibana
    restart: unless-stopped
    environment:
      ELASTICSEARCH_URL: "http://elasticsearch:9200"
    ports:
      - '5601:5601'
    volumes:
      - './kibana:/usr/share/kibana/data'
    depends_on:
      - elasticsearch
    networks:
      - elk
  logstash:
    image: docker.elastic.co/logstash/logstash:7.10.1
    container_name: logstash
    restart: unless-stopped
    environment:
      - 'HEAP_SIZE:1g'
      - 'LS_JAVA_OPTS=-Xms1g -Xmx1g'
      - 'ELASTICSEARCH_HOST:elasticsearch'
      - 'ELASTICSEARCH_PORT:9200'
    command: sh -c "logstash -f /usr/share/logstash/pipeline/logstash_csv_report.conf"
    ports:
      - '5044:5044'
      - '5000:5000/tcp'
      - '5000:5000/udp'
      - '9600:9600'
    volumes:
      - './logstash/pipeline:/usr/share/logstash/pipeline'
    depends_on:
      - elasticsearch
    networks:
      - elk
networks:
  elk:
    driver: bridge
In my ./logstash/pipeline folder I have only the logstash_csv_report.conf file.
The same CSV file can be uploaded successfully using the Kibana GUI import option.
Someone please help me to resolve this problem using the Logstash upload.
Curl output:
# curl -XGET http://51.52.53.54:9600/_node/stats/?pretty
{
"host" : "3c08f83dfc9b",
"version" : "7.10.1",
"http_address" : "0.0.0.0:9600",
"id" : "5f301139-33bf-4e4d-99a0-7b4d7b464675",
"name" : "3c08f83dfc9b",
"ephemeral_id" : "95a0101e-e54d-4f72-aa7a-dd18ccb2814e",
"status" : "green",
"snapshot" : false,
"pipeline" : {
"workers" : 64,
"batch_size" : 125,
"batch_delay" : 50
},
"jvm" : {
"threads" : {
"count" : 157,
"peak_count" : 158
},
"mem" : {
"heap_used_percent" : 16,
"heap_committed_in_bytes" : 4151836672,
"heap_max_in_bytes" : 4151836672,
"heap_used_in_bytes" : 689455928,
"non_heap_used_in_bytes" : 190752760,
"non_heap_committed_in_bytes" : 218345472,
"pools" : {
"survivor" : {
"peak_max_in_bytes" : 143130624,
"max_in_bytes" : 143130624,
"committed_in_bytes" : 143130624,
"peak_used_in_bytes" : 65310304,
"used_in_bytes" : 39570400
},
"old" : {
"peak_max_in_bytes" : 2863333376,
"max_in_bytes" : 2863333376,
"committed_in_bytes" : 2863333376,
"peak_used_in_bytes" : 115589344,
"used_in_bytes" : 115589344
},
"young" : {
"peak_max_in_bytes" : 1145372672,
"max_in_bytes" : 1145372672,
"committed_in_bytes" : 1145372672,
"peak_used_in_bytes" : 1145372672,
"used_in_bytes" : 534296184
}
}
},
"gc" : {
"collectors" : {
"old" : {
"collection_count" : 3,
"collection_time_in_millis" : 1492
},
"young" : {
"collection_count" : 7,
"collection_time_in_millis" : 303
}
}
},
"uptime_in_millis" : 4896504
},
"process" : {
"open_file_descriptors" : 91,
"peak_open_file_descriptors" : 92,
"max_file_descriptors" : 1048576,
"mem" : {
"total_virtual_in_bytes" : 21971415040
},
"cpu" : {
"total_in_millis" : 478180,
"percent" : 0,
"load_average" : {
"1m" : 1.35,
"5m" : 0.7,
"15m" : 0.53
}
}
},
"events" : {
"in" : 0,
"filtered" : 0,
"out" : 0,
"duration_in_millis" : 0,
"queue_push_duration_in_millis" : 0
},
"pipelines" : {
"main" : {
"events" : {
"out" : 0,
"duration_in_millis" : 0,
"queue_push_duration_in_millis" : 0,
"filtered" : 0,
"in" : 0
},
"plugins" : {
"inputs" : [ {
"id" : "497c9eb0da97efa19ad20783321e7bf30eb302262f92ac565b074e3ad91ea72d",
"events" : {
"out" : 0,
"queue_push_duration_in_millis" : 0
},
"name" : "file"
} ],
"codecs" : [ {
"id" : "rubydebug_a060ea28-52ce-4186-a474-272841e0429e",
"decode" : {
"out" : 0,
"writes_in" : 0,
"duration_in_millis" : 0
},
"encode" : {
"writes_in" : 0,
"duration_in_millis" : 2
},
"name" : "rubydebug"
}, {
"id" : "plain_d2037602-bfe9-4eaf-8cc8-0a84665fa186",
"decode" : {
"out" : 0,
"writes_in" : 0,
"duration_in_millis" : 0
},
"encode" : {
"writes_in" : 0,
"duration_in_millis" : 0
},
"name" : "plain"
}, {
"id" : "plain_1c01f964-82e5-45a1-b9f9-a400bc2ac486",
"decode" : {
"out" : 0,
"writes_in" : 0,
"duration_in_millis" : 0
},
"encode" : {
"writes_in" : 0,
"duration_in_millis" : 0
},
"name" : "plain"
} ],
"filters" : [ {
"id" : "3eee98d7d4b500333a2c45a729786d4d2aefb7cee7ae79b066a50a1630312b25",
"events" : {
"out" : 0,
"duration_in_millis" : 39,
"in" : 0
},
"name" : "csv"
} ],
"outputs" : [ {
"id" : "8959d62efd3616a9763067781ec2ff67a7d8150d6773a48fc54f71478a9ef7ab",
"events" : {
"out" : 0,
"duration_in_millis" : 0,
"in" : 0
},
"name" : "elasticsearch"
}, {
"id" : "b457147a2293c2dee97b6ee9a5205de24159b520e86eb89be71fde7ba394a0d2",
"events" : {
"out" : 0,
"duration_in_millis" : 22,
"in" : 0
},
"name" : "stdout"
} ]
},
"reloads" : {
"last_success_timestamp" : null,
"last_error" : null,
"successes" : 0,
"failures" : 0,
"last_failure_timestamp" : null
},
"queue" : {
"type" : "memory",
"events_count" : 0,
"queue_size_in_bytes" : 0,
"max_queue_size_in_bytes" : 0
},
"hash" : "3479b7408213a7b52f36d8ad3dbd5a3174768a004119776e0244ed1971814f72",
"ephemeral_id" : "ffc4d5d6-6f90-4c24-8b2a-e932d027a5f2"
},
".monitoring-logstash" : {
"events" : null,
"plugins" : {
"inputs" : [ ],
"codecs" : [ ],
"filters" : [ ],
"outputs" : [ ]
},
"reloads" : {
"last_success_timestamp" : null,
"last_error" : null,
"successes" : 0,
"failures" : 0,
"last_failure_timestamp" : null
},
"queue" : null
}
},
"reloads" : {
"successes" : 0,
"failures" : 0
},
"os" : {
"cgroup" : {
"cpuacct" : {
"usage_nanos" : 478146261497,
"control_group" : "/"
},
"cpu" : {
"cfs_quota_micros" : -1,
"stat" : {
"number_of_times_throttled" : 0,
"time_throttled_nanos" : 0,
"number_of_elapsed_periods" : 0
},
"control_group" : "/",
"cfs_period_micros" : 100000
}
}
},
"queue" : {
"events_count" : 0
}
You need to make sure that /home/user/elk/logstash/report-file.csv can be read by Logstash. I don't see that file being mapped to a volume accessible to Logstash.
In your docker compose configuration you need to add another volume like this:
logstash:
  ...
  volumes:
    - './logstash/pipeline:/usr/share/logstash/pipeline'
    - '/home/user/elk/logstash:/home/user/elk/logstash'
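A quick way to verify the fix (a sketch, assuming the container names from the compose file above) is to check that the file is visible from inside the Logstash container and that the target index starts receiving documents:
# the CSV must be readable from inside the container
docker exec -it logstash ls -l /home/user/elk/logstash/report-file.csv
# once events flow, the project-info index should show a growing docs.count
curl -XGET 'http://localhost:9200/_cat/indices/project-info?v'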

MongoDB hangs randomly

Overview
I have a ruby application that uses MongoDB as a database. While running tests for this application I am creating collections and indexes for every test case using Minitest.
The test environment is created using docker compose where one container is running the tests and the other container is running MongoDB.
Problem
When running the tests for the first time, MongoDB gets stuck after a while: any request to query the collections does not respond.
I was able to connect to it with the command-line client before the tests started running. When I checked the state of the server using db.serverStatus(), I saw that some operations had acquired locks. Looking at the globalLock field, I understand that 1 operation holds the write lock and 2 operations are waiting to acquire a read lock.
I am unable to understand why these operations would hang and not yield the locks. I have no idea how to debug this problem further.
MongoDB Version: 3.6.13
Ruby Driver version: 2.8.0
I've also tried other versions 3.6.x and 4.0
Any help or direction is highly appreciated.
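For reference, the checks described above amount to shell calls along these lines (a minimal sketch; db.currentOp() is simply another way of listing the operations that hold or wait for locks):
// summary of lock holders and waiters
db.serverStatus().globalLock
// every operation currently waiting for a lock, with its query and lock type
db.currentOp({ waitingForLock: true })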
db.serverStatus output
{
"host" : "c658c885eb90",
"version" : "3.6.14",
"process" : "mongod",
"pid" : NumberLong(1),
"uptime" : 98,
"uptimeMillis" : NumberLong(97909),
"uptimeEstimate" : NumberLong(97),
"localTime" : ISODate("2019-11-03T16:09:14.289Z"),
"asserts" : {
"regular" : 0,
"warning" : 0,
"msg" : 0,
"user" : 0,
"rollovers" : 0
},
"connections" : {
"current" : 6,
"available" : 838854,
"totalCreated" : 11
},
"extra_info" : {
"note" : "fields vary by platform",
"page_faults" : 0
},
"globalLock" : {
"totalTime" : NumberLong(97908000),
"currentQueue" : {
"total" : 2,
"readers" : 2,
"writers" : 0
},
"activeClients" : {
"total" : 13,
"readers" : 0,
"writers" : 1
}
},
"locks" : {
"Global" : {
"acquireCount" : {
"r" : NumberLong(14528),
"w" : NumberLong(12477),
"W" : NumberLong(5)
}
},
"Database" : {
"acquireCount" : {
"r" : NumberLong(1020),
"w" : NumberLong(14459),
"R" : NumberLong(3),
"W" : NumberLong(6599)
},
"acquireWaitCount" : {
"r" : NumberLong(2)
},
"timeAcquiringMicros" : {
"r" : NumberLong(76077321)
}
},
"Collection" : {
"acquireCount" : {
"R" : NumberLong(1018),
"W" : NumberLong(8805)
}
},
"Metadata" : {
"acquireCount" : {
"W" : NumberLong(37)
}
}
},
"logicalSessionRecordCache" : {
"activeSessionsCount" : 3,
"sessionsCollectionJobCount" : 1,
"lastSessionsCollectionJobDurationMillis" : 0,
"lastSessionsCollectionJobTimestamp" : ISODate("2019-11-03T16:07:36.407Z"),
"lastSessionsCollectionJobEntriesRefreshed" : 0,
"lastSessionsCollectionJobEntriesEnded" : 0,
"lastSessionsCollectionJobCursorsClosed" : 0,
"transactionReaperJobCount" : 0,
"lastTransactionReaperJobDurationMillis" : 0,
"lastTransactionReaperJobTimestamp" : ISODate("2019-11-03T16:07:36.407Z"),
"lastTransactionReaperJobEntriesCleanedUp" : 0
},
"network" : {
"bytesIn" : NumberLong(1682811),
"bytesOut" : NumberLong(1019834),
"physicalBytesIn" : NumberLong(1682811),
"physicalBytesOut" : NumberLong(1019834),
"numRequests" : NumberLong(7822),
"compression" : {
"snappy" : {
"compressor" : {
"bytesIn" : NumberLong(0),
"bytesOut" : NumberLong(0)
},
"decompressor" : {
"bytesIn" : NumberLong(0),
"bytesOut" : NumberLong(0)
}
}
},
"serviceExecutorTaskStats" : {
"executor" : "passthrough",
"threadsRunning" : 6
}
},
"opLatencies" : {
"reads" : {
"latency" : NumberLong(61374),
"ops" : NumberLong(963)
},
"writes" : {
"latency" : NumberLong(13074),
"ops" : NumberLong(286)
},
"commands" : {
"latency" : NumberLong(988232),
"ops" : NumberLong(6570)
}
},
"opReadConcernCounters" : {
"available" : NumberLong(0),
"linearizable" : NumberLong(0),
"local" : NumberLong(0),
"majority" : NumberLong(0),
"none" : NumberLong(944)
},
"opcounters" : {
"insert" : 246,
"query" : 944,
"update" : 40,
"delete" : 0,
"getmore" : 0,
"command" : 6595
},
"opcountersRepl" : {
"insert" : 0,
"query" : 0,
"update" : 0,
"delete" : 0,
"getmore" : 0,
"command" : 0
},
"storageEngine" : {
"name" : "ephemeralForTest",
"supportsCommittedReads" : false,
"readOnly" : false,
"persistent" : false
},
"tcmalloc" : {
"generic" : {
"current_allocated_bytes" : 8203504,
"heap_size" : 12496896
},
"tcmalloc" : {
"pageheap_free_bytes" : 2760704,
"pageheap_unmapped_bytes" : 0,
"max_total_thread_cache_bytes" : 516947968,
"current_total_thread_cache_bytes" : 1007120,
"total_free_bytes" : 1532688,
"central_cache_free_bytes" : 231040,
"transfer_cache_free_bytes" : 294528,
"thread_cache_free_bytes" : 1007120,
"aggressive_memory_decommit" : 0,
"pageheap_committed_bytes" : 12496896,
"pageheap_scavenge_count" : 0,
"pageheap_commit_count" : 9,
"pageheap_total_commit_bytes" : 12496896,
"pageheap_decommit_count" : 0,
"pageheap_total_decommit_bytes" : 0,
"pageheap_reserve_count" : 9,
"pageheap_total_reserve_bytes" : 12496896,
"spinlock_total_delay_ns" : 0,
"formattedString" : "------------------------------------------------\nMALLOC: 8204080 ( 7.8 MiB) Bytes in use by application\nMALLOC: + 2760704 ( 2.6 MiB) Bytes in page heap freelist\nMALLOC: + 231040 ( 0.2 MiB) Bytes in central cache freelist\nMALLOC: + 294528 ( 0.3 MiB) Bytes in transfer cache freelist\nMALLOC: + 1006544 ( 1.0 MiB) Bytes in thread cache freelists\nMALLOC: + 1204480 ( 1.1 MiB) Bytes in malloc metadata\nMALLOC: ------------\nMALLOC: = 13701376 ( 13.1 MiB) Actual memory used (physical + swap)\nMALLOC: + 0 ( 0.0 MiB) Bytes released to OS (aka unmapped)\nMALLOC: ------------\nMALLOC: = 13701376 ( 13.1 MiB) Virtual address space used\nMALLOC:\nMALLOC: 415 Spans in use\nMALLOC: 18 Thread heaps in use\nMALLOC: 4096 Tcmalloc page size\n------------------------------------------------\nCall ReleaseFreeMemory() to release freelist memory to the OS (via madvise()).\nBytes released to the OS take up virtual address space but no physical memory.\n"
}
},
"transactions" : {
"retriedCommandsCount" : NumberLong(0),
"retriedStatementsCount" : NumberLong(0),
"transactionsCollectionWriteCount" : NumberLong(0)
},
"transportSecurity" : {
"1.0" : NumberLong(0),
"1.1" : NumberLong(0),
"1.2" : NumberLong(0),
"1.3" : NumberLong(0),
"unknown" : NumberLong(0)
},
"mem" : {
"bits" : 64,
"resident" : 41,
"virtual" : 836,
"supported" : true,
"mapped" : 0
},
"metrics" : {
"commands" : {
"buildInfo" : {
"failed" : NumberLong(0),
"total" : NumberLong(2)
},
"count" : {
"failed" : NumberLong(0),
"total" : NumberLong(21)
},
"createIndexes" : {
"failed" : NumberLong(0),
"total" : NumberLong(5656)
},
"drop" : {
"failed" : NumberLong(0),
"total" : NumberLong(784)
},
"dropIndexes" : {
"failed" : NumberLong(87),
"total" : NumberLong(87)
},
"find" : {
"failed" : NumberLong(0),
"total" : NumberLong(944)
},
"getLog" : {
"failed" : NumberLong(0),
"total" : NumberLong(1)
},
"insert" : {
"failed" : NumberLong(0),
"total" : NumberLong(246)
},
"isMaster" : {
"failed" : NumberLong(0),
"total" : NumberLong(38)
},
"listCollections" : {
"failed" : NumberLong(0),
"total" : NumberLong(1)
},
"listIndexes" : {
"failed" : NumberLong(1),
"total" : NumberLong(1)
},
"replSetGetStatus" : {
"failed" : NumberLong(1),
"total" : NumberLong(1)
},
"serverStatus" : {
"failed" : NumberLong(0),
"total" : NumberLong(2)
},
"update" : {
"failed" : NumberLong(0),
"total" : NumberLong(40)
},
"whatsmyuri" : {
"failed" : NumberLong(0),
"total" : NumberLong(1)
}
},
"cursor" : {
"timedOut" : NumberLong(0),
"open" : {
"noTimeout" : NumberLong(0),
"pinned" : NumberLong(0),
"total" : NumberLong(0)
}
},
"document" : {
"deleted" : NumberLong(0),
"inserted" : NumberLong(246),
"returned" : NumberLong(398),
"updated" : NumberLong(40)
},
"getLastError" : {
"wtime" : {
"num" : 0,
"totalMillis" : 0
},
"wtimeouts" : NumberLong(0)
},
"operation" : {
"scanAndOrder" : NumberLong(0),
"writeConflicts" : NumberLong(0)
},
"query" : {
"updateOneOpStyleBroadcastWithExactIDCount" : NumberLong(0),
"upsertReplacementCannotTargetByQueryCount" : NumberLong(0)
},
"queryExecutor" : {
"scanned" : NumberLong(435),
"scannedObjects" : NumberLong(438)
},
"record" : {
"moves" : NumberLong(0)
},
"repl" : {
"executor" : {
"pool" : {
"inProgressCount" : 0
},
"queues" : {
"networkInProgress" : 0,
"sleepers" : 0
},
"unsignaledEvents" : 0,
"shuttingDown" : false,
"networkInterface" : "\nNetworkInterfaceASIO Operations' Diagnostic:\nOperation: Count: \nConnecting 0 \nIn Progress 0 \nSucceeded 0 \nCanceled 0 \nFailed 0 \nTimed Out 0 \n\n"
},
"apply" : {
"attemptsToBecomeSecondary" : NumberLong(0),
"batchSize" : NumberLong(0),
"batches" : {
"num" : 0,
"totalMillis" : 0
},
"ops" : NumberLong(0)
},
"buffer" : {
"count" : NumberLong(0),
"maxSizeBytes" : NumberLong(0),
"sizeBytes" : NumberLong(0)
},
"initialSync" : {
"completed" : NumberLong(0),
"failedAttempts" : NumberLong(0),
"failures" : NumberLong(0)
},
"network" : {
"bytes" : NumberLong(0),
"getmores" : {
"num" : 0,
"totalMillis" : 0
},
"ops" : NumberLong(0),
"readersCreated" : NumberLong(0)
},
"preload" : {
"docs" : {
"num" : 0,
"totalMillis" : 0
},
"indexes" : {
"num" : 0,
"totalMillis" : 0
}
}
},
"storage" : {
"freelist" : {
"search" : {
"bucketExhausted" : NumberLong(0),
"requests" : NumberLong(0),
"scanned" : NumberLong(0)
}
}
},
"ttl" : {
"deletedDocuments" : NumberLong(0),
"passes" : NumberLong(1)
}
},
"ok" : 1
}

Number of records processed in logstash

We're using Logstash to sync to Elasticsearch and we have around 3 million documents. It takes 3 to 4 hours to sync. Currently, all we can see is that it has started and stopped. Is there any way to see how many records have been processed in Logstash?
If you're using Logstash 5 or higher, the Logstash Monitoring API can help you. You can see and monitor what's happening inside Logstash as it processes events. If you hit the Pipeline stats API, you'll get the total number of processed events per stage and plugin (input/filter/output):
curl -XGET 'localhost:9600/_node/stats/pipelines?pretty'
You'll get this type of response in which you can clearly see at any time how many events have been processed:
{
"pipelines" : {
"test" : {
"events" : {
"duration_in_millis" : 365495,
"in" : 216485,
"filtered" : 216485,
"out" : 216485,
"queue_push_duration_in_millis" : 342466
},
"plugins" : {
"inputs" : [ {
"id" : "35131f351e2dc5ed13ee04265a8a5a1f95292165-1",
"events" : {
"out" : 216485,
"queue_push_duration_in_millis" : 342466
},
"name" : "beats"
} ],
"filters" : [ {
"id" : "35131f351e2dc5ed13ee04265a8a5a1f95292165-2",
"events" : {
"duration_in_millis" : 55969,
"in" : 216485,
"out" : 216485
},
"failures" : 216485,
"patterns_per_field" : {
"message" : 1
},
"name" : "grok"
}, {
"id" : "35131f351e2dc5ed13ee04265a8a5a1f95292165-3",
"events" : {
"duration_in_millis" : 3326,
"in" : 216485,
"out" : 216485
},
"name" : "geoip"
} ],
"outputs" : [ {
"id" : "35131f351e2dc5ed13ee04265a8a5a1f95292165-4",
"events" : {
"duration_in_millis" : 278557,
"in" : 216485,
"out" : 216485
},
"name" : "elasticsearch"
} ]
},
"reloads" : {
"last_error" : null,
"successes" : 0,
"last_success_timestamp" : null,
"last_failure_timestamp" : null,
"failures" : 0
},
"queue" : {
"type" : "memory"
}
}
}
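If you only need the totals rather than the full per-plugin breakdown, you can poll just the events section of the stats API (a sketch; assumes jq is installed):
# print the overall in/filtered/out counters every 10 seconds
while true; do
  curl -s 'localhost:9600/_node/stats/events' | jq '.events'
  sleep 10
done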

Elastic Search 2.3.4 Stops allocating shards with no obvious reason

I am attempting to upgrade our Elastic Search cluster from 1.6 to 2.3.4. The upgrade seems to work, and I can see shard allocation starting to happen within Kopf - but at some point the shard allocation appears to stop with many shards left unallocated, and no errors being reported in the logs. Typically I'm left with 1200 / 3800 shards unallocated.
We have a typical 3-node cluster, and I am trialing this standalone with all 3 nodes running on my local machine.
I have seen similar symptoms reported before - see https://t37.net/how-to-fix-your-elasticsearch-cluster-stuck-in-initializing-shards-mode.html. The solution there seemed to be to manually allocate the shards, which I've tried (and it works), but I'm at a loss to explain the behaviour of Elasticsearch here. I'd prefer not to go down this route, as I want my cluster to spin up automatically without intervention.
There is also https://github.com/elastic/elasticsearch/pull/14494 which seems to be resolved with the latest ES version, so shouldn't be a problem.
There are no errors in the log files - I have upped the root-level logging to 'DEBUG' in order to see what I can. What I can see is lines like the ones below for each unallocated shard (these are from the master node's logs):
[2016-07-26 09:18:04,859][DEBUG][gateway ] [germany] [index][4] found 0 allocations of [index][4], node[null], [P], v[0], s[UNASSIGNED], unassigned_info[[reason=CLUSTER_RECOVERED], at[2016-07-26T08:05:04.447Z]], highest version: [-1]
[2016-07-26 09:18:04,859][DEBUG][gateway ] [germany] [index][4]: not allocating, number_of_allocated_shards_found [0]
Config file (with comments removed):
cluster.name: elasticsearch-jm-2.3.4
node.name: germany
script.inline: true
script.indexed: true
If I query the cluster health after reallocation has stopped - I get the response below:
http://localhost:9200/_cluster/health?pretty
cluster_name : elasticsearch-jm-2.3.4
status : red
timed_out : False
number_of_nodes : 3
number_of_data_nodes : 3
active_primary_shards : 1289
active_shards : 2578
relocating_shards : 0
initializing_shards : 0
unassigned_shards : 1264
delayed_unassigned_shards : 0
number_of_pending_tasks : 0
number_of_in_flight_fetch : 0
task_max_waiting_in_queue_millis : 0
active_shards_percent_as_number : 67.10046850598647
Further querying for shards, filtered to one index with unallocated shards, shows that shards 0 and 4 are unallocated whereas shards 1, 2 and 3 have been allocated:
http://localhost:9200/_cat/shards
cs-payment-warn-2016.07.20 3 p STARTED 106 92.4kb 127.0.0.1 germany
cs-payment-warn-2016.07.20 3 r STARTED 106 92.4kb 127.0.0.1 switzerland
cs-payment-warn-2016.07.20 4 p UNASSIGNED
cs-payment-warn-2016.07.20 4 r UNASSIGNED
cs-payment-warn-2016.07.20 2 r STARTED 120 74.5kb 127.0.0.1 cyprus
cs-payment-warn-2016.07.20 2 p STARTED 120 74.5kb 127.0.0.1 germany
cs-payment-warn-2016.07.20 1 r STARTED 120 73.8kb 127.0.0.1 cyprus
cs-payment-warn-2016.07.20 1 p STARTED 120 73.8kb 127.0.0.1 germany
cs-payment-warn-2016.07.20 0 p UNASSIGNED
cs-payment-warn-2016.07.20 0 r UNASSIGNED
Manually rerouting an unassigned shard appears to work (stripped-back result set):
http://localhost:9200/_cluster/reroute
POST:
{
"dry_run": true,
"commands": [
{
"allocate": {
"index": "cs-payment-warn-2016.07.20",
"shard": 4,
"node": "switzerland" ,
"allow_primary": true
}
}
]
}
Response:
{
"acknowledged" : true,
"state" : {
"version" : 722,
"state_uuid" : "Vw2vPoCMQk2ZosjzviD4TQ",
"master_node" : "yhL7XXy-SKu_WAM-C33dzA",
"blocks" : {},
"nodes" : {},
"routing_table" : {
"indices" : {
"cs-payment-warn-2016.07.20" : {
"shards" : {
"3" : [{
"state" : "STARTED",
"primary" : true,
"node" : "yhL7XXy-SKu_WAM-C33dzA",
"relocating_node" : null,
"shard" : 3,
"index" : "cs-payment-warn-2016.07.20",
"version" : 22,
"allocation_id" : {
"id" : "x_Iq88hmTqiasrjW09hVuw"
}
}, {
"state" : "STARTED",
"primary" : false,
"node" : "1a8dgBscTUS3c7Pv4mN9CQ",
"relocating_node" : null,
"shard" : 3,
"index" : "cs-payment-warn-2016.07.20",
"version" : 22,
"allocation_id" : {
"id" : "DF-EUEy_SpeUElnZI6cgsQ"
}
}
],
"4" : [{
"state" : "INITIALIZING",
"primary" : true,
"node" : "1a8dgBscTUS3c7Pv4mN9CQ",
"relocating_node" : null,
"shard" : 4,
"index" : "cs-payment-warn-2016.07.20",
"version" : 1,
"allocation_id" : {
"id" : "1tw7C7YPQsWwm_O-8mYHRg"
},
"unassigned_info" : {
"reason" : "INDEX_CREATED",
"at" : "2016-07-26T14:20:15.395Z",
"details" : "force allocation from previous reason CLUSTER_RECOVERED, null"
}
}, {
"state" : "UNASSIGNED",
"primary" : false,
"node" : null,
"relocating_node" : null,
"shard" : 4,
"index" : "cs-payment-warn-2016.07.20",
"version" : 1,
"unassigned_info" : {
"reason" : "CLUSTER_RECOVERED",
"at" : "2016-07-26T11:24:11.868Z"
}
}
],
"2" : [{
"state" : "STARTED",
"primary" : false,
"node" : "rlRQ2u0XQRqxWld-wSrOug",
"relocating_node" : null,
"shard" : 2,
"index" : "cs-payment-warn-2016.07.20",
"version" : 22,
"allocation_id" : {
"id" : "eQ-_vWNbRp27So0iGSitmA"
}
}, {
"state" : "STARTED",
"primary" : true,
"node" : "yhL7XXy-SKu_WAM-C33dzA",
"relocating_node" : null,
"shard" : 2,
"index" : "cs-payment-warn-2016.07.20",
"version" : 22,
"allocation_id" : {
"id" : "O1PU1_NVS8-uB2yBrG76MA"
}
}
],
"1" : [{
"state" : "STARTED",
"primary" : false,
"node" : "rlRQ2u0XQRqxWld-wSrOug",
"relocating_node" : null,
"shard" : 1,
"index" : "cs-payment-warn-2016.07.20",
"version" : 24,
"allocation_id" : {
"id" : "ZmxtOvorRVmndR15OJMkMA"
}
}, {
"state" : "STARTED",
"primary" : true,
"node" : "yhL7XXy-SKu_WAM-C33dzA",
"relocating_node" : null,
"shard" : 1,
"index" : "cs-payment-warn-2016.07.20",
"version" : 24,
"allocation_id" : {
"id" : "ZNgzePThQxS-iqhRSXzZCw"
}
}
],
"0" : [{
"state" : "UNASSIGNED",
"primary" : true,
"node" : null,
"relocating_node" : null,
"shard" : 0,
"index" : "cs-payment-warn-2016.07.20",
"version" : 0,
"unassigned_info" : {
"reason" : "CLUSTER_RECOVERED",
"at" : "2016-07-26T11:24:11.868Z"
}
}, {
"state" : "UNASSIGNED",
"primary" : false,
"node" : null,
"relocating_node" : null,
"shard" : 0,
"index" : "cs-payment-warn-2016.07.20",
"version" : 0,
"unassigned_info" : {
"reason" : "CLUSTER_RECOVERED",
"at" : "2016-07-26T11:24:11.868Z"
}
}
]
}
}
},
"routing_nodes" : {
"unassigned" : [{
"state" : "UNASSIGNED",
"primary" : false,
"node" : null,
"relocating_node" : null,
"shard" : 4,
"index" : "cs-payment-warn-2016.07.20",
"version" : 1,
"unassigned_info" : {
"reason" : "CLUSTER_RECOVERED",
"at" : "2016-07-26T11:24:11.868Z"
}
}, {
"state" : "UNASSIGNED",
"primary" : true,
"node" : null,
"relocating_node" : null,
"shard" : 0,
"index" : "cs-payment-warn-2016.07.20",
"version" : 0,
"unassigned_info" : {
"reason" : "CLUSTER_RECOVERED",
"at" : "2016-07-26T11:24:11.868Z"
}
}, {
"state" : "UNASSIGNED",
"primary" : false,
"node" : null,
"relocating_node" : null,
"shard" : 0,
"index" : "cs-payment-warn-2016.07.20",
"version" : 0,
"unassigned_info" : {
"reason" : "CLUSTER_RECOVERED",
"at" : "2016-07-26T11:24:11.868Z"
}
}
]
},
"nodes" : {
"rlRQ2u0XQRqxWld-wSrOug" : [{
"state" : "STARTED",
"primary" : false,
"node" : "rlRQ2u0XQRqxWld-wSrOug",
"relocating_node" : null,
"shard" : 2,
"index" : "cs-payment-warn-2016.07.20",
"version" : 22,
"allocation_id" : {
"id" : "eQ-_vWNbRp27So0iGSitmA"
}
}, {
"state" : "STARTED",
"primary" : false,
"node" : "rlRQ2u0XQRqxWld-wSrOug",
"relocating_node" : null,
"shard" : 1,
"index" : "cs-payment-warn-2016.07.20",
"version" : 24,
"allocation_id" : {
"id" : "ZmxtOvorRVmndR15OJMkMA"
}
}
]
}
}
}
}
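For completeness, the same dry-run reroute can be issued directly with curl (a sketch, using the node name from the request above; the allocate command shown is the 2.x form):
curl -XPOST 'http://localhost:9200/_cluster/reroute' -d '{
  "dry_run": true,
  "commands": [
    {
      "allocate": {
        "index": "cs-payment-warn-2016.07.20",
        "shard": 4,
        "node": "switzerland",
        "allow_primary": true
      }
    }
  ]
}'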

MongoDB FindAndModify Sorting

I am using FindAndModify in MongoDB from several concurrent processes. The collection size is about 3 million entries and everything works like a blast as long as I don't pass a sorting option (by an indexed field). Once I try to do so, the following warning appears in the logs:
warning: ClientCursor::yield can't unlock b/c of recursive lock ns: test_db.wengine_queue top:
{
opid: 424210,
active: true,
lockType: "write",
waitingForLock: false,
secs_running: 0,
op: "query",
ns: "test_db",
query: {
findAndModify: "wengine_queue",
query: {
locked: { $ne: 1 },
rule_completed: { $in: [ "", "0", null ] },
execute_at: { $lt: 1324381363 },
company_id: 23,
debug: 0,
system_id: "AK/AK1201"
},
update: {
$set: { locked: 1 }
},
sort: {
execute_at: -1
}
},
client: "127.0.0.1:60873",
desc: "conn",
threadId: "0x1541bb000",
connectionId: 1147,
numYields: 0
}
I do have all the keys from the query indexed, here they are:
PRIMARY> db.wengine_queue.getIndexes()
[
{
"v" : 1,
"key" : {
"_id" : 1
},
"ns" : "test_db.wengine_queue",
"name" : "_id_"
},
{
"v" : 1,
"key" : {
"system_id" : 1,
"company_id" : 1,
"locked" : 1,
"rule_completed" : 1,
"execute_at" : -1,
"debug" : 1
},
"ns" : "test_db.wengine_queue",
"name" : "system_id_1_company_id_1_locked_1_rule_completed_1_execute_at_-1_debug_1"
},
{
"v" : 1,
"key" : {
"debug" : 1
},
"ns" : "test_db.wengine_queue",
"name" : "debug_1"
},
{
"v" : 1,
"key" : {
"system_id" : 1
},
"ns" : "test_db.wengine_queue",
"name" : "system_id_1"
},
{
"v" : 1,
"key" : {
"company_id" : 1
},
"ns" : "test_db.wengine_queue",
"name" : "company_id_1"
},
{
"v" : 1,
"key" : {
"locked" : 1
},
"ns" : "test_db.wengine_queue",
"name" : "locked_1"
},
{
"v" : 1,
"key" : {
"rule_completed" : 1
},
"ns" : "test_db.wengine_queue",
"name" : "rule_completed_1"
},
{
"v" : 1,
"key" : {
"execute_at" : -1
},
"ns" : "test_db.wengine_queue",
"name" : "execute_at_-1"
},
{
"v" : 1,
"key" : {
"thread_id" : 1
},
"ns" : "test_db.wengine_queue",
"name" : "thread_id_1"
},
{
"v" : 1,
"key" : {
"rule_id" : 1
},
"ns" : "test_db.wengine_queue",
"name" : "rule_id_1"
}
]
Is there any way around this?
For those interested -- I had to create a separate index ending with the key that the set is to be sorted by.
That warning is thrown when an operation that wants to yield (such as long updates, removes, etc.) cannot do so because it cannot release the lock it's holding for whatever reason.
Do you have the field you're sorting on indexed? If not, adding an index for it will probably remove the warnings.
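For anyone wanting a concrete example of that workaround, the new index would look roughly like this (a sketch; the field order is a guess apart from keeping the sort key execute_at last):
// equality-matched fields first, the sort field last
db.wengine_queue.ensureIndex({
    system_id: 1,
    company_id: 1,
    locked: 1,
    rule_completed: 1,
    debug: 1,
    execute_at: -1
})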
