I have followed the steps shown in the video. I am trying to run the logstash3.conf file below:
input {
  file {
    path => "/var/log/syslog"
    start_position => "beginning"
    sincedb_path => "/dev/null"
  }
}
output {
  elasticsearch {
    hosts => ["elasticsearch:9200"]
    index => "pop"
  }
  stdout {}
}
I ran the following command to start Logstash with the logstash3.conf file:
docker run -h logstash3 --name logstash3 --link elasticsearch:elasticsearch --rm -v "$PWD":/config-dir logstash -f /config-dir/logstash3.conf
I am getting the error below (after which it does not print anything else to the screen):
Sending Logstash's logs to /var/log/logstash which is now configured via log4j2.properties
10:00:19.082 [main] INFO logstash.modules.scaffold - Initializing module {:module_name=>"netflow", :directory=>"/usr/share/logstash/modules/netflow/configuration"}
10:00:19.091 [main] INFO logstash.modules.scaffold - Initializing module {:module_name=>"fb_apache", :directory=>"/usr/share/logstash/modules/fb_apache/configuration"}
10:00:19.163 [main] INFO logstash.setting.writabledirectory - Creating directory {:setting=>"path.queue", :path=>"/var/lib/logstash/queue"}
10:00:19.164 [main] INFO logstash.setting.writabledirectory - Creating directory {:setting=>"path.dead_letter_queue", :path=>"/var/lib/logstash/dead_letter_queue"}
10:00:19.275 [LogStash::Runner] INFO logstash.agent - No persistent UUID file found. Generating new UUID {:uuid=>"81be107c-ad55-4efb-b7a9-873179a33b06", :path=>"/var/lib/logstash/uuid"}
10:00:20.737 [[main]-pipeline-manager] INFO logstash.outputs.elasticsearch - Elasticsearch pool URLs updated {:changes=>{:removed=>[], :added=>[http://elasticsearch:9200/]}}
10:00:20.738 [[main]-pipeline-manager] INFO logstash.outputs.elasticsearch - Running health check to see if an Elasticsearch connection is working {:healthcheck_url=>http://elasticsearch:9200/, :path=>"/"}
10:00:20.969 [[main]-pipeline-manager] WARN logstash.outputs.elasticsearch - Restored connection to ES instance {:url=>"http://elasticsearch:9200/"}
10:00:21.342 [[main]-pipeline-manager] INFO logstash.outputs.elasticsearch - Using mapping template from {:path=>nil}
10:00:21.345 [[main]-pipeline-manager] INFO logstash.outputs.elasticsearch - Attempting to install template {:manage_template=>{"template"=>"logstash-*", "version"=>50001, "settings"=>{"index.refresh_interval"=>"5s"}, "mappings"=>{"_default_"=>{"_all"=>{"enabled"=>true, "norms"=>false}, "dynamic_templates"=>[{"message_field"=>{"path_match"=>"message", "match_mapping_type"=>"string", "mapping"=>{"type"=>"text", "norms"=>false}}}, {"string_fields"=>{"match"=>"*", "match_mapping_type"=>"string", "mapping"=>{"type"=>"text", "norms"=>false, "fields"=>{"keyword"=>{"type"=>"keyword", "ignore_above"=>256}}}}}], "properties"=>{"@timestamp"=>{"type"=>"date", "include_in_all"=>false}, "@version"=>{"type"=>"keyword", "include_in_all"=>false}, "geoip"=>{"dynamic"=>true, "properties"=>{"ip"=>{"type"=>"ip"}, "location"=>{"type"=>"geo_point"}, "latitude"=>{"type"=>"half_float"}, "longitude"=>{"type"=>"half_float"}}}}}}}}
10:00:21.364 [[main]-pipeline-manager] INFO logstash.outputs.elasticsearch - New Elasticsearch output {:class=>"LogStash::Outputs::ElasticSearch", :hosts=>["//elasticsearch:9200"]}
10:00:21.377 [[main]-pipeline-manager] INFO logstash.pipeline - Starting pipeline {"id"=>"main", "pipeline.workers"=>1, "pipeline.batch.size"=>125, "pipeline.batch.delay"=>5, "pipeline.max_inflight"=>125}
10:00:21.962 [[main]-pipeline-manager] INFO logstash.pipeline - Pipeline main started
10:00:22.129 [Api Webserver] INFO logstash.agent - Successfully started Logstash API endpoint {:port=>9600}
Kindly let me know how I can correct this error.
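Worth noting before answering: the output above actually ends with "Pipeline main started" and the API endpoint coming up, so the pipeline may be running rather than failing. A quick way to check whether the "pop" index was actually created is to query the _cat/indices API against Elasticsearch (a sketch; this assumes you run it from a container or host that can reach the elasticsearch hostname, so adjust host/port to your setup):

curl 'http://elasticsearch:9200/_cat/indices?v'   # the "pop" index should be listed once events are flushed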
Related
I am trying to run the ELK stack using Docker, but unfortunately the logstash container is not running, and I am unable to find the exact reason why it's failing.
Here is my docker-compose file:
version: '3.7'
services:
  elasticsearch:
    image: elasticsearch:7.9.2
    ports:
      - '9200:9200'
    networks:
      - elk
    environment:
      - discovery.type=single-node
      - xpack.security.enabled=false
    ulimits:
      memlock:
        soft: -1
        hard: -1
      nofile:
        soft: 65536
        hard: 65536
  logstash:
    image: logstash:7.9.2
    ports:
      - '5000:5000'
    networks:
      - elk
    volumes:
      - type: bind
        source: ./logstash/config/logstash.yml
        target: /usr/share/logstash/config/logstash.yml
        read_only: true
      - type: bind
        source: ./logstash/pipeline
        target: /usr/share/logstash/pipeline
        read_only: true
    depends_on:
      - elasticsearch
networks:
  elk:
    driver: bridge
logstash.yml
---
## Default Logstash configuration from Logstash base image.
## https://github.com/elastic/logstash/blob/master/docker/data/logstash/config/logstash-full.yml
#
http.host: "0.0.0.0"
xpack.monitoring.elasticsearch.hosts: [ "http://elasticsearch:9200" ]
## X-Pack security credentials
#
xpack.monitoring.enabled: true
#xpack.monitoring.elasticsearch.username: elastic
#xpack.monitoring.elasticsearch.password: changeme
logstash.conf
input {
  file {
    path => "C:\Users\User1\Downloads\library-mgmt-system-logs\user-service\user-service.log"
    start_position => "beginning"
  }
}
output {
  elasticsearch {
    hosts => "elasticsearch:9200"
    index => "library-mgmt-system-logstash-index"
    ecs_compatibility => disabled
  }
}
Logstash shutdown logs:
OpenJDK 64-Bit Server VM warning: Option UseConcMarkSweepGC was deprecated in version 9.0 and will likely be removed in a future release.
WARNING: An illegal reflective access operation has occurred
WARNING: Illegal reflective access by org.jruby.ext.openssl.SecurityHelper (file:/tmp/jruby-1/jruby280139731768845147jopenssl.jar) to field java.security.MessageDigest.provider
WARNING: Please consider reporting this to the maintainers of org.jruby.ext.openssl.SecurityHelper
WARNING: Use --illegal-access=warn to enable warnings of further illegal reflective access operations
WARNING: All illegal access operations will be denied in a future release
Sending Logstash logs to /usr/share/logstash/logs which is now configured via log4j2.properties
[2021-08-01T08:42:44,135][INFO ][logstash.runner ] Starting Logstash {"logstash.version"=>"7.9.2", "jruby.version"=>"jruby 9.2.13.0 (2.5.7) 2020-08-03 9a89c94bcc OpenJDK 64-Bit Server VM 11.0.8+10-LTS on 11.0.8+10-LTS +indy +jit [linux-x86_64]"}
[2021-08-01T08:42:44,172][INFO ][logstash.setting.writabledirectory] Creating directory {:setting=>"path.queue", :path=>"/usr/share/logstash/data/queue"}
[2021-08-01T08:42:44,184][INFO ][logstash.setting.writabledirectory] Creating directory {:setting=>"path.dead_letter_queue", :path=>"/usr/share/logstash/data/dead_letter_queue"}
[2021-08-01T08:42:44,578][INFO ][logstash.agent ] No persistent UUID file found. Generating new UUID {:uuid=>"b15dc5df-3deb-4698-aa37-e114a733bfa9", :path=>"/usr/share/logstash/data/uuid"}
[2021-08-01T08:42:45,186][WARN ][deprecation.logstash.monitoringextension.pipelineregisterhook] Internal collectors option for Logstash monitoring is deprecated and targeted for removal in the next major version.
Please configure Metricbeat to monitor Logstash. Documentation can be found at:
https://www.elastic.co/guide/en/logstash/current/monitoring-with-metricbeat.html
[2021-08-01T08:42:46,007][INFO ][logstash.licensechecker.licensereader] Elasticsearch pool URLs updated {:changes=>{:removed=>[], :added=>[http://elasticsearch:9200/]}}
[2021-08-01T08:42:46,306][WARN ][logstash.licensechecker.licensereader] Restored connection to ES instance {:url=>"http://elasticsearch:9200/"}
[2021-08-01T08:42:46,394][INFO ][logstash.licensechecker.licensereader] ES Output version determined {:es_version=>7}
[2021-08-01T08:42:46,399][WARN ][logstash.licensechecker.licensereader] Detected a 6.x and above cluster: the `type` event field won't be used to determine the document _type {:es_version=>7}
[2021-08-01T08:42:46,642][INFO ][logstash.monitoring.internalpipelinesource] Monitoring License OK
[2021-08-01T08:42:46,644][INFO ][logstash.monitoring.internalpipelinesource] Validated license for monitoring. Enabling monitoring pipeline.
[2021-08-01T08:42:48,382][INFO ][org.reflections.Reflections] Reflections took 32 ms to scan 1 urls, producing 22 keys and 45 values
[2021-08-01T08:42:48,706][INFO ][logstash.outputs.elasticsearch][main] Elasticsearch pool URLs updated {:changes=>{:removed=>[], :added=>[http://elasticsearch:9200/]}}
[2021-08-01T08:42:48,706][INFO ][logstash.outputs.elasticsearchmonitoring][.monitoring-logstash] Elasticsearch pool URLs updated {:changes=>{:removed=>[], :added=>[http://elasticsearch:9200/]}}
[2021-08-01T08:42:48,725][WARN ][logstash.outputs.elasticsearch][main] Restored connection to ES instance {:url=>"http://elasticsearch:9200/"}
[2021-08-01T08:42:48,725][WARN ][logstash.outputs.elasticsearchmonitoring][.monitoring-logstash] Restored connection to ES instance {:url=>"http://elasticsearch:9200/"}
[2021-08-01T08:42:48,736][INFO ][logstash.outputs.elasticsearch][main] ES Output version determined {:es_version=>7}
[2021-08-01T08:42:48,736][INFO ][logstash.outputs.elasticsearchmonitoring][.monitoring-logstash] ES Output version determined {:es_version=>7}
[2021-08-01T08:42:48,736][WARN ][logstash.outputs.elasticsearch][main] Detected a 6.x and above cluster: the `type` event field won't be used to determine the document _type {:es_version=>7}
[2021-08-01T08:42:48,736][WARN ][logstash.outputs.elasticsearchmonitoring][.monitoring-logstash] Detected a 6.x and above cluster: the `type` event field won't be used to determine the document _type {:es_version=>7}
[2021-08-01T08:42:48,785][INFO ][logstash.outputs.elasticsearchmonitoring][.monitoring-logstash] New Elasticsearch output {:class=>"LogStash::Outputs::ElasticSearchMonitoring", :hosts=>["http://elasticsearch:9200"]}
[2021-08-01T08:42:48,788][INFO ][logstash.outputs.elasticsearch][main] New Elasticsearch output {:class=>"LogStash::Outputs::ElasticSearch", :hosts=>["//elasticsearch:9200"]}
[2021-08-01T08:42:48,793][WARN ][logstash.javapipeline ][.monitoring-logstash] 'pipeline.ordered' is enabled and is likely less efficient, consider disabling if preserving event order is not necessary
[2021-08-01T08:42:48,833][INFO ][logstash.outputs.elasticsearch][main] Using a default mapping template {:es_version=>7, :ecs_compatibility=>:disabled}
[2021-08-01T08:42:48,879][INFO ][logstash.javapipeline ][.monitoring-logstash] Starting pipeline {:pipeline_id=>".monitoring-logstash", "pipeline.workers"=>1, "pipeline.batch.size"=>2, "pipeline.batch.delay"=>50, "pipeline.max_inflight"=>2, "pipeline.sources"=>["monitoring pipeline"], :thread=>"#<Thread:0xb20b7c7@/usr/share/logstash/logstash-core/lib/logstash/pipelines_registry.rb:141 run>"}
[2021-08-01T08:42:48,888][INFO ][logstash.javapipeline ][main] Starting pipeline {:pipeline_id=>"main", "pipeline.workers"=>8, "pipeline.batch.size"=>125, "pipeline.batch.delay"=>50, "pipeline.max_inflight"=>1000, "pipeline.sources"=>["/usr/share/logstash/pipeline/logstash.conf"], :thread=>"#<Thread:0x62ff495a@/usr/share/logstash/logstash-core/lib/logstash/java_pipeline.rb:122 run>"}
[2021-08-01T08:42:48,901][INFO ][logstash.outputs.elasticsearch][main] Attempting to install template {:manage_template=>{"index_patterns"=>"logstash-*", "version"=>60001, "settings"=>{"index.refresh_interval"=>"5s", "number_of_shards"=>1}, "mappings"=>{"dynamic_templates"=>[{"message_field"=>{"path_match"=>"message", "match_mapping_type"=>"string", "mapping"=>{"type"=>"text", "norms"=>false}}}, {"string_fields"=>{"match"=>"*", "match_mapping_type"=>"string", "mapping"=>{"type"=>"text", "norms"=>false, "fields"=>{"keyword"=>{"type"=>"keyword", "ignore_above"=>256}}}}}], "properties"=>{"@timestamp"=>{"type"=>"date"}, "@version"=>{"type"=>"keyword"}, "geoip"=>{"dynamic"=>true, "properties"=>{"ip"=>{"type"=>"ip"}, "location"=>{"type"=>"geo_point"}, "latitude"=>{"type"=>"half_float"}, "longitude"=>{"type"=>"half_float"}}}}}}}
[2021-08-01T08:42:48,931][INFO ][logstash.outputs.elasticsearch][main] Installing elasticsearch template to _template/logstash
[2021-08-01T08:42:49,686][INFO ][logstash.javapipeline ][.monitoring-logstash] Pipeline Java execution initialization time {"seconds"=>0.81}
[2021-08-01T08:42:49,688][INFO ][logstash.javapipeline ][main] Pipeline Java execution initialization time {"seconds"=>0.8}
[2021-08-01T08:42:49,730][INFO ][logstash.javapipeline ][.monitoring-logstash] Pipeline started {"pipeline.id"=>".monitoring-logstash"}
[2021-08-01T08:42:50,840][ERROR][logstash.agent ] Failed to execute action {:id=>:main, :action_type=>LogStash::ConvergeResult::FailedAction, :message=>"Could not execute action: PipelineAction::Create<main>, action_result: false", :backtrace=>nil}
[2021-08-01T08:42:51,147][INFO ][logstash.agent ] Successfully started Logstash API endpoint {:port=>9600}
[2021-08-01T08:42:53,108][INFO ][logstash.javapipeline ] Pipeline terminated {"pipeline.id"=>".monitoring-logstash"}
[2021-08-01T08:42:53,162][INFO ][logstash.runner ] Logstash shut down.
I resolved this issue. Please refer to the updated files below.
docker-compose.yaml
logstash:
  image: logstash:7.13.4
  ports:
    - '5000:5000'
  networks:
    - elk
  volumes:
    - type: bind
      source: ./logstash/config/logstash.yml
      target: /usr/share/logstash/config/logstash.yml
      read_only: true
    - type: bind
      source: ./logstash/pipeline
      target: /usr/share/logstash/pipeline
      read_only: true
    - type: bind
      source: C:/Users/Rupesh_Patil/Desktop/logstash-data
      target: /usr/share/logs/
      read_only: true
  depends_on:
    - elasticsearch
logstash.conf
input {
  file {
    type => "user"
    path => "/usr/share/logs/user-service/user-service.log"
    start_position => "beginning"
  }
}
output {
  elasticsearch {
    hosts => "elasticsearch:9200"
    index => "library-mgmt-system-logstash-index"
    ecs_compatibility => disabled
  }
}
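For context on why this works: the original pipeline pointed the file input at a Windows path (C:\Users\...), which does not exist inside the Linux container, and that is most likely why the main pipeline failed to be created. The updated compose file bind-mounts the Windows log directory into the container, and the file input uses the container-side path instead. Since the elasticsearch service publishes 9200:9200, the result can be checked from the host (a sketch, assuming that port mapping):

curl 'http://localhost:9200/_cat/indices?v'   # look for library-mgmt-system-logstash-index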
I have Logstash and Elasticsearch on different machines. When I run Logstash on the same machine, it works fine (with 'localhost' in hosts), but when I specify an IP address in the hosts section of the conf file, it does not create the index. The output from Logstash is as follows:
Java HotSpot(TM) 64-Bit Server VM warning: Ignoring option UseConcMarkSweepGC; support was removed in 14.0
Java HotSpot(TM) 64-Bit Server VM warning: Ignoring option CMSInitiatingOccupancyFraction; support was removed in 14.0
Java HotSpot(TM) 64-Bit Server VM warning: Ignoring option UseCMSInitiatingOccupancyOnly; support was removed in 14.0
WARNING: An illegal reflective access operation has occurred
WARNING: Illegal reflective access by com.headius.backport9.modules.Modules (file:/C:/Project/logstash-7.7.0/logstash-core/lib/jars/jruby-complete-9.2.11.1.jar) to field java.io.Console.cs
WARNING: Please consider reporting this to the maintainers of com.headius.backport9.modules.Modules
WARNING: Use --illegal-access=warn to enable warnings of further illegal reflective access operations
WARNING: All illegal access operations will be denied in a future release
Sending Logstash logs to C:/Project/logstash-7.7.0/logs which is now configured via log4j2.properties
[2020-05-19T19:45:01,169][WARN ][logstash.config.source.multilocal] Ignoring the 'pipelines.yml' file because modules or command line options are specified
[2020-05-19T19:45:01,279][INFO ][logstash.runner ] Starting Logstash {"logstash.version"=>"7.7.0"}
[2020-05-19T19:45:02,516][INFO ][org.reflections.Reflections] Reflections took 47 ms to scan 1 urls, producing 21 keys and 41 values
[2020-05-19T19:45:03,723][INFO ][logstash.outputs.elasticsearch][main] Elasticsearch pool URLs updated {:changes=>{:removed=>[], :added=>[http://192.168.51.100:9200/]}}
[2020-05-19T19:45:03,911][WARN ][logstash.outputs.elasticsearch][main] Restored connection to ES instance {:url=>"http://192.168.51.100:9200/"}
[2020-05-19T19:45:03,974][INFO ][logstash.outputs.elasticsearch][main] ES Output version determined {:es_version=>7}
[2020-05-19T19:45:03,974][WARN ][logstash.outputs.elasticsearch][main] Detected a 6.x and above cluster: the `type` event field won't be used to determine the document _type {:es_version=>7}
[2020-05-19T19:45:04,052][INFO ][logstash.outputs.elasticsearch][main] New Elasticsearch output {:class=>"LogStash::Outputs::ElasticSearch", :hosts=>["http://192.168.51.100:9200"]}
[2020-05-19T19:45:04,117][INFO ][logstash.outputs.elasticsearch][main] Using default mapping template
[2020-05-19T19:45:04,132][WARN ][org.logstash.instrument.metrics.gauge.LazyDelegatingGauge][main] A gauge metric of an unknown type (org.jruby.specialized.RubyArrayOneObject) has been created for key: cluster_uuids. This may result in invalid serialization. It is recommended to log an issue to the responsible developer/development team.
[2020-05-19T19:45:04,132][INFO ][logstash.javapipeline ][main] Starting pipeline {:pipeline_id=>"main", "pipeline.workers"=>4, "pipeline.batch.size"=>125, "pipeline.batch.delay"=>50, "pipeline.max_inflight"=>500, "pipeline.sources"=>["C:/Project/Log/sample.conf"], :thread=>"#<Thread:0x3bdb6c5e run>"}
[2020-05-19T19:45:04,210][INFO ][logstash.outputs.elasticsearch][main] Attempting to install template {:manage_template=>{"index_patterns"=>"logstash-*", "version"=>60001, "settings"=>{"index.refresh_interval"=>"5s", "number_of_shards"=>1}, "mappings"=>{"dynamic_templates"=>[{"message_field"=>{"path_match"=>"message", "match_mapping_type"=>"string", "mapping"=>{"type"=>"text", "norms"=>false}}}, {"string_fields"=>{"match"=>"*", "match_mapping_type"=>"string", "mapping"=>{"type"=>"text", "norms"=>false, "fields"=>{"keyword"=>{"type"=>"keyword", "ignore_above"=>256}}}}}], "properties"=>{"@timestamp"=>{"type"=>"date"}, "@version"=>{"type"=>"keyword"}, "geoip"=>{"dynamic"=>true, "properties"=>{"ip"=>{"type"=>"ip"}, "location"=>{"type"=>"geo_point"}, "latitude"=>{"type"=>"half_float"}, "longitude"=>{"type"=>"half_float"}}}}}}}
[2020-05-19T19:45:05,271][INFO ][logstash.inputs.file ][main] No sincedb_path set, generating one based on the "path" setting {:sincedb_path=>"C:/Project/logstash-7.7.0/data/plugins/inputs/file/.sincedb_8d9566297ac4987e711aafe4a88b2724", :path=>["C:/Project/Log/sample.txt"]}
[2020-05-19T19:45:05,302][INFO ][logstash.javapipeline ][main] Pipeline started {"pipeline.id"=>"main"}
[2020-05-19T19:45:05,346][INFO ][filewatch.observingtail ][main][253b58041f339951f57d5a400fe9cbebb44b789526885e5c4061ea24665dc057] START, creating Discoverer, Watch with file and sincedb collections
[2020-05-19T19:45:05,348][INFO ][logstash.agent ] Pipelines running {:count=>1, :running_pipelines=>[:main], :non_running_pipelines=>[]}
[2020-05-19T19:45:05,602][INFO ][logstash.agent ] Successfully started Logstash API endpoint {:port=>9600}
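Worth noting from the output above: the connection to http://192.168.51.100:9200 was restored and the pipeline started, so connectivity to the remote Elasticsearch looks fine. The log also shows a sincedb file being generated for C:/Project/Log/sample.txt; if that file was already read during the earlier localhost tests, Logstash will not re-emit its contents, and no index gets created. A sketch that forces a full re-read on every run (for testing only; "NUL" is the Windows equivalent of /dev/null, and the path is taken from the logs above):

input {
  file {
    path => "C:/Project/Log/sample.txt"
    start_position => "beginning"
    sincedb_path => "NUL"    # never persist file offsets between runs
  }
}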
I am trying to create an index in Elasticsearch using a CSV file. Below is the configuration.
input {
  file {
    path => "C:\Users\soumdash\Desktop\Accounts.csv"
    start_position => "beginning"
    sincedb_path => "NUL"
  }
}
filter {
  csv {
    separator => ","
    columns => ["Country_code","Account_number","User_ID","Date","Time"]
  }
  mutate { convert => ["Account_number","integer"] }
}
output {
  elasticsearch {
    hosts => ["localhost:9200"]
    index => "accounts"
  }
  stdout {}
}
I am starting Logstash, and from the console I can see that it has been started and the pipeline has been created. But I cannot see the index in Kibana.
C:\Users\soumdash\Desktop\logstash-7.2.0\bin>logstash -f logstash-account.conf
Thread.exclusive is deprecated, use Thread::Mutex
Sending Logstash logs to C:/Users/soumdash/Desktop/logstash-7.2.0/logs which is now configured via log4j2.properties
[2019-07-26T14:01:27,662][WARN ][logstash.config.source.multilocal] Ignoring the 'pipelines.yml' file because modules or command line options are specified
[2019-07-26T14:01:27,711][INFO ][logstash.runner ] Starting Logstash {"logstash.version"=>"7.2.0"}
[2019-07-26T14:01:42,181][WARN ][logstash.outputs.elasticsearch] You are using a deprecated config setting "document_type" set in elasticsearch. Deprecated settings will continue to work, but are scheduled for removal from logstash in the future. Document types are being deprecated in Elasticsearch 6.0, and removed entirely in 7.0. You should avoid this feature If you have any questions about this, please visit the #logstash channel on freenode irc. {:name=>"document_type", :plugin=><LogStash::Outputs::ElasticSearch index=>"accounts", id=>"b54e1c07198cf188279cb051e01c9fe6118db48fe2ce76739dc2ace82e02c078", hosts=>[//localhost:9200], document_type=>"ERC_Acoounts", enable_metric=>true, codec=><LogStash::Codecs::Plain id=>"plain_57f41853-7ddf-48e5-a5e4-316d94c83a0f", enable_metric=>true, charset=>"UTF-8">, workers=>1, manage_template=>true, template_name=>"logstash", template_overwrite=>false, doc_as_upsert=>false, script_type=>"inline", script_lang=>"painless", script_var_name=>"event", scripted_upsert=>false, retry_initial_interval=>2, retry_max_interval=>64, retry_on_conflict=>1, ilm_enabled=>"auto", ilm_rollover_alias=>"logstash", ilm_pattern=>"{now/d}-000001", ilm_policy=>"logstash-policy", action=>"index", ssl_certificate_verification=>true, sniffing=>false, sniffing_delay=>5, timeout=>60, pool_max=>1000, pool_max_per_route=>100, resurrect_delay=>5, validate_after_inactivity=>10000, http_compression=>false>}
[2019-07-26T14:01:46,248][INFO ][logstash.outputs.elasticsearch] Elasticsearch pool URLs updated {:changes=>{:removed=>[], :added=>[http://localhost:9200/]}}
[2019-07-26T14:01:46,752][WARN ][logstash.outputs.elasticsearch] Restored connection to ES instance {:url=>"http://localhost:9200/"}
[2019-07-26T14:01:46,852][INFO ][logstash.outputs.elasticsearch] ES Output version determined {:es_version=>7}
[2019-07-26T14:01:46,862][WARN ][logstash.outputs.elasticsearch] Detected a 6.x and above cluster: the `type` event field won't be used to determine the document _type {:es_version=>7}
[2019-07-26T14:01:46,910][INFO ][logstash.outputs.elasticsearch] New Elasticsearch output {:class=>"LogStash::Outputs::ElasticSearch", :hosts=>["//localhost:9200"]}
[2019-07-26T14:01:47,046][INFO ][logstash.outputs.elasticsearch] Using default mapping template
[2019-07-26T14:01:47,205][INFO ][logstash.outputs.elasticsearch] Attempting to install template {:manage_template=>{"index_patterns"=>"logstash-*", "version"=>60001, "settings"=>{"index.refresh_interval"=>"5s", "number_of_shards"=>1}, "mappings"=>{"dynamic_templates"=>[{"message_field"=>{"path_match"=>"message", "match_mapping_type"=>"string", "mapping"=>{"type"=>"text", "norms"=>false}}}, {"string_fields"=>{"match"=>"*", "match_mapping_type"=>"string", "mapping"=>{"type"=>"text", "norms"=>false, "fields"=>{"keyword"=>{"type"=>"keyword", "ignore_above"=>256}}}}}], "properties"=>{"@timestamp"=>{"type"=>"date"}, "@version"=>{"type"=>"keyword"}, "geoip"=>{"dynamic"=>true, "properties"=>{"ip"=>{"type"=>"ip"}, "location"=>{"type"=>"geo_point"}, "latitude"=>{"type"=>"half_float"}, "longitude"=>{"type"=>"half_float"}}}}}}}
[2019-07-26T14:01:47,236][WARN ][org.logstash.instrument.metrics.gauge.LazyDelegatingGauge] A gauge metric of an unknown type (org.jruby.specialized.RubyArrayOneObject) has been create for key: cluster_uuids. This may result in invalid serialization. It is recommended to log an issue to the responsible developer/development team.
[2019-07-26T14:01:47,236][INFO ][logstash.javapipeline ] Starting pipeline {:pipeline_id=>"main", "pipeline.workers"=>4, "pipeline.batch.size"=>125, "pipeline.batch.delay"=>50, "pipeline.max_inflight"=>500, :thread=>"#<Thread:0x26c630b8 run>"}
[2019-07-26T14:01:52,105][INFO ][logstash.javapipeline ] Pipeline started {"pipeline.id"=>"main"}
[2019-07-26T14:01:52,232][INFO ][logstash.agent ] Pipelines running {:count=>1, :running_pipelines=>[:main], :non_running_pipelines=>[]}
[2019-07-26T14:01:52,249][INFO ][filewatch.observingtail ] START, creating Discoverer, Watch with file and sincedb collections
[2019-07-26T14:01:53,290][INFO ][logstash.agent ] Successfully started Logstash API endpoint {:port=>9600}
I have checked and tried a few other answers on the same issue, such as
Logstash creates pipeline but index is not created and
Logstash is not creating index in elastic search,
but with no success.
Can anyone please help? I am using ELK 7.2.
Can you use rubydebug inside of stdout, just to make sure that your file is read?
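A minimal sketch of that suggestion, replacing the bare stdout {} in the output section above:

output {
  elasticsearch {
    hosts => ["localhost:9200"]
    index => "accounts"
  }
  stdout { codec => rubydebug }   # print every parsed event to the console
}

If events show up on the console but the index still never appears, the problem is between Logstash and Elasticsearch rather than in the file/csv input.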
I am trying to read logs from internal S3 storage (not AWS) using Logstash. Below is the config, but when I try it, it always reports that no files were found in the bucket. Through the explorer I can see the files. Can someone help me with this? I have tried both .log and .txt files in the S3 storage for testing purposes. Nothing works.
input {
  s3 {
    access_key_id => "xxxxxxxxxxxxxxxxxxxxxx"
    secret_access_key => "xxxxxxxxxxxxxxxxxxxxx"
    endpoint => "http://example.com:9020"
    bucket => "samplelogs"
    temporary_directory => "C:/xxx/ELK/logstash-6.6.1"
    prefix => "/"
    add_field => { "source" => "gzfiles" }
    type => "s3"
  }
}
These are the logs:
Sending Logstash's logs to C:/XXXX/logstash-6.3.2/logs which is now configured via log4j2.properties
[2019-03-18T15:58:00,879][WARN ][logstash.config.source.multilocal] Ignoring the 'pipelines.yml' file because modules or command line options are specified
[2019-03-18T15:58:01,311][INFO ][logstash.runner ] Starting Logstash {"logstash.version"=>"6.3.2"}
[2019-03-18T15:58:25,213][INFO ][logstash.pipeline ] Starting pipeline {:pipeline_id=>"main", "pipeline.workers"=>8, "pipeline.batch.size"=>125, "pipeline.batch.delay"=>50}
[2019-03-18T15:58:25,494][INFO ][logstash.outputs.elasticsearch] Elasticsearch pool URLs updated {:changes=>{:removed=>[], :added=>[http://localhost:9200/]}}
[2019-03-18T15:58:25,509][INFO ][logstash.outputs.elasticsearch] Running health check to see if an Elasticsearch connection is working {:healthcheck_url=>http://localhost:9200/, :path=>"/"}
[2019-03-18T15:58:25,650][WARN ][logstash.outputs.elasticsearch] Restored connection to ES instance {:url=>"http://localhost:9200/"}
[2019-03-18T15:58:25,681][INFO ][logstash.outputs.elasticsearch] ES Output version determined {:es_version=>6}
[2019-03-18T15:58:25,681][WARN ][logstash.outputs.elasticsearch] Detected a 6.x and above cluster: the `type` event field won't be used to determine the document _type {:es_version=>6}
[2019-03-18T15:58:25,713][INFO ][logstash.outputs.elasticsearch] New Elasticsearch output {:class=>"LogStash::Outputs::ElasticSearch", :hosts=>["//localhost:9200"]}
[2019-03-18T15:58:25,728][INFO ][logstash.outputs.elasticsearch] Using mapping template from {:path=>nil}
[2019-03-18T15:58:25,744][INFO ][logstash.outputs.elasticsearch] Attempting to install template {:manage_template=>{"template"=>"logstash-*", "version"=>60001, "settings"=>{"index.refresh_interval"=>"5s"}, "mappings"=>{"_default_"=>{"dynamic_templates"=>[{"message_field"=>{"path_match"=>"message", "match_mapping_type"=>"string", "mapping"=>{"type"=>"text", "norms"=>false}}}, {"string_fields"=>{"match"=>"*", "match_mapping_type"=>"string", "mapping"=>{"type"=>"text", "norms"=>false, "fields"=>{"keyword"=>{"type"=>"keyword", "ignore_above"=>256}}}}}], "properties"=>{"@timestamp"=>{"type"=>"date"}, "@version"=>{"type"=>"keyword"}, "geoip"=>{"dynamic"=>true, "properties"=>{"ip"=>{"type"=>"ip"}, "location"=>{"type"=>"geo_point"}, "latitude"=>{"type"=>"half_float"}, "longitude"=>{"type"=>"half_float"}}}}}}}}
[2019-03-18T15:58:25,759][INFO ][logstash.inputs.s3 ] Registering s3 input {:bucket=>"gvmslogs", :region=>"us-east-1"}
[2019-03-18T15:58:26,041][INFO ][logstash.pipeline ] Pipeline started successfully {:pipeline_id=>"main", :thread=>"#<Thread:0x6ac2a093 run>"}
[2019-03-18T15:58:26,103][INFO ][logstash.agent ] Pipelines running {:count=>1, :running_pipelines=>[:main], :non_running_pipelines=>[]}
[2019-03-18T15:58:26,338][INFO ][logstash.agent ] Successfully started Logstash API endpoint {:port=>9600}
[2019-03-18T15:58:28,400][INFO ][logstash.inputs.s3 ] S3 input: No files found in bucket {:prefix=>"/"}
[2019-03-18T15:59:27,463][INFO ][logstash.inputs.s3 ] S3 input: No files found in bucket {:prefix=>"/"}
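One detail that stands out, given the log line No files found in bucket {:prefix=>"/"}: S3 object keys do not normally begin with a slash, and the s3 input's prefix option is compared literally against key names, so prefix => "/" is likely to match nothing. A sketch with the prefix emptied out (an assumption to try, not a confirmed fix for this particular storage backend):

input {
  s3 {
    access_key_id => "xxxxxxxxxxxxxxxxxxxxxx"      # same credentials as above
    secret_access_key => "xxxxxxxxxxxxxxxxxxxxx"
    endpoint => "http://example.com:9020"
    bucket => "samplelogs"
    prefix => ""    # empty prefix matches every key; a real folder prefix would be e.g. "logs/"
  }
}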
I have a script that logs temperature and humidity from different sensors and stores the data from each sensor in its own directory; every day a new log is created in the format YYYY-MM-DD.log.
${data_root}/A/0/*.log
${data_root}/A/1/*.log
etc.
The logs are in this format:
2018-03-02 03:48:14 25.00 27.10
(YYYY-MM-DD TIME Temperature Humidity)
I had trouble understanding how to correctly configure my Logstash instance. I figured that my input should look something like this:
input {
  file { path => "/var/wlogs/a1/*.log" type => "a1" }
  file { path => "/var/wlogs/a2/*.log" type => "a2" }
  etc..
}
and the filter should look something like this:
filter {
  if [type] == "a1" {
    grok {
      match => { "message" => "(?<timestamp>%{YEAR}-%{MONTHNUM:month}-%{MONTHDAY:day} %{TIME}) %{NUMBER:temperature:float} %{NUMBER:humidity:float}" }
    }
  }
  if [type] == "a2" {....}
}
I'm trying to export the data in the output section to Elasticsearch, with no success.
output {
  elasticsearch { hosts => ["ec2-xxxxxx.eu-west-2.compute.amazonaws.com:9200"] user => "elastic" password => "pass" index => "{type}" }
  stdout { codec => rubydebug }
}
Here is the console output when I try to run it:
ubuntu#ip-xxx-xxx:/usr/share/logstash$ sudo bin/logstash -f ~/logstash.conf
WARNING: Could not find logstash.yml which is typically located in $LS_HOME/config or /etc/logstash. You can specify the path using --path.settings. Continuing using the defaults
Could not find log4j2 configuration at path /usr/share/logstash/config/log4j2.properties. Using default config which logs errors to the console
[INFO ] 2018-03-02 13:43:34.633 [main] scaffold - Initializing module {:module_name=>"fb_apache", :directory=>"/usr/share/logstash/modules/fb_apache/configuration"}
[INFO ] 2018-03-02 13:43:34.647 [main] scaffold - Initializing module {:module_name=>"netflow", :directory=>"/usr/share/logstash/modules/netflow/configuration"}
[WARN ] 2018-03-02 13:43:35.063 [LogStash::Runner] multilocal - Ignoring the 'pipelines.yml' file because modules or command line options are specified
[INFO ] 2018-03-02 13:43:35.209 [LogStash::Runner] runner - Starting Logstash {"logstash.version"=>"6.2.2"}
[INFO ] 2018-03-02 13:43:35.430 [Api Webserver] agent - Successfully started Logstash API endpoint {:port=>9600}
[INFO ] 2018-03-02 13:43:36.145 [Ruby-0-Thread-1: /usr/share/logstash/vendor/bundle/jruby/2.3.0/gems/stud-0.0.23/lib/stud/task.rb:22] pipeline - Starting pipeline {:pipeline_id=>"main", "pipeline.workers"=>2, "pipeline.batch.size"=>125, "pipeline.batch.delay"=>50}
[INFO ] 2018-03-02 13:43:36.318 [[main]-pipeline-manager] elasticsearch - Elasticsearch pool URLs updated {:changes=>{:removed=>[], :added=>[http://elastic:xxxxxx@ec2-no.eu-west-2.compute.amazonaws.com:9200/]}}
[INFO ] 2018-03-02 13:43:36.327 [[main]-pipeline-manager] elasticsearch - Running health check to see if an Elasticsearch connection is working {:healthcheck_url=>http://elastic:xxxxxx@ec2-no.eu-west-2.compute.amazonaws.com:9200/, :path=>"/"}
[WARN ] 2018-03-02 13:43:36.447 [[main]-pipeline-manager] elasticsearch - Restored connection to ES instance {:url=>"http://elastic:xxxxxx@ec2-3no3.eu-west-2.compute.amazonaws.com:9200/"}
[INFO ] 2018-03-02 13:43:36.610 [[main]-pipeline-manager] elasticsearch - ES Output version determined {:es_version=>nil}
[WARN ] 2018-03-02 13:43:36.611 [[main]-pipeline-manager] elasticsearch - Detected a 6.x and above cluster: the `type` event field won't be used to determine the document _type {:es_version=>6}
[INFO ] 2018-03-02 13:43:36.616 [[main]-pipeline-manager] elasticsearch - Using mapping template from {:path=>nil}
[INFO ] 2018-03-02 13:43:36.619 [[main]-pipeline-manager] elasticsearch - Attempting to install template {:manage_template=>{"template"=>"logstash-*", "version"=>60001, "settings"=>{"index.refresh_interval"=>"5s"}, "mappings"=>{"_default_"=>{"dynamic_templates"=>[{"message_field"=>{"path_match"=>"message", "match_mapping_type"=>"string", "mapping"=>{"type"=>"text", "norms"=>false}}}, {"string_fields"=>{"match"=>"*", "match_mapping_type"=>"string", "mapping"=>{"type"=>"text", "norms"=>false, "fields"=>{"keyword"=>{"type"=>"keyword", "ignore_above"=>256}}}}}], "properties"=>{"@timestamp"=>{"type"=>"date"}, "@version"=>{"type"=>"keyword"}, "geoip"=>{"dynamic"=>true, "properties"=>{"ip"=>{"type"=>"ip"}, "location"=>{"type"=>"geo_point"}, "latitude"=>{"type"=>"half_float"}, "longitude"=>{"type"=>"half_float"}}}}}}}}
[INFO ] 2018-03-02 13:43:36.626 [[main]-pipeline-manager] elasticsearch - New Elasticsearch output {:class=>"LogStash::Outputs::ElasticSearch", :hosts=>["//ec2-no.eu-west-2.compute.amazonaws.com:9200"]}
[INFO ] 2018-03-02 13:43:37.054 [Ruby-0-Thread-1: /usr/share/logstash/vendor/bundle/jruby/2.3.0/gems/stud-0.0.23/lib/stud/task.rb:22] pipeline - Pipeline started succesfully {:pipeline_id=>"main", :thread=>"#<Thread:0x25b5f422@/usr/share/logstash/logstash-core/lib/logstash/pipeline.rb:246 run>"}
[INFO ] 2018-03-02 13:43:37.081 [Ruby-0-Thread-1: /usr/share/logstash/vendor/bundle/jruby/2.3.0/gems/stud-0.0.23/lib/stud/task.rb:22] agent - Pipelines running {:count=>1, :pipelines=>["main"]}
Please help me figure out what I'm doing wrong and how to fix it. :)
Thanks in advance.
P.S.: I'm using the latest versions of Elasticsearch, Kibana, and Logstash.
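One thing that stands out in the output section above: index => "{type}". Logstash resolves event fields in option values with sprintf syntax, so "{type}" is taken as a literal index name rather than a field reference. A sketch of the corrected line, assuming per-type indices are the goal (whether this is the only problem here is not certain):

elasticsearch {
  hosts => ["ec2-xxxxxx.eu-west-2.compute.amazonaws.com:9200"]
  user => "elastic"
  password => "pass"
  index => "%{type}"    # sprintf reference to the event's type field, e.g. "a1" or "a2"
}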
I don't see any error in the logs, which makes me think the log files might have already been read in a previous attempt. Since the file offsets are maintained in the sincedb file in the home directory, can you stop Logstash, delete the file, and try again?
For more details about the sincedb file, refer to https://www.elastic.co/guide/en/logstash/current/plugins-inputs-file.html
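A sketch of that suggestion applied to the input above; alternatively, sincedb persistence can be disabled entirely while testing (assuming a Linux host, as the paths suggest):

input {
  file {
    path => "/var/wlogs/a1/*.log"
    type => "a1"
    start_position => "beginning"    # read existing file content, not only new lines
    sincedb_path => "/dev/null"      # do not remember offsets between runs (testing only)
  }
}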