logstash.conf file:
input {
  file {
    type => "java"
    path => "/elk/spring-boot-elk.log"
    start_position => "beginning"
  }
}
filter {
  # If log line contains tab character followed by 'at' then we will tag that entry as stacktrace
  if [message] =~ "\tat" {
    grok {
      match => ["message", "^(\tat)"]
      add_tag => ["stacktrace"]
    }
  }
}
output {
  stdout {
    codec => rubydebug
  }
  # Sending properly parsed log events to elasticsearch
  elasticsearch {
    hosts => ["elasticsearch:9200"]
  }
}
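As an aside, the filter above only tags stacktrace lines; it does not merge them into the log event that produced them. If merging is the goal, a multiline codec on the file input is the usual approach. A minimal sketch, assuming each new log event starts with an ISO8601 timestamp:

input {
  file {
    type => "java"
    path => "/elk/spring-boot-elk.log"
    start_position => "beginning"
    codec => multiline {
      # Any line that does not start with a timestamp is appended to the
      # previous event, so stack-trace lines travel with their parent line.
      pattern => "^%{TIMESTAMP_ISO8601}"
      negate => true
      what => "previous"
    }
  }
}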
docker-compose.yaml file:
version: "3"
services:
elasticsearch:
image: elasticsearch:7.5.2
ports:
- "9200:9200"
- "9300:9300"
environment:
- discovery.type=single-node
kibana:
image: kibana:7.5.2
ports:
- "5601:5601"
links:
- elasticsearch
depends_on:
- elasticsearch
logstash:
image: logstash:7.5.2
links:
- elasticsearch
volumes:
- ./:/config-dir
command: logstash -f /config-dir/logstash.conf
depends_on:
- elasticsearch
All containers are running, but there is no data in Kibana.
I think the issue is with Logstash.
[2020-04-26T16:37:44,502][WARN ][logstash.outputs.elasticsearch] You are using a deprecated config setting "document_type" set in elasticsearch. Deprecated settings will continue to work, but are scheduled for removal from logstash in the future. Document types are being deprecated in Elasticsearch 6.0, and removed entirely in 7.0. You should avoid this feature If you have any questions about this, please visit the #logstash channel on freenode irc. {:name=>"document_type", :plugin=><LogStash::Outputs::ElasticSearch bulk_path=>"/_monitoring/bulk?system_id=logstash&system_api_version=7&interval=1s", hosts=>[http://elasticsearch:9200], sniffing=>false, manage_template=>false, id=>"7d7dfa0f023f65240aeb31ebb353da5a42dc782979a2bd7e26e28b7cbd509bb3", document_type=>"%{[@metadata][document_type]}", enable_metric=>true, codec=><LogStash::Codecs::Plain id=>"plain_1a08e50c-ae97-4f38-a5b7-7aa70df94f4a", enable_metric=>true, charset=>"UTF-8">, workers=>1, template_name=>"logstash", template_overwrite=>false, doc_as_upsert=>false, script_type=>"inline", script_lang=>"painless", script_var_name=>"event", scripted_upsert=>false, retry_initial_interval=>2, retry_max_interval=>64, retry_on_conflict=>1, ilm_enabled=>"auto", ilm_rollover_alias=>"logstash", ilm_pattern=>"{now/d}-000001", ilm_policy=>"logstash-policy", action=>"index", ssl_certificate_verification=>true, sniffing_delay=>5, timeout=>60, pool_max=>1000, pool_max_per_route=>100, resurrect_delay=>5, validate_after_inactivity=>10000, http_compression=>false>}
[2020-04-26T16:37:44,544][INFO ][logstash.outputs.elasticsearch] Elasticsearch pool URLs updated {:changes=>{:removed=>[], :added=>[http://elasticsearch:9200/]}}
[2020-04-26T16:37:44,550][WARN ][logstash.outputs.elasticsearch] Restored connection to ES instance {:url=>"http://elasticsearch:9200/"}
[2020-04-26T16:37:44,555][INFO ][logstash.outputs.elasticsearch] ES Output version determined {:es_version=>7}
[2020-04-26T16:37:44,555][WARN ][logstash.outputs.elasticsearch] Detected a 6.x and above cluster: the `type` event field won't be used to determine the document _type {:es_version=>7}
[2020-04-26T16:37:44,586][INFO ][logstash.outputs.elasticsearch] New Elasticsearch output {:class=>"LogStash::Outputs::ElasticSearch", :hosts=>["http://elasticsearch:9200"]}
[2020-04-26T16:37:44,597][INFO ][logstash.javapipeline ] Starting pipeline {:pipeline_id=>".monitoring-logstash", "pipeline.workers"=>1, "pipeline.batch.size"=>2, "pipeline.batch.delay"=>50, "pipeline.max_inflight"=>2, "pipeline.sources"=>["monitoring pipeline"], :thread=>"#<Thread:0x61a1055 run>"}
[2020-04-26T16:37:44,636][INFO ][logstash.javapipeline ] Pipeline started {"pipeline.id"=>".monitoring-logstash"}
[2020-04-26T16:37:44,654][INFO ][logstash.agent ] Pipelines running {:count=>2, :running_pipelines=>[:main, :".monitoring-logstash"], :non_running_pipelines=>[]}
[2020-04-26T16:37:44,899][INFO ][logstash.agent ] Successfully started Logstash API endpoint {:port=>9600}
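Nothing in this startup log points to a failure: "Pipelines running" shows Logstash came up, and the document_type deprecation warning comes from the internal monitoring pipeline and is harmless. A likely cause of the empty Kibana, judging from the compose file above, is that only ./ is mounted at /config-dir, so /elk/spring-boot-elk.log does not exist inside the Logstash container and the file input has nothing to read. A hedged sketch of an extra mount, assuming the log file sits in ./elk on the host:

  logstash:
    image: logstash:7.5.2
    volumes:
      - ./:/config-dir
      - ./elk:/elk   # makes /elk/spring-boot-elk.log visible inside the container
    command: logstash -f /config-dir/logstash.conf
    depends_on:
      - elasticsearch

Whether any events actually reached Elasticsearch can be checked with curl http://localhost:9200/_cat/indices?v against the published port.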
I need your support. As shown in the image below, I configured ELK and deployed it on a separate server. It works on that server, but I'm trying to access Logstash from other servers on the same network.
Is this possible in a local environment?
Logstash Configuration
# Read input from filebeat by listening to port 5044 on which filebeat will send the data
input {
  beats {
    host => "0.0.0.0"
    port => "5044"
  }
}
output {
  stdout {
    codec => rubydebug
  }
  # Sending properly parsed log events to elasticsearch
  elasticsearch {
    hosts => ["localhost:9200"]
    index => "e-%{+YYYY.MM.dd}"
    document_type => "%{[@metadata][type]}"
  }
}
Filebeat Configuration
filebeat.inputs:
- type: log
  # Change to true to enable this input configuration.
  enabled: true
  # Paths that should be crawled and fetched. Glob based paths.
  paths:
    #- /var/log/*.log
    - E:\IMR-App\imrh\logs\imrh.log

# ------------------------------ Logstash Output -------------------------------
output.logstash:
  # The Logstash hosts
  hosts: ["0.0.0.0:5044"]
I installed a Filebeat -> Logstash -> Elasticsearch -> Kibana stack in Kubernetes with Helm charts:
helm repo add incubator http://storage.googleapis.com/kubernetes-charts-incubator
helm install --name elastic --namespace monitoring incubator/elasticsearch --set client.replicas=1,master.replicas=2,data.replicas=1
helm install --name logstash --namespace monitoring incubator/logstash -f logstash_values.yaml
helm install --name filebeat stable/filebeat -f filebeat_values.yaml
helm install stable/kibana --name kibana --namespace monitoring
The logs are indexed in ES, but the "message" field contains the whole string; the defined fields are not extracted. My grok filter doesn't seem to work in the Logstash conf.
There is no documentation at https://github.com/helm/charts/tree/master/incubator/logstash about how to set the patterns.
Here is what I tried:
My log format:
10-09-2018 11:57:55.906 [Debug] [LOG] serviceName - Technical - my specific message - correlationId - userId - data - operation - error - stackTrace escaped on one line
logstash_values.yaml (from https://github.com/helm/charts/blob/master/incubator/logstash/values.yaml):
elasticsearch:
  host: elasticsearch-client.default.svc.cluster.local
  port: 9200
patterns:
  main: |-
    (?<time>(?:2[0123]|[01]?[0-9]):(?:[0-5][0-9]):(?:(?:[0-5]?[0-9]|60)(?:[:.,][0-9]+)?)\.(?:[0-9]){3})} [(?<logLevel>.*)] [(?<code>.*)] (?<caller>.*) - (?<logMessageType>.*) - (?<message>.*) - (?<correlationId>.*) - (?<userId>.*) - (?<data>.*) - (?<operation>.*) - (?<error>.*) - (?<stackTrace>.*)
inputs:
  main: |-
    input {
      beats {
        port => 5044
      }
    }
filters:
outputs:
  main: |-
    output {
      elasticsearch {
        hosts => ["${ELASTICSEARCH_HOST}:${ELASTICSEARCH_PORT}"]
        manage_template => false
        index => "%{[@metadata][beat]}-%{+YYYY.MM.dd}"
        document_type => "%{[@metadata][type]}"
      }
    }
This becomes a Kubernetes configMap "logstash-patterns":
apiVersion: v1
kind: ConfigMap
data:
  main: (?<time>(?:2[0123]|[01]?[0-9]):(?:[0-5][0-9]):(?:(?:[0-5]?[0-9]|60)(?:[:.,][0-9]+)?)\.(?:[0-9]){3}) [(?<code>.*)] [(?<logLevel>.*)] (?<service>.*) - (?<logMessageType>.*) - (?<message>.*) - (?<correlationId>.*) - (?<userId>.*) - (?<data>.*) - (?<operation>.*) - (?<error>.*) - (?<stackTrace>.*)
I don't see any error logs in the Logstash pod.
Do you have any idea how to configure patterns in Logstash on Kubernetes?
Thanks.
I was confusing "pattern" with "filter".
In the Helm chart, "patterns" is for specifying custom grok patterns (https://grokdebug.herokuapp.com/patterns):
MY_CUSTOM_ALL_CHARS .*
My grok filter should go in the filters section:
patterns:
  # nothing here for me
filters:
  main: |-
    filter {
      grok {
        match => { "message" => "\{%{TIMESTAMP_ISO8601:time}\} \[%{DATA:logLevel}\] \[%{DATA:code}\] %{DATA:caller} &\$ %{DATA:logMessageType} &\$ %{DATA:message} &\$ %{DATA:correlationId} &\$ %{DATA:userId} &\$ %{DATA:data} &\$ %{DATA:operation} &\$ %{DATA:error} &\$ (?<stackTrace>.*)" }
        overwrite => [ "message" ]
      }
      date {
        match => ["time", "ISO8601"]
        target => "time"
      }
    }
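For completeness, a pattern declared under patterns: can then be referenced by name in the grok match, just like the built-in ones. A sketch, under the assumption that the chart mounts the patterns where grok looks for them (if not, grok's patterns_dir option can point at the mounted directory); MY_CUSTOM_ALL_CHARS is the example name from above:

patterns:
  main: |-
    MY_CUSTOM_ALL_CHARS .*
filters:
  main: |-
    filter {
      grok {
        match => { "message" => "%{MY_CUSTOM_ALL_CHARS:everything}" }
      }
    }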
I'm trying to ingest Apache logs using Kibana, Elasticsearch, and Logstash, but Logstash doesn't create an index in Elasticsearch, so I'm not able to visualize data in Kibana.
This is my docker-compose:
elasticsearch:
  image: docker.elastic.co/elasticsearch/elasticsearch-oss:6.2.4
  container_name: elasticsearch
  hostname: elasticsearch
  environment:
    - cluster.name=docker-cluster
    - bootstrap.memory_lock=true
    - ES_JAVA_OPTS=-Xms512m -Xmx512m
    - http.cors.enabled=true
    - http.cors.allow-origin= "*"
  ulimits:
    memlock:
      soft: -1
      hard: -1
  volumes:
    - esdata1:/usr/share/elasticsearch/data
  ports:
    - 9200:9200
logstash:
  image: docker.elastic.co/logstash/logstash-oss:6.2.4
  restart: unless-stopped
  depends_on:
    - elasticsearch
  volumes:
    - ./logstash-apache.conf:/opt/logstash/logstash-apache.conf
    - ./logs:/logs/access_log
  links:
    - elasticsearch
  command: logstash -f /opt/logstash/logstash-apache.conf
kibana:
  image: docker.elastic.co/kibana/kibana-oss:6.2.4
  container_name: kibana
  volumes:
    - esdata2:/usr/share/kibana/config/data
  ports:
    - 5601:5601
  depends_on:
    - elasticsearch
  links:
    - elasticsearch
volumes:
  esdata1:
    driver: local
  esdata2:
    driver: local
This is my logstash-apache.conf:
input {
  file {
    type => "apache_access"
    path => "/var/log/httpd/access_log"
    start_position => beginning
  }
}
filter {
  if [type] == "apache_access" {
    grok {
      match => { "message" => "%{COMBINEDAPACHELOG}( \*\*%{POSINT:responsetime}\*\*)?" }
    }
    date {
      match => [ "timestamp" , "dd/MMM/yyyy:HH:mm:ss Z" ]
    }
  }
}
output {
  elasticsearch {
    hosts => ["elasticsearch:9200"]
    index => "apache_logstash-%{+YYYY.MM.dd}"
  }
}
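For reference, %{COMBINEDAPACHELOG} expects the standard Apache combined format; a line like the classic example from the Apache docs would parse, with the trailing group making a **responsetime** suffix optional:

127.0.0.1 - frank [10/Oct/2000:13:55:36 -0700] "GET /apache_pb.gif HTTP/1.0" 200 2326 "http://www.example.com/start.html" "Mozilla/4.08 [en] (Win98; I ;Nav)"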
This is the output message:
logstash_1| [2018-07-18T23:55:25,926][INFO ][logstash.agent] Pipelines running {:count=>1, :pipelines=>["main"]}
I have no errors in my output, but the problem is that Logstash is not producing any data.
What shall I do? Could anyone help me, please?
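One mismatch stands out between the two files above: the compose file mounts ./logs at /logs/access_log inside the container, while the file input reads /var/log/httpd/access_log, so nothing exists at that path and no events are produced. A hedged sketch of an aligned mount, assuming ./logs on the host contains the access_log file:

logstash:
  volumes:
    - ./logstash-apache.conf:/opt/logstash/logstash-apache.conf
    # Mount the host's ./logs directory where the file input expects it
    - ./logs:/var/log/httpd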
I am new to the ELK stack. I am trying to set up FileBeat --> Logstash --> ElasticSearch --> Kibana. While trying to send FileBeat output to the Logstash input, I am getting the error below on the Logstash side:
CircuitBreaker::rescuing exceptions {:name=>"Beats input", :exception=>LogStash::Inputs::Beats::InsertingToQueueTakeTooLong, :level=>:warn}
Beats input: The circuit breaker has detected a slowdown or stall in the pipeline, the input is closing the current connection and rejecting new connection until the pipeline recover. {:exception=>LogStash::Inputs::BeatsSupport::CircuitBreaker::HalfOpenBreaker, :level=>:warn}
I am using Logstash 2.3.2 with FileBeat 1.2.2 and Elasticsearch 2.2.1.
My Logstash config:
input {
  beats {
    port => 5044
    # codec => multiline {
    #   pattern => "^%{TIME}"
    #   negate => true
    #   what => previous
    # }
  }
}
filter {
  grok {
    match => { "message" => "^%{TIME:time}\s+%{LOGLEVEL:level}" }
  }
}
output {
  elasticsearch {
    hosts => ["host:9200"]
    manage_template => false
    index => "%{[@metadata][beat]}-%{+YYYY.MM.dd}"
    document_type => "%{[@metadata][type]}"
  }
}
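As a stopgap for the circuit-breaker warnings, the beats input in Logstash 2.x exposed a congestion_threshold setting (removed in later versions) that controls how long the input blocks on a busy pipeline before tripping; raising it buys time but does not cure a genuinely stalled pipeline:

input {
  beats {
    port => 5044
    # Seconds to wait on a busy pipeline before the breaker opens (Logstash 2.x only)
    congestion_threshold => 30
  }
}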
My FileBeat config:
filebeat:
  prospectors:
    - paths:
        - "*.log"
      input_type: log
      tail_files: false
output:
  logstash:
    hosts: ["host:5044"]
    compression_level: 3
shipper:
logging:
  to_files: true
  files:
    path: /tmp
    name: mybeat.log
  level: error
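Since a multiline codec on the beats input (like the commented-out one above) was a known trigger for this circuit breaker in Logstash 2.x, a common mitigation was to join multiline events in FileBeat itself, supported since FileBeat 1.1. A sketch, assuming each event starts with an HH:MM:SS time:

filebeat:
  prospectors:
    - paths:
        - "*.log"
      input_type: log
      multiline:
        # Lines not starting with HH:MM:SS are appended to the previous event
        pattern: '^[0-9]{2}:[0-9]{2}:[0-9]{2}'
        negate: true
        match: after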
So I am using docker-compose to launch the ELK stack, which will be fed by FileBeat... My config is something like this:
elasticsearch:
  image: elasticsearch:latest
  command: elasticsearch -Des.network.host=_non_loopback_
  ports:
    - "9200:9200"
    - "9300:9300"
logstash:
  image: logstash:latest
  command: logstash -f /etc/logstash/conf.d/logstash.conf -b 10000 -w 1
  volumes:
    - ./logstash/config:/etc/logstash/conf.d
  ports:
    - "5044:5044"
  links:
    - elasticsearch
  environment:
    - LS_HEAP_SIZE=2048m
kibana:
  build: kibana/
  volumes:
    - ./kibana/config/:/opt/kibana/config/
  ports:
    - "5601:5601"
  links:
    - elasticsearch
My logstash.conf file looks something like this:
input {
  beats {
    port => 5044
  }
}
....
output {
  elasticsearch {
    hosts => "localhost:9200"
    manage_template => false
    index => "%{[@metadata][beat]}-%{+YYYY.MM.dd}"
    document_type => "%{[@metadata][type]}"
  }
}
These docker containers are running on the same instance and I have confirmed being able to hit both ports externally.
The error which appears during a sync of a file from filebeat is:
logstash_1 | {:timestamp=>"2016-05-19T19:52:55.167000+0000", :message=>"Attempted to send a bulk request to Elasticsearch configured at '[\"http://localhost:9200/\"]', but Elasticsearch appears to be unreachable or down!", :error_message=>"Connection refused", :class=>"Manticore::SocketException", :client_config=>{:hosts=>["http://localhost:9200/"], :ssl=>nil, :transport_options=>{:socket_timeout=>0, :request_timeout=>0, :proxy=>nil, :ssl=>{}}, :transport_class=>Elasticsearch::Transport::Transport::HTTP::Manticore, :logger=>nil, :tracer=>nil, :reload_connections=>false, :retry_on_failure=>false, :reload_on_failure=>false, :randomize_hosts=>false, :http=>{:scheme=>"http", :user=>nil, :password=>nil, :port=>9200}}, :level=>:error}
Thanks,
You are trying to reach Elasticsearch on localhost, but that cannot work: inside the Logstash container, localhost is the Logstash container itself.
You have to access it via the link:
output {
  elasticsearch {
    hosts => "elasticsearch:9200"
    manage_template => false
    index => "%{[@metadata][beat]}-%{+YYYY.MM.dd}"
    document_type => "%{[@metadata][type]}"
  }
}
Or, if you want to reach your Elasticsearch instance from "outside", use the host machine's IP address instead of localhost (not 127.0.0.1).
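For example, a sketch with a placeholder address (substitute the Docker host's actual LAN IP):

output {
  elasticsearch {
    # Hypothetical host IP; from inside a container, localhost/127.0.0.1 never reaches the host
    hosts => "192.168.1.20:9200"
  }
}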