Fields not being imported to Elasticsearch

I have an ELK instance that uses a Redis channel as a buffer. Logs are read and correctly parsed by the shipper and pushed into Redis, but nothing makes it to Elasticsearch.
My shipper config looks like this:
input {
  file {
    path => [ "/var/log/aggregates.log" ]
    type => "aggregates"
  }
}
output {
  redis {
    host => "xxxx"
    data_type => "channel"
    key => "logstash-aggregates"
  }
}
filter {
  csv {
    columns => [ 'start_time', 'end_time', 'total_count' ... ]
    separator => ","
  }
}
The indexer config looks like this:
input {
  redis {
    host => "xxxx"
    type => "aggregates"
    data_type => "channel"
    key => "logstash-aggregates"
    format => "json_event"
  }
}
output {
  elasticsearch {
    bind_host => "xxxx"
    cluster => "default_cluster"
    host => "xxxx"
    action => "index"
  }
}
Is there something I'm missing here? I can't seem to figure it out.
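Two things may be worth checking here, though neither is confirmed by the thread: the format => "json_event" option on inputs was deprecated in favour of codecs and removed in later Logstash releases, and a Redis channel is plain pub/sub, so it only delivers to subscribers connected at publish time and does not buffer anything; a list does. A minimal indexer sketch along those lines, keeping the placeholder hosts from the question (the shipper's redis output would need the matching data_type => "list"):
input {
  redis {
    host => "xxxx"
    data_type => "list"      # a list behaves like a queue; "channel" is fire-and-forget pub/sub
    key => "logstash-aggregates"
    codec => "json"          # replaces the old format => "json_event" option
    type => "aggregates"
  }
}
output {
  elasticsearch {
    hosts => ["xxxx:9200"]   # newer plugin versions take hosts as an array; older ones used host/cluster
    action => "index"
  }
}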

Related

Logstash, how to send logs from specific source to specific index

I'm trying to send logs from a specific source to a specific index.
So in logstash.conf I did the following:
input {
  gelf {
    port => 12201
    # type => docker
    use_tcp => true
    tags => ["docker"]
  }
filter {
  if "test_host" in [_source][host] {
    mutate { add_tag => "test_host"}
  }
output {
  if "test_host" in [tags] {
    stdout { }
    opensearch {
      hosts => ["https://opensearch:9200"]
      index => "my_host_index"
      user => "administrator"
      password => "some_password"
      ssl => true
      ssl_certificate_verification => false
    }
  }
But unfortunately it's not working.
What am I doing wrong?
Thanks.
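For comparison, here is a minimal sketch of how this kind of conditional routing is usually structured. The closed input/filter/output blocks and the plain [host] field reference (the gelf input typically exposes the sender as a top-level host field, not [_source][host]) are assumptions about what the original config intended; the OpenSearch settings are the placeholders from the question:
input {
  gelf {
    port => 12201
    use_tcp => true
    tags => ["docker"]
  }
}
filter {
  # tag events whose host field contains "test_host"
  if "test_host" in [host] {
    mutate { add_tag => ["test_host"] }
  }
}
output {
  if "test_host" in [tags] {
    stdout { codec => rubydebug }
    opensearch {
      hosts => ["https://opensearch:9200"]
      index => "my_host_index"
      user => "administrator"
      password => "some_password"
      ssl => true
      ssl_certificate_verification => false
    }
  }
}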

How to split file name in logstash?

I am ingesting a file from an S3 bucket into Logstash. The file name contains some information that I want to split into multiple fields so I can use them as separate fields. Please help me, I am new to ELK.
input {
  s3 {
    bucket => "***********"
    access_key_id => "***********"
    secret_access_key => "*******"
    "region" => "*********"
    "prefix" => "Logs"
    "interval" => "1"
    "additional_settings" => {
      "force_path_style" => true
      "follow_redirects" => false
    }
  }
}
filter {
  mutate {
    add_field => {
      "file" => "%{[@metadata][s3][key]}" # this file name has to be split
    }
  }
}
output {
  elasticsearch {
    hosts => ["localhost:9200"]
    index => "indexforlogstash"
  }
}
In the filter section you can leverage the dissect filter in order to achieve what you want:
filter {
  ...
  dissect {
    mapping => {
      "file" => "Logs/%{deviceId}-%{buildId}-log.txt"
    }
  }
}
After going through this filter, your document is going to get two new fields, namely:
deviceId (1232131)
buildId (custombuildv12)
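A quick way to check the mapping before wiring it to the S3 input is to feed a sample key through a generator input. This is only a local test sketch; the sample key is reconstructed from the example values above:
input {
  generator {
    # sample S3 key shaped like the mapping above
    message => "Logs/1232131-custombuildv12-log.txt"
    count => 1
  }
}
filter {
  mutate { add_field => { "file" => "%{message}" } }
  dissect {
    mapping => { "file" => "Logs/%{deviceId}-%{buildId}-log.txt" }
  }
}
output {
  stdout { codec => rubydebug }
}
Running this with bin/logstash -f pointing at the test config should print one event carrying the deviceId and buildId fields.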

Logstash creates a strange index name

I use Logstash 7.9.3, and with this version I have problems creating the right index name, like logstash-2021.01.01. I need the first 9 days of the month padded with a 0.
With the pattern logstash-%{+yyyy.MM.dd} the result is logstash-2021.01.01-000001.
With the pattern logstash-%{+yyyy.MM.d} the result is logstash-2021.01.1.
input {
  redis {
    host => "someip_of_redis"
    data_type => "list"
    key => "logstash"
    codec => "json"
  }
}
output {
  elasticsearch {
    hosts => ["http://someip_of_elastic:9200"]
    index => "logstash-%{+yyyy.MM.dd}"
  }
}
Thank you in advance
To disable ILM, which is what appends the -000001 rollover suffix (the yyyy.MM.dd pattern itself already zero-pads the day), I added ilm_enabled => false to the config:
input {
  redis {
    host => "someip_of_redis"
    data_type => "list"
    key => "logstash"
    codec => "json"
  }
}
output {
  elasticsearch {
    hosts => ["http://someip_of_elastic:9200"]
    ilm_enabled => false
    index => "logstash-%{+yyyy.MM.dd}"
  }
}

Logstash redis configuration not pushing logs to ES

We are using the ELK stack to monitor our logs. I am a total newbie to the ELK environment; recently I was working on a task where I needed to configure Logstash to push our logs through Redis.
Below is the config I am using. It works with Elasticsearch but does not work with Redis:
input {
  file {
    path => "E:/Logs/**/*.log"
    start_position => beginning
    codec => json
  }
}
filter {
  date {
    match => [ "TimeCreated", "YYYY-MM-dd HH:mm:ss Z" ]
  }
  mutate {
    add_field => {
      #"debug" => "true"
      "index_prefix" => "logstash-app"
    }
  }
}
output {
  #elasticsearch {
    #local env
    #hosts => ["localhost:9200"]
    #preprod env
    #hosts => ["elk.logs.abc.corp.com:9200"]
    #prod env
    #hosts => ["elk.logs.abc.prod.com:9200"]
    #index => "logstash-app"
  #}
  redis {
    #local env
    #host => "localhost:5300"
    #preprod env
    host => "redis.logs.abc.corp.com"
    #prod env
    #host => "redis.logs.abc.prod.com"
    data_type => "list"
    key => "logstash"
  }
  if [debug] == "true" {
    stdout {
      codec => rubydebug
    }
    file {
      path => "../data/logstash-app-%{+YYYYMMdd}.log"
    }
  }
}
I commented out the Elasticsearch output. With Elasticsearch I am able to view the logs in Kibana, but with Redis I am not.
Can anyone point out what I am doing wrong? How could I debug or check whether my logs are shipped correctly?
Based on the documentation of the Logstash redis output plugin, host should be an array:
redis {
  #preprod env
  host => ["redis.logs.abc.corp.com"]
  data_type => "list"
  key => "logstash"
}
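One more point the thread does not cover: pushing events into Redis is only half of the pipeline, and nothing will show up in Kibana unless a separate indexer reads the Redis list and writes to Elasticsearch. A sketch of such an indexer, reusing the preprod host names and the index_prefix field from the question (which environment applies is an assumption):
input {
  redis {
    host => "redis.logs.abc.corp.com"
    data_type => "list"
    key => "logstash"
    codec => "json"
  }
}
output {
  elasticsearch {
    hosts => ["elk.logs.abc.corp.com:9200"]
    index => "%{index_prefix}-%{+YYYY.MM.dd}"
  }
}
On the Redis side, redis-cli llen logstash shows whether the shipper is actually filling the list.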

Can I use the mutate filter in Logstash to convert some fields from a genjdbc input to integers?

I am using the genjdbc input plugin for Logstash to get data from a DB2 database. It works perfectly; I get all the database columns as fields in Kibana.
The problem is that in Kibana all fields are of string type, and I want the numeric fields to be integers. I have tried the following code, but the result is the same as if no filter clause existed.
Can someone help me solve this? Thanks in advance!
The logstash.conf code:
input {
  genjdbc {
    jdbcHost => "XXX.XXX.XXX.XXX"
    jdbcPort => "51260"
    jdbcTargetDB => "db2"
    jdbcDBName => "XXX"
    jdbcUser => "XXX"
    jdbcPassword => "XXX"
    jdbcDriverPath => "C:\...\db2jcc4.jar"
    jdbcSQLQuery => "SELECT * FROM XXX1"
    jdbcTimeField => "LOGSTAMP"
    jdbcPStoreFile => "C:\elk\logstash\bin\db2.pstore"
    jdbcURL => "jdbc:db2://XXX.XXX.XXX.XXX:51260/XXX"
    type => "table1"
  }
  genjdbc {
    jdbcHost => "XXX.XXX.XXX.XXX"
    jdbcPort => "51260"
    jdbcTargetDB => "db2"
    jdbcDBName => "XXX"
    jdbcUser => "XXX"
    jdbcPassword => "XXX"
    jdbcDriverPath => "C:\...\db2jcc4.jar"
    jdbcSQLQuery => "SELECT * FROM XXX2"
    jdbcTimeField => "LOGSTAMP"
    jdbcPStoreFile => "C:\elk\logstash\bin\db2.pstore"
    jdbcURL => "jdbc:db2://XXX.XXX.XXX.XXX:51260/XXX"
    type => "table2"
  }
}
filter {
  mutate {
    convert => [ "T1", "integer" ]
    convert => [ "T2", "integer" ]
    convert => [ "T3", "integer" ]
  }
}
output {
  if [type] == "table1" {
    elasticsearch {
      host => "localhost"
      protocol => "http"
      index => "db2_1-%{+YYYY.MM.dd}"
    }
  }
  if [type] == "table2" {
    elasticsearch {
      host => "localhost"
      protocol => "http"
      index => "db2_2-%{+YYYY.MM.dd}"
    }
  }
}
What you have should work as long as the fields you are trying to convert to integer are named T1, T2 and T3, and you are inserting into an index that doesn't have any data yet. If you already have data in the index, you'll need to delete the index so that Logstash can recreate it with the correct mapping.
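As a concrete example of that last step, using the index names from the config above (adjust the host if Elasticsearch is not local, and note that wildcard deletes assume the cluster allows them):
curl -XDELETE 'http://localhost:9200/db2_1-*'
curl -XDELETE 'http://localhost:9200/db2_2-*'
Once Logstash re-inserts the data, T1, T2 and T3 should come back mapped as integers, and Kibana shows them as numbers after the index pattern's field list is refreshed.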
