Elasticsearch: Duplicates caused by overwriting log files

I'm using the ELK stack. Log files are rewritten every 5 minutes by a simple Java app, and Filebeat ships them to Logstash. Because the files are overwritten, the same messages get indexed again even though their fingerprints are identical; the only difference is the document id, since Elasticsearch assigns a new id every time a document is re-ingested. How can I get rid of the duplicates or keep the document id the same?
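The approach I'm trying is to derive the document id from a fingerprint of each line, so that re-reading the same file maps to the same _id instead of getting a fresh one. A stripped-down sketch of that idea (field and index names here are just placeholders, my real configs follow below):

filter {
  fingerprint {
    method => "SHA1"
    source => ["message"]            # hash the raw line so identical lines produce the same value
    target => "[@metadata][fp]"      # keep the hash out of the indexed document
  }
}
output {
  elasticsearch {
    hosts => ["localhost"]
    index => "logs-%{+YYYY.MM.dd}"
    document_id => "%{[@metadata][fp]}"   # same line -> same _id -> overwrite instead of duplicate
  }
}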
Logstash input:
input {
beats {
port => 5044
ssl => false
ssl_certificate => "/etc/pki/tls/certs/logstash-beats.crt"
client_inactivity_timeout => 200
ssl_key => "/etc/pki/tls/private/logstash-beats.key"
}
}
filter {
if [fields][log_type] == "access" {
grok {
match => [ "message", "%{IP:client_ip} %{USER:ident} %{USER:auth} \[%{HTTPDATE:apache_timestamp}\] \"%{WORD:method} /%{WORD:servername}/%{NOTSPACE:requestpage} HTTP/%{NUMBER:http_version}\" %{NUMBER:server_response} %{NUMBER:answer_size}" ]
}
}
else if [fields][log_type] == "errors" {
grok {
match => {"message" => "%{DATESTAMP:maximotime}(.*)SystemErr"}
}
date {
timezone => "Europe/Moscow"
match => ["maximotime", "dd.MM.yy HH:mm:ss:SSS"]
}
mutate {
copy => { "message" => "key" }
}
mutate {
gsub => [
"message", ".*SystemErr R ", "",
"key", ".*SystemErr R", ""
]
}
truncate {
fields => "key"
length_bytes => 255
}
fingerprint {
method => "SHA1"
source => ["key"]
}
if "_grokparsefailure" in [tags] {
drop { }
}
} else if [fields][log_type] == "info" {
grok {
match => {"message" => ["%{TIMESTAMP_ISO8601:maximotime}.* ПОЛЬЗОВАТЕЛЬ = \(%{WORD:username}.*программа \(%{WORD:appname}\).*объект \(%{WORD:object}\).*: %{GREEDYDATA:sql} \(выполнение заняло %{NUMBER:execution} миллисекунд\) \{conditions:%{GREEDYDATA:conditions}\}", "%{TIMESTAMP_ISO8601:maximotime}.* ПОЛЬЗОВАТЕЛЬ = \(%{WORD:username}.*программа \(%{WORD:appname}\).*объект \(%{WORD:object}\).*: %{GREEDYDATA:sql} \{conditions:%{GREEDYDATA:conditions}\}", "%{TIMESTAMP_ISO8601:maximotime}.* ПОЛЬЗОВАТЕЛЬ = \(%{WORD:username}.*программа \(%{WORD:appname}\).*объект \(%{WORD:object}\).*: %{GREEDYDATA:sql} \(выполнение заняло %{NUMBER:execution} миллисекунд\)"]}
add_field => {
"type" => "conditions"
}
}
mutate {
convert => {
"execution" => "integer"
}
}
fingerprint {
method => "SHA1"
source => ["message"]
}
if "_grokparsefailure" in [tags] {
grok {
match => {"message" => "%{TIMESTAMP_ISO8601:maximotime} (.*)getMboCount %{WORD:object}: mbosets \(%{WORD:mbosets}\), mbos \(%{WORD:mbos}\)"}
add_field => {
"type" => "maximoObjectCount"
}
remove_tag => ["_grokparsefailure"]
}
mutate {
convert => {
"mbosets" => "integer"
"mbos" => "integer"
}
}
fingerprint {
method => "SHA1"
source => ["message"]
}
if "_grokparsefailure" in [tags] {
drop { }
}
}
date {
timezone => "Europe/Moscow"
match => ["maximotime", "yyyy-MM-dd HH:mm:ss:SSS"]
target => "maximotime"
}
}
}
Logstash output:
output {
stdout {codec => rubydebug}
if [fields][log_type] == "access" {
elasticsearch {
hosts => ["localhost"]
manage_template => false
index => "%{[#metadata][beat]}-%{+YYYY.MM.dd}"
document_type => "%{[#metadata][type]}"
}
} else if [fields][log_type] == "errors"{
elasticsearch {
hosts => ["localhost"]
manage_template => false
index => "%{[#metadata][beat]}-error-%{+YYYY.MM.dd}"
document_type => "%{[#metadata][type]}"
}
} else if [fields][log_type] == "info"{
elasticsearch {
hosts => ["localhost"]
manage_template => false
index => "%{[#metadata][beat]}-info-%{+YYYY.MM.dd}"
document_type => "%{[#metadata][type]}"
document_id => "%{fingerprint}"
}
}
}
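Note that the errors branch also computes a fingerprint in the filter above, but its elasticsearch output does not set document_id, so that index can still collect duplicates. A sketch of applying the same trick there (assuming the SHA1 of the truncated key field is what you want to dedupe on):

elasticsearch {
  hosts => ["localhost"]
  manage_template => false
  index => "%{[@metadata][beat]}-error-%{+YYYY.MM.dd}"
  document_type => "%{[@metadata][type]}"
  document_id => "%{fingerprint}"   # the fingerprint filter writes to the "fingerprint" field by default
}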
Filebeat.yml:
filebeat.config:
  modules:
    path: ${path.config}/modules.d/*.yml
    reload.enabled: false
processors:
  - add_cloud_metadata: ~
filebeat.inputs:
  - type: log
    enabled: true
    paths:
      - /var/log/integration/*.log
    fields: {log_type: access}
  - type: log
    enabled: true
    paths:
      - /var/log/maximo_error_logs/*.log
    fields: {log_type: errors}
    exclude_lines: '^((\*+)|Log file started at:)'
    multiline.pattern: '(^$|(\t|\s)at .*|.*Caused by:.*|.*SystemErr( ){5}R[ \t]{2}at .*|^ru.ocrv..*|^(\s|\t|)null.*|Обратитесь за.*|.*Закрытое со.*|^(\s|\t|)(ORA-.*|BMX.*)|^(\\s|\t)[А-Яа-я].*)|(.*\d more$)'
    multiline.negate: false
    multiline.match: after
  - type: log
    enabled: true
    paths:
      - /var/log/maximo_logs/*.log
    fields: {log_type: info}
output.logstash:
  hosts: ["elk:5044"]
  bulk_max_size: 200

I'm dumb. I was restarting the Filebeat container instead of the ELK one, so my Logstash config changes weren't being applied... Now it's working, and my Logstash output config looks like this:
document_id => "%{type}-%{fingerprint}"
action => "create"

Related

Order the elastic search fields using logstash

Hi, below is my config. I am trying to index data from MSSQL into Elasticsearch using Logstash, but the fields are not arranged in the expected manner.
input {
jdbc {
jdbc_driver_library => "D:\Users\mmurugesan\Desktop\driver\mssql-jdbc-7.4.1.jre12-shaded.jar"
jdbc_driver_class => "com.microsoft.sqlserver.jdbc.SQLServerDriver"
jdbc_connection_string => "jdbc:sqlserver://EC2AMAZ-J90JR4A\SQLEXPRESS:1433;databaseName=xxxx;"
jdbc_user => "xxx"
jdbc_password => "xxxx"
jdbc_paging_enabled => true
tracking_column => modified_date
use_column_value => true
clean_run => true
tracking_column_type => "timestamp"
schedule => "*/1 * * * *"
statement => "Select pl.Policyholdername,pl.Age,pl.Dob,pl.Client_Address,cl.claimnumber,cl.claimtype,cl.is_active,cl.Modified_date from claim cl
inner join Policy pl on pl.Policynumber=cl.policynumber
where cl.Modified_date >:sql_last_value"
last_run_metadata_path => "D:\Users\mmurugesan\Desktop\logstash-7.5.2\jdbc_lastrun\jdbc_last_run.txt"
jdbc_default_timezone => "UTC"
}
}
filter {
if [is_active] {
mutate {
add_field => {
"[#metadata][elasticsearch_action]" => "index"
}
}
mutate {
remove_field => [ "is_active","#version","#timestamp" ]
}
} else {
mutate {
add_field => {
"[#metadata][elasticsearch_action]" => "delete"
}
}
mutate {
remove_field => [ "is_active","#version","#timestamp" ]
}
}
}
output {
elasticsearch {
hosts => "xxx"
user => "xxx"
password => "xxx"
index => "duckcreek"
action => "%{[#metadata][elasticsearch_action]}"
document_type => "_doc"
document_id => "%{claimnumber}"
}
stdout { codec => rubydebug }
}
After indexing into Elasticsearch, the fields are not arranged in the expected order (screenshot attached for reference).
Is it possible to control the field arrangement in Elasticsearch using Logstash?

Logstash : Is there a way to change some of the properties in document while migrating

I have been migrating some of the indexes from self-hosted Elasticsearch to Amazon Elasticsearch Service using Logstash. While migrating the documents, we need to change some of the field names in the index based on certain logic.
Our Logstash config file:
input {
elasticsearch {
hosts => ["https://staing-example.com:443"]
user => "userName"
password => "password"
index => "testingindex"
size => 100
scroll => "1m"
}
}
filter {
}
output {
amazon_es {
hosts => ["https://example.us-east-1.es.amazonaws.com:443"]
region => "us-east-1"
aws_access_key_id => "access_key_id"
aws_secret_access_key => "access_key_id"
index => "testingindex"
}
stdout{
codec => rubydebug
}
}
Here is one of the documents from the testingindex on our self-hosted Elasticsearch:
{
"uniqueIdentifier" => "e32d331b-ce5f-45c8-beca-b729707fca48",
"createdDate" => 1527592562743,
"interactionInfo" => [
{
"value" => "Hello this is testing",
"title" => "msg",
"interactionInfoId" => "8c091cb9-e51b-42f2-acad-79ad1fe685d8"
},
{
**"value"** => """"{"edited":false,"imgSrc":"asdfadf/soruce","cont":"Collaborated in <b class=\"mention\" gid=\"4UIZjuFzMXiu2Ege6cF3R4q8dwaKb9pE\">#2222222</b> ","chatMessageObjStr":"Btester has quoted your feed","userLogin":"test.comal#google.co","userId":"tester123"}"""",
"title" => "msgMeta",
"interactionInfoId" => "f6c7203b-2bde-4cc9-a85e-08567f082af3"
}
],
"componentId" => "compId",
"status" => [
"delivered"
],
"accountId" => "test123",
"applicationId" => "appId"
}
This is what we expect the documents to look like once migrated to Amazon Elasticsearch Service:
{
"uniqueIdentifier" => "e32d331b-ce5f-45c8-beca-b729707fca48",
"createdDate" => 1527592562743,
"interactionInfo" => [
{
"value" => "Hello this is testing",
"title" => "msg",
"interactionInfoId" => "8c091cb9-e51b-42f2-acad-79ad1fe685d8"
},
{
**"value-keyword"** => """"{"edited":false,"imgSrc":"asdfadf/soruce","cont":"Collaborated in <b class=\"mention\" gid=\"4UIZjuFzMXiu2Ege6cF3R4q8dwaKb9pE\">#2222222</b> ","chatMessageObjStr":"Btester has quoted your feed","userLogin":"test.comal#google.co","userId":"tester123"}"""",
"title" => "msgMeta",
"interactionInfoId" => "f6c7203b-2bde-4cc9-a85e-08567f082af3"
}
],
"componentId" => "compId",
"status" => [
"delivered"
],
"accountId" => "test123",
"applicationId" => "appId"
}
What we need is to rename the "value" field to "value-keyword" wherever the value is in JSON format. Is there a filter in Logstash to achieve this?
As documented on the Logstash website:
https://www.elastic.co/guide/en/logstash/current/plugins-filters-mutate.html#plugins-filters-mutate-rename
You can use the mutate filter, applying the rename function.
For example:
filter {
mutate {
rename => { "old-field" => "new-field" }
}
}
For nested fields, you could just pass the path of the field:
filter {
mutate {
rename => { "[interactionInfo][value]" => "[interactionInfo][value-keyword]" }
}
}
Try adding this to your filter:
filter {
ruby {
code => "info = event.get('interactionInfo'); if info; info.each { |item| item['value-keyword'] = item.delete('value') if item['value'].to_s.match(/{.+}/) }; event.set('interactionInfo', info); end"
}
}

Logstash pipeline template for Spring Boot deployed to Cloud Foundry

I am looking for a very basic pipeline template that allows me to correctly index all available fields of a log message.
I use Spring Boot (2.1.x) out of the box, deploy it to Cloud Foundry, and log via stdout/logdrain to Logstash and eventually to Elasticsearch.
I have already searched the internet and found only one template for Cloud Foundry apps:
input {
http {
port => "5044"
user => "inputuser"
password => "inputpassword"
}
}
filter {
grok {
#patterns_dir => "{{ .Env.HOME }}/grok-patterns"
match => { "message" => "%{SYSLOG5424PRI}%{NONNEGINT:syslog5424_ver} +(?:%{TIMESTAMP_ISO8601:syslog5424_ts}|-) +(?:%{HOSTNAME:syslog5424_host}|-) +(?:%{NOTSPACE:syslog5424_app}|-) +(?:%{NOTSPACE:syslog5424_proc}|-) +(?:%{WORD:syslog5424_msgid}|-) +(?:%{SYSLOG5424SD:syslog5424_sd}|-|)%{SPACE}%{GREEDYDATA:message}" }
add_tag => [ "CF","CF-%{syslog5424_proc}","_grokked"]
add_field => { "format" => "cf" }
tag_on_failure => [ ]
overwrite => [ "message" ]
}
if [syslog5424_proc] =~ /(A[pP]{2}.+)/ {
mutate { add_tag => ["CF-APP"] }
mutate { remove_tag => ["_grokked"] }
}
if ("CF-APP" in [tags]) or !("CF" in [tags]) {
if [message] =~ /^{.*}/ {
json {
source => "message"
add_tag => [ "json", "_grokked"]
}
}
}
if !("_grokked" in [tags]) {
mutate{
add_tag => [ "_ungrokked" ]
}
}
}
output {
#stdout { codec => rubydebug }
if ("_grokked" in [tags]) {
elasticsearch {
hosts => ["https://ac9537fc444c489bb63ac44064c54519.elasticsearch.lyra-836.appcloud.swisscom.com"]
user => "myuser"
password => "mypassword"
ssl => true
ssl_certificate_verification => true
codec => "plain"
workers => 1
index => "parsed-%{+YYYY.MM.dd}"
manage_template => true
template_name => "logstash"
template_overwrite => true
}
} else {
elasticsearch {
hosts => ["https://ac9537fc848c489bb63ac44064c54519.elasticsearch.lyra-836.appcloud.swisscom.com"]
user => "myuser"
password => "mypassword"
ssl => true
ssl_certificate_verification => true
codec => "plain"
workers => 1
index => "unparsed-%{+YYYY.MM.dd}"
manage_template => true
template_name => "logstash"
template_overwrite => true
}
}
}
This already looks quite verbose, and it only covers Cloud Foundry fields while ignoring all application-specific fields, such as the log level (which doesn't follow key/value notation but sits at a fixed position in the log message).
One example log-message is:
2019-10-03T09:20:09.37+0200 [APP/PROC/WEB/0] OUT 2019-10-03 09:20:09.378 INFO 19 --- [ main] o.s.b.a.e.web.EndpointLinksResolver : Exposing 2 endpoint(s) beneath base path '/actuator'
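A grok along these lines is roughly what I have in mind for such a line; this is only a sketch, and the pattern is my assumption based on the default Spring Boot console layout behind the Cloud Foundry prefix, not something taken from the template above:

filter {
  grok {
    # assumed layout: "<cf timestamp> [<proc>] OUT <app timestamp> <LEVEL> <pid> --- [<thread>] <logger> : <text>"
    match => {
      "message" => "%{TIMESTAMP_ISO8601:cf_ts} \[%{DATA:cf_proc}\] %{WORD:cf_stream} %{TIMESTAMP_ISO8601:app_ts}\s+%{LOGLEVEL:level}\s+%{NUMBER:pid} --- \[\s*%{DATA:thread}\] %{NOTSPACE:logger}\s*: %{GREEDYDATA:app_message}"
    }
  }
}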
Any help is appreciated, thank you very much!
Update: Based on the first comment, I configured my Spring Boot application to log messages as JSON. In Cloud Foundry, I send those logs to Logstash via a user-provided service configured as a log drain. Logstash receives the message like this:
<14>1 2019-10-03T17:29:17.547195+00:00 cf-organization.cf-space.cf-app abc9dac6-1234-4b62-9eb4-98d1234d9ace [APP/PROC/WEB/1] - - {"app":"cf-app","ts":"2019-10-03T17:29:17.546+00:00","logger":"org.springframework.boot.web.embedded.netty.NettyWebServer","level":"INFO","class":"org.springframework.boot.web.embedded.netty.NettyWebServer","method":"start","file":"NettyWebServer.java","line":76,"thread":"main","msg":"Netty started on port(s): 8080"}
Using the above filter, Logstash parses it to this json:
{
"syslog5424_ts": "2019-10-03T17:29:17.547195+00:00",
"syslog5424_pri": "14",
"syslog5424_ver": "1",
"message": "{\"app\":\"cf-app\",\"ts\":\"2019-10-03T17:29:17.546+00:00\",\"logger\":\"org.springframework.boot.web.embedded.netty.NettyWebServer\",\"level\":\"INFO\",\"class\":\"org.springframework.boot.web.embedded.netty.NettyWebServer\",\"method\":\"start\",\"file\":\"NettyWebServer.java\",\"line\":76,\"thread\":\"main\",\"msg\":\"Netty started on port(s): 8080\"}",
"syslog5424_app": "abc9dac6-1234-4b62-9eb4-98d1234d9ace",
"syslog5424_proc": "[APP/PROC/WEB/1]",
"syslog5424_host": "cf-organization.cf-space.cf-app"
}
How would I have to adjust the grok filter/output to simply send the value of the message key as JSON to Elasticsearch?
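The essential piece seems to be running the json filter over the grokked message field, something like this sketch (untested, and the template above already hints at it):

filter {
  # after the syslog grok, "message" holds the raw JSON line written by the app
  if [message] =~ /^{.*}/ {
    json {
      source => "message"           # parse the JSON payload into top-level fields
      remove_field => ["message"]   # optionally drop the raw string once parsed
    }
  }
}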
Ok, so I managed to do this with the following steps and thanks to this nice article:
Spring Boot app
Add this dependency
implementation 'net.logstash.logback:logstash-logback-encoder:5.2'
Add this src/main/resources/logback-spring.xml
<?xml version="1.0" encoding="UTF-8"?>
<configuration>
<property resource="application.properties"/>
<contextName>${spring.application.name}</contextName>
<appender name="CONSOLE" class="ch.qos.logback.core.ConsoleAppender">
<encoder class="net.logstash.logback.encoder.LoggingEventCompositeJsonEncoder">
<providers>
<contextName>
<fieldName>app</fieldName>
</contextName>
<timestamp>
<fieldName>ts</fieldName>
<timeZone>UTC</timeZone>
</timestamp>
<loggerName>
<fieldName>logger</fieldName>
</loggerName>
<logLevel>
<fieldName>level</fieldName>
</logLevel>
<callerData>
<classFieldName>class</classFieldName>
<methodFieldName>method</methodFieldName>
<lineFieldName>line</lineFieldName>
<fileFieldName>file</fileFieldName>
</callerData>
<threadName>
<fieldName>thread</fieldName>
</threadName>
<mdc/>
<arguments>
<includeNonStructuredArguments>false</includeNonStructuredArguments>
</arguments>
<stackTrace>
<fieldName>stack</fieldName>
</stackTrace>
<message>
<fieldName>msg</fieldName>
</message>
</providers>
</encoder>
</appender>
<root level="INFO">
<appender-ref ref="CONSOLE"/>
</root>
</configuration>
Add these properties
spring.application.name=<app-name>
spring.main.banner-mode=OFF
This will generate logs that look like this:
<14>1 2019-10-03T17:29:17.547195+00:00 cf-organization.cf-space.cf-app abc9dac6-1234-4b62-9eb4-98d1234d9ace [APP/PROC/WEB/1] - - {"app":"cf-app","ts":"2019-10-03T17:29:17.546+00:00","logger":"org.springframework.boot.web.embedded.netty.NettyWebServer","level":"INFO","class":"org.springframework.boot.web.embedded.netty.NettyWebServer","method":"start","file":"NettyWebServer.java","line":76,"thread":"main","msg":"Netty started on port(s): 8080"}
Now we need to parse the prepended text and add its values to the logged message.
Logstash-Pipeline
input {
http {
port => "5044"
user => "exampleUser"
password => "examplePassword"
}
}
filter{
grok {
#patterns_dir => "{{ .Env.HOME }}/grok-patterns"
match => { "message" => "%{SYSLOG5424PRI}%{NONNEGINT:syslog5424_ver} +(?:%{TIMESTAMP_ISO8601:syslog5424_ts}|-) +(?:%{HOSTNAME:syslog5424_host}|-) +(?:%{NOTSPACE:syslog5424_app}|-) +(?:%{NOTSPACE:syslog5424_proc}|-) +(?:%{WORD:syslog5424_msgid}|-) +(?:%{SYSLOG5424SD:syslog5424_sd}|-|)%{SPACE}%{GREEDYDATA:message}" }
add_tag => [ "CF", "CF-%{syslog5424_proc}", "parsed"]
add_field => { "format" => "cf" }
tag_on_failure => [ ]
overwrite => [ "message" ]
}
mutate {
split => ["syslog5424_host", "."]
add_field => { "cf-org" => "%{[syslog5424_host][0]}" }
add_field => { "cf-space" => "%{[syslog5424_host][1]}" }
add_field => { "cf-app" => "%{[syslog5424_host][2]}" }
}
if [syslog5424_proc] =~ /\[(A[pP]{2}.+)/ {
mutate { add_tag => ["CF-APP"] }
mutate { remove_tag => ["parsed"] }
}
if ("CF-APP" in [tags]) or !("CF" in [tags]) {
if [message] =~ /^{.*}/ {
json {
source => "message"
add_tag => [ "json", "parsed"]
}
}
}
if !("CF-APP" in [tags]) {
mutate {
add_field => { "msg" => "%{[message]}" }
add_tag => [ "CF-PAAS"]
}
}
if !("parsed" in [tags]) {
mutate{
add_tag => [ "unparsed" ]
}
}
}
output {
if ("parsed" in [tags]) {
elasticsearch {
hosts => ["https://7875eb592bb94554ad35421dccc6847f.elasticsearch.lyra-836.appcloud.swisscom.com"]
user => "logstash-system-ExjpCND01GbF7knG"
password => "5v9nUztOkz0WUdKK"
ssl => true
ssl_certificate_verification => true
codec => "plain"
workers => 1
index => "parsed-%{+YYYY.MM.dd}"
manage_template => true
template_name => "logstash"
template_overwrite => true
}
} else {
elasticsearch {
hosts => ["https://7875eb592bb94554ad35421dccc6847f.elasticsearch.lyra-836.appcloud.swisscom.com"]
user => "logstash-system-ExjpCND01GbF7knG"
password => "5v9nUztOkz0WUdKK"
ssl => true
ssl_certificate_verification => true
codec => "plain"
workers => 1
index => "unparsed-%{+YYYY.MM.dd}"
manage_template => true
template_name => "logstash"
template_overwrite => true
}
}
}
Thanks @Strelok for pointing me in the right direction.

Can’t Send @metadata to elasticsearch

I want to include the @metadata field contents in my Elasticsearch output.
This is the output when I am using stdout in my output section:
{
"#timestamp" => 2018-03-08T08:17:42.059Z,
"thread_name" => "SimpleAsyncTaskExecutor-2",
"#metadata" => {
"dead_letter_queue" => {
"entry_time" => 2018-03-08T08:17:50.082Z,
"reason" => "Could not index event to Elasticsearch. status: 400, action: ["index", {:_id=>nil, :_index=>"applog-2018.03.08", :_type=>"doc", :_routing=>nil}, #LogStash::Event:0x3ab79ab5], response: {"index"=>{"_index"=>"applog-2018.03.08", "_type"=>"doc", "_id"=>"POuwBGIB0PJDPQOoDy1Q", "status"=>400, "error"=>{"type"=>"mapper_parsing_exception", "reason"=>"failed to parse [message]", "caused_by"=>{"type"=>"illegal_state_exception", "reason"=>"Can't get text on a START_OBJECT at 1:223"}}}}",
"plugin_type" => "elasticsearch",
"plugin_id" => "7ee60ceccc2ef7c933cf5aa718d42f24a65b489e12a1e1c7b67ce82e04ef0d37"
}
},
"#version" => "1",
"beat" => {
"name" => "filebeat-kwjn6",
"version" => "6.0.0"
},
"dateOffset" => 408697,
"source" => "/var/log/applogs/spring-cloud-dataflow/Log.log",
"logger_name" => "decurtis.dxp.deamon.JobConfiguration",
"message" => {
"timeStamp" => "2018-01-30",
"severity" => "ERROR",
"hostname" => "",
"commonUtility" => {},
"offset" => "Etc/UTC",
"messageCode" => "L_9001",
"correlationId" => "ea5b13c3-d395-4fa5-8124-19902e400316",
"componentName" => "dxp-deamon-refdata-country",
"componentVersion" => "1",
"message" => "Unhandled exceptions",
},
"tags" => [
[0] "webapp-log",
[1] "beats_input_codec_plain_applied",
[2] "_jsonparsefailure"
]
}
I want my @metadata field in the Elasticsearch output.
Below is my conf file:
input {
dead_letter_queue {
path => "/usr/share/logstash/data/dead_letter_queue"
commit_offsets => true
pipeline_id => "main"
}
}
filter {
json {
source => "message"
}
mutate {
rename => { "[#metadata][dead_letter_queue][reason]" => "reason" }
}
}
output {
elasticsearch {
hosts => "elasticsearch"
manage_template => false
index => "deadletterlog-%{+YYYY.MM.dd}"
}
}
Now in my output there is a field called "reason", but it has no content. Is there something I am missing?
This can help:
mutate {
add_field => {
"reason" => "%{[#metadata][dead_letter_queue][reason]}"
"plugin_id" => "%{[#metadata][dead_letter_queue][plugin_id]}"
"plugin_type" => "%{[#metadata][dead_letter_queue][plugin_type]}"
}
}

Logstash Configuration Issue - JSON

I am getting the below error:
LogStash::Json::ParserError: Unexpected character ('.' (code 46)): Expected space separating root-level values
Below is my logstash.conf
input {
beats {
port => 5044
type => beats
codec => json_lines
}
gelf {
port =>5043
type => xxx
}
tcp {
port => 5045
codec => json_lines
type => xxx
}
}
filter {
if [type] == "beats" {
json {
source => "message"
}}
if [type] == "beats" {
geoip {
source => "remote_ip"
target => "geoip"
database => "/etc/logstash/GeoLiteCity.dat"
add_field => [ "[geoip][coordinates]", "%{[geoip][longitude]}" ]
add_field => [ "[geoip][coordinates]", "%{[geoip][latitude]}" ]
}
mutate {
convert => [ "[geoip][coordinates]", "float"]
}}
}
output {
if [type] == "beats" {
amazon_es {
hosts => ["xxx"]
region => "xx"
index => "%{[@metadata][beat]}-%{+YYYY.MM.dd}"
document_type => "%{[@metadata][type]}"
codec => "json"
}
}
if [type] == "ecs" {
amazon_es {
hosts => ["xx"]
region => "x"
index => "%{[tag]}-%{+YYYY.MM.dd}"
#index => "testing-%{+YYYY.MM.dd}"
document_type => "%{[type]}"
codec => "json"
}
}
if [type] == "ecstcp" {
amazon_es {
hosts => ["xx"]
region => "xx"
index => "%{[logstash_index]}-%{+YYYY.MM.dd}"
#index => "filetesting-%{+YYYY.MM.dd}"
document_type => "%{[type]}"
codec => "json"
}
}
}
