I have nginx logs with this date format: [02/Mar/2015:13:02:51 +0000]
What should I use in Elasticsearch, and what should I put in the dateFormat field of Kibana 4?
curl -XGET 'http://localhost:9200/_mapping?pretty'
{
"nginx" : {
"mappings" : {
"t07_nginx" : {
"properties" : {
"#timestamp" : {
"type" : "date",
"format" : "dateOptionalTime"
},
"body_bytes_sent" : {
"type" : "string"
},
"geoip_country_code" : {
"type" : "string"
},
"host" : {
"type" : "string"
},
"http_host" : {
"type" : "string"
},
"http_referer" : {
"type" : "string"
},
"http_user_agent" : {
"type" : "string",
"index" : "not_analyzed"
},
"http_x_forwarded_for" : {
"type" : "string"
},
"message" : {
"type" : "string"
},
"msec request_time" : {
"type" : "string"
},
"remote_addr" : {
"type" : "string"
},
"request_http_protocol" : {
"type" : "string"
},
"request_time" : {
"type" : "string"
},
"request_type" : {
"type" : "string"
},
"request_url" : {
"type" : "string"
},
"status" : {
"type" : "string"
},
"upstream_addr" : {
"type" : "string"
},
"upstream_response_time" : {
"type" : "string"
}
}
}
}
}
}
With the above I can't see any data (events) in Kibana.
Thanks
What do the nginx input plugin and the Elasticsearch output plugin in your fluentd config file look like?
Also, make sure your time range is set up correctly in Kibana. I believe it defaults to the last 15 minutes.
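For the date format itself, [02/Mar/2015:13:02:51 +0000] matches the Joda-style pattern dd/MMM/yyyy:HH:mm:ss Z (without the surrounding brackets, which the input plugin normally strips). Here is a minimal sketch of a mapping that parses it directly, reusing the index and type names from the question and assuming the field is called @timestamp:
curl -XPUT 'http://localhost:9200/nginx' -d '
{
"mappings" : {
"t07_nginx" : {
"properties" : {
"@timestamp" : {
"type" : "date",
"format" : "dd/MMM/yyyy:HH:mm:ss Z"
}
}
}
}
}'
Once Elasticsearch parses the field as a date, Kibana 4 only needs it selected as the time field when you create the index pattern.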
I'm looking for a clean solution for this. Basically, I have arrays which are numbered by an integer from 1 to 50. Rather than repeating my mapping 50 times, is there a workaround for this?
Here is an example of how I would do it for level 1.
Thanks
"test" : {
"properties" : {
"1" : {
"properties" : {
"name" : {
"type" : "string",
"index" : "not_analyzed"
},
"taglevel" : {
"type" : "long"
}
}
},
"2" : {
"properties" : {
"name" : {
"type" : "string",
"index" : "not_analyzed"
},
"taglevel" : {
"type" : "long"
}
}
},
"3" : {
"properties" : {
"name" : {
"type" : "string",
"index" : "not_analyzed"
},
"taglevel" : {
"type" : "long"
}
}
},
... repeated likewise for "4" through "49", until ...
"50" : {
"properties" : {
"name" : {
"type" : "string",
"index" : "not_analyzed"
},
"taglevel" : {
"type" : "long"
}
}
}
}
}
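One way to avoid spelling out all fifty keys is a dynamic template that matches on the sub-field names, so the rule is declared once and applied to every numbered object as documents arrive. A minimal sketch, where the index name myindex is an assumption and the type test comes from the question:
curl -XPUT 'http://localhost:9200/myindex' -d '
{
"mappings" : {
"test" : {
"dynamic_templates" : [ {
"level_names" : {
"path_match" : "*.name",
"mapping" : { "type" : "string", "index" : "not_analyzed" }
}
}, {
"level_tags" : {
"path_match" : "*.taglevel",
"mapping" : { "type" : "long" }
}
} ]
}
}
}'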
I have this mapping for the fuas type:
curl -XGET 'http://localhost:9201/living_team/_mapping/fuas?pretty'
{
"living_v1" : {
"mappings" : {
"fuas" : {
"properties" : {
"backlogStatus" : {
"type" : "long"
},
"comment" : {
"type" : "string"
},
"dueTimestamp" : {
"type" : "date",
"format" : "strict_date_optional_time||epoch_millis"
},
"matter" : {
"type" : "string"
},
"metainfos" : {
"properties" : {
"category 1" : {
"type" : "string"
},
"key" : {
"type" : "string"
},
"null" : {
"type" : "string"
},
"processos" : {
"type" : "string"
}
}
},
"resources" : {
"properties" : {
"noteId" : {
"type" : "string"
},
"resourceId" : {
"type" : "string"
}
}
},
"status" : {
"type" : "long"
},
"timestamp" : {
"type" : "date",
"format" : "strict_date_optional_time||epoch_millis"
},
"user" : {
"type" : "string",
"index" : "not_analyzed"
}
}
}
}
}
}
I'm trying to perform this aggregation:
curl -XGET 'http://ESNode01:9201/living_team/fuas/_search?pretty' -d '
{
"aggs" : {
"demo" : {
"nested" : {
"path" : "metainfos"
},
"aggs" : {
"key" : { "terms" : { "field" : "metainfos.key" } }
}
}
}
}
'
ES responds with:
"error" : {
"root_cause" : [ {
"type" : "aggregation_execution_exception",
"reason" : "[nested] nested path [metainfos] is not nested"
} ],
"type" : "search_phase_execution_exception",
"reason" : "all shards failed",
"phase" : "query_fetch",
"grouped" : true,
"failed_shards" : [ {
"shard" : 3,
"index" : "living_v1",
"node" : "HfaFBiZ0QceW1dpqAnv-SA",
"reason" : {
"type" : "aggregation_execution_exception",
"reason" : "[nested] nested path [metainfos] is not nested"
}
} ]
},
"status" : 500
}
Any ideas?
You're missing "type":"nested" from your metainfos mapping.
Should have been:
"metainfos" : {
"type":"nested",
"properties" : {
"category 1" : {
"type" : "string"
},
"key" : {
"type" : "string"
},
"null" : {
"type" : "string"
},
"processos" : {
"type" : "string"
}
}
}
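Note that an existing object field can't be switched to nested in place; the index has to be created with the corrected mapping and the data reindexed into it. A minimal sketch, where the new index name living_v2 is an assumption:
curl -XPUT 'http://ESNode01:9201/living_v2' -d '
{
"mappings" : {
"fuas" : {
"properties" : {
"metainfos" : {
"type" : "nested",
"properties" : {
"key" : { "type" : "string" }
}
}
}
}
}
}'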
I am trying to build a geo map of a value in Elasticsearch, but client_location is mapped as a string and I would like to change it to geo_point. When I run the following, I get:
#curl -XGET "http://core.z0z0.tk:9200/_all/_mappings/http?pretty"
{
"packetbeat-2015.12.04" : {
"mappings" : {
"http" : {
"properties" : {
"#timestamp" : {
"type" : "date",
"format" : "strict_date_optional_time||epoch_millis"
},
"beat" : {
"properties" : {
"hostname" : {
"type" : "string"
},
"name" : {
"type" : "string"
}
}
},
"bytes_in" : {
"type" : "long"
},
"bytes_out" : {
"type" : "long"
},
"client_ip" : {
"type" : "string"
},
"client_location" : {
"type" : "string"
},
"client_port" : {
"type" : "long"
},
"client_proc" : {
"type" : "string"
},
"client_server" : {
"type" : "string"
},
"count" : {
"type" : "long"
},
"direction" : {
"type" : "string"
},
"http" : {
"properties" : {
"code" : {
"type" : "long"
},
"content_length" : {
"type" : "long"
},
"phrase" : {
"type" : "string"
}
}
},
"ip" : {
"type" : "string"
},
"method" : {
"type" : "string"
},
"notes" : {
"type" : "string"
},
"params" : {
"type" : "string"
},
"path" : {
"type" : "string"
},
"port" : {
"type" : "long"
},
"proc" : {
"type" : "string"
},
"query" : {
"type" : "string"
},
"responsetime" : {
"type" : "long"
},
"server" : {
"type" : "string"
},
"status" : {
"type" : "string"
},
"type" : {
"type" : "string"
}
}
}
}
}
}
When I run the following command to change the type from string to geo_point, I get the following error:
# curl -XPUT "http://localhost:9200/_all/_mappings/http" -d '
> {
> "http" : {
> "properties" : {
> "client_location" : {
> "type" : "geo_point"
> }
> }
> }
> }
> '
{"error":{"root_cause":[{"type":"merge_mapping_exception","reason":"Merge failed with failures {[mapper [client_location] of different type, current_type [string], merged_type[geo_point]]}"}],"type":"merge_mapping_exception","reason":"Merge failed with failures {[mapper [client_location] of different type, current_type [string], merged_type [geo_point]]}"},"status":400}
Any suggestion on how I should correctly change the type?
Thanks in advance.
Unfortunately, once you've created a field you cannot change its type anymore. The best thing to do is to delete the index and recreate it with the proper mapping.
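A minimal sketch of that, reusing the index name from the question (the DELETE is destructive, so the data has to be reloaded afterwards):
curl -XDELETE 'http://localhost:9200/packetbeat-2015.12.04'
curl -XPUT 'http://localhost:9200/packetbeat-2015.12.04' -d '
{
"mappings" : {
"http" : {
"properties" : {
"client_location" : { "type" : "geo_point" }
}
}
}
}'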
Another temporary solution, if you don't want to delete your index immediately, is to create a sub-field of the existing field:
# curl -XPUT "http://localhost:9200/_all/_mappings/http" -d '{
"http": {
"properties": {
"client_location": {
"type": "string",
"fields": {
"geo": {
"type": "geo_point"
}
}
}
}
}
}'
And then you can access it in your queries using client_location.geo.
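For example, a bounding-box filter against the new sub-field might look like this sketch (the index pattern and the coordinates are assumptions):
curl -XGET 'http://localhost:9200/packetbeat-*/http/_search' -d '
{
"query" : {
"bool" : {
"filter" : {
"geo_bounding_box" : {
"client_location.geo" : {
"top_left" : { "lat" : 52.0, "lon" : 4.0 },
"bottom_right" : { "lat" : 50.0, "lon" : 6.0 }
}
}
}
}
}
}'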
Also note that you have to re-index your data in order to populate that new sub-field... which means you might just as well delete your index and re-create it properly.
UPDATE
After installing Packetbeat, you need to make sure to install the Packetbeat index template yourself, as described here (i.e. it is not done automatically):
https://www.elastic.co/guide/en/beats/packetbeat/current/packetbeat-getting-started.html#packetbeat-template
curl -XPUT 'http://localhost:9200/_template/packetbeat' -d@/etc/packetbeat/packetbeat.template.json
I'm new to Elasticsearch; I started working with Elasticsearch 1.7.3 as part of a Logstash-Elasticsearch-Kibana deployment.
I've defined a mapping template for my log messages, this is the interesting part:
{
"template" : "logstash-*",
"settings" : { "index.refresh_interval" : "5s" },
"mappings" : {
"_default_" : {
"_all" : {"enabled" : true, "omit_norms" : true},
"dynamic_templates" : [ {
"date_fields" : {
"match" : "*",
"match_mapping_type" : "date",
"mapping" : { "type" : "date", "doc_values" : true }
}
}],
"properties" : {
"#version" : { "type" : "string", "index" : "not_analyzed" },
"#timestamp" : { "type" : "date", "format" : "dateOptionalTime" },
"message" : { "type" : "string" }
}
} ,
"my_log" : {
"_all" : { "enabled" : true, "omit_norms" : true },
"dynamic_templates" : [ {
"date_fields" : {
"match" : "*",
"match_mapping_type" : "date",
"mapping" : { "type" : "date", "doc_values" : true }
}
}],
"properties" : {
"#timestamp" : { "type" : "date", "format" : "dateOptionalTime" },
"file" : { "type" : "string" },
"message" : { "type" : "string" }
"geolocation" : { "type" : "string" },
}
}
}
}
Although the @timestamp field is defined with doc_values: true, I get a memory exception because it is loaded as fielddata:
[FIELDDATA] Data too large, data for [@timestamp] would be larger than
limit of [633785548/604.4 mb]
NOTE:
I know I can add memory or add more nodes to the cluster, but from my point of view this is a design problem: this field should not be loaded into in-memory fielddata.
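For what it's worth, dynamic templates only apply to dynamically created fields, and the explicit @timestamp property in the template above does not set doc_values itself, which would explain why fielddata is still used. A sketch of the explicit definitions with doc_values enabled (same fields as in the template; message stays analyzed and so cannot use doc_values in 1.7):
"properties" : {
"@version" : { "type" : "string", "index" : "not_analyzed", "doc_values" : true },
"@timestamp" : { "type" : "date", "format" : "dateOptionalTime", "doc_values" : true },
"message" : { "type" : "string" }
}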
I am using the elasticsearch Ruby gem to connect to an ES server, and I currently have an index with the mapping below. I am trying to understand the proper syntax for querying these nested objects. I have been experimenting with queries such as the following, but I keep getting errors. Could someone get me started on the proper syntax for querying a structure like this? Thanks!
client = Elasticsearch::Client.new log:true
client.search index: 'injuries', nested: { path: { week: {id: '1' } } }
Returns:
Elasticsearch::Transport::Transport::Errors::BadRequest: [400] {"error":"SearchPhaseExecutionException[Failed to execute phase [query
Sample Mapping:
{
"injuries" : {
"mappings" : {
"tbd" : {
"properties" : {
"injuries" : {
"properties" : {
"timestamp" : {
"properties" : {
"__content__" : {
"type" : "string"
},
"timeZone" : {
"type" : "string"
}
}
}
}
}
}
},
"football" : {
"properties" : {
"injuries" : {
"properties" : {
"timestamp" : {
"properties" : {
"__content__" : {
"type" : "string"
},
"timeZone" : {
"type" : "string"
}
}
},
"week" : {
"properties" : {
"id" : {
"type" : "string"
},
"inactivePlayers" : {
"properties" : {
"inactivePlayer" : {
"properties" : {
"firstName" : {
"type" : "string"
},
"lastName" : {
"type" : "string"
},
"playerId" : {
"type" : "string"
},
"position" : {
"type" : "string"
},
"status" : {
"type" : "string"
},
"teamId" : {
"type" : "string"
}
}
}
}
},
"injuredPlayers" : {
"properties" : {
"injuredPlayer" : {
"properties" : {
"displayName" : {
"type" : "string"
},
"firstName" : {
"type" : "string"
},
"gameStatus" : {
"type" : "string"
},
"injury" : {
"type" : "string"
},
"lastName" : {
"type" : "string"
},
"playerId" : {
"type" : "string"
},
"position" : {
"type" : "string"
},
"practiceStatus" : {
"type" : "string"
},
"teamId" : {
"type" : "string"
}
}
}
}
},
"season" : {
"type" : "string"
},
"seasonType" : {
"type" : "string"
}
}
}
}
}
}
}
}
}
}
Your nested query doesn't appear to have a query defined. I think it should be something like:
"nested" : {
"path" : "week",
"query" : {
"match" : {"week.id" : "1"}
}
}
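As a full request, that might look like the following sketch. Two assumptions: the path reflects the sample mapping, where week sits under injuries in the football type, and week would also have to be mapped with "type" : "nested" for a nested query to succeed, which the sample mapping does not currently do:
curl -XGET 'http://localhost:9200/injuries/football/_search' -d '
{
"query" : {
"nested" : {
"path" : "injuries.week",
"query" : {
"match" : { "injuries.week.id" : "1" }
}
}
}
}'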