To evaluate its potential to help with our daily operations, I have deployed Elasticsearch and Kibana (7.7.1 with a Basic license) and created an index template for Ntopng (our monitoring platform).
Since the indices keep growing, I want to delete Ntopng indices older than 20 days or so. I have therefore created a lifecycle policy called ntopng in which the time-stamped index rolls over after 1 day (for testing purposes) and is then deleted 2 days after the rollover:
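A minimal sketch of what that policy looks like as an API call (the hot phase matches the phase_definition shown further down; the delete phase is assumed from the description above):
PUT _ilm/policy/ntopng
{
  "policy" : {
    "phases" : {
      "hot" : {
        "min_age" : "0ms",
        "actions" : {
          "rollover" : { "max_age" : "1d" },
          "set_priority" : { "priority" : 100 }
        }
      },
      "delete" : {
        "min_age" : "2d",
        "actions" : {
          "delete" : { }
        }
      }
    }
  }
}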
Next I picked a time-stamped index created that day and applied the lifecycle policy to it. Before doing that, I had to create an alias for that index, so I created it manually:
POST /_aliases
{
"actions" : [
{ "add" : { "index" : "ntopng-2020.09.09", "alias" : "ntopng_Alias" } }
]
}
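For completeness, attaching the policy to an existing index is typically done through its settings; a sketch with the names used here (index.lifecycle.rollover_alias must match the alias created above):
PUT ntopng-2020.09.09/_settings
{
  "index.lifecycle.name" : "ntopng",
  "index.lifecycle.rollover_alias" : "ntopng_Alias"
}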
All looked good after that (I guess), as no errors or alarms were displayed:
"indices" : {
"ntopng-2020.09.09" : {
"index" : "ntopng-2020.09.09",
"managed" : true,
"policy" : "ntopng",
"lifecycle_date_millis" : 1599609600433,
"age" : "20.14h",
"phase" : "hot",
"phase_time_millis" : 1599681721821,
"action" : "rollover",
"action_time_millis" : 1599680521920,
"step" : "check-rollover-ready",
"step_time_millis" : 1599681721821,
"is_auto_retryable_error" : true,
"failed_step_retry_count" : 1,
"phase_execution" : {
"policy" : "ntopng",
"phase_definition" : {
"min_age" : "0ms",
"actions" : {
"rollover" : {
"max_age" : "1d"
},
"set_priority" : {
"priority" : 100
}
}
},
"version" : 4,
"modified_date_in_millis" : 1599509572867
}
}
}
}
My expectation was that the next day the policy would automatically roll over to the next index (ntopng-2020.09.10), so that the initial index would eventually be deleted two days after the rollover.
Instead, I got the following errors:
GET ntopng-*/_ilm/explain
{
"indices" : {
"ntopng-2020.09.09" : {
"index" : "ntopng-2020.09.09",
"managed" : true,
"policy" : "ntopng",
"lifecycle_date_millis" : 1599609600433,
"age" : "1.94d",
"phase" : "hot",
"phase_time_millis" : 1599776521822,
"action" : "rollover",
"action_time_millis" : 1599680521920,
"step" : "ERROR",
"step_time_millis" : 1599777121822,
"failed_step" : "check-rollover-ready",
"is_auto_retryable_error" : true,
"failed_step_retry_count" : 80,
"step_info" : {
"type" : "illegal_argument_exception",
"reason" : """index name [ntopng-2020.09.09] does not match pattern '^.*-\d+$'""",
"stack_trace" : """java.lang.IllegalArgumentException: index name [ntopng-2020.09.09] does not match pattern '^.*-\d+$'
at org.elasticsearch.action.admin.indices.rollover.TransportRolloverAction.generateRolloverIndexName(TransportRolloverAction.java:241)
at org.elasticsearch.action.admin.indices.rollover.TransportRolloverAction.masterOperation(TransportRolloverAction.java:133)
at org.elasticsearch.action.admin.indices.rollover.TransportRolloverAction.masterOperation(TransportRolloverAction.java:73)
at org.elasticsearch.action.support.master.TransportMasterNodeAction$AsyncSingleAction.lambda$doStart$3(TransportMasterNodeAction.java:170)
at org.elasticsearch.action.ActionRunnable$2.doRun(ActionRunnable.java:73)
at org.elasticsearch.common.util.concurrent.AbstractRunnable.run(AbstractRunnable.java:37)
at org.elasticsearch.common.util.concurrent.EsExecutors$DirectExecutorService.execute(EsExecutors.java:225)
at org.elasticsearch.action.support.master.TransportMasterNodeAction$AsyncSingleAction.doStart(TransportMasterNodeAction.java:170)
at org.elasticsearch.action.support.master.TransportMasterNodeAction$AsyncSingleAction.start(TransportMasterNodeAction.java:133)
at org.elasticsearch.action.support.master.TransportMasterNodeAction.doExecute(TransportMasterNodeAction.java:110)
at org.elasticsearch.action.support.master.TransportMasterNodeAction.doExecute(TransportMasterNodeAction.java:59)
at org.elasticsearch.action.support.TransportAction$RequestFilterChain.proceed(TransportAction.java:153)
at org.elasticsearch.xpack.security.action.filter.SecurityActionFilter.apply(SecurityActionFilter.java:123)
at org.elasticsearch.action.support.TransportAction$RequestFilterChain.proceed(TransportAction.java:151)
at org.elasticsearch.action.support.TransportAction.execute(TransportAction.java:129)
at org.elasticsearch.action.support.TransportAction.execute(TransportAction.java:64)
at org.elasticsearch.client.node.NodeClient.executeLocally(NodeClient.java:83)
at org.elasticsearch.client.node.NodeClient.doExecute(NodeClient.java:72)
at org.elasticsearch.client.support.AbstractClient.execute(AbstractClient.java:399)
at org.elasticsearch.xpack.core.ClientHelper.executeAsyncWithOrigin(ClientHelper.java:92)
at org.elasticsearch.xpack.core.ClientHelper.executeWithHeadersAsync(ClientHelper.java:155)
at org.elasticsearch.xpack.ilm.LifecyclePolicySecurityClient.doExecute(LifecyclePolicySecurityClient.java:51)
at org.elasticsearch.client.support.AbstractClient.execute(AbstractClient.java:399)
at org.elasticsearch.client.support.AbstractClient$IndicesAdmin.execute(AbstractClient.java:1234)
at org.elasticsearch.client.support.AbstractClient$IndicesAdmin.rolloverIndex(AbstractClient.java:1736)
at org.elasticsearch.xpack.core.ilm.WaitForRolloverReadyStep.evaluateCondition(WaitForRolloverReadyStep.java:127)
at org.elasticsearch.xpack.ilm.IndexLifecycleRunner.runPeriodicStep(IndexLifecycleRunner.java:173)
at org.elasticsearch.xpack.ilm.IndexLifecycleService.triggerPolicies(IndexLifecycleService.java:329)
at org.elasticsearch.xpack.ilm.IndexLifecycleService.triggered(IndexLifecycleService.java:267)
at org.elasticsearch.xpack.core.scheduler.SchedulerEngine.notifyListeners(SchedulerEngine.java:183)
at org.elasticsearch.xpack.core.scheduler.SchedulerEngine$ActiveSchedule.run(SchedulerEngine.java:211)
at java.base/java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:515)
at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264)
at java.base/java.util.concurrent.ScheduledThreadPoolExecutor$ScheduledFutureTask.run(ScheduledThreadPoolExecutor.java:304)
at java.base/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1130)
at java.base/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:630)
at java.base/java.lang.Thread.run(Thread.java:832)
"""
},
"phase_execution" : {
"policy" : "ntopng",
"phase_definition" : {
"min_age" : "0ms",
"actions" : {
"rollover" : {
"max_age" : "1d"
},
"set_priority" : {
"priority" : 100
}
}
},
"version" : 4,
"modified_date_in_millis" : 1599509572867
}
},
"ntopng-2020.09.10" : {
"index" : "ntopng-2020.09.10",
"managed" : true,
"policy" : "ntopng",
"lifecycle_date_millis" : 1599696000991,
"age" : "22.57h",
"phase" : "hot",
"phase_time_millis" : 1599776521844,
"action" : "rollover",
"action_time_millis" : 1599696122033,
"step" : "ERROR",
"step_time_millis" : 1599777121839,
"failed_step" : "check-rollover-ready",
"is_auto_retryable_error" : true,
"failed_step_retry_count" : 67,
"step_info" : {
"type" : "illegal_argument_exception",
"reason" : "index.lifecycle.rollover_alias [ntopng_Alias] does not point to index [ntopng-2020.09.10]",
"stack_trace" : """java.lang.IllegalArgumentException: index.lifecycle.rollover_alias [ntopng_Alias] does not point to index [ntopng-2020.09.10]
at org.elasticsearch.xpack.core.ilm.WaitForRolloverReadyStep.evaluateCondition(WaitForRolloverReadyStep.java:104)
at org.elasticsearch.xpack.ilm.IndexLifecycleRunner.runPeriodicStep(IndexLifecycleRunner.java:173)
at org.elasticsearch.xpack.ilm.IndexLifecycleService.triggerPolicies(IndexLifecycleService.java:329)
at org.elasticsearch.xpack.ilm.IndexLifecycleService.triggered(IndexLifecycleService.java:267)
at org.elasticsearch.xpack.core.scheduler.SchedulerEngine.notifyListeners(SchedulerEngine.java:183)
at org.elasticsearch.xpack.core.scheduler.SchedulerEngine$ActiveSchedule.run(SchedulerEngine.java:211)
at java.base/java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:515)
at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264)
at java.base/java.util.concurrent.ScheduledThreadPoolExecutor$ScheduledFutureTask.run(ScheduledThreadPoolExecutor.java:304)
at java.base/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1130)
at java.base/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:630)
at java.base/java.lang.Thread.run(Thread.java:832)
"""
},
"phase_execution" : {
"policy" : "ntopng",
"phase_definition" : {
"min_age" : "0ms",
"actions" : {
"rollover" : {
"max_age" : "1d"
},
"set_priority" : {
"priority" : 100
}
}
}
}
}
}
}
The first index error reads: "index name [ntopng-2020.09.09] does not match pattern '^.*-\d+$'", while the second one displays: "index.lifecycle.rollover_alias [ntopng_Alias] does not point to index [ntopng-2020.09.10]".
Please note that I'm still learning the basics of ES index management, so I'd appreciate any clue as to what the problem might be.
OK, I just found that the index name must end with a numeric suffix such as -0001, not 2020.09.09, so I may need to find an alternative way to make this work.
As per that rollover regex, you can use a date pattern as well: instead of 2020.01.01, use 2020-01-01; the name then ends in -01, which matches '^.*-\d+$'. This should work as well. You can check the regex here: https://regex101.com/r/VclptX/1
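Alternatively, the usual fix is to bootstrap a new write index whose name ends in a numeric suffix and point the rollover alias at it. A sketch reusing the names from this question (the date-math name <ntopng-{now/d}-000001> is URL-encoded):
PUT /%3Cntopng-%7Bnow%2Fd%7D-000001%3E
{
  "aliases" : {
    "ntopng_Alias" : {
      "is_write_index" : true
    }
  }
}
ILM can then increment the -000001 suffix on each rollover, and the delete phase removes the old indices as they age out.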
I wanted to test the new wildcard field type in my Elasticsearch instance (Aiven).
I've tried this:
PUT /wildcard_test
{
"mappings" : {
"properties" : {
"wildcard_field" : {
"type" : "wildcard"
}
}
}
}
And I'm getting this response:
{
"error" : {
"root_cause" : [
{
"type" : "mapper_parsing_exception",
"reason" : "No handler for type [wildcard] declared on field [wildcard_field]"
}
],
"type" : "mapper_parsing_exception",
"reason" : "Failed to parse mapping [_doc]: No handler for type [wildcard] declared on field [wildcard_field]",
"caused_by" : {
"type" : "mapper_parsing_exception",
"reason" : "No handler for type [wildcard] declared on field [wildcard_field]"
}
},
"status" : 400
}
Here is the info regarding the instance:
GET /
{
"name" : "...",
"cluster_name" : "...",
"cluster_uuid" : "...",
"version" : {
"number" : "7.9.3",
"build_flavor" : "unknown",
"build_type" : "unknown",
"build_hash" : "c4138e51121ef06a6404866cddc601906fe5c868",
"build_date" : "2020-10-16T10:36:16.141335Z",
"build_snapshot" : false,
"lucene_version" : "8.6.2",
"minimum_wire_compatibility_version" : "6.8.0",
"minimum_index_compatibility_version" : "6.0.0-beta1"
},
"tagline" : "You Know, for Search"
}
GET /_license
{
"error" : {
"root_cause" : [
{
"type" : "invalid_index_name_exception",
"reason" : "Invalid index name [_license], must not start with '_'.",
"index_uuid" : "_na_",
"index" : "_license"
}
],
"type" : "invalid_index_name_exception",
"reason" : "Invalid index name [_license], must not start with '_'.",
"index_uuid" : "_na_",
"index" : "_license"
},
"status" : 400
}
My understanding is that this feature is provided by X-Pack, which I don't know whether or not is included in Aiven's service. Is there some way to make this work?
Although the wildcard field type was indeed added in v7.9, it's (unfortunately) only available as part of an X-Pack subscription, and I presume the distribution running on Aiven is OSS, which is missing this and other X-Pack features.
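One quick way to verify which distribution you're on (assuming Aiven doesn't filter the endpoint) is the X-Pack info API, which only exists on the default distribution:
GET /_xpack
On an OSS build this fails with the same invalid_index_name_exception you got for GET /_license; on the default distribution it returns the list of X-Pack features and whether each is available under the current license.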
Elasticsearch version: 7.10.2, with X-Pack enabled and a Basic license.
(Screenshot: hot phase of the metricbeat policy)
(Screenshot: delete phase of the metricbeat policy)
Why is it not getting applied?
metricbeat-7.10.2-2021.02.10-000001 index details:
{
"indices" : {
"metricbeat-7.10.2-2021.02.10-000001" : {
"index" : "metricbeat-7.10.2-2021.02.10-000001",
"managed" : true,
"policy" : "metricbeat",
"lifecycle_date_millis" : 1612959479882,
"age" : "8m",
"phase" : "hot",
"phase_time_millis" : 1612959480192,
"action" : "rollover",
"action_time_millis" : 1612959917863,
"step" : "check-rollover-ready",
"step_time_millis" : 1612959917863,
"phase_execution" : {
"policy" : "metricbeat",
"phase_definition" : {
"min_age" : "0ms",
"actions" : {
"rollover" : {
"max_size" : "5b",
"max_age" : "5s",
"max_docs" : 5
}
}
},
"version" : 2,
"modified_date_in_millis" : 1612959551839
}
}
}
}
If a policy is modified AFTER an index has been created, it might not kick in as you expect.
ILM runs every 10 minutes by default, but that can be changed via the indices.lifecycle.poll_interval cluster setting.
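For example, to make ILM check conditions every minute while testing (a transient setting, so it resets on a full cluster restart; revert it when done):
PUT _cluster/settings
{
  "transient" : {
    "indices.lifecycle.poll_interval" : "1m"
  }
}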
I have configured my ILM policy to roll over when the index size reaches 20GB or after 30 days in the hot node, but my index passed 20GB and still didn't move to the cold node. When I run GET _cat/indices?v I get:
green open packetbeat-7.9.2-2020.10.22-000001 RRAnRZrrRZiihscJ3bymig 10 1 63833049 0 44.1gb 22gb
Could you tell me how to solve this, please? Note that in my packetbeat configuration file, I have only changed the number of shards:
setup.template.settings:
index.number_of_shards: 10
index.number_of_replicas: 1
When I run the command GET packetbeat-7.9.2-2020.10.22-000001/_settings I get this output:
{
"packetbeat-7.9.2-2020.10.22-000001" : {
"settings" : {
"index" : {
"lifecycle" : {
"name" : "packetbeat",
"rollover_alias" : "packetbeat-7.9.2"
},
"routing" : {
"allocation" : {
"include" : {
"_tier_preference" : "data_content"
}
}
},
"mapping" : {
"total_fields" : {
"limit" : "10000"
}
},
"refresh_interval" : "5s",
"number_of_shards" : "10",
"provided_name" : "<packetbeat-7.9.2-{now/d}-000001>",
"max_docvalue_fields_search" : "200",
"query" : {
"default_field" : [
"message",
"tags",
"agent.ephemeral_id",
"agent.id",
"agent.name",
"agent.type",
"agent.version",
"as.organization.name",
"client.address",
"client.as.organization.name",
and the output of the command GET /packetbeat-7.9.2-2020.10.22-000001/_ilm/explain is :
{
"indices" : {
"packetbeat-7.9.2-2020.10.22-000001" : {
"index" : "packetbeat-7.9.2-2020.10.22-000001",
"managed" : true,
"policy" : "packetbeat",
"lifecycle_date_millis" : 1603359683835,
"age" : "15.04d",
"phase" : "hot",
"phase_time_millis" : 1603359684332,
"action" : "rollover",
"action_time_millis" : 1603360173138,
"step" : "check-rollover-ready",
"step_time_millis" : 1603360173138,
"phase_execution" : {
"policy" : "packetbeat",
"phase_definition" : {
"min_age" : "0ms",
"actions" : {
"rollover" : {
"max_size" : "50gb",
"max_age" : "30d"
}
}
},
"version" : 1,
"modified_date_in_millis" : 1603359683339
}
}
}
}
It's weird that it shows 50GB!
Thanks for your help.
So I found the solution to this problem. The explain output above still shows the phase definition that was cached when the index entered the hot phase (policy version 1), which is presumably why the 20GB update never took effect. After updating the policy, I removed the policy from the indices that were using it, and then added it back to those indices.
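For reference, that can be done with the ILM remove API, then re-applying the policy through the index settings; a sketch using the index, policy, and alias names from this question:
POST packetbeat-7.9.2-2020.10.22-000001/_ilm/remove

PUT packetbeat-7.9.2-2020.10.22-000001/_settings
{
  "index.lifecycle.name" : "packetbeat",
  "index.lifecycle.rollover_alias" : "packetbeat-7.9.2"
}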
Is there an ES query or some other way to ask Elasticsearch which field is being used as the time field for a specific index?
You can use Kibana to choose the right time field (step 5):
1. In Kibana, open Management, and then click Index Patterns.
2. If this is your first index pattern, the Create index pattern page opens automatically. Otherwise, click Create index pattern in the upper left.
3. Enter "your_index_name*" in the Index pattern field.
4. Click Next step.
5. In Configure settings, select your timestamp field in the Time Filter field name dropdown menu.
6. Click Create index pattern.
Kibana User Guide: Defining your index patterns
Or search in your index mapping for a field with "type" : "date":
curl 'http://localhost:9200/your_index/_mapping?pretty'
{
"your_index" : {
"mappings" : {
"your_index" : {
"properties" : {
"#**timestamp**" : {
"type" : "date"
},
"#version" : {
"type" : "text"
},
"clock" : {
"type" : "long"
},
"host" : {
"type" : "text"
},
"type" : {
"type" : "text"
}
}
}
}
}
}
Get Mapping
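Another option (available since ES 5.4) is the field capabilities API, which reports each field's type directly, so you can look for fields of type date:
curl 'http://localhost:9200/your_index/_field_caps?fields=*&pretty'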
Or look into your indexed documents:
curl 'http://localhost:9200/your_index/_search?pretty'
{
"took" : 2,
"timed_out" : false,
"_shards" : {
"total" : 5,
"successful" : 5,
"failed" : 0
},
"hits" : {
"total" : 1,
"max_score" : 1.0,
"hits" : [
{
"_index" : "your_index",
"_type" : "your_index",
"_id" : "logstash-01.kvm.local",
"_score" : 1.0,
"_source" : {
"#timestamp" : "2018-11-10T18:03:22.822Z",
"host" : "logstash-01.kvm.local",
"#version" : "1",
"clock" : 558753,
"type" : "your_index"
}
}
]
}
}
Search API
When you already have an index created and want to check which field is used as its time field, navigate to Management / Stack Management / Index Patterns, select your index pattern, and search through the fields. The field that is used as the time field has a time (clock) icon next to it.
We're using Logstash to sync Elasticsearch, and we have around 3 million documents. It takes 3 to 4 hours to sync. Currently all we can see is that it started and stopped. Is there any way to see how many records Logstash has processed?
If you're using Logstash 5 or higher, the Logstash Monitoring API can help you. You can see and monitor what's happening inside Logstash as it processes events. If you hit the pipeline stats API, you'll get the total number of processed events per stage and plugin (input/filter/output):
curl -XGET 'localhost:9600/_node/stats/pipelines?pretty'
You'll get this type of response in which you can clearly see at any time how many events have been processed:
{
"pipelines" : {
"test" : {
"events" : {
"duration_in_millis" : 365495,
"in" : 216485,
"filtered" : 216485,
"out" : 216485,
"queue_push_duration_in_millis" : 342466
},
"plugins" : {
"inputs" : [ {
"id" : "35131f351e2dc5ed13ee04265a8a5a1f95292165-1",
"events" : {
"out" : 216485,
"queue_push_duration_in_millis" : 342466
},
"name" : "beats"
} ],
"filters" : [ {
"id" : "35131f351e2dc5ed13ee04265a8a5a1f95292165-2",
"events" : {
"duration_in_millis" : 55969,
"in" : 216485,
"out" : 216485
},
"failures" : 216485,
"patterns_per_field" : {
"message" : 1
},
"name" : "grok"
}, {
"id" : "35131f351e2dc5ed13ee04265a8a5a1f95292165-3",
"events" : {
"duration_in_millis" : 3326,
"in" : 216485,
"out" : 216485
},
"name" : "geoip"
} ],
"outputs" : [ {
"id" : "35131f351e2dc5ed13ee04265a8a5a1f95292165-4",
"events" : {
"duration_in_millis" : 278557,
"in" : 216485,
"out" : 216485
},
"name" : "elasticsearch"
} ]
},
"reloads" : {
"last_error" : null,
"successes" : 0,
"last_success_timestamp" : null,
"last_failure_timestamp" : null,
"failures" : 0
},
"queue" : {
"type" : "memory"
}
}
}
}
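If you only need the running totals rather than per-plugin numbers, the events endpoint is lighter; for example, polling it every few seconds (assuming Logstash's HTTP API is on its default port 9600):
watch -n 5 "curl -s 'localhost:9600/_node/stats/events?pretty'"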