How to hide buckets in Elasticsearch result?

In my query, I aggregate the buckets into one scalar. Since I'm not interested in the individual buckets (of which, in my case, there are tens of millions), I'd like to remove them from the returned result, in the same way that "size": 0 hides all the hits. Is that possible?
E.g.:
{
  "size": 0,
  "aggs": {
    "pop": {
      "terms": {
        "field": "account_number",
        "size": 0
      },
      "aggs": {
        "average": {
          "avg": {
            "field": "price"
          }
        }
      }
    },
    "sum_of_avg": {
      "sum_bucket": {
        "buckets_path": "pop>average"
      }
    }
  }
}
Result:
[...]
"aggregations": {
  "pop": {
    "doc_count_error_upper_bound": 40851,
    "sum_other_doc_count": 93441329,
    "buckets": [...]   <== I don't want this
  },
  "sum_of_avg": {
    "value": 128.0768325884469
  }
}

I just posted an answer in the related question.
In this case the request should look like this:
curl -XPOST 'http://localhost:9200/<index>/_search?filter_path=aggregations.sum_of_avg' -d '
{
  "size": 0,
  "aggs": {
    "pop": {
      "terms": {
        "field": "account_number",
        "size": 0
      },
      "aggs": {
        "average": {
          "avg": {
            "field": "price"
          }
        }
      }
    },
    "sum_of_avg": {
      "sum_bucket": {
        "buckets_path": "pop>average"
      }
    }
  }
}'
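With filter_path=aggregations.sum_of_avg, Elasticsearch strips everything else from the response, so (using the value from the result above) it comes back as just:
{
  "aggregations": {
    "sum_of_avg": {
      "value": 128.0768325884469
    }
  }
}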
PS: if you found another solution, please share it here. Thanks!

I think what you want is the cardinality aggregation. That will return the number of distinct values.
https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-metrics-cardinality-aggregation.html
Example:
GET devdev/alert/_search
{
  "size": 0,
  "aggs": {
    "agg1": {
      "cardinality": {
        "field": "price"
      }
    }
  }
}
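Since cardinality is a metric aggregation, the response contains no buckets at all, only the (approximate) distinct count, along these lines (the value shown is illustrative):
"aggregations": {
  "agg1": {
    "value": 42
  }
}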

Related

How to define percentage of result items with specific field in Elasticsearch query?

I have a search query that returns all items matching users that have type manager or lead.
{
  "from": 0,
  "size": 20,
  "query": {
    "bool": {
      "should": [
        {
          "terms": {
            "type": ["manager", "lead"]
          }
        }
      ]
    }
  }
}
Is there a way to define what percentage of the results should be of type "manager"?
In other words, I want the results to have 80% of users with type manager and 20% with type lead.
I suggest using a bucket_script pipeline aggregation (via buckets_path). As far as I know, this aggregation needs to run as a sub-aggregation of a histogram aggregation. Since you have such a field in your mapping, I think this query should work for you:
{
  "size": 0,
  "aggs": {
    "NAME": {
      "date_histogram": {
        "field": "my_datetime",
        "interval": "month"
      },
      "aggs": {
        "role_type": {
          "terms": {
            "field": "type",
            "size": 10
          },
          "aggs": {
            "count": {
              "value_count": {
                "field": "_id"
              }
            }
          }
        },
        "role_1_ratio": {
          "bucket_script": {
            "buckets_path": {
              "role_1": "role_type['manager']>count",
              "role_2": "role_type['lead']>count"
            },
            "script": "params.role_1 / (params.role_1+params.role_2)*100"
          }
        },
        "role_2_ratio": {
          "bucket_script": {
            "buckets_path": {
              "role_1": "role_type['manager']>count",
              "role_2": "role_type['lead']>count"
            },
            "script": "params.role_2 / (params.role_1+params.role_2)*100"
          }
        }
      }
    }
  }
}
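For each monthly bucket, role_1_ratio and role_2_ratio then hold the percentage split between the two roles. An abridged bucket of the response (dates and counts made up to match the 80/20 example) would look roughly like:
{
  "key_as_string": "2021-01-01T00:00:00.000Z",
  "doc_count": 100,
  "role_type": {
    "buckets": [
      { "key": "manager", "doc_count": 80, "count": { "value": 80 } },
      { "key": "lead", "doc_count": 20, "count": { "value": 20 } }
    ]
  },
  "role_1_ratio": { "value": 80.0 },
  "role_2_ratio": { "value": 20.0 }
}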
Please let me know if it doesn't work well for you.

ElasticSearch 6.6.0 Aggregation Avg with script not working

I have the following I am trying to run on my cluster:
GET _search
{
  "aggs": {
    "buckets": {
      "terms": {
        "field": "main_feature_id.keyword",
        "size": 10
      },
      "aggs": {
        "average_dwell": {
          "avg": {
            "field": "dwell.dwell_ms",
            "script": {
              "lang": "painless",
              "source": "long x = Math.round(_value*100)/100000; return x;"
            }
          }
        }
      }
    }
  }
}
But no matter what I try I cannot get it to round the result.
Here is what the result looks like:
"doc_count" : 26032,
"average_dwell" : {
"value" : 44.87277178006528
}
Can someone please tell me what I am doing wrong? I am sure it is something obvious.
Thank you!
A script on _value is applied to each value of the document, and the average is then calculated over the modified values. What you actually seem to want is to reduce the precision of the resulting average to two decimal places. This can be achieved with a bucket_script pipeline aggregation on top of the average:
{
  "aggs": {
    "buckets": {
      "terms": {
        "field": "main_feature_id.keyword",
        "size": 10
      },
      "aggs": {
        "average_dwell": {
          "avg": {
            "field": "dwell.dwell_ms"
          }
        },
        "rounded_avg": {
          "bucket_script": {
            "buckets_path": {
              "curr_avg": "average_dwell"
            },
            "script": "Math.round(params.curr_avg * 100)/100.0;"
          }
        }
      }
    }
  }
}
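With the value from the question, each terms bucket would then carry both the raw and the rounded average, roughly like this:
"average_dwell" : {
  "value" : 44.87277178006528
},
"rounded_avg" : {
  "value" : 44.87
}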

Elasticsearch Aggregations: Only return results of one of them?

I'm trying to find a way to only return the results of one aggregation in an Elasticsearch query. I have a max bucket aggregation (the one that I want to see) that is calculated from a sum bucket aggregation based on a date histogram aggregation. Right now, I have to go through 1,440 results to get to the one I want to see. I've already removed the results of the base query with the size: 0 modifier, but is there a way to do something similar with the aggregations as well? I've tried slipping the same thing into a few places with no luck.
Here's the query:
{
  "size": 0,
  "query": {
    "range": {
      "timestamp": {
        "gte": "2018-11-28",
        "lte": "2018-11-28"
      }
    }
  },
  "aggs": {
    "hits_per_minute": {
      "date_histogram": {
        "field": "timestamp",
        "interval": "minute"
      },
      "aggs": {
        "total_hits": {
          "sum": {
            "field": "hits_count"
          }
        }
      }
    },
    "max_transactions_per_minute": {
      "max_bucket": {
        "buckets_path": "hits_per_minute>total_hits"
      }
    }
  }
}
Fortunately enough, you can do that with the bucket_sort aggregation, which was added in Elasticsearch 6.4.
Do it with bucket_sort
POST my_index/doc/_search
{
  "size": 0,
  "query": {
    "range": {
      "timestamp": {
        "gte": "2018-11-28",
        "lte": "2018-11-28"
      }
    }
  },
  "aggs": {
    "hits_per_minute": {
      "date_histogram": {
        "field": "timestamp",
        "interval": "minute"
      },
      "aggs": {
        "total_hits": {
          "sum": {
            "field": "hits_count"
          }
        },
        "max_transactions_per_minute": {
          "bucket_sort": {
            "sort": [
              {"total_hits": {"order": "desc"}}
            ],
            "size": 1
          }
        }
      }
    }
  }
}
This will give you a response like this:
{
  ...
  "aggregations": {
    "hits_per_minute": {
      "buckets": [
        {
          "key_as_string": "2018-11-28T21:10:00.000Z",
          "key": 1543957800000,
          "doc_count": 3,
          "total_hits": {
            "value": 11
          }
        }
      ]
    }
  }
}
Note that there is no extra aggregation in the output, and the output of hits_per_minute is truncated, because we asked for exactly one, topmost bucket.
Do it with filter_path
There is also a generic way to filter the output of Elasticsearch: Response filtering, as this answer suggests.
In this case it will be enough to just do the following query:
POST my_index/doc/_search?filter_path=aggregations.max_transactions_per_minute
{ ... (original query) ... }
That would give the response:
{
  "aggregations": {
    "max_transactions_per_minute": {
      "value": 11,
      "keys": [
        "2018-12-04T21:10:00.000Z"
      ]
    }
  }
}

How to use cumulative_sum with a previous aggregation?

I would like to plot a cumulative sum of some events, per day. The cumulative sum aggregation seems to be the way to go, so I tried to reuse the example given in the docs.
The first aggregation works fine; the following query
{
  "aggs": {
    "vulns_day": {
      "date_histogram": {
        "field": "HOST_START_iso",
        "interval": "day"
      }
    }
  }
}
returns buckets such as
(...)
{
  "key_as_string": "2016-09-08T00:00:00.000Z",
  "key": 1473292800000,
  "doc_count": 76330
},
{
  "key_as_string": "2016-09-09T00:00:00.000Z",
  "key": 1473379200000,
  "doc_count": 37712
},
(...)
I then wanted to query the cumulative sum of doc_count above via
{
  "aggs": {
    "vulns_day": {
      "date_histogram": {
        "field": "HOST_START_iso",
        "interval": "day"
      }
    },
    "aggs": {
      "vulns_cumulated": {
        "cumulative_sum": {
          "buckets_path": "doc_count"
        }
      }
    }
  }
}
but it gives an error:
"reason": {
"type": "search_parse_exception",
"reason": "Could not find aggregator type [vulns_cumulated] in [aggs]",
I see that buckets_path should point to the elements to be summed, and the example for the cumulative sum aggregation creates a specific intermediate sum, but I do not have anything to sum (besides doc_count).
I guess you should change your query like this:
{
  "aggs": {
    "vulns_day": {
      "date_histogram": {
        "field": "HOST_START_iso",
        "interval": "day"
      },
      "aggs": {
        "document_count": {
          "value_count": {
            "field": "HOST_START_iso"
          }
        },
        "vulns_cumulated": {
          "cumulative_sum": {
            "buckets_path": "document_count"
          }
        }
      }
    }
  }
}
I found the solution. Since doc_count did not seem to be available, I retrieved the stats for the time parameter and used its count value. It worked:
{
  "size": 0,
  "aggs": {
    "vulns_day": {
      "date_histogram": {
        "field": "HOST_START_iso",
        "interval": "day"
      },
      "aggs": {
        "dates_stats": {
          "stats": {
            "field": "HOST_START_iso"
          }
        },
        "vulns_cumulated": {
          "cumulative_sum": {
            "buckets_path": "dates_stats.count"
          }
        }
      }
    }
  }
}
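As a side note beyond the two answers above: the buckets_path syntax also accepts the special _count path, which refers to each bucket's doc_count directly, so on a reasonably recent Elasticsearch the cumulative sum should work without any intermediate metric. A minimal sketch, assuming the same index and field:
{
  "size": 0,
  "aggs": {
    "vulns_day": {
      "date_histogram": {
        "field": "HOST_START_iso",
        "interval": "day"
      },
      "aggs": {
        "vulns_cumulated": {
          "cumulative_sum": {
            "buckets_path": "_count"
          }
        }
      }
    }
  }
}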

Elasticsearch minBy

Is there a way in Elasticsearch to get a field from the document containing the maximum value? (Basically working similarly to maxBy from Scala.)
For example (mocked):
{
  "aggregations": {
    "grouped": {
      "terms": {
        "field": "grouping",
        "order": {
          "docWithMin": "asc"
        }
      },
      "aggregations": {
        "withMax": {
          "max": {
            "maxByField": "a",
            "field": "b"
          }
        }
      }
    }
  }
}
For which {"grouping":1,"a":2,"b":5},{"grouping":1,"a":1,"b":10}
would return (something like): {"grouped":1,"withMax":5}, where the max comes from the first object due to "a" being higher there.
Assuming you just want the document back for which a is maximum, you can do this:
{
  "size": 0,
  "aggs": {
    "grouped": {
      "terms": {
        "field": "grouping"
      },
      "aggs": {
        "maxByA": {
          "top_hits": {
            "sort": [
              {"a": {"order": "desc"}}
            ],
            "size": 1
          }
        }
      }
    }
  }
}
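For the two mocked documents from the question, each grouping bucket then contains a single top hit whose _source is the document with the highest a, so b can be read from it. An abridged response for that data:
"grouped": {
  "buckets": [
    {
      "key": 1,
      "doc_count": 2,
      "maxByA": {
        "hits": {
          "hits": [
            { "_source": { "grouping": 1, "a": 2, "b": 5 } }
          ]
        }
      }
    }
  ]
}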
