Elasticsearch - aggregate over filtered data

I have a query that returns a set of documents (100). I want to apply an aggregation over these, because they are the most relevant. When I aggregate, however, the aggregation is computed over all results, not over the first 100.
Query:
{
  "size": 100,
  "sort": [
    {
      "_score": {
        "order": "desc"
      }
    }
  ],
  "from": 0,
  "query": {
    .......
  },
  "aggregations": {
    "category.category_id": {
      "nested": {
        "path": "category"
      },
      "aggregations": {
        "category.category_id": {
          "terms": {
            "field": "category.category_id",
            "size": 2,
            "order": {
              "_count": "desc"
            }
          }
        }
      }
    }
  }
}
Result:
{
  "took": 33,
  "timed_out": false,
  "_shards": {
    "total": 4,
    "successful": 4,
    "skipped": 0,
    "failed": 0
  },
  "hits": {
    "total": 1042,
    "max_score": 60,
    "hits": [...100 hits...]
  },
  "aggregations": {
    "category.category_id": {
      "doc_count": 5186,
      "category.category_id": {
        "doc_count_error_upper_bound": 0,
        "sum_other_doc_count": 196,
        "buckets": [
          {
            "key": 2,
            "doc_count": 1042
          },
          {
            "key": 2764,
            "doc_count": 272
          }
          ....
        ]
      }
    }
  }
}
Expected:
{
  "took": 33,
  "timed_out": false,
  "_shards": {
    "total": 4,
    "successful": 4,
    "skipped": 0,
    "failed": 0
  },
  "hits": {
    "total": 1042,
    "max_score": 60,
    "hits": [...100 hits...]
  },
  "aggregations": {
    "category.category_id": {
      "doc_count": 100,
      "category.category_id": {
        "doc_count_error_upper_bound": 0,
        "sum_other_doc_count": x,
        "buckets": [
          {
            "key": 2,
            "doc_count": x (x < 100) (e.g. 37)
          },
          {
            "key": 2764,
            "doc_count": y (y <= 100 - x) (e.g. 10)
          }
          ....
        ]
      }
    }
  }
}
Is it possible to aggregate over filtered data? Or how can I aggregate over only the most relevant data?

You can use a filter aggregation, as described in the Elasticsearch documentation:
{
  "aggs": {
    "agg_name": {
      "filter": {
        ... your query here ...
      },
      "aggs": {
        "by_category_id": {
          "terms": {
            "field": "category.category_id",
            "size": 2,
            "order": {
              "_count": "desc"
            }
          }
        }
      }
    }
  }
}
If you want, you can add one more aggregation inside the second aggs block, as in the sketch below.
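For instance, a minimal sketch combining the filter aggregation with the nested terms aggregation from the question (the filter body, the query placeholder and the aggregation names here are illustrative, not from the original answer):
{
  "size": 100,
  "query": {
    .......
  },
  "aggs": {
    "relevant_docs": {
      "filter": {
        ... the filter you want the buckets restricted to ...
      },
      "aggs": {
        "category": {
          "nested": {
            "path": "category"
          },
          "aggs": {
            "category_id": {
              "terms": {
                "field": "category.category_id",
                "size": 2,
                "order": {
                  "_count": "desc"
                }
              }
            }
          }
        }
      }
    }
  }
}
Keep in mind that a filter aggregation restricts the buckets to documents matching the filter, not to the 100 hits returned on the page; aggregations always run over every document matched by the query.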

Related

Aggs percentage doc_count

So I know my total hits are 182 documents
"hits": {
"total": {
"value": 182,
"relation": "eq"
},
"max_score": null,
"hits": []
},
And then I run an aggregation to find out how many documents have the source instagram or twitter, and it returns:
"bySource": {
"doc_count_error_upper_bound": 0,
"sum_other_doc_count": 0,
"buckets": [
{
"key": "instagram",
"doc_count": 162
},
{
"key": "twitter",
"doc_count": 20
}
]
}
Is it possible to get the percentage of documents that have source twitter and instagram?
So the percentage of documents with source instagram would be 89% and twitter 11%.
My aggregation code is like this:
"aggs": {
"bySource": {
"terms": {
"field": "profile.source.keyword"
}
}
}
Let me know if this is possible.
Thank you
Sure, it is possible using the 'Bucket Script Aggregation'.
An example query might look like this:
{
  "size": 0,
  "aggs": {
    "filters_agg": {
      "filters": {
        "filters": {
          "sourceCount": {
            "match_all": {}
          }
        }
      },
      "aggs": {
        "bySource": {
          "terms": {
            "field": "profile.source.keyword"
          }
        },
        "instagram_count_percentage": {
          "bucket_script": {
            "buckets_path": {
              "instagram_count": "bySource['instagram']>_count",
              "total_count": "_count"
            },
            "script": "Math.round((params.instagram_count * 100)/params.total_count)"
          }
        },
        "twitter_count_percentage": {
          "bucket_script": {
            "buckets_path": {
              "twitter_count": "bySource['twitter']>_count",
              "total_count": "_count"
            },
            "script": "Math.round((params.twitter_count * 100)/params.total_count)"
          }
        }
      }
    }
  }
}
And the response could be something like this:
{
  "took": 1,
  "timed_out": false,
  "_shards": {
    "total": 1,
    "successful": 1,
    "skipped": 0,
    "failed": 0
  },
  "hits": {
    "total": {
      "value": 182,
      "relation": "eq"
    },
    "max_score": null,
    "hits": []
  },
  "aggregations": {
    "filters_agg": {
      "buckets": {
        "sourceCount": {
          "doc_count": 182,
          "bySource": {
            "doc_count_error_upper_bound": 0,
            "sum_other_doc_count": 0,
            "buckets": [
              {
                "key": "instagram",
                "doc_count": 162
              },
              {
                "key": "twitter",
                "doc_count": 20
              }
            ]
          },
          "instagram_count_percentage": {
            "value": 89
          },
          "twitter_count_percentage": {
            "value": 11
          }
        }
      }
    }
  }
}
Adjust it to your own case and mapping, or use it as inspiration.

Showing the selected value in a pipeline aggregation

I'm running an aggregation on the hash of the docs in my set.
Within each bucket I select the oldest and most recent.
I want an overview:
total number of docs
most recent
oldest
I have managed to get the total to work but am struggling with the oldest and most recent.
My query (limited to 2 results in the aggregation until I get it right):
{
  "size": 0,
  "query": {
    "bool": {
      "must_not": [
        {
          "term": {
            "Text_SHA2": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
          }
        }
      ]
    }
  },
  "aggs": {
    "overall_Total": {
      "sum_bucket": {
        "buckets_path": "by_SHA2>_count"
      }
    },
    "overall_MostRecent": {
      "max_bucket": {
        "buckets_path": "by_SHA2>the_MostRecent"
      }
    },
    "by_SHA2": {
      "terms": {
        "field": "Text_SHA2",
        "size": 2
      },
      "aggs": {
        "the_MostRecent": {
          "max": {
            "field": "ReceivedDateUTC"
          }
        },
        "the_Oldest": {
          "min": {
            "field": "ReceivedDateUTC"
          }
        }
      }
    }
  }
}
What I get back:
{
  "took": 341,
  "timed_out": false,
  "_shards": {
    "total": 1,
    "successful": 1,
    "skipped": 0,
    "failed": 0
  },
  "hits": {
    "total": 1163611,
    "max_score": 0.0,
    "hits": []
  },
  "aggregations": {
    "by_SHA2": {
      "doc_count_error_upper_bound": 0,
      "sum_other_doc_count": 1163388,
      "buckets": [
        {
          "key": "0683dcdcd26c16315292ecf02307e9d819a08522b35dff933b406688d8d3edb9",
          "doc_count": 119,
          "the_Oldest": {
            "value": 1.54284803E12,
            "value_as_string": "2018-11-22T00:53:50.000"
          },
          "the_MostRecent": {
            "value": 1.572209574E12,
            "value_as_string": "2019-10-27T20:52:54.000"
          }
        },
        {
          "key": "e757c30feeea67425ba02d8821295954d23bb9f6bf979fb8113d2cdf8f79b378",
          "doc_count": 104,
          "the_Oldest": {
            "value": 1.545930842E12,
            "value_as_string": "2018-12-27T17:14:02.000"
          },
          "the_MostRecent": {
            "value": 1.572340576E12,
            "value_as_string": "2019-10-29T09:16:16.000"
          }
        }
      ]
    },
    "overall_Total": {
      "value": 223.0
    },
    "overall_MostRecent": {
      "value": 1.572340576E12,
      "keys": [
        "e757c30feeea67425ba02d8821295954d23bb9f6bf979fb8113d2cdf8f79b378"
      ]
    }
  }
}
What I'd like to get back (please see difference in "overall_MostRecent" at the end):
{
  "took": 341,
  "timed_out": false,
  "_shards": {
    "total": 1,
    "successful": 1,
    "skipped": 0,
    "failed": 0
  },
  "hits": {
    "total": 1163611,
    "max_score": 0.0,
    "hits": []
  },
  "aggregations": {
    "by_SHA2": {
      "doc_count_error_upper_bound": 0,
      "sum_other_doc_count": 1163388,
      "buckets": [
        {
          "key": "0683dcdcd26c16315292ecf02307e9d819a08522b35dff933b406688d8d3edb9",
          "doc_count": 119,
          "the_Oldest": {
            "value": 1.54284803E12,
            "value_as_string": "2018-11-22T00:53:50.000"
          },
          "the_MostRecent": {
            "value": 1.572209574E12,
            "value_as_string": "2019-10-27T20:52:54.000"
          }
        },
        {
          "key": "e757c30feeea67425ba02d8821295954d23bb9f6bf979fb8113d2cdf8f79b378",
          "doc_count": 104,
          "the_Oldest": {
            "value": 1.545930842E12,
            "value_as_string": "2018-12-27T17:14:02.000"
          },
          "the_MostRecent": {
            "value": 1.572340576E12,
            "value_as_string": "2019-10-29T09:16:16.000"
          }
        }
      ]
    },
    "overall_Total": {
      "value": 223.0
    },
    "overall_MostRecent": {
      "value": 1.572340576E12,
      "value_as_string": "2019-10-29T09:16:16.000"
    }
  }
}
There's obviously something wrong with my "overall_MostRecent" section of the query. If anyone could point that out to me I'd be much obliged.
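Not from the original thread, but one possible simplification: if the overview only needs the overall most recent and oldest timestamps across the matching documents, plain top-level max and min aggregations on ReceivedDateUTC return both value and value_as_string directly:
"aggs": {
  "overall_MostRecent": {
    "max": {
      "field": "ReceivedDateUTC"
    }
  },
  "overall_Oldest": {
    "min": {
      "field": "ReceivedDateUTC"
    }
  }
}
Note that these run over all documents matching the query rather than over the top terms buckets only, whereas max_bucket reports just the numeric value plus the keys of the winning buckets, which is why value_as_string is missing from the response above.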

How to apply a mathematical operation on Elasticsearch aggregation bucket data

I am using Elasticsearch 5.5.1.
My requirement is to apply the following formula: (doc_count * 100 / 10) to the set of data below:
{
  "took": 30,
  "timed_out": false,
  "_shards": {
    "total": 5,
    "successful": 5,
    "failed": 0
  },
  "hits": {
    "total": 13,
    "max_score": 0,
    "hits": []
  },
  "aggregations": {
    "group_by_botId": {
      "doc_count_error_upper_bound": 0,
      "sum_other_doc_count": 0,
      "buckets": [
        {
          "key": 1,
          "doc_count": 9
        },
        {
          "key": 3,
          "doc_count": 4
        }
      ]
    }
  }
}
I should be able to find the solution for each bucket item. I don't want to use the Java SDK for this problem, as the requirement is to use only the Elasticsearch REST API.
You can use the bucket_script aggregation in order to run a script for each bucket:
"aggs": {
"group_by_botId": {
"terms": {
"field": "botId"
},
"aggs": {
"count": {
"bucket_script": {
"buckets_path": {
"doc_count" : "_count"
},
"script": "params.doc_count * 100 / 10"
}
}
}
}
}
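With the sample data above (doc_count values of 9 and 4 and the formula doc_count * 100 / 10), each bucket in the response should then carry the computed value, roughly:
"group_by_botId": {
  "doc_count_error_upper_bound": 0,
  "sum_other_doc_count": 0,
  "buckets": [
    {
      "key": 1,
      "doc_count": 9,
      "count": {
        "value": 90.0
      }
    },
    {
      "key": 3,
      "doc_count": 4,
      "count": {
        "value": 40.0
      }
    }
  ]
}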

Wrong sum with aggregations

I'm trying to run this query:
GET my_index/_search
{
  "size": 0,
  "query": {
    "bool": {
      "filter": [
        {
          "query_string": {
            "query": "_exists_:products_count",
            "default_operator": "AND"
          }
        }
      ]
    }
  },
  "aggs": {
    "pid": {
      "terms": {
        "field": "pid",
        "size": 15,
        "order": {
          "products_counter": "ASC"
        }
      },
      "aggs": {
        "products_counter": {
          "sum": {
            "field": "products_count"
          }
        }
      }
    }
  }
}
The results I get are:
{
  "took": 5,
  "timed_out": false,
  "_shards": {
    "total": 12,
    "successful": 12,
    "failed": 0
  },
  "hits": {
    "total": 489681,
    "max_score": 0,
    "hits": []
  },
  "aggregations": {
    "pid": {
      "doc_count_error_upper_bound": -1,
      "sum_other_doc_count": 488443,
      "buckets": [
        {
          "key": 3229479298,
          "doc_count": 14,
          "products_counter": {
            "value": 26
          }
        },
        {...
However, the actual sum of products_count for the returned pid is 188, not 26.
If I raise the size of the aggregation from 15 to 100000, for example, I do get the right number.
Any help with understanding and fixing my problem?
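One thing worth checking: the terms aggregation is computed per shard, and ordering by a sub-aggregation ascending is a case where the per-shard top-N selection is known to be inaccurate (hence doc_count_error_upper_bound: -1). Instead of raising size to 100000, increasing shard_size (how many terms each shard contributes to the final reduce phase) usually corrects the sums while keeping the response small. A sketch, with an arbitrary shard_size value:
"aggs": {
  "pid": {
    "terms": {
      "field": "pid",
      "size": 15,
      "shard_size": 10000,
      "order": {
        "products_counter": "asc"
      }
    },
    "aggs": {
      "products_counter": {
        "sum": {
          "field": "products_count"
        }
      }
    }
  }
}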

Get count of particular field in a document using Elasticsearch

Requirement:
I want to find the count of aId values for a particular category ID
(i.e. for categoryId 2532 I want the count to be 2, meaning it is assigned to two aIds).
I tried with aggregations, but with those I am only able to get the doc count rather than the field count.
Mappings
"List": {
"properties": {
"aId": {
"type": "long"
},
"CategoryList": {
"properties": {
"categoryId": {
"type": "long"
},
"categoryName": {
"type": "string"
}
}
}
}
}
Sample Document:
"List": [
{
"aId": 33074,
"CategoryList": [
{
"categoryId": 2532,
"categoryName": "VODAFONE"
}
]
},
{
"aId": 12074,
"CategoryList": [
{
"categoryId": 2532,
"categoryName": "VODAFONE"
}
]
},
{
"aId": 120755,
"CategoryList": [
{
"categoryId": 1234,
"categoryName": "SMPLKE"
}
]
}
]
Using a cardinality aggregation will not get you the desired results. The cardinality aggregation returns the count of distinct values for a field, whereas you want to count how many times a field value appears.
You can use the following query. Here you first filter the documents on CategoryList.categoryId and then run a simple terms aggregation on this field:
POST index_name1111/_search
{
  "query": {
    "bool": {
      "must": [
        {
          "term": {
            "CategoryList.categoryId": {
              "value": 2532
            }
          }
        }
      ]
    }
  },
  "aggs": {
    "count_is": {
      "terms": {
        "field": "CategoryList.categoryId",
        "size": 10
      }
    }
  }
}
Response of the above query:
{
  "took": 3,
  "timed_out": false,
  "_shards": {
    "total": 5,
    "successful": 5,
    "failed": 0
  },
  "hits": {
    "total": 2,
    "max_score": 0,
    "hits": []
  },
  "aggregations": {
    "count_is": {
      "doc_count_error_upper_bound": 0,
      "sum_other_doc_count": 0,
      "buckets": [
        {
          "key": 2532,
          "doc_count": 2
        }
      ]
    }
  }
}
Or you can drop the filter altogether; running only the aggregation will return all categoryId values with their counts of appearance.
POST index_name1111/_search
{
  "size": 0,
  "aggs": {
    "count_is": {
      "terms": {
        "field": "CategoryList.categoryId",
        "size": 10
      }
    }
  }
}
Response of the above query:
{
  "took": 2,
  "timed_out": false,
  "_shards": {
    "total": 5,
    "successful": 5,
    "failed": 0
  },
  "hits": {
    "total": 3,
    "max_score": 0,
    "hits": []
  },
  "aggregations": {
    "count_is": {
      "doc_count_error_upper_bound": 0,
      "sum_other_doc_count": 0,
      "buckets": [
        {
          "key": 2532,
          "doc_count": 2
        },
        {
          "key": 1234,
          "doc_count": 1
        }
      ]
    }
  }
}
Using a cardinality aggregation, you will get the following response with the following query:
POST index_name1111/_search
{
  "size": 0,
  "query": {
    "bool": {
      "must": [
        {
          "term": {
            "CategoryList.categoryId": {
              "value": 2532
            }
          }
        }
      ]
    }
  },
  "aggs": {
    "id_count": {
      "cardinality": {
        "field": "CategoryList.categoryId"
      }
    }
  }
}
Response of the above query, which doesn't give you the desired result: two documents matched, both with categoryId 2532, so the count of distinct values is 1.
{
  "took": 4,
  "timed_out": false,
  "_shards": {
    "total": 5,
    "successful": 5,
    "failed": 0
  },
  "hits": {
    "total": 2,
    "max_score": 0,
    "hits": []
  },
  "aggregations": {
    "id_count": {
      "value": 1
    }
  }
}
Hope this helps
Thanks
