Is it possible to fetch the count of the total number of docs that match a qualifying aggregation condition in Elasticsearch?

I use ES v7.3. As per my requirements, I am aggregating some fields to fetch the required docs in the response. Additionally, there is a requirement to fetch the total count of all docs whose nested field qualifies the aggregation condition described below, but I have not found a way to do that.
The current aggregation query that I am using to fetch the documents is:
"aggs": {
"users": {
"composite": {
"sources": [
{
"users": {
"terms": {
"field": "co_profileId.keyword"
}
}
}
],
"size": 5000
},
"aggs": {
"sessions": {
"nested": {
"path": "co_score"
},
"aggs": {
"last_4_days": {
"filter": {
"range": {
"co_score.sessionTime": {
"gte": "2021-01-10T00:00:31.399Z",
"lte": "2021-01-14T01:37:31.399Z"
}
}
},
"aggs": {
"score_count": {
"sum": {
"field": "co_score.value"
}
}
}
}
}
},
"page_view_count_filter": {
"bucket_selector": {
"buckets_path": {
"sessionCount": "sessions > last_4_days > score_count"
},
"script": "params.sessionCount > 100"
}
},
"filtered_users": {
"top_hits": {
"size": 1,
"_source": {
"includes": [
"co_profileId",
"co_type",
"co_score"
]
}
}
}
}
}
}
Sample doc:
{
  "co_profileId": "14654325",
  "co_type": "identify",
  "co_updatedAt": "2021-01-11T11:37:33.499Z",
  "co_score": [
    {
      "value": 3,
      "sessionTime": "2021-01-09T01:37:31.399Z"
    },
    {
      "value": 3,
      "sessionTime": "2021-01-10T10:47:33.419Z"
    },
    {
      "value": 6,
      "sessionTime": "2021-01-11T11:37:33.499Z"
    }
  ]
}

Related

Elasticsearch aggregation with unique counting

My documents consist of a history of orders and their states; here is a minimal example:
{
  "orderNumber": "xyz",
  "state": "shipping",
  "day": "2022-07-20",
  "timestamp": "2022-07-20T15:06:44.290Z"
}
The state can be strings like shipping, processing, redo, and so on.
For every possible state, I need to count the number of orders that had this state at some point during a day, without counting a state twice for the same orderNumber that day (which can happen if there is a problem and it needs to start from the beginning that same day).
My aggregation looks like this:
GET order-history/_search
{
  "aggs": {
    "countDays": {
      "terms": {
        "field": "day",
        "order": {
          "_key": "desc"
        },
        "size": 20
      },
      "aggs": {
        "countStates": {
          "terms": {
            "field": "state.keyword",
            "size": 10
          }
        }
      }
    }
  },
  "size": 1
}
However, this will count a state for a given orderNumber twice if it reappears that same day. How would I prevent it from counting a state twice for each orderNumber, if it is on the same day?
Tl;dr:
I don't think there is a flexible and simple solution.
But if you know in advance which states exist, you can handle each one with its own filter sub-aggregation. The set of state values could be fetched up front with a separate aggregation query, as sketched just below.
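One possible discovery query, for example (assuming the same throwaway index 73138766 and the state.keyword sub-field; raise size if there can be more distinct states):
GET 73138766/_search
{
  "size": 0,
  "aggs": {
    "all_states": {
      "terms": {
        "field": "state.keyword",
        "size": 100
      }
    }
  }
}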
You could do the following
POST /_bulk
{"index":{"_index":"73138766"}}
{"orderNumber":"xyz","state":"shipping","day":"2022-07-20"}
{"index":{"_index":"73138766"}}
{"orderNumber":"xyz","state":"redo","day":"2022-07-20"}
{"index":{"_index":"73138766"}}
{"orderNumber":"xyz","state":"shipping","day":"2022-07-20"}
{"index":{"_index":"73138766"}}
{"orderNumber":"bbb","state":"processing","day":"2022-07-20"}
{"index":{"_index":"73138766"}}
{"orderNumber":"bbb","state":"shipping","day":"2022-07-20"}
GET 73138766/_search
{
  "size": 0,
  "aggs": {
    "per_day": {
      "date_histogram": {
        "field": "day",
        "calendar_interval": "day"
      },
      "aggs": {
        "shipping": {
          "filter": { "term": { "state.keyword": "shipping" } },
          "aggs": {
            "orders": {
              "cardinality": {
                "field": "orderNumber.keyword"
              }
            }
          }
        },
        "processing": {
          "filter": { "term": { "state.keyword": "processing" } },
          "aggs": {
            "orders": {
              "cardinality": {
                "field": "orderNumber.keyword"
              }
            }
          }
        },
        "redo": {
          "filter": { "term": { "state.keyword": "redo" } },
          "aggs": {
            "orders": {
              "cardinality": {
                "field": "orderNumber.keyword"
              }
            }
          }
        }
      }
    }
  }
}
You will obtain the following results
{
  "aggregations": {
    "per_day": {
      "buckets": [
        {
          "key_as_string": "2022-07-20T00:00:00.000Z",
          "key": 1658275200000,
          "doc_count": 5,
          "shipping": {
            "doc_count": 3,
            "orders": {
              "value": 2
            }
          },
          "processing": {
            "doc_count": 1,
            "orders": {
              "value": 1
            }
          },
          "redo": {
            "doc_count": 1,
            "orders": {
              "value": 1
            }
          }
        }
      ]
    }
  }
}

Filter based on different values for the same field in different documents

Let's say I have the following data:
{
  "id": "1",
  "name": "John",
  "tag": "x"
},
{
  "id": 2,
  "name": "John",
  "tag": "y"
},
{
  "id": 3,
  "name": "Jane",
  "tag": "x"
}
I want to get the count of documents (unique on name) that have both tag = "x" and tag = "y".
Given the above data, the query should return 1, because only John has two documents with the two required tags.
What I have been able to do so far is a query that uses OR (so either tag = "x" or tag = "y"), which returns 2. For example:
"aggs": {
"distict_count": {
"filter": {
"terms": {
"tag": [
"x",
"y"
]
}
},
"aggs": {
"agg_cardinality_name": {
"cardinality": {
"field": "name"
}
}
}
}
}
Would it be possible to change that to use AND instead of OR?
Try putting cardinality under a terms agg to get proper distinct counts:
{
  "size": 0,
  "aggs": {
    "distict_count": {
      "filter": {
        "terms": {
          "tag": [
            "x",
            "y"
          ]
        }
      },
      "aggs": {
        "agg_terms": {
          "terms": {
            "field": "name"
          },
          "aggs": {
            "agg_cardinality_name": {
              "cardinality": {
                "field": "name"
              }
            }
          }
        }
      }
    }
  }
}
CORRECTION
You can use a combination of cardinality aggs with a bucket_selector, which rules out buckets with fewer than 2 unique tags -- i.e. keeps only names that have both x and y:
{
  "size": 0,
  "aggs": {
    "distict_count": {
      "filter": {
        "terms": {
          "tag": [
            "x",
            "y"
          ]
        }
      },
      "aggs": {
        "agg_terms": {
          "terms": {
            "field": "name"
          },
          "aggs": {
            "agg_cardinality_tag2": {
              "bucket_selector": {
                "buckets_path": {
                  "unique_tags_count": "unique_tags_count"
                },
                "script": "params.unique_tags_count > 1"
              }
            },
            "unique_tags_count": {
              "cardinality": {
                "field": "tag"
              }
            },
            "unique_names_count": {
              "cardinality": {
                "field": "name"
              }
            }
          }
        }
      }
    }
  }
}

How to diversify the result of top-hits aggregation?

Let's start with a concrete example. I have a document with these fields:
{
  "template": {
    "mappings": {
      "template": {
        "properties": {
          "tid": {
            "type": "long"
          },
          "folder_id": {
            "type": "long"
          },
          "status": {
            "type": "integer"
          },
          "major_num": {
            "type": "integer"
          }
        }
      }
    }
  }
}
I want to aggregate the query result by the field folder_id and, for each group divided by folder_id, retrieve the top-N documents' _source detail. So I wrote a query DSL like this:
GET /template/template/_search
{
  "size": 0,
  "query": {
    "bool": {
      "filter": [
        {
          "term": {
            "status": 1
          }
        }
      ]
    }
  },
  "aggs": {
    "folder": {
      "terms": {
        "field": "folder_id",
        "size": 10
      },
      "aggs": {
        "top_hit": {
          "top_hits": {
            "size": 5,
            "_source": ["major_num"]
          }
        }
      }
    }
  }
}
However, there is now a requirement that the top-hits documents for each folder_id must be diversified on the field major_num. For each folder_id, the documents retrieved by the sub top_hits aggregation under the terms aggregation must be unique on major_num, i.e. at most one document should be returned per major_num value.
The top_hits aggregation cannot accept sub-aggregations, so how should I solve this?
Why not simply add another terms aggregation on the major_num field?
GET /template/template/_search
{
  "size": 0,
  "query": {
    "bool": {
      "filter": [
        {
          "term": {
            "status": 1
          }
        }
      ]
    }
  },
  "aggs": {
    "folder": {
      "terms": {
        "field": "folder_id",
        "size": 10
      },
      "aggs": {
        "majornum": {
          "terms": {
            "field": "major_num",
            "size": 10
          },
          "aggs": {
            "top_hit": {
              "top_hits": {
                "size": 1
              }
            }
          }
        }
      }
    }
  }
}
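If separate major_num buckets are not quite what you want (for example because the top 5 documents per folder should be ranked purely by score and merely de-duplicated on major_num), another option worth trying is the diversified_sampler aggregation, which limits how many documents per major_num value are fed into its sub-aggregations. This is only a sketch with default settings otherwise: diversification is applied per shard, so duplicates can still slip through across shards, and the sampler cannot be nested under a terms aggregation that uses the breadth_first collect mode, hence the explicit collect_mode below.
GET /template/template/_search
{
  "size": 0,
  "query": {
    "bool": {
      "filter": [
        { "term": { "status": 1 } }
      ]
    }
  },
  "aggs": {
    "folder": {
      "terms": {
        "field": "folder_id",
        "size": 10,
        "collect_mode": "depth_first"
      },
      "aggs": {
        "diversified": {
          "diversified_sampler": {
            "field": "major_num",
            "max_docs_per_value": 1,
            "shard_size": 100
          },
          "aggs": {
            "top_hit": {
              "top_hits": {
                "size": 5,
                "_source": ["major_num"]
              }
            }
          }
        }
      }
    }
  }
}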

Elasticsearch distinct records in order with pagination

How do I get records after aggregating on a terms field, in order and with pagination? So far I have this:
{
  "query": {
    "bool": {
      "filter": [
        {
          "terms": {
            "user_id.keyword": [
              "user#domain.com"
            ]
          }
        },
        {
          "range": {
            "creation_time": {
              "gte": "2019-02-04T19:00:00.000Z",
              "lte": "2019-05-04T19:00:00.000Z"
            }
          }
        }
      ],
      "should": [
        { "wildcard": { "operation": "*sol*" } },
        { "wildcard": { "object_id": "*sol*" } },
        { "wildcard": { "user_id": "*sol*" } },
        { "wildcard": { "user_type": "*sol*" } },
        { "wildcard": { "client_ip": "*sol*" } },
        { "wildcard": { "country": "*sol*" } },
        { "wildcard": { "workload": "*sol*" } }
      ]
    }
  },
  "aggs": {
    "user_ids": {
      "terms": {
        "field": "country.keyword",
        "include": ".*United.*"
      }
    }
  },
  "from": 0,
  "size": 10,
  "sort": [
    {
      "creation_time": {
        "order": "desc"
      }
    }
  ]
}
I looked into this, and some people say it's possible by using composite aggregations or partitions, but I am not sure how to actually achieve this.
I also looked into bucket_sort, but I can't seem to get it to work:
"my_bucket_sort": {
"bucket_sort": {
"sort": [
{
"user_ids": {
"order": "desc"
}
}
],
"size": 3
}
}
I am a noob at this. Kindly help me out. Thanks.
As the field is country, which presumably doesn't have a high cardinality, you could set size to a sufficiently high number to return all countries in a single request:
"aggs": {
"user_ids": {
"terms": {
"field": "country.keyword",
"include": ".*United.*",
"size": 10000
}
}
}
Alternatively, for a high-cardinality field, you could filter the aggregation first and then use partitioning to page through the values:
{
  "size": 0,
  "aggs": {
    "user_ids": {
      "filter": {
        "wildcard": { "country": "*United*" }
      },
      "aggs": {
        "countries": {
          "terms": {
            "field": "country.keyword",
            "include": {
              "partition": 0,
              "num_partitions": 20
            },
            "size": 10000
          }
        }
      }
    }
  }
}
where you would increase the value of partition with each query you send, up to 19 (num_partitions - 1).
See the Elastic documentation on filtering terms values with partitions for further details.
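For completeness, since the question also mentions composite aggregations: below is a minimal sketch of paging through the distinct country values with a composite source and its after key (the index name my-index and the page size are placeholders; the "United" restriction would need to be applied in the query, e.g. with a wildcard filter as above, since a composite terms source takes no include pattern).
GET my-index/_search
{
  "size": 0,
  "aggs": {
    "countries": {
      "composite": {
        "size": 10,
        "sources": [
          { "country": { "terms": { "field": "country.keyword" } } }
        ]
      }
    }
  }
}
Each response contains an after_key; pass it back on the next request as "after": { "country": "<last key>" } inside the composite block to fetch the next page. Note that composite sources can only be ordered by key, not by document count.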

Need aggregation on document inner array object - ElasticSearch

I am trying to do an aggregation over the following document:
{
  "pid": 900000,
  "mid": 9000,
  "cid": 90,
  "bid": 1000,
  "gmv": 1000000,
  "vol": 200,
  "data": [
    {
      "date": "25-11-2018",
      "gmv": 100000,
      "vol": 20
    },
    {
      "date": "24-11-2018",
      "gmv": 100000,
      "vol": 20
    },
    {
      "date": "23-11-2018",
      "gmv": 100000,
      "vol": 20
    }
  ]
}
The analysis that needs to be done here is:
Filter on mid and/or cid across all documents.
Apply a range filter on data.date for the last 7 days and sum data.vol over that range for each pid.
Sort the documents by the sum obtained in the previous step, in descending order.
Group these results by pid.
In other words, we are trying to get the top products by summed volume (quantity sold) within a date range for a specific cid/mid.
Here, pid refers to the product ID, mid to the merchant ID, and cid to the category ID.
First, you need to change your mapping so you can run the query on nested fields: change the type of the 'data' field to 'nested'.
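A minimal mapping sketch for that, using the typeless mapping syntax of ES 7+ (the index name products is hypothetical, and the dd-MM-yyyy date format is assumed from the sample document; adjust both to your setup):
PUT products
{
  "mappings": {
    "properties": {
      "pid": { "type": "long" },
      "mid": { "type": "long" },
      "cid": { "type": "long" },
      "bid": { "type": "long" },
      "gmv": { "type": "long" },
      "vol": { "type": "long" },
      "data": {
        "type": "nested",
        "properties": {
          "date": { "type": "date", "format": "dd-MM-yyyy" },
          "gmv": { "type": "long" },
          "vol": { "type": "long" }
        }
      }
    }
  }
}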
Then you can use a range query in the filter, along with a terms filter on mid/cid, to narrow down the data. Once you have the correct data set, you can aggregate on pid with a sub-aggregation that sums vol.
Here is the query:
{
  "query": {
    "bool": {
      "filter": [
        {
          "bool": {
            "must": [
              {
                "range": {
                  "data.date": {
                    "gte": "25-11-2018",
                    "lte": "28-11-2018"
                  }
                }
              },
              {
                "terms": {
                  "mid": [
                    "9000"
                  ]
                }
              }
            ]
          }
        }
      ]
    }
  },
  "aggs": {
    "AGG_PID": {
      "terms": {
        "field": "pid",
        "size": 0,
        "order": {
          "TOTAL_SUM": "desc"
        },
        "min_doc_count": 1
      },
      "aggs": {
        "TOTAL_SUM": {
          "sum": {
            "field": "data.vol"
          }
        }
      }
    }
  }
}
You can modify the query accordingly. Hope this will be helpful.
Please find below a nested aggregation query that sorts the "pid" buckets by the summed "vol". You can add any number of filters in the query part.
{
  "size": 0,
  "query": {
    "bool": {
      "must": [
        {
          "term": {
            "mid": "2"
          }
        }
      ]
    }
  },
  "aggs": {
    "top_products_sorted_by_order_volume": {
      "terms": {
        "field": "pid",
        "order": {
          "nested_data_object>order_volume_by_range>order_volume_sum": "desc"
        }
      },
      "aggs": {
        "nested_data_object": {
          "nested": {
            "path": "data"
          },
          "aggs": {
            "order_volume_by_range": {
              "filter": {
                "range": {
                  "data.date": {
                    "gte": "2018-11-26",
                    "lte": "2018-11-27"
                  }
                }
              },
              "aggs": {
                "order_volume_sum": {
                  "sum": {
                    "field": "data.vol"
                  }
                }
              }
            }
          }
        }
      }
    }
  }
}
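One possible refinement: the date range above is applied only inside the aggregation, so products matching mid that have no data entries in the range still come back as buckets with a sum of 0. If those should be excluded at query time as well, a nested query can be added next to the mid term, assuming the same field names and date format as the aggregation:
"query": {
  "bool": {
    "must": [
      { "term": { "mid": "2" } },
      {
        "nested": {
          "path": "data",
          "query": {
            "range": {
              "data.date": {
                "gte": "2018-11-26",
                "lte": "2018-11-27"
              }
            }
          }
        }
      }
    ]
  }
}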
