How to build charts from an Elasticsearch query

I have a query that runs in Elasticsearch, following something similar to https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline-movavg-aggregation.html#_prediction:
GET linux_cpu*/_search?search_type=count
{
  "aggs": {
    "my_date_histo": {
      "date_histogram": {
        "field": "@timestamp",
        "interval": "day"
      },
      "aggs": {
        "the_sum": {
          "avg": {
            "field": "CPU(%)"
          }
        },
        "the_movavg": {
          "moving_avg": {
            "buckets_path": "the_sum",
            "window": 90,
            "model": "holt_winters",
            "settings": {
              "type": "add",
              "alpha": 0.8,
              "beta": 0.2,
              "gamma": 0.7,
              "period": 30
            },
            "predict": 30
          }
        }
      }
    }
  }
}
However, I don't know how I can generate a graph based on this query. Could anyone help with this?
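One way to graph it (a minimal sketch, not from the original thread): pull the histogram buckets out of the JSON response and plot them client-side. The example below uses Python with the requests and matplotlib packages, and assumes Elasticsearch is reachable on localhost:9200 with the index and field names from the question; the legacy ?search_type=count is replaced by "size": 0 in the body, which is the modern equivalent.

import requests
import matplotlib.pyplot as plt

query = {
    "size": 0,  # replaces the legacy ?search_type=count
    "aggs": {
        "my_date_histo": {
            "date_histogram": {"field": "@timestamp", "interval": "day"},
            "aggs": {
                "the_sum": {"avg": {"field": "CPU(%)"}},
                "the_movavg": {
                    "moving_avg": {
                        "buckets_path": "the_sum",
                        "window": 90,
                        "model": "holt_winters",
                        "settings": {"type": "add", "alpha": 0.8, "beta": 0.2,
                                     "gamma": 0.7, "period": 30},
                        "predict": 30
                    }
                }
            }
        }
    }
}

resp = requests.post("http://localhost:9200/linux_cpu*/_search", json=query).json()
buckets = resp["aggregations"]["my_date_histo"]["buckets"]

# Predicted buckets carry only the moving_avg value, and the earliest buckets
# carry no moving_avg at all, so guard both lookups.
dates = [b["key_as_string"] for b in buckets]
avg_cpu = [(b.get("the_sum") or {}).get("value") for b in buckets]
mov_avg = [(b.get("the_movavg") or {}).get("value") for b in buckets]

plt.plot(dates, avg_cpu, label="daily avg CPU(%)")
plt.plot(dates, mov_avg, label="holt_winters moving avg + 30d prediction")
plt.legend()
plt.xticks(rotation=45)
plt.tight_layout()
plt.show()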

Related

Elasticsearch: How to set the 'doc_count' of a filter aggregation in relation to the total 'doc_count'

A seemingly trivial problem prompted me today to read the Elasticsearch documentation diligently again. So far, however, I have not come across the solution.
Question: is there a simple way to set the doc_count of a filter aggregation in relation to the total doc_count?
Here's a snippet from my search request JSON. In the feature_occurrences aggregation I filter documents. Now I want to calculate the ratio of filtered documents to all documents in each time bucket.
GET my_index/_search
{
  "aggs": {
    "time_buckets": {
      "date_histogram": {
        "field": "date",
        "calendar_interval": "1d",
        "min_doc_count": 0
      },
      "aggs": {
        "feature_occurrences": {
          "filter": {
            "term": {
              "x": "y"
            }
          }
        },
        "feature_occurrences_per_doc": {
          // feature_occurrences.doc_count / doc_count
        }
      }
    }
  }
}
Any ideas?
You can use a bucket_script to calculate the ratio:
{
  "aggs": {
    "date": {
      "date_histogram": {
        "field": "@timestamp",
        "interval": "hour"
      },
      "aggs": {
        "feature_occurrences": {
          "filter": {
            "term": {
              "cloud.region": "westeurope"
            }
          }
        },
        "ratio": {
          "bucket_script": {
            "buckets_path": {
              "doc_count": "_count",
              "features_count": "feature_occurrences._count"
            },
            "script": "params.features_count / params.doc_count"
          }
        }
      }
    }
  }
}
Elastic bucket script doc:
https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline-bucket-script-aggregation.html
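For reference, _count in a buckets_path is the built-in path that resolves to each bucket's own doc_count, which is why the script can relate the filtered count to the total without any extra aggregation for the denominator.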

How to get hours between Min and Max date in Elasticsearch Aggregation?

How can I calculate the hours between the max and min dates (at the same tree level) in Elasticsearch?
My query:
{
  "size": 0,
  "query": {
    "bool": {
      "must": []
    }
  },
  "aggs": {
    "group_by_areaId": {
      "terms": {
        "size": 100000,
        "field": "areaId.keyword"
      },
      "aggs": {
        "4m": {
          "date_histogram": {
            "field": "timestamp",
            "format": "yyyy-MM-dd'T'HH:mm:ssZZ",
            "interval": "4m",
            "order": {
              "_key": "asc"
            }
          },
          "aggs": {
            "maxDate": {
              "max": {
                "field": "timestamp"
              }
            },
            "minDate": {
              "min": {
                "field": "timestamp"
              }
            }
          }
        }
      }
    }
  }
}
And the (abbreviated) response:
"aggregations": {
"group_by_areaId": {
"doc_count_error_upper_bound": 0,
"sum_other_doc_count": 0,
"buckets": [
{
"key": "key1",
"doc_count": 15,
"4m": {
"buckets": [
{
"key_as_string": "2020-02-12T06:08:00+0000",
"key": 1581487680000,
"doc_count": 3,
"minDate": {
"value": 1.581487847E12,
"value_as_string": "2020-02-12T06:10:47Z"
},
"maxDate": {
"value": 1.58148791E12,
"value_as_string": "2020-02-12T06:11:50Z"
},
*// Need hours between maxDate and minDate here
//{
// "hours" : "0.0175" (maxDate-minDate)
//}*
}
]
}
}
]
}
}
Can anyone help me find a solution?
Thanks in advance.
You can leverage the bucket_script pipeline aggregation in order to compute the difference between min and max for each bucket.
Simply add the following at the same level as minDate and maxDate:
"hours": {
"bucket_script": {
"buckets_path": {
"min": "minDate",
"max": "maxDate"
},
"script": "(params.max - params.min) / 3600000"
}
}
For your sample data above, the result would be 0.0175 hours (i.e. roughly one minute).
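Working it through with the sample values (the min/max aggregations return epoch milliseconds, hence the division by 3600000): 1581487910000 - 1581487847000 = 63000 ms, and 63000 / 3600000 = 0.0175 hours, i.e. 63 seconds.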

Elasticsearch Query: 30-Day Price Difference

I currently have Elasticsearch indices for a product spanning a year, one index per month (I think; it could be per year if I don't have as much data as I believe). Each day a process grabs all the prices of these products and puts them into Elasticsearch. I am trying to build a query that gives me the percent change in price of each product within the last 30 days.
Example...
{
  "timestamp": "2019-09-18T02:38:51.417Z",
  "productId": 1,
  "marketPrice": 5.00,
  "lowPrice": 4.30
},
{
  "timestamp": "2019-08-30T02:38:51.417Z",  // THIS SHOULD BE IGNORED
  "productId": 1,
  "marketPrice": 100.00,
  "lowPrice": 200.15
},
{
  "timestamp": "2019-08-18T02:38:51.417Z",
  "productId": 1,
  "marketPrice": 10.00,
  "lowPrice": 2.15
},
{
  "timestamp": "2019-09-18T02:38:51.417Z",
  "productId": 2,
  "marketPrice": 2.00,
  "lowPrice": 1.00
},
{
  "timestamp": "2019-08-18T02:38:51.417Z",
  "productId": 2,
  "marketPrice": 3.00,
  "lowPrice": 2.00
}
Result Example
{
  "productId": 1,
  "marketPriceChangeWithin30Days": "200%",
  "lowPriceChangeWithin30Days": "200%"
},
{
  "productId": 2,
  "marketPriceChangeWithin30Days": "150%",
  "lowPriceChangeWithin30Days": "200%"
}
The (THIS SHOULD BE IGNORED) marker is there because the only two values that should be compared are the one with the latest timestamp and the one whose timestamp is closest to 30 days in the past.
The query would then return product IDs 1 and 2 with the percent change, as shown in the example result.
You can leverage the derivative pipeline aggregation to achieve exactly what you expect:
POST /sales/_search
{
  "size": 0,
  "aggs": {
    "sales_per_month": {
      "date_histogram": {
        "field": "timestamp",
        "interval": "month"
      },
      "aggs": {
        "marketPrice": {
          "sum": {
            "field": "marketPrice"
          }
        },
        "lowPrice": {
          "sum": {
            "field": "lowPrice"
          }
        },
        "marketPriceDiff": {
          "derivative": {
            "buckets_path": "marketPrice"
          }
        },
        "lowPriceDiff": {
          "derivative": {
            "buckets_path": "lowPrice"
          }
        }
      }
    }
  }
}
UPDATE:
Given your updated requirements, I'd suggest using the serial_diff pipeline aggregation with a lag of 30 days:
POST /sales/_search
{
  "size": 0,
  "query": {
    "range": {
      "timestamp": {
        "gte": "now-31d",
        "lte": "now"
      }
    }
  },
  "aggs": {
    "products": {
      "terms": {
        "field": "productId",
        "size": 10
      },
      "aggs": {
        "histo": {
          "date_histogram": {
            "field": "timestamp",
            "interval": "day",
            "min_doc_count": 0
          },
          "aggs": {
            "marketPrice": {
              "avg": {
                "field": "marketPrice"
              }
            },
            "lowPrice": {
              "avg": {
                "field": "lowPrice"
              }
            },
            "30d_diff_marketPrice": {
              "serial_diff": {
                "buckets_path": "marketPrice",
                "lag": 30
              }
            },
            "30d_diff_lowPrice": {
              "serial_diff": {
                "buckets_path": "lowPrice",
                "lag": 30
              }
            }
          }
        }
      }
    }
  }
}
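The serial_diff values are absolute differences, not percentages. One option (a sketch, not part of the original answer) is to derive the percent change client-side from the last bucket of each product; this assumes Elasticsearch on localhost:9200, the sales index and fields from the question, and is abbreviated to marketPrice:

import requests

# the query from the answer above, as a Python dict (marketPrice only)
query = {
    "size": 0,
    "query": {"range": {"timestamp": {"gte": "now-31d", "lte": "now"}}},
    "aggs": {
        "products": {
            "terms": {"field": "productId", "size": 10},
            "aggs": {
                "histo": {
                    "date_histogram": {"field": "timestamp", "interval": "day",
                                       "min_doc_count": 0},
                    "aggs": {
                        "marketPrice": {"avg": {"field": "marketPrice"}},
                        "30d_diff_marketPrice": {
                            "serial_diff": {"buckets_path": "marketPrice", "lag": 30}
                        }
                    }
                }
            }
        }
    }
}

resp = requests.post("http://localhost:9200/sales/_search", json=query).json()

for product in resp["aggregations"]["products"]["buckets"]:
    last = product["histo"]["buckets"][-1]                     # most recent day
    diff = (last.get("30d_diff_marketPrice") or {}).get("value")
    today = (last.get("marketPrice") or {}).get("value")
    if diff is not None and today is not None and today != diff:
        previous = today - diff                                # value ~30 days back
        print(f"productId {product['key']}: {100.0 * today / previous:.0f}% of the price 30 days ago")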

Subtract numeric fields between two documents with different timestamps

Let's say I have these data samples:
{
  "date": "2019-06-16",
  "rank": 150,
  "name": "doc 1"
}
{
  "date": "2019-07-16",
  "rank": 100,
  "name": "doc 1"
}
{
  "date": "2019-06-16",
  "rank": 50,
  "name": "doc 2"
}
{
  "date": "2019-07-16",
  "rank": 80,
  "name": "doc 2"
}
The expected result is obtained by subtracting the rank field of the two docs that share a name but have different dates (old value minus new value):
{
  "name": "doc 1",
  "diff_rank": 50
}
{
  "name": "doc 2",
  "diff_rank": -30
}
If possible, sort by diff_rank; otherwise I will just sort manually after getting the result.
What I have tried is using date_histogram and serial_diff, but some results are somehow missing the diff_rank value even though I am sure the data exists:
{
  "aggs": {
    "group_by_name": {
      "terms": {
        "field": "name"
      },
      "aggs": {
        "days": {
          "date_histogram": {
            "field": "date",
            "interval": "day"
          },
          "aggs": {
            "the_rank": {
              "sum": {
                "field": "rank"
              }
            },
            "diff_rank": {
              "serial_diff": {
                "buckets_path": "the_rank",
                "lag": 30  // 1 month, i.e. 30 days in this case
              }
            }
          }
        }
      }
    }
  }
}
Any help solving the issue above will be much appreciated!
Finally, I found a method in the official docs using the Filter and Bucket Script aggregations, plus Bucket Sort to sort the result. Here is the final snippet:
{
  "size": 0,
  "aggs": {
    "group_by_name": {
      "terms": {
        "field": "name",
        "size": 50,
        "shard_size": 10000
      },
      "aggs": {
        "last_month_rank": {
          "filter": {
            "term": { "date": "2019-06-17" }
          },
          "aggs": {
            "rank": {
              "sum": {
                "field": "rank"
              }
            }
          }
        },
        "latest_rank": {
          "filter": {
            "term": { "date": "2019-07-17" }
          },
          "aggs": {
            "rank": {
              "sum": {
                "field": "rank"
              }
            }
          }
        },
        "diff_rank": {
          "bucket_script": {
            "buckets_path": {
              "lastMonthRank": "last_month_rank>rank",
              "latestRank": "latest_rank>rank"
            },
            "script": "params.lastMonthRank - params.latestRank"
          }
        },
        "rank_bucket_sort": {
          "bucket_sort": {
            "sort": [
              { "diff_rank": { "order": "desc" } }
            ],
            "size": 50
          }
        }
      }
    }
  }
}
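A likely reason the earlier serial_diff attempt dropped values: the lag parameter counts buckets, not days, so any name whose per-term date_histogram spans fewer than 31 daily buckets never produces a diff_rank. The two explicit filter aggregations above always exist for every name, which is why the bucket_script variant is robust here.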

ElasticSearch: Query syntax is painful

I have just started working on Elasticsearch and writing the query DSL is painful: it is so difficult to see the connections between brackets, and there are too many spaces. I am working on outlier detection, and as an example this is what the code looks like:
"query": {
"filtered": {
"filter": {
"range": {
"hour": {
"gte": "{{start}}",
"lte": "{{end}}"
}
}
}
}
},
"size": 0,
"aggs": {
"metrics": {
"terms": {
"field": "metric",
"size": 5
},
"aggs": {
"queries": {
"terms": {
"field": "query",
"size": 500
},
"aggs": {
"series": {
"date_histogram": {
"field": "hour",
"interval": "hour"
},
"aggs": {
"avg": {
"avg": {
"field": "value"
}
},
"movavg": {
"moving_avg": {
"buckets_path": "avg",
"window": 24,
"model": "simple"
}
},
"surprise": {
"bucket_script": {
"buckets_path": {
"avg": "avg",
"movavg": "movavg"
},
"script": "(avg - movavg).abs()"
}
}
}
},
"largest_surprise": {
"max_bucket": {
"buckets_path": "series.surprise"
}
}
}
},
"ninetieth_surprise": {
"percentiles_bucket": {
"buckets_path": "queries>largest_surprise",
"percents": [
90
]
}
}
}
}
I get around it with my own convention to keep the code readable. It relies only on closing brackets and indentation: a new line starts whenever a group of closing brackets ends (except inline ones like "{{start}}"). It looks something like this:
{
"query":{"filtered":{"filter":{"range":{"hour":{"gte":"{{start}}","lte":"{{end}}"}}}}},
"size":0,
"aggs":{"metrics":{"terms":{"field":"metric",“size”:5},
"aggs":{"queries":{"terms":{"field":"query","size":500},
"aggs":{"series": {"date_histogram":{"field":"hour","interval":"hour"},
"aggs":{"avg":{"avg":{"field":"value"}},
....
I would love to know whether there is another convention that helps readability and makes the lines easier to follow. What is being used in the community?
CODE from: https://www.elastic.co/blog/implementing-a-statistical-anomaly-detector-part-1
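One convention used in the community (a sketch, assuming the elasticsearch-dsl Python package and a version that still supports moving_avg) is to not hand-write the JSON at all: build the request programmatically, so the nesting lives in code instead of matched braces, and dump it when you need the raw query. The sketch below mirrors the anomaly-detector aggregation; the surprise script is modernized to Painless's Math.abs, and the deprecated filtered query becomes a bool filter:

from elasticsearch_dsl import Search

s = Search(index="my-index").extra(size=0)
# the {{start}}/{{end}} mustache placeholders are kept verbatim, as in the blog
s = s.filter("range", hour={"gte": "{{start}}", "lte": "{{end}}"})

# each .bucket() call returns the new bucket, so the nesting stays flat
metrics = s.aggs.bucket("metrics", "terms", field="metric", size=5)
queries = metrics.bucket("queries", "terms", field="query", size=500)
series = queries.bucket("series", "date_histogram", field="hour", interval="hour")
series.metric("avg", "avg", field="value")
series.pipeline("movavg", "moving_avg", buckets_path="avg", window=24, model="simple")
series.pipeline("surprise", "bucket_script",
                buckets_path={"avg": "avg", "movavg": "movavg"},
                script="Math.abs(params.avg - params.movavg)")
queries.pipeline("largest_surprise", "max_bucket", buckets_path="series>surprise")
metrics.pipeline("ninetieth_surprise", "percentiles_bucket",
                 buckets_path="queries>largest_surprise", percents=[90])

print(s.to_dict())  # the generated query body, ready to inspect or send

For hand-edited queries, the Dev Tools Console in Kibana also auto-indents request bodies, which solves most of the bracket-matching pain.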
