Script to return array for scripted metric aggregation from combine - elasticsearch

For scripted metric aggregation, in the example shown in the documentation, the combine script returns a single number.
Instead, can I pass an array or hash here?
I tried doing it; although it did not return any error, I am not able to access those values from the reduce script.
In the reduce script, per shard, I am getting an instance that, when converted to a string, reads as 'Script2$_run_closure1#52ef3bd9'
Kindly let me know if this can be accomplished in any way.

At least for Elasticsearch version 1.5.1 you can do so.
For example, we can modify Elasticsearch example (scripted metric aggregation) to receive an average profit (profit divided by number of transactions):
{
"query": {
"match_all": {}
},
"aggs": {
"avg_profit": {
"scripted_metric": {
"init_script": "_agg['transactions'] = []",
"map_script": "if (doc['type'].value == \"sale\") { _agg.transactions.add(doc['amount'].value) } else { _agg.transactions.add(-1 * doc['amount'].value) }",
"combine_script": "profit = 0; num_of_transactions = 0; for (t in _agg.transactions) { profit += t; num_of_transactions += 1 }; return [profit, num_of_transactions]",
"reduce_script": "profit = 0; num_of_transactions = 0; for (a in _aggs) { profit += a[0] as int; num_of_transactions += a[1] as int }; return profit / num_of_transactions as float"
}
}
}
}
NOTE: this is just a demo for an array in the combine script, you can calculate average easily without using any arrays.
The response will look like:
"aggregations" : {
"avg_profit" : {
"value" : 42.5
}
}

Related

Elasticsearch: Multiply each nested element plus aggregation

Let's imagine an index composed of 2 documents like this one:
doc1 = {
"x":1,
"y":[{brand:b1, value:1},
{brand:b2, value:2}]
},
doc2 = {
"x":2,
"y":[{brand:b1, value:0},
{brand:b2, value:3}]
}
Is it possible to multiply each value of y by x for each document and then do a sum aggregation based on the brand term to get this result:
b1: 1
b2: 8
If not, could it be done with any other mapping types?
This is a highly custom use-case so I don't think there's some sort of a pre-optimized mapping for it.
What I would suggest is the following:
Set up an index w/ y being nested:
PUT xy/
{"mappings":{"properties":{"y":{"type":"nested"}}}}
Ingest the docs from your example:
POST xy/_doc
{"x":1,"y":[{"brand":"b1","value":1},{"brand":"b2","value":2}]}
POST xy/_doc
{"x":2,"y":[{"brand":"b1","value":0},{"brand":"b2","value":3}]}
Use a scripted_metric aggregation to compute the products and add them up in a shared HashMap:
GET xy/_search
{
"size": 0,
"aggs": {
"multiply_and_add": {
"scripted_metric": {
"init_script": "state.by_brands = [:]",
"map_script": """
def x = params._source['x'];
for (def brand_pair : params._source['y']) {
def brand = brand_pair['brand'];
def product = x * brand_pair['value'];
if (state.by_brands.containsKey(brand)) {
state.by_brands[brand] += product;
} else {
state.by_brands[brand] = product;
}
}
""",
"combine_script": "return state",
"reduce_script": "return states"
}
}
}
}
which would yield something along the lines of
{
...
"aggregations":{
"multiply_and_add":{
"value":[
{
"by_brands":{ <----
"b2":8,
"b1":1
}
}
]
}
}
}
UPDATE
The combine_script could look like this:
def combined_states = [:];
for (def state : states) {
for (def brand_pair : state['by_brands'].entrySet()) {
def key = brand_pair.getKey();
def value = brand_pair.getValue();
if (combined_states.containsKey(key)) {
combined_states[key] += (float)value;
break;
}
combined_states[key] = (float)value
}
}

Elasticsearch scripted_metric null_pointer_exception

I'm trying to use the scripted_metric aggs of Elasticsearch and normally, it's working perfectly fine with my other scripts
However, with the script below, I'm encountering an error called "null_pointer_exception", even though these are just copy-pasted scripts that have already been working for 6 modules
$max = 10;
{
"query": {
"match_all": {}
//omitted some queries here, so I just turned it into match_all
}
},
"aggs": {
"ARTICLE_CNT_PDAY": {
"histogram": {
"field": "pub_date",
"interval": "86400"
},
"aggs": {
"LATEST": {
"nested": {
"path": "latest"
},
"aggs": {
"SUM_SVALUE": {
"scripted_metric": {
"init_script": "
state.te = [];
state.g = 0;
state.d = 0;
state.a = 0;
",
"map_script": "
if(state.d != doc['_id'].value){
state.d = doc['_id'].value;
state.te.add(state.a);
state.g = 0;
state.a = 0;
}
state.a = doc['latest.soc_mm_score'].value;
",
"combine_script": "
state.te.add(state.a);
double count = 0;
for (t in state.te) {
count += ((t*10)/$max)
}
return count;
",
"reduce_script": "
double count = 0;
for (a in states) {
count += a;
}
return count;
"
}
}
}
}
}
}
}
}
I tried running this script in Kibana, and here's the error message:
What I'm getting is that there's something wrong with the reduce_script portion, so I tried to change this part:
FROM
for (a in states) {
count += a;
}
TO
for (a in states) {
count += 1;
}
And it worked perfectly fine, so I feel that the a variable isn't getting what it's supposed to hold
Any ideas here? Would appreciate your help, thank you very much!
The reason is explained here:
If a parent bucket of the scripted metric aggregation does not collect any documents an empty aggregation response will be returned from the shard with a null value. In this case the reduce_script's states variable will contain null as a response from that shard. reduce_script's should therefore expect and deal with null responses from shards.
So obviously one of your buckets is empty, and you need to deal with that null like this:
"reduce_script": "
double count = 0;
for (a in states) {
count += (a ?: 0);
}
return count;
"

Count of unique aggregration doc_count in ElasticSearch

Using ElasticSearch 7.0, I can get how many logs I have for each user with an aggregation:
"aggs": {
"by_user": {
"terms": {
"field": "user_id",
}
}
}
This returns me something like:
user32: 25
user52: 20
user10: 20
...
What I would like to know is how many users have 25 logs, how many users have 20 logs, etc. The ideal result would be something like:
25: 1
20: 2
19: 4
12: 54
Because 54 users have 12 log lines.
How can I make an aggregation that returns this result ?
It sounds like you can use Bucket Script Aggregation to simplify your query but the problem is that there is still open PR on this topic.
So, for now I think the simplest approach is to use a painless script with a Scripted Metric Aggregation. I recommend you carefully read about the stages of its execution.
In terms of code I know it's not the best algorithm for your problem but quick and dirty your query could look something like this:
GET my_index/_search
{
"size": 0,
"query" : {
"match_all" : {}
},
"aggs": {
"profit": {
"scripted_metric": {
"init_script" : "state.transactions = [:];",
"map_script" :
"""
def key = doc['firstName.keyword'];
if (key != null && key.value != null) {
def value = state.transactions[key.value];
if(value==null) value = 0;
state.transactions[key.value] = value+1
}
""",
"combine_script" : "return state.transactions",
"reduce_script" :
"""
def result = [:];
for (state in states) {
for (item in state.entrySet()) {
def key=item.getValue().toString();
def value = result[key];
if(value==null)value = 0;
result[key]=value+1;
}
}
return result;
"""
}
}
}
}

create parametric scripted_metric in elasticsearch

I want to use scripted_metric within an aggregation. I have some parametric values in my script that I want to set per query. Is it possible to create such a query at all?
below an example for what I'm looking for
"aggs": {
"testAgg": {
"scripted_metric": {
"init_script": "_agg['maximum'] = []",
"map_script": "max = 0; for(tv in _source.tvs){ if(tv.att1>= param1 && tv.attr2 <= param2 && tv.att3 > max){max = tv.att3; }}; _agg.maximum.add(max);",
"combine_script": "sum = 0; for (m in _agg.maximum) { sum += m }; return sum;",
"reduce_script": "sum = 0; for (a in _aggs) { sum += a }; return sum;"
}
}
}
param1 and param2 are my parametric values; how do I change this aggregation for my purpose?
tnx :)
You can do it by specifying a global params map
"aggs": {
"testAgg": {
"scripted_metric": {
"params": {
"_agg": {},
"param1": 10,
"param2": 20
},
"init_script": "_agg['maximum'] = []",
"map_script": "max = 0; for(tv in _source.tvs){ if(tv.att1>= param1 && tv.attr2 <= param2 && tv.att3 > max){max = tv.att3; }}; _agg.maximum.add(max);",
"combine_script": "sum = 0; for (m in _agg.maximum) { sum += m }; return sum;",
"reduce_script": "sum = 0; for (a in _aggs) { sum += a }; return sum;"
}
}
}

MongoDB multikey index write performance degrading

In MongoDB I have a collection with documents having an array with subdocuments I would like to have an index on:
{
_id : ObjectId(),
members : [
{ ref : ObjectId().str, ... },
{ ref : ObjectId().str, ... },
...
]
}
The index is on the ref field, such that I can quickly find all documents having a particular 'ref' in its members:
db.test.ensureIndex({ "members.ref" : 1 });
I noticed that the performance of pushing an additional subdocument to the array degrades fast as the array length goes above a few thousand. If I instead use an index on an array of strings, the performance does not degrade.
The following code demonstrates the behavior:
var _id = ObjectId("522082310521b655d65eda0f");
function initialize () {
db.test.drop();
db.test.insert({ _id : _id, members : [], memberRefs : [] });
}
function pushToArrays (n) {
var total, err, ref;
total = Date.now();
for (var i = 0; i < n; i++) {
ref = ObjectId().str;
db.test.update({ _id : _id }, { $push : { members : { ref : ref }, memberRefs : ref } });
err = db.getLastError();
if (err) {
throw err;
}
if ((i + 1) % 1000 === 0) {
print("pushed " + (i + 1));
}
}
total = Date.now() - total;
print("pushed " + n + " in " + total + "ms");
}
initialize();
pushToArrays(5000);
db.test.ensureIndex({ "members.ref" : 1 });
pushToArrays(10);
db.test.dropIndexes();
db.test.ensureIndex({ "memberRefs" : 1 });
pushToArrays(10);
db.test.dropIndexes();
E.g., using MongoDB 2.4.6 on my machine I see the following times used to push 10 elements on arrays of length 5000:
Index on "members.ref": 37272ms
Index on "memberRefs": 405ms
That difference seems unexpected. Is this a problem with MongoDB or my use of the multikey index? Is there a recommended way of handling this? Thanks.
Take a look at SERVER-8192 and SERVER-8193. Hopefully that will help answer your question!

Resources