Elasticsearch: Indexing rate slows down gradually

I am writing to ES from Spark Streaming at a rate of around 80,000 events per second.
This was running fine earlier, but recently the indexing rate has been slowing down gradually, which results in Spark jobs piling up.
Which cluster settings can I tweak or verify to address this issue?
Cluster settings:
{
  "persistent": {
    "cluster": {
      "routing": {
        "allocation": {
          "cluster_concurrent_rebalance": "160",
          "node_concurrent_recoveries": "2",
          "disk": {
            "threshold_enabled": "true",
            "watermark": {
              "low": "85%",
              "high": "95%"
            }
          },
          "node_initial_primaries_recoveries": "40",
          "enable": "all"
        }
      }
    },
    "indices": {
      "breaker": {
        "fielddata": {
          "limit": "50%"
        }
      },
      "recovery": {
        "concurrent_streams": "80",
        "max_bytes_per_sec": "50mb"
      },
      "store": {
        "throttle": {
          "max_bytes_per_sec": "500gb"
        }
      }
    },
    "threadpool": {
      "bulk": {
        "queue_size": "5000",
        "size": "40",
        "type": "fixed"
      }
    },
    "discovery": {
      "zen": {
        "minimum_master_nodes": "2"
      }
    }
  },
  "transient": {
    "cluster": {
      "routing": {
        "allocation": {
          "enable": "all"
        }
      }
    }
  }
}
Cluster status:
{
  "cluster_name": "**********",
  "status": "green",
  "timed_out": false,
  "number_of_nodes": 105,
  "number_of_data_nodes": 100,
  "active_primary_shards": 7315,
  "active_shards": 7330,
  "relocating_shards": 0,
  "initializing_shards": 0,
  "unassigned_shards": 0,
  "delayed_unassigned_shards": 0,
  "number_of_pending_tasks": 0,
  "number_of_in_flight_fetch": 0,
  "task_max_waiting_in_queue_millis": 0,
  "active_shards_percent_as_number": 100
}
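One thing worth verifying first is whether the bulk thread pool starts rejecting requests as the slowdown develops; rejections would mean the 5000-slot queue is saturated and the cluster is pushing back on the Spark writers. A hedged sketch of that check (column names as in the _cat API of 2.x-era clusters, which the threadpool.* settings above suggest; on 5.x+ the pool is addressed as _cat/thread_pool/bulk):
GET _cat/thread_pool?v&h=host,bulk.active,bulk.queue,bulk.rejected
If bulk.rejected keeps growing over time, the cluster as sized cannot absorb 80,000 events per second, and a large queue_size only hides the back-pressure; common next steps are raising the index refresh_interval during heavy ingest and reducing the number of shards each bulk request touches.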

Related

Elasticsearch - Count number of occurrences per field per document

Is it possible to calculate the number of occurrences of distinct values in a list field?
For example, given the following data:
[
  {
    "page": 1,
    "colors": [
      {
        "color": "red"
      },
      {
        "color": "white"
      },
      {
        "color": "red"
      }
    ]
  },
  {
    "page": 2,
    "colors": [
      {
        "color": "yellow"
      },
      {
        "color": "yellow"
      }
    ]
  }
]
Is it possible to get a result like the following:
{
  "page": 1,
  "colors_count": [
    {
      "Key": "red",
      "Count": 2
    },
    {
      "Key": "white",
      "Count": 1
    }
  ]
},
{
  "page": 2,
  "colors_count": [
    {
      "Key": "yellow",
      "Count": 2
    }
  ]
}
I tried using a terms aggregation but I got the number of distinct values, so for page 1 I got red: 1 and white: 1.
Yes, you can do it. You will have to use the nested field type and a nested aggregation.
Mapping:
PUT colors
{
  "mappings": {
    "properties": {
      "page": { "type": "keyword" },
      "colors": {
        "type": "nested",
        "properties": {
          "color": {
            "type": "keyword"
          }
        }
      }
    }
  }
}
Insert Documents:
PUT colors/_doc/1
{
  "page": 1,
  "colors": [
    {
      "color": "red"
    },
    {
      "color": "white"
    },
    {
      "color": "red"
    }
  ]
}
PUT colors/_doc/2
{
  "page": 2,
  "colors": [
    {
      "color": "yellow"
    },
    {
      "color": "yellow"
    }
  ]
}
Query:
GET colors/_search
{
  "size": 0,
  "aggs": {
    "groupByPage": {
      "terms": {
        "field": "page"
      },
      "aggs": {
        "colors": {
          "nested": {
            "path": "colors"
          },
          "aggs": {
            "genres": {
              "terms": {
                "field": "colors.color"
              }
            }
          }
        }
      }
    }
  }
}
Output:
{
  "took": 3,
  "timed_out": false,
  "_shards": {
    "total": 1,
    "successful": 1,
    "skipped": 0,
    "failed": 0
  },
  "hits": {
    "total": {
      "value": 2,
      "relation": "eq"
    },
    "max_score": null,
    "hits": []
  },
  "aggregations": {
    "groupByPage": {
      "doc_count_error_upper_bound": 0,
      "sum_other_doc_count": 0,
      "buckets": [
        {
          "key": "1", // page field value
          "doc_count": 1,
          "colors": {
            "doc_count": 3,
            "genres": {
              "doc_count_error_upper_bound": 0,
              "sum_other_doc_count": 0,
              "buckets": [
                {
                  "key": "red",
                  "doc_count": 2
                },
                {
                  "key": "white",
                  "doc_count": 1
                }
              ]
            }
          }
        },
        {
          "key": "2", // page field value
          "doc_count": 1,
          "colors": {
            "doc_count": 2,
            "genres": {
              "doc_count_error_upper_bound": 0,
              "sum_other_doc_count": 0,
              "buckets": [
                {
                  "key": "yellow",
                  "doc_count": 2
                }
              ]
            }
          }
        }
      ]
    }
  }
}

ES aggregation query is slow. How can I improve it?

We have 7,552,642 documents, with a total size of about 94.29 GB, in our Elasticsearch production cluster.
Xms and Xmx:
/usr/bin/java -Xms20g -Xmx20g
Cluster Information:
{
  "cluster_name": "production",
  "status": "green",
  "timed_out": false,
  "number_of_nodes": 2,
  "number_of_data_nodes": 2,
  "active_primary_shards": 10,
  "active_shards": 20,
  "relocating_shards": 0,
  "initializing_shards": 0,
  "unassigned_shards": 0,
  "delayed_unassigned_shards": 0,
  "number_of_pending_tasks": 0,
  "number_of_in_flight_fetch": 0,
  "task_max_waiting_in_queue_millis": 0,
  "active_shards_percent_as_number": 100
}
Mapping:
"posts": {
  "mappings": {
    "information": {
      "_all": {
        "enabled": true
      },
      "properties": {
        "post_created_time": {
          "type": "date"
        },
        "post_impressions": {
          "type": "double"
        },
        "post_interactions": {
          "type": "long"
        },
        "post_media_value": {
          "type": "float"
        },
        "post_message": {
          "type": "text"
        },
        "post_reach": {
          "type": "double"
        },
        "post_type": {
          "type": "keyword"
        },
        "profile_id": {
          "type": "long"
        },
        "profile_name": {
          "type": "text"
        },
        "platform_followed_by": {
          "type": "long"
        }
      }
    }
  }
}
Aggregation query:
{
  "size": 0,
  "aggs": {
    "total_influencers": {
      "cardinality": {
        "field": "profile_id"
      }
    },
    "profiles_reach_bucket": {
      "terms": {
        "size": 2147483639,
        "field": "profile_id"
      },
      "aggs": {
        "media_reach_bucket": {
          "terms": {
            "field": "_type",
            "size": 2147483639
          },
          "aggs": {
            "media_reach": {
              "avg": {
                "field": "post_reach"
              }
            }
          }
        },
        "total_media_reach": {
          "sum_bucket": {
            "buckets_path": "media_reach_bucket>media_reach"
          }
        }
      }
    },
    "total_reach": {
      "sum_bucket": {
        "buckets_path": "profiles_reach_bucket>total_media_reach"
      }
    },
    "total_impressions": {
      "sum": {
        "field": "post_impressions"
      }
    },
    "total_interactions": {
      "sum": {
        "field": "post_interactions"
      }
    },
    "total_followers": {
      "sum": {
        "field": "platform_followed_by"
      }
    },
    "interactions_for_media_value": {
      "terms": {
        "size": 2147483639,
        "field": "_type"
      },
      "aggs": {
        "interaction": {
          "sum": {
            "field": "post_interactions"
          }
        }
      }
    },
    "impressions_for_media_value": {
      "terms": {
        "size": 2147483639,
        "field": "_type"
      },
      "aggs": {
        "impression": {
          "sum": {
            "field": "post_impressions"
          }
        }
      }
    }
  },
  "query": {
    "bool": {
      "must": [
        {
          "query_string": {
            "query": "post_message:*food* post_description:*food* post_title:*food* "
          }
        }
      ],
      "filter": [
        {
          "range": {
            "post_created_time": {
              "gte": 1533427200,
              "lte": 1549324800
            }
          }
        }
      ]
    }
  }
}
Result:
The above query took:
"took": 6685,
"timed_out": false,
"_shards": {
  "total": 10,
  "successful": 10,
  "skipped": 0,
  "failed": 0
}
Any suggestions for optimizing this query would be appreciated.
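One likely culprit is the query_string clause: leading wildcards such as *food* force Elasticsearch to scan every term in the index for each of the three fields. A hedged sketch of the same query section using a multi_match on the analyzed text fields instead (note this matches the token "food" rather than arbitrary substrings, and it assumes post_description and post_title exist in the mapping, since only post_message is shown above):
{
  "query": {
    "bool": {
      "must": [
        {
          "multi_match": {
            "query": "food",
            "fields": ["post_message", "post_description", "post_title"]
          }
        }
      ],
      "filter": [
        {
          "range": {
            "post_created_time": {
              "gte": 1533427200,
              "lte": 1549324800
            }
          }
        }
      ]
    }
  }
}
Separately, the terms aggregations with "size": 2147483639 request one bucket per distinct profile_id in a single pass; if profile_id has high cardinality, paging through it with a composite aggregation (available from Elasticsearch 6.1) would likely reduce memory pressure and latency.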

How to get only matched documents from ElasticSearch for a nested array field

I am using an array field in Elasticsearch, and this array contains multiple JSON documents, like this:
{
  "imei": 358739050280669,
  "date": "2018-02-20",
  "id": 86739126,
  "totalData": [
    {
      "gpsdt": "2018-02-20",
      "satno": 0,
      "analog3": -1,
      "digital1": 0,
      "digital2": 1,
      "digital3": 1,
      "digital4": 2,
      "lastgpsdt": "2018-02-20T11:54:00",
      "longitude": 78.081218,
      "odometer": 0,
      "intbatlevel": 6,
      "odo": 0,
      "latitude": 29.951449,
      "srtangle": 62,
      "analog4": 13,
      "speed": 0,
      "analog2": -1,
      "analog1": 9,
      "extbatlevel": 0
    },
    {
      "gpsdt": "2018-02-20",
      "speed": 22,
      "satno": 0,
      "digital1": 0,
      "digital2": 1,
      "digital3": 1,
      "digital4": 2,
      "lastgpsdt": "2018-02-20T22:48:00",
      "longitude": 78.062898,
      "odometer": 0,
      "intbatlevel": 6,
      "odo": 113,
      "latitude": 29.948898,
      "srtangle": 67,
      "analog4": 12,
      "analog3": -1,
      "analog2": -1,
      "analog1": 8,
      "extbatlevel": 0
    }
  ]
}
Now I want to apply a filter on imei and a date range filter on the lastgpsdt field, and in the output I want only the elements that match the applied filters.
For example: I have to get data for imei number 358739050280669 with lastgpsdt between 2018-02-20T10:54:00 and 2018-02-20T12:54:00.
So it should return only one element (according to the given data) from the totalData array field.
Please suggest a query to achieve this.
The output should look like this:
{
  "imei": 358739050280669,
  "date": "2018-02-20",
  "id": 86739126,
  "totalData": [
    {
      "gpsdt": "2018-02-20",
      "satno": 0,
      "analog3": -1,
      "digital1": 0,
      "digital2": 1,
      "digital3": 1,
      "digital4": 2,
      "lastgpsdt": "2018-02-20T11:54:00",
      "longitude": 78.081218,
      "odometer": 0,
      "intbatlevel": 6,
      "odo": 0,
      "latitude": 29.951449,
      "srtangle": 62,
      "analog4": 13,
      "speed": 0,
      "analog2": -1,
      "analog1": 9,
      "extbatlevel": 0
    }
  ]
}
Thanks in advance.
You can achieve this by using the inner_hits parameter of the nested search query:
In many cases, it’s very useful to know which inner nested objects (in the case of nested) or children/parent documents (in the case of parent/child) caused certain information to be returned. The inner hits feature can be used for this. This feature returns per search hit in the search response additional nested hits that caused a search hit to match in a different scope.
If you have a mapping like this:
PUT /mygps
{
  "mappings": {
    "doc": {
      "properties": {
        "date": {
          "type": "date"
        },
        "id": {
          "type": "long"
        },
        "imei": {
          "type": "long"
        },
        "totalData": {
          "type": "nested",
          "properties": {
            "analog1": {
              "type": "long"
            },
            "analog2": {
              "type": "long"
            },
            "analog3": {
              "type": "long"
            },
            "analog4": {
              "type": "long"
            },
            "digital1": {
              "type": "long"
            },
            "digital2": {
              "type": "long"
            },
            "digital3": {
              "type": "long"
            },
            "digital4": {
              "type": "long"
            },
            "extbatlevel": {
              "type": "long"
            },
            "gpsdt": {
              "type": "date"
            },
            "intbatlevel": {
              "type": "long"
            },
            "lastgpsdt": {
              "type": "date"
            },
            "latitude": {
              "type": "float"
            },
            "longitude": {
              "type": "float"
            },
            "odo": {
              "type": "long"
            },
            "odometer": {
              "type": "long"
            },
            "satno": {
              "type": "long"
            },
            "speed": {
              "type": "long"
            },
            "srtangle": {
              "type": "long"
            }
          }
        }
      }
    }
  }
}
The query might look like this:
POST /mygps/doc/_search
{
  "query": {
    "bool": {
      "must": [
        {
          "term": {
            "imei": "358739050280669"
          }
        },
        {
          "nested": {
            "path": "totalData",
            "query": {
              "range": {
                "totalData.lastgpsdt": {
                  "gte": "2018-02-20T10:54:00",
                  "lte": "2018-02-20T12:54:00"
                }
              }
            },
            "inner_hits": {}
          }
        }
      ]
    }
  },
  "_source": ["imei", "date", "id"]
}
Which will produce the following output:
{
  "hits": {
    "total": 1,
    "max_score": 2,
    "hits": [
      {
        "_index": "mygps",
        "_type": "doc",
        "_id": "AWR5Em0m6jWoKaNfOwDA",
        "_score": 2,
        "_source": {
          "date": "2018-02-20",
          "imei": 358739050280669,
          "id": 86739126
        },
        "inner_hits": {
          "totalData": {
            "hits": {
              "total": 1,
              "max_score": 1,
              "hits": [
                {
                  "_nested": {
                    "field": "totalData",
                    "offset": 0
                  },
                  "_score": 1,
                  "_source": {
                    "gpsdt": "2018-02-20",
                    "satno": 0,
                    "analog3": -1,
                    "digital1": 0,
                    "digital2": 1,
                    "digital3": 1,
                    "digital4": 2,
                    "lastgpsdt": "2018-02-20T11:54:00",
                    "longitude": 78.081218,
                    "odometer": 0,
                    "intbatlevel": 6,
                    "odo": 0,
                    "latitude": 29.951449,
                    "srtangle": 62,
                    "analog4": 13,
                    "speed": 0,
                    "analog2": -1,
                    "analog1": 9,
                    "extbatlevel": 0
                  }
                }
              ]
            }
          }
        }
      }
    ]
  }
}
Note that since you are not interested in the non-matching items of totalData, I used source filtering to exclude that field from the response entirely. The matching items of totalData can instead be found under inner_hits.
Hope that helps!

Why doesn't Elasticsearch return the right results?

I'm using Elasticsearch 6.2, configured as one cluster of 2 nodes.
GET _cluster/health:
{
  "cluster_name": "cluster_name",
  "status": "green",
  "timed_out": false,
  "number_of_nodes": 2,
  "number_of_data_nodes": 2,
  "active_primary_shards": 47,
  "active_shards": 94,
  "relocating_shards": 0,
  "initializing_shards": 0,
  "unassigned_shards": 0,
  "delayed_unassigned_shards": 0,
  "number_of_pending_tasks": 0,
  "number_of_in_flight_fetch": 0,
  "task_max_waiting_in_queue_millis": 0,
  "active_shards_percent_as_number": 100
}
GET myindex/_settings:
{
  "myindex": {
    "settings": {
      "index": {
        "number_of_shards": "3",
        "analysis": {
          "analyzer": {
            "url_split_analyzer": {
              "filter": "lowercase",
              "tokenizer": "url_split"
            }
          },
          "tokenizer": {
            "url_split": {
              "pattern": "[^a-zA-Z0-9]",
              "type": "pattern"
            }
          }
        },
        "number_of_replicas": "1",
        "version": {
          "created": "6020499"
        }
      }
    }
  }
}
Here is a snapshot of the _mappings structure:
"myindex": {
"mappings": {
"mytype": {
"properties": {
"#timestamp": {
"type": "date"
},
............
"active": {
"type": "short"
},
"id_domain": {
"type": "short",
"ignore_malformed": true
},
"url": {
"type": "text",
"similarity": "boolean",
"analyzer": "url_split_analyzer"
}
}
.......
I have found, by chance, documents within my index that I cannot find when I query the index using the id_domain property.
For example:
GET /myindex/mytype/_search
{
  "query": {
    "bool": {
      "must": [
        {
          "match": { "active": 1 }
        }
      ]
    }
  }
}
Output example:
{
  "_index": "myindex",
  "_type": "mytype",
  "_id": "myurl",
  "_score": 1,
  "_source": {
    "id_domain": "73993",
    "active": 1,
    "url": "myurl",
    "#timestamp": "2018-05-21T10:55:16.247Z"
  }
}
....
This returns a list of documents containing values of id_domain that I cannot find when querying against that id_domain, like this:
GET /myindex/mytype/_search
{
  "query": {
    "match": {
      "id_domain": 73993 // with or without " got the same result
    }
  }
}
Output:
{
  "took": 5,
  "timed_out": false,
  "_shards": {
    "total": 3,
    "successful": 3,
    "skipped": 0,
    "failed": 0
  },
  "hits": {
    "total": 0,
    "max_score": null,
    "hits": []
  }
}
I cannot understand why this happens.
I also tried to reindex the index, but I got the same result.
I am convinced that I'm missing something.
Is there any reason for this behaviour?
Thank you.
In your mapping, id_domain has type short, but your document contains a value that is out of bounds for short values ([-32,768 to 32,767]), i.e. 73993. Because the mapping also sets "ignore_malformed": true, the out-of-range value was silently dropped at index time instead of failing the document, which is why it appears in _source but is not searchable.
You need to change the type to integer and all will be fine.
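Since the type of an existing field cannot be changed in place, the fix requires reindexing. A minimal sketch (the index name myindex_new is hypothetical, and the remaining fields plus the url_split analysis settings would need to be carried over from the original index):
PUT /myindex_new
{
  "mappings": {
    "mytype": {
      "properties": {
        "id_domain": {
          "type": "integer"
        }
      }
    }
  }
}
POST _reindex
{
  "source": { "index": "myindex" },
  "dest": { "index": "myindex_new" }
}
After reindexing, the match query on id_domain 73993 should return the document.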

Elasticsearch, Nested Aggregations

I'm writing dynamic query generation that allows aggregating by any combination of fields in the mapping. As the (truncated) mapping below shows, some fields are of nested type, e.g. aggregate by [activities.activity, duration], [activities.activity, activities.duration], or [applicationName, duration].
Mapping:
{
  "nested": {
    "properties": {
      "#timestamp": {
        "type": "date",
        "format": "dateOptionalTime"
      },
      "activities": {
        "type": "nested",
        "include_in_parent": true,
        "properties": {
          "activity": {
            "type": "string",
            "index": "not_analyzed"
          },
          "duration": {
            "type": "long"
          },
          "entry": {
            "properties": {
              "blockName": {
                "type": "string",
                "index": "not_analyzed"
              },
              "blockid": {
                "type": "string"
              },
              "time": {
                "type": "date",
                "format": "dateOptionalTime"
              }
            }
          },
          "exit": {
            "properties": {
              "blockName": {
                "type": "string",
                "index": "not_analyzed"
              },
              "blockid": {
                "type": "string"
              },
              "time": {
                "type": "date",
                "format": "dateOptionalTime"
              }
            }
          },
          "seq": {
            "type": "integer"
          }
        }
      },
      "applicationName": {
        "type": "string",
        "index": "not_analyzed"
      },
      "duration": {
        "type": "long"
      }
    }
  }
}
Sample document:
{
  "#timestamp": "2015-09-15T17:35:24.020Z",
  "duration": "37616",
  "applicationName": "my application name",
  "activities": [
    {
      "duration": "20362",
      "entry": {
        "blockid": "2",
        "time": "2015-09-15T17:35:24.493Z",
        "blockName": "My Self Service"
      },
      "exit": {
        "blockid": "2",
        "time": "2015-09-15T17:35:44.855Z",
        "blockName": "My Self Service"
      },
      "seq": 1,
      "activity": "Prompter v2.3"
    },
    {
      "duration": "96",
      "entry": {
        "blockid": "2",
        "time": "2015-09-15T17:35:45.268Z",
        "blockName": "My Self Service"
      },
      "exit": {
        "blockid": "2",
        "time": "2015-09-15T17:35:45.364Z",
        "blockName": "My Self Service"
      },
      "seq": 2,
      "activity": "Start v2.5"
    },
    {
      "duration": "15931",
      "entry": {
        "blockid": "2",
        "time": "2015-09-15T17:35:45.669Z",
        "blockName": "My Self Service"
      },
      "exit": {
        "blockid": "2",
        "time": "2015-09-15T17:36:01.600Z",
        "blockName": "My Self Service"
      },
      "seq": 3,
      "activity": "System v2.3"
    }
  ]
}
Sample query:
{
  "size": 0,
  "aggs": {
    "dim0": {
      "nested": {
        "path": "activities"
      },
      "aggs": {
        "dim1": {
          "terms": {
            "field": "activities.activity"
          },
          "aggs": {
            "dim_reverse": {
              "reverse_nested": {},
              "aggs": {
                "avg_duration": {
                  "avg": {
                    "field": "duration"
                  }
                }
              }
            }
          }
        }
      }
    }
  }
}
Question:
As you can see in the query, when averaging on a root-level field from under a nested field, reverse_nested must be included so that the root-level field "duration" is visible. That means that when generating the query, we need to check each combination of fields to determine whether the parent/child fields are nested, nested under the same path, or at the root level, and then generate the appropriate query. This gets more complicated when aggregating on more fields, for example [applicationName, activities.duration, duration, activities.activity]. Does anyone know a more elegant way to do this? The logic would be simpler if we could specify an absolute path.
Not a real answer to my question, but here are more examples, as they may help others understand nested aggregations better.
            aggs field    average field
case 1      yes           yes
case 2      yes           no
case 3      no            yes
case 4      no            no
(yes = nested type, no = not nested type)
Case 1, both fields under the same nested path
Query
{
  "size": 0,
  "aggs": {
    "dim0": {
      "nested": {
        "path": "activities"
      },
      "aggs": {
        "dim1": {
          "terms": {
            "field": "activities.activity"
          },
          "aggs": {
            "avg_duration": {
              "avg": {
                "field": "activities.duration"
              }
            }
          }
        }
      }
    }
  }
}
Result:
{
  "took": 1,
  "timed_out": false,
  "_shards": {
    "total": 5,
    "successful": 5,
    "failed": 0
  },
  "hits": {
    "total": 1,
    "max_score": 0.0,
    "hits": []
  },
  "aggregations": {
    "dim0": {
      "doc_count": 3,
      "dim1": {
        "doc_count_error_upper_bound": 0,
        "sum_other_doc_count": 0,
        "buckets": [
          {
            "key": "Prompter v2.3",
            "doc_count": 1,
            "avg_duration": {
              "value": 20362.0
            }
          },
          {
            "key": "Start v2.5",
            "doc_count": 1,
            "avg_duration": {
              "value": 96.0
            }
          },
          {
            "key": "System v2.3",
            "doc_count": 1,
            "avg_duration": {
              "value": 15931.0
            }
          }
        ]
      }
    }
  }
}
Case 1 again, both fields nested, but with reverse_nested: every bucket gets the same average over all "activities.duration" values, because the reverse_nested step moves back to the root documents (where include_in_parent: true has flattened all three durations into one multi-valued field).
Query
{
  "size": 0,
  "aggs": {
    "dim0": {
      "nested": {
        "path": "activities"
      },
      "aggs": {
        "dim1": {
          "terms": {
            "field": "activities.activity"
          },
          "aggs": {
            "dim_reverse1": {
              "reverse_nested": {},
              "aggs": {
                "avg_duration": {
                  "avg": {
                    "field": "activities.duration"
                  }
                }
              }
            }
          }
        }
      }
    }
  }
}
Result
{
  "took": 2,
  "timed_out": false,
  "_shards": {
    "total": 5,
    "successful": 5,
    "failed": 0
  },
  "hits": {
    "total": 1,
    "max_score": 0.0,
    "hits": []
  },
  "aggregations": {
    "dim0": {
      "doc_count": 3,
      "dim1": {
        "doc_count_error_upper_bound": 0,
        "sum_other_doc_count": 0,
        "buckets": [
          {
            "key": "Prompter v2.3",
            "doc_count": 1,
            "dim_reverse1": {
              "doc_count": 1,
              "avg_duration": {
                "value": 12129.666666666666
              }
            }
          },
          {
            "key": "Start v2.5",
            "doc_count": 1,
            "dim_reverse1": {
              "doc_count": 1,
              "avg_duration": {
                "value": 12129.666666666666
              }
            }
          },
          {
            "key": "System v2.3",
            "doc_count": 1,
            "dim_reverse1": {
              "doc_count": 1,
              "avg_duration": {
                "value": 12129.666666666666
              }
            }
          }
        ]
      }
    }
  }
}
Case 3
Query
{
  "size": 0,
  "aggs": {
    "dim1": {
      "terms": {
        "field": "applicationName"
      },
      "aggs": {
        "avg_duration": {
          "avg": {
            "field": "activities.duration"
          }
        }
      }
    }
  }
}
Result
{
  "took": 2,
  "timed_out": false,
  "_shards": {
    "total": 5,
    "successful": 5,
    "failed": 0
  },
  "hits": {
    "total": 1,
    "max_score": 0.0,
    "hits": []
  },
  "aggregations": {
    "dim1": {
      "doc_count_error_upper_bound": 0,
      "sum_other_doc_count": 0,
      "buckets": [
        {
          "key": "my application name",
          "doc_count": 1,
          "avg_duration": {
            "value": 12129.666666666666
          }
        }
      ]
    }
  }
}
Case 2 includes reverse_nested to get back to the root level
Query
{
  "size": 0,
  "aggs": {
    "dim0": {
      "nested": {
        "path": "activities"
      },
      "aggs": {
        "dim1": {
          "terms": {
            "field": "activities.activity"
          },
          "aggs": {
            "dim_reverse": {
              "reverse_nested": {},
              "aggs": {
                "avg_duration": {
                  "avg": {
                    "field": "duration"
                  }
                }
              }
            }
          }
        }
      }
    }
  }
}
Result:
{
  "took": 2,
  "timed_out": false,
  "_shards": {
    "total": 5,
    "successful": 5,
    "failed": 0
  },
  "hits": {
    "total": 1,
    "max_score": 0.0,
    "hits": []
  },
  "aggregations": {
    "dim0": {
      "doc_count": 3,
      "dim1": {
        "doc_count_error_upper_bound": 0,
        "sum_other_doc_count": 0,
        "buckets": [
          {
            "key": "Prompter v2.3",
            "doc_count": 1,
            "dim_reverse": {
              "doc_count": 1,
              "avg_duration": {
                "value": 37616.0
              }
            }
          },
          {
            "key": "Start v2.5",
            "doc_count": 1,
            "dim_reverse": {
              "doc_count": 1,
              "avg_duration": {
                "value": 37616.0
              }
            }
          },
          {
            "key": "System v2.3",
            "doc_count": 1,
            "dim_reverse": {
              "doc_count": 1,
              "avg_duration": {
                "value": 37616.0
              }
            }
          }
        ]
      }
    }
  }
}
Case 2, without specifying the nested path
Query
{
  "size": 0,
  "aggs": {
    "dim1": {
      "terms": {
        "field": "activities.activity"
      },
      "aggs": {
        "avg_duration": {
          "avg": {
            "field": "duration"
          }
        }
      }
    }
  }
}
Result: identical to the previous one
{
  "took": 2,
  "timed_out": false,
  "_shards": {
    "total": 5,
    "successful": 5,
    "failed": 0
  },
  "hits": {
    "total": 1,
    "max_score": 0.0,
    "hits": []
  },
  "aggregations": {
    "dim1": {
      "doc_count_error_upper_bound": 0,
      "sum_other_doc_count": 0,
      "buckets": [
        {
          "key": "Prompter v2.3",
          "doc_count": 1,
          "avg_duration": {
            "value": 37616.0
          }
        },
        {
          "key": "Start v2.5",
          "doc_count": 1,
          "avg_duration": {
            "value": 37616.0
          }
        },
        {
          "key": "System v2.3",
          "doc_count": 1,
          "avg_duration": {
            "value": 37616.0
          }
        }
      ]
    }
  }
}
Case 2, without specifying reverse_nested: "duration" at the root level is not visible
Query
{
  "size": 0,
  "aggs": {
    "dim0": {
      "nested": {
        "path": "activities"
      },
      "aggs": {
        "dim1": {
          "terms": {
            "field": "activities.activity"
          },
          "aggs": {
            "avg_duration": {
              "avg": {
                "field": "duration"
              }
            }
          }
        }
      }
    }
  }
}
Result
{
  "took": 2,
  "timed_out": false,
  "_shards": {
    "total": 5,
    "successful": 5,
    "failed": 0
  },
  "hits": {
    "total": 1,
    "max_score": 0.0,
    "hits": []
  },
  "aggregations": {
    "dim0": {
      "doc_count": 3,
      "dim1": {
        "doc_count_error_upper_bound": 0,
        "sum_other_doc_count": 0,
        "buckets": [
          {
            "key": "Prompter v2.3",
            "doc_count": 1,
            "avg_duration": {
              "value": null
            }
          },
          {
            "key": "Start v2.5",
            "doc_count": 1,
            "avg_duration": {
              "value": null
            }
          },
          {
            "key": "System v2.3",
            "doc_count": 1,
            "avg_duration": {
              "value": null
            }
          }
        ]
      }
    }
  }
}
