D3 Json to Hardcoded variables - d3.js

I want to turn the fetching of data from JSON to a hardcoded variable. This is the link to the d3 treemap chart: Zoomable Treemap Template.

If you mean you want to put JSON data into a variable, then you would represent this:
[
{
"key": "Asia",
"values": [
{
"key": "India",
"value": 1236670000
},
{
"key": "China",
"value": 1361170000
}]
},
{
"key": "Africa",
"values": [
{
"key": "Nigeria",
"value": 173615000
},
{
"key": "Egypt",
"value": 83661000
}]
}
]
like this:
var x = [
{
"key": "Asia",
"values": [
{
"key": "India",
"value": 1236670000
},
{
"key": "China",
"value": 1361170000
}]
},
{
"key": "Africa",
"values": [
{
"key": "Nigeria",
"value": 173615000
},
{
"key": "Egypt",
"value": 83661000
}]
}
];

Related

Filter on nested field with Elasticsearch

I have 2 entities in my project, users and schedule
I need to create a page on which, in the form of a weekly calendar, I can display all employees and their shift for each day
Example:
https://monosnap.com/file/tEb3rUYNRmredPWOdfxRBTBpqkh36H
For this, I created a new index in which I indexed all employees. Each employee has a nested field, where his shifts are stored
The problem is that I can't figure out aggregations and filters.
I need to
there is always a filter by date that refers to the shift field property.
It doesn't matter if there are suitable shifts or not, we show ALL employees
the following 2 aggregations, user role and type of shift are also displayed.
user role filters the list of employees
type of shift, shows or hides associated shifts
An example of my request
{
"aggs": {
"shifts.ref_type": {
"nested": {
"path": "shifts"
},
"aggs": {
"shifts.ref_type": {
"terms": {
"field": "shifts.ref_type",
"size": 1000
}
}
}
},
"role": {
"terms": {
"field": "role",
"size": 1000
}
},
"name": {
"terms": {
"field": "name",
"size": 1000
}
}
},
"query": {
"bool": {
"must": [
{
"term": {
"_routing": "1"
}
}
],
"should": [
{
"range": {
"shifts.date_from": {
"lte": 1636923600,
"gte": 1636318800
}
}
}
]
}
},
"sort": [
{
"created": "ASC"
}
],
"size": 1
}
Sample response
{
"took": 3,
"timed_out": false,
"_shards": {
"total": 4,
"successful": 4,
"skipped": 0,
"failed": 0
},
"hits": {
"total": {
"value": 36,
"relation": "eq"
},
"max_score": null,
"hits": [
{
"_index": "employee_shift",
"_type": "_doc",
"_id": "a8abf060-25c8-45ee-a50e-02a2e2ad1c40",
"_score": null,
"_routing": "1",
"_source": {
"created": 1633967157,
"type": "user",
"title": null,
"description": "",
"uuid": "a8abf060-25c8-45ee-a50e-02a2e2ad1c40",
"author": "System",
"author:name": "System",
"author:role": "",
"acc": 1,
"property": [
1
],
"status": "Enabled",
"class": [
""
],
"weight": "",
"tags": [],
"language": "en",
"ref_source_id": null,
"ref_source_helper": null,
"ref_property": [
"test hostel2"
],
"ref_property_default": "test hostel2",
"name": "Housekeeper 1",
"role": [
"Housekeeper"
],
"role:weight": "2",
"role:id": [
5
],
"pay_rate": null,
"experience": null,
"supervisor": null,
"gender": null,
"units": [
"102",
"103",
"106",
"107",
"110",
"111",
"116",
"117",
"120",
"121",
"124",
"125",
"128",
"129",
"132",
"133",
"136",
"137"
],
"task_inspection": "All tasks",
"shifts": [
{
"uuid": "f48ae398-0668-4693-b335-2fee3baa2941",
"ref_type": "Work",
"ref_type:color": "",
"date_from": "1635196500",
"date_to": "1635197400",
"notes": null
},
{
"uuid": "8b4d8148-2583-4ccf-a1cc-ae5e6d1e728e",
"ref_type": "Work",
"ref_type:color": "",
"date_from": "1635287400",
"date_to": "1635289200",
"notes": null
},
{
"uuid": "3f5520d8-8108-4abd-8e2a-70c00faf6994",
"ref_type": "Work",
"ref_type:color": "",
"date_from": "1635369300",
"date_to": "1635373800",
"notes": null
},
{
"uuid": "d4009660-447c-47de-b0f3-3c1f2d8d8f99",
"ref_type": "Work",
"ref_type:color": "",
"date_from": "1635286500",
"date_to": "1635288300",
"notes": null
},
{
"uuid": "b3d883f0-b71f-4df7-bb63-a50f137528a4",
"ref_type": "Work",
"ref_type:color": "",
"date_from": "1635370200",
"date_to": "1635372900",
"notes": null
}
]
},
"sort": [
1633967157000
]
}
]
},
"aggregations": {
"role": {
"doc_count_error_upper_bound": 0,
"sum_other_doc_count": 0,
"buckets": [
{
"key": "Houseman",
"doc_count": 4
},
{
"key": "Maintenance",
"doc_count": 4
},
{
"key": "Supervisor",
"doc_count": 4
},
{
"key": "Supervisor HSKP",
"doc_count": 4
},
{
"key": "Supervisor Maintenance",
"doc_count": 4
},
{
"key": "Administrator",
"doc_count": 3
},
{
"key": "Concierge dispatcher",
"doc_count": 3
},
{
"key": "Frontdesk",
"doc_count": 3
},
{
"key": "General manager",
"doc_count": 3
},
{
"key": "HKeeper",
"doc_count": 3
},
{
"key": "Housekeeper",
"doc_count": 3
},
{
"key": "Manager",
"doc_count": 3
}
]
},
"shifts.ref_type": {
"doc_count": 21,
"shifts.ref_type": {
"doc_count_error_upper_bound": 0,
"sum_other_doc_count": 0,
"buckets": [
{
"key": "Work",
"doc_count": 19
},
{
"key": "test",
"doc_count": 2
}
]
}
},
"name": {
"doc_count_error_upper_bound": 0,
"sum_other_doc_count": 0,
"buckets": [
{
"key": "Administrator 123",
"doc_count": 1
},
{
"key": "Administrator 223",
"doc_count": 1
},
{
"key": "Administrator 3",
"doc_count": 1
},
{
"key": "Concierge dispatcher 1",
"doc_count": 1
},
{
"key": "Concierge dispatcher 2",
"doc_count": 1
},
{
"key": "Concierge dispatcher 3",
"doc_count": 1
},
{
"key": "Frontdesk 1",
"doc_count": 1
},
{
"key": "Frontdesk 2",
"doc_count": 1
},
{
"key": "Frontdesk 3",
"doc_count": 1
},
{
"key": "General manager 1",
"doc_count": 1
},
{
"key": "General manager 2",
"doc_count": 1
},
{
"key": "General manager 3",
"doc_count": 1
},
{
"key": "HKeeper 1",
"doc_count": 1
},
{
"key": "HKeeper 2",
"doc_count": 1
},
{
"key": "HKeeper 3",
"doc_count": 1
},
{
"key": "Housekeeper 1",
"doc_count": 1
},
{
"key": "Housekeeper 2",
"doc_count": 1
},
{
"key": "Housekeeper 3",
"doc_count": 1
},
{
"key": "Houseman 1",
"doc_count": 1
},
{
"key": "Houseman 2",
"doc_count": 1
},
{
"key": "Houseman 3",
"doc_count": 1
},
{
"key": "Maintenance 1",
"doc_count": 1
},
{
"key": "Maintenance 2",
"doc_count": 1
},
{
"key": "Maintenance 3",
"doc_count": 1
},
{
"key": "Manager 1222",
"doc_count": 1
},
{
"key": "Manager 2",
"doc_count": 1
},
{
"key": "Manager 3",
"doc_count": 1
},
{
"key": "Supervisor 1",
"doc_count": 1
},
{
"key": "Supervisor 2",
"doc_count": 1
},
{
"key": "Supervisor 3",
"doc_count": 1
},
{
"key": "Supervisor HSKP 1",
"doc_count": 1
},
{
"key": "Supervisor HSKP 2",
"doc_count": 1
},
{
"key": "Supervisor HSKP 3",
"doc_count": 1
},
{
"key": "Supervisor Maintenance 1",
"doc_count": 1
},
{
"key": "Supervisor Maintenance 2",
"doc_count": 1
},
{
"key": "Supervisor Maintenance 3",
"doc_count": 1
}
]
}
}
}
At the moment, everything seems to be working correctly, except for one point. Aggregation by the type of shift ALWAYS outputs data, even though no matching shifts should be found by the date filter.
any advice? thank you
I'm making the answer based on the assumption from my comment:
you want your query to return all employees
you want the ref_type aggregation to only include shifts matching your date range
you want the "shifts" collections under your results to likewise only include shifts matching your date range
Apologies if I misunderstood your question.
One thing to get out of the way first, though you may have been aware: the should part of your query is not restricting the results, it's only affecting the score, since you already have a must.
As a corollary, it's not going to affect the aggregated results. Instead, for that you need to use a filter aggregation:
"aggs": {
"shifts.ref_type": {
"nested": {
"path": "shifts"
},
"aggs": {
"shifts.ref_type": {
"filter": {
"range": {
"shifts.date_from": {
"gte": 1635370100,
"lte": 1635370300
}
}
},
"aggs": {
"shifts.ref_type": {
"terms": {
"field": "shifts.ref_type",
"size": 1000
}
}
}
}
}
},
This is going to get you the filtered counts in your aggregation, but that still won't filter your results - you'll get all the "shifts" in your hits. So a thing to be aware of with nested documents, your query is going to restrict the documents that are returned, based on the matches in the nested documents, but it's not going to actually filter out the nested documents that did not match. In order to do that you have another feature, nested inner hits, which lets you figure out exactly which nested documents matched. It's still not enough in your case since you don't want to filter out the results entirely (so even if there is no "inner hit" you still want to return the document, or at least have it be part of the aggregation). So now you have yet another feature, post-filter, which you can use to filter the documents after they have been aggregated. Taking the 2 together:
"post_filter": {
"nested": {
"path": "shifts",
"query": {
"bool": {
"must": [
{
"range": {
"shifts.date_from": {
"lte": 635370200,
"gte": 635370200
}
}
}
]
}
},
"inner_hits": {}
}
},
If you now set _source: false, the hits won't include the full documents, and you just get the shifts that matched (and then you still have the employee name and roles in your aggregation results):
{
"took": 3,
"timed_out": false,
"_shards": {
"total": 1,
"successful": 1,
"skipped": 0,
"failed": 0
},
"hits": {
"total": {
"value": 1,
"relation": "eq"
},
"max_score": 0.08701137,
"hits": [
{
"_index": "employee_shift",
"_type": "_doc",
"_id": "-tRnLn0B5PjpsgKgGXlB",
"_score": 0.08701137,
"inner_hits": {
"shifts": {
"hits": {
"total": {
"value": 1,
"relation": "eq"
},
"max_score": 1,
"hits": [
{
"_index": "employee_shift",
"_type": "_doc",
"_id": "-tRnLn0B5PjpsgKgGXlB",
"_nested": {
"field": "shifts",
"offset": 4
},
"_score": 1,
"_source": {
"notes": null,
"ref_type:color": "",
"date_to": 635372900,
"ref_type": "Work",
"uuid": "b3d883f0-b71f-4df7-bb63-a50f137528a4",
"date_from": 635370200
}
}
]
}
}
}
}
]
},
"aggregations": {
"role": {
"doc_count_error_upper_bound": 0,
"sum_other_doc_count": 0,
"buckets": [
{
"key": "Housekeeper",
"doc_count": 5
}
]
},
"shifts.ref_type": {
"doc_count": 25,
"shifts.ref_type": {
"doc_count": 4,
"shifts.ref_type": {
"doc_count_error_upper_bound": 0,
"sum_other_doc_count": 0,
"buckets": [
{
"key": "Work",
"doc_count": 3
},
{
"key": "Work2",
"doc_count": 1
}
]
}
}
},
"name": {
"doc_count_error_upper_bound": 0,
"sum_other_doc_count": 0,
"buckets": [
{
"key": "Housekeeper 4",
"doc_count": 2
},
{
"key": "Housekeeper 1",
"doc_count": 1
},
{
"key": "Housekeeper 2",
"doc_count": 1
},
{
"key": "Housekeeper 3",
"doc_count": 1
}
]
}
}
}

Using Vega with Elasticsearch data containing nested aggregations (or dividing one aggregation by another in Elasticsearch)

I'm trying to do something with Elasticsearch that should be quite simple. I have an index which contains documents of the shape: {"timestamp": int, "pricePerUnit": int, "units": int}. I want to visualize the average price over time in a histogram. Note that I don't want the average of the "pricePerUnit", I want the average price paid per unit, which means finding the total value in each time bucket by multiplying the "pricePerUnit" by the "units" for each document, and summing the total value sold in each document, then dividing by the sum of the total units sold in the time bucket to get the average price paid per unit. A standard Kibana line chart won't work. I can get the average "pricePerUnit * units", but can't divide this aggregation by the sum of the total units. Also can't be done in TSVB, as this doesn't allow for scripts/scripted fields. Can't use timelion, because the "timestamp" field isn't a time field (I know, but there's nothing I can do about it). I'm therefore trying to use Vega. However, I'm running into a problem with nested aggregations. Here's the ES query I'm running:
{
"$schema": "https://vega.github.io/schema/vega/v3.json",
"data": {
"name": "vals",
"url": {
"index": "index_name",
"body": {
"aggs": {
"2": {
"histogram": {
"field": "timestamp",
"interval": 2000,
"min_doc_count": 1
},
"aggs": {
"1": {
"avg": {
"field": "pricePerUnit",
"script": {
"inline": "doc['pricePerUnit'].value * doc['units'].value",
"lang": "painless"
}
}
}
}
}
},
"size": 0,
"stored_fields": [
"*"
],
"script_fields": {
"spend": {
"script": {
"source": "doc['pricePerUnit'].value * doc['units'].value",
"lang": "painless"
}
}
},
"docvalue_fields": [],
"_source": {
"excludes": []
},
"query": {
"bool": {
"must": [],
"filter": [
{
"match_all": {}
},
{
"range": {
"timeslot.startTime": {
"gte": 1621292400,
"lt": 1621428349
}
}
}
],
"should": [],
"must_not": []
}
}
},
"format": {"property": "aggregations.2.buckets"}
}
}
,
"scales": [
{
"name": "yscale",
"type": "linear",
"zero": true,
"domain": {"data": "vals", "field": "1.value"},
"range": "height"
},
{
"name": "xscale",
"type": "time",
"range": "width"
}
],
"axes": [
{"scale": "yscale", "orient": "left"},
{"scale": "xscale", "orient": "bottom"}
],
"marks": [
{
"type": "line",
"encode": {
"update": {
"x": {"scale": "xscale", "field": "key"},
"y": {"scale": "yscale", "field": "1.value"}
}
}
}
]
}
It gives me the following result set:
"took": 1,
"timed_out": false,
"_shards": {
"total": 4,
"successful": 4,
"skipped": 0,
"failed": 0
},
"hits": {
"total": 401,
"max_score": null,
"hits": []
},
"aggregations": {
"2": {
"buckets": [
{
"1": {
"value": 86340
},
"key": 1621316000,
"doc_count": 7
},
{
"1": {
"value": 231592.92307692306
},
"key": 1621318000,
"doc_count": 13
},
{
"1": {
"value": 450529.23529411765
},
"key": 1621320000,
"doc_count": 17
},
{
"1": {
"value": 956080.0555555555
},
"key": 1621322000,
"doc_count": 18
},
{
"1": {
"value": 1199865.5714285714
},
"key": 1621324000,
"doc_count": 14
},
{
"1": {
"value": 875300.7368421053
},
"key": 1621326000,
"doc_count": 19
},
{
"1": {
"value": 926738.8
},
"key": 1621328000,
"doc_count": 20
},
{
"1": {
"value": 3239475.3333333335
},
"key": 1621330000,
"doc_count": 18
},
{
"1": {
"value": 3798063.714285714
},
"key": 1621332000,
"doc_count": 21
},
{
"1": {
"value": 482089.5
},
"key": 1621334000,
"doc_count": 4
},
{
"1": {
"value": 222952.33333333334
},
"key": 1621336000,
"doc_count": 12
},
{
"1": {
"value": 742225.75
},
"key": 1621338000,
"doc_count": 8
},
{
"1": {
"value": 204203.25
},
"key": 1621340000,
"doc_count": 4
},
{
"1": {
"value": 294886
},
"key": 1621342000,
"doc_count": 4
},
{
"1": {
"value": 284393.75
},
"key": 1621344000,
"doc_count": 4
},
{
"1": {
"value": 462800.5
},
"key": 1621346000,
"doc_count": 4
},
{
"1": {
"value": 233321.2
},
"key": 1621348000,
"doc_count": 5
},
{
"1": {
"value": 436757.8
},
"key": 1621350000,
"doc_count": 5
},
{
"1": {
"value": 4569021
},
"key": 1621352000,
"doc_count": 1
},
{
"1": {
"value": 368489.5
},
"key": 1621354000,
"doc_count": 4
},
{
"1": {
"value": 208359.4
},
"key": 1621356000,
"doc_count": 5
},
{
"1": {
"value": 7827146.375
},
"key": 1621358000,
"doc_count": 8
},
{
"1": {
"value": 63873.5
},
"key": 1621360000,
"doc_count": 6
},
{
"1": {
"value": 21300
},
"key": 1621364000,
"doc_count": 1
},
{
"1": {
"value": 138500
},
"key": 1621366000,
"doc_count": 2
},
{
"1": {
"value": 5872400
},
"key": 1621372000,
"doc_count": 1
},
{
"1": {
"value": 720200
},
"key": 1621374000,
"doc_count": 1
},
{
"1": {
"value": 208634.33333333334
},
"key": 1621402000,
"doc_count": 3
},
{
"1": {
"value": 306248.5
},
"key": 1621404000,
"doc_count": 10
},
{
"1": {
"value": 328983.77777777775
},
"key": 1621406000,
"doc_count": 18
},
{
"1": {
"value": 1081724
},
"key": 1621408000,
"doc_count": 10
},
{
"1": {
"value": 2451076.785714286
},
"key": 1621410000,
"doc_count": 14
},
{
"1": {
"value": 1952910.2857142857
},
"key": 1621412000,
"doc_count": 14
},
{
"1": {
"value": 2294818.1875
},
"key": 1621414000,
"doc_count": 16
},
{
"1": {
"value": 2841910.388888889
},
"key": 1621416000,
"doc_count": 18
},
{
"1": {
"value": 2401278.9523809524
},
"key": 1621418000,
"doc_count": 21
},
{
"1": {
"value": 4311845.4
},
"key": 1621420000,
"doc_count": 5
},
{
"1": {
"value": 617102.5333333333
},
"key": 1621422000,
"doc_count": 15
},
{
"1": {
"value": 590469.7142857143
},
"key": 1621424000,
"doc_count": 14
},
{
"1": {
"value": 391918.85714285716
},
"key": 1621426000,
"doc_count": 14
},
{
"1": {
"value": 202163.66666666666
},
"key": 1621428000,
"doc_count": 3
}
]
}
}
}
The problem is that I can't extract the "value" field from the "1" sub-aggregation. I've tried using a flatten transform, but it doesn't seem to work. If anyone can either:
a) Tell me how to solve this specific problem with Vega; or
b) Tell me another way to solve my original problem
I'd be much obliged!
Your DSL query is looking great. If I've read this correctly I believe what you are looking for is a project transform. This can make life a lot easier when dealing with nested variables, as there are certain instances where they just don't function as expected.
You also need to reference data within marks otherwise it will plot nothing.
Below is how to fix this, you'll just need to add your url parameter in.
{
$schema: https://vega.github.io/schema/vega/v3.json
data: [
{
name: vals
url: ... // fill this in
transform: [
{
type: project
fields: [
1.value
doc_count
key
]
as: [
val
doc_count
key
]
}
]
}
]
scales: [
{
name: yscale
type: linear
zero: true
domain: {
data: vals
field: val
}
range: height
}
{
name: xscale
type: time
domain: {
data: vals
field: key
}
range: width
}
]
axes: [
{
scale: yscale
orient: left
}
{
scale: xscale
orient: bottom
}
]
marks: [
{
type: line
from: {
data: vals
}
encode: {
update: {
x: {
scale: xscale
field: key
}
y: {
scale: yscale
field: val
}
}
}
}
]
}
In future if you are having issues, look at the examples found on the Vega Gallery. They also have extensive documentation. These two combined is all you need.

date.getHourOfDay() is giving strange results in aggregation

I am indexing some events and trying to get unique hours, but the terms aggregation is giving a weird response. I have the following query.
{
"size": 0,
"query": {
"bool": {
"must": [
{
"terms": {
"City": [
"Chicago"
]
}
},
{
"range": {
"eventDate": {
"gte": "2018-06-22",
"lte": "2018-06-22"
}
}
}
]
}
},
"aggs": {
"Hours": {
"terms": {
"script": "doc['eventDate'].date.getHourOfDay()"
}
}
}
}
This query produces following response.
"buckets": [
{
"key": "19",
"doc_count": 12
},
{
"key": "9",
"doc_count": 7
},
{
"key": "15",
"doc_count": 4
},
{
"key": "16",
"doc_count": 4
},
{
"key": "20",
"doc_count": 4
},
{
"key": "12",
"doc_count": 2
},
{
"key": "6",
"doc_count": 2
},
{
"key": "8",
"doc_count": 2
},
{
"key": "10",
"doc_count": 1
},
{
"key": "11",
"doc_count": 1
}
]
Now I changed the range to get the events for past one month
{
"range": {
"eventDate": {
"gte": "2018-05-22",
"lte": "2018-06-22"
}
}
}
and the response I got was
"Hours": {
"doc_count_error_upper_bound": 0,
"sum_other_doc_count": 1319,
"buckets": [
{
"key": "22",
"doc_count": 805
},
{
"key": "14",
"doc_count": 370
},
{
"key": "15",
"doc_count": 250
},
{
"key": "21",
"doc_count": 248
},
{
"key": "16",
"doc_count": 195
},
{
"key": "0",
"doc_count": 191
},
{
"key": "13",
"doc_count": 176
},
{
"key": "3",
"doc_count": 168
},
{
"key": "20",
"doc_count": 159
},
{
"key": "11",
"doc_count": 148
}
]
}
As you can see I got buckets with key 6,8,9,10 and 12 in the response of first query but not in the second query which is very strange as documents returned by first query is a small subset of the second query. Is this a bug or am I missing something obvious?
Thanks

VEGA Kibana - multi view chart FLATTEN Transformation

I am having a hard time figuring out how to build a multiview chart in Vega using aggregated/nested.
I believe it is related to the way I am trying to transform my data to use in Vega multi view chart. Can anyone give me a hand to understand this?
I know flatten transformation is working as debug shows
But what I get is this
This is what I would like to achieve
This is my schema to build the chart
{
"$schema": "https://vega.github.io/schema/vega/v3.json",
"width": 400,
"height": 200,
"padding": 5,
"data": [
{
"name": "source",
"values": {
"aggregations": {
"order_labels": {
"buckets": [
{
"key": "USD/CAD",
"doc_count": 1,
"orders": {
"doc_count": 40,
"orders_id": {
"buckets": [
{
"key": 5241,
"doc_count": 1,
"orders_price": {"value": 0.01991}
},
{
"key": 5242,
"doc_count": 1,
"orders_price": {"value": 0.02021}
}
]
}
}
},
{
"key": "CAD/COD",
"doc_count": 1,
"orders": {
"doc_count": 40,
"orders_id": {
"buckets": [
{
"key": 5041,
"doc_count": 1,
"orders_price": {"value": 0.00002953}
},
{
"key": 5042,
"doc_count": 1,
"orders_price": {"value": 0.00002971}
}
]
}
}
}
]
}
}
},
"format": {"property": "aggregations.order_labels.buckets"},
"transform": [
{
"type": "flatten",
"fields": ["orders.orders_id.buckets"],
"as": ["orders"]
}
]
}
],
"mark": "bar",
"encoding": {
"row": {
"field": "orders.key",
"type": "ordinal"
},
"x": {
"aggregate": "sum",
"field": "orders.orders_price.value",
"type": "quantitative",
"scale": { "zero": false }
},
"y": {
"field": "key",
"type": "ordinal",
"scale": { "rangeStep": 12 }
}
}
}
I did many things but can't understand what is wrong with it
Kibana 6.4 supports flatten transform for sure. I think it was also supported in 6.3.

Why am I getting NaN from an elasticsearch aggregate query?

In the query below, occasionally I receive a "NaN" response (see the response below the query).
I'm assuming that, occasionally, some invalid data gets in to the "amount" field (the one being aggregated). If that is a valid assumption, how can I find those documents with the invalid "amount" fields so I can troubleshoot them?
If that's not a valid assumption, how do I troubleshoot the occasional "NaN" value being returned?
REQUEST:
POST /_msearch
{
"search_type": "query_then_fetch",
"ignore_unavailable": true,
"index": [
"view-2017-10-22",
"view-2017-10-23"
]
}
{
"size": 0,
"query": {
"bool": {
"filter": [
{
"range": {
"handling-time": {
"gte": "1508706273585",
"lte": "1508792673586",
"format": "epoch_millis"
}
}
},
{
"query_string": {
"analyze_wildcard": true,
"query": "+page:\"checkout order confirmation\" +pageType:\"d\""
}
}
]
}
},
"aggs": {
"2": {
"date_histogram": {
"interval": "1h",
"field": "time",
"min_doc_count": 0,
"extended_bounds": {
"min": "1508706273585",
"max": "1508792673586"
},
"format": "epoch_millis"
},
"aggs": {
"1": {
"sum": {
"field": "amount"
}
}
}
}
}
}
RESPONSE:
{
"responses": [
{
"took": 12,
"timed_out": false,
"_shards": {
"total": 10,
"successful": 10,
"failed": 0
},
"hits": {
"total": 44587,
"max_score": 0,
"hits": []
},
"aggregations": {
"2": {
"buckets": [
{
"1": {
"value": "NaN"
},
"key_as_string": "1508706000000",
"key": 1508706000000,
"doc_count": 2915
},
{
"1": {
"value": 300203.74
},
"key_as_string": "1508709600000",
"key": 1508709600000,
"doc_count": 2851
},
{
"1": {
"value": 348139.5600000001
},
"key_as_string": "1508713200000",
"key": 1508713200000,
"doc_count": 3197
},
{
"1": {
"value": "NaN"
},
"key_as_string": "1508716800000",
"key": 1508716800000,
"doc_count": 3449
},
{
"1": {
"value": "NaN"
},
"key_as_string": "1508720400000",
"key": 1508720400000,
"doc_count": 3482
},
{
"1": {
"value": 364449.60999999987
},
"key_as_string": "1508724000000",
"key": 1508724000000,
"doc_count": 3103
},
{
"1": {
"value": 334914.68
},
"key_as_string": "1508727600000",
"key": 1508727600000,
"doc_count": 2722
},
{
"1": {
"value": 315368.09000000014
},
"key_as_string": "1508731200000",
"key": 1508731200000,
"doc_count": 2161
},
{
"1": {
"value": 102244.34
},
"key_as_string": "1508734800000",
"key": 1508734800000,
"doc_count": 742
},
{
"1": {
"value": 37178.63
},
"key_as_string": "1508738400000",
"key": 1508738400000,
"doc_count": 333
},
{
"1": {
"value": 25345.68
},
"key_as_string": "1508742000000",
"key": 1508742000000,
"doc_count": 233
},
{
"1": {
"value": 85454.47000000002
},
"key_as_string": "1508745600000",
"key": 1508745600000,
"doc_count": 477
},
{
"1": {
"value": 24102.719999999994
},
"key_as_string": "1508749200000",
"key": 1508749200000,
"doc_count": 195
},
{
"1": {
"value": 23352.309999999994
},
"key_as_string": "1508752800000",
"key": 1508752800000,
"doc_count": 294
},
{
"1": {
"value": 44353.409999999996
},
"key_as_string": "1508756400000",
"key": 1508756400000,
"doc_count": 450
},
{
"1": {
"value": 80129.89999999998
},
"key_as_string": "1508760000000",
"key": 1508760000000,
"doc_count": 867
},
{
"1": {
"value": 122797.11
},
"key_as_string": "1508763600000",
"key": 1508763600000,
"doc_count": 1330
},
{
"1": {
"value": 157442.29000000004
},
"key_as_string": "1508767200000",
"key": 1508767200000,
"doc_count": 1872
},
{
"1": {
"value": 198831.71
},
"key_as_string": "1508770800000",
"key": 1508770800000,
"doc_count": 2251
},
{
"1": {
"value": 218384.08000000002
},
"key_as_string": "1508774400000",
"key": 1508774400000,
"doc_count": 2305
},
{
"1": {
"value": 229829.22000000006
},
"key_as_string": "1508778000000",
"key": 1508778000000,
"doc_count": 2381
},
{
"1": {
"value": 217157.56000000006
},
"key_as_string": "1508781600000",
"key": 1508781600000,
"doc_count": 2433
},
{
"1": {
"value": 208877.13
},
"key_as_string": "1508785200000",
"key": 1508785200000,
"doc_count": 2223
},
{
"1": {
"value": "NaN"
},
"key_as_string": "1508788800000",
"key": 1508788800000,
"doc_count": 2166
},
{
"1": {
"value": 18268.14
},
"key_as_string": "1508792400000",
"key": 1508792400000,
"doc_count": 155
}
]
}
},
"status": 200
}
]
}
You can do a search for <fieldName>:NaN (on numeric fields) to find numbers that are set to NaN.
Obviously, once you find those, you can either fix the root cause of the field being set to NaN, or you can exclude those records from the aggregation by adding a -<fieldName>:NaN to the query.
(It turns out that the input was feeding in some garbage characters once in every few million documents.)

Resources