I am trying to compare hours using the Painless language in my Elasticsearch query. I would like to query something like:
{
"script":"doc['schedule.from_time'] >= doc['schedule.to_time']"
}
But I get this error:
Cannot apply [>] operation to types [org.elasticsearch.index.fielddata.ScriptDocValues.Dates]
The schema of the nested document is:
{
"settings": {
"index.mapping.total_fields.limit": 10000
},
"mappings": {
"_doc": {
"dynamic_templates": [{
"integers": {
"match_mapping_type": "long",
"mapping": {
"type": "long",
"index": false
}
}
}],
"properties": {
"enabled_services": {
"type": "nested",
"properties": {
"service_id": {
"type": "text",
"analyzer": "whitespace",
"search_analyzer": "whitespace"
},
"available_day_of_week": {
"type": "long"
},
"available_from_time": {
"type": "date",
"format": "hour_minute"
},
"available_to_time": {
"type": "date",
"format": "hour_minute"
}
}
}
}
}
}
}
(The values are formatted like "2:00" or "18:00".)
I have tried using .date and .value, but neither works because my field contains only hours, not a full datetime.
Can someone help me :)
I think you are looking for:
doc['enabled_services.available_from_time'].value.isAfter(doc['enabled_services.available_to_time'].value)
You also need to remove "type": "nested" from your mapping; I don't think it is needed in your case.
Working code is below:
Mapping
PUT /painless-dates
{
"settings": {
"index.mapping.total_fields.limit": 10000
},
"mappings": {
"_doc": {
"dynamic_templates": [
{
"integers": {
"match_mapping_type": "long",
"mapping": {
"type": "long",
"index": false
}
}
}
],
"properties": {
"enabled_services": {
"properties": {
"service_id": {
"type": "text",
"analyzer": "whitespace",
"search_analyzer": "whitespace"
},
"available_day_of_week": {
"type": "long"
},
"available_from_time": {
"type": "date",
"format": "hour_minute"
},
"available_to_time": {
"type": "date",
"format": "hour_minute"
}
}
}
}
}
}
}
Add two documents
POST /painless-dates/_doc
{
"enabled_services": {
"available_from_time": "02:00",
"available_to_time": "18:00"
}
}
POST /painless-dates/_doc
{
"enabled_services": {
"available_from_time": "04:00",
"available_to_time": "03:00"
}
}
Query
GET /painless-dates/_search
{
"query": {
"bool": {
"must": {
"script": {
"script": {
"source": "doc['enabled_services.available_from_time'].value.isAfter(doc['enabled_services.available_to_time'].value)",
"lang": "painless"
}
}
}
}
}
}
Response
{
"took": 8,
"timed_out": false,
"_shards": {
"total": 5,
"successful": 5,
"skipped": 0,
"failed": 0
},
"hits": {
"total": 1,
"max_score": 1,
"hits": [
{
"_index": "painless-dates",
"_type": "_doc",
"_id": "0wFw7mYBueYINcTmJsMG",
"_score": 1,
"_source": {
"enabled_services": {
"available_from_time": "04:00",
"available_to_time": "03:00"
}
}
}
]
}
}
OK, I found the answer:
{
"script": {
"script": "doc['enabled_services.available_from_time'].date.isBefore(doc['enabled_services.available_to_time'].date)"
}
}
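For context, here is a minimal sketch (my addition, not from the thread) of how that fragment can sit in a full search request, assuming an index like the painless-dates one above but with the original "type": "nested" kept, in which case the script query has to be wrapped in a nested query so it runs in the nested context:
GET /painless-dates/_search
{
  "query": {
    "nested": {
      "path": "enabled_services",
      "query": {
        "script": {
          "script": {
            "source": "doc['enabled_services.available_from_time'].date.isBefore(doc['enabled_services.available_to_time'].date)",
            "lang": "painless"
          }
        }
      }
    }
  }
}
Note that isBefore matches schedules whose from_time precedes to_time, which is the opposite set from the isAfter check in the answer above.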
Thank you all!
Related
I have an Elasticsearch index with multiple deep-level nestings. I am performing a terms aggregation on one deep-level field and want to fetch all the records for another associated deep-level field, for which I am using the top_hits aggregation. But my query returns an "index_out_of_bounds" exception.
Here are the index mappings:
{
"mappings": {
"type": {
"properties": {
"campaigns": {
"type": "nested",
"properties": {
"campaign_id": {
"type": "integer"
},
"campaign_name": {
"type": "text"
},
"contents": {
"type": "nested",
"properties": {
"content_id": {
"type": "integer"
},
"content_name": {
"type": "text",
"fielddata": true
}
}
}
}
},
"forms": {
"type": "nested",
"properties": {
"form_id": {
"type": "integer"
},
"form_issubmitted": {
"type": "integer"
},
"form_name": {
"type": "text"
},
"form_tabs": {
"type": "nested",
"properties": {
"tab_id": {
"type": "integer"
},
"tab_name": {
"type": "text"
},
"tab_section": {
"properties": {
"section_id": {
"type": "long"
},
"section_name": {
"type": "text",
"fields": {
"keyword": {
"type": "keyword",
"ignore_above": 256
}
}
}
}
}
}
}
}
}
}
}
}
}
and my query looks like this:
{
"size": 0,
"aggs": {
"sectionAgg": {
"nested": {
"path": "forms.form_tabs.tab_sections"
},
"aggs": {
"termsField": {
"filter": {
"bool": {}
},
"aggs": {
"sectionFields": {
"terms": {
"field": "forms.form_tabs.tab_sections.section_id",
"size": 10000
},
"aggs": {
"sectionFieldDocs": {
"top_hits": {
"size": 1,
"_source": [
"forms.form_tabs.tab_sections.*"
]
}
},
"completioncampaigns.contentsFields": {
"reverse_nested": {
"path": "campaigns.contents"
},
"aggs": {
"completionFieldFilter": {
"filter": {
"bool": {}
},
"aggs": {
"campaignContents": {
"top_hits": {
"size": 100,
"_source": [
"campaigns.contents.*"
]
}
}
}
}
}
}
}
}
}
}
}
}
}
}
I'm not pasting the whole result here since it would be very long, but it does include aggregation data as well.
And it throws an error like this:
{
"took": 178,
"timed_out": false,
"_shards": {
"total": 5,
"successful": 2,
"skipped": 0,
"failed": 3,
"failures": [
{
"shard": 1,
"index": "userlocal",
"node": "KmxVF34iTXWeLFLmtCy7WQ",
"reason": {
"type": "index_out_of_bounds_exception",
"reason": "2147483647 is out of bounds: [0-803["
}
},
{
"shard": 2,
"index": "userlocal",
"node": "KmxVF34iTXWeLFLmtCy7WQ",
"reason": {
"type": "index_out_of_bounds_exception",
"reason": "2147483647 is out of bounds: [0-1278["
}
},
{
"shard": 4,
"index": "userlocal",
"node": "KmxVF34iTXWeLFLmtCy7WQ",
"reason": {
"type": "index_out_of_bounds_exception",
"reason": "2147483647 is out of bounds: [0-2659["
}
}
]
}
}
I want to know why this error is happening and how to resolve it.
I have written the query below in Elasticsearch to sum the len_err field where app_name is 9 and the timestamp falls within the range specified in the query.
GET features-1/_search
{
"query": {
"match": {
"app_name": 9
}
},
"post_filter": {
"range": {
"timestamp": {
"gte": "2018-07-21T09:14:12Z",
"lte": "2018-07-21T09:14:20Z"
}
}
},
"aggs": {
"time_filter":{
"filter": {
"range": {
"timestamp": {
"gte": "2018-07-21T09:14:12Z",
"lte": "2018-07-21T09:14:20Z"
}
}
},
"aggs": {
"cont_err": {
"sum": {
"field": "len_err"
}
}
}
}
}
}
but the result that I am getting is:
{
"took": 36,
"timed_out": false,
"_shards": {
"total": 1,
"successful": 1,
"skipped": 0,
"failed": 0
},
"hits": {
"total": 0,
"max_score": null,
"hits": []
},
"aggregations": {
"time_filter": {
"doc_count": 0,
"cont_err": {
"value": 0
}
}
}
}
but when I check the logs, I see that I do have entries matching this filter criteria.
One sample doc that should match
Mapping below:
{
"features-1": {
"mappings": {
"log": {
"properties": {
"app_name": {
"type": "long"
},
"len_err": {
"type": "long"
},
"len_msg": {
"type": "long"
},
"severity": {
"type": "long"
},
"source": {
"properties": {
"docker": {
"properties": {
"container_id": {
"type": "text",
"fields": {
"keyword": {
"type": "keyword",
"ignore_above": 256
}
}
}
}
},
"edge_id": {
"type": "long"
},
"kubernetes": {
"properties": {
"container_name": {
"type": "long"
},
"host": {
"type": "text",
"fields": {
"keyword": {
"type": "keyword",
"ignore_above": 256
}
}
}
}
},
"level": {
"type": "long"
},
"log": {
"type": "text",
"fields": {
"keyword": {
"type": "keyword",
"ignore_above": 256
}
}
},
"log_field": {
"properties": {
"LogLevel": {
"type": "long"
},
"charging": {
"type": "long"
}
}
}
}
},
"timestamp": {
"type": "date"
}
}
}
}
}
}
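As a first diagnostic step (a sketch added here, not from the original thread), you could run the range clause on its own; if this also returns zero hits, the stored timestamp values or their timezone are the likely culprit rather than the aggregation setup:
GET features-1/_search
{
  "query": {
    "range": {
      "timestamp": {
        "gte": "2018-07-21T09:14:12Z",
        "lte": "2018-07-21T09:14:20Z"
      }
    }
  }
}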
I have my analyzers set like this:
"analyzer": {
"edgeNgram_autocomplete": {
"type": "custom",
"tokenizer": "standard",
"filter": ["lowercase", "autocomplete"]
},
"full_name": {
"filter":["standard","lowercase","asciifolding"],
"type":"custom",
"tokenizer":"standard"
}
}
My filter:
"filter": {
"autocomplete": {
"type": "edgeNGram",
"side":"front",
"min_gram": 1,
"max_gram": 50
}
}
Name field analyzer:
"textbox": {
"_parent": {
"type": "document"
},
"properties": {
"text": {
"fields": {
"text": {
"type":"string",
"analyzer":"full_name"
},
"autocomplete": {
"type": "string",
"index_analyzer": "edgeNgram_autocomplete",
"search_analyzer": "full_name",
"analyzer": "full_name"
}
},
"type":"multi_field"
}
}
}
Put all together, this makes up my mapping for the docstore index:
PUT http://localhost:9200/docstore
{
"settings": {
"analysis": {
"analyzer": {
"edgeNgram_autocomplete": {
"type": "custom",
"tokenizer": "standard",
"filter": ["lowercase", "autocomplete"]
},
"full_name": {
"filter":["standard","lowercase","asciifolding"],
"type":"custom",
"tokenizer":"standard"
}
},
"filter": {
"autocomplete": {
"type": "edgeNGram",
"side":"front",
"min_gram": 1,
"max_gram": 50
} }
}
},
"mappings": {
"space": {
"properties": {
"name": {
"type": "string",
"index": "not_analyzed"
}
}
},
"document": {
"_parent": {
"type": "space"
},
"properties": {
"name": {
"type": "string",
"index": "not_analyzed"
}
}
},
"textbox": {
"_parent": {
"type": "document"
},
"properties": {
"bbox": {
"type": "long"
},
"text": {
"fields": {
"text": {
"type":"string",
"analyzer":"full_name"
},
"autocomplete": {
"type": "string",
"index_analyzer": "edgeNgram_autocomplete",
"search_analyzer": "full_name",
"analyzer":"full_name"
}
},
"type":"multi_field"
}
}
},
"entity": {
"_parent": {
"type": "document"
},
"properties": {
"bbox": {
"type": "long"
},
"name": {
"type": "string",
"index": "not_analyzed"
}
}
}
}
}
Add a space to hold all docs:
POST http://localhost:9200/docstore/space
{
"name": "Space 1"
}
When the user enters the word proj, this should return all of these texts:
SampleProject
Sample Project
Project Name
myProjectname
firstProjectName
my ProjectName
But it returns nothing.
My query:
POST http://localhost:9200/docstore/textbox/_search
{
"query": {
"match": {
"text": "proj"
}
},
"filter": {
"has_parent": {
"type": "document",
"query": {
"term": {
"name": "1-a1-1001.pdf"
}
}
}
}
}
If I search by project, I get:
{ "took": 4,
"timed_out": false,
"_shards": {
"total": 5,
"successful": 5,
"failed": 0
},
"hits": {
"total": 2,
"max_score": 3.0133555,
"hits": [
{
"_index": "docstore",
"_type": "textbox",
"_id": "AVRuV2d_f4y6IKuxK35g",
"_score": 3.0133555,
"_routing": "AVRuVvtLf4y6IKuxK33f",
"_parent": "AVRuV2cMf4y6IKuxK33g",
"_source": {
"bbox": [
8750,
5362,
9291,
5445
],
"text": [
"Sample Project"
]
}
},
{
"_index": "docstore",
"_type": "textbox",
"_id": "AVRuV2d_f4y6IKuxK35Y",
"_score": 2.4106843,
"_routing": "AVRuVvtLf4y6IKuxK33f",
"_parent": "AVRuV2cMf4y6IKuxK33g",
"_source": {
"bbox": [
8645,
5170,
9070,
5220
],
"text": [
"Project Name and Address"
]
}
}
]
}
}
Maybe my edgeNGram filter is not suited for this? I am setting:
"side": "front"
Should I do it differently?
Does anyone know what I am doing wrong?
The problem is with the key name used for the autocomplete index-time analyzer.
Change:
"index_analyzer": "edgeNgram_autocomplete"
To:
"analyzer": "edgeNgram_autocomplete"
And also search like @Andrei Stefan showed in his answer:
POST http://localhost:9200/docstore/textbox/_search
{
"query": {
"match": {
"text.autocomplete": "proj"
}
}
}
And it will work as expected!
I have tested your configuration on Elasticsearch 2.3.
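To see what the autocomplete analyzer actually emits for a sample value, you can run it through the _analyze API (a sketch, assuming the Elasticsearch 2.x query-string form and the docstore index from the question):
GET http://localhost:9200/docstore/_analyze?analyzer=edgeNgram_autocomplete&text=myProjectname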
By the way, the multi_field type is deprecated.
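For reference, here is a sketch (my addition) of the non-deprecated equivalent of the text field, using fields (multi-fields) instead of multi_field, with the analyzer names from the question:
"text": {
  "type": "string",
  "analyzer": "full_name",
  "fields": {
    "autocomplete": {
      "type": "string",
      "analyzer": "edgeNgram_autocomplete",
      "search_analyzer": "full_name"
    }
  }
}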
Hope I have managed to help :)
Your query should actually try to match on text.autocomplete and not text:
"query": {
"match": {
"text.autocomplete": "proj"
}
}
I'm trying to use ElasticSearch 1.7.3 to implement a Did-you-mean function for my company's search engine. I've followed the documentation to set up a Phrase Suggester and created a customized mapping to support that.
However, when I do a _suggest query, I get ElasticsearchIllegalArgumentException[Suggester[simple_phrase] not supported]. What am I doing wrong?
This is my query:
POST knowledge_graph/entities/_suggest
{
"suggest": {
"text" : "apple in",
"simple_phrase": {
"phrase" : {
"field" : "canonical_name"
}
}
}
}
I get the following response:
{
"_shards": {
"total": 5,
"successful": 0,
"failed": 5,
"failures": [
{
"index": "knowledge_graph",
"shard": 0,
"status": 500,
"reason": "BroadcastShardOperationFailedException[[knowledge_graph][0] ]; nested: ElasticsearchException[failed to execute suggest]; nested: ElasticsearchIllegalArgumentException[Suggester[simple_phrase] not supported]; "
},
...
]
}
}
Here's my index's settings and mappings:
{
"knowledge_graph": {
"aliases": {},
"mappings": {
"entities": {
"properties": {
"autocomplete": {
"type": "completion",
"analyzer": "simple",
"payloads": true,
"preserve_separators": true,
"preserve_position_increments": true,
"max_input_length": 50
},
"canonical_name": {
"type": "string",
"analyzer": "simple",
"fields": {
"shingles": {
"type": "string",
"analyzer": "simple_shingle_analyzer"
}
}
},
"entity_query": {
"properties": {
"simple_phrase": {
"properties": {
"phrase": {
"properties": {
"field": {
"type": "string"
}
}
}
}
},
"text": {
"type": "string"
}
}
},
"suggest": {
"properties": {
"simple_phrase": {
"properties": {
"phrase": {
"properties": {
"field": {
"type": "string"
}
}
}
}
},
"text": {
"type": "string"
}
}
}
}
}
},
"settings": {
"index": {
"creation_date": "1449251691345",
"analysis": {
"filter": {
"shingles_1_6": {
"type": "shingle",
"max_shingle_size": "6",
"output_unigrams_if_no_shingles": "true"
}
},
"analyzer": {
"simple_shingle_analyzer": {
"type": "custom",
"filter": [
"lowercase",
"shingles_1_6"
],
"tokenizer": "standard"
}
}
},
"number_of_shards": "5",
"number_of_replicas": "0",
"version": {
"created": "1070399"
},
"uuid": "g_Yp7z6kQHCDRtd6TvVlzQ"
}
},
"warmers": {}
}
}
There are two ways to execute suggest requests: one with the _search endpoint and another with the _suggest endpoint.
From the Docs
Suggest requests executed against the _suggest endpoint should omit
the surrounding suggest element which is only used if the suggest
request is part of a search.
Your query will work if you execute it against the _search API:
POST knowledge_graph/entities/_search <---- here
{
"suggest": {
"text" : "apple in",
"simple_phrase": {
"phrase" : {
"field" : "canonical_name"
}
}
},
"size" : 0
}
If you want to query with the _suggest endpoint, try this:
POST knowledge_graph/_suggest
{
"suggest": {
"text": "apple in",
"phrase": {
"field": "canonical_name"
}
}
}
Note: I think you should be executing the phrase suggest against canonical_name.shingles.
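That variant would look like this (same request as above; only the field name changes):
POST knowledge_graph/_suggest
{
  "suggest": {
    "text": "apple in",
    "phrase": {
      "field": "canonical_name.shingles"
    }
  }
}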
I have this mapping and query. Everything is working, except when I want to filter contents by the mentioned "tagid"s: it returns zero results.
I want to filter contents based on tag IDs.
{
"mappings": {
"video": {
"_all": {
"enabled": true
},
"properties": {
"title": {
"type": "string"
},
"en_title": {
"type": "string"
},
"tags": {
"type": "nested",
"properties": {
"tagname": {
"type": "string"
},
"tagid": {
"type": "string",
"index": "not_analyzed"
}
}
},
"metadescription": {
"type": "string"
},
"author": {
"type": "string"
},
"description": {
"type": "string"
},
"items": {
"type": "nested",
"properties": {
"item_title": {
"type": "string"
},
"item_duration": {
"type": "string",
"index": "not_analyzed"
}
}
},
"isfeatured": {
"type": "string",
"index": "not_analyzed"
},
"image": {
"type": "string",
"index": "not_analyzed"
},
"contenttype": {
"type": "string",
"index": "not_analyzed"
},
"category": {
"type": "string",
"index": "not_analyzed"
},
"categoryalias": {
"type": "string",
"index": "not_analyzed"
},
"url": {
"type": "string",
"index": "not_analyzed"
},
"authorid": {
"type": "string",
"index": "not_analyzed"
},
"price": {
"type": "string",
"index": "not_analyzed"
},
"duration": {
"type": "string",
"index": "not_analyzed"
},
"publishdate": {
"type": "date",
"format": "yyyy-MM-dd HH:mm:ss"
}
}
}
}
}
and this is the query:
{
"index": "content",
"type": "video",
"body": {
"query": {
"filtered": {
"query": {
"match_all": { }
},
"filter": {
"bool": {
"must": [
{
"nested": {
"path": "tags",
"query": {
"bool": {
"should": [
{
"term": {
"tagid": "193"
}
},
{
"term": {
"tagid": "194"
}
}
]
}
}
}
},
{
"term": {
"categoryalias": "digilife"
}
},
{
"term": {
"price": 0
}
}
]
}
}
}
},
"from": 0,
"size": 9,
"sort": [
"_score"
]
}
}
The nested filter in your query is not quite correct. For the field names where you have tagid, it should be tags.tagid. The full query should be:
{
"index": "content",
"type": "video",
"body": {
"query": {
"filtered": {
"query": {
"match_all": {}
},
"filter": {
"bool": {
"must": [{
"nested": {
"path": "tags",
"query": {
"bool": {
"should": [{
"term": {
"tags.tagid": "193"
}
}, {
"term": {
"tags.tagid": "194"
}
}]
}
}
}
}, {
"term": {
"categoryalias": "digilife"
}
}, {
"term": {
"price": 0
}
}]
}
}
}
},
"from": 0,
"size": 9,
"sort": [
"_score"
]
}
}
EDIT:
Here's a complete working example to get you started. I have used Sense for this, but you can use cURL or the language client of your choice.
For the mapping
curl -XPUT "http://localhost:9200/content" -d'
{
"mappings": {
"video": {
"_all": {
"enabled": true
},
"properties": {
"title": {
"type": "string"
},
"en_title": {
"type": "string"
},
"tags": {
"type": "nested",
"properties": {
"tagname": {
"type": "string"
},
"tagid": {
"type": "string",
"index": "not_analyzed"
}
}
},
"metadescription": {
"type": "string"
},
"author": {
"type": "string"
},
"description": {
"type": "string"
},
"items": {
"type": "nested",
"properties": {
"item_title": {
"type": "string"
},
"item_duration": {
"type": "string",
"index": "not_analyzed"
}
}
},
"isfeatured": {
"type": "string",
"index": "not_analyzed"
},
"image": {
"type": "string",
"index": "not_analyzed"
},
"contenttype": {
"type": "string",
"index": "not_analyzed"
},
"category": {
"type": "string",
"index": "not_analyzed"
},
"categoryalias": {
"type": "string",
"index": "not_analyzed"
},
"url": {
"type": "string",
"index": "not_analyzed"
},
"authorid": {
"type": "string",
"index": "not_analyzed"
},
"price": {
"type": "string",
"index": "not_analyzed"
},
"duration": {
"type": "string",
"index": "not_analyzed"
},
"publishdate": {
"type": "date",
"format": "yyyy-MM-dd HH:mm:ss"
}
}
}
}
}'
We can check that the mapping is as expected with:
curl -XGET "http://localhost:9200/content/video/_mapping"
Now, let's index some documents:
// document with id 1
curl -XPOST "http://localhost:9200/content/video/1" -d'
{
"tags": [
{
"tagname" : "tag 193",
"tagid": "193"
}
],
"price": 0,
"categoryalias": "digilife"
}'
// document with id 2
curl -XPOST "http://localhost:9200/content/video/2" -d'
{
"tags": [
{
"tagname" : "tag 194",
"tagid": "194"
}
],
"price": 0,
"categoryalias": "digilife"
}'
// document with id 3
curl -XPOST "http://localhost:9200/content/video/3" -d'
{
"tags": [
{
"tagname" : "tag 194",
"tagid": "194"
}
],
"price": 0,
"categoryalias": "different category alias"
}'
Now, let's run the query. I've removed the superfluous parts and simplified it:
curl -XGET "http://localhost:9200/content/video/_search" -d'
{
"query": {
"filtered": {
"filter": {
"bool": {
"must": [
{
"nested": {
"path": "tags",
"query": {
"terms": {
"tags.tagid": [
"193",
"194"
]
}
}
}
},
{
"term": {
"categoryalias": "digilife"
}
},
{
"term": {
"price": 0
}
}
]
}
}
}
},
"size": 9
}'
Only documents with ids 1 and 2 should be returned. This is confirmed by the results:
{
"took": 1,
"timed_out": false,
"_shards": {
"total": 5,
"successful": 5,
"failed": 0
},
"hits": {
"total": 2,
"max_score": 1,
"hits": [
{
"_index": "content",
"_type": "video",
"_id": "1",
"_score": 1,
"_source": {
"tags": [
{
"tagname": "tag 193",
"tagid": "193"
}
],
"price": 0,
"categoryalias": "digilife"
}
},
{
"_index": "content",
"_type": "video",
"_id": "2",
"_score": 1,
"_source": {
"tags": [
{
"tagname": "tag 194",
"tagid": "194"
}
],
"price": 0,
"categoryalias": "digilife"
}
}
]
}
}