Elasticsearch layered ordering

I would like to be able to return typeahead items in a certain order. For example, searching for Para should return:
Paracetamol
Parafin
LIQUID PARAFFIN
ISOMETHEPTENE WITH PARACETAMOL
1) The suggestions that begin with the search term para should be ordered at the top and in alphabetical order
2) The rest of the items should appear below and also in alphabetical order
Is this possible with Elasticsearch?
Update
What if I wanted the output to be like this:
Paracetamol
Parafin
Amber Paraffin
ISOMETHEPTENE WITH PARACETAMOL
LIQUID PARAFFIN
So all the terms that contain the prefix are at the top and everything else in alphabetical order.

This is my suggestion (note that you also need to enable dynamic scripting). The script sort below returns the position at which the search term occurs in the field, so suggestions that start with the term come first, and the not_analyzed sub-field breaks ties alphabetically:
PUT /test
{
"settings": {
"analysis": {
"analyzer": {
"autocomplete": {
"type": "custom",
"tokenizer": "standard",
"filter": [
"standard",
"lowercase",
"ngram"
]
},
"search_ngram": {
"type": "custom",
"tokenizer": "keyword",
"filter": "lowercase"
}
},
"filter": {
"ngram": {
"type": "ngram",
"min_gram": 2,
"max_gram": 15
}
}
}
},
"mappings": {
"test": {
"properties": {
"text": {
"type": "string",
"index_analyzer": "autocomplete",
"search_analyzer": "search_ngram",
"index_options": "positions",
"fields": {
"not_analyzed_sorting": {
"type": "string",
"index": "not_analyzed"
}
}
}
}
}
}
}
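If you want to double-check which tokens the autocomplete analyzer emits (a quick sanity check, not required for the solution), the _analyze API will show the 2- to 15-character ngrams that end up in the index:
GET /test/_analyze?analyzer=autocomplete&text=Paracetamol
The search_ngram analyzer, on the other hand, keeps the whole query as a single lowercased token, so Para is searched as para.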
POST test/test/_bulk
{"index":{"_id":1}}
{"text":"Paracetamol"}
{"index":{"_id":2}}
{"text":"Paracetamol xxx yyy zzz"}
{"index":{"_id":3}}
{"text":"Parafin"}
{"index":{"_id":4}}
{"text":"LIQUID PARAFFIN"}
{"index":{"_id":5}}
{"text":"ISOMETHEPTENE WITH PARACETAMOL"}
GET /test/test/_search
{
"query": {
"match": {
"text": "Para"
}
},
"sort": [
{
"_script": {
"type": "number",
"script": "termInfo=_index[field_to_search].get(term_to_search.toLowerCase(),_POSITIONS);if (termInfo) {for(pos in termInfo){return pos.position}};return 0;",
"params": {
"term_to_search": "Para",
"field_to_search": "text"
},
"order": "asc"
}
},
{
"text.not_analyzed_sorting": {
"order": "asc"
}
}
]
}
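Note that the script sort requires dynamic scripting to be enabled; on Elasticsearch 1.x that means something like the following line in elasticsearch.yml (the exact setting depends on your version):
script.disable_dynamic: false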
UPDATE
For your updated question (although I would have preferred a separate post), use the following query. The script now returns 0 only when the term is at position 0, i.e. the suggestion starts with it, and Integer.MAX_VALUE otherwise, so all non-prefix matches fall back to purely alphabetical ordering:
{
"query": {
"match": {
"text": "Para"
}
},
"sort": [
{
"_script": {
"type": "number",
"script": "termInfo=_index[field_to_search].get(term_to_search.toLowerCase(),_POSITIONS);if (termInfo) {for(pos in termInfo){if (pos.position==0) return pos.position; else return java.lang.Integer.MAX_VALUE}};return java.lang.Integer.MAX_VALUE;",
"params": {
"term_to_search": "Para",
"field_to_search": "text"
},
"order": "asc"
}
},
{
"text.not_analyzed_sorting": {
"order": "asc"
}
}
]
}

Related

elastic search for mark character

I have two fields in Vietnamese, "mắt biếc" and "mật mã", in an index called books.
In the books index I use asciifolding to transform "mắt biếc" into "mat biec" and "mật mã" into "mat ma".
I need to query these two fields for the term "mắt", but both get the same score, and what I want is for "mắt biếc" to score higher than "mật mã".
How can I do that in Elasticsearch?
You should use a Function Score Query.
Try this (based on version 7.x):
GET my_index/_search
{
"query": {
"function_score": {
"query": {
"match": {
"title": "mật"
}
},
"functions": [
{
"filter": {
"term": {
"title.keyword": {
"value": "mắt biếc"
}
}
},
"weight": 30
}
],
"max_boost": 30,
"score_mode": "max",
"boost_mode": "multiply"
}
}
}
Mappings example
PUT my_index
{
"settings": {
"analysis": {
"analyzer": {
"product_analyzer": {
"tokenizer": "standard",
"filter": [
"asciifolding"
]
}
}
}
},
"mappings": {
"properties": {
"title": {
"type": "text",
"analyzer": "product_analyzer",
"fields": {
"keyword": {
"type": "keyword"
}
}
},
"price": {
"type": "keyword"
},
"author": {
"type": "keyword"
},
"publisher": {
"type": "keyword"
}
}
}
}
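As a quick sanity check, you can verify that product_analyzer folds the diacritics as expected:
GET my_index/_analyze
{
"analyzer": "product_analyzer",
"text": "mắt biếc"
}
This should return the tokens mat and biec.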
You have to update your mapping in order to use title.keyword.
Update the mapping:
POST my_index/_mapping
{
"properties": {
"title": {
"type": "text",
"analyzer": "product_analyzer",
"fields": {
"keyword": {
"type": "keyword"
}
}
}
}
}
Then reindex all existing documents so the new title.keyword sub-field gets populated:
POST my_index/_update_by_query?conflicts=proceed
Hope this helps

Elasticsearch term query to number token

I need to explain some weird behavior of a term query against an Elasticsearch index when the string contains a number. The query is pretty simple:
{
"query": {
"bool": {
"should": [
{
"term": {
"address.street": "8 kvetna"
}
}
]
}
}
}
The problem is that the term 8 kvetna returns an empty result. I tried to _analyze it and it produces the expected tokens like 8, k, kv, kve, and so on. Also, I am pretty sure the value 8 kvetna is in the database.
Here is the mapping for the field:
{
"settings": {
"index": {
"refresh_interval": "1m",
"number_of_shards": "1",
"number_of_replicas": "1",
"analysis": {
"filter": {
"autocomplete_filter": {
"type": "edge_ngram",
"min_gram": "1",
"max_gram": "20"
}
},
"analyzer": {
"autocomplete": {
"filter": [
"lowercase",
"asciifolding",
"autocomplete_filter"
],
"type": "custom",
"tokenizer": "standard"
},
"default": {
"filter": [
"lowercase",
"asciifolding"
],
"type": "custom",
"tokenizer": "standard"
}
}
}
}
},
"mappings": {
"doc": {
"dynamic": "strict",
"_all": {
"enabled": false
},
"properties": {
"address": {
"properties": {
"city": {
"type": "text",
"analyzer": "autocomplete"
},
"street": {
"type": "text",
"analyzer": "autocomplete"
}
}
}
}
}
}
}
What caused this weird result? I don't understand it. Thanks for any help.
Great start so far! Your only issue is that you're using a term query, while you should use a match one. A term query is not analyzed, so it looks for the exact term 8 kvetna in the index, and that's not what you want. The following query will work:
{
"query": {
"bool": {
"should": [
{
"match": { <--- change this
"address.street": "8 kvetna"
}
}
]
}
}
}
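To see the mismatch yourself, run the field value through the autocomplete analyzer (the index name below is just a placeholder for your own):
GET your_index/_analyze
{
"analyzer": "autocomplete",
"text": "8 kvetna"
}
The index contains the edge-ngram tokens 8, k, kv, kve and so on, while a term query looks up the single unanalyzed term 8 kvetna, which is never produced; a match query analyzes the query text first, so its tokens line up with what was indexed.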

Elasticsearch query and Kibana not working as expected

I am trying to learn Elasticsearch and I am using Kibana to visualise things, but I cannot seem to figure out what is wrong with my mapping and queries.
I am trying to store photo metadata (IPTC data), and I have the following mapping for it:
{
"settings": {
"index": {
"analysis": {
"filter": {},
"analyzer": {
"keyword_analyzer": {
"filter": [
"lowercase",
"asciifolding",
"trim"
],
"char_filter": [],
"type": "custom",
"tokenizer": "keyword"
},
"edge_ngram_analyzer": {
"filter": [
"lowercase"
],
"tokenizer": "edge_ngram_tokenizer"
},
"edge_ngram_search_analyzer": {
"tokenizer": "lowercase"
}
},
"tokenizer": {
"edge_ngram_tokenizer": {
"type": "edge_ngram",
"min_gram": 2,
"max_gram": 5,
"token_chars": [
"letter"
]
}
}
}
}
},
"mappings": {
"doc": {
"properties": {
"photo_added": {
"type": "date",
"index": true,
"format": "yyyy-MM-dd' 'H:m:s"
},
"photo_id": {
"type": "long",
"index": true
},
"photo_owner": {
"type": "long",
"index": true
},
"project": {
"type": "long",
"index": true
},
"iptc": {
"type": "nested",
"properties": {
"caption/abstract": {
"type": "text",
"index": true
},
"copyright notice": {
"type": "text",
"index": true
},
"keywords": {
"type": "text",
"index": true,
"fields": {
"keywordstring": {
"type": "text",
"analyzer": "keyword_analyzer"
},
"edgengram": {
"type": "text",
"analyzer": "edge_ngram_analyzer",
"search_analyzer": "edge_ngram_search_analyzer"
},
"completion": {
"type": "completion"
},
"keyword": {
"type": "keyword"
}
}
},
"object name": {
"type": "text",
"index": true
},
"province/state": {
"type": "text",
"index": true
},
"sub-location": {
"type": "text",
"index": true
},
"time created": {
"type": "text",
"index": true
},
"urgency": {
"type": "text",
"index": true
},
"writer/editor": {
"type": "text",
"index": true
}
}
}
}
}
}
}
The thing is: I want a query that searches through the keywords and caption for the search text. Whenever the search text is found within the keywords, the score should be boosted, because that indicates the photo is of higher relevance. So I formulated the following query (where value is the search text):
GET /photos/_search
{
"query": {
"dis_max": {
"queries": [
{
"fuzzy": {
"iptc.keywords": {
"value": "value",
"fuzziness": 1,
"boost": 1
}
}
},
{
"fuzzy": {
"iptc.caption/abstract": {
"value": "value",
"fuzziness": 1
}
}
}
]
}
}
}
However, it does not seem to find any matches, despite the fact that the value is in the documents. I also cannot seem to construct a simple match query that matches against the exact text, for example:
GET /photos/doc/_search?error_trace=true
{
"query": {
"match": {
"iptc.caption/abstract": "exact value from one of the documents"
}
}
}
Will return 0 results, even though the search text is taken verbatim from the document. I don't know what to do anymore. To make matters worse (for me, since I am nearly bald thanks to the frustration this is causing me), Kibana seems to act up as well: I am almost sure it is something really simple (the document date is within 5 years), but when filtering for the exact copy-pasted value it returns 0 results, as shown in the screenshot.
I am going crazy here. Does someone know how to fix this, or what on earth I am doing wrong?
I found the solution in the Elastic documentation:
Because nested documents are indexed as separate documents, they can only be accessed within the scope of the nested query, the nested/reverse_nested aggregations, or nested inner hits.
Documentation
So I constructed the following query which works.
{
"query": {
"nested": {
"path": "iptc",
"query": {
"bool": {
"should": [
{
"dis_max": {
"queries": [
{
"fuzzy": {
"iptc.keywords": {
"value": "Feyenoord",
"boost": 1
}
}
},
{
"fuzzy": {
"iptc.caption/abstract": {
"value": "Feyenoord",
"fuzziness": 1
}
}
}
]
}
}
]
}
}
}
}
}
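The same applies to the simple match query from the question: wrapped in a nested query it finds the document (a sketch based on the mapping above):
GET /photos/_search
{
"query": {
"nested": {
"path": "iptc",
"query": {
"match": {
"iptc.caption/abstract": "exact value from one of the documents"
}
}
}
}
}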

elasticsearch match query is not working for numbers

I have a search query that is used to search within the report name.
I have indexed the field with an autocomplete (edge_ngram) analyzer.
A normal search on the field name works fine, but when the field name contains a number / year it does not work.
Query :
{
"query": {
"function_score": {
"query": {
"bool": {
"should": [
{
"match": {
"field_name": {
"query": "hybrid seeds india 2017",
"operator": "and"
}
}
}
]
}
}
}
},
"from": 0,
"size": 10
}
Settings and mappings
{
"mappings": {
"pages": {
"properties": {
"report_name": {
"fields": {
"autocomplete": {
"search_analyzer": "report_name_search",
"analyzer": "report_name_index",
"type": "string"
},
"report_name": {
"index": "not_analyzed",
"type": "string"
}
},
"type": "multi_field"
}
}
}
},
"settings": {
"analysis": {
"filter": {
"report_name_ngram": {
"max_gram": 150,
"min_gram": 2,
"type": "edge_ngram"
}
},
"analyzer": {
"report_name_index": {
"filter": [
"lowercase",
"report_name_ngram"
],
"tokenizer": "keyword"
},
"report_name_search": {
"filter": [
"lowercase"
],
"tokenizer": "keyword"
}
}
}
}
}
Can you guys help me out with this?
Thanks in advance.
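A first step to narrow this down would be to compare what the index-time and search-time analyzers emit for the failing string (the index name here is just a placeholder):
GET /reports/_analyze?analyzer=report_name_index&text=hybrid+seeds+india+2017
GET /reports/_analyze?analyzer=report_name_search&text=hybrid+seeds+india+2017
If the single search-time token does not appear among the index-time edge-ngrams, that would explain why the match query finds nothing.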

Highlight with fuzziness and ngram

I guess the title of the topic spoiled you enough :D
I use edge_ngram and highlighting to build an autocomplete search. I have added fuzziness to the query to allow users to misspell their search, but it breaks the highlighting a bit.
When I write Sport, this is what I get:
<em>Spor</em>t
<em>Spor</em>t mécanique
<em>Spor</em>t nautique
I guess it's because it matches with the token spor generated by the ngram tokenizer.
The query:
{
"query": {
"bool": {
"should": [
{
"match": {
"name": {
"query": "sport",
"operator": "and",
"fuzziness": "AUTO"
}
}
},
{
"match_phrase_prefix": {
"name.raw": {
"query": "sport"
}
}
}
]
}
},
"highlight": {
"fields": {
"name": {
"term_vector": "with_positions_offsets"
}
}
}
}
And the mapping:
{
"settings": {
"analysis": {
"analyzer": {
"partialAnalyzer": {
"type": "custom",
"tokenizer": "ngram_tokenizer",
"filter": ["asciifolding", "lowercase"]
},
"keywordAnalyzer": {
"type": "custom",
"tokenizer": "keyword",
"filter": ["asciifolding", "lowercase"]
},
"searchAnalyzer": {
"type": "custom",
"tokenizer": "standard",
"filter": ["asciifolding", "lowercase"]
}
},
"tokenizer": {
"ngram_tokenizer": {
"type": "edge_ngram",
"min_gram": "1",
"max_gram": "15",
"token_chars": [ "letter", "digit" ]
}
}
}
},
"mappings": {
"place": {
"properties": {
"name": {
"type": "string",
"index_analyzer": "partialAnalyzer",
"search_analyzer": "searchAnalyzer",
"term_vector": "with_positions_offsets",
"fields": {
"raw": {
"type": "string",
"analyzer": "keywordAnalyzer"
}
}
}
}
}
}
}
I tried to add a new match clause without fuzziness to the query, to try to match the keyword before the fuzzy match, but it changed nothing:
"match": {
"name": {
"query": "sport",
"operator": "and"
}
}
Any idea how I can handle this?
Regards, Raphaël
You could do that with highlight_query I guess
Try this in your highlighting query.
"highlight": {
"fields": {
"name": {
"term_vector": "with_positions_offsets",
"highlight_query": {
"match": {
"name.raw": {
"query": "spotr",
"fuzziness": 2
}
}
}
}
}
}
I hope it helps.
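Putting it together with the original query, the full request could look something like this (a sketch; sport stands in for whatever the user typed):
{
"query": {
"bool": {
"should": [
{
"match": {
"name": {
"query": "sport",
"operator": "and",
"fuzziness": "AUTO"
}
}
},
{
"match_phrase_prefix": {
"name.raw": {
"query": "sport"
}
}
}
]
}
},
"highlight": {
"fields": {
"name": {
"highlight_query": {
"match": {
"name.raw": {
"query": "sport",
"fuzziness": "AUTO"
}
}
}
}
}
}
}
The highlight_query runs against the keyword-analyzed name.raw sub-field, so only the user's full input gets wrapped in the em tags instead of the shorter ngram token.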

Resources