I have two fields in Vietnamese, "mắt biếc" and "mật mã", in an index called books.
In the books index, I use asciifolding to transform "mắt biếc" into "mat biec" and "mật mã" into "mat ma".
I need to query these two fields for the term "mắt". But the scores of the two fields are equal, and what I want is for "mắt biếc" to score higher than "mật mã".
So, how can I do that in Elasticsearch?
You should use a Function Score Query.
Try this (based on version 7.x):
GET my_index/_search
{
"query": {
"function_score": {
"query": {
"match": {
"title": "mật"
}
},
"functions": [
{
"filter": {
"term": {
"title.keyword": {
"value": "mắt biếc"
}
}
},
"weight": 30
}
],
"max_boost": 30,
"score_mode": "max",
"boost_mode": "multiply"
}
}
}
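With this setup the base match query still scores both titles the same (asciifolding collapses "mắt" and "mật" to "mat"), but any document whose title.keyword exactly equals "mắt biếc" also matches the filter function, so its query score is multiplied by the weight of 30 (capped by max_boost), pushing it above "mật mã".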
Mappings example
PUT my_index
{
"settings": {
"analysis": {
"analyzer": {
"product_analyzer": {
"tokenizer": "standard",
"filter": [
"asciifolding"
]
}
}
}
},
"mappings": {
"properties": {
"title": {
"type": "text",
"analyzer": "product_analyzer",
"fields": {
"keyword": {
"type": "keyword"
}
}
},
"price": {
"type": "keyword"
},
"author": {
"type": "keyword"
},
"publisher": {
"type": "keyword"
}
}
}
}
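As a side note, you can confirm that both titles collapse to the same tokens under this analyzer (which is why the plain match scores are equal) with the _analyze API:
GET my_index/_analyze
{
"analyzer": "product_analyzer",
"text": "mắt biếc"
}
This returns the tokens mat and biec, and running it on "mật mã" returns mat and ma, so a plain match on "mắt" scores both documents identically.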
You have to update your mappings in order to use title.keyword
Update the mapping
POST my_index/_mapping
{
"properties": {
"title": {
"type": "text",
"analyzer": "product_analyzer",
"fields": {
"keyword": {
"type": "keyword"
}
}
}
}
}
And then
Update all documents
POST my_index/_update_by_query?conflicts=proceed
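Running _update_by_query with no body simply re-indexes the existing documents in place, so the newly added title.keyword sub-field gets populated for documents that were indexed before the mapping change.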
Hope this helps
I have this structure:
and I want to get the pricingbytrade entries only for the ones whose name matches hvac.
Right now I am running this query:
{
"query": {
"bool": {
"must": [
{
"term": {
"providerid": {
"value": 13
}
}
},
{
"nested": {
"path": "pricingbytrade",
"query": {
"bool": {
"must": [
{
"match": {
"pricingbytrade.name": "hvac"
}
}
]
}
}
}
}
]
}
},
"_source": {
"includes": [
"providerid",
"pricingbytrade"
]
}
}
But that query returns all of them instead of only the ones with hvac.
I am using Elasticsearch 6.7, so inner_hits does not work here.
And this is the section of the mapping that involves this structure:
"pricingbytrade": {
"type": "nested",
"properties": {
"name": {
"type": "text",
"fields": {
"analyzed": {
"type": "text",
"analyzer": "standard",
"search_analyzer": "synonym_analyzer"
},
"keyword": {
"type": "keyword",
"ignore_above": 256,
"normalizer": "lowercase"
}
}
},
"pricingbycbsa": {
"type": "nested",
"properties": {
"cbsaid": {
"type": "integer"
},
"cbsaname": {
"type": "text"
},
"pricingscore": {
"type": "float"
}
}
}
}
}
Any suggestions?
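For what it's worth: a nested query only decides whether the parent document matches, and _source always returns the whole pricingbytrade array. The documented way to get back just the matching nested objects is inner_hits on the nested query, which exists on the 6.x line as well, so it may be worth retrying. A minimal sketch based on the query and mapping above:
{
"query": {
"bool": {
"must": [
{ "term": { "providerid": { "value": 13 } } },
{
"nested": {
"path": "pricingbytrade",
"query": {
"match": { "pricingbytrade.name": "hvac" }
},
"inner_hits": {}
}
}
]
}
},
"_source": ["providerid"]
}
The matching pricingbytrade entries then come back under inner_hits in each hit rather than via _source.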
How can I search for documents in Elasticsearch that have a numeric field whose value has decimal places?
My Mapping is as follows:
POST /itemnew/_doc
{
"mappings": {
"_doc": {
"properties": {
"name": {
"type": "string",
"analyzer": "edge_ngram_analyzer"
},
"purchase_price": {
"type": "double"
},
"sale_price": {
"type": "double"
},
"sku": {
"type": "string"
}
}
}
},
"settings": {
"index": {
"analysis": {
"filter": {
"autocomplete_filter": {
"type": "edge_ngram"
}
},
"analyzer": {
"ngram_analyzer": {
"tokenizer": "standard"
}
}
}
}
}
}
Sample document is as follows:
PUT itemnew/_doc/3
{
"company_id":"4510339694428161" ,
"item_type": "goods",
"name":"Apple sam" ,
"purchase_price":"45.50" ,
"sale_price":"50",
"sku": "sku 123"
}
I get a NumberFormatException when I try the following query:
GET itemnew/_search
{
"query": {
"bool": {
"must": [
{
"multi_match": {
"query": "45.5",
"fields": [
"name",
"purchase_price",
"sale_price",
"sku"
],
"type": "most_fields"
}
}
]
}
}
}
Please help me to solve this issue. Thank you.
You can use the lenient top-level parameter of the multi_match query here. Adding lenient simply ignores exceptions that occur due to format failures.
lenient (Optional, Boolean) If true, format-based errors, such as
providing a text query value for a numeric field, are ignored.
Defaults to false.
Adding a working example
Index Mapping:
PUT testidx
{
"settings": {
"analysis": {
"filter": {
"autocomplete_filter": {
"type": "edge_ngram"
}
},
"analyzer": {
"ngram_analyzer": {
"tokenizer": "standard"
}
}
}
},
"mappings": {
"properties": {
"name": {
"type": "text",
"analyzer": "ngram_analyzer"
},
"purchase_price": {
"type": "double"
},
"sale_price": {
"type": "double"
},
"sku": {
"type": "text"
}
}
}
}
Index Data:
PUT testidx/_doc/1
{
"company_id": "4510339694428161",
"item_type": "goods",
"name": "Apple sam",
"purchase_price": "45.50",
"sale_price": "50",
"sku": "sku 123"
}
Search Query:
POST testidx/_search
{
"query": {
"bool": {
"must": [
{
"multi_match": {
"query": "hello",
"fields": [
"name",
"purchase_price",
"sale_price",
"sku"
],
"lenient": true,
"type": "most_fields"
}
}
]
}
}
}
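The query from the question works the same way once lenient is added; here "45.5" should match the sample document through its purchase_price, while the text fields that cannot make sense of it are simply skipped instead of throwing a NumberFormatException:
POST testidx/_search
{
"query": {
"bool": {
"must": [
{
"multi_match": {
"query": "45.5",
"fields": [
"name",
"purchase_price",
"sale_price",
"sku"
],
"lenient": true,
"type": "most_fields"
}
}
]
}
}
}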
I am trying to learn Elasticsearch and I am using Kibana to visualise things. I cannot seem to figure out what is wrong with my mapping and queries though.
I am trying to store photo metadata (IPTC data), and I have the following mapping for it:
{
"settings": {
"index": {
"analysis": {
"filter": {},
"analyzer": {
"keyword_analyzer": {
"filter": [
"lowercase",
"asciifolding",
"trim"
],
"char_filter": [],
"type": "custom",
"tokenizer": "keyword"
},
"edge_ngram_analyzer": {
"filter": [
"lowercase"
],
"tokenizer": "edge_ngram_tokenizer"
},
"edge_ngram_search_analyzer": {
"tokenizer": "lowercase"
}
},
"tokenizer": {
"edge_ngram_tokenizer": {
"type": "edge_ngram",
"min_gram": 2,
"max_gram": 5,
"token_chars": [
"letter"
]
}
}
}
}
},
"mappings": {
"doc": {
"properties": {
"photo_added": {
"type": "date",
"index": true,
"format": "yyyy-MM-dd' 'H:m:s"
},
"photo_id": {
"type": "long",
"index": true
},
"photo_owner": {
"type": "long",
"index": true
},
"project": {
"type": "long",
"index": true
},
"iptc": {
"type": "nested",
"properties": {
"caption/abstract": {
"type": "text",
"index": true
},
"copyright notice": {
"type": "text",
"index": true
},
"keywords": {
"type": "text",
"index": true,
"fields": {
"keywordstring": {
"type": "text",
"analyzer": "keyword_analyzer"
},
"edgengram": {
"type": "text",
"analyzer": "edge_ngram_analyzer",
"search_analyzer": "edge_ngram_search_analyzer"
},
"completion": {
"type": "completion"
},
"keyword": {
"type": "keyword"
}
}
},
"object name": {
"type": "text",
"index": true
},
"province/state": {
"type": "text",
"index": true
},
"sub-location": {
"type": "text",
"index": true
},
"time created": {
"type": "text",
"index": true
},
"urgency": {
"type": "text",
"index": true
},
"writer/editor": {
"type": "text",
"index": true
}
}
}
}
}
}
}
The thing is: I want a query that searches through the keywords and caption for the existence of the search text. Whenever the search text is found within keywords, the score should be boosted, because that indicates that the photo is of higher relevance. So I formulated the following query (where value is the search text):
GET /photos/_search
{
"query": {
"dis_max": {
"queries": [
{
"fuzzy": {
"iptc.keywords": {
"value": "value",
"fuzziness": 1,
"boost": 1
}
}
},
{
"fuzzy": {
"iptc.caption/abstract": {
"value": "value",
"fuzziness": 1
}
}
}
]
}
}
}
However, it does not seem to find any matches, despite the fact that the value is in the documents. And I cannot seem to construct a simple match query that matches against the exact text either. For example:
GET /photos/doc/_search?error_trace=true
{
"query": {
"match": {
"iptc.caption/abstract": "exact value from one of the documents"
}
}
}
This returns 0 results, even though the search text appears verbatim in the document. I don't know what to do now. To make matters worse (for me, since I am nearly bald thanks to the frustration this is causing me), Kibana seems to act up as well: I am almost sure it is something really simple (the document date is within the last 5 years), but when filtering for the exact copy-pasted value it returns 0 results, as shown in the screenshot...
I am going crazy here. Does someone know how to fix this, or what on earth I am doing wrong?
I found the solution, which is in the Elasticsearch documentation:
Because nested documents are indexed as separate documents, they can only be accessed within the scope of the nested query, the nested/reverse_nested aggregations, or nested inner hits.
Documentation
So I constructed the following query which works.
{
"query": {
"nested": {
"path": "iptc",
"query": {
"bool": {
"should": [
{
"dis_max": {
"queries": [
{
"fuzzy": {
"iptc.keywords": {
"value": "Feyenoord",
"boost": 1
}
}
},
{
"fuzzy": {
"iptc.caption/abstract": {
"value": "Feyenoord",
"fuzziness": 1
}
}
}
]
}
}
]
}
}
}
}
}
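The same reasoning applies to the simple match query from the question: because iptc is a nested field, that query also has to be wrapped in a nested query, for example:
GET /photos/doc/_search
{
"query": {
"nested": {
"path": "iptc",
"query": {
"match": {
"iptc.caption/abstract": "exact value from one of the documents"
}
}
}
}
}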
I have a search query which is used to search within report names.
I have indexed the field with an autocomplete/edge_ngram analyzer.
Normal field-name searches work properly, but when there is a number / year in the field name, it is not working.
Query :
{
"query": {
"function_score": {
"query": {
"bool": {
"should": [
{
"match": {
"field_name": {
"query": "hybrid seeds india 2017",
"operator": "and"
}
}
}
]
}
}
}
},
"from": 0,
"size": 10
}
Settings and mappings
{
"mappings": {
"pages": {
"properties": {
"report_name": {
"fields": {
"autocomplete": {
"search_analyzer": "report_name_search",
"analyzer": "report_name_index",
"type": "string"
},
"report_name": {
"index": "not_analyzed",
"type": "string"
}
},
"type": "multi_field"
}
}
}
},
"settings": {
"analysis": {
"filter": {
"report_name_ngram": {
"max_gram": 150,
"min_gram": 2,
"type": "edge_ngram"
}
},
"analyzer": {
"report_name_index": {
"filter": [
"lowercase",
"report_name_ngram"
],
"tokenizer": "keyword"
},
"report_name_search": {
"filter": [
"lowercase"
],
"tokenizer": "keyword"
}
}
}
}
}
Can you guys help me out with this?
Thanks in advance.
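One way to narrow this down for the report_name setup above is to compare what the index-time and search-time analyzers actually emit for a name containing a year, using the _analyze API (the index name my_report_index below is just a placeholder):
GET my_report_index/_analyze
{
"analyzer": "report_name_index",
"text": "hybrid seeds india 2017"
}
GET my_report_index/_analyze
{
"analyzer": "report_name_search",
"text": "hybrid seeds india 2017"
}
If the token produced at search time is not among the edge-ngram tokens produced at index time, that explains why the query with the year finds nothing.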
I would like to be able to return typeahead items in a certain order. For example, a search for Para should return:
Paracetamol
Parafin
LIQUID PARAFFIN
ISOMETHEPTENE WITH PARACETAMOL
1) The suggestions that begin with the search term para should be ordered at the top and in alphabetical order
2) The rest of the items should appear below and also in alphabetical order
Is this possible with Elasticsearch?
Update
What if I wanted the output to be like this:
Paracetamol
Parafin
Amber Paraffin
ISOMETHEPTENE WITH PARACETAMOL
LIQUID PARAFFIN
So all the terms that contain the prefix are at the top and everything else in alphabetical order.
This is my suggestion (also, you need to enable scripting):
PUT /test
{
"settings": {
"analysis": {
"analyzer": {
"autocomplete": {
"type": "custom",
"tokenizer": "standard",
"filter": [
"standard",
"lowercase",
"ngram"
]
},
"search_ngram": {
"type": "custom",
"tokenizer": "keyword",
"filter": "lowercase"
}
},
"filter": {
"ngram": {
"type": "ngram",
"min_gram": 2,
"max_gram": 15
}
}
}
},
"mappings": {
"test": {
"properties": {
"text": {
"type": "string",
"index_analyzer": "autocomplete",
"search_analyzer": "search_ngram",
"index_options": "positions",
"fields": {
"not_analyzed_sorting": {
"type": "string",
"index": "not_analyzed"
}
}
}
}
}
}
}
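The idea behind this mapping: at index time the text is broken into 2-15 character ngrams, while at search time the whole lowercased query is kept as a single token by the keyword tokenizer, so a partial query like para can match without itself being split into ngrams; the not_analyzed_sorting sub-field exists purely so results can be sorted alphabetically.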
POST test/test/_bulk
{"index":{"_id":1}}
{"text":"Paracetamol"}
{"index":{"_id":2}}
{"text":"Paracetamol xxx yyy zzz"}
{"index":{"_id":3}}
{"text":"Parafin"}
{"index":{"_id":4}}
{"text":"LIQUID PARAFFIN"}
{"index":{"_id":5}}
{"text":"ISOMETHEPTENE WITH PARACETAMOL"}
GET /test/test/_search
{
"query": {
"match": {
"text": "Para"
}
},
"sort": [
{
"_script": {
"type": "number",
"script": "termInfo=_index[field_to_search].get(term_to_search.toLowerCase(),_POSITIONS);if (termInfo) {for(pos in termInfo){return pos.position}};return 0;",
"params": {
"term_to_search": "Para",
"field_to_search": "text"
},
"order": "asc"
}
},
{
"text.not_analyzed_sorting": {
"order": "asc"
}
}
]
}
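The script sort uses the term vectors (_index[field_to_search]) to look up the position at which the lowercased search term occurs in each document's text field, so documents where it appears earlier rank first; the second sort clause then orders ties alphabetically on text.not_analyzed_sorting.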
UPDATE
For your updated question, even though I would have preferred a separate post, use the following query:
{
"query": {
"match": {
"text": "Para"
}
},
"sort": [
{
"_script": {
"type": "number",
"script": "termInfo=_index[field_to_search].get(term_to_search.toLowerCase(),_POSITIONS);if (termInfo) {for(pos in termInfo){if (pos.position==0) return pos.position; else return java.lang.Integer.MAX_VALUE}};return java.lang.Integer.MAX_VALUE;",
"params": {
"term_to_search": "Para",
"field_to_search": "text"
},
"order": "asc"
}
},
{
"text.not_analyzed_sorting": {
"order": "asc"
}
}
]
}
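The only difference in the updated script is that it returns 0 only when the term occurs at position 0 (i.e. the text effectively starts with it) and Integer.MAX_VALUE otherwise, so all prefix matches group at the top in alphabetical order and everything else follows, also alphabetically.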