ngram for wildcard search in Elasticsearch

I am trying to provide end users with search-as-you-type, which behaves much like SQL Server. I was able to implement an ES query for the given SQL scenario:
select * from table where name like '%peter tom%' and type != 'xyz'
In ES I used an ngram tokenizer in order to achieve the desired results:
PUT sample
{
  "settings": {
    "analysis": {
      "analyzer": {
        "my_ngram_analyzer": {
          "tokenizer": "my_ngram_tokenizer"
        }
      },
      "tokenizer": {
        "my_ngram_tokenizer": {
          "type": "nGram",
          "min_gram": "2",
          "max_gram": "15"
        }
      }
    }
  },
  "mappings": {
    "typename": {
      "properties": {
        "name": {
          "type": "string",
          "fields": {
            "search": {
              "type": "string",
              "analyzer": "my_ngram_analyzer"
            }
          }
        },
        "type": {
          "type": "string",
          "index": "not_analyzed"
        }
      }
    }
  }
}
GET sample/_search
{
  "query": {
    "bool": {
      "should": [
        {
          "term": {
            "name.search": "peter tom"
          }
        }
      ],
      "must_not": [
        {
          "match": {
            "type": "xyz"
          }
        },
        {
          "match": {
            "type": "abc"
          }
        }
      ]
    }
  }
}
So if my document rows are like:

name                          type
peter tomson                  efg
Peter tomson robert simson    efg

The above query shows me both documents, but when I type Peter sims or Peter simson it doesn't return the second document unless I type Peter tomson robert sims or Peter tomson robert simson. So basically I have to type all the words after Peter and before simson to get to the second document. Is there any way to get the second document with partial matching? I can use a match query with the "and" operator, but that still requires an exact match on each word. I am looking for partial matching, where Peter sims should give me the second row of the documents.
Thanks
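
For reference, the reason the first approach behaves this way can be seen by running the text through the analyzer (a quick check against the sample index above; the JSON body syntax for _analyze shown here is the ES 5.x+ form, older versions take query-string parameters):

POST sample/_analyze
{
  "analyzer": "my_ngram_analyzer",
  "text": "peter tomson robert simson"
}

The ngram tokenizer grams the whole string, spaces included, so every emitted token is a contiguous 2-to-15-character substring such as peter tom or omson rob. A non-contiguous phrase like peter sims is never produced as a token, which is why the term query cannot find it.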

I found the answer to the query myself; posting the solution for further reference for other users:
PUT my_index
{
  "settings": {
    "analysis": {
      "analyzer": {
        "autocomplete": {
          "tokenizer": "whitespace",
          "filter": [
            "lowercase",
            "autocomplete"
          ]
        },
        "autocomplete_search": {
          "tokenizer": "whitespace",
          "filter": [
            "lowercase"
          ]
        }
      },
      "filter": {
        "autocomplete": {
          "type": "nGram",
          "min_gram": 2,
          "max_gram": 40
        }
      }
    }
  },
  "mappings": {
    "doc": {
      "properties": {
        "title": {
          "type": "string",
          "analyzer": "autocomplete",
          "search_analyzer": "autocomplete_search"
        }
      }
    }
  }
}
PUT my_index/doc/1
{
  "title": "peter tomson"
}

PUT my_index/doc/2
{
  "title": "Peter tomson robert simson"
}

GET my_index/doc/_search
{
  "query": {
    "match": {
      "title": {
        "query": "Pete sim",
        "operator": "and"
      }
    }
  }
}
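
Why this works: the whitespace tokenizer splits the text into words first, and the ngram filter then grams each word separately, so pete (from peter) and sim (from simson) are indexed as tokens regardless of which words sit between them. The search analyzer only splits on whitespace and lowercases, so Pete sim becomes the tokens pete and sim, and with "operator": "and" both must match. You can verify the indexed tokens with _analyze (ES 5.x+ body syntax):

GET my_index/_analyze
{
  "analyzer": "autocomplete",
  "text": "Peter tomson robert simson"
}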

Related

Elasticsearch term query to number token

I need to explain some weird behavior of a term query against an Elasticsearch database which contains a number as part of the string. The query is pretty simple:
{
  "query": {
    "bool": {
      "should": [
        {
          "term": {
            "address.street": "8 kvetna"
          }
        }
      ]
    }
  }
}
The problem is that the term 8 kvetna returns an empty result. I ran it through _analyze and it makes regular tokens like 8, k, kv, kve, and so on. Also I am pretty sure there is a value 8 kvetna in the database.
Here is the mapping for the field:
{
  "settings": {
    "index": {
      "refresh_interval": "1m",
      "number_of_shards": "1",
      "number_of_replicas": "1",
      "analysis": {
        "filter": {
          "autocomplete_filter": {
            "type": "edge_ngram",
            "min_gram": "1",
            "max_gram": "20"
          }
        },
        "analyzer": {
          "autocomplete": {
            "filter": [
              "lowercase",
              "asciifolding",
              "autocomplete_filter"
            ],
            "type": "custom",
            "tokenizer": "standard"
          },
          "default": {
            "filter": [
              "lowercase",
              "asciifolding"
            ],
            "type": "custom",
            "tokenizer": "standard"
          }
        }
      }
    }
  },
  "mappings": {
    "doc": {
      "dynamic": "strict",
      "_all": {
        "enabled": false
      },
      "properties": {
        "address": {
          "properties": {
            "city": {
              "type": "text",
              "analyzer": "autocomplete"
            },
            "street": {
              "type": "text",
              "analyzer": "autocomplete"
            }
          }
        }
      }
    }
  }
}
What caused this weird result? I don't understand it. Thanks for any help.
Great start so far! Your only issue is that you're using a term query, while you should use a match one. A term query is not analyzed, so it looks for the exact token 8 kvetna in the index, and your edge_ngram analyzer never produces that token. A match query analyzes the input first, which is what you want here. The following query will work:
{
  "query": {
    "bool": {
      "should": [
        {
          "match": {            <--- change this
            "address.street": "8 kvetna"
          }
        }
      ]
    }
  }
}
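
You can see the mismatch by listing the tokens the index actually contains for that value (a quick check; substitute your own index name, which the question doesn't give):

GET your_index/_analyze
{
  "analyzer": "autocomplete",
  "text": "8 kvetna"
}

This returns 8, k, kv, kve, kvet, and so on, i.e. edge-grams of each word, but never the single token 8 kvetna, so a literal term lookup for 8 kvetna finds nothing.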

elasticsearch match query is not working for numbers

I have a search query which is used to search in report names.
I have indexed the field name with an autocomplete (edge_ngram) analyzer.
Searching on a normal field name works properly, but when there is a number or a year in the field name it's not working.
Query :
{
  "query": {
    "function_score": {
      "query": {
        "bool": {
          "should": [
            {
              "match": {
                "field_name": {
                  "query": "hybrid seeds india 2017",
                  "operator": "and"
                }
              }
            }
          ]
        }
      }
    }
  },
  "from": 0,
  "size": 10
}
Settings and mappings:
{
  "mappings": {
    "pages": {
      "properties": {
        "report_name": {
          "fields": {
            "autocomplete": {
              "search_analyzer": "report_name_search",
              "analyzer": "report_name_index",
              "type": "string"
            },
            "report_name": {
              "index": "not_analyzed",
              "type": "string"
            }
          },
          "type": "multi_field"
        }
      }
    }
  },
  "settings": {
    "analysis": {
      "filter": {
        "report_name_ngram": {
          "max_gram": 150,
          "min_gram": 2,
          "type": "edge_ngram"
        }
      },
      "analyzer": {
        "report_name_index": {
          "filter": [
            "lowercase",
            "report_name_ngram"
          ],
          "tokenizer": "keyword"
        },
        "report_name_search": {
          "filter": [
            "lowercase"
          ],
          "tokenizer": "keyword"
        }
      }
    }
  }
}
Can you guys help me out with this?
Thanks in advance
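
A first step in debugging this kind of problem is to compare the tokens produced at index time and at search time for a failing string (a diagnostic sketch; the index name reports is an assumption, substitute your own):

GET reports/_analyze
{
  "analyzer": "report_name_index",
  "text": "hybrid seeds india 2017"
}

GET reports/_analyze
{
  "analyzer": "report_name_search",
  "text": "hybrid seeds india 2017"
}

Because both analyzers use the keyword tokenizer, each side produces whole-string tokens: the index side stores 2-to-150-character prefixes of the full report name, and the search side emits the full query string as one token, so the query only matches when it is a prefix of the report name. Note also that the query shown targets field_name while the mapping defines report_name.autocomplete; if that is not just anonymization, the query is not hitting the analyzed subfield at all.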

Google type query using Elasticsearch

Suppose I have the following document:
{ "title": "Sennheiser HD 800" }
I want all of these queries to return this document:
senn
heise
sennheise
sennheiser
sennheiser 800
sennheiser hd
hd
800 hd
hd ennheise
In short, I want to find partial words, either one or more. In my mapping I am using this analyzer:
{
  "settings": {
    "analysis": {
      "analyzer": {
        "case_insensitive_sort": {
          "tokenizer": "keyword",
          "filter": [
            "lowercase"
          ]
        }
      }
    }
  }
}
and the mapping:
{
  "title": {
    "type": "string",
    "fields": {
      "raw": {
        "type": "string",
        "index": "not_analyzed"
      },
      "lower_case_sort": {
        "type": "string",
        "analyzer": "case_insensitive_sort"
      }
    }
  }
}
and the query is a simple query_string query:
{
  "query": {
    "query_string": {
      "fields": [
        "title.lower_case_sort"
      ],
      "query": "*800 hd*"
    }
  }
}
For example, this query fails.
You need ngrams.
Here is a blog post I wrote up about it for Qbox:
https://qbox.io/blog/an-introduction-to-ngrams-in-elasticsearch
(Note that "index_analyzer" no longer works in ES 2.x; use "analyzer" instead; "search_analyzer" still works, though.)
Using this mapping (slightly modified from one in the blog post; I'll refer you there for an in-depth explanation):
PUT /test_index
{
  "settings": {
    "analysis": {
      "filter": {
        "ngram_filter": {
          "type": "ngram",
          "min_gram": 2,
          "max_gram": 20
        }
      },
      "analyzer": {
        "ngram_analyzer": {
          "type": "custom",
          "tokenizer": "standard",
          "filter": [
            "lowercase",
            "ngram_filter"
          ]
        }
      }
    }
  },
  "mappings": {
    "doc": {
      "properties": {
        "title": {
          "type": "string",
          "analyzer": "ngram_analyzer",
          "search_analyzer": "standard"
        }
      }
    }
  }
}
index your document:
POST /test_index/doc/1
{
  "title": "Sennheiser HD 800"
}
and then any of your listed queries work, in the following form:
POST /test_index/_search
{
  "query": {
    "match": {
      "title": {
        "query": "heise hd 800",
        "operator": "and"
      }
    }
  }
}
If you only have a single term, then you don't need the "operator" part:
POST /test_index/_search
{
  "query": {
    "match": {
      "title": "hd"
    }
  }
}
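
To see the tokens that make these matches possible, you can run the document text through the index analyzer (a quick check against the mapping above; pre-5.x versions take query-string parameters instead of a JSON body):

POST /test_index/_analyze
{
  "analyzer": "ngram_analyzer",
  "text": "Sennheiser HD 800"
}

Every 2-to-20-character substring of each lowercased word is indexed, so heise, ennheise, hd, and 800 are all real tokens, while the standard search_analyzer keeps the query terms whole.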
Here is some code I used to play around with it:
http://sense.qbox.io/gist/a9accf67f1713ca99819f45ce0ac28adaea691a9

Highlight with fuzziness and ngram

I guess the title of the topic spoiled you enough :D
I use edge_ngram and highlight to build an autocomplete search. I have added fuzziness to the query to allow users to misspell their search, but it breaks the highlighting a bit.
When I type Sport this is what I get:
<em>Spor</em>t
<em>Spor</em>t mécanique
<em>Spor</em>t nautique
I guess it's because the query matches the token spor generated by the edge_ngram tokenizer.
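That guess can be confirmed by listing the generated tokens (the index name places is an assumption here):

GET places/_analyze
{
  "analyzer": "partialAnalyzer",
  "text": "Sport"
}

which returns s, sp, spo, spor, and sport; with "fuzziness": "AUTO" the query sport is allowed one edit, so it also matches the token spor.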
The query:
{
  "query": {
    "bool": {
      "should": [
        {
          "match": {
            "name": {
              "query": "sport",
              "operator": "and",
              "fuzziness": "AUTO"
            }
          }
        },
        {
          "match_phrase_prefix": {
            "name.raw": {
              "query": "sport"
            }
          }
        }
      ]
    }
  },
  "highlight": {
    "fields": {
      "name": {
        "term_vector": "with_positions_offsets"
      }
    }
  }
}
And the mapping:
{
  "settings": {
    "analysis": {
      "analyzer": {
        "partialAnalyzer": {
          "type": "custom",
          "tokenizer": "ngram_tokenizer",
          "filter": ["asciifolding", "lowercase"]
        },
        "keywordAnalyzer": {
          "type": "custom",
          "tokenizer": "keyword",
          "filter": ["asciifolding", "lowercase"]
        },
        "searchAnalyzer": {
          "type": "custom",
          "tokenizer": "standard",
          "filter": ["asciifolding", "lowercase"]
        }
      },
      "tokenizer": {
        "ngram_tokenizer": {
          "type": "edge_ngram",
          "min_gram": "1",
          "max_gram": "15",
          "token_chars": [ "letter", "digit" ]
        }
      }
    }
  },
  "mappings": {
    "place": {
      "properties": {
        "name": {
          "type": "string",
          "index_analyzer": "partialAnalyzer",
          "search_analyzer": "searchAnalyzer",
          "term_vector": "with_positions_offsets",
          "fields": {
            "raw": {
              "type": "string",
              "analyzer": "keywordAnalyzer"
            }
          }
        }
      }
    }
  }
}
I tried to add a new match clause without fuzziness in the query to try to match the keyword before the match with fuzziness but it changed nothing.
'match': {
'name': {
'query': 'sport',
'operator': 'and'
}
Any idea how I can handle this?
Regards, Raphaël
You could do that with highlight_query, I guess. Try this in your highlight section:
"highlight": {
"fields": {
"name": {
"term_vector": "with_positions_offsets",
"highlight_query": {
"match": {
"name.raw": {
"query": "spotr",
"fuzziness": 2
}
}
}
}
}
}
I hope it helps.

In ES, how to write mappings so that a wildcard query works for both lowercase and uppercase?

Hello all, I am facing two problems in ES.
I have a city New York in ES. I want to write a term filter such that it returns a document only when the given string exactly matches New York. What is happening instead is that the filter returns New York when it matches New OR York, but returns nothing for New York itself. My mapping is given below; please tell me which analyzer or tokenizer I should use in the mapping.
Here are the settings and mapping:
"settings": {
"index": {
"analysis": {
"analyzer": {
"synonym": {
"tokenizer": "whitespace",
"filter": ["synonym"]
}
},
"filter": {
"synonym": {
"type": "synonym",
"synonyms_path": "synonyms.txt"
}
}
}
}
},
"mappings": {
  "restaurant": {
    "properties": {
      "address": {
        "properties": {
          "city": { "type": "string", "analyzer": "synonym" }
        }
      }
    }
  }
}
The second problem is that when I try a wildcard query with lowercase, for example new*, ES returns nothing, but with uppercase, for example New*, it returns New York. I want to write my city mapping such that searching with either lowercase or uppercase returns the same thing. I have seen ignore_case and set it inside the synonym filter (see below), but I am still not able to search with both lowercase and uppercase.
"synonym": {
"type": "synonym",
"synonyms_path": "synonyms.txt",
"ignore_case": true // See here
}
I believe you didn't provide enough details, but hoping that my attempt will generate questions from you, I will post what I believe should be a step forward:
The mapping:
PUT test
{
  "settings": {
    "index": {
      "analysis": {
        "analyzer": {
          "synonym": {
            "tokenizer": "whitespace",
            "filter": [
              "synonym"
            ]
          },
          "keyword_lowercase": {
            "type": "custom",
            "tokenizer": "keyword",
            "filter": [
              "lowercase"
            ]
          }
        },
        "filter": {
          "synonym": {
            "type": "synonym",
            "synonyms_path": "synonyms.txt",
            "ignore_case": true
          }
        }
      }
    }
  },
  "mappings": {
    "restaurant": {
      "properties": {
        "address": {
          "properties": {
            "city": {
              "type": "string",
              "analyzer": "synonym",
              "fields": {
                "raw": {
                  "type": "string",
                  "index": "not_analyzed"
                },
                "raw_ignore_case": {
                  "type": "string",
                  "analyzer": "keyword_lowercase"
                }
              }
            }
          }
        }
      }
    }
  }
}
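
The keyword_lowercase analyzer stores the whole city value as a single lowercased token, which is what lets a lowercase wildcard match later; you can verify with _analyze:

GET /test/_analyze
{
  "analyzer": "keyword_lowercase",
  "text": "New York"
}

which returns the single token new york.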
Test data:
POST /test/restaurant/1
{
  "address": { "city": "New York" }
}

POST /test/restaurant/2
{
  "address": { "city": "new york" }
}
Query for the first problem:
GET /test/restaurant/_search
{
  "query": {
    "filtered": {
      "filter": {
        "term": {
          "address.city.raw": "New York"
        }
      }
    }
  }
}
Query for the second problem:
GET /test/restaurant/_search
{
  "query": {
    "query_string": {
      "query": "address.city.raw_ignore_case:new*"
    }
  }
}
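One version note: the filtered query was deprecated in ES 2.0 and removed in 5.0, so on newer versions the first query becomes a bool query with a filter clause (a sketch of the equivalent form):

GET /test/restaurant/_search
{
  "query": {
    "bool": {
      "filter": {
        "term": {
          "address.city.raw": "New York"
        }
      }
    }
  }
}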
