Query_string not returning anything in Elasticsearch?

Hello all, I am having a problem. When I was using query_string with the mapping given below, everything was working fine; I was just using the default analyzers with no filters.
"mappings": {
  "places_area1": {
    "properties": {
      "area1": { "type": "string", "index": "analyzed" },
      "city": { "type": "string", "index": "analyzed" }
    }
  }
}
But now, when I try to use query_string with the mapping below, it is not working. Can someone please tell me what I am doing wrong? I guess it is because of the whitespace tokenizer, but why?
"settings": {
"index": {
"analysis": {
"analyzer": {
"synonym_wildcard": {
"tokenizer": "whitespace",
"filter": ["filter_wildcard"]
},
"synonym_term": {
"tokenizer": "keyword",
"filter": ["filter_term"]
},
"simple_wildcard": {
"tokenizer": "whitespace"
}
},
"filter": {
"filter_term": {
"tokenizer": "keyword", // here you have to write this only for tokenizer keyword but not for whitespace
"type": "synonym",
"synonyms_path": "synonyms.txt",
},
"filter_wildcard": {
"type": "synonym",
"synonyms_path": "synonyms.txt",
}
}
}
}
},
mappings : {
places_area1: {
properties:{
area1 : {"type" : "string", "index": "analyzed", "analyzer": "simple_wildcard"},
city : {"type" : "string", "fields": {
"raw": {
"type": "string",
"analyzer": "synonym_term"
},
"raw_wildcard": {
"type": "string",
"analyzer": "synonym_wildcard"
}
} },
}
}
}
}

I think the problem could be that your query is being lowercased, because "lowercase_expanded_terms" is true by default.
{
"query": {
"query_string": {
"default_field": "state",
"query": "Ban*",
"lowercase_expanded_terms": false
}
}
}
Now this should match Bangalore
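If it still does not match, it helps to check which tokens the simple_wildcard analyzer actually produces for an indexed value, using the _analyze API. A minimal sketch, using the older query-string form of _analyze that matches the string-type mappings above; the index name places is just a placeholder for your own index, and New Delhi is just an example value:
GET /places/_analyze?analyzer=simple_wildcard&text=New+Delhi
The whitespace tokenizer emits the tokens exactly as typed ("New", "Delhi") with no lowercasing, so a wildcard term that gets lowercased (the default, since lowercase_expanded_terms is true) will not match them.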

Related

Synonym analyzer with aggregation gives "unable to parse BaseAggregationBuilder with name [match]: parser not found" error

I have an Elasticsearch project in which my aggregation and filter were working correctly before I added a synonym analyzer to the mapping.
Current working mapping:
"settings": {
"analysis": {
"normalizer": {
"lowercase": {
"type": "custom",
"filter": ["lowercase"]
}
}
}
},
"mappings": {
"doc": {
"dynamic": "false",
"properties": {
"primarytrades": {
"type": "nested",
"properties" :{
"name": {
"type": "text",
"fields": {
"keyword": {
"type": "keyword",
"ignore_above": 256,
"normalizer": "lowercase"
}
}
}
}
}
}
}
}
This is the request and response, with the expected bucketed values:
Request:
{"aggs":{"filter_trades":{"aggs":{"nested_trades":{"aggs":{"autocomplete_trades":{"terms":{"field":"primarytrades.name.keyword","include":".*p.*l.*u.*m.b.","size":10}}},"nested":{"path":"primarytrades"}}},"filter":{"nested":{"path":"primarytrades","query":{"bool":{"should":[{"match":{"primarytrades.name":{"fuzziness":2,"query":"plumb"}}},{"match_phrase_prefix":{"primarytrades.name":{"query":"plumb"}}}]}}}}}},"query":{"bool":{"filter":[{"nested":{"path":"primarytrades","query":{"bool":{"should":[{"match":{"primarytrades.name":{"fuzziness":2,"query":"plumb"}}},{"match_phrase_prefix":{"primarytrades.name":{"query":"plumb"}}}]}}}}]}},"size":0}
Response:
{"took":1,"timed_out":false,"_shards":{"total":5,"successful":5,"skipped":0,"failed":0},"hits":{"total":7216,"max_score":0.0,"hits":[]},"aggregations":{"filter#filter_trades":{"doc_count":7216,"nested#nested_trades":{"doc_count":48496,"sterms#autocomplete_trades":{"doc_count_error_upper_bound":0,"sum_other_doc_count":0,"buckets":[{"key":"plumbing","doc_count":7192},{"key":"plumbing parts","doc_count":179}]}}}}}
To add a synonym search feature to this, I changed the mapping to use a synonym analyzer, like this:
"settings": {
"analysis": {
"normalizer": {
"lowercase": {
"type": "custom",
"filter": [ "lowercase" ]
}
},
"analyzer": {
"synonym_analyzer": {
"type": "custom",
"tokenizer": "standard",
"filter": [ "lowercase", "my_synonyms" ]
}
},
"filter": {
"my_synonyms": {
"type": "synonym",
"synonyms": [ "piping, sink, plumbing" ],
"updateable": true
}
}
}
},
"mappings": {
"doc": {
"dynamic": "false",
"properties": {
"primarytrades": {
"type": "nested",
"properties" :{
"name": {
"type": "text",
"fields": {
"keyword": {
"type": "keyword",
"ignore_above": 256
},
"analyzed": {
"type": "text",
"analyzer": "standard",
"search_analyzer": "synonym_analyzer"
}
}
}
}
}
}
}
}
I also changed my query to use the search_analyzer, as below:
{"aggs":{"filter_trades":{"aggs":{"nested_trades":{"aggs":{"autocomplete_trades":{"match":{"field":"primarytrades.name.analyzed","include":".*p.*l.*u.*m.b.","size":10}}},"nested":{"path":"primarytrades"}}},"filter":{"nested":{"path":"primarytrades","query":{"bool":{"should":[{"match":{"primarytrades.name":{"fuzziness":2,"query":"plumb","search_analyzer":"synonym_analyzer"}}},{"match_phrase_prefix":{"primarytrades.name":{"query":"plumb","search_analyzer":"synonym_analyzer"}}}]}}}}}},"query":{"bool":{"filter":[{"nested":{"path":"primarytrades","query":{"bool":{"should":[{"match":{"primarytrades.name":{"fuzziness":2,"query":"plumb","search_analyzer":"synonym_analyzer"}}},{"match_phrase_prefix":{"primarytrades.name":{"query":"plumb","search_analyzer":"synonym_analyzer"}}}]}}}}]}}}
I am getting this error:
"type": "named_object_not_found_exception",
"reason": "[8:24] unable to parse BaseAggregationBuilder with name [match]: parser not found"
Can someone help me correct the query?
Thanks in advance!
In your match queries, you need to specify analyzer and not search_analyzer. search_analyzer is only a valid keyword in the mapping section.
{
"match": {
"primarytrades.name": {
"fuzziness": 2,
"query": "plumb",
"analyzer": "synonym_analyzer" <--- change this
}
}
},
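Applied to your request, the inner bool query would then look something like this (a sketch reusing your field names and the plumb example; note that the aggregation itself should stay a terms aggregation as in your original working request, since match is a query and not an aggregation, which is what the BaseAggregationBuilder error is complaining about):
{
  "bool": {
    "should": [
      {
        "match": {
          "primarytrades.name": {
            "query": "plumb",
            "fuzziness": 2,
            "analyzer": "synonym_analyzer"
          }
        }
      },
      {
        "match_phrase_prefix": {
          "primarytrades.name": {
            "query": "plumb",
            "analyzer": "synonym_analyzer"
          }
        }
      }
    ]
  }
}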

Unable to get similar results with a filter applied to a specific field

I am working on Elasticsearch 6.4.2. I am including my index settings and mapping below; my searchable fields are title and content, and I want to filter the results by the "test" field. The values in the test field are abce, ghij, klmn. I want to filter the results with field "test" and value "ghij".
PUT /test_index
{
"settings": {
"index": {
"number_of_shards": 4,
"number_of_replicas": 1,
"refresh_interval": "60s",
"analysis" : {
"analyzer" : {
"my_analyzer" : {
"tokenizer" : "standard",
"filter" : ["standard", "lowercase", "my_snow"]
},
"blogs_analyzer": {
"type": "stop",
"stopwords": "_english_"
}
} ,
"filter" : {
"my_snow" : {
"type" : "snowball",
"language" : "Lovins"
}
}
}
}
},
"mappings": {
"doc": {
"_source": {
"enabled": true
},
"properties": {
"content": {
"type": "text",
"index": "true",
"store": true,
"analyzer":"my_analyzer",
"search_analyzer": "my_analyzer"
},
"host": {
"type": "keyword",
"index": "true",
"store": true
},
"title": {
"type": "text",
"index": "true",
"store": true,
"analyzer":"my_analyzer",
"search_analyzer": "my_analyzer"
},
"url": {
"type": "text",
"index": "true",
"store": true
},
"test": {
"type": "keyword",
"index": "true",
"store": true
}
}
}
}
}
I tried the below request body to get the results.
POST test_index/_search
{
"query": {
"bool": {
"should": [{
"match": {
"content": {
"query": "sports"
}
}
},
{
"match": {
"title": {
"query": "sports"
}
}
}
],
"filter": {
"bool": {
"must": [{
"term": {
"test": "ghij"
}
}]
}
}
}
}
}
If I send the above request I get fewer records, but when I send the direct request GET /test_index/_search?q=sports I get many more results.

Elasticsearch query multiple types with different bool

I have an index with 3 different types of content: ['media', 'group', 'user'], and I need to search across all three at the same time, while requiring some extra parameters that one of the types must satisfy before its documents are added to the results list.
Here is my current index data:
{
"settings": {
"analysis": {
"filter": {
"nGram_filter": {
"type": "nGram",
"min_gram": 2,
"max_gram": 20,
"token_chars": [
"letter",
"digit",
"punctuation",
"symbol"
]
}
},
"analyzer": {
"nGram_analyzer": {
"type": "custom",
"tokenizer": "whitespace",
"filter": [
"lowercase",
"asciifolding",
"nGram_filter"
]
},
"whitespace_analyzer": {
"type": "custom",
"tokenizer": "whitespace",
"filter": [
"lowercase",
"asciifolding"
]
}
}
}
},
"mappings": {
"media": {
"_all": {
"analyzer": "nGram_analyzer",
"search_analyzer": "whitespace_analyzer"
},
"properties": {
"UID": {
"type": "integer",
"include_in_all": false
},
"addtime": {
"type": "integer",
"include_in_all": false
},
"title": {
"type": "string",
"index": "not_analyzed"
}
}
},
"group": {
"_all": {
"analyzer": "nGram_analyzer",
"search_analyzer": "whitespace_analyzer"
},
"properties": {
"UID": {
"type": "integer",
"include_in_all": false
},
"name": {
"type": "string",
"index": "not_analyzed"
},
"desc": {
"type": "string",
"include_in_all": false
}
}
},
"user": {
"_all": {
"analyzer": "nGram_analyzer",
"search_analyzer": "whitespace_analyzer"
},
"properties": {
"addtime": {
"type": "integer",
"include_in_all": false
},
"username": {
"type": "string"
}
}
}
}
}
So currently I can search across the whole index with
{
  "query": {
    "match": {
      "_all": {
        "query": "foo",
        "operator": "and"
      }
    }
  }
}
and get the results for media, groups, or users containing the word "foo", which is great. But I need to exclude all media that the current user does not own, so I guess I need a bool query where I set the "must" clause and add the 'UID' variable set to the current user's ID.
My problem is how to do this, and how to make the filter apply to only one type while leaving the others untouched.
I haven't been able to find an answer in the Elasticsearch documentation.
In the end I was able to accomplish this by following Andrei's comments. I know it is not perfect, since I had to add a should with the types "group" and "user", but it fits perfectly with my design since I need to put more filters on those too. Be advised that the search will end up being slower.
curl -X GET 'http://localhost:9200/foo/_search' -d '
{
"query": {
"bool" :
{
"must" :
{
"query" : {
"match" :
{
"_all":
{
"query" : "test"
}
}
}
},
"filter":
{
"bool":
{
"should":
[{
"bool" : {
"must":
[{
"type":
{
"value": "media"
}
},
{
"bool":
{
"should" : [
{ "term" : {"UID" : 2}},
{ "term" : {"type" : "public"}}
]
}
}]
}
},
{
"bool" : {
"should" : [
{ "type" : {"value" : "group"}},
{ "type" : {"value" : "user"}}
]
}
}]
}
}
}
}
}'
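For what it's worth, the last should block (the one matching the group and user types) can also be written a bit more compactly as a terms query on the _type metadata field; a sketch, equivalent on the 1.x/2.x versions where multiple mapping types still exist:
{
  "terms": {
    "_type": ["group", "user"] // matches any document whose mapping type is group or user
  }
}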

In ES, how do I write mappings so that a wildcard query works for both lowercase and uppercase?

Hello all, I am facing two problems in ES.
I have a city "New York" in ES, and I want to write a term filter such that it returns a result only when the given string exactly matches "New York". What is happening instead is that my filter returns "New York" when it matches "New" or "York", but it returns nothing for "New York". My mapping is given below; please tell me which analyzer or tokenizer I should use inside the mapping.
Here are the settings and mapping:
"settings": {
"index": {
"analysis": {
"analyzer": {
"synonym": {
"tokenizer": "whitespace",
"filter": ["synonym"]
}
},
"filter": {
"synonym": {
"type": "synonym",
"synonyms_path": "synonyms.txt"
}
}
}
}
},
"mappings": {
  "restaurant": {
    "properties": {
      "address": {
        "properties": {
          "city": { "type": "string", "analyzer": "synonym" }
        }
      }
    }
  }
}
The second problem is that when I try a wildcard query in lowercase, for example "new*", ES does not return anything, but when I search in uppercase, for example "New*", it returns "New York". In this second case I want to write my city mapping such that searching in lowercase or uppercase returns the same thing. I have seen ignore_case and set it inside the synonym filter, but I am still not able to search with both lowercase and uppercase.
"synonym": {
"type": "synonym",
"synonyms_path": "synonyms.txt",
"ignore_case": true // See here
}
I believe you didn't provide enough details, but hoping that my attempt will generate further questions from you, I will post what I believe should be a step forward:
The mapping:
PUT test
{
"settings": {
"index": {
"analysis": {
"analyzer": {
"synonym": {
"tokenizer": "whitespace",
"filter": [
"synonym"
]
},
"keyword_lowercase": {
"type": "custom",
"tokenizer": "keyword",
"filter": [
"lowercase"
]
}
},
"filter": {
"synonym": {
"type": "synonym",
"synonyms_path": "synonyms.txt",
"ignore_case": true
}
}
}
}
},
"mappings": {
"restaurant": {
"properties": {
"address": {
"properties": {
"city": {
"type": "string",
"analyzer": "synonym",
"fields": {
"raw": {
"type": "string",
"index": "not_analyzed"
},
"raw_ignore_case": {
"type": "string",
"analyzer": "keyword_lowercase"
}
}
}
}
}
}
}
}
}
Test data:
POST /test/restaurant/1
{
"address": {"city":"New York"}
}
POST /test/restaurant/2
{
"address": {"city":"new york"}
}
Query for the first problem:
GET /test/restaurant/_search
{
"query": {
"filtered": {
"filter": {
"term": {
"address.city.raw": "New York"
}
}
}
}
}
Query for the second problem:
GET /test/restaurant/_search
{
"query": {
"query_string": {
"query": "address.city.raw_ignore_case:new*"
}
}
}
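With this mapping, both test documents should come back from the second query: keyword_lowercase keeps the whole city value as a single token and lowercases it, so new* (and New*, which query_string lowercases by default) matches both "New York" and "new york". You can check what actually gets indexed with _analyze; a sketch using the older query-string form, run against the test index created above:
GET /test/_analyze?analyzer=keyword_lowercase&text=New+York
The expected output is a single token, "new york", which the wildcard new* matches.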

Elasticsearch facets come back whitespace-tokenized

I have the following mapping for Elasticsearch:
{
"mappings": {
"hotel": {
'properties': {"name": {
"type": "string",
"search_analyzer": "str_search_analyzer",
"index_analyzer": "str_index_analyzer"},
"destination": {'properties': {'en': {
"type": "string",
"search_analyzer": "str_search_analyzer",
"index_analyzer": "str_index_analyzer"}}},
"country": {"properties": {"en": {
"type": "string",
"search_analyzer": "str_search_analyzer",
"index_analyzer": "str_index_analyzer"}}},
"destination_facets": {"properties": {"en": {
"type": "string",
"search_analyzer": "facet_analyzer"
}}}
}
}
},
"settings": {
"analysis": {
"analyzer": {
"str_search_analyzer": {
"tokenizer": "keyword",
"filter": ["lowercase"]
},
"str_index_analyzer": {
"tokenizer": "keyword",
"filter": ["lowercase", "substring"]
},
"facet_analyzer": {
"type": "keyword",
"tokenizer": "keyword"
}
},
"filter": {
"substring": {
"type": "edgeNGram",
"min_gram": 1,
"max_gram": 20,
}
}
}
}
}
I want my destination_facets not to be tokenized, but the facet values come back whitespace-tokenized. Is there a way to avoid tokenization entirely?
You probably need to set your facet_analyzer not only as the search_analyzer but also as the index_analyzer (Elasticsearch most likely uses the index analyzer for faceting; the search_analyzer is only used to parse query strings).
Note that if you want the same analyzer for both, you can just use the analyzer setting in your mapping.
Ex :
{
"mappings": {
"hotel": {
...
"destination_facets": {"properties": {"en": {
"type": "string",
"analyzer": "facet_analyzer"
}}}
}
},
"settings": {
...
}
}
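Keep in mind that changing the index-time analyzer only affects documents indexed afterwards, so existing documents need to be reindexed before the facets change. To confirm that the facet values stay as single tokens, you can run the analyzer directly; a sketch using the older query-string form of _analyze, with hotels standing in for your index name:
GET /hotels/_analyze?analyzer=facet_analyzer&text=New+York+City
The expected output is one token, "New York City", so the facet buckets are no longer split on whitespace.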
