Elasticsearch query does not work with "#" value - elasticsearch

When I execute a simple search query on an e-mail address, it does not return anything unless I remove what follows the "#". Why?
I want to run fuzzy and autocomplete queries on the e-mail addresses.
ELASTICSEARCH INFO:
{
  "name" : "ZZZ",
  "cluster_name" : "YYY",
  "cluster_uuid" : "XXX",
  "version" : {
    "number" : "6.5.2",
    "build_flavor" : "default",
    "build_type" : "tar",
    "build_hash" : "WWW",
    "build_date" : "2018-11-29T23:58:20.891072Z",
    "build_snapshot" : false,
    "lucene_version" : "7.5.0",
    "minimum_wire_compatibility_version" : "5.6.0",
    "minimum_index_compatibility_version" : "5.0.0"
  },
  "tagline" : "You Know, for Search"
}
MAPPING:
PUT users
{
  "mappings": {
    "_doc": { "properties": { "mail": { "type": "text" } } }
  }
}
ALL DATA:
[
  { "mail": "firstname.lastname#company.com" },
  { "mail": "john.doe#company.com" }
]
QUERY WORKS:
The term query matches, even though the indexed mail is "firstname.lastname#company.com" and not "firstname.lastname"...
QUERY:
GET users/_search
{ "query": { "term": { "mail": "firstname.lastname" } }}
RETURN:
{
  "took": 7,
  "timed_out": false,
  "_shards": { "total": 6, "successful": 6, "skipped": 0, "failed": 0 },
  "hits": {
    "total": 1,
    "max_score": 4.336203,
    "hits": [
      {
        "_index": "users",
        "_type": "_doc",
        "_id": "H1dQ4WgBypYasGfnnXXI",
        "_score": 4.336203,
        "_source": {
          "mail": "firstname.lastname#company.com"
        }
      }
    ]
  }
}
QUERY DOES NOT WORK:
QUERY:
GET users/_search
{ "query": { "term": { "mail": "firstname.lastname#company.com" } }}
RETURN:
{
  "took": 0,
  "timed_out": false,
  "_shards": { "total": 6, "successful": 6, "skipped": 0, "failed": 0 },
  "hits": {
    "total": 0,
    "max_score": null,
    "hits": []
  }
}
SOLUTION:
Change the mapping (and reindex after the mapping change) to use the uax_url_email tokenizer for the mail field.
PUT users
{
  "settings": {
    "index": { "analysis": { "analyzer": { "mail": { "tokenizer": "uax_url_email" } } } }
  },
  "mappings": {
    "_doc": { "properties": { "mail": { "type": "text", "analyzer": "mail" } } }
  }
}

If you use no other tokenizer for your indexed text field, it will use the standard tokenizer, which splits on the # symbol [I don't have a source on this, but there's proof below].
If you use a term query rather than a match query, that exact term will be searched for in the inverted index (see: Elasticsearch match vs term query).
Your inverted index looks like this:
GET users/_analyze
{
  "text": "firstname.lastname#company.com"
}
which returns:
{
  "tokens": [
    {
      "token": "firstname.lastname",
      "start_offset": 0,
      "end_offset": 18,
      "type": "<ALPHANUM>",
      "position": 0
    },
    {
      "token": "company.com",
      "start_offset": 19,
      "end_offset": 30,
      "type": "<ALPHANUM>",
      "position": 1
    }
  ]
}
To resolve this, you could specify your own analyzer for the mail field, or you could use the match query, which analyzes the searched text the same way it analyzes the indexed text.
GET users/_search
{
"query": {
"match": {
"mail": "firstname.lastname#company.com"
}
}
}
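With the custom mail analyzer from the SOLUTION in place, you can also inspect what it produces for the same input and compare it against the standard tokenizer output shown above (a minimal sketch; run it against the reindexed users index):
GET users/_analyze
{
  "analyzer": "mail",
  "text": "firstname.lastname#company.com"
}
If the address survives as a single token, the term query from the question will match it exactly.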

Related

Change field type in index without reindex

First, I had this index template
GET localhost:9200/_index_template/document
And this is the output:
{
"index_templates": [
{
"name": "document",
"index_template": {
"index_patterns": [
"v*-documents-*"
],
"template": {
"settings": {
"index": {
"number_of_shards": "1"
}
},
"mappings": {
"properties": {
"firstOperationAtUtc": {
"format": "epoch_millis",
"ignore_malformed": true,
"type": "date"
},
"firstOperationAtUtcDate": {
"ignore_malformed": true,
"type": "date"
}
}
},
"aliases": {
"documents-": {}
}
},
"composed_of": [],
"priority": 501,
"version": 1
}
}
]
}
And my data is indexed, for example:
GET localhost:9200/v2-documents-2021-11-20/_search
{
"query": {
"bool": {
"should": [
{
"exists": {
"field": "firstOperationAtUtc"
}
}
]
}
}
}
Output is
{
"took": 1,
"timed_out": false,
"_shards": {
"total": 1,
"successful": 1,
"skipped": 0,
"failed": 0
},
"hits": {
"total": {
"value": 1,
"relation": "eq"
},
"max_score": 1.0,
"hits": [
{
"_index": "v2-documents-2021-11-20",
"_type": "_doc",
"_id": "9b46d6fe78735274342d1bc539b084510000000455",
"_score": 1.0,
"_source": {
"firstOperationAtUtc": 1556868952000,
"firstOperationAtUtcDate": "2019-05-03T13:35:52.000Z"
}
}
]
}
}
Next, I need to update the mapping for the firstOperationAtUtc field and remove the epoch_millis format:
PUT localhost:9200/_template/document
{
"index_patterns": [
"v*-documents-*"
],
"template": {
"settings": {
"index": {
"number_of_shards": "1"
}
},
"mappings": {
"properties": {
"firstOperationAtUtc": {
"ignore_malformed": true,
"type": "date"
},
"firstOperationAtUtcDate": {
"ignore_malformed": true,
"type": "date"
}
}
},
"aliases": {
"documents-": {}
}
},
"version": 1
}
After that, if I run the previous search request, I still get the indexed data.
But now I need to update the firstOperationAtUtc field and set its data from firstOperationAtUtcDate:
POST localhost:9200/v2-documents-2021-11-20/_update_by_query
{
"script": {
"source": "if (ctx._source.firstOperationAtUtcDate != null) { ctx._source.firstOperationAtUtc = ctx._source.firstOperationAtUtcDate }",
"lang": "painless"
},
"query": {
"match": {
"_id": "9b46d6fe78735274342d1bc539b084510000000455"
}
}
}
After that, if I run the previous search request:
GET localhost:9200/v2-documents-2021-11-20/_search
{
"query": {
"bool": {
"should": [
{
"exists": {
"field": "firstOperationAtUtc"
}
}
]
}
}
}
I get no indexed data back:
{
"took": 1,
"timed_out": false,
"_shards": {
"total": 1,
"successful": 1,
"skipped": 0,
"failed": 0
},
"hits": {
"total": {
"value": 0,
"relation": "eq"
},
"max_score": null,
"hits": []
}
}
But if I search by id, I do get the document with the modified data, but my field is now ignored:
GET localhost:9200/v2-documents-2021-11-20/_search
{
"query": {
"terms": {
"_id": [ "9b46d6fe78735274342d1bc539b084510000000455" ]
}
}
}
Output is
{
"took": 1,
"timed_out": false,
"_shards": {
"total": 1,
"successful": 1,
"skipped": 0,
"failed": 0
},
"hits": {
"total": {
"value": 1,
"relation": "eq"
},
"max_score": 1.0,
"hits": [
{
"_index": "v2-documents-2021-11-20",
"_type": "_doc",
"_id": "9b46d6fe78735274342d1bc539b084510000000455",
"_score": 1.0,
"_ignored": [
"firstOperationAtUtc"
],
"_source": {
"firstOperationAtUtc": "2019-05-03T13:35:52.000Z",
"firstOperationAtUtcDate": "2019-05-03T13:35:52.000Z"
}
}
]
}
}
How can I index this data without a reindex? I have billions of documents in the index, and reindexing could produce huge downtime in prod.
What you changed is the index template, but not your index mapping. The index template is used only when a new index that matches the name pattern is created.
What you want to do is to modify the actual mapping of your index, like this:
PUT test/_mapping
{
"properties": {
"firstOperationAtUtc": {
"ignore_malformed": true,
"type": "date"
}
}
}
However, this won't be possible and you will get the following error, which makes sense as you cannot modify an existing field mapping.
Mapper for [firstOperationAtUtc] conflicts with existing mapper:
Cannot update parameter [format] from [epoch_millis] to [strict_date_optional_time||epoch_millis]
The only reason your update by query seemed to work is that you have "ignore_malformed": true in your mapping. If you remove that parameter and run your update by query again, you'd see the following error:
"type" : "mapper_parsing_exception",
"reason" : "failed to parse field [firstOperationAtUtc] of type [date] in document with id '2'. Preview of field's value: '2019-05-03T13:35:52.000Z'",
"caused_by" : {
"type" : "illegal_argument_exception",
"reason" : "failed to parse date field [2019-05-03T13:35:52.000Z] with format [epoch_millis]",
"caused_by" : {
"type" : "date_time_parse_exception",
"reason" : "date_time_parse_exception: Failed to parse with all enclosed parsers"
}
}
So, to wrap it up, you have two options:
Create a new index with the right mapping and reindex your old index into it, but that doesn't seem like an option for you.
Create a new field in your existing index mapping (e.g. firstOperationAtUtcTime) and discard the use of firstOperationAtUtc
The steps would be:
Modify the index template to add the new field
Modify the actual index mapping to add the new field
Run your update by query by modifying the script to write your new field
In short:
# 1. Modify your index template
# 2. Modify your actual index mapping
PUT v2-documents-2021-11-20/_mapping
{
"properties": {
"firstOperationAtUtcTime": {
"ignore_malformed": true,
"type": "date"
}
}
}
# 3. Run update by query again
POST v2-documents-2021-11-20/_update_by_query
{
"script": {
"source": "if (ctx._source.firstOperationAtUtcDate != null) { ctx._source.firstOperationAtUtcTime = ctx._source.firstOperationAtUtcDate; ctx._source.remove('firstOperationAtUtc')}",
"lang": "painless"
},
"query": {
"match": {
"_id": "9b46d6fe78735274342d1bc539b084510000000455"
}
}
}
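Given the index size mentioned in the question, step 3 can also be run over the whole index asynchronously rather than against a single _id (a sketch; wait_for_completion=false makes Elasticsearch return a task id you can poll instead of blocking on the request):
POST v2-documents-2021-11-20/_update_by_query?wait_for_completion=false&slices=auto
{
  "script": {
    "source": "if (ctx._source.firstOperationAtUtcDate != null) { ctx._source.firstOperationAtUtcTime = ctx._source.firstOperationAtUtcDate; ctx._source.remove('firstOperationAtUtc') }",
    "lang": "painless"
  }
}
# Poll the task id returned by the call above to track progress
GET _tasks/<task id from the response>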

Get elasticsearch to ignore diacritics and accents in search hit

I want to search data in Elasticsearch in different languages, and I expect the data to be retrieved whether or not it contains diacritics or accents.
For example, I have this data:
POST ابجد/_doc/31
{
  "name": "def",
  "city": "Tulkarem"
}
POST ابجٌد/_doc/31
{
  "name": "def",
  "city": "Tulkarem"
}
PUT /abce
{
"settings" : {
"analysis" : {
"analyzer" : {
"default" : {
"tokenizer" : "standard",
"filter" : ["my_ascii_folding"]
}
},
"filter" : {
"my_ascii_folding" : {
"type" : "asciifolding",
"preserve_original" : true
}
}
}
}
}
The difference between the two indexes is the diacritics.
Trying to get the data:
GET ابجد/_search
I need it to retrieve both indices; currently it is returning this:
{
"took": 2,
"timed_out": false,
"_shards": {
"total": 1,
"successful": 1,
"skipped": 0,
"failed": 0
},
"hits": {
"total": {
"value": 1,
"relation": "eq"
},
"max_score": 1,
"hits": [
{
"_index": "ابجد",
"_id": "31",
"_score": 1,
"_source": {
"name": "def",
"city": "Tulkarem"
}
}
]
}
}
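To see what the asciifolding filter in the abce settings above actually does, you can run the analyzer directly (a minimal sketch; with preserve_original set to true, a Latin input such as "Café" should come back as both the folded token "Cafe" and the original "Café"):
GET abce/_analyze
{
  "analyzer": "default",
  "text": "Café"
}
Note that asciifolding folds Latin-script accents into ASCII; whether it helps for the Arabic diacritics in the index names above is a separate question.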

Elasticsearch advanced autocomplete

I want to autocomplete user input with Elasticsearch. There are tons of tutorials out there on how to do this, but none go into the really detailed stuff.
The last issue I'm having with my query is that it should give results that are not real "autocompletions" a lower score. Example:
IS:
I type: "Bed"
I find: "Bed", "Bigbed", "Fancy Bed", "Bed Frame"
WANT:
I type: "Bed"
I find: "Bed", "Bed Frame", [other "Bed XXX" results], "Fancy Bed", "Bigbed"
So I want Elasticsearch to first complete "to the right", if that makes sense, and only then use results that have words in front of the match.
I've tried the completion suggester; it doesn't do other things I need, and it also has the same issue.
In German there are lots of compound words like "Bigbed" (which isn't a real word in English, I know). I don't want those words as high results, but since they match more closely than "Bed Frame" (which is 2 tokens), they show up that high.
This is my query currently:
POST autocompletion/_search?pretty
{
"query": {
"function_score": {
"query": {
"match": {
"keyword": {
"query": "Bed",
"fuzziness": 1,
"minimum_should_match": "100%"
}
}
},
"field_value_factor": {
"field": "bias",
"factor": 1
}
}
}
}
If you use the Elasticsearch completion suggester, as explained at https://www.elastic.co/guide/en/elasticsearch/reference/current/search-suggesters-completion.html, when querying like:
{
"suggest": {
"song-suggest" : {
"prefix" : "bed",
"completion" : {
"field" : "suggest"
}
}
}
}
You will get:
{
"took": 13,
"timed_out": false,
"_shards": {
"total": 5,
"successful": 5,
"skipped": 0,
"failed": 0
},
"hits": {
"total": 0,
"max_score": 0.0,
"hits": []
},
"suggest": {
"song-suggest": [
{
"text": "bed",
"offset": 0,
"length": 3,
"options": [
{
"text": "Bed",
"_index": "autocomplete",
"_type": "_doc",
"_id": "1",
"_score": 34.0,
"_source": {
"suggest": {
"input": [
"Bed"
],
"weight": 34
}
}
},
{
"text": "Bed Frame",
"_index": "autocomplete",
"_type": "_doc",
"_id": "3",
"_score": 34.0,
"_source": {
"suggest": {
"input": [
"Bed Frame"
],
"weight": 34
}
}
}
]
}
]
}
}
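For reference, the suggest field behind this response could be mapped and populated roughly like this (a sketch; the index name, input, and weight values are taken from the output above):
PUT autocomplete
{
  "mappings": {
    "_doc": {
      "properties": {
        "suggest": { "type": "completion" }
      }
    }
  }
}
PUT autocomplete/_doc/1
{
  "suggest": {
    "input": ["Bed"],
    "weight": 34
  }
}
The weight becomes the suggestion's score, which is why both hits above score 34.0.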
If you want to use the search API instead, you can combine 2 queries:
a prefix query for suggestions starting with "bed ****"
a match query with a term starting with "bed"
Here is the mapping:
{
"mappings": {
"_doc" : {
"properties" : {
"suggest" : {
"type" : "text",
"fields" : {
"keyword" : {
"type" : "keyword"
}
}
}
}
}
}
Here is the search query:
{
"query" : {
"bool" : {
"must" : [
{
"match" : {
"suggest" : "Bed"
}
}
],
"should" : [
{
"prefix" : {
"suggest.keyword" : "Bed"
}
}
]
}
}
}
The should clause will boost documents starting with "Bed". Et voilà!
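If the prefix clause's boost is too weak or too strong relative to the match score, you can tune it explicitly (a sketch; the boost value is arbitrary):
{
  "query" : {
    "bool" : {
      "must" : [
        { "match" : { "suggest" : "Bed" } }
      ],
      "should" : [
        {
          "prefix" : {
            "suggest.keyword" : { "value" : "Bed", "boost" : 2.0 }
          }
        }
      ]
    }
  }
}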

elasticsearch query string problem with lowercase and underscore

I created a custom analyzer in my Elasticsearch. I want to split the words in my defined field, "my_field", on whitespace only. My search also needs to be case-insensitive, so I used the lowercase filter.
PUT my_index
{
  "settings": {
    "analysis": {
      "analyzer": {
        "my_custom_analyzer": {
          "type": "custom",
          "tokenizer": "whitespace",
          "filter": [
            "lowercase",
            "trim"
          ]
        }
      }
    }
  },
  "mappings" : {
    "my_type" : {
      "properties" : {
        "my_field" : {
          "type" : "string",
          "analyzer" : "my_custom_analyzer"
        }
      }
    }
  }
}
After creating the index, I analyze my sample data:
POST my_index/_analyze
{
"analyzer": "my_custom_analyzer",
"text": "my_Sample_TEXT"
}
and the output is
{
"tokens": [
{
"token": "my_sample_text",
"start_offset": 0,
"end_offset": 14,
"type": "word",
"position": 0
}
]
}
I have many documents whose "my_field" contains "my_Sample_TEXT", but when I search for this text using a query string, it returns 0 results:
GET my_index/_search
{
"query": {
"query_string" : {
"default_field" : "my_type",
"query" : "*my_sample_text*",
"analyzer" : "my_custom_analyzer",
"enable_position_increments": true,
"default_operator": "AND"
}
}
}
My result is:
{
"took": 9,
"timed_out": false,
"_shards": {
"total": 5,
"successful": 5,
"failed": 0
},
"hits": {
"total": 0,
"max_score": null,
"hits": []
}
}
I found that this problem happens when my text has underscores and uppercase characters. Can anyone help me fix this problem?
Could you try changing the mapping of "my_field" as below:
"my_field" : {
"type" : "string",
"analyzer" : "my_custom_analyzer"
"search_analyzer": "my_custom_analyzer"
}
Because ES uses the standard analyzer when you do not set any analyzer, and the standard analyzer can create multiple tokens from your underscored text.
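To see exactly which terms each analyzer emits, you can run both against the same input and compare (a quick sketch; the second call should yield the single lowercased token shown in the question):
POST my_index/_analyze
{
  "analyzer": "standard",
  "text": "my_Sample_TEXT"
}
POST my_index/_analyze
{
  "analyzer": "my_custom_analyzer",
  "text": "my_Sample_TEXT"
}
Whichever terms the first call produces are what a query analyzed with the standard analyzer gets matched against.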

ElasticSearch 'range' query returns inappropriate results

Let's take this query:
{
"timeout": 10000,
"from": 0,
"size": 21,
"sort": [
{
"view_avg": {
"order": "desc"
}
}
],
"query": {
"bool": {
"must": [
{
"range": {
"price": {
"from": 10,
"to": 20
}
}
},
{
"terms": {
"category_ids": [
16405
]
}
}
]
}
}
}
On the data set I am running against, this query should return no results (all prices are in the 100s-1000s range). However, it returns results with prices such as:
"price": "1399.00"
"price": "1299.00"
"price": "1089.00"
And so on, and so forth. Any ideas how I could modify the query so it returns the correct results?
I'm 99% sure your mapping is wrong and price is declared as a string. Elasticsearch uses different Lucene range queries based on the field type, as you can see in their documentation. The TermRangeQuery used for string fields behaves exactly like your output: it uses lexicographical ordering (i.e. "1100" sorts between "10" and "20").
To test it you can try the following mapping/search:
PUT tests/
PUT tests/test/_mapping
{
"test": {
"_source" : {"enabled" : false},
"_all" : {"enabled" : false},
"properties" : {
"num" : {
"type" : "float", // <-- HERE IT'S A FLOAT
"store" : "no",
"index" : "not_analyzed"
}
}
}
}
PUT tests/test/1
{
"test" : {
"num" : 100
}
}
POST tests/test/_search
{
"query": {
"bool": {
"must": [
{
"range": {
"num": {
"from": 10,
"to": 20
}
}
}
]
}
}
}
Result:
{
"took": 12,
"timed_out": false,
"_shards": {
"total": 5,
"successful": 5,
"failed": 0
},
"hits": {
"total": 0,
"max_score": null,
"hits": []
}
}
If you delete the index and try to recreate it changing the num type to a string:
PUT tests/test/_mapping
{
"test": {
"_source" : {"enabled" : false},
"_all" : {"enabled" : false},
"properties" : {
"num" : {
"type" : "string", // <-- HERE IT'S A STRING
"store" : "no",
"index" : "not_analyzed"
}
}
}
}
You'll see a different result:
{
"took": 2,
"timed_out": false,
"_shards": {
"total": 5,
"successful": 5,
"failed": 0
},
"hits": {
"total": 1,
"max_score": 1,
"hits": [
{
"_index": "tests",
"_type": "test",
"_id": "1",
"_score": 1
}
]
}
}
price needs to be a numeric field for that must clause to work. If it's a string, incorrect matches will be returned. Make sure the mapping is correct; if the field had been a float, it would have worked.
You can check the mapping of the index with GET /index_name/_mapping.
If you had had the following (with price as a string):
"range": {
"price": {
"from": 30,
"to": 40
}
}
that shouldn't return the docs, because "1" (as a string) sorts before "3" or "4" (as strings), even though, numerically speaking, 30 is smaller than 1399.
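Since a field's type cannot be changed on an existing index, the usual fix is to create a correctly mapped index and copy the data into it (a sketch using hypothetical index names products and products_v2, in the same typed-mapping syntax as the test above; _reindex requires a reasonably recent Elasticsearch):
# Create a new index where price is numeric
PUT products_v2
{
  "mappings": {
    "product": {
      "properties": {
        "price": { "type": "float" }
      }
    }
  }
}
# Copy the documents across
POST _reindex
{
  "source": { "index": "products" },
  "dest": { "index": "products_v2" }
}
After that, the range query from the question will compare price numerically.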
