Get all entities from nested JSON data where "ai_id" has the value 0 - elasticsearch

I have the JSON data below and I want to write an Elasticsearch query that says:
(Give me all entities where "ai_id" has the value 0).
The JSON data is:
{
"_index": "try1",
"_type": "_doc",
"_id": "2",
"_score": 1,
"_source": {
"target": {
"br_id": 0,
"an_id": 0,
"ai_id": 0,
"explanation": [
"element 1",
"element 2"
]
},
"process": {
"an_id": 1311,
"pa_name": "micha"
},
"text": "hello world"
}
},
{
"_index": "try1",
"_type": "_doc",
"_id": "1",
"_score": 1,
"_source": {
"target": {
"br_id": 0,
"an_id": 1,
"ai_id": 1,
"explanation": [
"element 3",
"element 4"
]
},
"process": {
"an_id": 1311,
"pa_name": "luca"
},
"text": "the all People are good"
}
}
]
}
}
I tried this, but it does not seem to work. Any help would be appreciated.
GET try1/_search
{
"query":{
{ "match_all": { "ai_id": 0}}
}
}
and this did not work either:
GET try1/_search
{
"query": {
"nested" : {
"query" : {
"must" : [
{ "match" : {"ai_id" : 0} }
]
}
}
}
}
Any suggestion would be appreciated. Thanks.

You need to use a nested query on your target object, like this:
GET /try1/_search
{
"query": {
"nested" : {
"path" : "target",
"query" : {
"bool" : {
"must" : [
{ "match" : {"target.ai_id" : 0} }
]
}
}
}
}
}
Ref. https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-nested-query.html
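Note that a nested query only works if the target field is mapped with "type": "nested"; if target is a plain object field, a simple bool query with a match on target.ai_id is enough. A minimal sketch of such a mapping (the field types here are assumptions based on the sample data):
PUT /try1
{
"mappings": {
"properties": {
"target": {
"type": "nested",
"properties": {
"br_id": { "type": "integer" },
"an_id": { "type": "integer" },
"ai_id": { "type": "integer" },
"explanation": { "type": "text" }
}
},
"process": {
"properties": {
"an_id": { "type": "integer" },
"pa_name": { "type": "keyword" }
}
},
"text": { "type": "text" }
}
}
}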

Related

elasticsearch - search query - ignore order

I'm using a query like
{bool: {must: [{match: {name: "Cat Dog"}}]
This gives me records with a name like "Cat Dog Cow" but not "Cat Cow Dog".
From what I read here, span_near can be used for this; is it the only way?
I tried query such as :
{"query":{"bool":{"must":[],"must_not":[],"should":[{"span_near":{"slop":12,"in_order":false,"clauses":[{"span_term":{"name":"Cat"}},{"span_term":{"name":"Dog"}}]}}]}}}
But this gives me 0 hits. What can be the issue?
The match query returns documents that match a provided text; the provided text is analyzed before matching.
Here is a working example.
Index mapping:
{
"mappings": {
"properties": {
"name": {
"type": "text"
}
}
}
}
Search Query:
{
"query": {
"match": {
"name": {
"query": "Cat Dog"
}
}
}
}
Search Result:
"hits": [
{
"_index": "65230619",
"_type": "_doc",
"_id": "1",
"_score": 0.36464313,
"_source": {
"name": "Cat Dog Cow"
}
},
{
"_index": "65230619",
"_type": "_doc",
"_id": "2",
"_score": 0.36464313,
"_source": {
"name": "Cat Cow Dog"
}
}
]
Search query using span_near (note the lowercase terms: span_term is a term-level query whose input is not analyzed, so an uppercase "Cat" does not match the lowercased token "cat" in the index, which is why your query returned 0 hits):
{
"query": {
"span_near" : {
"clauses" : [
{ "span_term" : { "name" : "cat" } },
{ "span_term" : { "name" : "dog" } }
],
"slop" : 12,
"in_order" : false
}
}
}
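If you only need both terms to be present, in any order, one more option (shown here as a sketch against the same name field) is a match query with the and operator:
{
"query": {
"match": {
"name": {
"query": "Cat Dog",
"operator": "and"
}
}
}
}
With the two sample documents above, this matches both "Cat Dog Cow" and "Cat Cow Dog", since both tokens are present regardless of order.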

Filter Elasticsearch data when fields contain ~

I have a bunch of documents like the one below. I want to filter the data where projectKey starts with ~.
I read some articles saying that ~ is an operator in Elasticsearch query syntax, so you cannot simply filter on it.
Can someone help me form the search query for the /branch/_search API?
{
"_index": "branch",
"_type": "_doc",
"_id": "GAz-inQBJWWbwa_v-l9e",
"_version": 1,
"_score": null,
"_source": {
"branchID": "refs/heads/feature/12345",
"displayID": "feature/12345",
"date": "2020-09-14T05:03:20.137Z",
"projectKey": "~user",
"repoKey": "deploy",
"isDefaultBranch": false,
"eventStatus": "CREATED",
"user": "user"
},
"fields": {
"date": [
"2020-09-14T05:03:20.137Z"
]
},
"highlight": {
"projectKey": [
"~#kibana-highlighted-field#user#/kibana-highlighted-field#"
],
"projectKey.keyword": [
"#kibana-highlighted-field#~user#/kibana-highlighted-field#"
],
"user": [
"#kibana-highlighted-field#user#/kibana-highlighted-field#"
]
},
"sort": [
1600059800137
]
}
UPDATE
I used prerana's answer below to add a prefix clause to my query.
Something is still wrong when I combine prefix and range; I get the error below. What am I missing?
GET /branch/_search
{
"query": {
"prefix": {
"projectKey": "~"
},
"range": {
"date": {
"gte": "2020-09-14",
"lte": "2020-09-14"
}
}
}
}
{
"error": {
"root_cause": [
{
"type": "parsing_exception",
"reason": "[prefix] malformed query, expected [END_OBJECT] but found [FIELD_NAME]",
"line": 6,
"col": 5
}
],
"type": "parsing_exception",
"reason": "[prefix] malformed query, expected [END_OBJECT] but found [FIELD_NAME]",
"line": 6,
"col": 5
},
"status": 400
}
If I understood your issue well, I suggest creating a custom analyzer to search for the special character ~.
I did a test locally, replacing ~ with __SPECIAL__:
I created an index with a custom char_filter and added a sub-field to the projectKey field. The name of the new multi-field is special_characters.
Here is the mapping:
PUT wildcard-index
{
"settings": {
"analysis": {
"char_filter": {
"special-characters-replacement": {
"type": "mapping",
"mappings": [
"~ => __SPECIAL__"
]
}
},
"analyzer": {
"special-characters-analyzer": {
"tokenizer": "standard",
"char_filter": [
"special-characters-replacement"
]
}
}
}
},
"mappings": {
"properties": {
"projectKey": {
"type": "text",
"fields": {
"special_characters": {
"type": "text",
"analyzer": "special-characters-analyzer"
}
}
}
}
}
}
Then I ingested the following contents in the index:
"projectKey": "content1 ~"
"projectKey": "This ~ is a content"
"projectKey": "~ cars on the road"
"projectKey": "o ~ngram"
Then, the query was:
GET wildcard-index/_search
{
"query": {
"match": {
"projectKey.special_characters": "~"
}
}
}
The response was:
"hits" : [
{
"_index" : "wildcard-index",
"_type" : "_doc",
"_id" : "h1hKmHQBowpsxTkFD9IR",
"_score" : 0.43250346,
"_source" : {
"projectKey" : "content1 ~"
}
},
{
"_index" : "wildcard-index",
"_type" : "_doc",
"_id" : "iFhKmHQBowpsxTkFFNL5",
"_score" : 0.3034693,
"_source" : {
"projectKey" : "This ~ is a content"
}
},
{
"_index" : "wildcard-index",
"_type" : "_doc",
"_id" : "-lhKmHQBowpsxTkFG9Kg",
"_score" : 0.3034693,
"_source" : {
"projectKey" : "~ cars on the road"
}
}
]
Please let me know if you have any issues, I will be glad to help.
Note: this method works only if there is a blank space after the ~. You can see from the response that the 4th document was not returned.
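To see what the custom analyzer actually produces for a given input, the _analyze API is a quick check (a sketch against the wildcard-index defined above):
GET wildcard-index/_analyze
{
"analyzer": "special-characters-analyzer",
"text": "content1 ~"
}
This should return the tokens content1 and __SPECIAL__, which is why a match query for ~ (also rewritten to __SPECIAL__ at search time) finds these documents.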
While @hansley's answer would work, it requires you to create a custom analyzer, and as you mentioned you want only the docs that start with ~, whereas his result includes all docs containing ~. So here is an answer that needs very little configuration and works as required.
The index mapping is left at the default, so just index the docs below and ES will create a default mapping with a .keyword sub-field for every text field.
Index sample docs
{
"title" : "content1 ~"
}
{
"title" : "~ staring with"
}
{
"title" : "in between ~ with"
}
The search query should fetch only the 2nd doc from the sample docs:
{
"query": {
"prefix" : { "title.keyword" : "~" }
}
}
And search result
"hits": [
{
"_index": "pre",
"_type": "_doc",
"_id": "2",
"_score": 1.0,
"_source": {
"title": "~ staring with"
}
}
]
Please refer to the prefix query documentation for more info. The prefix query works on the .keyword sub-field because keyword fields are not analyzed, so the leading ~ is preserved exactly as indexed.
Update 1: to combine the prefix and range clauses, wrap both inside a bool query's must array; the top-level query object accepts only a single query clause, which is why your request failed with a parsing_exception.
Index Mapping:
{
"mappings": {
"properties": {
"date": {
"type": "date"
}
}
}
}
Index Data:
{
"date": "2015-02-01",
"title" : "in between ~ with"
}
{
"date": "2015-01-01",
"title": "content1 ~"
}
{
"date": "2015-02-01",
"title" : "~ staring with"
}
{
"date": "2015-02-01",
"title" : "~ in between with"
}
Search Query:
{
"query": {
"bool": {
"must": [
{
"prefix": {
"title.keyword": "~"
}
},
{
"range": {
"date": {
"lte": "2015-02-05",
"gte": "2015-01-11"
}
}
}
]
}
}
}
Search Result:
"hits": [
{
"_index": "stof_63924930",
"_type": "_doc",
"_id": "2",
"_score": 2.0,
"_source": {
"date": "2015-02-01",
"title": "~ staring with"
}
},
{
"_index": "stof_63924930",
"_type": "_doc",
"_id": "4",
"_score": 2.0,
"_source": {
"date": "2015-02-01",
"title": "~ in between with"
}
}
]

How to add a user defined field and value to an elasticsearch query

Goal: I want a query which adds a discriminator field to distinguish between fuzzy results and non-fuzzy results.
Consider these documents:
curl -X POST "localhost:9200/_bulk" -H 'Content-Type: application/json' -d'
{
"index": {
"_index": "dishes",
"_type": "dish",
"_id": "1"
}
}
{
"name": "butter chicken"
}
{
"index": {
"_index": "dishes",
"_type": "dish",
"_id": "2"
}
}
{
"name": "chicken burger"
}
'
Consider the following query:
curl -X POST "localhost:9200/dishes/_search?pretty" -H 'Content-Type: application/json' -d'
{
"query": {
"bool": {
"should": [
{
"term": {
"name": "burger"
}
},
{
"fuzzy": {
"name": {
"value": "burger"
}
}
}
],
"minimum_should_match": 1,
"boost": 1.0
}
}
}
'
Can I have a result with an additional tag, created at query time (it is not in the document), that can be used to discriminate between fuzzy and non-fuzzy results?
...
"hits" : [
{
"_index" : "dishes",
"_type" : "dish",
"_id" : "2",
"_score" : 1.3862942,
"_source" : {
"name" : "chicken burger"
},
"is_fuzzy": false
},
{
"_index" : "dishes",
"_type" : "dish",
"_id" : "1",
"_score" : 0.46209806,
"_source" : {
"name" : "butter chicken"
},
"is_fuzzy": true
}
]
Scripted fields would have been ideal, but no luck yet.
I have a requirement to present the non-fuzzy results before the fuzzy results, so sorting on is_fuzzy and then _score is guaranteed to work. (The actual query is more complex.)
sort: [
{
"is_fuzzy": {
"order": "desc"
}
},
{
"_score": {
"order": "desc"
}
}
]
One more option is to use named queries but your term filters will need to be slightly reworked:
GET dishes/_search
{
"query": {
"bool": {
"should": [
{
"term": {
"name": {
"value": "burger",
"_name": "not_fuzzy"
}
}
},
{
"fuzzy": {
"name": {
"value": "burger",
"_name": "fuzzy"
}
}
}
],
"minimum_should_match": 1,
"boost": 1
}
}
}
yielding
[
{
"_index":"dishes",
"_type":"dish",
"_id":"2",
"_score":1.3862944,
"_source":{
"name":"chicken burger"
},
"matched_queries":[ <---
"fuzzy",
"not_fuzzy"
]
},
{
"_index":"dishes",
"_type":"dish",
"_id":"1",
"_score":0.46209806,
"_source":{
"name":"butter chicken"
},
"matched_queries":[ <---
"fuzzy"
]
}
]
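Elasticsearch cannot sort on matched_queries, so to present the non-fuzzy hits first you can re-order the hits client-side. A minimal sketch with the Python client, 7.x style body= (the localhost connection is an assumption; the index name dishes and query reuse the example above):
from elasticsearch import Elasticsearch

es = Elasticsearch("http://localhost:9200")  # assumed local cluster

body = {
    "query": {
        "bool": {
            "should": [
                {"term": {"name": {"value": "burger", "_name": "not_fuzzy"}}},
                {"fuzzy": {"name": {"value": "burger", "_name": "fuzzy"}}}
            ],
            "minimum_should_match": 1
        }
    }
}

resp = es.search(index="dishes", body=body)
hits = resp["hits"]["hits"]

# Sort key: hits that matched the exact (not_fuzzy) query first, then higher score first.
hits.sort(key=lambda h: ("not_fuzzy" not in h.get("matched_queries", []), -h["_score"]))

for hit in hits:
    print(hit["_source"]["name"], hit.get("matched_queries", []))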

ElasticSearch: Avg aggregation for datetime format

I am stuck on an Elasticsearch query using Python.
I have data such as:
{
"_index": "user_log",
"_type": "logs",
"_id": "gdUJpXIBAoADuwvHTK29",
"_score": 1,
"_source": {
"user_name": "prathameshsalap#gmail.com",
"working_hours": "2019-10-21 09:00:01"
}
}
{
"_index": "user_log",
"_type": "logs",
"_id": "gtUJpXIBAoADuwvHTK29",
"_version": 1,
"_score": 0,
"_source": {
"user_name": "vaishusawant143#gmail.com",
"working_hours": "2019-10-21 09:15:01"
}
}
{
"_index": "user_log",
"_type": "logs",
"_id": "g9UJpXIBAoADuwvHTK29",
"_version": 1,
"_score": 0,
"_source": {
"user_name": "prathameshsalap#gmail.com",
"working_hours": "2019-10-22 07:50:00"
}
}
{
"_index": "user_log",
"_type": "logs",
"_id": "g8UJpXIBAoADuwvHTK29",
"_version": 1,
"_score": 0,
"_source": {
"user_name": "vaishusawant143#gmail.com",
"working_hours": "2019-10-22 04:15:01"
}
}
Here, each user has working hours on different dates (the 21st and 22nd). I want to take the average of each user's working hours.
{
"size": 0,
"query" : {"match_all": {}},
"aggs": {
"users": {
"terms": {
"field": "user_name"
},
"aggs": {
"avg_hours": {
"avg": {
"field": "working_hours"
}
}
}
}
}
}
This query is not working. How can I find the average working hours for each user across all dates? I also want to run this query using the Python Elasticsearch client.
Updated
When I use the ingest pipeline as @Val mentions below, I get this error:
{
"error" : {
"root_cause" : [
{
"type" : "script_exception",
"reason" : "compile error",
"processor_type" : "script",
"script_stack" : [
"\n def workDate = /\\s+/.split(ctx.working_h ...",
" ^---- HERE"
],
"script" : "\n def workDate = /\\s+/.split(ctx.working_hours);\n def workHours = /:/.split(workDate[1]);\n ctx.working_minutes = (Integer.parseInt(workHours[0]) * 60) + Integer.parseInt(workHours[1]);\n ",
"lang" : "painless",
"position" : {
"offset" : 24,
"start" : 0,
"end" : 49
}
}
.....
How can I solve it?
The problem is that your working_hours field is a point in time and does not denote a duration.
For this use case, it's best to store the working day and working hours in two separate fields and store the working hours in minutes.
So instead of having documents like this:
{
"user_name": "prathameshsalap#gmail.com",
"working_hours": "2019-10-21 09:00:01",
}
Create documents like this:
{
"user_name": "prathameshsalap#gmail.com",
"working_day": "2019-10-21",
"working_hours": "09:00:01",
"working_minutes": 540
}
Then you can use your query on the working_minutes field:
{
"size": 0,
"query" : {"match_all": {}},
"aggs": {
"users": {
"terms": {
"field": "user_name.keyword",
"order": {
"avg_hours": "desc"
}
},
"aggs": {
"avg_hours": {
"avg": {
"field": "working_minutes"
}
}
}
}
}
}
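Since you also want to run this from Python, here is a minimal sketch with the official client, 7.x style body= (the connection details are assumptions):
from elasticsearch import Elasticsearch

es = Elasticsearch("http://localhost:9200")  # assumed local cluster

body = {
    "size": 0,
    "query": {"match_all": {}},
    "aggs": {
        "users": {
            "terms": {"field": "user_name.keyword", "order": {"avg_hours": "desc"}},
            "aggs": {"avg_hours": {"avg": {"field": "working_minutes"}}}
        }
    }
}

resp = es.search(index="user_log", body=body)
for bucket in resp["aggregations"]["users"]["buckets"]:
    # avg_hours is in minutes; divide by 60 to display hours
    print(bucket["key"], round(bucket["avg_hours"]["value"] / 60, 2))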
If it is not convenient to compute the working_minutes field in your client code, you can achieve the same thing using an ingest pipeline. The pipeline below uses a dissect processor rather than a Painless regex split, which also sidesteps the Painless regex restriction that is the likely cause of the compile error above. Let's define the pipeline first:
PUT _ingest/pipeline/working-hours
{
"processors": [
{
"dissect": {
"field": "working_hours",
"pattern": "%{?date} %{tmp_hours}:%{tmp_minutes}:%{?seconds}"
}
},
{
"convert": {
"field": "tmp_hours",
"type": "integer"
}
},
{
"convert": {
"field": "tmp_minutes",
"type": "integer"
}
},
{
"script": {
"source": """
ctx.working_minutes = (ctx.tmp_hours * 60) + ctx.tmp_minutes;
"""
}
},
{
"remove": {
"field": [
"tmp_hours",
"tmp_minutes"
]
}
}
]
}
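Optionally, the pipeline can be sanity-checked with the simulate API before re-indexing, using one of the sample documents above:
POST _ingest/pipeline/working-hours/_simulate
{
"docs": [
{
"_source": {
"user_name": "prathameshsalap#gmail.com",
"working_hours": "2019-10-21 09:00:01"
}
}
]
}
The simulated document should come back with working_minutes set to 540 and the temporary fields removed.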
Then you need to update your Python client code to use the new pipeline, which will create the working_minutes field for you:
helpers.bulk(es, reader, index='user_log', doc_type='logs', pipeline='working-hours')

Grouping consecutive documents with Elasticsearch

Is there a way to make Elasticsearch consider sequence-gaps when grouping?
Provided that the following data was bulk-imported to Elasticsearch:
{ "index": { "_index": "test", "_type": "groupingTest", "_id": "1" } }
{ "sequence": 1, "type": "A" }
{ "index": { "_index": "test", "_type": "groupingTest", "_id": "2" } }
{ "sequence": 2, "type": "A" }
{ "index": { "_index": "test", "_type": "groupingTest", "_id": "3" } }
{ "sequence": 3, "type": "B" }
{ "index": { "_index": "test", "_type": "groupingTest", "_id": "4" } }
{ "sequence": 4, "type": "A" }
{ "index": { "_index": "test", "_type": "groupingTest", "_id": "5" } }
{ "sequence": 5, "type": "A" }
Is there a way to query this data in a way that
the documents with sequence number 1 and 2 go to one output group,
the document with sequence number 3 goes to another one, and
the documents with sequence number 4 and 5 go to a third group?
... considering the fact that the type A sequence is interrupted by a type B item (or any other item that's not type A)?
I would like the result buckets to look something like this (the name and value for sequence_group may be different; I am just trying to illustrate the logic):
"buckets": [
{
"key": "a",
"sequence_group": 1,
"doc_count": 2
},
{
"key": "b",
"sequence_group": 3,
"doc_count": 1
},
{
"key": "a",
"sequence_group": 4,
"doc_count": 2
}
]
There is a good description of the problem and some SQL solution-approaches at https://www.simple-talk.com/sql/t-sql-programming/the-sql-of-gaps-and-islands-in-sequences/. I would like to know if there is a solution for elasticsearch available as well.
We can use a scripted metric aggregation here, which works in a map-reduce fashion (see the reference link). It has different parts: init, map, combine and reduce. The good thing is that the result of each of these can be a list or map too.
I played around a bit with this.
Elasticsearch version used: 7.1
Creating index:
PUT test
{
"mappings": {
"properties": {
"sequence": {
"type": "long"
},
"type": {
"type": "text",
"fielddata": true
}
}
}
}
Bulk indexing: (Note that I removed mapping type 'groupingTest')
POST _bulk
{ "index": { "_index": "test", "_id": "1" } }
{ "sequence": 1, "type": "A" }
{ "index": { "_index": "test", "_id": "2" } }
{ "sequence": 2, "type": "A" }
{ "index": { "_index": "test", "_id": "3" } }
{ "sequence": 3, "type": "B" }
{ "index": { "_index": "test", "_id": "4" } }
{ "sequence": 4, "type": "A" }
{ "index": { "_index": "test", "_id": "5" } }
{ "sequence": 5, "type": "A" }
Query
GET test/_doc/_search
{
"size": 0,
"aggs": {
"scripted_agg": {
"scripted_metric": {
"init_script": """
state.seqTypeArr = [];
""",
"map_script": """
def seqType = doc.sequence.value + '_' + doc['type'].value;
state.seqTypeArr.add(seqType);
""",
"combine_script": """
def list = [];
for(seqType in state.seqTypeArr) {
list.add(seqType);
}
return list;
""",
"reduce_script": """
def fullList = [];
for(agg_value in states) {
for(x in agg_value) {
fullList.add(x);
}
}
fullList.sort((a,b) -> a.compareTo(b));
def result = [];
def item = new HashMap();
for(int i=0; i<fullList.size(); i++) {
def str = fullList.get(i);
def index = str.indexOf("_");
def ch = str.substring(index+1);
def val = str.substring(0, index);
if(item["key"] == null) {
item["key"] = ch;
item["sequence_group"] = val;
item["doc_count"] = 1;
} else if(item["key"] == ch) {
item["doc_count"] = item["doc_count"] + 1;
} else {
result.add(item);
item = new HashMap();
item["key"] = ch;
item["sequence_group"] = val;
item["doc_count"] = 1;
}
}
result.add(item);
return result;
"""
}
}
}
}
And, finally the output:
{
"took" : 21,
"timed_out" : false,
"_shards" : {
"total" : 5,
"successful" : 5,
"skipped" : 0,
"failed" : 0
},
"hits" : {
"total" : {
"value" : 5,
"relation" : "eq"
},
"max_score" : null,
"hits" : [ ]
},
"aggregations" : {
"scripted_agg" : {
"value" : [
{
"doc_count" : 2,
"sequence_group" : "1",
"key" : "a"
},
{
"doc_count" : 1,
"sequence_group" : "3",
"key" : "b"
},
{
"doc_count" : 2,
"sequence_group" : "4",
"key" : "a"
}
]
}
}
}
Please note that scripted metric aggregations have a significant impact on query performance, so you might notice some slowness if there is a large number of documents.
You can always do a terms aggregation and then apply a top_hits aggregation to get this:
{
"aggs": {
"types": {
"terms": {
"field": "type"
},
"aggs": {
"groups": {
"top_hits": {
"size": 10
}
}
}
}
}
}
