Elasticsearch query filter

I am trying to make a query with a filter on my index, but when I try to filter on any attribute in the mapping the query returns no results.
The query is the following; if I run it with just the geo_distance part, I get results. I would like to filter the results using one of the properties in the mapping (in this case rating, but it could be city, state, etc.).
The query is generated in Java via QueryBuilder from the elasticsearch library (v 5.2.0), but for now I am trying to understand how to build a working query and execute it via cURL.
{
"query": {
"bool": {
"filter": [
{
"geo_distance": {
"geometry.coordinates": [
12.3232,
12.2323
],
"distance": 200000,
"distance_type": "plane",
"validation_method": "STRICT",
"ignore_unmapped": false,
"boost": 1
}
},
{
"bool": {
"must": [
{
"terms": {
"rating": [
"0"
],
"boost": 1
}
}
],
"adjust_pure_negative": true,
"boost": 1
}
}
],
"adjust_pure_negative": true,
"boost": 1
}
}
}
If I run a query filtering on zipCode or id, it works.
For example, a query like this:
{"query":{"bool":{"filter":{"term":{"zipCode":"111111"}}}}}
A snippet of my mapping is this:
{
"my_index": {
"mappings": {
"poielement": {
"dynamic_templates": [
{
"suggestions": {
"match": "suggest_*",
"mapping": {
"analyzer": "my_analyzer",
"copy_to": "auto_suggest",
"search_analyzer": "my_analyzer",
"store": true,
"type": "text"
}
}
},
{
"integers": {
"match_mapping_type": "long",
"mapping": {
"type": "text"
}
}
},
{
"geopoint": {
"match": "coordinates",
"mapping": {
"type": "geo_point"
}
}
},
{
"property": {
"match": "*",
"mapping": {
"analyzer": "my_analyzer",
"search_analyzer": "my_analyzer"
}
}
}
],
"date_detection": false,
"numeric_detection": false,
"properties": {
"city": {
"type": "text",
"analyzer": "my_analyzer"
},
"country": {
"type": "text",
"analyzer": "my_analyzer"
},
"geometry": {
"properties": {
"coordinates": {
"type": "geo_point"
},
"type": {
"type": "text",
"analyzer": "my_analyzer"
}
}
},
"id": {
"type": "text"
},
"name": {
"type": "keyword"
},
"rating": {
"type": "text"
},
"total_rate": {
"type": "text",
"analyzer": "my_analyzer"
},
"type": {
"type": "text",
"analyzer": "my_analyzer"
},
"zipCode": {
"type": "text"
}
}
}
}
}
}
When I retrieve data via http://elasticsearchpat/my_index/_search, the data looks like this:
{
"took": 1,
"timed_out": false,
"_shards": {
"total": 4,
"successful": 4,
"skipped": 0,
"failed": 0
},
"hits": {
"total": 7517,
"max_score": 1,
"hits": [
{
"_index": "my_index",
"_type": "poielement",
"_id": "58768",
"_score": 1,
"_source": {
"zipCode": 111111,
"country": "USA",
"city": "Portland",
"rating": 0,
"type": "",
"id": 123,
"geometry": {
"coordinates": [
12.205061,
12.490463
],
"type": "Point"
}
}
}
]
}
}
I will be very grateful for any help.
Thanks

Try this query instead. It moves the rating condition into a must clause and uses a match query, which analyzes its input the same way the analyzed rating field was indexed, instead of a terms filter, which looks for the exact indexed token:
{
"query": {
"bool": {
"must": [
{
"match": {
"rating": 0
}
}
],
"filter": [
{
"geo_distance": {
"geometry.coordinates": [
12.3232,
12.2323
],
"distance": 200000,
"distance_type": "plane",
"validation_method": "STRICT",
"ignore_unmapped": false,
"boost": 1
}
}
],
"adjust_pure_negative": true,
"boost": 1
}
}
}
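If you want to see exactly which tokens end up indexed for rating (and therefore what a terms filter would have to contain verbatim), the _analyze API can run the field's own analyzer for you. A minimal sketch, assuming the index name from the question and a local node (host and port are placeholders):
curl -XGET "http://localhost:9200/my_index/_analyze" -d'
{
  "field": "rating",
  "text": "0"
}'
The tokens in the response are the values a terms filter on rating would have to match exactly.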

Related

ElasticSearch query relevance

I would like to find a product with the search priority: pickRef, name, synonym (it's an array), and the others after. I can't manage to build a working query. I have to boost synonym by 50 in order to get the product into the top 8 results...
The aim of my query is to make an autocompletion search with fuzziness (to tolerate misspellings).
I have a product with the synonym "caca". When I search for "caca", ES returns every "coca" product but not the product with the synonym "caca". However, the term "caca" should be the first result because it matches the synonym field exactly, and the "coca" products should come after (they only match because of the fuzziness parameter).
Here is my index:
{
"product": {
"aliases": {},
"mappings": {
"properties": {
"brand": {
"type": "keyword",
"boost": 3
},
"catalogue": {
"type": "keyword"
},
"category": {
"type": "text",
"analyzer": "standard"
},
"description": {
"properties": {
"de": {
"type": "text",
"boost": 3,
"analyzer": "german"
},
"en": {
"type": "text",
"boost": 3,
"analyzer": "english"
},
"fr": {
"type": "text",
"boost": 3,
"analyzer": "french"
},
"lu": {
"type": "text",
"boost": 3
}
}
},
"description_ecology": {
"properties": {
"de": {
"type": "text",
"boost": 3,
"analyzer": "german"
},
"en": {
"type": "text",
"boost": 3,
"analyzer": "english"
},
"fr": {
"type": "text",
"boost": 3,
"analyzer": "french"
},
"lu": {
"type": "text",
"boost": 3
}
}
},
"enabled": {
"type": "boolean"
},
"image": {
"type": "text"
},
"name": {
"properties": {
"de": {
"type": "text",
"boost": 3,
"fields": {
"keyword": {
"type": "keyword",
"ignore_above": 256
}
},
"analyzer": "german"
},
"en": {
"type": "text",
"boost": 3,
"fields": {
"keyword": {
"type": "keyword",
"ignore_above": 256
}
},
"analyzer": "english"
},
"fr": {
"type": "text",
"boost": 3,
"fields": {
"keyword": {
"type": "keyword",
"ignore_above": 256
}
},
"analyzer": "french"
},
"lu": {
"type": "text",
"boost": 3,
"fields": {
"keyword": {
"type": "keyword",
"ignore_above": 256
}
}
}
}
},
"pickRef": {
"type": "keyword",
"boost": 5
},
"replaced": {
"type": "boolean"
},
"slug": {
"type": "text"
},
"synonym": {
"type": "keyword",
"boost": 3
}
}
},
"settings": {
"index": {
"routing": {
"allocation": {
"include": {
"_tier_preference": "data_content"
}
}
},
"number_of_shards": "1",
"provided_name": "product",
"creation_date": "1634287857507",
"analysis": {
"filter": {
"autocomplete_filter": {
"type": "edge_ngram",
"min_gram": "1",
"max_gram": "20"
}
},
"analyzer": {
"autocomplete": {
"filter": [
"lowercase",
"autocomplete_filter"
],
"type": "custom",
"tokenizer": "standard"
}
},
"char_filter": {
"pre_negs": {
"pattern": "a \\w",
"type": "pattern_replace",
"replacement": ""
}
}
},
"number_of_replicas": "0",
"uuid": "EGLmpv8bRlCnfLBxHZOKmA",
"version": {
"created": "7150099"
}
}
}
}
}
Here is my query:
{
"index": "product",
"size": 8,
"body": {
"query": {
"bool": {
"must": [
{
"match": {
"enabled": true
}
},
{
"match": {
"replaced": false
}
}
],
"should": [
{
"match": {
"name.fr": {
"query": "caca",
"analyzer": "standard"
}
}
},
{
"match": {
"synonym": {
"query": "caca",
"boost": 20,
"analyzer": "standard"
}
}
},
{
"multi_match": {
"query": "caca",
"fields": [
"brand^2",
"pickRef^5",
"catalogue",
"name.fr^3",
"name.en^1",
"name.de^1",
"name.lu^1",
"description.fr^1",
"description.en^1",
"description.de^1",
"description.lu^1",
"description_ecologique.fr^1",
"description_ecologique.en^1",
"description_ecologique.de^1",
"description_ecologique.lu^1"
],
"fuzziness": "AUTO"
}
},
{
"query_string": {
"query": "caca"
}
}
]
}
}
}
}
Here are my products:
{
"_index": "product",
"_type": "_doc",
"_id": "1594",
"_version": 1,
"_seq_no": 1593,
"_primary_term": 1,
"found": true,
"_source": {
"name": {
"fr": "PLANTE ARTIFICIELLE BAMBOU 120cm"
},
"pickRef": "122638",
"description": {
"fr": "Agrémentez votre lieu de travail avec cette superbe plante ! Elle garantit un environnement très naturel, ne nécessite pas d'entretien et agrémente n'importe quel espace. Tronc en bois, feuillage en polyester , livrée dans un pot standard en plastique."
},
"description_ecology": {
"fr": ""
},
"catalogue": "P399",
"image": "uploads/product/122638/122638.png",
"brand": "PAPERFLOW",
"category": "Autres",
"slug": "plante-artificielle-bambou-120cm-122638-122638",
"enabled": true,
"synonym": [],
"replaced": false
}
}
{
"_index": "product",
"_type": "_doc",
"_id": "3131",
"_version": 1,
"_seq_no": 3130,
"_primary_term": 1,
"found": true,
"_source": {
"name": {
"fr": "ROYCO MINUTE SOUP \"POIS AU JAMBON\""
},
"pickRef": "141065",
"description": {
"fr": "Retrouvez le bon goût des légumes dans ces recettes de tradition alliant tout le savoir-faire de Royco Minute Soup à la saveur des meilleurs ingrédients."
},
"description_ecology": {
"fr": ""
},
"catalogue": "P038",
"image": "uploads/product/141065/141065.png",
"brand": "ROYCO",
"category": "Soupe & pâtes",
"slug": "royco-minute-soup-pois-au-jambon-5410056186552-141065",
"enabled": true,
"synonym": [],
"replaced": false
}
}
{
"_index": "product",
"_type": "_doc",
"_id": "6",
"_version": 2,
"_seq_no": 24511,
"_primary_term": 1,
"found": true,
"_source": {
"name": {
"fr": "AGRAFES 26/6 GALVANISEES"
},
"pickRef": "100110",
"description": {
"fr": "<div>Boîte de 1000 agrafes 26/6 galvanisées.</div>"
},
"description_ecology": {
"fr": null
},
"catalogue": "S",
"image": "uploads/product/233163/233163.png",
"brand": "autres",
"category": "Autres",
"slug": "agrafes-26-6-galvanisees-jambon-5010255827746-100110",
"enabled": true,
"synonym": [
"caca",
"jambon"
],
"replaced": false
}
}
PS: I know the example is not perfect, but I don't have a better one...
Did you try sorting by _score?
{
"index": "product",
"size": 8,
"body": {
"query": {
.
.
.
},
"sort": [
{
"_score": {
"order": "desc"
}
}
]
}
}
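If it is still not clear why the "coca" products outrank the exact synonym match, asking Elasticsearch to explain the scoring can help. A minimal sketch of the search body (the part inside body above), with explain enabled and everything else unchanged:
{
  "explain": true,
  "query": {
    .
    .
    .
  },
  "sort": [
    {
      "_score": {
        "order": "desc"
      }
    }
  ]
}
Each hit then carries an _explanation section showing which clause contributed which part of the score, so you can see whether the fuzzy multi_match or the boosted synonym match is dominating.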

elasticsearch not returning text when a partial word is entered

I have my analyzers set like this:
"analyzer": {
"edgeNgram_autocomplete": {
"type": "custom",
"tokenizer": "standard",
"filter": ["lowercase", "autocomplete"]
},
"full_name": {
"filter":["standard","lowercase","asciifolding"],
"type":"custom",
"tokenizer":"standard"
}
My filter:
"filter": {
"autocomplete": {
"type": "edgeNGram",
"side":"front",
"min_gram": 1,
"max_gram": 50
}
Name field analyzer:
"textbox": {
"_parent": {
"type": "document"
},
"properties": {
"text": {
"fields": {
"text": {
"type":"string",
"analyzer":"full_name"
},
"autocomplete": {
"type": "string",
"index_analyzer": "edgeNgram_autocomplete",
"search_analyzer": "full_name",
"analyzer": "full_name"
}
},
"type":"multi_field"
}
}
}
Put all together, this makes up my mapping for the docstore index:
PUT http://localhost:9200/docstore
{
"settings": {
"analysis": {
"analyzer": {
"edgeNgram_autocomplete": {
"type": "custom",
"tokenizer": "standard",
"filter": ["lowercase", "autocomplete"]
},
"full_name": {
"filter":["standard","lowercase","asciifolding"],
"type":"custom",
"tokenizer":"standard"
}
},
"filter": {
"autocomplete": {
"type": "edgeNGram",
"side":"front",
"min_gram": 1,
"max_gram": 50
} }
}
},
"mappings": {
"space": {
"properties": {
"name": {
"type": "string",
"index": "not_analyzed"
}
}
},
"document": {
"_parent": {
"type": "space"
},
"properties": {
"name": {
"type": "string",
"index": "not_analyzed"
}
}
},
"textbox": {
"_parent": {
"type": "document"
},
"properties": {
"bbox": {
"type": "long"
},
"text": {
"fields": {
"text": {
"type":"string",
"analyzer":"full_name"
},
"autocomplete": {
"type": "string",
"index_analyzer": "edgeNgram_autocomplete",
"search_analyzer": "full_name",
"analyzer":"full_name"
}
},
"type":"multi_field"
}
}
},
"entity": {
"_parent": {
"type": "document"
},
"properties": {
"bbox": {
"type": "long"
},
"name": {
"type": "string",
"index": "not_analyzed"
}
}
}
}
}
Add a space to hold all docs:
POST http://localhost:9200/docstore/space
{
"name": "Space 1"
}
When the user enters the word proj, this should return all of these texts:
SampleProject
Sample Project
Project Name
myProjectname
firstProjectName
my ProjectName
But it returns nothing.
My query:
POST http://localhost:9200/docstore/textbox/_search
{
"query": {
"match": {
"text": "proj"
}
},
"filter": {
"has_parent": {
"type": "document",
"query": {
"term": {
"name": "1-a1-1001.pdf"
}
}
}
}
}
If I search by project, I get:
{ "took": 4,
"timed_out": false,
"_shards": {
"total": 5,
"successful": 5,
"failed": 0
},
"hits": {
"total": 2,
"max_score": 3.0133555,
"hits": [
{
"_index": "docstore",
"_type": "textbox",
"_id": "AVRuV2d_f4y6IKuxK35g",
"_score": 3.0133555,
"_routing": "AVRuVvtLf4y6IKuxK33f",
"_parent": "AVRuV2cMf4y6IKuxK33g",
"_source": {
"bbox": [
8750,
5362,
9291,
5445
],
"text": [
"Sample Project"
]
}
},
{
"_index": "docstore",
"_type": "textbox",
"_id": "AVRuV2d_f4y6IKuxK35Y",
"_score": 2.4106843,
"_routing": "AVRuVvtLf4y6IKuxK33f",
"_parent": "AVRuV2cMf4y6IKuxK33g",
"_source": {
"bbox": [
8645,
5170,
9070,
5220
],
"text": [
"Project Name and Address"
]
}
}
]
}
}
Maybe my edgeNGram is not suited for this?
I am specifying:
"side": "front"
Should I do it differently?
Does anyone know what I am doing wrong?
The problem is with the autocomplete indexing analyzer field name.
Change:
"index_analyzer": "edgeNgram_autocomplete"
To:
"analyzer": "edgeNgram_autocomplete"
And also search as @Andrei Stefan showed in his answer:
POST http://localhost:9200/docstore/textbox/_search
{
"query": {
"match": {
"text.autocomplete": "proj"
}
}
}
And it will work as expected!
I have tested your configuration on Elasticsearch 2.3
By the way, type multi_field is deprecated.
Hope I have managed to help :)
Your query should actually try to match on text.autocomplete and not text:
"query": {
"match": {
"text.autocomplete": "proj"
}
}
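To check that the edge n-gram analyzer really produces the prefix tokens you expect (for example proj for Project), you can run it by hand with the _analyze API. A minimal sketch, assuming the index and analyzer names from the question and a local node:
curl -XGET "http://localhost:9200/docstore/_analyze?analyzer=edgeNgram_autocomplete&text=Project"
If proj appears in the returned tokens, the indexing side is fine and only the query needs to target text.autocomplete.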

How to search with a nested filter in Elasticsearch

I want to use the nested filter function, because I want to build a filter on the nested data only.
For example, suppose ElasticSearch holds the data below. I want to retrieve it, but when I use a nested filter I can't get this data back. Do you know a good solution?
Actually, I want to have two conditions on the nested type. In SQL it would be something like this:
select * from document Inner Join on document_comments.document_id = document.id where document_comments.deleted_at is null and document_comments.comment like 'test'
[data]
"_source": {
"id": 4,
"name": "hogehoge.csv",
"deleted_at": null,
"hard_delete": null,
"document_comments": [
{
"id": 8,
"comment": "test",
"document_id": 4,
"deleted_at": "2016-03-03T13:43:10"
}
,
{
"id": 11,
"comment": "test",
"document_id": 4,
"deleted_at": null
}
]
}
[mapping]
"documents": {
"search_analyzer": "default_search",
"dynamic_templates": [
{
"string_template": {
"mapping": {
"type": "multi_field",
"fields": {
"ja": {
"analyzer": "ja_analyzer",
"index": "analyzed",
"type": "string"
},
"{name}": {
"analyzer": "ngram_analyzer",
"index": "analyzed",
"type": "string"
},
"yomi": {
"analyzer": "yomi_analyzer",
"index": "analyzed",
"type": "string"
},
"full": {
"index": "not_analyzed",
"type": "string"
}
}
},
"match_mapping_type": "string",
"match": "*"
}
}
],
"properties": {
"#timestamp": {
"format": "dateOptionalTime",
"type": "date"
},
"document_comments": {
"type": "nested",
"properties": {
"deleted_at": {
"format": "dateOptionalTime",
"type": "date"
},
"document_id": {
"type": "integer"
},
"comment": {
"index": "no",
"type": "string",
"fields": {
"ja": {
"analyzer": "ja_analyzer",
"type": "string"
},
"yomi": {
"analyzer": "yomi_analyzer",
"type": "string"
},
"ngram": {
"analyzer": "ngram_analyzer",
"type": "string"
},
"full": {
"index": "not_analyzed",
"type": "string"
}
}
},
"id": {
"type": "long"
},
"deleted_at": {
"format": "dateOptionalTime",
"type": "date"
}
}
},
"name": {
"index": "no",
"type": "string",
"fields": {
"ja": {
"analyzer": "ja_analyzer",
"type": "string"
},
"yomi": {
"analyzer": "yomi_analyzer",
"type": "string"
},
"ngram": {
"analyzer": "ngram_analyzer",
"type": "string"
},
"full": {
"index": "not_analyzed",
"type": "string"
}
}
},
"delete_at": {
"format": "dateOptionalTime",
"type": "date"
},
"id": {
"type": "integer"
},
"hard_delete": {
"format": "dateOptionalTime",
"type": "date"
}
}
}
[query]
{
"_source": [
"id",
"deleted_at",
"document_comments.comment",
"document_comments.deleted_at"
],
"min_score": 0.05,
"query": {
"filtered": {
"query": {
"bool": {
"must": [],
"should": [
{
"multi_match": {
"query": "test",
"type": "cross_fields",
"fields": [
"document.name.ja"
]
}
},
{
"nested": {
"path": "document_comments",
"query": {
"bool": {
"must": [
{
"multi_match": {
"query": "test",
"type": "cross_fields",
"fields": [
"document_comments.comment.ja"
]
}
}
],
"must_not": [
{
"filter": {
"exists": {
"field": "document_comments.deleted_at"
}
}
}
]
}
}
}
}
]
}
},
"filter": {
"bool": {
"must": [
[
{
"missing": {
"field": "deleted_at",
"existence": "true",
"null_value": "true"
}
},
{
"missing": {
"field": "hard_delete",
"existence": "true",
"null_value": "true"
}
}
],
{
"type": {
"value": "document"
}
},
{
"term": {
"id": "3"
}
}
]
}
}
}
},
"sort": [
{
"id": "desc"
},
{
"_score": "asc"
}
]
}
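A nested query that mirrors the SQL above (the comment matches test and deleted_at is missing) might look like the sketch below. It uses the field names from the mapping above and assumes Elasticsearch 2.x, where exists can be used directly as a query inside must_not; the key difference from the query in the question is that the exists check is not wrapped in a filter clause:
{
  "nested": {
    "path": "document_comments",
    "query": {
      "bool": {
        "must": [
          {
            "match": {
              "document_comments.comment.ja": "test"
            }
          }
        ],
        "must_not": [
          {
            "exists": {
              "field": "document_comments.deleted_at"
            }
          }
        ]
      }
    }
  }
}
Because both clauses apply inside the nested scope, a document matches only if at least one individual comment both contains test and has no deleted_at, which is what the SQL expresses.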

ElasticSearch: Is it possible to produce a "Temporary Field" during a search request?

Sample Document:
{
"text": "this is my text",
"categories": [
{"category": "sample category"},
{"category": "local news"}
]
}
The mapping currently is:
{
"topic": {
"properties": {
"categories": {
"properties": {
"category": {
"type": "string",
"store": "no",
"term_vector": "with_positions_offsets",
"analyzer": "ik_max_word",
"search_analyzer": "ik_max_word",
"include_in_all": "true",
"boost": 8,
"fields": {
"raw": {
"type": "string",
"index": "not_analyzed"
}
}
}
}
}
}
}
}
Search query:
{
"_source": false,
"query":{
"match":{
"categories.category":"news"
}
},
"aggs": {
"match_count": {
"terms" : {"field": "categories.category.raw"}
}
}
}
The result I want is:
{
...
"buckets": [
{
"key": "local news",
"doc_count": 1
}
]
...
}
The result actually is (it aggregates all matching documents' categories.category):
{
...
"buckets": [
{
"key": "local news",
"doc_count": 1
},{
"key": "sample category", //THIS PART IS NOT NEEDED
"doc_count": 1
}
]
...
}
Is it possible to add a temporary field during a search? In this case, let's say I name all the matching categories.category values categories.match_category and then aggregate on this temporary field categories.match_category. If this is possible, how can I do it, and if not, what should I do instead?
You have multiple sub-documents within your document and you need to match against only some of them, so you should probably change the mapping to nested documents as follows:
mapping
{
"topic": {
"properties": {
"categories": {
"type":"nested",
"properties": {
"category": {
"type": "string",
"store": "no",
"term_vector": "with_positions_offsets",
"analyzer": "ik_max_word",
"search_analyzer": "ik_max_word",
"include_in_all": "true",
"boost": 8,
"fields": {
"raw": {
"type": "string",
"index": "not_analyzed"
}
}
}
}
}
}
}
}
Then you can perform your query as follows
{
"_source": false,
"query":{
"filtered":{
"query":{
"match":{
"categories.category":
{
"query" : "news",
"cutoff_frequency" : 0.001
}
}
}
}
},
"aggs": {
"categ": {
"nested" : {
"path" : "categories"
},
"aggs":{
"match_count": {
"terms" : {"field": "categories.category.raw"}
}
}
}
}
}
Try it
Another approach, with logic more specific to your needs, is the following:
mapping
{
"topic": {
"properties": {
"categories": {
"type":"nested",
"properties": {
"category": {
"type": "string",
"store": "no",
"analyzer": "simple",
"include_in_all": "true",
"boost": 8,
"fields": {
"raw": {
"type": "string",
"index": "not_analyzed"
}
}
}
}
}
}
}
}
data
{
"text": "this is my text",
"categories": [
{"category": "sample category"},
{"category": "local news"}
]
}
query
{
"query":{
"nested":{
"path":"categories",
"query":{
"filtered":{
"query":{
"match":{
"categories.category":"news"
}
}
}
}
}
},
"aggs": {
"nest":{
"nested":{
"path":"categories"
},
"aggs":{
"filt":{
"filter" : {
"script": {
"script" : "doc['categories.category'].values.contains('news')"
}
},
"aggs":{
"match_count": {
"terms" : {"field": "categories.category.raw"}
}
}
}
}
}
}
}
produced result
{
"_shards": {
"failed": 0,
"successful": 5,
"total": 5
},
"aggregations": {
"nest": {
"doc_count": 2,
"filt": {
"doc_count": 1,
"match_count": {
"buckets": [
{
"doc_count": 1,
"key": "local news"
}
],
"doc_count_error_upper_bound": 0,
"sum_other_doc_count": 0
}
}
}
},
"hits": {
"hits": [],
"max_score": 0.0,
"total": 1
},
"timed_out": false,
"took": 3
}
The catch here is that you have to write your own script filter in the aggregation, tailored to your needs; the above example worked for me with the simple analyzer on the category mapping.
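If you would rather avoid a script, a plain filter aggregation inside the nested aggregation can play the same role by repeating the match criterion from the query. A sketch of just the aggregation part, under the same nested mapping and assuming Elasticsearch 2.x or later (where a query can be used directly as the filter):
"aggs": {
  "nest": {
    "nested": { "path": "categories" },
    "aggs": {
      "filt": {
        "filter": { "match": { "categories.category": "news" } },
        "aggs": {
          "match_count": {
            "terms": { "field": "categories.category.raw" }
          }
        }
      }
    }
  }
}
The filter narrows the nested documents to the ones matching news, so the terms aggregation only ever sees the matching categories.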

ElasticSearch: How to create a complex query & filter for nested object?

I have this mapping and query. Everything is working, except when I want to filter contents by the mentioned tagids: the query then returns zero results.
I want to filter contents based on tag ids.
{
"mappings": {
"video": {
"_all": {
"enabled": true
},
"properties": {
"title": {
"type": "string"
},
"en_title": {
"type": "string"
},
"tags": {
"type": "nested",
"properties": {
"tagname": {
"type": "string"
},
"tagid": {
"type": "string",
"index": "not_analyzed"
}
}
},
"metadescription": {
"type": "string"
},
"author": {
"type": "string"
},
"description": {
"type": "string"
},
"items": {
"type": "nested",
"properties": {
"item_title": {
"type": "string"
},
"item_duration": {
"type": "string",
"index": "not_analyzed"
}
}
},
"isfeatured": {
"type": "string",
"index": "not_analyzed"
},
"image": {
"type": "string",
"index": "not_analyzed"
},
"contenttype": {
"type": "string",
"index": "not_analyzed"
},
"category": {
"type": "string",
"index": "not_analyzed"
},
"categoryalias": {
"type": "string",
"index": "not_analyzed"
},
"url": {
"type": "string",
"index": "not_analyzed"
},
"authorid": {
"type": "string",
"index": "not_analyzed"
},
"price": {
"type": "string",
"index": "not_analyzed"
},
"duration": {
"type": "string",
"index": "not_analyzed"
},
"publishdate": {
"type": "date",
"format": "yyyy-MM-dd HH:mm:ss"
}
}
}
}
}
and this is the query:
{
"index": "content",
"type": "video",
"body": {
"query": {
"filtered": {
"query": {
"match_all": { }
},
"filter": {
"bool": {
"must": [
{
"nested": {
"path": "tags",
"query": {
"bool": {
"should": [
{
"term": {
"tagid": "193"
}
},
{
"term": {
"tagid": "194"
}
}
]
}
}
}
},
{
"term": {
"categoryalias": "digilife"
}
},
{
"term": {
"price": 0
}
}
]
}
}
}
},
"from": 0,
"size": 9,
"sort": [
"_score"
]
}
}
The nested filter in your query is not quite correct. Where you have tagid as the field name, it should be tags.tagid. The full query should be:
{
"index": "content",
"type": "video",
"body": {
"query": {
"filtered": {
"query": {
"match_all": {}
},
"filter": {
"bool": {
"must": [{
"nested": {
"path": "tags",
"query": {
"bool": {
"should": [{
"term": {
"tags.tagid": "193"
}
}, {
"term": {
"tags.tagid": "194"
}
}]
}
}
}
}, {
"term": {
"categoryalias": "digilife"
}
}, {
"term": {
"price": 0
}
}]
}
}
}
},
"from": 0,
"size": 9,
"sort": [
"_score"
]
}
}
EDIT:
Here's a complete working example to get you started. I have used Sense for this, but you can use cURL or the language client of your choice.
For the mapping
curl -XPUT "http://localhost:9200/content" -d'
{
"mappings": {
"video": {
"_all": {
"enabled": true
},
"properties": {
"title": {
"type": "string"
},
"en_title": {
"type": "string"
},
"tags": {
"type": "nested",
"properties": {
"tagname": {
"type": "string"
},
"tagid": {
"type": "string",
"index": "not_analyzed"
}
}
},
"metadescription": {
"type": "string"
},
"author": {
"type": "string"
},
"description": {
"type": "string"
},
"items": {
"type": "nested",
"properties": {
"item_title": {
"type": "string"
},
"item_duration": {
"type": "string",
"index": "not_analyzed"
}
}
},
"isfeatured": {
"type": "string",
"index": "not_analyzed"
},
"image": {
"type": "string",
"index": "not_analyzed"
},
"contenttype": {
"type": "string",
"index": "not_analyzed"
},
"category": {
"type": "string",
"index": "not_analyzed"
},
"categoryalias": {
"type": "string",
"index": "not_analyzed"
},
"url": {
"type": "string",
"index": "not_analyzed"
},
"authorid": {
"type": "string",
"index": "not_analyzed"
},
"price": {
"type": "string",
"index": "not_analyzed"
},
"duration": {
"type": "string",
"index": "not_analyzed"
},
"publishdate": {
"type": "date",
"format": "yyyy-MM-dd HH:mm:ss"
}
}
}
}
}'
We can check that the mapping is as expected with:
curl -XGET "http://localhost:9200/content/video/_mapping"
Now, let's index some documents:
// document with id 1
curl -XPOST "http://localhost:9200/content/video/1" -d'
{
"tags": [
{
"tagname" : "tag 193",
"tagid": "193"
}
],
"price": 0,
"categoryalias": "digilife"
}'
// document with id 2
curl -XPOST "http://localhost:9200/content/video/2" -d'
{
"tags": [
{
"tagname" : "tag 194",
"tagid": "194"
}
],
"price": 0,
"categoryalias": "digilife"
}'
// document with id 3
curl -XPOST "http://localhost:9200/content/video/3" -d'
{
"tags": [
{
"tagname" : "tag 194",
"tagid": "194"
}
],
"price": 0,
"categoryalias": "different category alias"
}'
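If you search immediately after indexing, the new documents may not be visible yet, because Elasticsearch only makes them searchable after the next refresh. Forcing a refresh first avoids that (this is just a convenience for the walkthrough, not something you would normally do in production):
curl -XPOST "http://localhost:9200/content/_refresh"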
Now, let's run the query. I've removed the superfluous parts and simplified it:
curl -XGET "http://localhost:9200/content/video/_search" -d'
{
"query": {
"filtered": {
"filter": {
"bool": {
"must": [
{
"nested": {
"path": "tags",
"query": {
"terms": {
"tags.tagid": [
"193",
"194"
]
}
}
}
},
{
"term": {
"categoryalias": "digilife"
}
},
{
"term": {
"price": 0
}
}
]
}
}
}
},
"size": 9
}'
Only the documents with ids 1 and 2 should be returned. This is confirmed by the results:
{
"took": 1,
"timed_out": false,
"_shards": {
"total": 5,
"successful": 5,
"failed": 0
},
"hits": {
"total": 2,
"max_score": 1,
"hits": [
{
"_index": "content",
"_type": "video",
"_id": "1",
"_score": 1,
"_source": {
"tags": [
{
"tagname": "tag 193",
"tagid": "193"
}
],
"price": 0,
"categoryalias": "digilife"
}
},
{
"_index": "content",
"_type": "video",
"_id": "2",
"_score": 1,
"_source": {
"tags": [
{
"tagname": "tag 194",
"tagid": "194"
}
],
"price": 0,
"categoryalias": "digilife"
}
}
]
}
}
