Output not rendering when using a method

Here is what I want to accomplish:
<span>{{ getGenres(movie.genre_ids) }}</span>
should output:
Action, Adventure, ..etc
I've got a JSON file which contains the following structure:
[
  {
    "id": 28,
    "name": "Action"
  },
  {
    "id": 12,
    "name": "Adventure"
  }
]
Here is my method:
getGenres(genre_ids) {
  Movies.getGenres(genre_ids);
}
Movies.js contains the method being called, which is:
getGenres(genre_ids) {
  let genres_array = [];
  for (let i = 0; i < genre_ids.length; i++) {
    let matching_genre = genres.filter(genre => genre.id === genre_ids[i]);
    genres_array.push(matching_genre[0].name);
  }
  return genres_array;
}
The issue here is that it doesn't output the names, but if I console.log(genres_array); it does work.
Any help would be appreciated.

I cannot find a return statement here:
getGenres(genre_ids) {
  Movies.getGenres(genre_ids);
}
I believe it should be like this:
getGenres(genre_ids) {
  return Movies.getGenres(genre_ids).join(', ');
}
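Without the return, the wrapper resolves to undefined, so the interpolation renders nothing. For completeness, here is a minimal sketch of the Movies.js side with the lookup intact (the genres.json import path and the module shape are assumptions, not taken from the original code):

// Movies.js -- hypothetical layout; assumes the genre list shown above is importable
import genres from './genres.json';

export default {
  getGenres(genre_ids) {
    // look up each id and collect the matching genre names
    // (equivalent to the filter(...)[0] lookup in the question)
    return genre_ids
      .map(id => genres.find(genre => genre.id === id))
      .filter(Boolean)
      .map(genre => genre.name);
  }
};

With the component method above returning Movies.getGenres(movie.genre_ids).join(', '), the span renders "Action, Adventure" for ids 28 and 12.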

Related

Spring Boot Mongo update nested array of documents

I'm trying to set an attribute of a document inside an array to uppercase.
This is a document example
{
  "_id": ObjectId("5e786a078bc3b3333627341e"),
  "test": [
    {
      "itemName": "alpha305102992",
      "itemNumber": ""
    },
    {
      "itemName": "beta305102630",
      "itemNumber": "P5000"
    },
    {
      "itemName": "gamma305102633 ",
      "itemNumber": ""
    }
  ]
}
I have already tried a lot of things.
private void NameElementsToUpper() {
  AggregationUpdate update = AggregationUpdate.update();
  // This one does not work
  update.set("test.itemName").toValue(StringOperators.valueOf(test.itemName).toUpper());
  // This one also
  update.set(SetOperation.set("test.$[].itemName").withValueOfExpression("test.#this.itemName"));
  // And every variant in between these two.
  // ...
  Query query = new Query();
  UpdateResult result = mongoTemplate.updateMulti(query, update, aClass.class);
  log.info("updated {} records", result.getModifiedCount());
}
I see that the Fields class in Spring Data hooks into the "$" character and behaves specially when you use it. I cannot seem to find the correct documentation.
EDIT: The following update seems to work, but I cannot seem to get it translated into Spring Data Mongo code:
db.collection.update({},
[
  {
    $set: {
      "test": {
        $map: {
          input: "$test",
          in: {
            $mergeObjects: [
              "$$this",
              {
                itemName: {
                  $toUpper: "$$this.itemName"
                }
              }
            ]
          }
        }
      }
    }
  }
])
Any solutions?
Thanks!
For now I'm using the following, which does what I need. But a Spring Data way would be cleaner.
mongoTemplate.getDb().getCollection(mongoTemplate.getCollectionName(Application.class)).updateMany(
    new BasicDBObject(),
    Collections.singletonList(BasicDBObject.parse("""
        {
          $set: {
            "test": {
              $map: {
                input: "$test",
                in: {
                  $mergeObjects: [
                    "$$this",
                    {
                      itemName: { $toUpper: "$$this.itemName" }
                    }
                  ]
                }
              }
            }
          }
        }
        """))
);
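Purely as an untested sketch of a more Spring Data flavoured version: AggregationUpdate.set(...).toValue(...) accepts an AggregationExpression (as in the attempts above), so the $map/$mergeObjects stage can be handed over as a raw org.bson.Document built in a lambda. The method below reuses mongoTemplate, aClass and log from the original method; it is an assumption, not verified against the real mapping layer:

private void nameElementsToUpperAggregation() {
  // Builds { $map: { input: "$test", in: { $mergeObjects: [ "$$this",
  //   { itemName: { $toUpper: "$$this.itemName" } } ] } } } as a raw expression.
  AggregationExpression mapTestItems = context -> new Document("$map",
      new Document("input", "$test")
          .append("in", new Document("$mergeObjects", Arrays.asList(
              "$$this",
              new Document("itemName", new Document("$toUpper", "$$this.itemName"))))));

  AggregationUpdate update = AggregationUpdate.update().set("test").toValue(mapTestItems);

  UpdateResult result = mongoTemplate.updateMulti(new Query(), update, aClass.class);
  log.info("updated {} records", result.getModifiedCount());
}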

Mongodb aggregation remove null values from object with nested properties

Is there a way to remove literally all null or empty-string values from an object? We have an aggregation which produces an object with empty fields, and empty objects where the underlying values are null.
What we wish to do is remove all null properties and empty objects and recreate the object, in order to keep the data as small as possible.
For example, in the following object only 'test' and 'more-nested-data' should be kept; the rest can be removed:
{
  "test": "some",
  "test2": {
  },
  "test3": {
    "some-key": {
    },
    "some-other-key": {
      "more-nested-data": true,
      "more-nested-emtpy": null
    }
  }
}
which should become:
{
  "test": "some",
  "test3": {
    "some-other-key": {
      "more-nested-data": true
    }
  }
}
I have tried a lot; I think something could be done using $objectToArray, but I have not found the solution yet. The required aggregation should recursively (or up to a defined number of levels) remove null properties and empty objects.
Use the $function operator, available since MongoDB 4.4, to do this recursively, as you note. Given this input, which is a slightly expanded version of the one supplied in the question:
var dd = {
  "test": "some",
  "test2": { },
  "test3": {
    "some-key": { },
    "some-other-key": {
      "more-nested-data": true,
      "more-nested-emtpy": null,
      "emptyArr": [],
      "notEmptyArr": [
        "XXX",
        null,
        {"corn": "dog"},
        {"bad": null},
        {"other": {zip: null, empty: [], zap: "notNull"}}
      ]
    }
  }
}
db.foo.insert(dd);
then this pipeline:
db.foo.aggregate([
  {$replaceRoot: {newRoot: {$function: {
    body: function(obj) {
      var process = function(holder, spot, value) {
        var remove_it = false;
        // test FIRST since [] instanceof Object is true!
        if(Array.isArray(value)) {
          // walk BACKWARDS due to potential splice() later
          // that will change the length...
          for(var jj = value.length - 1; jj >= 0; jj--) {
            process(value, jj, value[jj]);
          }
          if(0 == value.length) {
            remove_it = true;
          }
        } else if(value instanceof Object) {
          walkObj(value);
          if(0 == Object.keys(value).length) {
            remove_it = true;
          }
        } else {
          if(null == value) {
            remove_it = true;
          }
        }
        if(remove_it) {
          if(Array.isArray(holder)) {
            holder.splice(spot, 1); // snip out the val
          } else if(holder instanceof Object) {
            delete holder[spot];
          }
        }
      };
      var walkObj = function(obj) {
        Object.keys(obj).forEach(function(k) {
          process(obj, k, obj[k]);
        });
      }
      walkObj(obj); // entry point!
      return obj;
    },
    args: [ "$$CURRENT" ],
    lang: "js"
  }}
  }}
]);
produces this result:
{
  "_id" : 0,
  "test" : "some",
  "test3" : {
    "some-other-key" : {
      "more-nested-data" : true,
      "notEmptyArr" : [
        "XXX",
        {
          "corn" : "dog"
        },
        {
          "other" : {
            "zap" : "notNull"
          }
        }
      ]
    }
  }
}
A convenient way to debug such complex functions is to declare them as variables outside of the pipeline and run data through them to simulate the documents (objects) coming out of the database, e.g.:
ff = function(obj) {
  var process = function(holder, spot, value) {
    var remove_it = false;
    // test FIRST since [] instanceof Object is true!
    if(Array.isArray(value)) {
      ...
printjson(ff(dd)); // use the same doc as above
You can put print and other debugging aids into the code and then when you are done, you can remove them and call the pipeline to process the real data as follows:
db.foo.aggregate([
  {$replaceRoot: {newRoot: {$function: {
    body: ff, // substitute here!
    args: [ "$$CURRENT" ],
    lang: "js"
  }}
  }}
]);
Sounds like the $unwind operator would help. Check out the $unwind operator at https://docs.mongodb.com/manual/reference/operator/aggregation/unwind/

Elasticsearch ingest pipeline: how to recursively modify values in a HashMap

Using an ingest pipeline, I want to iterate over a HashMap and remove underscores from all string values (where underscores exist), leaving underscores in the keys intact. Some values are arrays that must further be iterated over to do the same modification.
In the pipeline, I use a function to traverse and modify the values of a Collection view of the HashMap.
PUT /_ingest/pipeline/samples
{
  "description": "preprocessing of samples.json",
  "processors": [
    {
      "script": {
        "tag": "remove underscore from sample_tags values",
        "source": """
          void findReplace(Collection collection) {
            collection.forEach(element -> {
              if (element instanceof String) {
                element.replace('_',' ');
              } else {
                findReplace(element);
              }
              return true;
            })
          }
          Collection samples = ctx.samples;
          samples.forEach(sample -> { // sample.sample_tags is a HashMap
            Collection sample_tags = sample.sample_tags.values();
            findReplace(sample_tags);
            return true;
          })
        """
      }
    }
  ]
}
When I simulate the pipeline ingestion, I find the string values are not modified. Where am I going wrong?
POST /_ingest/pipeline/samples/_simulate
{
  "docs": [
    {
      "_index": "samples",
      "_id": "xUSU_3UB5CXFr25x7DcC",
      "_source": {
        "samples": [
          {
            "sample_tags": {
              "Entry_A": [
                "A_hyphentated-sample",
                "sample1"
              ],
              "Entry_B": "A_multiple_underscore_example",
              "Entry_C": [
                "sample2",
                "another_example_with_underscores"
              ],
              "Entry_E": "last_example"
            }
          }
        ]
      }
    }
  ]
}
// Result
{
  "docs" : [
    {
      "doc" : {
        "_index" : "samples",
        "_type" : "_doc",
        "_id" : "xUSU_3UB5CXFr25x7DcC",
        "_source" : {
          "samples" : [
            {
              "sample_tags" : {
                "Entry_E" : "last_example",
                "Entry_C" : [
                  "sample2",
                  "another_example_with_underscores"
                ],
                "Entry_B" : "A_multiple_underscore_example",
                "Entry_A" : [
                  "A_hyphentated-sample",
                  "sample1"
                ]
              }
            }
          ]
        },
        "_ingest" : {
          "timestamp" : "2020-12-01T17:29:52.3917165Z"
        }
      }
    }
  ]
}
Here is a modified version of your script that will work on the data you provided:
PUT /_ingest/pipeline/samples
{
  "description": "preprocessing of samples.json",
  "processors": [
    {
      "script": {
        "tag": "remove underscore from sample_tags values",
        "source": """
          String replaceString(String value) {
            return value.replace('_',' ');
          }
          void findReplace(Map map) {
            map.keySet().forEach(key -> {
              if (map[key] instanceof String) {
                map[key] = replaceString(map[key]);
              } else {
                map[key] = map[key].stream().map(this::replaceString).collect(Collectors.toList());
              }
            });
          }
          ctx.samples.forEach(sample -> {
            findReplace(sample.sample_tags);
            return true;
          });
        """
      }
    }
  ]
}
The result looks like this:
{
  "samples" : [
    {
      "sample_tags" : {
        "Entry_E" : "last example",
        "Entry_C" : [
          "sample2",
          "another example with underscores"
        ],
        "Entry_B" : "A multiple underscore example",
        "Entry_A" : [
          "A hyphentated-sample",
          "sample1"
        ]
      }
    }
  ]
}
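As a side note, once the pipeline is stored it can also be applied to documents that are already indexed, not only to newly ingested ones, by referencing it from _update_by_query (assuming the index is called samples):
POST /samples/_update_by_query?pipeline=samples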
You were on the right path, but you were working on copies of the values and weren't setting the modified values back onto the document context ctx, which is what the pipeline eventually returns. This means you'll need to keep track of the current iteration indexes (for the array lists as well as the hash maps and everything in between) so that you can target the fields' positions in the deeply nested context.
Here's an example taking care of strings and (string-only) array lists. You'll need to extend it to handle hash maps (and other types) and then perhaps extract the whole process into a separate function. But AFAIK you cannot return multiple data types in Java, so it may be challenging...
PUT /_ingest/pipeline/samples
{
  "description": "preprocessing of samples.json",
  "processors": [
    {
      "script": {
        "tag": "remove underscore from sample_tags values",
        "source": """
          ArrayList samples = ctx.samples;
          for (int i = 0; i < samples.size(); i++) {
            def sample = samples.get(i).sample_tags;
            for (def entry : sample.entrySet()) {
              def key = entry.getKey();
              def val = entry.getValue();
              def replaced_val;
              if (val instanceof String) {
                replaced_val = val.replace('_',' ');
              } else if (val instanceof ArrayList) {
                replaced_val = new ArrayList();
                for (int j = 0; j < val.size(); j++) {
                  replaced_val.add(val[j].replace('_',' '));
                }
              }
              // else if (val instanceof HashMap) {
              //   do your thing
              // }
              // crucial part: write the modified value back onto ctx
              ctx.samples[i].sample_tags[key] = replaced_val;
            }
          }
        """
      }
    }
  ]
}

loopback REST API filter by nested data

I would like to filter by nested data from the REST API. For example, this object:
[
  {
    "name": "Handmade Soft Fish",
    "tags": "Rubber, Rubber, Salad",
    "categories": [
      {
        "name": "women",
        "id": 2,
        "parent_id": 0,
        "permalink": "/women"
      },
      {
        "name": "kids",
        "id": 3,
        "parent_id": 0,
        "permalink": "/kids"
      }
    ]
  },
  {
    "name": "Tasty Rubber Soap",
    "tags": "Granite, Granite, Chair",
    "categories": [
      {
        "name": "kids",
        "id": 3,
        "parent_id": 0,
        "permalink": "/kids"
      }
    ]
  }
]
is coming from GET /api/products?filter[include]=categories
and I would like to get only the products which have the category name "women". How do I do this?
LoopBack does not support filters based on related models.
This is a limitation that we have never had bandwidth to solve, unfortunately :(
For more details, see the discussion and linked issues here:
Filter on level 2 properties: https://github.com/strongloop/loopback/issues/517
Filter by properties of related models (use SQL JOIN in queries): https://github.com/strongloop/loopback/issues/683
Maybe you want to get this data via the Category REST API. For example:
GET /api/categories?filter[include]=products&filter[where][name]=women
The result will be a category object with all related products. For this, it will be necessary to declare this relation on the models, as sketched below.
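For reference, a rough sketch of what such a relation could look like in common/models/category.json, assuming a plain hasMany relation; the model name, base and foreignKey are placeholders, not taken from the question:
{
  "name": "Category",
  "base": "PersistedModel",
  "relations": {
    "products": {
      "type": "hasMany",
      "model": "Product",
      "foreignKey": "categoryId"
    }
  }
}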
Try it like this. It has worked for me.
const filter = {
  where: {
    'categories.name': {
      inq: ['women']
    }
  }
};
Pass this filter to the request as a URL-encoded query parameter, and the request would look like the one below:
GET /api/categories?filter=%7B%22where%22%3A%7B%22categories.name%22%3A%7B%22inq%22%3A%5B%22women%22%5D%7D%7D%7D
Can you share what it looks like without filter[include]=categories, please?
[edit]
After a few questions in the comments, I'd build a remote method in common/models/myModel.js (inside the model function):
function getItems(filter, categorieIds = []) {
  return new Promise((resolve, reject) => {
    let newInclude;
    if (filter.hasOwnProperty('include')) {
      if (Array.isArray(filter.include)) {
        newInclude = [].concat(filter.include, "categories");
      } else {
        if (filter.include.length > 0) {
          newInclude = [].concat(filter.include, "categories");
        } else {
          newInclude = "categories";
        }
      }
    } else {
      newInclude = "categories";
    }
    myModel.find(Object.assign({}, filter, {include: newInclude}))
      .then(data => {
        if (data.length <= 0) return resolve(data);
        if (categorieIds.length <= 0) return resolve(data);
        // there goes your specific filter on categories
        const tmp = data.filter(
          item => item.categories.findIndex(
            categorie => categorieIds.indexOf(categorie.id) > -1
          ) > -1
        );
        return resolve(tmp);
      })
      .catch(reject);
  });
}
myModel.remoteMethod('getItems', {
  accepts: [{
    arg: "filter",
    type: "object",
    required: true
  }, {
    arg: "categorieIds",
    type: "array",
    required: true
  }],
  returns: {arg: 'getItems', type: 'array'}
});
I hope it answers your question...

Python: Searching a multi-dimensional dictionary for a key

I'm using Python's JSON decoding library with the Google Maps API. I am trying to obtain the zip code of an address, but it sometimes resides in a different dictionary key. Here are two examples (I've trimmed the JSON to what is relevant):
placemark1 = {
    "AddressDetails": {
        "Country": {
            "AdministrativeArea": {
                "SubAdministrativeArea": {
                    "Locality": {
                        "PostalCode": {
                            "PostalCodeNumber": "94043"
                        }
                    }
                }
            }
        }
    }
}
placemark2 = {
    "AddressDetails": {
        "Country": {
            "AdministrativeArea": {
                "Locality": {
                    "PostalCode": {
                        "PostalCodeNumber": "11201"
                    }
                }
            }
        }
    }
}
So the zipcodes:
zipcode1 = placemark1['AddressDetails']['Country']['AdministrativeArea']['SubAdministrativeArea']['Locality']['PostalCode']['PostalCodeNumber']
zipcode2 = placemark2['AddressDetails']['Country']['AdministrativeArea']['Locality']['PostalCode']['PostalCodeNumber']
Now I was thinking that perhaps I should just search the multi-dimensional dictionary for the "PostalCodeNumber" key. Does anyone have an idea of how to accomplish this? I want it to look something like this:
>>> just_being_a_dict = {}
>>> just_a_list = []
>>> counter_dict = {'Name': 'I like messing things up'}
>>> get_key('PostalCodeNumber', placemark1)
"94043"
>>> get_key('PostalCodeNumber', placemark2)
"11201"
>>> for x in (just_being_a_dict, just_a_list, counter_dict):
... get_key('PostalCodeNumber', x) is None
True
True
True
def get_key(key, dct):
    if key in dct:
        return dct[key]
    for k in dct:
        try:
            return get_key(key, dct[k])
        except (TypeError, ValueError):
            pass
    else:
        raise ValueError
placemark1 = {
    "AddressDetails": {
        "Country": {
            "AdministrativeArea": {
                "SubAdministrativeArea": {
                    "Locality": {
                        "PostalCode": {
                            "PostalCodeNumber": "94043"
                        }
                    }
                }
            }
        }
    }
}
placemark2 = {
    "AddressDetails": {
        "Country": {
            "AdministrativeArea": {
                "Locality": {
                    "PostalCode": {
                        "PostalCodeNumber": "11201"
                    }
                }
            }
        }
    }
}
just_being_a_dict = {}
just_a_list = []
counter_dict = {'Name': 'I like messing things up'}
for x in (placemark1, placemark2, just_being_a_dict, just_a_list, counter_dict):
    try:
        print(get_key('PostalCodeNumber', x))
    except ValueError:
        print(None)
yields
94043
11201
None
None
None
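If you would rather have the behaviour sketched in the question, where a missing key yields None instead of an exception, a thin wrapper around get_key is enough (the name get_key_or_none is just illustrative):
def get_key_or_none(key, dct):
    """Like get_key, but returns None when the key is absent anywhere in the tree."""
    try:
        return get_key(key, dct)
    except ValueError:
        return None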
from collections.abc import Mapping

zipcode1 = {'placemark1':{'AddressDetails':{'Country':{'AdministrativeArea':{'SubAdministrativeArea':{'Locality':{'PostalCode':{'PostalCodeNumber':"94043"}}}}}}}}
zipcode2 = {'placemark2':{'AddressDetails':{'Country':{'AdministrativeArea':{'Locality':{'PostalCode':{'PostalCodeNumber':'11201'}}}}}}}

def treeGet(d, name):
    if isinstance(d, Mapping):
        if name in d:
            yield d[name]
        for it in d.values():
            for found in treeGet(it, name):
                yield found
This yields all matching values in the tree:
>>> list(treeGet(zipcode1, 'PostalCodeNumber'))
['94043']
>>> list(treeGet(zipcode2, 'PostalCodeNumber'))
['11201']
