Merge two hashes in order to create a JSON file - Ruby

Hi, I want to merge two hashes into a JSON file that should look like this:
{
"name": "MS Suite",
"version": "2017.1.0",
"components": [
{
"code": "1x.2017",
"name": "microservice1",
"version": "1.1.3-1"
},
{
"code": "3x.2017",
"name": "microservice2",
"version": "1.1.15-1"
}
]
}
Here is hash1:
{
"name": "MS Suite",
"version": "2017.1.0"
}
And here is hash2, which is an array:
[
{
"code": "1x.2017",
"name": "microservice1",
"version": "1.1.3-1"
},
{
"code": "3x.2017",
"name": "microservice2",
"version": "1.1.15-1"
}
]

You're looking for Hash#[]=:
hash = {
"name": "MS Suite",
"version": "2017.1.0"
}
array = [
{
"code": "1x.2017",
"name": "microservice1",
"version": "1.1.3-1"
},
{
"code": "3x.2017",
"name": "microservice2",
"version": "1.1.15-1"
}
]
hash['components'] = array
hash
#=> {:name=>"MS Suite", :version=>"2017.1.0", "components"=>[{:code=>"1x.2017", :name=>"microservice1", :version=>"1.1.3-1"}, {:code=>"3x.2017", :name=>"microservice2", :version=>"1.1.15-1"}]}
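Note that the "name": shorthand in a Ruby hash literal creates symbol keys, which is why the result above mixes symbol keys with the string key 'components'. to_json serializes both the same way, but if you want uniform keys, assign with a symbol instead:
hash[:components] = array
# hash now has only symbol keys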
To convert it to JSON:
require 'json'
hash.to_json
#=> "{\"name\":\"MS Suite\",\"version\":\"2017.1.0\",\"components\":[{\"code\":\"1x.2017\",\"name\":\"microservice1\",\"version\":\"1.1.3-1\"},{\"code\":\"3x.2017\",\"name\":\"microservice2\",\"version\":\"1.1.15-1\"}]}"

Related

RethinkDB Python eqJoin with filter not working

I have two tables. Here is records:
[
{
"payload": {
"uuid": "123",
"version": "1.0.0"
},
"record_id":"rec-123"
},
{
"payload": {
"uuid": "456",
"version": "1.0.1"
},
"record_id":"rec-456"
}
]
and here is records_master:
[
{
"version": {
"mode": 1
},
"id": "rec-123",
"title": "Test-123"
},
{
"version": {
"mode": 0
},
"title": "Test-456",
"id": "rec-456"
}
]
I want to get the records where payload.uuid and payload.version match in records, and where version.mode is 1 in records_master.
data = r.table("records").filter(
lambda doc:
(doc["payload"]["uuid"]== <some_value>) &
(doc["payload"]["version"]== <some_version>)
).coerce_to("array")
.run(connection)
This gives me all records with a matching payload.uuid and payload.version.
But I am unable to make it work with eq_join against records-master:
data = r.table('records').eq_join('record_id', r.table('records-master')).filter(
lambda doc,master:
(doc["payload"]["uuid"]== <some-value>) &
(doc["payload"]["version"]== <some-version>) &
(master["version"]["mode"] ==1)
).run(connection)
This query worked for me. After eq_join, each result row is a document with "left" and "right" fields, which is why the final filter references master["right"]:
data = r.table("records").filter(
lambda doc:
(doc["payload"]["uuid"]== <some_value>) &
(doc["payload"]["version"]== <some_version>)
).eq_join('record_id',
r.table('records-master')
)\
.filter(lambda master:(master["right"]["version"]["mode"]== 1))\
.coerce_to("array")\
.run(connection)

Compare two JSON arrays using two or more column values in DataWeave 2.0

I had a task where I needed to compare and filter two JSON arrays, matching on one column of each array, so I used the answer to this question.
However, now I need to compare the two JSON arrays matching on two, or even three, column values.
I already tried nesting one map inside another, but it isn't working.
The example data can be the same as in that answer. Compare db.CODE = file.CODE, db.NAME = file.NM and db.ID = file.IDENTITY.
var db = [
{
"CODE": "A11",
"NAME": "Alpha",
"ID": "C10000"
},
{
"CODE": "B12",
"NAME": "Bravo",
"ID": "B20000"
},
{
"CODE": "C11",
"NAME": "Charlie",
"ID": "C30000"
},
{
"CODE": "D12",
"NAME": "Delta",
"ID": "D40000"
},
{
"CODE": "E12",
"NAME": "Echo",
"ID": "E50000"
}
]
var file = [
{
"IDENTITY": "D40000",
"NM": "Delta",
"CODE": "D12"
},
{
"IDENTITY": "C30000",
"NM": "Charlie",
"CODE": "C11"
}
]
See if this works for you:
%dw 2.0
output application/json
var file = [
{
"IDENTITY": "D40000",
"NM": "Delta",
"CODE": "D12"
},
{
"IDENTITY": "C30000",
"NM": "Charlie",
"CODE": "C11"
}
]
var db = [
{
"CODE": "A11",
"NAME": "Alpha",
"ID": "C10000"
},
{
"CODE": "B12",
"NAME": "Bravo",
"ID": "B20000"
},
{
"CODE": "C11",
"NAME": "Charlie",
"ID": "C30000"
},
{
"CODE": "D12",
"NAME": "Delta",
"ID": "D40000"
},
{
"CODE": "E12",
"NAME": "Echo",
"ID": "E50000"
}
]
---
file flatMap(v) -> (
db filter (v.IDENTITY == $.ID and v.NM == $.NAME and v.CODE == $.CODE)
)
flatMap is used instead of map so that the output is flattened; with map you would get an array of arrays. That is cleaner, unless you are expecting the possibility of multiple matches per file entry, in which case I'd stick with map.
You can compare objects in DW directly, so the solution you linked can be modified to the following:
%dw 2.0
import * from dw::core::Arrays
output application/json
var db = [
{
"CODE": "A11",
"NAME": "Alpha",
"ID": "C10000"
},
{
"CODE": "B12",
"NAME": "Bravo",
"ID": "B20000"
},
{
"CODE": "C11",
"NAME": "Charlie",
"ID": "C30000"
},
{
"CODE": "D12",
"NAME": "Delta",
"ID": "D40000"
},
{
"CODE": "E12",
"NAME": "Echo",
"ID": "E50000"
}
]
var file = [
{
"IDENTITY": "D40000",
"NM": "Delta",
"CODE": "D12"
},
{
"IDENTITY": "C30000",
"NM": "Charlie",
"CODE": "C11"
}
]
---
db partition (e) -> file contains {IDENTITY: e.ID, NM: e.NAME, CODE: e.CODE}
partition returns an object with a success array (the matching db entries) and a failure array (the rest), so take the success key if you only want the matches.
You can also use filter directly together with contains:
db filter(value) -> file contains {IDENTITY: value.ID, NM: value.NAME, CODE: value.CODE}
This filters the db array based on whether file contains the object {IDENTITY: value.ID, NM: value.NAME, CODE: value.CODE}. However, this will not work if the objects in the file array have other fields that you are not using for the comparison. In that case, update the filter condition to check whether an object exists in the file array (using a data selector) where the condition holds:
db filter(value) -> file[?($.IDENTITY==value.ID and $.NM == value.NAME and $.CODE == value.CODE)] != null

Combine JSON responses in NiFi

We are calling InvokeHTTP processors and getting a JSON response. Example:
{
"id": "h569gcjhcm",
"doi": {
"id": "10.17632/h569gcjhcm.1",
"status": "allocated",
"prefix": "10.17632"
},
"name": "Data for: Flooding of the Caspian Sea at the intensification of Northern Hemisphere Glaciations",
"description": "Supplementary data for the Jeirankechmez section in Azerbaijan.\n\n- Appendix A contains all paleomagnetic data and interpretations of the Jeirankechmez section. This .dir file can be imported into the paleomagnetism.org webportal under \"Interpretation Portal\", \"Advanced Options\", \"Import Application Save\". For further details on the use of paleomagnetism.org please refer to the article by Koymans et al. (2016) - https://doi.org/10.1016/j.cageo.2016.05.007.\n- Appendix B contains the magnetic susceptibility data for the analysed samples, including geographic coordinates and stratigraphic levels.\n- Appendix C contains the 40Ar/39Ar data for the three analysed volcanic ash layers. ",
"version": 1,
"publish_date": "2019-01-29T12:51:38.090Z",
"data_licence": {
"id": "01d9c749-3c4d-4431-9df3-620b2dcfe144",
"short_name": "CC BY 4.0",
"full_name": "Creative Commons Attribution 4.0 International",
"description": "This dataset is licensed under a Creative Commons Attribution 4.0 International licence.\n\nWhat does this mean?\nYou can share, copy and modify this dataset so long as you give appropriate credit, provide a link to the CC BY license, and indicate if changes were made, but you may not do so in a way that suggests the rights holder has endorsed you or your use of the dataset. Note that further permission may be required for any content within the dataset that is identified as belonging to a third party.",
"url": "http://creativecommons.org/licenses/by/4.0",
"category": "Creative"
},
"contributors": [
{
"first_name": "Christiaan",
"last_name": "van Baak"
},
{
"first_name": "Marius",
"last_name": "Stoica"
},
{
"first_name": "Arjen",
"last_name": "Grothe"
},
{
"first_name": "Gareth",
"last_name": "Davies"
},
{
"profile_id": "72970719-95c8-341b-80d2-afa9e7154baf",
"first_name": "Wout",
"last_name": "Krijgsman"
},
{
"profile_id": "3a4bfe2c-4098-3859-9b88-789fa993e05a",
"first_name": "Keith",
"last_name": "Richards"
},
{
"profile_id": "f1660f3c-ebbd-3289-8240-1f4ea7913df4",
"first_name": "Klaudia",
"last_name": "Kuiper"
},
{
"first_name": "Elmira",
"last_name": "Aliyeva"
}
],
"versions": [
{
"version": 1,
"publish_date": "2019-01-29T12:51:38.090Z",
"available": true
}
],
"files": [
{
"filename": "Appendix_A_Jeirankechmez_pmag_interpretations.dir",
"id": "f2f4cba7-2411-4737-a9b2-f094db30dca1",
"content_details": {
"id": "994bc865-5300-4d76-a373-e528ccd830e8",
"sha256_hash": "2427c4b077372760973ce8224694f2a2ee5383c7f022ad818164d847a20e27cc",
"sha1_hash": "73792dc6d6eb2c1de1e04926ba5d4420dd0aaece",
"content_type": "application/x-director",
"size": 917022,
"created_date": "2019-01-03T00:00:00.000Z"
"download_expiry_time": "2019-01-29T13:52:25.729Z"
},
"metrics": {
"downloads": 0,
"previews": 0
}
},
{
"filename": "Appendix_B_Sample_locations_susceptibility.xlsx",
"id": "64241bf0-5279-49e8-a505-be9075b910e1",
"content_details": {
"id": "af8809d0-8e63-4599-abaa-e7af9ad39959",
"sha256_hash": "0588f44a0cbd477aa2798323e57ce0b2d4a118e767c0b1ffdc9eb1017e4d23c2",
"sha1_hash": "02e89f6f197ebf495e1e2c3d1aab250efc7545e7",
"content_type": "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
"size": 24770,
"created_date": "2019-01-03T00:00:00.000Z"
,
"download_expiry_time": "2019-01-29T13:52:25.732Z"
},
"metrics": {
"downloads": 0,
"previews": 0
}
},
{
"filename": "Appendix_C_ArAr_data.xlsx",
"id": "2e912027-ff3f-48ad-98b9-b643b59ba0e3",
"content_details": {
"id": "4960377c-060d-41f6-b7af-150617d8ebeb",
"sha256_hash": "235dc32c1e99f350ee5c99908a5f5d72d1aeeab02f78c2e0181d585bd1880fa6",
"sha1_hash": "6483156e4577948cac5d2679eee862c76faed1c9",
"content_type": "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
"size": 18510,
"created_date": "2019-01-03T00:00:00.000Z"
},
"metrics": {
"downloads": 0,
"previews": 0
}
}
],
"articles": [
{
"id": "10.1016/j.gloplacha.2019.01.007",
"title": "Flooding of the Caspian Sea at the intensification of Northern Hemisphere Glaciations",
"doi": "10.1016/j.gloplacha.2019.01.007",
"journal": {
"issn": "0921-8181",
"name": "Global and Planetary Change",
"url": "http://www.sciencedirect.com/science/journal/09218181"
}
}
],
"categories": [
{
"id": "http://com/vocabulary/OmniScience/Concept-170590667",
"label": "Geology"
},
{
"id": "http://data.elsevier.com/vocabulary/OmniScience/Concept-473860195",
"label": "Strontium Isotope"
}
],
"institutions": [ ],
"metrics": {
},
"available": true,
"related_links": [ ]
}
I am using contributors.profile_id from the above JSON to call a new endpoint (InvokeHTTP): https://api.xxx.com/profile/$.profile_id
The JSON response for this looks like:
"contributors": [
{
“profile_id”:”cedferfiherhforhforf”
"first_name": “xxx”,
"last_name": "van Baak”,
“other_ids”:[] ,
“Other info”: “deeded” }
I have to call this endpoint once per object in contributors (say there are 5 objects in contributors, then I have to call the endpoint 5 times) and combine those 5 responses together.
Then I have to merge the combined response back into the main response.
Just an example flow:
EvaluateJsonPath to extract "id" into an attribute, to correlate on later
SplitJson to split your JSON on "contributors"
InvokeHTTP to call the endpoint for each contributor
MergeContent to merge by the "id" attribute, using the fragment count set by SplitJson

Sorting an array of objects based on object property

I have an array like so:
[
{
"name": "aabb",
"commit": {
"id": "1",
"message": "aabb ",
"committed_date": "2018-04-04T15:11:04.000+05:30",
"committer_name": "ak",
"committer_email": "ak#ak.in"
},
"protected": false
},
{
"name": "aacc",
"commit": {
"id": "2",
"message": "aacc ",
"committed_date": "2018-02-04T15:11:04.000+05:30",
"committer_name": "ak",
"committer_email": "ak#ak.in"
},
"protected": false
},
{
"name": "aadd",
"commit": {
"id": "3",
"message": "aadd ",
"committed_date": "2018-04-01T15:11:04.000+05:30",
"committer_name": "ak",
"committer_email": "ak#ak.in"
},
"protected": false
}
]
I need to sort this array based on committed_date. How do I do that?
Do I have to loop and write a custom sorting function, or does Ruby offer something out of the box?
Use sort_by:
array.sort_by { |obj| obj.attribute }
Or, more concisely:
array.sort_by(&:attribute)
In your case:
array.sort_by { |obj| obj[:commit][:committed_date] }
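One caveat, assuming the array came from JSON.parse: by default the parsed keys are strings, not symbols (unless you pass symbolize_names: true). Also, because the committed_date values here are ISO 8601 strings sharing the same UTC offset, comparing them as strings sorts correctly; if the offsets could differ, compare parsed times instead. A sketch, with json_string as a placeholder for your raw JSON:
require 'json'
require 'time'

array = JSON.parse(json_string)
# String keys after a default JSON.parse:
sorted = array.sort_by { |obj| obj['commit']['committed_date'] }
# Timezone-safe variant, comparing real Time objects:
sorted = array.sort_by { |obj| Time.parse(obj['commit']['committed_date']) }
# Newest first:
newest_first = sorted.reverse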

Transferring JSON data into an array using Ruby

This is my JSON:
{
"jobs": [
{
"id": 1,
"title": "Software Developer",
"applicants": [
{
"id": 1,
"name": "Rich Hickey",
"tags": ["clojure", "java", "immutability", "datomic", "transducers"]
},
{
"id": 2,
"name": "Guido van Rossum",
"tags": ["python", "google", "bdfl", "drop-box"]
}
]
},
{
"id": 2,
"title": "Software Architect",
"applicants": [
{
"id": 42,
"name": "Rob Pike",
"tags": ["plan-9", "TUPE", "go", "google", "sawzall"]
},
{
"id": 2,
"name": "Guido van Rossum",
"tags": ["python", "google", "bdfl", "drop-box"]
},
{
"id": 1337,
"name": "Jeffrey Dean",
"tags": ["spanner", "BigTable", "MapReduce", "deep learning", "massive clusters"]
}
]
}
]
}
I want to put the list of "jobs" into an array using Ruby.
I have the following code so far.
require 'json'
file = File.read(filepath)
data_hash = JSON.parse(file)
How do I iterate over data_hash, choose the information I want, and place it in an array?
You can use Array#each because data_hash['jobs'] contains an array of jobs:
data_hash['jobs'].each {|job| ... }
Like this, collecting each job's title:
arr = []
data_hash['jobs'].each do |job|
  arr << job['title']
end
Use Array#map for shorter code:
data_hash['jobs'].map do |job|
  # Do whatever you want with the job here
  properties = %w(title applicants)
  job.select { |key| properties.include?(key) }
end
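And if you want a flat list of nested fields rather than trimmed job objects, flat_map works well. For example, collecting every applicant name across all jobs (a sketch against the data above; names repeat when someone applied to more than one job):
names = data_hash['jobs'].flat_map { |job| job['applicants'].map { |a| a['name'] } }
#=> ["Rich Hickey", "Guido van Rossum", "Rob Pike", "Guido van Rossum", "Jeffrey Dean"]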
