Let's say I have something like this:
# Simple value object pairing a fruit name with a count.
class FruitCount
  attr_accessor :name, :count

  def initialize(name, count)
    # `@` was mangled to `#` in the original paste, which turned these
    # assignments into comments and left the attributes unset.
    @name = name
    @count = count
  end
end
# Sample data: duplicate fruit names ('Orange', 'Kiwi') so the summary
# must aggregate counts per name.
obj1 = FruitCount.new('Apple', 32)
obj2 = FruitCount.new('Orange', 5)
obj3 = FruitCount.new('Orange', 3)
obj4 = FruitCount.new('Kiwi', 15)
obj5 = FruitCount.new('Kiwi', 1)
fruit_counts = [obj1, obj2, obj3, obj4, obj5]
Now what I need is a function build_fruit_summary which, given a fruit_counts array, returns the following summary:
# Desired result: one entry per distinct fruit name (counts summed),
# plus a grand total across all fruits (32 + 8 + 16 = 56).
fruits_summary = {
fruits: [
{
name: 'Apple',
count: 32
},
{
name: 'Orange',
count: 8
},
{
name: 'Kiwi',
count: 16
}
],
total: {
name: 'AllFruits',
count: 56
}
}
I just cannot figure out the best way to do the aggregations.
Edit:
In my example I have more than one count.
# Variant of FruitCount carrying two separate counters.
class FruitCount
  attr_accessor :name, :count1, :count2

  def initialize(name, count1, count2)
    # Restored the instance-variable assignments (`@` was mangled to `#`
    # in the original paste, turning them into comments).
    @name = name
    @count1 = count1
    @count2 = count2
  end
end
Ruby's Enumerable is your friend, particularly each_with_object which is a form of reduce.
You first need the fruits value:
# Aggregate per-name counts into an array of hashes.
fruits = fruit_counts.each_with_object([]) do |fruit, list|
  aggregate = list.detect { |f| f[:name] == fruit.name }
  if aggregate.nil?
    # Initialize every counter key up front; without `count2: 0` here,
    # `aggregate[:count2] += ...` below raises NoMethodError (nil + Integer).
    aggregate = { name: fruit.name, count: 0, count2: 0 }
    list << aggregate
  end
  aggregate[:count] += fruit.count
  aggregate[:count2] += fruit.count2
end
UPDATE: added multiple counts within the single fruity loop.
The above will serialize each fruit object - maintaining a count for each fruit - into a hash and aggregate them into an empty list array, and assign the aggregate array to the fruits variable.
Now, get the total value:
# Grand total across all fruits, summing both counters per fruit.
# NOTE(review): uses fruit.count / fruit.count2 while the edited class
# defines count1 / count2 — confirm which attribute names are current.
total = { name: 'AllFruits', count: fruit_counts.map { |f| f.count + f.count2 }.reduce(:+) }
UPDATE: total taking into account multiple count attributes within a single loop.
The above maps the fruit_counts array, plucking each object's count attribute, resulting in an array of integers. Then, reduce is getting the sum of the array's integers.
Now put it all together into the summary:
# Combine the per-fruit list and the grand total into the final summary.
fruits_summary = { fruits: fruits, total: total }
You can formalize this in an OOP style by introducing a FruitCollection object that uses the Enumerable module:
# Enumerable wrapper around a list of fruit objects that can summarize
# itself (per-name aggregates plus a grand total).
class FruitCollection
  include Enumerable

  def initialize(fruits)
    # Restored `@fruits` (mangled to `#fruits` in the original paste).
    @fruits = fruits
  end

  # Full summary: aggregated per-fruit hashes plus the grand total.
  def summary
    { fruits: fruit_counts, total: total }
  end

  # Enumerable drives every other method (map, each_with_object, ...)
  # through this.
  def each(&block)
    @fruits.each(&block)
  end

  # One hash per distinct fruit name with both counters summed.
  def fruit_counts
    each_with_object([]) do |fruit, list|
      aggregate = list.detect { |f| f[:name] == fruit.name }
      if aggregate.nil?
        # Both counter keys must start at 0, otherwise the
        # `+= fruit.count2` below raises NoMethodError on nil.
        aggregate = { name: fruit.name, count: 0, count2: 0 }
        list << aggregate
      end
      aggregate[:count] += fruit.count
      aggregate[:count2] += fruit.count2
    end
  end

  # Grand total across the whole collection (both counters).
  def total
    { name: 'AllFruits', count: map { |f| f.count + f.count2 }.reduce(:+) }
  end
end
Now pass your fruit_count array into that object:
fruit_collection = FruitCollection.new fruit_counts
fruits_summary = fruit_collection.summary
The reason the above works is by overriding the each method which Enumerable uses under the hood for every enumerable method. This means we can call each_with_object, reduce, and map (among others listed in the enumerable docs above) and it will iterate over the fruits since we told it to in the above each method.
Here's an article on Enumerable.
UPDATE: your multiple counts can be easily added by adding a total attribute to your fruit object:
# FruitCount with two counters and a convenience `total` combining them.
class FruitCount
  attr_accessor :name, :count1, :count2

  def initialize(name, count1, count2)
    # Restored `@` (mangled to `#` in the original paste).
    @name = name
    @count1 = count1
    @count2 = count2
  end

  # Combined count across both counters.
  def total
    @count1 + @count2
  end
end
Then just use fruit.total whenever you need to aggregate the totals:
# Grand total across all fruits via the new FruitCount#total helper.
fruit_counts.map(&:total).reduce(:+)
# Build the summary in one expression: group records by fruit name,
# sum counts per group, and append the grand total.
fruits_summary = {
  fruits: fruit_counts
    .group_by(&:name)
    .map { |fruit_name, group| { name: fruit_name, count: group.map(&:count).reduce(:+) } },
  total: {
    name: 'AllFruits',
    count: fruit_counts.map(&:count).reduce(:+)
  }
}
Not very efficient way, though :)
UPD: fixed keys in fruits collection
Or slightly better version:
# Same aggregation, with two fixes: the `fuits:` key typo is corrected to
# `fruits:`, and the `reduce({}) { ...; acc }` idiom is replaced by
# each_with_object, which returns the accumulator automatically.
fruits_summary = {
  fruits: fruit_counts
    .each_with_object(Hash.new(0)) { |fruit, acc| acc[fruit.name] += fruit.count }
    .map { |name, count| { name: name, count: count } },
  total: {
    name: 'AllFruits',
    count: fruit_counts.map(&:count).reduce(:+)
  }
}
# Tally counts per fruit name first, then shape the summary from the tally.
counts = fruit_counts.each_with_object(Hash.new(0)) do |fruit, tally|
  tally[fruit.name] += fruit.count
end
#=> {"Apple"=>32, "Orange"=>8, "Kiwi"=>16}
fruits_summary = {
  fruits: counts.map { |fruit_name, fruit_count| { name: fruit_name, count: fruit_count } },
  total: { name: 'AllFruits', count: counts.values.reduce(:+) }
}
#=> {:fruits=>[
#     {:name=>"Apple", :count=>32},
#     {:name=>"Orange", :count=> 8},
#     {:name=>"Kiwi", :count=>16}],
#    :total=>
#     {:name=>"AllFruits", :count=>56}
#   }
Related
Active Support's deep_transform_values recursively transforms all values of a hash. However, is there a similar method that would allow to access the keys of values while transforming?
I'd like to be able to do the following:
keys_not_to_transform = ['id', 'count']
response = { result: 'ok', errors: [], data: { id: '123', price: '100.0', quotes: ['1.0', '2.0'] }, count: 10 }
# NOTE(review): aspirational API — ActiveSupport's deep_transform_values!
# yields only the value, not the key, so this does not run as written.
# `return` inside a block at top level would also raise LocalJumpError;
# shown purely to illustrate the desired interface.
response.deep_transform_values! do |key, value|
# Use value's key to help decide what to do
return value if keys_not_to_transform.any? key.to_s
s = value.to_s
if s.present? && /\A[+-]?\d+(\.\d+)?\z/.match?(s)
return BigDecimal(s)
else
value
end
end
#Expected result
# =>{:result=>"ok", :errors=>[], :data=>{:id=>"123", :price=>0.1e3, :quotes=>[0.1e1, 0.2e1]}, :count=>10}
Note that we are not interested in transforming the key itself, just having it on hand while transforming the corresponding values.
You could use Hash#deep_merge! (provided by ActiveSupport) like so:
# Keys whose values must be left untouched (compared as strings).
keys_not_to_transform = ['id', 'count']
# Converts one leaf value to BigDecimal when it looks like a number;
# everything else passes through unchanged.
transform_value = lambda do |value|
s = value.to_s
if s.present? && /\A[+-]?\d+(\.\d+)?\z/.match?(s)
BigDecimal(s)
else
value
end
end
# Merge-block used with deep_merge!: merging `response` into itself makes
# ActiveSupport walk every nested key/value pair and lets this block
# decide the "merged" (i.e. transformed) value.
# NOTE(review): deep_merge!'s block is documented to receive
# (key, this_val, other_val); as a non-lambda Proc the extra argument is
# silently ignored — confirm against the ActiveSupport version in use.
transform = Proc.new do |key,value|
if keys_not_to_transform.include? key.to_s
value
elsif value.is_a?(Array)
value.map! do |v|
# Recurse into hashes nested inside arrays via the same self-merge trick.
v.is_a?(Hash) ? v.deep_merge!(v,&transform) : transform_value.(v)
end
else
transform_value.(value)
end
end
response = { result: 'ok', errors: [], data: { id: '123', price: '100.0', quotes: ['1.0', '2.0'], other: [{id: '124', price: '17.0'}] }, count: 10 }
# Self-merge: every key collides with itself, so the block runs for each pair.
response.deep_merge!(response, &transform)
This outputs:
#=>{:result=>"ok", :errors=>[], :data=>{:id=>"123", :price=>0.1e3, :quotes=>[0.1e1, 0.2e1], :other=>[{:id=>"124", :price=>0.17e2}]}, :count=>10}
I'd just implement the necessary transformation logic with plain old Ruby and a bit of recursion, no external dependencies needed. For example:
# Recursively walk +hash+, converting numeric-looking leaf values to
# BigDecimal while leaving values under +ignore_keys+ (string-compared)
# untouched. Returns a new hash; the input is not mutated.
def transform(hash, ignore_keys: [])
  hash.each_with_object({}) do |(key, value), out|
    out[key] =
      if value.is_a?(Hash)
        # Nested hash: recurse with the same ignore list.
        transform(value, ignore_keys: ignore_keys)
      elsif ignore_keys.include?(key.to_s)
        value
      elsif value.to_s =~ /\A[+-]?\d+(\.\d+)?\z/
        BigDecimal(value)
      else
        value
      end
  end
end
keys_not_to_transform = %w[id count]
response = { result: 'ok', errors: [], data: { id: '123', price: '100.0' }, count: 10 }
# id/count survive untouched; price is converted to a BigDecimal.
transform(response, ignore_keys: keys_not_to_transform)
# => {:result=>"ok", :errors=>[], :data=>{:id=>"123", :price=>#<BigDecimal:5566613bb128,'0.1E3',9(18)>}, :count=>10}
I'm learning coding, and one of the assignments is to return the names of people who like the same TV show.
I have managed to get it working and to pass TDD, but I'm wondering if I've taken the 'long way around' and that maybe there is a simpler solution?
Here is the setup and test:
# Minitest setup + test for tv_show. The original paste had `@` mangled
# into `#`, which commented out every assignment and broke the syntax;
# the instance variables are restored here.
class TestFriends < MiniTest::Test
  def setup
    @person1 = {
      name: "Rick",
      age: 12,
      monies: 1,
      friends: ["Jay", "Keith", "Dave", "Val"],
      favourites: {
        tv_show: "Friends",
        things_to_eat: ["charcuterie"]
      }
    }
    @person2 = {
      name: "Jay",
      age: 15,
      monies: 2,
      friends: ["Keith"],
      favourites: {
        tv_show: "Friends",
        things_to_eat: ["soup", "bread"]
      }
    }
    @person3 = {
      name: "Val",
      age: 18,
      monies: 20,
      friends: ["Rick", "Jay"],
      favourites: {
        tv_show: "Pokemon",
        things_to_eat: ["ratatouille", "stew"]
      }
    }
    @people = [@person1, @person2, @person3]
  end

  # Rick and Jay share "Friends", so they are the expected result.
  def test_shared_tv_shows
    expected = ["Rick", "Jay"]
    actual = tv_show(@people)
    assert_equal(expected, actual)
  end
end
And here is the solution that I found:
# Group people's names by favourite TV show, then return the first group
# shared by more than one person, or nil when no show is shared.
#
# Fixes vs the original: the trailing `for` loop was the method's last
# expression, so when no show was shared the method returned
# `tv_friends.values` (every group) instead of nil; `for` loops,
# `== false`, and empty `()` calls replaced with idiomatic Ruby.
def tv_show(people_list)
  # Block-form default so each new show gets its own fresh array.
  tv_friends = Hash.new { |hash, show| hash[show] = [] }
  people_list.each do |person|
    tv_friends[person[:favourites][:tv_show]] << person[:name]
  end
  tv_friends.values.find { |names| names.length > 1 }
end
It passes, but is there a better way of doing this?
I think you could replace those for loops with the Array#each. But in your case, as you're creating a hash with the values in people_list, then you could use the Enumerable#each_with_object assigning a new Hash as its object argument, this way you have your own person hash from the people_list and also a new "empty" hash to start filling as you need.
To check if your inner hash has a key with the value person[:favourites][:tv_show] you can check for its value just as a boolean one, the comparison with false can be skipped, the value will be evaluated as false or true by your if statement.
You can create the variables tv_show and name to reduce the code a little, and then iterate over your tv_friends hash to select among its values the one that has a length greater than 1. As this will give you an array inside an array, you can get the first element with first (or [0]).
# Group names by favourite show, then return the first group with more
# than one member (nil when no show is shared).
#
# Fixes vs the original: `Hash.new({})` shares ONE default hash across
# all missing keys — a classic foot-gun; replaced with a block default
# that assigns a fresh array per show (which also removes the ternary
# key?-check). `select { ... }.first` collapsed to `find`.
def tv_show(people_list)
  tv_friends = people_list.each_with_object(Hash.new { |hash, show| hash[show] = [] }) do |person, shows|
    shows[person[:favourites][:tv_show]] << person[:name]
  end
  tv_friends.values.find { |names| names.length > 1 }
end
Also you can omit parentheses when the method call doesn't have arguments.
Suppose I have an Array like this
# Illustrative structure: `val` is a placeholder (defined later as a
# counter method). Each "small hash" lives two levels down, at
# data[i][:important_key_1][:important_key_2][j].
data = [
{
key: val,
important_key_1: { # call this the big hash
key: val,
important_key_2: [
{ # call this the small hash
key: val,
},
{
key: val,
},
]
},
},
{
key: val,
important_key_1: {
key: val,
important_key_2: [
{
key: val,
},
{
key: val,
},
]
},
},
]
I want to create a lazy enumerator that would return the next small hash on each #next, and move on to the next big hash and do the same when the first big hash reaches the end
The easy way to return all the internal hashes that I want would be something like this
# `data` is an Array, so `data[:important_key_1]` raises TypeError
# (no implicit conversion of Symbol into Integer). Iterate the array and
# collect each big hash's nested list instead; flat_map replaces the
# map-then-flatten pair.
data.flat_map do |big_hash|
  big_hash[:important_key_1][:important_key_2]
end
Is there someway to do this or do I need to implement my own logic ?
This returns a lazy enumerator which iterates over all the small hashes :
# Returns a lazy enumerator that yields every "small hash" found under
# data[i][:important_key_1][:important_key_2], in order, one big hash
# at a time. Laziness means later big hashes are never touched once a
# downstream consumer (e.g. find) is satisfied.
def lazy_nested_hashes(data)
  Enumerator.new do |yielder|
    data.each do |big_hash|
      big_hash[:important_key_1][:important_key_2].each do |small_hash|
        yielder << small_hash
      end
    end
  end.lazy
end
With your input data and a val definition :
# Monotonically increasing counter: each call to `val` returns 1, 2, 3, ...
# (`@i` was mangled to `#i` in the original paste, making val return nil).
@i = 0
def val
  @i += 1
end
It outputs :
# Materialize the whole lazy enumerator:
puts lazy_nested_hashes(data).to_a.inspect
#=> [{:key=>3}, {:key=>4}, {:key=>7}, {:key=>8}]
# Lazy evaluation: stops as soon as find succeeds, so the second big hash
# is never visited:
puts lazy_nested_hashes(data).map { |x| x[:key] }.find { |k| k > 3 }
#=> 4
For the second example, the second big hash isn't considered at all (thanks to enum.lazy)
I have been trying to tune this method that sets up complex assignment and I am looking for other options to make this function pass the cops.
Would anyone have thoughts to point me in the right direction?
Right now, I am tinkering with breaking out the two inner .map calls.
Failing Cops
Assignment Branch Condition size for parse_items is too high. [24.08/15]
def parse_items
Avoid multi-line chains of blocks.
end.compact.map do |opt|
The problem code
# Builds one attribute hash per product option that has a matching record
# in the payload's "included" section.
#
# Fixes vs the original: `#payload` restored to the mangled `@payload`
# (the `#` made `group_id:` a dangling label — a syntax error), and the
# multi-line `end.compact.map` block chain is split into named steps,
# which also addresses the rubocop complaints quoted above.
def parse_items
  option_refs = parse_relationships.select { |opt| opt['type'] == 'product_options' }
  included_options = option_refs.map { |opt| parse_included.detect { |x| x['id'] == opt['id'] } }.compact
  included_options.map do |opt|
    {
      group_id: @payload['id'],
      originator_id: opt['id'],
      price: opt['attributes']['price'],
      description: opt['attributes']['name'],
      exp_quantity: opt['attributes']['quantity'].to_i,
      title: parse_attributes['name'],
      image_originator_url: 'image_for_product',
      updated_at: timestamp
    }
  end
end
Helper Methods
private
# Payload's 'data' section; [] when the payload is missing or malformed.
# (`@payload` restored from the mangled `#payload`, which had turned the
# body into a comment so the method always returned [].)
def parse_data
  @payload['data']
rescue
  []
end
# Payload's 'included' section; [] when the payload is missing/malformed.
# (`@payload` restored from the mangled `#payload`.)
def parse_included
  @payload['included']
rescue
  []
end
# Attributes hash of the payload's data; [] when any level is missing.
# (`@payload` restored from the mangled `#payload`.)
# NOTE(review): the [] fallback is an odd type for a hash accessor —
# callers do parse_attributes['name'], which raises on an Array; confirm.
def parse_attributes
  @payload['data']['attributes']
rescue
  []
end
# Option relationship records of the payload; [] when any level is missing.
# (`@payload` restored from the mangled `#payload`.)
def parse_relationships
  @payload['data']['relationships']['options']['data']
rescue
  []
end
# Parsed Time from the payload's updated_at attribute.
# NOTE(review): unlike the sibling helpers there is no rescue here, so a
# missing or invalid timestamp raises — confirm that is intended.
def timestamp
Time.parse(parse_attributes['updated_at'])
end
Updated Errors
In the spec: wrong number of arguments (given 2, expected 1) for Failure/Error: SELECT = ->(opt) { opt['type'] == 'product_options' }
Assignment Branch Condition size for parse_items is too high. [17/15]
Updated Code
# NOTE(review): constants holding lambdas that call an instance method
# (parse_included) only work where `self` responds to it at call time —
# in a class body this raised errors in the spec, as noted above. Kept
# structurally as posted, apart from restoring the mangled `#payload`
# to `@payload` (the `#` had made `group_id:` a syntax error).
SELECT = ->(opt) { opt['type'] == 'product_options' }
MAP = ->(opt) { parse_included.detect { |x| x['id'] == opt['id'] } }

def parse_items
  parse_relationships.select(&SELECT).map(&MAP).compact.map do |opt|
    {
      group_id: @payload['id'],
      originator_id: opt['id'],
      price: opt['attributes']['price'],
      description: opt['attributes']['name'],
      exp_quantity: opt['attributes']['quantity'].to_i,
      title: parse_attributes['name'],
      image_originator_url: 'image_for_product',
      updated_at: timestamp
    }
  end
end
I was able to refactor this making it far cleaner and pass all the cops! Hooray!
# Entry point: same pipeline as the one-liner, unrolled into named steps
# so each stage of select -> lookup -> serialize reads left to right.
def parse_items
  option_data = parse_relationships['options']['data']
  product_options = select_item_options(option_data)
  assign_item_attributes(select_included_option(product_options))
end
# For each option reference, look up its full record in the payload's
# "included" section. Misses map to nil (the caller compacts them).
def select_included_option(options)
  options.map do |option|
    parse_included.find { |included| included['id'] == option['id'] }
  end
end
# Maps each included option record to the final item-attribute hash.
# (`@payload` restored from the mangled `#payload`, which had turned
# `group_id:` into a dangling label — a syntax error.)
def assign_item_attributes(options)
  options.compact.map do |opt|
    {
      group_id: @payload['id'],
      originator_id: opt['id'],
      price: opt['attributes']['price'],
      description: opt['attributes']['name'],
      exp_quantity: opt['attributes']['quantity'].to_i,
      title: parse_attributes['name'],
      image_originator_url: parse_image,
      updated_at: parse_timestamp
    }
  end
end
# Keep only relationship entries whose type is 'product_options'.
def select_item_options(options)
  options.reject { |option| option['type'] != 'product_options' }
end
Given that Venue has a field "featured_level" that could go from 0 to N.
I need to return venues ordered by featured_level, but randomizing the order of the venues with the same featured_level.
UPDATE:
Using the given answer I have this method:
# Venues ordered by featured_level descending; order within a level is
# randomized on every call.
#
# Fix: the original relied on group_by insertion order plus `reverse`,
# which only orders correctly when records happen to arrive sorted by
# level — exactly the failure the test above exposes. Sort the groups by
# level explicitly instead, and flat_map away the intermediate
# one-entry hashes.
def self.by_featured_level
  all.group_by(&:featured_level)
     .sort_by { |level, _venues| -level }
     .flat_map { |_level, venues| venues.shuffle }
end
But it fails the following test (also doesn't work properly with real numbers), I'm still trying to figure it out, test used:
# RSpec for the ordering: venue1 (level 5) first, venue3 (level 4)
# second, venue5 (level 0) last; the three level-2 venues may appear in
# any order between them. (`@` was mangled to `#` in the original paste;
# instance variables restored, and the "hightest" typo fixed in locals.)
describe "by featured level" do
  before do
    @venue1 = create(:venue, featured_level: 5)
    @venue2 = create(:venue, featured_level: 2)
    @venue3 = create(:venue, featured_level: 4)
    @venue4 = create(:venue, featured_level: 2)
    @venue5 = create(:venue, featured_level: 0)
    @venue6 = create(:venue, featured_level: 2)
    @venues = Venue.by_featured_level
  end

  it {
    starts_with_highest = @venues.index(@venue1) == 0
    expect(starts_with_highest).to be_truthy
  }

  it {
    second_highest_is_2nd = @venues.index(@venue3) == 1
    expect(second_highest_is_2nd).to be_truthy
  }

  it {
    ends_with_lowest = @venues.last.id == @venue5.id
    expect(ends_with_lowest).to be_truthy
  }
end
Let's start by collecting the venues:
require 'ostruct'

# Sample venues; OpenStruct stands in for the ActiveRecord model.
# (`@venueN` was mangled to `#venueN` in the original paste, commenting
# out every assignment; restored here.)
@venue1 = OpenStruct.new(id: :a, level: 0)
@venue2 = OpenStruct.new(id: :b, level: 2)
@venue3 = OpenStruct.new(id: :c, level: 2)
@venue4 = OpenStruct.new(id: :d, level: 2)
@venue5 = OpenStruct.new(id: :e, level: 4)
@venue6 = OpenStruct.new(id: :f, level: 5)
@venues = [@venue1, @venue2, @venue3, @venue4, @venue5, @venue6]
We want to randomize per level, so we'll create groups:
# Index venues by level. (`@venues` restored from the mangled `#venues`,
# which had turned this whole expression into a comment.)
@venues.group_by { |v| v.level }
This returns a hash where the venues are indexed by each level.
Now to iterate over the hash and return a new hash where the values have been randomized:
# Build [{level => shuffled venues}, ...], one hash per level.
# (`@venues` restored from the mangled `#venues`.)
@venues.
  group_by { |v| v.level }.
  inject([]) { |memo, (level, values)|
    memo << { level => values.shuffle }
  }
This hash can now be flattened so the final result is a list of hashes, randomized per level:
# Same as above, then pull out each hash's values and flatten into a
# single list, randomized within each level.
# (`@venues` restored from the mangled `#venues`.)
@venues.
  group_by { |v| v.level }.
  inject([]) { |memo, (level, values)|
    memo << { level => values.shuffle }
  }.
  map { |hash| hash.values }.
  flatten
I originally needed an ActiveRecord::Relation instead of an Array (which is what @zezetic's answer returns), so in the end I did:
# Keeps the result an ActiveRecord::Relation: each row gets a random
# tiebreaker column (rdn), so rows within the same featured_level come
# back in random order. NOTE(review): random() is Postgres syntax —
# confirm the database; RAND() would be needed on MySQL.
scope :by_featured_level, -> {
select("venues.*, (random() * 9 + 1) AS rdn").order('featured_level DESC, rdn') }