Currently I have this array:
[["abc", [0.0, 1.0, 2.0, 3.0], "Testing"], ["efg", [1.0, 2.0, 3.0, 4.0], "Testing"]]
Condition:
If index 2 of each nested array is the same, then I want to sum the arrays at index 1 element-wise:
[0.0 + 1.0, 1.0 + 2.0, 2.0 + 3.0, 3.0 + 4.0] = [1.0, 3.0, 5.0, 7.0]
The final result I want:
[["efg", [1.0, 3.0, 5.0, 7.0], "Testing"]]
Is there any way or suggestion to obtain this result?
I've had fun building this in TDD:
def nested_match_sum(data)
  grouped = data.group_by(&:last)
  grouped.values.map do |array|
    array.inject(nil) do |result, elem|
      if result
        elem[1] = array_position_sum(elem[1], result[1])
      end
      elem
    end
  end
end

def array_position_sum(first, second)
  first.zip(second).map do |couple|
    couple.first + couple.last
  end
end
require 'rspec/autorun'

describe "#nested_match_sum" do
  let(:data) do
    [
      ["abc", [0.0, 1.0, 2.0, 3.0], "Testing"],
      ["efg", [1.0, 2.0, 3.0, 4.0], "Testing"]
    ]
  end

  it "groups by last element and aggregates the sum" do
    expect(nested_match_sum(data)).to eq(
      [["efg", [1.0, 3.0, 5.0, 7.0], "Testing"]]
    )
  end

  context "giving multiple keys" do
    let(:data) do
      [
        ["abc", [0.0, 1.0, 2.0, 3.0], "Testing"],
        ["efg", [1.0, 2.0, 3.0, 4.0], "Testing"],
        ["abc", [0.0, 1.0, 2.0, 3.0], "Another"],
        ["ghj", [2.0, 3.0, 4.0, 5.0], "Another"]
      ]
    end

    it "works as well" do
      expect(nested_match_sum(data)).to eq([
        ["efg", [1.0, 3.0, 5.0, 7.0], "Testing"],
        ["ghj", [2.0, 4.0, 6.0, 8.0], "Another"]
      ])
    end
  end
end

describe "#array_position_sum" do
  let(:first) { [1, 2, 3] }
  let(:second) { [4, 5, 6] }

  it "sums two arrays by position" do
    expect(array_position_sum(first, second)).to eq(
      [5, 7, 9]
    )
  end
end
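For comparison, here is the same grouping and element-wise summing sketched in Python. This is only an illustration of the logic under the same assumptions as the Ruby answer (group on the last element, attribute the summed values to the last row of each group); the names are my own:

from collections import OrderedDict

def nested_match_sum(data):
    # Group rows by their last element, preserving first-seen order.
    groups = OrderedDict()
    for row in data:
        groups.setdefault(row[-1], []).append(row)
    result = []
    for rows in groups.values():
        # Element-wise sum of the index-1 lists of every row in the group.
        summed = [sum(values) for values in zip(*(row[1] for row in rows))]
        last = rows[-1]
        result.append([last[0], summed, last[2]])
    return result

data = [["abc", [0.0, 1.0, 2.0, 3.0], "Testing"],
        ["efg", [1.0, 2.0, 3.0, 4.0], "Testing"]]
print(nested_match_sum(data))  # [['efg', [1.0, 3.0, 5.0, 7.0], 'Testing']]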
It's possible to print a full array in the console with:
import sys
import numpy
numpy.set_printoptions(threshold=sys.maxsize)
but is there also an option to export a kind of "number chart image"? e.g.
import numpy as np
numberchart = np.arange(100)
WANTED_RESULT.png
I plotted some kind of a heatmap with matplotlib, but I am looking for an image file format like .png:
import matplotlib.pyplot as plt

harvest = np.array([[0.8, 2.4, 2.5, 3.9, 0.0, 4.0, 0.0],
                    [2.4, 0.0, 4.0, 1.0, 2.7, 0.0, 0.0],
                    [1.1, 2.4, 0.8, 4.3, 1.9, 4.4, 0.0],
                    [0.6, 0.0, 0.3, 0.0, 3.1, 0.0, 0.0],
                    [0.7, 1.7, 0.6, 2.6, 2.2, 6.2, 0.0],
                    [1.3, 1.2, 0.0, 0.0, 0.0, 3.2, 5.1],
                    [0.1, 2.0, 0.0, 1.4, 0.0, 1.9, 6.3]])

fig, ax = plt.subplots()
im = ax.imshow(harvest)

y, x = harvest.shape
ax.set_xticks(np.arange(x))
ax.set_yticks(np.arange(y))
plt.setp(ax.get_xticklabels())  # , rotation=45, ha="right",
                                # rotation_mode="anchor")

# Loop over data dimensions and create text annotations.
for i in range(y):
    for j in range(x):
        text = ax.text(j, i, harvest[i, j],
                       ha="center", va="center", color="w")

ax.set_title("NumberChart")
fig.tight_layout()
plt.show()
I made some minor changes to your code. It is more of a workaround, but I believe it does what you want:
import numpy as np
import matplotlib.pyplot as plt

harvest = np.array([[0.8, 2.4, 2.5, 3.9, 0.0, 4.0, 0.0],
                    [2.4, 0.0, 4.0, 1.0, 2.7, 0.0, 0.0],
                    [1.1, 2.4, 0.8, 4.3, 1.9, 4.4, 0.0],
                    [0.6, 0.0, 0.3, 0.0, 3.1, 0.0, 0.0],
                    [0.7, 1.7, 0.6, 2.6, 2.2, 6.2, 0.0],
                    [1.3, 1.2, 0.0, 0.0, 0.0, 3.2, 5.1],
                    [0.1, 2.0, 0.0, 1.4, 0.0, 1.9, 6.3]])

fig, ax = plt.subplots()

# Draw an all-white background instead of the heatmap colors.
im = ax.imshow(harvest * 0, cmap="Greys")

y, x = harvest.shape
ax.set_xticks(np.arange(x))
ax.set_yticks(np.arange(y))
plt.setp(ax.get_xticklabels())  # , rotation=45, ha="right",
                                # rotation_mode="anchor")

# Loop over data dimensions and create text annotations.
for i in range(y):
    for j in range(x):
        text = ax.text(j, i, harvest[i, j],
                       ha="center", va="center", color="k")

# Minor ticks at the cell borders, used only to draw the grid lines.
ax.set_xticks(np.arange(-.5, x, 1), minor=True)
ax.set_yticks(np.arange(-.5, y, 1), minor=True)

# Hide the tick marks and labels on both axes.
plt.tick_params(
    axis='x',
    which='both',
    bottom=False,
    top=False,
    labelbottom=False)
plt.tick_params(
    axis='y',
    which='both',
    left=False,
    right=False,
    labelleft=False)

ax.grid(which='minor', color='k', linestyle='-', linewidth=1)

ax.set_title("NumberChart")
fig.tight_layout()
plt.show()
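Since the goal is a .png file rather than an interactive window, the figure can also be written straight to disk; fig.savefig is standard matplotlib, and the filename and dpi below are just example values:

# Save the annotated grid to a PNG file instead of (or in addition to) showing it.
fig.savefig("numberchart.png", dpi=200, bbox_inches="tight")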
For example, if I have a matrix:
import org.apache.spark.mllib.linalg.{Matrices, Matrix}
// Create a dense matrix ((1.0, 2.0), (3.0, 4.0), (5.0, 6.0))
val dm: Matrix = Matrices.dense(3, 2, Array(1.0, 3.0, 5.0, 2.0, 4.0, 6.0))
dm:
1.0 2.0
3.0 4.0
5.0 6.0
If I want to get element (1, 2) of dm, which is 2.0, what should I do?
I searched the internet and could not find a proper API.
Perhaps this is useful:
import org.apache.spark.mllib.linalg.{Matrices => OldMatrices, Matrix => OldMatrix}
// Create a dense matrix ((1.0, 2.0), (3.0, 4.0), (5.0, 6.0))
val dm: OldMatrix = OldMatrices.dense(3, 2, Array(1.0, 3.0, 5.0, 2.0, 4.0, 6.0))
println(dm)
/**
* 1.0 2.0
* 3.0 4.0
* 5.0 6.0
*/
// Gets the (i, j)-th element; indices start from 0
println(dm.apply(0, 1))
// 2.0
I am wondering why this matrix multiplication is not working in my Scala program, compared with the result I am getting from Python. I am using the standard matrix multiplication algorithm, where I have two matrices a = n x m and b = m x p. The code that I have written for this algorithm is (each matrix is a 2D array of doubles):
def dot(other: Matrix2D): Matrix2D = {
  if (this.shape(1) != other.shape(0)) {
    throw new IndexOutOfBoundsException("Matrices were not the right shape! [" + this.shape(1) + " != " + other.shape(0) + "]")
  }
  val n = this.shape(1) // number of columns; shape(0) returns the number of rows
  var a = matrix.clone()
  var b = other.matrix.clone()
  var c = Array.ofDim[Double](this.shape(0), other.shape(1))
  for (i <- 0 until c.length) {
    for (j <- 0 until c(0).length) {
      for (k <- 0 until n) {
        c(i)(j) += a(i)(k) * b(k)(j)
      }
    }
  }
  Matrix2D(c)
}
The input I put into both the Scala and Python code is:
a = [[1.0 1.0 1.0 1.0 0.0 0.0 0.0]
[1.0 1.0 0.0 1.0 0.0 0.0 0.0 ]
[1.0 1.0 1.0 1.0 1.0 1.0 1.0 ]
[1.0 0.0 0.0 0.0 1.0 1.0 1.0 ]
[1.0 0.0 0.0 0.0 1.0 0.0 1.0 ]
[1.0 0.0 0.0 0.0 0.0 0.0 0.0 ]]
b = [[0.0 0.0 0.0 ]
[0.0 -0.053430398509053074 0.021149859549078387 ]
[0.0 -0.010785871994186721 0.04942555653681449 ]
[0.0 0.04849323245519227 -0.0393881161667335 ]
[0.0 -0.03871752673999099 0.05228579488821056 ]
[0.0 0.07935206375269452 0.06511344235965408 ]
[0.0 -0.02462677123918247 1.723607966539059E-4 ]]
The output I receive from this function is:
[[0.0 -0.015723038048047533 0.031187299919159375]
[0.0 -0.0049371660538608045 -0.018238256617655116]
[0.0 2.84727725473527E-4 0.14875889796367792 ]
[0.0 0.01600776577352106 0.11757159804451854 ]
[0.0 -0.06334429797917346 0.05245815568486446 ]
[0.0 0.0 0.0 ]]
compared to Python's numpy.dot result:
[[ 0. -0.01572304 0.0311873 ]
[ 0. -0.00493717 -0.01823826]
[ 0. -0.01572304 0.0311873 ]
[ 0. 0.08912777 0.07801112]
[ 0. 0.00977571 0.01289768]
[ 0. 0.08912777 0.07801112]]
I am wondering why this algorithm doesn't produce the output matrix I expect... I've been messing with the for loops and have not been able to figure out what's wrong.
Can you show your Python code?
I tried this in NumPy and get the same as your Scala code:
import numpy as np

a = np.array([[1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0],
              [1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0],
              [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0],
              [1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0],
              [1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0],
              [1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]])

b = np.array([[0.0, 0.0, 0.0],
              [0.0, -0.053430398509053074, 0.021149859549078387],
              [0.0, -0.010785871994186721, 0.04942555653681449],
              [0.0, 0.04849323245519227, -0.0393881161667335],
              [0.0, -0.03871752673999099, 0.05228579488821056],
              [0.0, 0.07935206375269452, 0.06511344235965408],
              [0.0, -0.02462677123918247, 1.723607966539059E-4]])

print(a.dot(b))
prints:
[[ 0. -0.01572304 0.0311873 ]
[ 0. -0.00493717 -0.01823826]
[ 0. 0.00028473 0.1487589 ]
[ 0. 0.01600777 0.1175716 ]
[ 0. -0.0633443 0.05245816]
[ 0. 0. 0. ]]
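For what it's worth, a direct Python translation of the triple loop (a minimal sketch with my own helper name, reusing the a and b arrays defined above) gives the same numbers as a.dot(b), which suggests the Scala implementation itself is correct and the discrepancy lies in whatever produced the other reference output:

# Plain triple-loop matrix multiplication, mirroring the Scala dot method above.
def loop_dot(a, b):
    n = len(b)  # shared dimension: columns of a == rows of b
    c = [[0.0] * len(b[0]) for _ in range(len(a))]
    for i in range(len(c)):
        for j in range(len(c[0])):
            for k in range(n):
                c[i][j] += a[i][k] * b[k][j]
    return c

print(np.allclose(loop_dot(a.tolist(), b.tolist()), a.dot(b)))  # True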
So I'm writing some WebGL, no THREE.JS. I'm trying to render a cube, with a single texture mapped to every face of the cube. In my code where I set up my attributes I have something like:
var vertices = new Float32Array([
// x, y, z u, v
1.0, 1.0, 1.0, /* v0 right top front */ 1.0, 1.0,
-1.0, 1.0, 1.0, /* v1 left top front */ 0.0, 1.0,
-1.0, -1.0, 1.0, /* v2 left bottom front */ 0.0, 0.0,
1.0, -1.0, 1.0, /* v3 right bottom front */ 1.0, 0.0,
// u's switch for back faces
1.0, -1.0, -1.0, /* v4 right bottom back */ 0.0, 0.0,
1.0, 1.0, -1.0, /* v5 right top back */ 0.0, 1.0,
-1.0, 1.0, -1.0, /* v6 left top back */ 1.0, 1.0,
-1.0, -1.0, -1.0, /* v7 left bottom back */ 1.0, 0.0
]);
// the pairs of vertex triples
// 3 vertices = 1 triangle
// 2 triangles = 1 quad = 1 face
var indices = new Uint8Array([
0, 1, 2, 0, 2, 3, // front
0, 3, 4, 0, 4, 5, // right
//0, 5, 6, 0, 6, 1, // top
1, 6, 7, 1, 7, 2, // left
//7, 4, 3, 7, 3, 2, // bottom
4, 7, 6, 4, 6, 5 // back
]);
I wind up with a cube with the texture reflected for the right and left faces, which is fine. For the top and the bottom, I have no faces because of the two commented out lines. When I comment them in, the faces don't have the texture sampled as I expected. Sure enough, if you look at the indices for the top face for instance, and the UV coordinates that they would have:
index | u | v
0 | 1.0 | 1.0
1 | 0.0 | 1.0
5 | 0.0 | 1.0
6 | 1.0 | 1.0
So we can see that indices 1 and 5 (and also 0 and 6) have the same UV coordinates, so of course it won't look right on a quad.
I've been trying to draw out on paper, but I can't change the UV's without messing up another face's coordinates. What I'm wondering is: is it possible to use ELEMENT_ARRAY_BUFFERs to map UV coordinates on a cube, or do I need to use more data and draw using an ARRAY_BUFFER?
== EDIT ==
Looks like a dupe: OpenGL ES - texture map all faces of an 8 vertex cube?
Hate to answer my own question, but based on the hint here, I was able to get it to work by using 24 vertices instead of 8. I can use 24 instead of 36 because I'm repeating indices in my ELEMENT_ARRAY_BUFFER (something I wouldn't be able to do with just an ARRAY_BUFFER).
var vertices = new Float32Array([
// x, y, z, u, v
// front face (z: +1)
1.0, 1.0, 1.0, 1.0, 1.0, // top right
-1.0, 1.0, 1.0, 0.0, 1.0, // top left
-1.0, -1.0, 1.0, 0.0, 0.0, // bottom left
1.0, -1.0, 1.0, 1.0, 0.0, // bottom right
// right face (x: +1)
1.0, 1.0, -1.0, 1.0, 1.0, // top right
1.0, 1.0, 1.0, 0.0, 1.0, // top left
1.0, -1.0, 1.0, 0.0, 0.0, // bottom left
1.0, -1.0, -1.0, 1.0, 0.0, // bottom right
// top face (y: +1)
1.0, 1.0, -1.0, 1.0, 1.0, // top right
-1.0, 1.0, -1.0, 0.0, 1.0, // top left
-1.0, 1.0, 1.0, 0.0, 0.0, // bottom left
1.0, 1.0, 1.0, 1.0, 0.0, // bottom right
// left face (x: -1)
-1.0, 1.0, 1.0, 1.0, 1.0, // top right
-1.0, 1.0, -1.0, 0.0, 1.0, // top left
-1.0, -1.0, -1.0, 0.0, 0.0, // bottom left
-1.0, -1.0, 1.0, 1.0, 0.0, // bottom right
// bottom face (y: -1)
1.0, -1.0, 1.0, 1.0, 1.0, // top right
-1.0, -1.0, 1.0, 0.0, 1.0, // top left
-1.0, -1.0, -1.0, 0.0, 0.0, // bottom left
1.0, -1.0, -1.0, 1.0, 0.0, // bottom right
// back face (z: -1)
-1.0, 1.0, -1.0, 1.0, 1.0, // top right
1.0, 1.0, -1.0, 0.0, 1.0, // top left
1.0, -1.0, -1.0, 0.0, 0.0, // bottom left
-1.0, -1.0, -1.0, 1.0, 0.0 // bottom right
]);
// the pairs of vertex triples
// 3 vertices = 1 triangle
// 2 triangles = 1 quad = 1 face
var indices = new Uint8Array([
0, 1, 2, 0, 2, 3,
4, 5, 6, 4, 6, 7,
8, 9, 10, 8, 10, 11,
12, 13, 14, 12, 14, 15,
16, 17, 18, 16, 18, 19,
20, 21, 22, 20, 22, 23
]);
The number of vertices can be further reduced, because some vertices share the same XYZ and UV coordinates, though if I add normals to my interleaved buffer later (or any other attribute) I may need the repeated values.
array = [0, 0.3, 0.4, 0.2, 0.6]
hash = {
  "key1" => array[0..2],
  "key2" => array[0..3],
  "key3" => array,
  "key4" => array,
  "key5" => array,
  "key6" => array,
  "key7" => array
}
Is there a way I can remove the duplication by doing something like
hash = {
  "key1" => array[0..2],
  "key2" => array[0..3],
  %(key3, key4, key5, key6, key7).each {|ele| ele => array}
}
Try
array = [0, 0.3, 0.4, 0.2, 0.6]
hash = {
  "key1" => array[0..2],
  "key2" => array[0..3]
}
%w(key3 key4 key5 key6 key7).each {|ele| hash[ele] = array}
array = [0, 0.3, 0.4, 0.2, 0.6]
h = Hash[*Array.new(7) {|x| ["key#{x+1}", array[0..(x < 2 ? x + 2 : -1)]]}.flatten(1)]
h # => {"key1" => [0, 0.3, 0.4], "key2" => [0, 0.3, 0.4, 0.2], ...}
Here are a couple of variations on a theme. They work with 1.8.7 or 1.9.2; insertion order is maintained in 1.9.2 because its hashes preserve insertion order:
require 'pp'
array = [0, 0.3, 0.4, 0.2, 0.6]
hash = ('key3'..'key7').entries.inject({}) { |m, e| m[e] = array; m }
hash.merge!('key1' => array[0..2], 'key2' => array[0..3])
pp hash
puts '-' * 40
hash = {
  'key1' => array[0..2],
  'key2' => array[0..3]
}.merge(('key3'..'key7').entries.inject({}) { |m, e| m[e] = array; m })
pp hash
puts '-' * 40
# I think this is the most readable/maintainable
hash = {
  'key1' => array[0..2],
  'key2' => array[0..3]
}
('key3'..'key7').entries.inject(hash) { |m, e| m[e] = array; m }
pp hash
Which outputs:
# >> {"key3"=>[0, 0.3, 0.4, 0.2, 0.6],
# >> "key4"=>[0, 0.3, 0.4, 0.2, 0.6],
# >> "key5"=>[0, 0.3, 0.4, 0.2, 0.6],
# >> "key6"=>[0, 0.3, 0.4, 0.2, 0.6],
# >> "key7"=>[0, 0.3, 0.4, 0.2, 0.6],
# >> "key1"=>[0, 0.3, 0.4],
# >> "key2"=>[0, 0.3, 0.4, 0.2]}
# >> ----------------------------------------
# >> {"key1"=>[0, 0.3, 0.4],
# >> "key2"=>[0, 0.3, 0.4, 0.2],
# >> "key3"=>[0, 0.3, 0.4, 0.2, 0.6],
# >> "key4"=>[0, 0.3, 0.4, 0.2, 0.6],
# >> "key5"=>[0, 0.3, 0.4, 0.2, 0.6],
# >> "key6"=>[0, 0.3, 0.4, 0.2, 0.6],
# >> "key7"=>[0, 0.3, 0.4, 0.2, 0.6]}
# >> ----------------------------------------
# >> {"key1"=>[0, 0.3, 0.4],
# >> "key2"=>[0, 0.3, 0.4, 0.2],
# >> "key3"=>[0, 0.3, 0.4, 0.2, 0.6],
# >> "key4"=>[0, 0.3, 0.4, 0.2, 0.6],
# >> "key5"=>[0, 0.3, 0.4, 0.2, 0.6],
# >> "key6"=>[0, 0.3, 0.4, 0.2, 0.6],
# >> "key7"=>[0, 0.3, 0.4, 0.2, 0.6]}
Here is another version:
hash = {
  "key1" => array[0..2],
  "key2" => array[0..3]
}.tap { |h| ("key3".."key7").each { |k| h[k] = array } }