Where am I going wrong with my bounding box function?

I am writing a bounding box module for the octree library. You can find my branch here. In the function below, I try to make the implicit bounding boxes of an Octree explicit. The problem is that not all of the resulting bounding boxes are valid.
This is the function in question, followed by a way to replicate the problem in ghci.
explicateMBB :: (BBox3, Octree a) -> [BBox3]
explicateMBB (mbb, (Leaf _)) = [mbb]
explicateMBB (mbb, (Node { split = split',
                           nwu = nwu',
                           nwd = nwd',
                           neu = neu',
                           ned = ned',
                           swu = swu',
                           swd = swd',
                           seu = seu',
                           sed = sed'
                         })) =
  mbb : concatMap explicateMBB octList
  where
    octList  = zip boxList children
    boxList  = [swdBox, sedBox, nwdBox, nedBox, swuBox, seuBox, nwuBox, neuBox]
    children = [swd', sed', nwd', ned', swu', seu', nwu', neu']
    swdBox = bound_corners swdCorner neuCorner
      where
        swdCorner = Vector3 (minX mbb) (minY mbb) (minZ mbb)
        neuCorner = Vector3 (v3x split') (v3y split') (v3z split')
    sedBox = bound_corners swdCorner neuCorner
      where
        swdCorner = Vector3 (v3x split') (minY mbb) (minZ mbb)
        neuCorner = Vector3 (maxX mbb) (v3y split') (minZ mbb)
    nwdBox = bound_corners swdCorner neuCorner
      where
        swdCorner = Vector3 (minX mbb) (v3y split') (minZ mbb)
        neuCorner = Vector3 (v3x split') (maxY mbb) (v3z split')
    nedBox = bound_corners swdCorner neuCorner
      where
        swdCorner = Vector3 (v3x split') (v3y split') (minZ mbb)
        neuCorner = Vector3 (maxX mbb) (maxY mbb) (v3z split')
    swuBox = bound_corners swdCorner neuCorner
      where
        swdCorner = Vector3 (minX mbb) (minY mbb) (v3z split')
        neuCorner = Vector3 (v3x split') (v3y split') (maxZ mbb)
    seuBox = bound_corners swdCorner neuCorner
      where
        swdCorner = Vector3 (v3x split') (minY mbb) (v3z split')
        neuCorner = Vector3 (maxX mbb) (v3y split') (maxZ mbb)
    nwuBox = bound_corners swdCorner neuCorner
      where
        swdCorner = Vector3 (minX mbb) (v3y split') (v3z split')
        neuCorner = Vector3 (v3x split') (maxY mbb) (maxZ mbb)
    neuBox = bound_corners swdCorner neuCorner
      where
        swdCorner = Vector3 (v3x split') (v3y split') (v3z split')
        neuCorner = Vector3 (maxX mbb) (maxY mbb) (maxZ mbb)
To replicate the problem:
git clone https://github.com/mlitchard/octree.git
git checkout MBB
stack ghci
In ghci, do the following:
:m + Data.List Data.Vector.Class System.Random System.Random.Shuffle Data.BoundingBox.B3
let infinity = (read "Infinity") :: Double
let swdCorner = Vector3 (-infinity) (-infinity) (-infinity)
let neuCorner = Vector3 (infinity) (infinity) (infinity)
let rbb = bound_corners swdCorner neuCorner
xGen <- getStdGen
yGen <- newStdGen
zGen <- newStdGen
let xPoints = shuffle' [-256 .. 256] 513 xGen
let yPoints = shuffle' [-256 .. 256] 513 yGen
let zPoints = shuffle' [-256 .. 256] 513 zGen
let xPoints' = map fromInteger xPoints :: [Double]
let yPoints' = map fromInteger yPoints :: [Double]
let zPoints' = map fromInteger zPoints :: [Double]
let tup513 = zip3 xPoints' yPoints' zPoints'
let construct_vect = (\(x,y,z) -> Vector3 x y z)
let vect513 = map construct_vect tup513
let pre_oct513 = zip vect513 [1 .. 513]
let octree513 = fromList pre_oct513
length $ filter (== False) $ map isValidMBB $ explicateMBB (rbb,octree513)
The answer will be 9, but it should be 0.
I feel like one of the fooBox where clauses is wrong, but I have gone over each one several times and I am not seeing which one it is.
If you need a visual aid like I did, I found this picture helpful. My sample does 2 subdivisions.
Any insight into what is going wrong would be appreciated.

It's your second where clause (the one for sedBox):
  where
    swdCorner = Vector3 (v3x split') (minY mbb) (minZ mbb)
    neuCorner = Vector3 (maxX mbb) (v3y split') (minZ mbb)
should be:
  where
    swdCorner = Vector3 (v3x split') (minY mbb) (minZ mbb)
    neuCorner = Vector3 (maxX mbb) (v3y split') (v3z split')   -- <-- v3z split'
With minZ mbb in the z slot, sedBox's upper corner has the same z coordinate as its lower corner, so the box is flat in z. That degenerate box is then passed down as the mbb for the sed' subtree, which is also why exactly 9 boxes (the flat sedBox plus the 8 child boxes derived from it) come back invalid.
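For intuition, every child box takes either the low half [min, split] or the high half [split, max] along each axis, so the z coordinate of a child's upper corner is always either the split z or the parent's maxZ, never the parent's minZ. Here is a small, purely illustrative sketch of that subdivision pattern (Python, not part of the octree library; child_boxes is a made-up helper):

    # Illustrative only: each octant picks the low half [min, split] or the
    # high half [split, max] on each axis, so every child has positive extent.
    from itertools import product

    def child_boxes(mins, maxs, split):
        """Yield the 8 octant boxes of the box (mins, maxs) split at 'split'."""
        for choice in product((0, 1), repeat=3):   # low/high choice per axis
            lo = tuple(mins[a]  if h == 0 else split[a] for a, h in enumerate(choice))
            hi = tuple(split[a] if h == 0 else maxs[a]  for a, h in enumerate(choice))
            yield lo, hi

    for lo, hi in child_boxes((-1, -1, -1), (1, 1, 1), (0, 0, 0)):
        assert all(l < h for l, h in zip(lo, hi))  # never a flat box
        print(lo, hi)

Checking each fooBox where clause against this low/high pattern is what makes the stray minZ mbb in sedBox stand out.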

Related

Remove artifacts from mediapipe rendering program

I am trying to get a simple face mask, but I cannot pinpoint the cause of, and get rid of, tiny artifacts in the textured output.
Example image with a few artifacts marked:
Code:
import cv2
import mediapipe as mp
import triangulation_media_pipe as tmp
import numpy as np

mp_drawing = mp.solutions.drawing_utils
mp_face_mesh = mp.solutions.face_mesh
face = "face_textures/yash.jpg"

def load_base_img(face_mesh, image_file_name):
    image = cv2.imread(image_file_name)
    results = face_mesh.process(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
    return {"img": image, "landmarks": results}

def transform_landmarks_from_tf_to_ocv(keypoints, face_width, face_height):
    landmark_list = []
    if (keypoints.multi_face_landmarks != None):
        for face_landmarks in keypoints.multi_face_landmarks:
            for l in face_landmarks.landmark:
                pt = mp_drawing._normalized_to_pixel_coordinates(l.x, l.y,
                                                                 face_width, face_height)
                landmark_list.append(pt)
    return landmark_list

def main():
    # For webcam input:
    face_mesh = mp_face_mesh.FaceMesh()
    base_face_handler, landmark_base_ocv, base_input_image = process_base_face_mesh(face_mesh, face)
    cap = cv2.VideoCapture(2)
    while cap.isOpened():
        _, webcam_img = cap.read()
        image_rows, image_cols, _ = webcam_img.shape
        results = face_mesh.process(webcam_img)
        landmark_target_ocv = transform_landmarks_from_tf_to_ocv(results, image_cols, image_rows)
        # Draw the face mesh annotations on the image.
        image = webcam_img.copy()
        img2_new_face = np.zeros_like(image)
        if results.multi_face_landmarks:
            if True:
                for i in range(0, int(len(tmp.TRIANGULATION) / 3)):
                    triangle_index = [tmp.TRIANGULATION[i * 3],
                                      tmp.TRIANGULATION[i * 3 + 1],
                                      tmp.TRIANGULATION[i * 3 + 2]]
                    tbas1 = landmark_base_ocv[triangle_index[0]]
                    tbas2 = landmark_base_ocv[triangle_index[1]]
                    tbas3 = landmark_base_ocv[triangle_index[2]]
                    triangle1 = np.array([tbas1, tbas2, tbas3])
                    rect1 = cv2.boundingRect(triangle1)
                    (x1, y1, w1, h1) = rect1
                    cropped_triangle = base_input_image[y1: y1 + h1, x1: x1 + w1]
                    cropped_tr1_mask = np.zeros((h1, w1), np.uint8)
                    points = np.array([[tbas1[0] - x1, tbas1[1] - y1],
                                       [tbas2[0] - x1, tbas2[1] - y1],
                                       [tbas3[0] - x1, tbas3[1] - y1]])
                    cv2.fillConvexPoly(cropped_tr1_mask, points, 255)
                    ttar1 = landmark_target_ocv[triangle_index[0]]
                    ttar2 = landmark_target_ocv[triangle_index[1]]
                    ttar3 = landmark_target_ocv[triangle_index[2]]
                    triangle2 = np.array([ttar1, ttar2, ttar3])
                    rect2 = cv2.boundingRect(triangle2)
                    (x2, y2, w2, h2) = rect2
                    cropped_tr2_mask = np.zeros((h2, w2), np.uint8)
                    points2 = np.array([[ttar1[0] - x2, ttar1[1] - y2],
                                        [ttar2[0] - x2, ttar2[1] - y2],
                                        [ttar3[0] - x2, ttar3[1] - y2]])
                    cv2.fillConvexPoly(cropped_tr2_mask, points2, 255)
                    # Warp triangles
                    points = np.float32(points)
                    points2 = np.float32(points2)
                    M = cv2.getAffineTransform(points, points2)
                    warped_triangle = cv2.warpAffine(cropped_triangle, M, (w2, h2))
                    warped_triangle = cv2.bitwise_and(warped_triangle, warped_triangle, mask=cropped_tr2_mask)
                    # Reconstructing destination face
                    img2_new_face_rect_area = img2_new_face[y2: y2 + h2, x2: x2 + w2]
                    img2_new_face_rect_area_gray = cv2.cvtColor(img2_new_face_rect_area, cv2.COLOR_BGR2GRAY)
                    _, mask_triangles_designed = cv2.threshold(img2_new_face_rect_area_gray, 0, 255,
                                                               cv2.THRESH_BINARY_INV)
                    warped_triangle = cv2.bitwise_and(warped_triangle, warped_triangle,
                                                      mask=mask_triangles_designed)
                    img2_new_face_rect_area = cv2.add(img2_new_face_rect_area, warped_triangle)
                    img2_new_face[y2: y2 + h2, x2: x2 + w2] = img2_new_face_rect_area
        cv2.imshow('mask', img2_new_face)
        key = cv2.waitKey(5)
    face_mesh.close()
    cap.release()

def process_base_face_mesh(face_mesh, image_file):
    base_face_handler = load_base_img(face_mesh, image_file)
    base_input_image = base_face_handler["img"].copy()
    image_rows, image_cols, _ = base_face_handler["img"].shape
    landmark_base_ocv = transform_landmarks_from_tf_to_ocv(base_face_handler["landmarks"], image_cols, image_rows)
    return base_face_handler, landmark_base_ocv, base_input_image

if __name__ == "__main__":
    main()
triangulation_media_pipe.py
The code is a slightly modified version of this
A possible solution I considered for getting rid of the artifacts is to just mask those areas with a blurred version of the same image, but I want the pixel values to stay as close as possible to the real image values.
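To sketch that masking idea (purely illustrative Python, not part of the program above; the helper name fill_artifacts_with_blur and the hull_points argument are hypothetical, and it assumes the artifacts appear as black holes inside the composited face img2_new_face):

    import cv2
    import numpy as np

    def fill_artifacts_with_blur(face_img, hull_points, blur_ksize=7):
        """Fill black artifact pixels inside the face hull from a blurred copy."""
        blurred = cv2.GaussianBlur(face_img, (blur_ksize, blur_ksize), 0)
        # Pixels that stayed black after triangle compositing are treated as artifacts.
        hole_mask = cv2.inRange(face_img, (0, 0, 0), (0, 0, 0))
        # Restrict the fix to the face region so the black background is left alone.
        face_mask = np.zeros(face_img.shape[:2], np.uint8)
        cv2.fillConvexPoly(face_mask, np.int32(hull_points), 255)
        hole_mask = cv2.bitwise_and(hole_mask, face_mask)
        out = face_img.copy()
        out[hole_mask > 0] = blurred[hole_mask > 0]
        return out

Here hull_points could be, for example, the convex hull of the target landmarks; a median blur instead of a Gaussian would keep the filled pixels even closer to nearby real image values.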

How to keep only a user-defined quantity of labels?

First of all, sorry for the likely dumb question, but I am having problems with the label.delete() function used to keep a limited number of labels on the chart while deleting older ones. I used this code as a reference: https://www.tradingview.com/pine-script-docs/en/v4/concepts/Text_and_shapes.html#deleting-labels
I applied it to a function used to create pivot labels, but it does not seem to work: it deletes all of the newer labels it creates without deleting the older ones. Here is the code:
//@version=5
indicator(title='RSI Pivot', precision=0, max_labels_count=500)
qtyLabelsInput = input.int(20, 'Labels to keep', minval=0)
myRSI = ta.rsi(close, 20)
plot(myRSI)

drawLabel(_offset, _pivot, _style, _size, _text, _textcolor) =>
    if (_pivot)
        label.new(bar_index[_offset], _pivot, style=_style, size=_size, text=_text, textcolor=_textcolor)
        if array.size(label.all) > qtyLabelsInput
            label.delete(array.get(label.all, 0))

leftLenH = input.int(title='Pivot High main', defval=2, minval=1, inline='Pivot High main')
rightLenH = input.int(title='/', defval=2, minval=1, inline='Pivot High main')
leftLenL = input.int(title='Pivot Low main', defval=2, minval=1, inline='Pivot Low main')
rightLenL = input.int(title='/', defval=2, minval=1, inline='Pivot Low main')
ph = ta.pivothigh(myRSI, leftLenH, rightLenH)
pl = ta.pivotlow(myRSI, leftLenL, rightLenL)
//---------------------------------------------------------------------------------------------------------------MAIN label
drawLabel(rightLenH, ph, label.style_none, size.tiny, "T", color.red)
drawLabel(rightLenL, pl, label.style_none, size.tiny, "B", color.green)
EDIT CODE 2:
//@version=5
indicator(title='CMF Pivot', precision=0, max_labels_count=500)
qtyLabelsInput = input.int(20, 'Labels to keep', minval=0)
myRSI = ta.rsi(close, 20)
plot(myRSI, "RSI")

drawLabel(_offset, _pivot, _style, _size, _text, _textcolor, _y) =>
    // x y y(2)=na? plot location
    var label[] label_array = array.new_label(qtyLabelsInput)
    if (_pivot)
        array.unshift(label_array, label.new(bar_index[_offset], _pivot, style=_style, size=_size, text=_text, textcolor=_textcolor, y=_y))
        label.delete(array.pop(label_array))

//cmf code
var cumVol = 0.
cumVol += nz(volume)
if barstate.islast and cumVol == 0
    runtime.error("No volume is provided by the data vendor.")
length = input.int(20, minval=1)
ad = close==high and close==low or high==low ? 0 : ((2*close-low-high)/(high-low))*volume
cmf = math.sum(ad, length) / math.sum(volume, length)
//
leftLenH = input.int(title='Pivot High main', defval=2, minval=1, inline='Pivot High main')
rightLenH = input.int(title='/', defval=2, minval=1, inline='Pivot High main')
leftLenL = input.int(title='Pivot Low main', defval=2, minval=1, inline='Pivot Low main')
rightLenL = input.int(title='/', defval=2, minval=1, inline='Pivot Low main')
ph = ta.pivothigh(cmf, leftLenH, rightLenH)
pl = ta.pivotlow(cmf, leftLenL, rightLenL)
//---------------------------------------------------------------------------------------------------------------MAIN label
//drawLabel(rightLenH, ph, label.style_none, size.tiny, "T", color.red, myRSI)
//drawLabel(rightLenL, pl, label.style_none, size.tiny, "B", color.green, myRSI)
plotchar(ph, location=location.top, size=size.tiny, text="H", textcolor=color.red, show_last=15, color=#000000)
plotchar(pl, location=location.bottom, size=size.tiny, text="L", textcolor=color.green, show_last=15, color=#000000)
You can keep a fixed number of labels by using a var label array. When your condition is met and you add a new label to the front of the array, you also pop and delete the last label, so the array always holds the same number of labels.
drawLabel(_offset, _pivot, _style, _size, _text, _textcolor) =>
    var label[] label_array = array.new_label(qtyLabelsInput)
    if (_pivot)
        array.unshift(label_array, label.new(bar_index[_offset], _pivot, style=_style, size=_size, text=_text, textcolor=_textcolor))
        label.delete(array.pop(label_array))
Found the solution, thanks again for your help:
drawLabel(_pivot, _y, _style, _size, _text, _textcolor) =>
    var label[] label_array = array.new_label(qtyLabelsInput)
    if _pivot
        array.unshift(label_array, label.new(bar_index[_pivot], y=_y, style=_style, size=_size, text=_text, textcolor=_textcolor))
        label.delete(array.pop(label_array))

Three.js Buffer Geometry Animation

I have a Three.js scene that basically spreads out a bunch of triangles over a given area.
geometry = new THREE.BufferGeometry();
geometry.dynamic = true;
positions = new Float32Array(triangles * 3 * 3);
const normals = new Float32Array(triangles * 3 * 3);
const colors = new Float32Array(triangles * 3 * 3);
const color = new THREE.Color();

const n = 200,
  n2 = n / 2; // triangle's spread distance
const d = 1000,
  d2 = d / 2; // individual triangle size

const pA = new THREE.Vector3();
const pB = new THREE.Vector3();
const pC = new THREE.Vector3();
const cb = new THREE.Vector3();
const ab = new THREE.Vector3();

for (let i = 0; i < positions.length; i += 9) {
  // position
  const x = Math.random() * n - n2;
  const y = Math.random() * n - n2;
  const z = Math.random() * n - n2;

  const ax = x + Math.random() * d - d2;
  const ay = y + Math.random() * d - d2;
  const az = z + Math.random() * d - d2;
  const bx = x + Math.random() * d - d2;
  const by = y + Math.random() * d - d2;
  const bz = z + Math.random() * d - d2;
  const cx = x + Math.random() * d - d2;
  const cy = y + Math.random() * d - d2;
  const cz = z + Math.random() * d - d2;

  positions[i] = ax;
  positions[i + 1] = ay;
  positions[i + 2] = az;
  positions[i + 3] = bx;
  positions[i + 4] = by;
  positions[i + 5] = bz;
  positions[i + 6] = cx;
  positions[i + 7] = cy;
  positions[i + 8] = cz;

  if (i === 0) console.log(positions);

  // flat face normals
  pA.set(ax, ay, az);
  pB.set(bx, by, bz);
  pC.set(cx, cy, cz);
  cb.subVectors(pC, pB);
  ab.subVectors(pA, pB);
  cb.cross(ab);
  cb.normalize();
  const nx = cb.x;
  const ny = cb.y;
  const nz = cb.z;
  normals[i] = nx;
  normals[i + 1] = ny;
  normals[i + 2] = nz;
  normals[i + 3] = nx;
  normals[i + 4] = ny;
  normals[i + 5] = nz;
  normals[i + 6] = nx;
  normals[i + 7] = ny;
  normals[i + 8] = nz;

  // colors
  const vx = x / n + 0.5;
  const vy = y / n + 0.5;
  const vz = z / n + 0.5;
  color.setRGB(vx, vy, vz);
  colors[i] = color.r;
  colors[i + 1] = color.g;
  colors[i + 2] = color.b;
  colors[i + 3] = color.r;
  colors[i + 4] = color.g;
  colors[i + 5] = color.b;
  colors[i + 6] = color.r;
  colors[i + 7] = color.g;
  colors[i + 8] = color.b;
}

geometry.setAttribute("position", new THREE.BufferAttribute(positions, 3));
geometry.setAttribute("normal", new THREE.BufferAttribute(normals, 3));
geometry.setAttribute("color", new THREE.BufferAttribute(colors, 3));
geometry.computeBoundingSphere();

let material = new THREE.MeshPhongMaterial({
  color: 0xaaaaaa,
  specular: 0xffffff,
  shininess: 250,
  side: THREE.DoubleSide,
  vertexColors: true,
});

mesh = new THREE.Mesh(geometry, material);
scene.add(mesh);
What I would like to do is have the triangles start close together, and expand in every direction randomly.
How can I create an animation loop that updates the triangles' positions?
I have been using this example code from three.js's website:
https://github.com/mrdoob/three.js/blob/master/examples/webgl_interactive_buffergeometry.html
edit: I was able to make the triangles expand with this.mesh.scale.z += 0.005, but the triangles themselves grow as well. Is there a way to hold the triangle size constant but change the area they cover?
You could use morph targets.
Made a codepen available here:
https://codepen.io/cdeep/pen/WNENOmK
The code is inspired by the three.js example:
https://threejs.org/examples/?q=morph#webgl_morphtargets.
While assigning the initial position for each vertex, also assign the final position of where the vertex must end up after expanding.
In essence,
const positions = new Float32Array( triangles * 3 * 3 );
const morphedPositions = new Float32Array(triangles * 3 * 3);
...
...
geometry.morphAttributes.position = [];
geometry.morphAttributes.position[ 0 ] = new THREE.BufferAttribute( morphedPositions, 3);
mesh.morphTargetInfluences[ 0 ] = morphValue;
Animate morphValue to control how far the position attribute has moved toward morphedPositions.

Affine transformation approximation of two triangles using Eigen SVD

In the code below, I am trying to implement the algorithm for affine transformation approximation presented here.
#include <Eigen/Eigen>
#include <iostream>

using namespace Eigen;
using namespace std;

int main(int argc, char **argv)
{
    Vector3f x1 (3.0f, 0.0f, 0.0f);
    Vector3f x2 (0.0f, 2.0f, 0.0f);
    Vector3f x3 (0.0f, 0.0f, 1.0f);

    Vector3f translation(1.0f, -2.0f, 2.0f);

    Vector3f x_bar1 = x1 + translation;
    Vector3f x_bar2 = x2 + translation;
    Vector3f x_bar3 = x3 + translation;

    std::cerr << "x_bar1 = \n" << x_bar1 << std::endl;
    std::cerr << "x_bar2 = \n" << x_bar2 << std::endl;
    std::cerr << "x_bar3 = \n" << x_bar3 << std::endl;

    Vector3f c     = (x1 + x2 + x3) / 3.0f;
    Vector3f c_bar = (x_bar1 + x_bar2 + x_bar3) / 3.0f;

    Vector3f y1, y2, y3, y_bar1, y_bar2, y_bar3;
    y1 = x1 - c;
    y2 = x2 - c;
    y3 = x3 - c;
    y_bar1 = x_bar1 - c_bar;
    y_bar2 = x_bar2 - c_bar;
    y_bar3 = x_bar3 - c_bar;

    Matrix3f H;
    H = y1*y_bar1.transpose() + y2*y_bar2.transpose() + y3*y_bar3.transpose();

    JacobiSVD<Matrix3f> svd(H, ComputeFullU | ComputeFullV);

    Matrix3f R; R = svd.matrixV()*svd.matrixU().transpose();
    Vector3f t; t = c - R*c_bar;

    std::cerr << "R = \n" << R << std::endl;
    std::cerr << "t = \n" << t << std::endl;
}
But I get the wrong answer:
R =
0.836735 -0.244898 -0.489796
-0.244898 0.632653 -0.734694
-0.489796 -0.734694 -0.469388
t =
0.142857
3.71429
1.42857
Is the problem in the implementation or in the algorithm? If so, what is the correction?
You can check your computation using e.g. this SVD-demonstration.
For your example I get
H =
6 -2 -1
-2 2.666666667 -0.666666667
-1 -0.666666667 0.666666667
U =
1 0 0
0 0.957092026 -0.289784149
0 -0.289784149 -0.957092026
V =
1 0 0
0 0.957092026 -0.289784149
0 -0.289784149 -0.957092026
R =
1 0 0
0 1 0
0 0 1
So the algorithm is correct! Probably your H is wrong.
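For a quick numerical cross-check outside Eigen, here is a short NumPy sketch of the same construction (purely illustrative; it is not the original C++, and NumPy's SVD sign conventions can differ from Eigen's JacobiSVD):

    import numpy as np

    x = np.array([[3.0, 0.0, 0.0],
                  [0.0, 2.0, 0.0],
                  [0.0, 0.0, 1.0]])
    x_bar = x + np.array([1.0, -2.0, 2.0])   # pure translation, so R should be the identity

    c, c_bar = x.mean(axis=0), x_bar.mean(axis=0)
    y, y_bar = x - c, x_bar - c_bar

    H = y.T @ y_bar                          # sum_i y_i * y_bar_i^T
    U, s, Vt = np.linalg.svd(H)
    R = Vt.T @ U.T                           # R = V * U^T
    if np.linalg.det(R) < 0:                 # reflection: flip the sign of V's last column
        Vt[-1, :] *= -1
        R = Vt.T @ U.T

    print(H)                                 # matches the H shown above
    print(R)                                 # identity for this example
    print(c_bar - R @ c)                     # recovers the translation (1, -2, 2)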
I will attempt to shove in a Java program derived from your original C++. Based on this I offer the following suggestions:
1) I think you have switched c and c_bar in t = c-R*c_bar;
2) The paper (which you can get freely at http://www.math.pku.edu.cn/teachers/yaoy/Fall2011/arun.pdf) suggests that coplanar points Qi are uncommon, but in fact they are quite likely here: the points have their centroid subtracted, so they sum to the zero vector and are linearly dependent. It is therefore entirely possible that you get a reflection back instead of a rotation, and you then need to switch the sign of the last column of V (i.e. use R = V·diag(1, 1, -1)·Uᵀ when det(V·Uᵀ) = -1), as in equation (18) of the paper and in my Java below.
package uk.co.demon.mcdowella.apachemathuser;

/*
  Affine transformation approximation of two triangles using Eigen SVD

  In the code below, I am trying to implement the algorithm for affine
  transformation approximation presented here
*/
/*
#include <Eigen/Eigen>
#include <iostream>
using namespace Eigen;
using namespace std;
*/
import org.apache.commons.math.linear.ArrayRealVector;
import java.util.Arrays;
import org.apache.commons.math.linear.Array2DRowRealMatrix;
import org.apache.commons.math.linear.SingularValueDecompositionImpl;
import org.apache.commons.math.linear.RealMatrix;
import org.apache.commons.math.linear.RealVector;

public class FindRotation
{
  /** Utility function to return XY' */
  private static RealMatrix xyt(RealVector x, RealVector y)
  {
    Array2DRowRealMatrix xx = new Array2DRowRealMatrix(x.getData());
    return xx.multiply((
      new Array2DRowRealMatrix(y.getData())).transpose());
  }

  // int main(int argc, char **argv)
  public static void main(String[] s)
  {
    // Vector3f x1 (3.0f, 0.0f, 0.0f);
    ArrayRealVector x1 = new ArrayRealVector(new double[]
      {3.0f, 0.0f, 0.0f});
    // Vector3f x2 (0.0f, 2.0f, 0.0f);
    ArrayRealVector x2 = new ArrayRealVector(new double[]
      {0.0f, 2.0f, 0.0f});
    // Vector3f x3 (0.0f, 0.0f, 1.0f);
    ArrayRealVector x3 = new ArrayRealVector(new double[]
      {0.0f, 0.0f, 1.0f});
    // Vector3f translation(1.0f, -2.0f, 2.0f);
    ArrayRealVector translation = new ArrayRealVector(new double[]
      {1.0f, -2.0f, 2.0f});
    Array2DRowRealMatrix rot;
    if (true)
    { // test - do simple rotation
      rot = new Array2DRowRealMatrix(new double[][] {
        new double[] {1.0, 0.0, 0.0},
        new double[] {0.0, 0.0, -1.0},
        new double[] {0.0, 1.0, 0.0},
      });
      System.out.println("Rot determinant is " + rot.getDeterminant());
    }
    else
    {
      rot = new Array2DRowRealMatrix(new double[][] {
        new double[] {1.0, 0.0, 0.0},
        new double[] {0.0, 1.0, 0.0},
        new double[] {0.0, 0.0, 1.0},
      });
    }
    // Vector3f x_bar1 = x1 + translation;
    RealVector x_bar1 = rot.operate(x1).add(translation);
    // Vector3f x_bar2 = x2 + translation;
    RealVector x_bar2 = rot.operate(x2).add(translation);
    // Vector3f x_bar3 = x3 + translation;
    RealVector x_bar3 = rot.operate(x3).add(translation);
    // std::cerr << "x_bar1 = \n" << x_bar1 << std::endl;
    System.out.println("x_bar1 = ");
    System.out.println(x_bar1);
    // std::cerr << "x_bar2 = \n" << x_bar2 << std::endl;
    System.out.println("x_bar2 = ");
    System.out.println(x_bar2);
    // std::cerr << "x_bar3 = \n" << x_bar3 << std::endl;
    System.out.println("x_bar3 = ");
    System.out.println(x_bar3);
    // Vector3f c = (x1+x2+x3)/3.0f;
    RealVector c = x1.add(x2).add(x3).mapDivide(3.0f);
    // Vector3f c_bar = (x_bar1+x_bar2+x_bar3)/3.0f;
    RealVector c_bar =
      x_bar1.add(x_bar2).add(x_bar3).mapDivide(3.0f);
    // Vector3f y1,y2,y3, y_bar1,y_bar2,y_bar3;
    // y1 = x1 - c;
    RealVector y1 = x1.subtract(c);
    // y2 = x2 - c;
    RealVector y2 = x2.subtract(c);
    // y3 = x3 - c;
    RealVector y3 = x3.subtract(c);
    // y_bar1 = x_bar1 - c_bar;
    RealVector y_bar1 = x_bar1.subtract(c_bar);
    // y_bar2 = x_bar2 - c_bar;
    RealVector y_bar2 = x_bar2.subtract(c_bar);
    // y_bar3 = x_bar3 - c_bar;
    RealVector y_bar3 = x_bar3.subtract(c_bar);
    System.out.println("Y1 " + y1 + " (Q1)");
    System.out.println("Y2 " + y2 + " (Q2)");
    System.out.println("Y3 " + y3 + " (Q3)");
    System.out.println("YB1 " + y_bar1);
    System.out.println("YB2 " + y_bar2);
    System.out.println("YB3 " + y_bar3);
    // Matrix3f H;
    // H = y1*y_bar1.transpose()+y2*y_bar2.transpose()+y3*y_bar3.transpose();
    RealMatrix h = xyt(y1, y_bar1).add(xyt(y2, y_bar2)).add(
      xyt(y3, y_bar3));
    // JacobiSVD<Matrix3f> svd(H, ComputeFullU | ComputeFullV);
    SingularValueDecompositionImpl svd =
      new SingularValueDecompositionImpl(h);
    System.out.println("Singular values are " + Arrays.toString(svd.getSingularValues()));
    // Matrix3f R; R = svd.matrixV()*svd.matrixU().transpose();
    RealMatrix r = svd.getV().multiply(svd.getUT());
    double rDeterminant = r.getDeterminant();
    System.out.println("Determinant " + rDeterminant);
    if (rDeterminant < 0.0)
    { // coplanar case - which is not surprising because Q in the original paper sum to 0.0
      // because centroid is subtracted from each of them. Try alternate r
      RealMatrix changeLastColumn = new Array2DRowRealMatrix(new double[][] {
        new double[] {1.0, 0.0, 0.0},
        new double[] {0.0, 1.0, 0.0},
        new double[] {0.0, 0.0, -1.0}});
      RealMatrix vd = svd.getV().multiply(changeLastColumn);
      r = vd.multiply(svd.getUT());
      rDeterminant = r.getDeterminant();
      System.out.println("Determinant at second go is " + rDeterminant);
    }
    // Vector3f t; t = c-R*c_bar;
    // Note - original transpose seems to be the wrong way round
    RealVector t = c_bar.subtract(r.operate(c));
    // std::cerr << "R = \n" << R << std::endl;
    System.out.println("R = ");
    System.out.println(r);
    // std::cerr << "t = \n" << t << std::endl;
    System.out.println("t = ");
    System.out.println(t);
    // Apply supposed answer
    RealVector z1 = r.operate(x1).add(t);
    RealVector z2 = r.operate(x2).add(t);
    RealVector z3 = r.operate(x3).add(t);
    System.out.println("Z1 " + z1);
    System.out.println("Z2 " + z2);
    System.out.println("Z3 " + z3);
  }
  /*
  But I get wrong answer:
  R =
   0.836735 -0.244898 -0.489796
  -0.244898  0.632653 -0.734694
  -0.489796 -0.734694 -0.469388
  t =
   0.142857
   3.71429
   1.42857
  Is the problem in the implementation or in the algorithm? If so, what is the correction?
  */
}

Help with drawing a wedge with OpenGL ES

I'm trying to do some basic OpenGL ES programming to get started.
I have a drawing function that tries to draw a wedge of a circle. Something is going wrong because it's actually just drawing a circle.
I'm still just trying to grasp the basics of OpenGL ES here. Here's what I have so far.
- (void)drawView
{
    [EAGLContext setCurrentContext:context];
    glBindFramebufferOES(GL_FRAMEBUFFER_OES, viewFramebuffer);
    glViewport(0, 0, 60, 60);

    int i;
    float angle_start = 90;
    float angle_stop = 180;
    int segments = 360;
    float const angle_step = (angle_stop - angle_start) / segments;

    GLfloat *arc_vertices;
    arc_vertices = malloc(2 * sizeof(GLfloat) * (segments + 2));
    arc_vertices[0] = arc_vertices[1] = 0.0;
    for (i = 0; i < segments + 1; i++) {
        arc_vertices[2 + 2*i]     = cos(angle_start + i*angle_step);
        arc_vertices[2 + 2*i + 1] = sin(angle_start + i*angle_step);
    }

    glVertexPointer(2, GL_FLOAT, 0, arc_vertices);
    glEnableClientState(GL_VERTEX_ARRAY);
    glColor4f(1.0f, 0.0f, 0.0f, 1.0f);
    glDrawArrays(GL_TRIANGLE_FAN, 0, segments + 2);

    glBindRenderbufferOES(GL_RENDERBUFFER_OES, viewRenderbuffer);
    [context presentRenderbuffer:GL_RENDERBUFFER_OES];
    free(arc_vertices);
}
sin() and cos() take radians as input:
float angle_start = 90;
float angle_stop = 180;
int segments = 360;
float const angle_step = (angle_stop - angle_start) / segments;

GLfloat* verts = (GLfloat*)malloc(2 * sizeof(GLfloat) * (segments + 2));
unsigned int pos = 0;
verts[pos++] = 0;
verts[pos++] = 0;
float radius = 10;
for (unsigned int i = 0; i < segments; ++i)
{
    float rads = (angle_start + i*angle_step) * (3.14159 / 180);
    verts[pos++] = ( cos( rads ) * radius );
    verts[pos++] = ( sin( rads ) * radius );
}

glVertexPointer(2, GL_FLOAT, 0, verts);
glEnableClientState(GL_VERTEX_ARRAY);
glColor4f(1.0f, 0.0f, 0.0f, 1.0f);
glDrawArrays(GL_TRIANGLE_FAN, 0, segments + 1);
glDisableClientState(GL_VERTEX_ARRAY);
I see something wrong. You access vertices[i] and vertices[i+1], but i always increments by 1.
Try replacing
GLfloat vertices[720];
with
GLfloat vertices[2*720];
and replace
vertices[i]=p1;
vertices[i+1]=p2;
by
vertices[2*i]=p1;
vertices[2*i+1]=p2;
This works. Anti-aliasing is horrible, but it works.
- (void)drawcircelofSlice2
{
    amt += 20;
    if (amt > 360.0)
    {
        amt = 0;
    }
    [EAGLContext setCurrentContext:context];
    glBindFramebufferOES(GL_FRAMEBUFFER_OES, viewFramebuffer);
    glViewport(20, 20, 50, 50);
    glMatrixMode(GL_PROJECTION);
    glLoadIdentity();
    glOrthof(30.0f, 30.0f, -1.5f, 1.5f, -1.0f, 1.0f);
    glMatrixMode(GL_MODELVIEW);
    glClearColor(0.0f, 0.0f, 0.0f, 1.0f);
    glClear(GL_COLOR_BUFFER_BIT);

    float x = 0;
    float y = 0;
    //float radius=20;
    float lowAngle = 0;
    float highAngle = (amt/360) * 360;
    // float highAngle=360;
    float numcirclePts = 360;
    lowAngle = DEGREES_TO_RADIANS(lowAngle);
    highAngle = DEGREES_TO_RADIANS(highAngle);
    float res = numcirclePts;
    float angle = lowAngle;
    float anglerange = highAngle - lowAngle;
    float angleAdder = anglerange / res;

    int k = 0;
    GLfloat verts[720];
    for (int i = 0; i < numcirclePts; i++) {
        verts[k]     = x + cos(angle);
        verts[k + 1] = y - sin(angle);
        angle += angleAdder;
        k += 2;
    }
    verts[0] = x;
    verts[1] = y;
    k = 2;
    for (int i = 2; i < numcirclePts; i++) {
        verts[k]     = verts[k];
        verts[k + 1] = verts[k + 1];
        k += 2;
    }

    glVertexPointer(2, GL_FLOAT, 0, verts);
    glEnableClientState(GL_VERTEX_ARRAY);
    glColor4f(0.0f, 0.0f, 1.0f, 0.0f);
    glDrawArrays(GL_TRIANGLE_FAN, 0, numcirclePts);
    glBindRenderbufferOES(GL_RENDERBUFFER_OES, viewRenderbuffer);
    glDisableClientState(GL_VERTEX_ARRAY);
    [context presentRenderbuffer:GL_RENDERBUFFER_OES];
}
