Bone Vertex Skinning transformation - animation

I am trying to implement skeletal animation in OpenGL. At this stage, I am trying to have my bone system deform a mesh. I believe the problem is with how I create the matrix Bone::getMatrix().
Background:
Each bone has an offset to its parent, and if a bone does not have a parent the offset will instead be the world position of the entire skeleton. A collection of bones comprises a skeleton. Currently, for testing purposes, I have 3 bones in a skeleton deforming a skinned mesh.
Again, the issue is that when deforming the mesh, the visual result does not produce the desired effect.
Code:
// Build this bone's model-space matrix by composing the parent's matrix
// with the bone's local translation (offset from the parent joint) and
// its local rotation.
// Fixes two defects in the original:
//  1) The parent contribution used glm::inverse(parent->getMatrix()),
//     which cancels the hierarchy instead of accumulating it.
//  2) A root bone ignored its world position (the translate call was
//     commented out), collapsing the skeleton onto the origin.
glm::mat4 Bone::getMatrix(){
    Bone* parent = getParent();
    glm::mat4 rot = glm::mat4(1.0f);
    glm::mat4 trans = glm::mat4(1.0f);
    glm::vec3 orient = getOrientation();
    // Create rotation matrix (Euler angles applied about X, then Y, then Z).
    rot *= glm::rotate(orient.x,1.0f,0.0f,0.0f);
    rot *= glm::rotate(orient.y,0.0f,1.0f,0.0f);
    rot *= glm::rotate(orient.z,0.0f,0.0f,1.0f);
    // Create translation matrix.
    if(parent != nullptr){
        // Child bone: translate by the offset relative to the parent joint.
        glm::vec3 OFF = offset;
        trans = glm::translate(OFF.x,OFF.y,OFF.z);
    }else{
        // Root bone: its "offset" is the skeleton's world position.
        glm::vec3 pos = getPosition();
        trans = glm::translate(pos.x,pos.y,pos.z);
    }
    glm::mat4 PM = glm::mat4(1.0f);
    if(parent != nullptr){
        // Accumulate the parent's transform; do NOT invert it.
        PM = parent->getMatrix();
    }
    // parent * (translate to joint) * (rotate about joint).
    return PM * trans * rot;
}
Deforming the mesh:
// Deform the skinned mesh by blending each vertex through the matrices of
// the bones that influence it (linear blend skinning), and draw debug
// points at the bone joint positions.
// Fixes in this revision:
//  * render_vertex.reserve() only reserved capacity; indexing the vector
//    afterwards was undefined behaviour -> use resize().
//  * A null mesh_Vertex previously fell through into the skinning loop
//    and dereferenced it -> early return.
//  * Joint debug points multiplied a default-constructed vec4 on the
//    wrong side of the matrix -> use matrix * vec4(0,0,0,1).
//  * Normals are now accumulated per influence with the bone weight,
//    matching the position blend, instead of being overwritten by the
//    last influence.
void Armature::draw(const float& dt){
    if(mesh == nullptr){
        postMsg("reference copy to mesh is nullptr","draw",34);
        return;
    }
    ArrayVertex3* mesh_Vertex = mesh->getVertex3();
    ArrayVertex3 render_vertex;
    if(mesh_Vertex == nullptr){
        postMsg("mesh vertex structure is null during draw","draw",30);
        return; // cannot skin without source vertices
    }
    // resize (not reserve): elements below are accessed by index.
    render_vertex.resize(mesh_Vertex->size());
    int i=0,j=0;
    glPushMatrix();
    glTranslatef(10,0,0);
    glBegin(GL_POINTS);
    for( i =0; i<this->getBoneCount(); i++){
        // Joint position = bone matrix applied to the origin point (w = 1).
        glVertex3fv( glm::value_ptr( glm::vec3( getBone(i)->getMatrix() * glm::vec4(0.0f,0.0f,0.0f,1.0f) ) ));
    }
    glEnd();
    glPopMatrix();
    // Loop all vertices.
    for( i = 0 ; i < int(mesh->getNumberVertexes()); i ++){
        render_vertex[i].vtx = glm::vec3(0.0f, 0.0f, 0.0f);
        render_vertex[i].norm = glm::vec3(0.0f, 0.0f, 0.0f);
        int inf_count = (*mesh_Vertex)[i].numInfluences;
        // Blend every influence of this vertex.
        for( j=0; j < inf_count; j ++){
            Bone *bone = getBone( (*mesh_Vertex)[i].boneIDs[j] );
            float weight = (*mesh_Vertex)[i].boneWeights[j];
            glm::vec4 original_vert = glm::vec4((*mesh_Vertex)[i].vtx,1.0f);
            glm::vec4 original_norm = glm::vec4((*mesh_Vertex)[i].norm,0.0f);
            render_vertex[i].vtx += glm::vec3( bone->getMatrix() * original_vert ) * weight;
            // Normals use rotation only (w == 0) and the same weight.
            render_vertex[i].norm += glm::vec3( bone->getRotationMatrix() * original_norm ) * weight;
        }
        // NOTE(review): writing the result back into the mesh mutates the
        // bind pose, so deformation accumulates frame over frame. Keep a
        // separate bind-pose copy if that is not intended.
        (*mesh_Vertex)[i].vtx = render_vertex[i].vtx;
        (*mesh_Vertex)[i].norm = glm::normalize(render_vertex[i].norm);
    }
}
Result
The rotation is not done according to its joint location

I found your bone matrix function is a bit chaotic. From my understanding, all the vertices need to be transformed by the global transformation matrix. A global transformation matrix is the concatenation of all the local transformation matrices. The following code is used to calculate the global transformation. This article is useful.
http://graphics.ucsd.edu/courses/cse169_w05/2-Skeleton.htm
Some of my previous work using GLSL + assimp + glm
http://www.youtube.com/watch?v=bXBfVl-msYw&list=HL1385555185&feature=mh_lolz
// Compose this bone's local TRS matrix, then concatenate it with the
// parent's global transform to get the bone's global transform
// (the root bone, id 0, has no parent so local == global).
glm::mat4 T = glm::translate(bone->offset);
glm::mat4 R = glm::toMat4(bone->rotate);
glm::mat4 S = glm::scale(bone->scale);
glm::mat4 localTransform = T * R * S;
if(bone->getID()!=0)
{
// global = parentGlobal * local (order matters: parent is applied last).
bone->globalTransform = bone->getParent()->globalTransform * localTransform;
}
else
{
bone->globalTransform = localTransform;
}
// Recurse so children pick up the freshly updated parent transform.
for (uint i = 0 ; i < bone->getChildren().size() ; i++) {
updateTransform(bone->getChildren()[i]);
}

Related

"Mirroring" a PShape object (rotation / translation issue) with Processing

I would like to "mirror" a PShape object like in the picture below:
I know how to display multiple shapes and how to invert them (screenshot below) but things get complicated when I have to rotate them (and probably translating them) so as they "stick" to the preceding shapes (first picture).
I've been trying to compute an angle with the first 2 vertices of the original shape (irregular quadrilateral) and the atan2() function but to no avail.
I would really appreciate if someone could help figuring how to solve this problem.
// Quad dimensions and the shape handle for the sketch.
int W = 20;
int H = 20;
int D = 20;
PShape object;
void setup(){
size(600, 600, P2D);
smooth();
}
void draw(){
background(255);
pushMatrix();
// Start near the bottom-centre of the window.
translate(width/2, height/1.3);
int td = -1;
// Stack six copies, alternating the vertical step direction each pass.
for (int i = 0; i < 6; i++){
translate(0, td*H*2);
// Mirror horizontally, then turn upside down.
scale(-1, 1);
rotate(PI);
object();
td *= -1;
}
popMatrix();
}
// Irregular quadrilateral roughly centred on the origin.
void object() {
beginShape(QUADS);
vertex(-20, 20);
vertex(20, 0);
vertex(20, -20);
vertex(-20, -20);
endShape();
}
To do what you want you have to create a shape by 2 given angles for the top and the bottom of the shape, `angleT` and `angleB`. The origin (0,0) is in the center of the shape. This causes the pivots for the rotations to be in the middle of the slopes of the shape:
// Shape parameters: the top and bottom edges are sloped by angleT and
// angleB, and the origin (0,0) sits at the centre of the shape so the
// rotation pivots fall on the middle of the sloped edges.
int W = 40;
int H = 40;
float angleT = -PI/18;
float angleB = PI/15;
PShape object;
void object() {
// Heights of the right-hand corners induced by the two edge slopes.
float H1 = -H/2 + W*tan(angleB);
float H2 = H/2 + W*tan(angleT);
beginShape(QUADS);
vertex(-W/2, -H/2);
vertex(W/2, H1);
vertex(W/2, H2);
vertex(-W/2, H/2);
endShape();
}
When you draw the parts, then you should distinguish between even and odd parts. The parts have to be flipped horizontal by inverting the y axis (scale(1, -1)). The even parts have to be rotated by the double of angleB and the odd parts have to be rotated by the doubled of angleT. For the rotation, the center of the slopes (pivots) have to be translated to the origin:
void setup(){
size(600, 600, P2D);
smooth();
}
void draw(){
background(255);
translate(width/2, height/2);
// Pivot heights: centre of the bottom (HC1) and top (HC2) sloped edges.
float HC1 = -H/2 + W*tan(angleB)/2;
float HC2 = H/2 + W*tan(angleT)/2;
for (int i = 0; i < 15; i++){
// Even parts rotate about the bottom edge, odd parts about the top.
float angle = (i % 2 == 0) ? -angleB : -angleT;
float HC = (i % 2 == 0) ? HC1 : HC2;
// Move the pivot to the origin, rotate, move back, then draw.
translate(0, -HC);
rotate(angle*2);
translate(0, -HC);
object();
// Flip vertically so the next copy mirrors this one.
scale(1, -1);
}
}
The algorithm works for any angle, positive and negative including 0.
This algorithm can be further improved. Let's assume you have a quad, defined by 4 points (p0, p1, p2, p3):
// Quad defined by four explicit points (p0..p3), drawn as-is.
float[] p0 = {10, 0};
float[] p1 = {40, 10};
float[] p2 = {60, 45};
float[] p3 = {0, 60};
PShape object;
void object() {
beginShape(QUADS);
vertex(p0[0], p0[1]);
vertex(p1[0], p1[1]);
vertex(p2[0], p2[1]);
vertex(p3[0], p3[1]);
endShape();
}
Calculate the the minimum, maximum, centerpoint, pivots and angles:
// Bounding box and centre point of the quad.
float minX = min( min(p0[0], p1[0]), min(p2[0], p3[0]) );
float maxX = max( max(p0[0], p1[0]), max(p2[0], p3[0]) );
float minY = min( min(p0[1], p1[1]), min(p2[1], p3[1]) );
float maxY = max( max(p0[1], p1[1]), max(p2[1], p3[1]) );
float cptX = (minX+maxX)/2;
float cptY = (minY+maxY)/2;
// Slopes of the bottom (p0->p1) and top (p3->p2) edges.
float angleB = atan2(p1[1]-p0[1], p1[0]-p0[0]);
float angleT = atan2(p2[1]-p3[1], p2[0]-p3[0]);
// Heights of the two edges at the centre x (linear interpolation).
float HC1 = p0[1] + (p1[1]-p0[1])*(cptX-p0[0])/(p1[0]-p0[0]);
float HC2 = p3[1] + (p2[1]-p3[1])*(cptX-p3[0])/(p2[0]-p3[0]);
Draw the shape like before:
// Same alternating draw loop as before, but pivoting at (cptX, HC).
for (int i = 0; i < 6; i++){
float angle = (i % 2 == 0) ? -angleB : -angleT;
float HC = (i % 2 == 0) ? HC1 : HC2;
translate(cptX, -HC);
rotate(angle*2);
translate(-cptX, -HC);
object();
scale(1, -1);
}
Another approach would be to stack the shape on both sides:
For this you have to know the heights of the pivots (HC1, HC2) and the angles (angleB, angleT). So this can be implemented based on both of the above approaches.
Define the pivot points and the directions of the top and bottom edge:
PVector dir1 = new PVector(cos(angleB), sin(angleB));
PVector dir2 = new PVector(cos(angleT), sin(angleT));
PVector pv1 = new PVector(0, HC1); // or PVector(cptX, HC1)
PVector pv2 = new PVector(0, HC2); // or PVector(cptX, HC2)
Calculate the intersection point (X) of the both edges. Of course this will work only if the
edges are not parallel:
PVector v12 = pv2.copy().sub(pv1);
PVector nDir = new PVector(dir2.y, -dir2.x);
float d = v12.dot(nDir) / dir1.dot(nDir);
PVector X = pv1.copy().add( dir1.copy().mult(d) );
The stack algorithm works as follows:
// Stack mirrored/rotated copies on both sides, all pivoting about the
// intersection point X of the two (extended) edge lines.
for (int i = 0; i < 8; i++){
float fullAngle = angleT-angleB;
// Accumulated rotation for this copy's "ring" (two copies per ring).
float angle = fullAngle * floor(i/2);
if ((i/2) % 2 != 0)
angle += fullAngle;
if (i % 2 != 0)
angle = -angle;
// Alternate vertical flips between odd/even copies and rings.
float flip = 1.0;
if (i % 2 != 0)
flip *= -1.0;
if ((i/2) % 2 != 0)
flip *= -1.0;
pushMatrix();
translate(X.x, X.y);
rotate(angle);
scale(1, flip);
rotate(-angleB);
translate(-X.x, -X.y);
object();
popMatrix();
}

Assimp animation bone transformation

Recently I'm working on bone animation import, so I made a 3D Minecraft-like model with some IK technique to test Assimp animation import. Output format is COLLADA (*.dae), and the tool I used is Blender. On the programming side, my environment is opengl/glm/assimp. I think this information for my problem is enough. One thing: for the animation of the model, I just recorded 7 unmoving keyframes for testing assimp animation.
First, I guess my transformation except local transform part is correct, so let the function only return glm::mat4(1.0f), and the result show the bind pose(not sure) model. (see below image)
Second, Turn back the value glm::mat4(1.0f) to bone->localTransform = transform * scaling * glm::mat4(1.0f);, then the model deform. (see below image)
Test image and model in blender:
(bone->localTransform = glm::mat4(1.0f) * scaling * rotate; : this image is under ground :( )
The code here:
void MeshModel::UpdateAnimations(float time, std::vector<Bone*>& bones)
{
for each (Bone* bone in bones)
{
glm::mat4 rotate = GetInterpolateRotation(time, bone->rotationKeys);
glm::mat4 transform = GetInterpolateTransform(time, bone->transformKeys);
glm::mat4 scaling = GetInterpolateScaling(time, bone->scalingKeys);
//bone->localTransform = transform * scaling * glm::mat4(1.0f);
//bone->localTransform = glm::mat4(1.0f) * scaling * rotate;
//bone->localTransform = glm::translate(glm::mat4(1.0f), glm::vec3(0.5f));
bone->localTransform = glm::mat4(1.0f);
}
}
// Walk the skeleton depth-first, building each bone's node transform from
// its parent's, then the final skinning matrix consumed by the shader.
void MeshModel::UpdateBone(Bone * bone)
{
glm::mat4 parentTransform = bone->getParentTransform();
// Node transform: parent chain * node's bind transform * animated local.
bone->nodeTransform = parentTransform
* bone->transform // assimp_node->mTransformation
* bone->localTransform; // T S R matrix
// Final matrix: offset matrix moves the vertex into bone space, the node
// transform applies the pose, globalInverse undoes the root transform.
bone->finalTransform = globalInverse
* bone->nodeTransform
* bone->inverseBindPoseMatrix; // ai_mesh->mBones[i]->mOffsetMatrix
for (int i = 0; i < (int)bone->children.size(); i++) {
UpdateBone(bone->children[i]);
}
}
// Returns the parent's accumulated node transform, or the identity matrix
// when this bone is the skeleton root (no parent).
glm::mat4 Bone::getParentTransform()
{
	return (this->parent != nullptr) ? this->parent->nodeTransform
	                                 : glm::mat4(1.0f);
}
// Interpolate the rotation track at 'time' and return it as a mat4.
// Fix: the original called glm::lerp(end, start, Factor), which walks
// each key interval backwards; quaternions are now slerped from the
// start key to the end key, which also follows the shortest arc.
glm::mat4 MeshModel::GetInterpolateRotation(float time, std::vector<BoneKey>& keys)
{
	// We need at least two values to interpolate...
	if ((int)keys.size() == 0) {
		return glm::mat4(1.0f);
	}
	if ((int)keys.size() == 1) {
		return glm::mat4_cast(keys[0].rotation);
	}
	int rotationIndex = FindBestTimeIndex(time, keys);
	int nextRotationIndex = (rotationIndex + 1);
	assert(nextRotationIndex < (int)keys.size());
	float DeltaTime = (float)(keys[nextRotationIndex].time - keys[rotationIndex].time);
	// Normalised position of 'time' inside [key, nextKey], clamped to [0,1].
	float Factor = (time - (float)keys[rotationIndex].time) / DeltaTime;
	if (Factor < 0.0f)
		Factor = 0.0f;
	if (Factor > 1.0f)
		Factor = 1.0f;
	assert(Factor >= 0.0f && Factor <= 1.0f);
	const glm::quat& startRotationQ = keys[rotationIndex].rotation;
	const glm::quat& endRotationQ = keys[nextRotationIndex].rotation;
	glm::quat interpolateQ = glm::slerp(startRotationQ, endRotationQ, Factor);
	interpolateQ = glm::normalize(interpolateQ);
	return glm::mat4_cast(interpolateQ);
}
// Linearly interpolate the translation track at 'time' and return the
// result as a translation matrix.
glm::mat4 MeshModel::GetInterpolateTransform(float time, std::vector<BoneKey>& keys)
{
	// Need at least two keys to interpolate between...
	if ((int)keys.size() == 0) {
		return glm::mat4(1.0f);
	}
	if ((int)keys.size() == 1) {
		return glm::translate(glm::mat4(1.0f), keys[0].vector);
	}
	const int curIdx = FindBestTimeIndex(time, keys);
	const int nextIdx = curIdx + 1;
	assert(nextIdx < (int)keys.size());
	const float span = (float)(keys[nextIdx].time - keys[curIdx].time);
	// Normalised position inside the key interval, clamped to [0, 1].
	float t = (time - (float)keys[curIdx].time) / span;
	if (t < 0.0f)
		t = 0.0f;
	else if (t > 1.0f)
		t = 1.0f;
	assert(t >= 0.0f && t <= 1.0f);
	const glm::vec3& from = keys[curIdx].vector;
	const glm::vec3& to = keys[nextIdx].vector;
	const glm::vec3 blended = from + (to - from) * t;
	return glm::translate(glm::mat4(1.0f), blended);
}
The code idea is referenced from Matrix calculations for gpu skinning and Skeletal Animation With Assimp.
Overall, I fetch all the information from assimp into MeshModel and save it to the bone structure, so I think the information is alright?
The last thing, my vertex shader code:
#version 330 core
#define MAX_BONES_PER_VERTEX 4
in vec3 position;
in vec2 texCoord;
in vec3 normal;
in ivec4 boneID;
in vec4 boneWeight;
const int MAX_BONES = 100;
uniform mat4 model;
uniform mat4 view;
uniform mat4 projection;
uniform mat4 boneTransform[MAX_BONES];
out vec3 FragPos;
out vec3 Normal;
out vec2 TexCoords;
out float Visibility;
// Exponential fog parameters.
const float density = 0.007f;
const float gradient = 1.5f;
void main()
{
    // Blend the four bone matrices by their weights (linear blend skinning).
    mat4 boneTransformation = boneTransform[boneID[0]] * boneWeight[0];
    boneTransformation += boneTransform[boneID[1]] * boneWeight[1];
    boneTransformation += boneTransform[boneID[2]] * boneWeight[2];
    boneTransformation += boneTransform[boneID[3]] * boneWeight[3];
    vec3 usingPosition = (boneTransformation * vec4(position, 1.0)).xyz;
    // Fix: w = 0.0 for the normal -- directions must not pick up the
    // translation part of the bone matrix (the original used w = 1.0).
    vec3 usingNormal = (boneTransformation * vec4(normal, 0.0)).xyz;
    vec4 viewPos = view * model * vec4(usingPosition, 1.0);
    gl_Position = projection * viewPos;
    FragPos = vec3(model * vec4(usingPosition, 1.0f));
    Normal = mat3(transpose(inverse(model))) * usingNormal;
    TexCoords = texCoord;
    // Distance-based fog visibility.
    float distance = length(viewPos.xyz);
    Visibility = exp(-pow(distance * density, gradient));
    Visibility = clamp(Visibility, 0.0f, 1.0f);
}
If my question above, lack of code or describe vaguely, please let me know, Thanks!
Edit:(1)
In additional, my bone information like this(code fetching part):
// Copy assimp's position keyframes (time + vec3 value) into the bone's
// transformKeys track.
for (int i = 0; i < (int)nodeAnim->mNumPositionKeys; i++)
{
BoneKey key;
key.time = nodeAnim->mPositionKeys[i].mTime;
aiVector3D vec = nodeAnim->mPositionKeys[i].mValue;
key.vector = glm::vec3(vec.x, vec.y, vec.z);
currentBone->transformKeys.push_back(key);
}
had some transformation vectors, so my code above, glm::mat4 transform = GetInterpolateTransform(time, bone->transformKeys);, absolutely gets the same value from it. I'm not sure whether the no-move keyframe animation I made really provides true transform values or not (of course it has 7 keyframes).
A keyframe contents like this(debug on head bone):
7 different keyframe, same vector value.
Edit:(2)
If you want to test my dae file, I put it in jsfiddle, come and take it :). Another thing, in Unity my file work correctly, so I think maybe not my local transform occurs the problem, it seems the problem could be some other like parentTransform or bone->transform...etc? I aslo add local transform matrix with all bone, But can not figure out why COLLADA contains these value for my unmove animation...
For amounts of testing, and finally found the problem is the UpdateBone() part.
Before I point out my problem, I need to say the series of matrix multiplication let me confuse, but when I found the solution, it just make me totally (maybe just 90%) realize all the matrix doing.
The problem comes from the article,Matrix calculations for gpu skinning. I assumed the answer code is absolutely right and don't think any more about the matrix should be used. Thus, misusing matrix terribly transfer my look into the local transform matrix. Back to the result image in my question section is bind pose when I change the local transform matrix to return glm::mat4(1.0f).
So the question is why the changed make the bind pose? I assumed the problem must be local transform in bone space, but I'm wrong. Before I give the answer, look at the code below:
// (Original, problematic version) Every node applies both its bind-pose
// mTransformation and the animated localTransform, so bone nodes get the
// bind pose applied twice -- the bug the surrounding text diagnoses.
void MeshModel::UpdateBone(Bone * bone)
{
glm::mat4 parentTransform = bone->getParentTransform();
bone->nodeTransform = parentTransform
* bone->transform // assimp_node->mTransformation
* bone->localTransform; // T S R matrix
bone->finalTransform = globalInverse
* bone->nodeTransform
* bone->inverseBindPoseMatrix; // ai_mesh->mBones[i]->mOffsetMatrix
for (int i = 0; i < (int)bone->children.size(); i++) {
UpdateBone(bone->children[i]);
}
}
And I make the change as below:
// (Fixed version) Non-bone scene nodes keep assimp's mTransformation;
// actual bone nodes use only the animated localTransform, which avoids
// applying the bind pose twice.
void MeshModel::UpdateBone(Bone * bone)
{
glm::mat4 parentTransform = bone->getParentTransform();
// NOTE(review): 'boneName' is not declared in this snippet -- presumably
// it is the bone's name queried elsewhere; confirm against full source.
if (boneName == "Scene" || boneName == "Armature")
{
bone->nodeTransform = parentTransform
* bone->transform // when isn't bone node, using assimp_node->mTransformation
* bone->localTransform; //this is your T * R matrix
}
else
{
bone->nodeTransform = parentTransform // This retrieve the transformation one level above in the tree
* bone->localTransform; //this is your T * R matrix
}
bone->finalTransform = globalInverse // scene->mRootNode->mTransformation
* bone->nodeTransform //defined above
* bone->inverseBindPoseMatrix; // ai_mesh->mBones[i]->mOffsetMatrix
for (int i = 0; i < (int)bone->children.size(); i++) {
UpdateBone(bone->children[i]);
}
}
I don't know what the assimp_node->mTransformation give me before, only the description "The transformation relative to the node's parent" in the assimp documentation. For some testing, I found that the mTransformation is the bind pose matrix which the current node relative to parent if I use these on bone node. Let me give a picture that captured the matrix on head bone.
The left part is the transform which is fetched from assimp_node->mTransformation.The right part is my unmove animation's localTransform which is calculated by the keys from nodeAnim->mPositionKeys, nodeAnim->mRotationKeys and nodeAnim->mScalingKeys.
Looking back at what I did, I made the bind pose transformation twice, so the image in my question section looks merely separated, but not spaghetti :)
On the last, let me show what I did before the unmove animation testing and correct animation result.
(For everyone, If my concept is wrong , please point me out! Thx.)

Depth / Ranged Image to Point Cloud

I am trying to create a pointcloud from a depth image / ranged image / displacement map (grey values which correspond to z depth).
Here is the depth image of an trailer vehicle:
http://de.tinypic.com/r/rlvp80/8
Camera Parameters, no rotation: x,y,z: 0,25,0
I get this pointcloud with a aslope ground plane, which should be even (just a quad at 0,0,0 with no rotation):
rotated:
http://de.tinypic.com/r/2mnglj6/8
Here is my code, I try to do this:
normalizedCameraRay = normalize(CameraRay);
Point_in_3D = zDepthValueOfPixelXY /normalizedCameraRay.zValue; //zValue is <= 1
This doesn't seem to work. If I use zDepthValueOfPixelXY * normalizedCameraRay, I get radial distortion (everything looks a bit like a sphere).
Any Ideas how to fix this? I attached my code.
Thank you for your ideas
// Back-project each depth pixel into a 3D point along its camera ray.
// NOTE(review): startPositionX is derived from image.rows but is used
// with the column index b (and startPositionY from cols with row a);
// rows/cols look swapped -- confirm, since cols is the image width (x).
float startPositionX = -((float)image.rows)/2;
float startPositionY = -((float)image.cols)/2;
glm::vec3 cameraWorldPosition(x,y,z);
qDebug() << x << "," << y << ", " << z;
for(int a = 0; a < image.rows;a++)
{
for(int b = 0; b < image.cols;b++)
{
// Pixel offset from the image centre.
float movementX = startPositionX+b;
float movementY = startPositionY+a;
// Ray through this pixel for a pinhole camera with the given focal length.
glm::vec3 ray(movementX, movementY, focal_length);
ray = glm::normalize(ray);
// Scale the unit ray so its z component equals the stored depth value.
float rayFactor = ((float)image.at<u_int16_t>(a,b)) / ray[2];
glm::vec3 completeRay = rayFactor*ray;
pointCloud[a][b] = completeRay;
/*
ray = completeRay - cameraWorldPosition;
ray = glm::normalize(ray);
rayFactor = ((float)image.at<u_int16_t>(a,b)) / ray[2];
ray = rayFactor * ray;
pointCloud[a][b] = ray;*/
/*
ray = glm::vec3(startPositionX+b,startPositionY-a, image.at<u_int16_t>(a,b));
pointCloud[a][b] = ray;
*/
}
}

Problems rendering a Polar Zonohedron in Processing

I've recently been looking into Zonohedrons and Rob Bell made beautiful ones. I had a play with the free Polar Zonohedron Sketchup Plugin and thought about playing with the geometry using Processing. So far I've open up the plugin/Ruby script and tried to port it directly, but I am not experienced with Ruby and have been using the Sketchup Ruby API reference.
The geometry part of the code is mostly in the polar_zonohedron function:
# Build a polar zonohedron in SketchUp from user-supplied frequency,
# pitch and strut length (Rob Bell's zome geometry).
def polar_zonohedron #frequency, pitch = atan(sqrt(2)/2), len = 1.0 # frequency,pitch,length
mo = Sketchup.active_model
mo.start_operation "polar_zonohedron"
prompts = ["Frequency", "Pitch in radians", "Length"]
values = [8, "atan( sqrt(2)/2 )", 12.inch]
results = inputbox prompts, values, "Polar Zonohedron"
return if not results # This means that the user cancelled the operation
ents = mo.active_entities
grp = ents.add_group
ents = grp.entities
grp.frequency = results[0]
# NOTE(review): the pitch string from the dialog is eval'd -- trusted input only.
grp.pitch = eval( results[1] )
grp.length = results[2]
pts=[]
#we begin by setting pts[0] to the origin
pts[0] = Geom::Point3d.new(0,0,0)
vector = Geom::Vector3d.new(cos(grp.pitch),0,sin(grp.pitch) ) #tilt pitch vector up the xz plane
vector.length = grp.length
#Using the origin as the initial generator we iterate thru each zone of the zonohedron
#our first task is to define the four points of the base rhomb for this zone
#at the end the pts[3] becomes our new origin for the rhomb of the next zone
1.upto(grp.frequency-1){ |i|
p_rotate = Geom::Transformation.rotation( pts[0] , Geom::Vector3d.new(0,0,1), i*2*PI/grp.frequency )
#obtain the other three points of the rhomb face
#point.transform(vector) translates; point.transform(rotation) rotates
pts[1] = pts[0].transform vector
pts[3] = pts[1].transform( p_rotate )
pts[2] = pts[3].transform( vector )
#we now have the 4 points which make this zone's base rhomb
#so we rotate around the origin frequency times making a star pattern of faces
0.upto(grp.frequency-1){ |j|
f_rotate = Geom::Transformation.rotation( Geom::Point3d.new(0,0,0) , Geom::Vector3d.new(0,0,1), j*2*PI/grp.frequency )
ents.add_face( pts.collect{|p| p.transform(f_rotate)} )
}
#set the origin for the rhomb of the next zone
pts[0] = pts[3]
}
mo.commit_operation
end
I've understood the loops but am slightly confused by transforms:
pts[1] = pts[0].transform vector
pts[3] = pts[1].transform( p_rotate )
pts[2] = pts[3].transform( vector )
As far as I can tell pts[1] is the vector addition of pts[0] and vector,
and pts[3] is pts[1] multiplied by the p_rotate rotation matrix. Would pts[2] also be an addition (between pts[3] and vector )?
Here's what my attempt looks like so far:
//a port attempt of Rob Bell's polar_zonohedron.rb script - http://zomebuilder.com/
int frequency = 3;
float pitch = atan(sqrt(2)/2);
float length = 24;
ArrayList<Face> faces = new ArrayList<Face>();
void setup(){
size(400,400,P3D);
strokeWeight(3);
setupZome();
}
// First port attempt. (The author's own fix further down shows the bug:
// p_rotate must use a fixed TWO_PI/frequency step, not i*TWO_PI/frequency.)
void setupZome(){
faces.clear();
PVector[] pts = new PVector[4];
pts[0] = new PVector();
// Zone generator vector, tilted up by 'pitch' in the xz plane.
PVector vector = new PVector(cos(pitch),0,sin(pitch));
vector.mult(length);
for(int i = 1 ; i < frequency; i++){
PMatrix3D p_rotate = new PMatrix3D();
p_rotate.rotate(i * TWO_PI / frequency, 0,0,1);
//PVector v = new PVector();
//p_rotate.mult(pts[0],v);
//pts[0] = v;
// Four corners of this zone's base rhombus.
pts[1] = PVector.add(pts[0],vector);
pts[3] = new PVector();
p_rotate.mult(pts[1],pts[3]);
pts[2] = PVector.add(pts[3],vector);
// Spin the rhombus around the z axis to form the star of faces.
for(int j = 0; j < frequency; j++){
PMatrix3D f_rotate = new PMatrix3D();
f_rotate.rotate(j*2*PI/frequency , 0,0,1);
Face f = new Face();
for(PVector pt : pts){
PVector p = new PVector();
f_rotate.mult(pt,p);
f.add(p.get());
}
faces.add(f);
}
// pts[3] becomes the origin of the next zone's rhombus.
pts[0] = pts[3];
}
}
void draw(){
background(255);
lights();
translate(width * .5, height * .5,0);
rotateY(map(mouseX,0,width,-PI,PI));
rotateX(map(mouseY,0,height,-PI,PI));
drawAxes(100);
pushMatrix();
translate(0,0,-frequency * length * .25);
for(Face f : faces){
beginShape(mousePressed ? QUADS : POINTS);
for(PVector p : f.pts) vertex(p.x,p.y,p.z);
endShape();
}
popMatrix();
}
void keyPressed(){
if(keyCode == UP && frequency < 32) frequency++;
if(keyCode == DOWN && frequency > 2) frequency--;
setupZome();
}
void drawAxes(int size){
stroke(192,0,0);
line(0,0,0,size,0,0);
stroke(0,192,0);
line(0,0,0,0,size,0);
stroke(0,0,192);
line(0,0,0,0,0,size);
}
class Face{
ArrayList<PVector> pts = new ArrayList<PVector>();
Face(){}
void add(PVector p){
if(pts.size() <= 4) pts.add(p);
}
}
I feel I'm close, but I'm getting the loop conditionals and vertex indices wrong.
Any tips on how to fix this?
I was very close, but not paying attention to all the details.
Turns out I get the correct mesh if I don't increment the rotation on p_rotate:
p_rotate.rotate(TWO_PI / frequency, 0,0,1);
instead of
p_rotate.rotate(i * TWO_PI / frequency, 0,0,1);
Here is the full code listing:
//a port attempt of Rob Bell's polar_zonohedron.rb script - http://zomebuilder.com/
int frequency = 3;
float pitch = atan(sqrt(2)/2);
float length = 24;
ArrayList<Face> faces = new ArrayList<Face>();
void setup(){
size(400,400,P3D);
strokeWeight(3);
setupZome();
}
// Corrected port: p_rotate uses a constant TWO_PI/frequency rotation
// (the earlier attempt wrongly scaled it by the zone index i).
void setupZome(){
faces.clear();
PVector[] pts = new PVector[4];
pts[0] = new PVector();
// Zone generator vector, tilted up by 'pitch' in the xz plane.
PVector vector = new PVector(cos(pitch),0,sin(pitch));
vector.mult(length);
for(int i = 1 ; i < frequency-1; i++){
PMatrix3D p_rotate = new PMatrix3D();
p_rotate.rotate(TWO_PI / frequency, 0,0,1);
// Four corners of this zone's base rhombus.
pts[1] = PVector.add(pts[0],vector);
pts[3] = new PVector();
p_rotate.mult(pts[1],pts[3]);
pts[2] = PVector.add(pts[3],vector);
// Spin the rhombus around the z axis to form the star of faces.
for(int j = 0; j < frequency; j++){
PMatrix3D f_rotate = new PMatrix3D();
f_rotate.rotate(j*2*PI/frequency , 0,0,1);
Face f = new Face();
for(PVector pt : pts){
PVector p = new PVector();
f_rotate.mult(pt,p);
f.add(p.get());
}
faces.add(f);
}
// Next zone starts where this rhombus ended.
pts[0] = pts[3];
}
}
void draw(){
background(255);
lights();
translate(width * .5, height * .5,0);
rotateY(map(mouseX,0,width,-PI,PI));
rotateX(map(mouseY,0,height,-PI,PI));
drawAxes(100);
pushMatrix();
translate(0,0,-frequency * length * .25);
for(Face f : faces){
beginShape(mousePressed ? QUADS : POINTS);
for(PVector p : f.pts) vertex(p.x,p.y,p.z);
endShape();
}
popMatrix();
}
void keyPressed(){
if(keyCode == UP && frequency < 32) frequency++;
if(keyCode == DOWN && frequency > 3) frequency--;
setupZome();
}
void drawAxes(int size){
stroke(192,0,0);
line(0,0,0,size,0,0);
stroke(0,192,0);
line(0,0,0,0,size,0);
stroke(0,0,192);
line(0,0,0,0,0,size);
}
class Face{
ArrayList<PVector> pts = new ArrayList<PVector>();
Face(){}
void add(PVector p){
if(pts.size() <= 4) pts.add(p);
}
}
And here a couple of screenshots:

Drawing a circle with a sector cut out in OpenGL ES 1.1

I'm trying to draw the following shape using OpenGL ES 1.1. And well, I'm stuck, I don't really know how to go about it.
My game currently uses Android's Canvas API, which isn't hardware accelerated, so I'm rewriting it with OpenGL ES. The Canvas class has a method called drawArc which makes drawing this shape very very easy; Canvas.drawArc
Any advice/hints on doing the same with OpenGL ES?
Thank you for reading.
void gltDrawArc(unsigned int const segments, float angle_start, float angle_stop)
{
int i;
float const angle_step = (angle_stop - angle_start)/segments;
GLfloat *arc_vertices;
arc_vertices = malloc(2*sizeof(GLfloat) * (segments+2));
arc_vertices[0] = arc_vertices[1] = 0.
for(i=0; i<segments+1; i++) {
arc_vertices[2 + 2*i ] = cos(angle_start + i*angle_step);
arc_vertices[2 + 2*i + 1] = sin(angle_start + i*angle_step);
}
glVertexPointer(2, GL_FLOAT, 0, arc_vertices);
glEnableClientState(GL_VERTEX_ARRAY);
glDrawArrays(GL_TRIANGLE_FAN, 0, segments+2);
free(arc_vertices);
}
What about just sampling the circle at discrete angles and drawing a GL_TRIANGLE_FAN?
EDIT: Something like this will just draw a sector of a unit circle around the origin in 2D:
glBegin(GL_TRIANGLE_FAN);
glVertex2f(0.0f, 0.0f);
for(angle=startAngle; angle<=endAngle; ++angle)
glVertex2f(cos(angle), sin(angle));
glEnd();
Actually take this more as pseudocode, as sin and cos usually work on radians and I'm using degrees, but you should get the point.
I am new to android programming so I am sure there is probably a better way to do this. But I was following the OpenGL ES 1.0 tutorial on the android developers site http://developer.android.com/resources/tutorials/opengl/opengl-es10.html which walks you through drawing a green triangle. You can follow the link and you will see most of the code I used there. I wanted to draw a circle on the triangle. The code I added is based on the above example posted by datenwolf. And is shown in snippets below:
// Renderer that draws a circle ("hole") as a fan of small triangles, one
// FloatBuffer per triangle. (Excerpted; the bare '.' lines mark elisions.)
public class HelloOpenGLES10Renderer implements GLSurfaceView.Renderer {
// the number small triangles used to make a circle
public int segments = 100;
public float mAngle;
private FloatBuffer triangleVB;
// array to hold the FloatBuffer for the small triangles
private FloatBuffer [] segmentsArray = new FloatBuffer[segments];
private void initShapes(){
.
.
.
// stuff to draw holes in the board
int i = 0;
float angle_start = 0.0f;
float angle_stop = 2.0f * (float) java.lang.Math.PI;
float angle_step = (angle_stop - angle_start)/segments;
for(i=0; i<segments; i++) {
float[] holeCoords;
FloatBuffer holeVB;
holeCoords = new float [ 9 ];
// initialize vertex Buffer for triangle
// (# of coordinate values * 4 bytes per float)
ByteBuffer vbb2 = ByteBuffer.allocateDirect(holeCoords.length * 4);
vbb2.order(ByteOrder.nativeOrder());// use the device hardware's native byte order
holeVB = vbb2.asFloatBuffer(); // create a floating point buffer from the ByteBuffer
float x1 = 0.05f * (float) java.lang.Math.cos(angle_start + i*angle_step);
float y1 = 0.05f * (float) java.lang.Math.sin(angle_start + i*angle_step);
float z1 = 0.1f;
// BUG(review): "i+1*angle_step" parses as "i + (1*angle_step)" by
// operator precedence; the second rim vertex should use "(i+1)*angle_step".
float x2 = 0.05f * (float) java.lang.Math.cos(angle_start + i+1*angle_step);
float y2 = 0.05f * (float) java.lang.Math.sin(angle_start + i+1*angle_step);
float z2 = 0.1f;
// Triangle: circle centre + two consecutive rim points (all at z = 0.1).
holeCoords[0] = 0.0f;
holeCoords[1] = 0.0f;
holeCoords[2] = 0.1f;
holeCoords[3] = x1;
holeCoords[4] = y1;
holeCoords[5] = z1;
holeCoords[6] = x2;
holeCoords[7] = y2;
holeCoords[8] = z2;
holeVB.put(holeCoords); // add the coordinates to the FloatBuffer
holeVB.position(0); // set the buffer to read the first coordinate
segmentsArray[i] = holeVB;
}
}
.
.
.
public void onDrawFrame(GL10 gl) {
.
.
.
// Draw hole
gl.glColor4f( 1.0f - 0.63671875f, 1.0f - 0.76953125f, 1.0f - 0.22265625f, 0.0f);
for ( int i=0; i<segments; i++ ) {
gl.glVertexPointer(3, GL10.GL_FLOAT, 0, segmentsArray[i]);
gl.glDrawArrays(GL10.GL_TRIANGLES, 0, 3);
}
}

Resources