float coords[] = {
    -1.0f,  1.0f, 0.0f, // 0, Top Left
    -1.0f, -1.0f, 0.0f, // 1, Bottom Left
     1.0f, -1.0f, 0.0f, // 2, Bottom Right
     1.0f,  1.0f, 0.0f, // 3, Top Right
};
float texCoords[] = {
    0.0f, 0.0f,
    0.0f, 1.0f,
    1.0f, 1.0f,
    1.0f, 0.0f,
};
On draw,
gl.glDrawArrays(GL10.GL_TRIANGLE_FAN, 0, coords.length / dimension);
draws normally, but
gl.glDrawArrays(GL10.GL_TRIANGLE_STRIP, 0, coords.length / dimension);
draws only half of the square. Why?
For the strip to work, the order of the points should be: TL, BL, TR, BR.
When you specify a fan, the triangles go around the very first point: each triangle is composed of that first point, the last point of the previous triangle, and the next point in the list.
With a strip it's different: each triangle is built from the last two points of the previous triangle plus the next point in the list. This has a side effect: successive triangles have opposite vertex winding (CW, then CCW, then CW again, and so on).
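For reference, here is the question's square reordered for GL_TRIANGLE_STRIP (TL, BL, TR, BR). This is a minimal sketch based on the arrays above; note that the texture coordinates must be reordered the same way so each pair stays attached to its vertex:
float coords[] = {
    -1.0f,  1.0f, 0.0f, // 0, Top Left
    -1.0f, -1.0f, 0.0f, // 1, Bottom Left
     1.0f,  1.0f, 0.0f, // 2, Top Right
     1.0f, -1.0f, 0.0f, // 3, Bottom Right
};
float texCoords[] = {
    0.0f, 0.0f, // Top Left
    0.0f, 1.0f, // Bottom Left
    1.0f, 0.0f, // Top Right
    1.0f, 1.0f, // Bottom Right
};
gl.glDrawArrays(GL10.GL_TRIANGLE_STRIP, 0, coords.length / dimension);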
I am trying to create a mesh containing more than one triangle that I can then tessellate. One triangle works fine, but when I try to add another triangle to the mesh I get strange behaviour.
This is my vertex list.
vertices[0].position = XMFLOAT3(0.0f, 2.0f, 0.0f); // Top.
vertices[0].texture = XMFLOAT2(0.0f, 1.0f);
vertices[0].normal = XMFLOAT3(0.0f, 0.0f, -1.0f);
vertices[1].position = XMFLOAT3(0.0f, 0.0f, 0.0f); // Bottom left.
vertices[1].texture = XMFLOAT2(0.0f, 0.0f);
vertices[1].normal = XMFLOAT3(0.0f, 0.0f, -1.0f);
vertices[2].position = XMFLOAT3(2.0f, 0.0f, 0.0f); // Bottom right.
vertices[2].texture = XMFLOAT2(1.0f, 0.0f);
vertices[2].normal = XMFLOAT3(0.0f, 0.0f, -1.0f);
vertices[3].position = XMFLOAT3(0.0f, 2.0f, 0.0f); // Top.
vertices[3].texture = XMFLOAT2(0.0f, 1.0f);
vertices[3].normal = XMFLOAT3(0.0f, 0.0f, -1.0f);
I've taken screenshots to show the behavior.
Pic 1 is taken when the camera is at (0, 0, -10). Pic 2 is when the camera is at (0, 0, 10). Pic 3 is taken at (0, 0, -10) after I changed vertices[3] to
vertices[3].position = XMFLOAT3(0.0f, -2.0f, 0.0f);
http://imgur.com/a/DCZfm
There are 6 values in my index array and my topology is D3D11_PRIMITIVE_TOPOLOGY_3_CONTROL_POINT_PATCHLIST.
If anyone could help I would greatly appreciate it; I'm really confused right now!
My question is quite trivial, I believe. I'm using OpenGL ES 2.0 to draw a simple 2D scene.
I have a background texture that stretches across the whole screen, and another texture of a flower (or shall I say a sprite?) that is drawn at a specific location on screen.
The trivial way I can think of doing this is to call glDrawArrays twice: once with the vertices of the background texture, and once with the vertices of the flower texture.
Is that the right way? If so, does that mean that for 10 flowers I'll need to call glDrawArrays 10 times?
And what about blending? If I want to blend the flower with the background, I need both the background and flower pixel colors; isn't that a problem with two draw calls?
Or is it possible to do it in one draw call? If so, how can I create a shader that knows whether it is currently processing a background vertex or a flower vertex?
Or is it possible to do it in one draw?
The problem with one draw call is that the shader needs to know whether the current vertex is a background vertex (and should use the background texture color) or a flower vertex (and should use the flower texture color), and I don't know how to do that.
Here is how I use one draw call to draw the background image stretched across the whole screen and the flower at half the screen size, centered.
- (void)renderOnce {
    // ... set program, clear color ...
    glActiveTexture(GL_TEXTURE2);
    glBindTexture(GL_TEXTURE_2D, backgroundTexture);
    glUniform1i(backgroundTextureUniform, 2);

    glActiveTexture(GL_TEXTURE3);
    glBindTexture(GL_TEXTURE_2D, flowerTexture);
    glUniform1i(flowerTextureUniform, 3);

    static const GLfloat allVertices[] = {
        -1.0f, -1.0f, // background vertices:
         1.0f, -1.0f, // cover the whole screen
        -1.0f,  1.0f,
         1.0f,  1.0f,
        -0.5f, -0.5f, // flower vertices:
         0.5f, -0.5f, // half the screen size
        -0.5f,  0.5f, // and centered
         0.5f,  0.5f,
    };
    // both background and flower texture coords use the whole texture
    static const GLfloat backgroundTextureCoordinates[] = {
        0.0f, 0.0f,
        1.0f, 0.0f,
        0.0f, 1.0f,
        1.0f, 1.0f,
    };
    static const GLfloat flowerTextureCoordinates[] = {
        0.0f, 0.0f,
        1.0f, 0.0f,
        0.0f, 1.0f,
        1.0f, 1.0f,
    };

    glVertexAttribPointer(positionAttribute, 2, GL_FLOAT, 0, 0, allVertices);
    glVertexAttribPointer(backgroundTextureCoordinateAttribute, 2, GL_FLOAT, 0, 0, backgroundTextureCoordinates);
    glVertexAttribPointer(flowerTextureCoordinateAttribute, 2, GL_FLOAT, 0, 0, flowerTextureCoordinates);

    glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);
}
You have two choices:
Call glDrawArrays once for every texture you want to draw. This will be slow if you have more than 10-20 textures; to speed it up, though, you can use hardware VBOs.
Batch the vertices (positions, texture coords, colors) of all the sprites you want to draw into one array, use a texture atlas (a single texture that contains all of the pictures you want to draw), and draw everything with one glDrawArrays call.
The second way is clearly the better and the right one. To get an idea of how to do it, look at my answer here.
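To make the batching option concrete, here is a minimal sketch in C; the Sprite struct, the function names, and the attribute handles are illustrative assumptions, not part of the original answer:
#include <stdlib.h>
#include <string.h>
#include <GLES2/gl2.h>

// Hypothetical sprite: a rect in clip space plus its sub-rect in the atlas.
typedef struct {
    float x, y, w, h;     // position and size in clip space
    float u0, v0, u1, v1; // the sprite's region inside the atlas texture
} Sprite;

// Append one sprite as two triangles: 6 vertices, interleaved x,y,u,v.
static void appendSprite(GLfloat *out, const Sprite *s)
{
    const GLfloat quad[24] = {
        s->x,        s->y,        s->u0, s->v0,
        s->x + s->w, s->y,        s->u1, s->v0,
        s->x,        s->y + s->h, s->u0, s->v1,
        s->x,        s->y + s->h, s->u0, s->v1,
        s->x + s->w, s->y,        s->u1, s->v0,
        s->x + s->w, s->y + s->h, s->u1, s->v1,
    };
    memcpy(out, quad, sizeof(quad));
}

// Batch all sprites into one interleaved array and issue a single draw call.
// The atlas texture is assumed to be bound already.
void drawSprites(const Sprite *sprites, int count,
                 GLuint positionAttribute, GLuint textureCoordinateAttribute)
{
    GLfloat *batch = malloc((size_t)count * 24 * sizeof(GLfloat));
    for (int i = 0; i < count; ++i)
        appendSprite(batch + i * 24, &sprites[i]);

    const GLsizei stride = 4 * sizeof(GLfloat);
    glVertexAttribPointer(positionAttribute, 2, GL_FLOAT, GL_FALSE, stride, batch);
    glVertexAttribPointer(textureCoordinateAttribute, 2, GL_FLOAT, GL_FALSE, stride, batch + 2);
    glDrawArrays(GL_TRIANGLES, 0, count * 6); // one call for all sprites
    free(batch);
}
Because every sprite samples from the same atlas texture, nothing has to be rebound between sprites, which is what makes the single glDrawArrays call possible.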
I am trying to understand the behavior of gl_VertexID in vertex shaders. To do that, I am rendering 2 squares using two glDrawArrays calls, one after the other, and I want to color only one square red using gl_VertexID in the vertex shader:
#version 300 es
out vec4 color;
in vec4 tdk_Vertex;
void main(void)
{
    if (gl_VertexID < 4)
    {
        color = vec4(1.0f, 0.0f, 0.0f, 1.0f);
    }
    else
    {
        color = vec4(1.0f, 1.0f, 1.0f, 1.0f);
    }
    gl_Position = tdk_Vertex;
}
I pass color through to the fragment shader.
The square coordinates are:
static GLfloat vertices[] = {
    -0.75f, 0.25f, 0.0f, 1.0f,
    -0.75f, 0.5f,  0.0f, 1.0f,
    -0.25f, 0.5f,  0.0f, 1.0f,
    -0.25f, 0.25f, 0.0f, 1.0f,
     0.25f, 0.25f, 0.0f, 1.0f,
     0.25f, 0.5f,  0.0f, 1.0f,
     0.75f, 0.5f,  0.0f, 1.0f,
     0.75f, 0.25f, 0.0f, 1.0f,
};
I make the draw calls as:
for (int i = 0; i < 8; i += 4)
{
    glDrawArrays(GL_TRIANGLE_FAN, i, 4);
}
Using an Nvidia card, the two glDrawArrays calls display the result I expected: one square rendered red and the other white.
So I want to know: is this the correct behaviour, or should the gl_VertexID values be generated per glDrawArrays call, so that both squares end up red?
Since I am using 2 separate glDrawArrays calls, my understanding of the specification is that both squares should be red:
http://www.opengl.org/sdk/docs/manglsl/xhtml/gl_VertexID.xml
I want to test this for GLSL ES 3.00.
In the case of glDrawArrays, gl_VertexID is intended to be the index of the vertex within the buffer. Your first draw call renders the indices in the range [0, 4), so those are the values gl_VertexID takes; your second draw call renders the indices in the range [4, 8), so gl_VertexID takes those values instead.
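In other words, for glDrawArrays(mode, first, count) the shader sees gl_VertexID = first, first + 1, ..., first + count - 1. Annotating the two calls made by the loop above (a sketch, using the values from the question):
glDrawArrays(GL_TRIANGLE_FAN, 0, 4); // gl_VertexID = 0..3 -> red branch
glDrawArrays(GL_TRIANGLE_FAN, 4, 4); // gl_VertexID = 4..7 -> white branch
So the Nvidia result (one red square, one white square) matches the specification.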
I have an input texture that is 852x640 and an output texture that is 612x612. I am passing the input through a shader and want the output to be scaled and cropped properly. I'm having trouble getting the square coordinates, texture coordinates, and viewport to work together properly.
I do not want to just crop; I want to scale as well, to keep as much of the image as possible. If I were using Photoshop I'd do this in two steps (in OpenGL I'm trying to do it in one step):
Scale the image to 814x612
Crop off the excess 101px from each side
I'm using standard square vertices and texture coordinates:
static const GLfloat squareVertices[] = {
    -1.0f, -1.0f,
     1.0f, -1.0f,
    -1.0f,  1.0f,
     1.0f,  1.0f,
};
static const GLfloat squareTextureVertices[] = {
    0.0f, 0.0f,
    1.0f, 0.0f,
    0.0f, 1.0f,
    1.0f, 1.0f,
};
I don't know exactly what the viewport should be.
The viewport would be 612x612 pixels.
To scale and crop, the easiest way is to set the vertices to cover the 612x612 rect (in your case squareVertices can stay unchanged) and set the texture coordinates so that the left and right sides are cropped out:
static const GLfloat squareTextureVertices[] = {
    (852.0f - 640.0f) / 852.0f * 0.5f,        0.0f,
    1.0f - (852.0f - 640.0f) / 852.0f * 0.5f, 0.0f,
    (852.0f - 640.0f) / 852.0f * 0.5f,        1.0f,
    1.0f - (852.0f - 640.0f) / 852.0f * 0.5f, 1.0f,
};
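The same idea generalizes to any input and output size. As a hedged sketch (the function name and parameter layout are mine, not from the answer above), the crop fractions can be computed like this:
#include <string.h>
#include <GLES2/gl2.h>

// Fill texCoords with the scale-and-crop (aspect-fill) mapping of a source
// image of inW x inH into an output of outW x outH, cropping the overflow
// equally on both sides.
void aspectFillTexCoords(float inW, float inH, float outW, float outH,
                         GLfloat texCoords[8])
{
    float scale = (inW / inH > outW / outH) ? outH / inH : outW / inW;
    float cropU = (inW * scale - outW) / (inW * scale) * 0.5f; // horizontal crop
    float cropV = (inH * scale - outH) / (inH * scale) * 0.5f; // vertical crop
    const GLfloat coords[8] = {
        cropU,        cropV,         // bottom left
        1.0f - cropU, cropV,         // bottom right
        cropU,        1.0f - cropV,  // top left
        1.0f - cropU, 1.0f - cropV,  // top right
    };
    memcpy(texCoords, coords, sizeof(coords));
}
Calling aspectFillTexCoords(852.0f, 640.0f, 612.0f, 612.0f, squareTextureVertices) reproduces the hard-coded values above: roughly a 12.4% crop on each side horizontally and none vertically.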
I have a cube defined as:
float vertices[] = {
    // Vertices grouped by face
    // Front face (z = +1)
    -1.0f, -1.0f,  1.0f, // Vertex 0
     1.0f, -1.0f,  1.0f, // v1
    -1.0f,  1.0f,  1.0f, // v2
     1.0f,  1.0f,  1.0f, // v3
    // Right face (x = +1)
     1.0f, -1.0f,  1.0f, // ...
     1.0f, -1.0f, -1.0f,
     1.0f,  1.0f,  1.0f,
     1.0f,  1.0f, -1.0f,
    // Back face (z = -1)
     1.0f, -1.0f, -1.0f,
    -1.0f, -1.0f, -1.0f,
     1.0f,  1.0f, -1.0f,
    -1.0f,  1.0f, -1.0f,
    // Left face (x = -1)
    -1.0f, -1.0f, -1.0f,
    -1.0f, -1.0f,  1.0f,
    -1.0f,  1.0f, -1.0f,
    -1.0f,  1.0f,  1.0f,
    // Bottom face (y = -1)
    -1.0f, -1.0f, -1.0f,
     1.0f, -1.0f, -1.0f,
    -1.0f, -1.0f,  1.0f,
     1.0f, -1.0f,  1.0f,
    // Top face (y = +1)
    -1.0f,  1.0f,  1.0f,
     1.0f,  1.0f,  1.0f,
    -1.0f,  1.0f, -1.0f,
     1.0f,  1.0f, -1.0f,
};
What are the normals for this cube? I need the actual values for the normals.
Do we need 6 or 12 normals? OpenGL ES uses only triangles, which suggests we need 12 normals, but I could be wrong.
Normals are specified per vertex, and since the normals of the three faces that share each vertex are mutually orthogonal, you'll get some really wonky-looking results if you specify the cube with just 8 vertices and average the three face normals to get each vertex normal. It'll be shaded like a sphere, but shaped like a cube.
You'll instead need to specify 24 vertices, so that each face of the cube is drawn without sharing vertices with any other face.
As to the values, 'tis dead easy. If we assume that x increases to the right, y increases as we go up, and z increases as we move forwards, the normal for the right-hand face is (1, 0, 0), the left is (-1, 0, 0), the top is (0, 1, 0), and so on.
To summarise: don't draw a cube; draw six quads that just happen to have coincident vertices.
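Concretely, for the 24-vertex layout in the question (faces in the order front, right, back, left, bottom, top), the matching per-vertex normal array is just each face's normal repeated four times, once per vertex of that face:
float normals[] = {
    // Front face (z = +1)
     0.0f,  0.0f,  1.0f,   0.0f,  0.0f,  1.0f,   0.0f,  0.0f,  1.0f,   0.0f,  0.0f,  1.0f,
    // Right face (x = +1)
     1.0f,  0.0f,  0.0f,   1.0f,  0.0f,  0.0f,   1.0f,  0.0f,  0.0f,   1.0f,  0.0f,  0.0f,
    // Back face (z = -1)
     0.0f,  0.0f, -1.0f,   0.0f,  0.0f, -1.0f,   0.0f,  0.0f, -1.0f,   0.0f,  0.0f, -1.0f,
    // Left face (x = -1)
    -1.0f,  0.0f,  0.0f,  -1.0f,  0.0f,  0.0f,  -1.0f,  0.0f,  0.0f,  -1.0f,  0.0f,  0.0f,
    // Bottom face (y = -1)
     0.0f, -1.0f,  0.0f,   0.0f, -1.0f,  0.0f,   0.0f, -1.0f,  0.0f,   0.0f, -1.0f,  0.0f,
    // Top face (y = +1)
     0.0f,  1.0f,  0.0f,   0.0f,  1.0f,  0.0f,   0.0f,  1.0f,  0.0f,   0.0f,  1.0f,  0.0f,
};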
The normal of a surface is simply a direction vector, and since the normal is the same for any two coplanar surfaces, you only need 6 distinct surface normals. However, normals are often expected to be defined per vertex, in which case you'll need 36 (one for each vertex of each triangle on each face of the cube).
To compute the normals, simply use the following calculation:
http://www.opengl.org/wiki/Calculating_a_Surface_Normal
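That calculation boils down to taking the cross product of two edges of the triangle and normalizing it. A minimal sketch (the Vec3 type and function name are mine, not from the linked page):
#include <math.h>

typedef struct { float x, y, z; } Vec3;

// Unit surface normal of the triangle (a, b, c): normalize((b - a) x (c - a)).
Vec3 surfaceNormal(Vec3 a, Vec3 b, Vec3 c)
{
    Vec3 u = { b.x - a.x, b.y - a.y, b.z - a.z }; // first edge
    Vec3 v = { c.x - a.x, c.y - a.y, c.z - a.z }; // second edge
    Vec3 n = {
        u.y * v.z - u.z * v.y, // cross product
        u.z * v.x - u.x * v.z,
        u.x * v.y - u.y * v.x,
    };
    float len = sqrtf(n.x * n.x + n.y * n.y + n.z * n.z);
    if (len > 0.0f) { n.x /= len; n.y /= len; n.z /= len; }
    return n;
}
Applied to the axis-aligned cube above, this reproduces exactly the unit normals listed earlier.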