I am trying to set up my vertices and texture coordinates so that a single quad (two triangles) covers the entire OpenGL window. I then want to update the texture as I receive images from my camera.
It mostly works, except that my streaming texture never fills the entire screen and the texture pixels repeat outside the intended area (see images below).
How do I set up my vertices and texture coordinates so that the texture fills the quad without repeating?
Here is how I generate my texture:
glGenTextures(1, &m_dataFrame);
glBindTexture(GL_TEXTURE_2D, m_dataFrame);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, WIDTH, HEIGHT, 0, GL_RGBA, GL_UNSIGNED_BYTE, frame.bits());
// CUSTOM
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR );
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR );
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
Initial Attempts:
// Setup the vertices and texture coordinates
float scaler = 1.0f;
static const GLfloat squareVertices[] = {
-(1.0f/scaler), -(1.0f/scaler), 0.0f, // First triangle bottom left
(1.0f/scaler), -(1.0f/scaler), 0.0f,
-(1.0f/scaler), (1.0f/scaler), 0.0f,
(1.0f/scaler), -(1.0f/scaler), 0.0f, // Second triangle bottom right
(1.0f/scaler), (1.0f/scaler), 0.0f,
-(1.0f/scaler), (1.0f/scaler), 0.0f,
};
static const GLfloat textureVertices[] = {
-1.0f, -1.0f,
1.0f, -1.0f,
-1.0f, 1.0f,
1.0f, -1.0f,
1.0f, 1.0f,
-1.0f, 1.0f,
};
This results in the following image:
// Setup the vertices and texture coordinates
float x = 1.0f;
float y = 1.0f;
static const GLfloat squareVertices[] = {
-(x*2), -(y*2), 0.0f, // First triangle bottom left
(x*2), -(y*2), 0.0f,
-(x*2), (y*2), 0.0f,
(x*2), -(y*2), 0.0f, // Second triangle bottom right
(x*2), (y*2), 0.0f,
-(x*2), (y*2), 0.0f,
};
static const GLfloat textureVertices[] = {
-1.0f, -1.0f,
1.0f, -1.0f,
-1.0f, 1.0f,
1.0f, -1.0f,
1.0f, 1.0f,
-1.0f, 1.0f,
};
This results in the following image:
This is what I've used for rendering a textured quad on the Raspberry Pi (in Swift, but the syntax is similar to what you'd use for C). Setting up the texture parameters:
glEnable(GLenum(GL_TEXTURE_2D))
glGenTextures(1, &cameraOutputTexture)
glActiveTexture(GLenum(GL_TEXTURE0));
glBindTexture(GLenum(GL_TEXTURE_2D), cameraOutputTexture)
glTexParameteri(GLenum(GL_TEXTURE_2D), GLenum(GL_TEXTURE_MIN_FILTER), GL_NEAREST)
glTexParameteri(GLenum(GL_TEXTURE_2D), GLenum(GL_TEXTURE_MAG_FILTER), GL_NEAREST)
uploading the texture:
glActiveTexture(GLenum(GL_TEXTURE0))
glBindTexture(GLenum(GL_TEXTURE_2D), cameraOutputTexture)
glTexImage2D(GLenum(GL_TEXTURE_2D), 0, GL_RGB, 1280, 720, 0, GLenum(GL_RGB), GLenum(GL_UNSIGNED_BYTE), buffers[Int(currentBuffer)].start)
and rendering it:
glClear(GLenum(GL_COLOR_BUFFER_BIT))
shader.use()
let squareVertices:[GLfloat] = [
-1.0, -1.0,
1.0, -1.0,
-1.0, 1.0,
1.0, 1.0,
]
let textureCoordinates:[GLfloat] = [
0.0, 1.0,
1.0, 1.0,
0.0, 0.0,
1.0, 0.0,
]
glVertexAttribPointer(positionAttribute, 2, GLenum(GL_FLOAT), 0, 0, squareVertices)
glVertexAttribPointer(textureCoordinateAttribute, 2, GLenum(GL_FLOAT), 0, 0, textureCoordinates)
glUniform1i(GLint(textureCoordinateAttribute), 1)
glDrawArrays(GLenum(GL_TRIANGLE_STRIP), 0, 4)
eglSwapBuffers(display, surface)
I might be rotating the image to account for rotation in the camera that I'm using as an input source, so you may need to tweak the vertices or coordinates if you see your image rendering upside-down.
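If you want to keep your original two-triangle (non-strip) layout, the C equivalent would be something like the sketch below, assuming your attribute setup and shaders stay as in the question: positions span the full window in normalized device coordinates (-1 to 1), while texture coordinates stay in the 0 to 1 range, so with GL_CLAMP_TO_EDGE nothing repeats or smears. Flip the T coordinates (swap the 0s and 1s in the second column) if your camera frames come out upside-down.
// Hedged sketch: full-screen quad as two triangles, texture coordinates kept in [0, 1]
static const GLfloat squareVertices[] = {
    -1.0f, -1.0f, 0.0f, // first triangle: bottom left
     1.0f, -1.0f, 0.0f, //                 bottom right
    -1.0f,  1.0f, 0.0f, //                 top left
     1.0f, -1.0f, 0.0f, // second triangle: bottom right
     1.0f,  1.0f, 0.0f, //                  top right
    -1.0f,  1.0f, 0.0f, //                  top left
};
static const GLfloat textureVertices[] = {
    0.0f, 0.0f, // bottom left
    1.0f, 0.0f, // bottom right
    0.0f, 1.0f, // top left
    1.0f, 0.0f, // bottom right
    1.0f, 1.0f, // top right
    0.0f, 1.0f, // top left
};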
I'm rendering an image to the screen using DirectX 11, but the image becomes more saturated. Left is the loaded image and right is the original image.
The strange thing is that this only happens when I load large images. The resolution of the image I'm trying to display is 1080 x 675 and my window size is 1280 x 800. Also, although the original image has a high resolution, the rendered image becomes a little pixelated. This is solved if I use a LINEAR filter, but I'm curious why it happens. I'm fairly new to DirectX and I'm struggling.
Vertex data:
_vertices[0].p = { -1.0f, 1.0f, 0.0f };
//_vertices[0].c = { 1.0f, 1.0f, 1.0f, 1.0f };
_vertices[0].t = { 0.0f, 0.0f };
_vertices[1].p = { 1.0f, 1.0f, 0.0f };
//_vertices[1].c = { 1.0f, 1.0f, 1.0f, 1.0f };
_vertices[1].t = { 1.0f, 0.0f };
_vertices[2].p = { -1.0f, -1.0f, 0.0f };
//_vertices[2].c = { 1.0f, 1.0f, 1.0f, 1.0f };
_vertices[2].t = { 0.0f, 1.0f };
_vertices[3].p = { 1.0f, -1.0f, 0.0f };
//_vertices[3].c = { 1.0f, 1.0f, 1.0f, 1.0f };
_vertices[3].t = { 1.0f, 1.0f };
Vertex layout:
D3D11_INPUT_ELEMENT_DESC elementDesc[] = {
{ "POSITION", 0, DXGI_FORMAT_R32G32B32_FLOAT, 0, 0, D3D11_INPUT_PER_VERTEX_DATA, 0},
//{ "COLOR", 0, DXGI_FORMAT_R32G32B32A32_FLOAT, 0, 12, D3D11_INPUT_PER_VERTEX_DATA, 0},
{ "TEXTURE", 0, DXGI_FORMAT_R32G32_FLOAT, 0, 28, D3D11_INPUT_PER_VERTEX_DATA, 0},
};
Sampler state:
D3D11_SAMPLER_DESC samplerDesc;
ZeroMemory(&samplerDesc, sizeof(samplerDesc));
samplerDesc.Filter = D3D11_FILTER_MIN_MAG_MIP_POINT;
samplerDesc.AddressU = D3D11_TEXTURE_ADDRESS_WRAP;
samplerDesc.AddressV = D3D11_TEXTURE_ADDRESS_WRAP;
samplerDesc.AddressW = D3D11_TEXTURE_ADDRESS_WRAP;
device->CreateSamplerState(&samplerDesc, &g_defaultSS);
Vertex shader:
struct VS_IN
{
float3 p : POSITION;
//float4 c : COLOR;
float2 t : TEXTURE;
};
struct VS_OUT
{
float4 p : SV_POSITION;
//float4 c : COLOR0;
float2 t : TEXCOORD0;
};
VS_OUT VSMain(VS_IN input)
{
VS_OUT output = (VS_OUT)0;
output.p = float4(input.p, 1.0f);
//output.c = input.c;
output.t = input.t;
return output;
}
Pixel shader:
Texture2D g_texture : register(t0);
SamplerState g_sampleWrap : register(s0);
float4 PSMain(VS_OUT input) : SV_Target
{
float4 vColor = g_texture.Sample(g_sampleWrap, input.t);
return vColor; //* input.c;
}
This is most likely a colorspace issue. If you are rendering with 'linear colors' (which is recommended), then your image is likely in the sRGB colorspace. You can let the texture hardware deal with the gamma by using DXGI_FORMAT_*_SRGB formats for your texture, or you can do the conversion directly in the shader.
See these resources:
Linear-Space Lighting (i.e. Gamma)
Chapter 24. The Importance of Being Linear, GPU Gems 3
Gamma-correct rendering
In the DirectX Tool Kit, you can do various load-time tricks as well. See DDSTextureLoader and WICTextureLoader.
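For illustration, here is a minimal sketch of the first option (letting the texture hardware handle the gamma), assuming an 8-bit RGBA source image; device, pixels, imageWidth and imageHeight are placeholder names, since your texture-loading code isn't shown:
// Hedged sketch: create the texture with an sRGB format so sampling returns linear
// values. The swap chain / render target should use an *_SRGB format as well if you
// want the hardware to re-encode on output.
D3D11_TEXTURE2D_DESC texDesc = {};
texDesc.Width            = imageWidth;   // e.g. 1080 (placeholder)
texDesc.Height           = imageHeight;  // e.g. 675  (placeholder)
texDesc.MipLevels        = 1;
texDesc.ArraySize        = 1;
texDesc.Format           = DXGI_FORMAT_R8G8B8A8_UNORM_SRGB; // instead of DXGI_FORMAT_R8G8B8A8_UNORM
texDesc.SampleDesc.Count = 1;
texDesc.Usage            = D3D11_USAGE_IMMUTABLE;
texDesc.BindFlags        = D3D11_BIND_SHADER_RESOURCE;
D3D11_SUBRESOURCE_DATA initData = {};
initData.pSysMem     = pixels;           // decoded RGBA bytes (placeholder)
initData.SysMemPitch = imageWidth * 4;
ID3D11Texture2D* tex = nullptr;
ID3D11ShaderResourceView* srv = nullptr;
device->CreateTexture2D(&texDesc, &initData, &tex);
device->CreateShaderResourceView(tex, nullptr, &srv); // the view inherits the sRGB format
The alternative is to keep the UNORM format and apply the gamma conversion yourself in the pixel shader.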
I'm trying to make an app that draws an image in Android Studio, using the NDK and JNI to call C++ code that uses OpenGL ES. I have gone through the tutorial on how to do this in OpenGL at https://learnopengl.com/#!Getting-started/Textures, which uses GLSL 330 core. However, OpenGL ES 3.0 isn't supported on the Android emulator (noted in this link: https://developer.android.com/ndk/guides/stable_apis.html).
Therefore, I have to use GLSL ES #version 100, which doesn't support "layout", "in" and "out" in the shaders below. How should I edit them so that they run under #version 100, and does anything else in the source code have to change if I edit them? Thanks for your attention and your help.
Update: After searching around, I found out that I could use glGetAttribLocation to get the location of a variable in the vertex shader instead of using layout(location=0). However, there are no VAOs in GLSL ES #version 100, so I still couldn't figure out how it works without a VAO.
My Vertex shader:
#version 330 core
layout (location = 0) in vec3 position;
layout (location = 1) in vec3 color;
layout (location = 2) in vec2 texCoord;
out vec3 ourColor;
out vec2 TexCoord;
void main()
{
gl_Position = vec4(position, 1.0f);
ourColor = color;
// We flip the y-axis by subtracting the coordinate from 1. This is done because most images have their y-axis inverted relative to OpenGL's.
// TexCoord = texCoord;
TexCoord = vec2(texCoord.x, 1.0 - texCoord.y);
}
Fragment shader:
#version 330 core
in vec3 ourColor;
in vec2 TexCoord;
out vec4 color;
// Texture samplers
uniform sampler2D ourTexture1;
uniform sampler2D ourTexture2;
void main()
{
// Linearly interpolate between both textures (second texture is only slightly combined)
color = mix(texture(ourTexture1, TexCoord), texture(ourTexture2, TexCoord), 0.2);
}
Initialize the VAO, VBO and EBO:
// Set up vertex data (and buffer(s)) and attribute pointers
GLfloat vertices[] = {
// Positions // Colors // Texture Coords
0.5f, 0.5f, 0.0f, 1.0f, 0.0f, 0.0f, 1.0f, 1.0f, // Top Right
0.5f, -0.5f, 0.0f, 0.0f, 1.0f, 0.0f, 1.0f, 0.0f, // Bottom Right
-0.5f, -0.5f, 0.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, // Bottom Left
-0.5f, 0.5f, 0.0f, 1.0f, 1.0f, 0.0f, 0.0f, 1.0f // Top Left
};
GLuint indices[] = { // Note that we start from 0!
0, 1, 3, // First Triangle
1, 2, 3 // Second Triangle
};
GLuint VBO, VAO, EBO;
glGenVertexArrays(1, &VAO);
glGenBuffers(1, &VBO);
glGenBuffers(1, &EBO);
glBindVertexArray(VAO);
glBindBuffer(GL_ARRAY_BUFFER, VBO);
glBufferData(GL_ARRAY_BUFFER, sizeof(vertices), vertices, GL_STATIC_DRAW);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, EBO);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, sizeof(indices), indices, GL_STATIC_DRAW);
// Position attribute
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 8 * sizeof(GLfloat), (GLvoid*)0);
glEnableVertexAttribArray(0);
// Color attribute
glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, 8 * sizeof(GLfloat), (GLvoid*)(3 * sizeof(GLfloat)));
glEnableVertexAttribArray(1);
// TexCoord attribute
glVertexAttribPointer(2, 2, GL_FLOAT, GL_FALSE, 8 * sizeof(GLfloat), (GLvoid*)(6 * sizeof(GLfloat)));
glEnableVertexAttribArray(2);
glBindVertexArray(0); // Unbind VAO
Draw an image:
// Clear the colorbuffer
glClearColor(0.2f, 0.3f, 0.3f, 1.0f);
glClear(GL_COLOR_BUFFER_BIT);
// Activate shader
ourShader.Use();
// Bind Textures using texture units
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, texture1);
glUniform1i(glGetUniformLocation(ourShader.Program, "ourTexture1"), 0);
glActiveTexture(GL_TEXTURE1);
glBindTexture(GL_TEXTURE_2D, texture2);
glUniform1i(glGetUniformLocation(ourShader.Program, "ourTexture2"), 1);
// Draw container
glBindVertexArray(VAO);
glDrawElements(GL_TRIANGLES, 6, GL_UNSIGNED_INT, 0);
glBindVertexArray(0);
Finally, I managed to solve it. In GLSL ES 100 there is no in, out or layout(location=0), as I mentioned above. Therefore, I had to replace them:
in => attribute
out => varying
And completely remove layout(location=0), because as far as I am aware there is nothing that does the same thing in GLSL ES 100.
Because layout(location=0) is removed, we have to tell the program the locations of our vertex attributes, which are position, color and texCoord in this case. With layout(location=0) we could simply use the hard-coded location 0:
glVertexAttribPointer(0,3, GL_FLOAT, GL_FALSE, 8 * sizeof(GLfloat), (GLvoid*)0);
glEnableVertexAttribArray(0);
Without it, we first need to get the location of the vertex attribute with:
GLint mPosition = glGetAttribLocation(ourProgram, "position");
and then replace the hard-coded 0, 1 or 2 with the queried mPosition:
glVertexAttribPointer(mPosition, 3, GL_FLOAT, GL_FALSE, 8 * sizeof(GLfloat), (GLvoid*)0);
glEnableVertexAttribArray(mPosition);
The same goes for color and texCoord.
Below are my edited vertex shader, my edited fragment shader, and how I initialize the VBOs and pass in our data:
Edited vertex shader for GLSL ES 100:
auto gVertexShader =
"attribute vec3 position;\n"
"attribute vec3 color;\n"
"attribute vec2 texCoord;\n"
"varying vec3 ourColor;\n"
"varying vec2 TexCoord;\n"
"void main()\n"
"{\n"
"gl_Position = vec4(position,1.0); // Add the xOffset to the x position of the vertex position\n"
"ourColor = color;\n"
"TexCoord= vec2(texCoord.x,1.0-texCoord.y);\n"
"}\n";
Edited fragment shader for GLSL ES 100:
static const char FRAGMENT_SHADER[] =
"#version 100\n"
"precision mediump float;\n"
"varying vec3 ourColor;\n"
"varying vec2 TexCoord;\n"
"uniform sampler2D ourTexture1;\n"
"uniform sampler2D ourTexture2;\n"
"void main() {\n"
"  // Linearly interpolate between both textures, as in the original 330 shader\n"
"  gl_FragColor = mix(texture2D(ourTexture1, TexCoord), texture2D(ourTexture2, TexCoord), 0.2);\n"
"}\n";
My initBuffers() function:
// Set up vertex data (and buffer(s)) and attribute pointers
GLfloat vertices[] = {
// Positions // Colors // Texture Coords
0.5f, 0.5f, 0.0f, 1.0f, 0.0f, 0.0f, 1.0f, 1.0f, // Top Right
0.5f, -0.5f, 0.0f, 0.0f, 1.0f, 0.0f, 1.0f, 0.0f, // Bottom Right
-0.5f, -0.5f, 0.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, // Bottom Left
-0.5f, 0.5f, 0.0f, 1.0f, 1.0f, 0.0f, 0.0f, 1.0f // Top Left
};
GLuint indices[] = { // Note that we start from 0!
0, 1, 3, // First Triangle
1, 2, 3 // Second Triangle
};
void initBuffers()
{
GLint mPosition, mColor, mTextCoord;
GLuint VBOs[2]; // Buffers to store the vertices and indices and transfer them to the GPU
glGenBuffers(2, VBOs); // Generate the VBOs
glBindBuffer(GL_ARRAY_BUFFER, VBOs[0]); // Bind the vertex array for OpenGL to use
glBufferData(GL_ARRAY_BUFFER, sizeof(vertices), vertices, GL_STATIC_DRAW);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, VBOs[1]);//Bind the indices for information about drawing sequence
glBufferData(GL_ELEMENT_ARRAY_BUFFER, sizeof(indices), indices, GL_STATIC_DRAW);
// 1. set the vertex attributes pointers
// Position Attribute
mPosition = glGetAttribLocation(Program, "position");
glVertexAttribPointer(mPosition, 3, GL_FLOAT, GL_FALSE, 8 * sizeof(GLfloat), (GLvoid*)0);
glEnableVertexAttribArray(mPosition);
// Color Attribute
mColor = glGetAttribLocation(Program, "color");
glVertexAttribPointer(mColor, 3, GL_FLOAT, GL_FALSE, 8 * sizeof(GLfloat), (GLvoid*)(3 * sizeof(GLfloat)));
glEnableVertexAttribArray(mColor);
//Texture Coordinate Attribute
mTextCoord = glGetAttribLocation(Program, "texCoord");
glVertexAttribPointer(mTextCoord, 2, GL_FLOAT, GL_FALSE, 8 * sizeof(GLfloat), (GLvoid*)(6 * sizeof(GLfloat)));
glEnableVertexAttribArray(mTextCoord);
}
Then we replace the VAO binding in our draw function with a call to the initBuffers() function:
// Clear the colorbuffer
glClearColor(0.2f, 0.3f, 0.3f, 1.0f);
glClear(GL_COLOR_BUFFER_BIT);
// Activate shader
ourShader.Use();
// Bind Textures using texture units
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, texture1);
glUniform1i(glGetUniformLocation(ourShader.Program, "ourTexture1"), 0);
glActiveTexture(GL_TEXTURE1);
glBindTexture(GL_TEXTURE_2D, texture2);
glUniform1i(glGetUniformLocation(ourShader.Program, "ourTexture2"), 1);
// Draw container
initBuffers();
glDrawElements(GL_TRIANGLES, 6, GL_UNSIGNED_INT, 0);
glBindVertexArray(0);
Hope this helps anyone who has the same problem as me. Please feel free to ask if you have any questions :).
I'm trying to make an app that uses OpenGL ES 3.0 to draw an image. I use Android Studio with the NDK and JNI for this. When I run it I get this error in Logcat; what am I doing wrong? Your attention and help are very much appreciated.
Error:
Could not compile shader 35633:
Vertex shader compilation failed.
ERROR: 0:2: 'position' : not a legal layout qualifier id
ERROR: 0:2: '' : the location is not within attribute range [0, MAX_ATTRIBUTES-1]
ERROR: 0:3: 'position' : not a legal layout qualifier id
ERROR: 0:3: '' : the location is not within attribute range [0, MAX_ATTRIBUTES-1]
ERROR: 0:4: 'position' : not a legal layout qualifier id
ERROR: 0:4: '' : the location is not within attribute range [0, MAX_ATTRIBUTES-1]
ERROR: 6 compilation errors. No code generated.
My Vertex Shader:
"#version 300 es\n"
"layout (position=0) in vec3 position;\n"
"layout (position=1) in vec3 color;\n"
"layout (position=2) in vec2 texCoord;\n"
"out vec3 ourColor;\n"
"out vec2 TexCoord;\n"
"void main()\n"
"{\n"
"gl_Position = vec4(position,1.0f); // Add the xOffset to the x position of the vertex position\n"
"ourColor = color;\n"
"TexCoord= vec2(texCoord.x,1.0f-texCoord.y);\n"
"}";
My initBuffers() function to set up all the buffers:
GLfloat recVertices[] = {
// Positions // Colors // Texture Coords
0.5f, 0.5f, 0.0f, 1.0f, 0.0f, 0.0f, 1.0f, 1.0f, // Top Right
0.5f, -0.5f, 0.0f, 0.0f, 1.0f, 0.0f, 1.0f, 0.0f, // Bottom Right
-0.5f, -0.5f, 0.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, // Bottom Left
-0.5f, 0.5f, 0.0f, 1.0f, 1.0f, 0.0f, 0.0f, 1.0f // Top Left
};
GLuint indices[] = { // Note that we start from 0!
0, 1, 3, // First Triangle
1, 2, 3 // Second Triangle
};
GLuint VAO;
void initBuffers()
{
GLuint VBOs[2], EBO; // Buffers to store the vertices and indices and transfer them to the GPU
glGenVertexArrays (1,&VAO);
glGenBuffers(1, VBOs);
glGenBuffers(1, &EBO);
glBindVertexArray (VAO);
// Bind the Vertex Array
glBindBuffer(GL_ARRAY_BUFFER, VBOs[0]); // 0. Copy the vertex array for OpenGL to use
glBufferData(GL_ARRAY_BUFFER, sizeof(recVertices), recVertices, GL_STATIC_DRAW);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, EBO);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, sizeof(indices), indices, GL_STATIC_DRAW);
// 1. set the vertex attributes pointers
// Position Attribute
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 8 * sizeof(GLfloat), (GLvoid*)0);
glEnableVertexAttribArray(0);
// Color Attribute
glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, 8 * sizeof(GLfloat), (GLvoid*)(3 * sizeof(GLfloat)));
glEnableVertexAttribArray(1);
//Texture Coordinate Attribute
glVertexAttribPointer(2, 2, GL_FLOAT, GL_FALSE, 8 * sizeof(GLfloat), (GLvoid*)(6 * sizeof(GLfloat)));
glEnableVertexAttribArray(2);
glBindVertexArray(0);
}
My generateTexture() function to load the image:
void generateTexture()
{
glGenTextures(1 , &mTexture);
glBindTexture(GL_TEXTURE_2D, mTexture);// Bind our 2D texture so that following set up will be applied
//Set texture wrapping parameter
glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_WRAP_S,GL_MIRRORED_REPEAT);
glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_WRAP_T,GL_MIRRORED_REPEAT);
//Set texture Filtering parameter
glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MIN_FILTER,GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MAG_FILTER,GL_NEAREST);
//Load the image
int picWidth,picHeight,n;
unsigned char* image = stbi_load("D:\\Lighthouse.jpg", &picWidth, &picHeight, &n, 0);
//Generate the image
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB , picWidth , picHeight, 0, GL_RGB, GL_UNSIGNED_BYTE, image);
glGenerateMipmap(GL_TEXTURE_2D);
stbi_image_free(image);// Free the reference to the image
glBindTexture(GL_TEXTURE_2D,0); //Unbind 2D textures
}
And Finally my Render function:
void renderFrame() {
static float grey;
grey += 0.01f;
if (grey > 1.0f) {
grey = 0.0f;
}
generateTexture();
glClearColor(grey+0.05f, grey-0.03f, grey+0.02f, grey-0.04f);
checkGlError("glClearColor");
glClear( GL_DEPTH_BUFFER_BIT | GL_COLOR_BUFFER_BIT);
checkGlError("glClear");
glUseProgram(gProgram);
checkGlError("glUseProgram");
glActiveTexture(GL_TEXTURE0);
checkGlError("glActiveTexture");
glBindTexture(GL_TEXTURE_2D,mTexture);
checkGlError("glBindTexture");
GLint mlocation = glGetUniformLocation(gProgram,"ourTexture");
checkGlError("glGetUniformLocation");
glUniform1i(mlocation,0);
checkGlError("glUniform1i");
initBuffers();
glBindVertexArray(VAO);
checkGlError("glBindVertexArray");
glDrawElements(GL_TRIANGLES,6,GL_UNSIGNED_INT,0);
}
Not sure about GLSL ES 3, but in "standard" GLSL this should be layout (location = n), not layout (position = n).
You can have a look at the wiki to get the different layout qualifiers available.
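As far as I know, GLSL ES 3.00 (the #version 300 es used in the question) accepts the same qualifier, so a corrected vertex shader source could look like this sketch (only the qualifier keyword changes):
"#version 300 es\n"
"layout (location = 0) in vec3 position;\n"
"layout (location = 1) in vec3 color;\n"
"layout (location = 2) in vec2 texCoord;\n"
"out vec3 ourColor;\n"
"out vec2 TexCoord;\n"
"void main()\n"
"{\n"
"gl_Position = vec4(position, 1.0);\n"
"ourColor = color;\n"
"TexCoord = vec2(texCoord.x, 1.0 - texCoord.y);\n"
"}";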
I am trying to draw a cube with a different color on each face using OpenGL ES 2.0. Right now I can only draw the cube in a single color. I know I need to use glVertexAttribPointer in this case instead of a uniform, but I have probably added it wrongly. The screen shows nothing after I implement my code. Here is my code; can anybody give me a hand? Thank you so much!
public class MyCube {
private FloatBuffer vertexBuffer;
private ShortBuffer drawListBuffer;
private ShortBuffer[] ArrayDrawListBuffer;
private FloatBuffer colorBuffer;
private int mProgram;
//For Projection and Camera Transformations
private final String vertexShaderCode =
// This matrix member variable provides a hook to manipulate
// the coordinates of the objects that use this vertex shader
"uniform mat4 uMVPMatrix;" +
"attribute vec4 vPosition;" +
"void main() {" +
// the matrix must be included as a modifier of gl_Position
// Note that the uMVPMatrix factor *must be first* in order
// for the matrix multiplication product to be correct.
" gl_Position = uMVPMatrix * vPosition;" +
"}";
// Use to access and set the view transformation
private int mMVPMatrixHandle;
private final String fragmentShaderCode =
"precision mediump float;" +
"uniform vec4 vColor;" +
"void main() {" +
" gl_FragColor = vColor;" +
"}";
// number of coordinates per vertex in this array
static final int COORDS_PER_VERTEX = 3;
float cubeCoords[] = {
-0.5f, 0.5f, 0.5f, // front top left 0
-0.5f, -0.5f, 0.5f, // front bottom left 1
0.5f, -0.5f, 0.5f, // front bottom right 2
0.5f, 0.5f, 0.5f, // front top right 3
-0.5f, 0.5f, -0.5f, // back top left 4
0.5f, 0.5f, -0.5f, // back top right 5
-0.5f, -0.5f, -0.5f, // back bottom left 6
0.5f, -0.5f, -0.5f, // back bottom right 7
};
// Set color with red, green, blue and alpha (opacity) values
float color[] = { 0.63671875f, 0.76953125f, 0.22265625f, 1.0f };
float red[] = { 1.0f, 0.0f, 0.0f, 1.0f };
float blue[] = { 0.0f, 0.0f, 1.0f, 1.0f };
private short drawOrder[] = {
0, 1, 2, 0, 2, 3,//front
0, 4, 5, 0, 5, 3, //Top
0, 1, 6, 0, 6, 4, //left
3, 2, 7, 3, 7 ,5, //right
1, 2, 7, 1, 7, 6, //bottom
4, 6, 7, 4, 7, 5};//back (order to draw vertices)
final float[] cubeColor =
{
// Front face (red)
1.0f, 0.0f, 0.0f, 1.0f,
1.0f, 0.0f, 0.0f, 1.0f,
1.0f, 0.0f, 0.0f, 1.0f,
1.0f, 0.0f, 0.0f, 1.0f,
1.0f, 0.0f, 0.0f, 1.0f,
1.0f, 0.0f, 0.0f, 1.0f,
// Top face (green)
0.0f, 1.0f, 0.0f, 1.0f,
0.0f, 1.0f, 0.0f, 1.0f,
0.0f, 1.0f, 0.0f, 1.0f,
0.0f, 1.0f, 0.0f, 1.0f,
0.0f, 1.0f, 0.0f, 1.0f,
0.0f, 1.0f, 0.0f, 1.0f,
// Left face (blue)
0.0f, 0.0f, 1.0f, 1.0f,
0.0f, 0.0f, 1.0f, 1.0f,
0.0f, 0.0f, 1.0f, 1.0f,
0.0f, 0.0f, 1.0f, 1.0f,
0.0f, 0.0f, 1.0f, 1.0f,
0.0f, 0.0f, 1.0f, 1.0f,
// Right face (yellow)
1.0f, 1.0f, 0.0f, 1.0f,
1.0f, 1.0f, 0.0f, 1.0f,
1.0f, 1.0f, 0.0f, 1.0f,
1.0f, 1.0f, 0.0f, 1.0f,
1.0f, 1.0f, 0.0f, 1.0f,
1.0f, 1.0f, 0.0f, 1.0f,
// Bottom face (cyan)
0.0f, 1.0f, 1.0f, 1.0f,
0.0f, 1.0f, 1.0f, 1.0f,
0.0f, 1.0f, 1.0f, 1.0f,
0.0f, 1.0f, 1.0f, 1.0f,
0.0f, 1.0f, 1.0f, 1.0f,
0.0f, 1.0f, 1.0f, 1.0f,
// Back face (magenta)
1.0f, 0.0f, 1.0f, 1.0f,
1.0f, 0.0f, 1.0f, 1.0f,
1.0f, 0.0f, 1.0f, 1.0f,
1.0f, 0.0f, 1.0f, 1.0f,
1.0f, 0.0f, 1.0f, 1.0f,
1.0f, 0.0f, 1.0f, 1.0f
};
public MyCube() {
// initialize vertex byte buffer for shape coordinates
ByteBuffer bb = ByteBuffer.allocateDirect(
// (# of coordinate values * 4 bytes per float)
cubeCoords.length * 4);
bb.order(ByteOrder.nativeOrder());
vertexBuffer = bb.asFloatBuffer();
vertexBuffer.put(cubeCoords);
vertexBuffer.position(0);
// initialize byte buffer for the draw list
ByteBuffer dlb = ByteBuffer.allocateDirect(
// (# of coordinate values * 2 bytes per short)
drawOrder.length * 2);
dlb.order(ByteOrder.nativeOrder());
drawListBuffer = dlb.asShortBuffer();
drawListBuffer.put(drawOrder);
drawListBuffer.position(0);
// initialize byte buffer for the color list
ByteBuffer cb = ByteBuffer.allocateDirect(
// (# of color values * 4 bytes per float)
cubeColor.length * 4);
cb.order(ByteOrder.nativeOrder());
colorBuffer = cb.asFloatBuffer();
colorBuffer.put(cubeColor);
colorBuffer.position(0);
int vertexShader = MyRenderer.loadShader(GLES20.GL_VERTEX_SHADER,
vertexShaderCode);
int fragmentShader = MyRenderer.loadShader(GLES20.GL_FRAGMENT_SHADER,
fragmentShaderCode);
// create empty OpenGL ES Program
mProgram = GLES20.glCreateProgram();
// add the vertex shader to program
GLES20.glAttachShader(mProgram, vertexShader);
// add the fragment shader to program
GLES20.glAttachShader(mProgram, fragmentShader);
// creates OpenGL ES program executables
GLES20.glLinkProgram(mProgram);
}
private int mPositionHandle;
private int mColorHandle;
private final int vertexCount = cubeCoords.length / COORDS_PER_VERTEX;
private final int vertexStride = COORDS_PER_VERTEX * 4; // 4 bytes per vertex
public void draw(float[] mvpMatrix) { // pass in the calculated transformation matrix
// Add program to OpenGL ES environment
GLES20.glUseProgram(mProgram);
// get handle to vertex shader's vPosition member
mPositionHandle = GLES20.glGetAttribLocation(mProgram, "vPosition");
// get handle to fragment shader's vColor member
mColorHandle = GLES20.glGetUniformLocation(mProgram, "vColor");
// Enable a handle to the cube vertices
GLES20.glEnableVertexAttribArray(mPositionHandle);
// Prepare the cube coordinate data
GLES20.glVertexAttribPointer(mPositionHandle, COORDS_PER_VERTEX,
GLES20.GL_FLOAT, false,
vertexStride, vertexBuffer);
// Set color for drawing the triangle
//mColorHandle = GLES20.glGetUniformLocation(mProgram, "vColor");
// Enable a handle to the cube colors
GLES20.glEnableVertexAttribArray(mColorHandle);
// Prepare the cube color data
GLES20.glVertexAttribPointer(mColorHandle, 4, GLES20.GL_FLOAT, false, 16, colorBuffer);
// Set the color for each of the faces
//GLES20.glUniform4fv(mColorHandle, 1, blue, 0);
//***When I add this line of code above, it can show a cube totally in blue.***
// get handle to shape's transformation matrix
mMVPMatrixHandle = GLES20.glGetUniformLocation(mProgram, "uMVPMatrix");
// Pass the projection and view transformation to the shader
GLES20.glUniformMatrix4fv(mMVPMatrixHandle, 1, false, mvpMatrix, 0);
// Draw the cube
GLES20.glDrawElements(GLES20.GL_TRIANGLES, drawOrder.length, GLES20.GL_UNSIGNED_SHORT, drawListBuffer);
// Disable vertex array
GLES20.glDisableVertexAttribArray(mPositionHandle);
GLES20.glDisableVertexAttribArray(mColorHandle);
GLES20.glDisableVertexAttribArray(mMVPMatrixHandle);
}
}
Remove the uniform variable vColor from the fragment shader. Declare the new per-vertex color as an attribute input in the vertex shader, write its value to a varying output of the vertex shader, and read that varying back as an input in the fragment shader.
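A sketch of what that change could look like in the question's shader strings (the aColor attribute name is chosen here just for illustration):
// Vertex shader: take the color in as an attribute and hand it to the
// fragment shader through a varying.
private final String vertexShaderCode =
    "uniform mat4 uMVPMatrix;" +
    "attribute vec4 vPosition;" +
    "attribute vec4 aColor;" +   // per-vertex color, fed by glVertexAttribPointer
    "varying vec4 vColor;" +
    "void main() {" +
    "  gl_Position = uMVPMatrix * vPosition;" +
    "  vColor = aColor;" +
    "}";
// Fragment shader: vColor is now a varying instead of a uniform.
private final String fragmentShaderCode =
    "precision mediump float;" +
    "varying vec4 vColor;" +
    "void main() {" +
    "  gl_FragColor = vColor;" +
    "}";
With that change, mColorHandle has to come from GLES20.glGetAttribLocation(mProgram, "aColor") rather than glGetUniformLocation, and the existing glVertexAttribPointer(mColorHandle, 4, GLES20.GL_FLOAT, false, 16, colorBuffer) call then feeds the attribute.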
I am trying to understand the perspective view in OpenGL.
What I am trying to do is render two identical triangles, but at different z coordinates, so I assume they should appear at different sizes. Here is my code:
CUSTOMVERTEX Vertices[] =
{
{ 0.5f, 1.0f, 0.5f, 1.0f, 0.0f, 0.0f, 1.0f }, // x, y, z, color
{ 0.0f, 0.0f, 0.5f, 0.0f, 1.0f, 0.0f, 1.0f },
{ 1.0f, 0.0f, 0.5f, 0.0f, 0.0f, 1.0f, 1.0f },
};
and for drawing
glDrawArrays(GL_TRIANGLES,0, 3);
glTranslatef(0.0f,-1.0f,-1.5f);
glDrawArrays(GL_TRIANGLES,0, 3);
and here is how I init some attributes
glShadeModel(GL_SMOOTH);
glClearDepthf(1.0f);
glEnable(GL_DEPTH_TEST);
glDepthFunc(GL_LEQUAL);
glEnable(GL_CULL_FACE);
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
glFrustumf(-1.0f, 1.0f, -1.0f, 1.0f, 0.0f, 100.0f);
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
but the triangles appear at the same size, just at different locations because of the Y translation.
Can someone please explain to me?
You cannot use 0.0 for the perspective projection's near-Z value. It must be greater than zero, preferably on the order of 1.0 or so.
As Nicol said, you should use a value greater than 0 when constructing the frustum. I strongly suggest you read this article to understand why.
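For example, a minimal sketch under those constraints (assuming the question's vertex array, with its z of 0.5, is already bound): use a positive near value and translate the geometry onto the negative Z axis before drawing, and the farther triangle comes out smaller.
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
glFrustumf(-1.0f, 1.0f, -1.0f, 1.0f, 1.0f, 100.0f); // near = 1.0, not 0.0
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
glTranslatef(0.0f, 0.0f, -2.0f);  // first triangle ends up at eye-space z = -1.5
glDrawArrays(GL_TRIANGLES, 0, 3);
glTranslatef(0.0f, 0.0f, -2.0f);  // translations accumulate: second triangle at z = -3.5, so it is drawn smaller
glDrawArrays(GL_TRIANGLES, 0, 3);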