How do I display an image texture in GLES2?
The source below does the following:
initializes the GLES2 display, surface, and context,
creates an offscreen framebuffer,
loads an RGBA image into a texture,
clears the screen to blue,
tries to display the loaded image texture (I failed to find the correct API for GLES2),
reads back the FBO and writes it to a file.
For drawing, the glEnableClientState and glVertexPointer APIs are not supported in GLES2. How do I display the loaded image texture in GLES2?
With the source below, the buffer returned by glReadPixels contains only blue.
unsigned char *video_raw = loadFile("./video.raw");//RGBA raw image
int iConfigs;
EGLConfig eglConfig;
EGLint ai32ContextAttribs[] = { EGL_CONTEXT_CLIENT_VERSION, 2,EGL_NONE };
EGLDisplay eglDisplay = eglGetDisplay((EGLNativeDisplayType)0);
eglInitialize(eglDisplay, 0, 0);
eglBindAPI(EGL_OPENGL_ES_API);
EGLint pi32ConfigAttribs[5];
pi32ConfigAttribs[0] = EGL_SURFACE_TYPE;
pi32ConfigAttribs[1] = EGL_WINDOW_BIT;
pi32ConfigAttribs[2] = EGL_RENDERABLE_TYPE;
pi32ConfigAttribs[3] = EGL_OPENGL_ES2_BIT;
pi32ConfigAttribs[4] = EGL_NONE;
eglChooseConfig(eglDisplay, pi32ConfigAttribs, &eglConfig, 1, &iConfigs);
EGLSurface eglSurface = eglCreatePbufferSurface(eglDisplay, eglConfig, NULL);
EGLContext eglContext = eglCreateContext(eglDisplay, eglConfig, NULL, ai32ContextAttribs);
eglMakeCurrent(eglDisplay, eglSurface, eglSurface, eglContext);
GLuint fboId = 0;
GLuint renderBufferWidth = 960;
GLuint renderBufferHeight = 540;
glGenFramebuffers(1, &fboId);
glBindFramebuffer(GL_FRAMEBUFFER, fboId);
GLuint renderBuffer;
glGenRenderbuffers(1, &renderBuffer);
glBindRenderbuffer(GL_RENDERBUFFER, renderBuffer);
glRenderbufferStorage(GL_RENDERBUFFER, GL_RGB565, renderBufferWidth, renderBufferHeight);
glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_RENDERBUFFER, renderBuffer);
glClearColor(0.0,0.0,1.0,1.0);
glClear(GL_COLOR_BUFFER_BIT);
glEnable(GL_TEXTURE_2D);
GLuint texture_object_id;
glGenTextures(1, &texture_object_id);
glBindTexture(GL_TEXTURE_2D, texture_object_id);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, renderBufferWidth, renderBufferHeight, 0, GL_RGBA, GL_UNSIGNED_BYTE, video_raw);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
GLfloat vtx1[] = { -1, -1, 0, -1, 1, 0, 1, 1, 0, 1, -1, 0 };
GLfloat tex1[] = { 0, 0, 0, 1, 1, 1, 1, 0 };
/*glEnableClientState(GL_VERTEX_ARRAY);
glEnableClientState(GL_TEXTURE_COORD_ARRAY);
glVertexPointer(3, GL_FLOAT, 0, vtx1);
glTexCoordPointer(2, GL_FLOAT, 0, tex1);
glDrawArrays(GL_TRIANGLE_FAN, 0, 4);
glDisableClientState(GL_VERTEX_ARRAY);
glDisableClientState(GL_TEXTURE_COORD_ARRAY);*/
eglSwapBuffers( eglDisplay, eglSurface);
//read & write to a file
int size = 4 * renderBufferHeight * renderBufferWidth;
unsigned char *data2 = new unsigned char[size];
glReadPixels(0, 0, renderBufferWidth, renderBufferHeight, GL_RGBA, GL_UNSIGNED_BYTE, data2);
dumptoFile("./read1.raw", size, data2);
Edit 1:
@Rabbid76, thanks for the reply. When I used your vertex shader, the line "in vec3 inPos;\n" failed to compile, so I replaced "in" with "uniform".
With your input added, the source below produces a black screen.
static const GLuint WIDTH = 960;
static const GLuint HEIGHT = 540;
static const GLchar* vertex_shader_source =
"#version 100\n"
"precision mediump float;\n"
"uniform vec3 inPos;\n"
"uniform vec2 inUV;\n"
"varying vec2 vUV;\n"
"void main(){\n"
" vUV = inUV;\n"
" gl_Position = vec4(inPos, 1.0);\n"
"}\n";
static const GLchar* fragment_shader_source =
"#version 100\n"
"precision mediump float;\n"
"varying vec2 vUV;\n"
"uniform sampler2D u_texture;\n"
"void main(){\n"
" gl_FragColor = texture2D(u_texture, vUV);\n"
"}\n";
int main(int argc, char **argv)
{
unsigned char *video_raw = loadFile("./video.raw");
int iConfigs;
EGLConfig eglConfig;
EGLint ai32ContextAttribs[] = { EGL_CONTEXT_CLIENT_VERSION, 2, EGL_NONE };
EGLDisplay eglDisplay = eglGetDisplay((EGLNativeDisplayType) 0);
eglInitialize(eglDisplay, 0, 0);
eglBindAPI(EGL_OPENGL_ES_API);
EGLint pi32ConfigAttribs[5];
pi32ConfigAttribs[0] = EGL_SURFACE_TYPE;
pi32ConfigAttribs[1] = EGL_WINDOW_BIT;
pi32ConfigAttribs[2] = EGL_RENDERABLE_TYPE;
pi32ConfigAttribs[3] = EGL_OPENGL_ES2_BIT;
pi32ConfigAttribs[4] = EGL_NONE;
eglChooseConfig(eglDisplay, pi32ConfigAttribs, &eglConfig, 1, &iConfigs);
EGLSurface eglSurface = eglCreatePbufferSurface(eglDisplay, eglConfig, NULL);
EGLContext eglContext = eglCreateContext(eglDisplay, eglConfig, NULL, ai32ContextAttribs);
eglMakeCurrent(eglDisplay, eglSurface, eglSurface, eglContext);
GLuint shader_program, framebuffer, renderBuffer;
glGenRenderbuffers(1, &renderBuffer);
glBindRenderbuffer(GL_RENDERBUFFER, renderBuffer);
glRenderbufferStorage(GL_RENDERBUFFER, GL_RGBA, WIDTH, HEIGHT);
glGenFramebuffers(1, &framebuffer);
glBindFramebuffer(GL_FRAMEBUFFER, framebuffer);
glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_RENDERBUFFER, renderBuffer);
glClearColor(0.0f, 0.0f, 1.0f, 1.0f);
glClear(GL_COLOR_BUFFER_BIT);
glViewport(0, 0, WIDTH, HEIGHT);
glEnable(GL_TEXTURE_2D);
shader_program = common_get_shader_program(vertex_shader_source, fragment_shader_source);
GLint vert_inx = glGetAttribLocation(shader_program, "inPos");
GLint uv_inx = glGetAttribLocation(shader_program, "inUV");
GLint tex_loc = glGetUniformLocation(shader_program, "u_texture");
GLuint texture_object_id;
glGenTextures(1, &texture_object_id);
glBindTexture(GL_TEXTURE_2D, texture_object_id);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, WIDTH, HEIGHT, 0, GL_RGBA, GL_UNSIGNED_BYTE, video_raw);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
GLfloat vtx1[] = { -1, -1, 0, -1, 1, 0, 1, 1, 0, 1, -1, 0 };
GLfloat tex1[] = { 0, 0, 0, 1, 1, 1, 1, 0 };
glVertexAttribPointer(vert_inx, 3, GL_FLOAT, GL_FALSE, 0, vtx1);
glEnableVertexAttribArray(vert_inx);
glVertexAttribPointer(uv_inx, 2, GL_FLOAT, GL_FALSE, 0, tex1);
glEnableVertexAttribArray(uv_inx);
glViewport(0, 0, WIDTH, HEIGHT);
glUseProgram(shader_program);
glUniform1i(tex_loc, 0);
glDrawArrays( GL_TRIANGLE_FAN, 0, 4);
glFlush();
int size = 4 * WIDTH * HEIGHT;
unsigned char *data2 = new unsigned char[size];
glReadPixels(0, 0, WIDTH, HEIGHT, GL_RGBA, GL_UNSIGNED_BYTE, data2);
dumptoFile("./read1.raw", size, data2);
return EXIT_SUCCESS;
}
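(common_get_shader_program is not shown in the question; a minimal sketch of such a helper, assuming it only compiles and links the two sources, might look like this:)
static GLuint common_get_shader_program(const GLchar *vert_src, const GLchar *frag_src)
{
    GLuint vs = glCreateShader(GL_VERTEX_SHADER);
    glShaderSource(vs, 1, &vert_src, NULL);
    glCompileShader(vs);
    GLuint fs = glCreateShader(GL_FRAGMENT_SHADER);
    glShaderSource(fs, 1, &frag_src, NULL);
    glCompileShader(fs);
    GLuint prog = glCreateProgram();
    glAttachShader(prog, vs);
    glAttachShader(prog, fs);
    glLinkProgram(prog);
    // production code should check GL_COMPILE_STATUS and GL_LINK_STATUS here
    return prog;
}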
You have to use a shader program and define arrays of generic vertex attribute data. See also Vertex Specification.
Create, compile and link a very simple shader program like the following:
const char *sh_vert =
"#version 100\n"\
"precision mediump float;\n"\
"attribute vec3 inPos;\n"\
"attribute vec2 inUV;\n"\
"varying vec2 vUV;\n"\
"void main()\n"\
"{\n"\
" vUV = inUV;\n"\
" gl_Position = vec4(inPos, 1.0);\n"\
"}";
const char *sh_frag =
"#version 100\n"\
"precision mediump float;\n"\
"varying vec2 vUV;\n"\
"uniform sampler2D u_texture;\n"\
"void main()\n"\
"{\n"\
" gl_FragColor = texture2D(u_texture, vUV);\n"\
"}";
GLuint v_sh = glCreateShader( GL_VERTEX_SHADER );
glShaderSource( v_sh, 1, &sh_vert, nullptr );
glCompileShader( v_sh );
GLint status = GL_TRUE;
glGetShaderiv( v_sh, GL_COMPILE_STATUS, &status );
if ( status == GL_FALSE )
{
// compile error
}
GLuint f_sh = glCreateShader( GL_FRAGMENT_SHADER );
glShaderSource( f_sh, 1, &sh_frag, nullptr );
glCompileShader( f_sh );
status = GL_TRUE;
glGetShaderiv( f_sh, GL_COMPILE_STATUS, &status );
if ( status == GL_FALSE )
{
// compile error
}
GLuint prog = glCreateProgram();
glAttachShader( prog, v_sh );
glAttachShader( prog, f_sh );
glLinkProgram( prog );
status = GL_TRUE;
glGetProgramiv( prog, GL_LINK_STATUS, &status );
if ( status == GL_FALSE )
{
// link error
}
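If a compile or link check fails, the driver's info log usually explains why; a small sketch of retrieving it (using the v_sh handle from above):
GLint log_len = 0;
glGetShaderiv( v_sh, GL_INFO_LOG_LENGTH, &log_len );
if ( log_len > 1 )
{
    char *log = new char[log_len];
    glGetShaderInfoLog( v_sh, log_len, nullptr, log );
    printf( "compile log: %s\n", log );
    delete[] log;
}
// glGetProgramiv / glGetProgramInfoLog work the same way for link errors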
Get the attribute indices and the location of the texture sampler uniform:
GLint vert_inx = glGetAttribLocation( prog, "inPos" );
GLint uv_inx = glGetAttribLocation( prog, "inUV" );
GLint tex_loc = glGetUniformLocation( prog, "u_texture" );
Then define the arrays of generic vertex attribute data with glVertexAttribPointer and enable them with glEnableVertexAttribArray:
GLfloat vtx1[] = { -1, -1, 0, -1, 1, 0, 1, 1, 0, 1, -1, 0 };
GLfloat tex1[] = { 0, 0, 0, 1, 1, 1, 1, 0 };
glVertexAttribPointer( vert_inx, 3, GL_FLOAT, GL_FALSE, 0, vtx1);
glEnableVertexAttribArray( vert_inx );
glVertexAttribPointer( uv_inx, 2, GL_FLOAT, GL_FALSE, 0, tex1);
glEnableVertexAttribArray( uv_inx );
Set up the renderbuffer and the framebuffer, and adjust the viewport:
glGenFramebuffers(1, &fboId);
glBindFramebuffer(GL_FRAMEBUFFER, fboId);
GLuint renderBuffer;
glGenRenderbuffers(1, &renderBuffer);
glBindRenderbuffer(GL_RENDERBUFFER, renderBuffer);
glRenderbufferStorage(GL_RENDERBUFFER, GL_RGBA, renderBufferWidth, renderBufferHeight);
glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_RENDERBUFFER, renderBuffer);
glViewport(0,0,renderBufferWidth,renderBufferHeight);
glClearColor(0.0f, 0.0f, 1.0f, 1.0f);
glClear(GL_COLOR_BUFFER_BIT);
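It is also worth verifying that the framebuffer is complete before drawing. Note that core GLES2 only guarantees renderbuffer formats such as GL_RGB565 and GL_RGBA4 (GL_RGBA8 requires the OES_rgb8_rgba8 extension), and an unsupported format shows up in this check:
GLenum fb_status = glCheckFramebufferStatus(GL_FRAMEBUFFER);
if (fb_status != GL_FRAMEBUFFER_COMPLETE)
{
    // e.g. GL_FRAMEBUFFER_UNSUPPORTED for an unsupported renderbuffer format
}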
Use the program, set the texture sampler uniform and draw the geometry:
// use the program
glUseProgram( prog );
glUniform1i( tex_loc, 0 ); // 0 == texture unit 0
// draw the geometry
glDrawArrays( GL_TRIANGLE_FAN, 0, 4 );
glUseProgram( 0 );
Finally the image can be read:
int size = 4 * renderBufferHeight * renderBufferWidth;
unsigned char *data2 = new unsigned char[size];
glReadPixels(0, 0, renderBufferWidth, renderBufferHeight, GL_RGBA, GL_UNSIGNED_BYTE, data2);
Now I am using Unity3D to develop a game, and I want to save the content of each frame as an mp4 file with AVFoundation. But I met a problem while processing the snapshot: after I use glReadPixels to obtain the data saved in the render buffer, a vertex shader and fragment shader are used to turn the pixel content upside down. After flipping each frame, I found that the quality of each frame had decreased a lot. Has anyone met this kind of case before?
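(As an aside, the same vertical flip can also be done on the CPU without a GL round trip; a minimal sketch in C, assuming a tightly packed RGBA buffer — the helper name is illustrative:)
#include <stdlib.h>
#include <string.h>
static void flip_rgba_vertically(unsigned char *pixels, int width, int height)
{
    const size_t stride = (size_t)width * 4; // bytes per row, tightly packed
    unsigned char *row = (unsigned char *)malloc(stride);
    if (!row) return;
    for (int y = 0; y < height / 2; ++y) {
        unsigned char *top = pixels + (size_t)y * stride;
        unsigned char *bottom = pixels + (size_t)(height - 1 - y) * stride;
        memcpy(row, top, stride);    // swap row y with row (height-1-y)
        memcpy(top, bottom, stride);
        memcpy(bottom, row, stride);
    }
    free(row);
}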
Here is the related code.
The snapshot part:
- (void *)snapshot
{
// NSLog(#"snapshot used here");
GLint backingWidth1, backingHeight1;
glBindRenderbufferOES(GL_RENDERBUFFER_OES, mainDisplaySurface->systemColorRB);
// Get the size of the backing CAEAGLLayer
glGetRenderbufferParameterivOES(GL_RENDERBUFFER_OES, GL_RENDERBUFFER_WIDTH_OES, &backingWidth1);
glGetRenderbufferParameterivOES(GL_RENDERBUFFER_OES, GL_RENDERBUFFER_HEIGHT_OES, &backingHeight1);
NSInteger x = 0, y = 0, width = backingWidth1, height = backingHeight1;
NSInteger dataLength = width * height * 4;
GLubyte *data = (GLubyte*)malloc(dataLength * sizeof(GLubyte));
// Read pixel data from the framebuffer
glPixelStorei(GL_PACK_ALIGNMENT, 4);
glReadPixels(x, y, width, height, GL_RGBA, GL_UNSIGNED_BYTE, data);
if (transformData == NULL)
{
NSLog(#"transformData initial");
transformData = new loadDataFromeX();
transformData->setupOGL(backingWidth1, backingHeight1);
}
NSLog(#"data %d, %d, %d", (int)data[0], (int)data[1], (int)data[2]);
transformData->drawingOGL(data);
return data;
}
Here, transformData is a C++ class that does the flipping work.
In the function setupOGL(), all the textures and framebuffers are constructed.
In the function drawingOGL(), the flipping is done by passing the data through the vertex shader and fragment shader. The details of this function are listed below:
int loadDataFromeX::drawingOGL(unsigned char* data)
{
//load data to the texture;
glDisable(GL_DEPTH_TEST);
glBindFramebuffer(GL_FRAMEBUFFER ,transFBO.frameBuffer);
glClear(GL_COLOR_BUFFER_BIT);
glClearColor(1., 0., 0., 1.);
glViewport(0, 0, imageWidth, imageHeight);
GLfloat vertex_postions[] = {
-1.0f, -1.0f, -10.0f,
1.0f, -1.0f, -10.0f,
-1.0f, 1.0f, -10.0f,
1.0f, 1.0f, -10.0f
};
GLfloat texture_coords[] = { //left up corner is (0.0)
0.0f, 1.0f,
1.0f, 1.0f,
0.0f, 0.0f,
1.0f, 0.0f,
};
glUseProgram(gl_program_id);
glVertexAttribPointer(gl_attribute_position, 3, GL_FLOAT, GL_FALSE, 0,vertex_postions);
glEnableVertexAttribArray(gl_attribute_position);
glVertexAttribPointer(gl_attribute_texture_coordinate, 2, GL_FLOAT, GL_FALSE, 0,texture_coords);
glEnableVertexAttribArray(gl_attribute_texture_coordinate);
// Load textures
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, texture);
if(flag)
{
flag = false;
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, imageWidth, imageHeight, 0, GL_RGBA, GL_UNSIGNED_BYTE, data);
}
else
{
glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, imageWidth, imageHeight, GL_RGBA, GL_UNSIGNED_BYTE, data);
}
glUniform1i(glGetUniformLocation(gl_program_id, "inputImageTexture"), 0);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glUniformMatrix4fv(mvpLocation, 1, 0, gComputeMVP);
glDrawArrays(GL_TRIANGLE_STRIP ,0 ,4);
glPixelStorei(GL_PACK_ALIGNMENT, 4);
glReadPixels(0, 0, imageWidth, imageHeight,GL_RGBA, GL_UNSIGNED_BYTE, data);
glFinish();
cout<<"data: "<<(int)data[0]<<"; "<<(int)data[1]<<", "<<(int)data[2]<<endl;
return 1;
}
The vertex shader and fragment shader are provided below:
static float l = -1.f, r = 1.f;
static float b = -1.f, t = 1.f;
static float n = 0.1f, f = 100.f;
static float gComputeMVP[16] = {
2.0f/(r-l), 0.0f, 0.0f, 0.0f,
0.0f, 2.0f/(t-b), 0.0f, 0.0f,
0.0f, 0.0f, -2.0f/(f-n), 0.0f,
-(r+l)/(r-l), -(t+b)/(t-b), -(f+n)/(f-n), 1.0f
};
// Shader sources
const GLchar* vertex_shader_str =
"attribute vec4 position;\n"
"attribute vec4 inputTextureCoordinate;\n"
"varying mediump vec2 textureCoordinate;\n"
"uniform mat4 mvpMatrix;\n"
"void main()\n"
"{\n"
" gl_Position = position;\n"
" gl_Position = mvpMatrix * position;\n"
" textureCoordinate = inputTextureCoordinate.xy;\n"
"}";
const char* fragment_shader_str = ""
" varying mediump vec2 textureCoordinate;\n"
"\n"
" uniform sampler2D inputImageTexture;\n"
" \n"
" void main()\n"
" {\n"
" mediump vec4 Color = texture2D(inputImageTexture, textureCoordinate);\n"
" gl_FragColor = vec4(Color.z, Color.y, Color.x, Color.w);\n"
" }";
I don't know why the quality of each frame has decreased. Also, I compare the output of the variable data before and after using drawingOGL with these two lines:
cout<<"data: "<<(int)data[0]<<"; "<<(int)data[1]<<", "<<(int)data[2]<<endl;
NSLog(#"data %d, %d, %d", (int)data[0], (int)data[1], (int)data[2]);
The first line gives the right pixel values, but the second line always gives ZERO. It's really strange, right?
I have found the reason for this strange problem: it was caused by Unity3D's context. I'm not familiar with Unity3D, so maybe only someone like me would run into this. There is some special OpenGL ES setting in the context belonging to Unity3D, so to finish the snapshot task, a new context has to be created and activated only while the snapshot is working.
To solve the problem, I construct a separate context (EAGLContext*) for the snapshot task like this:
- (void *)snapshot
{
// NSLog(#"snapshot used here");
GLint backingWidth1, backingHeight1;
glBindRenderbufferOES(GL_RENDERBUFFER_OES, mainDisplaySurface->systemColorRB);
// Get the size of the backing CAEAGLLayer
glGetRenderbufferParameterivOES(GL_RENDERBUFFER_OES, GL_RENDERBUFFER_WIDTH_OES, &backingWidth1);
glGetRenderbufferParameterivOES(GL_RENDERBUFFER_OES, GL_RENDERBUFFER_HEIGHT_OES, &backingHeight1);
NSInteger x = 0, y = 0, width = backingWidth1, height = backingHeight1;
NSInteger dataLength = width * height * 4;
GLubyte *data = (GLubyte*)malloc(dataLength * sizeof(GLubyte));
// Read pixel data from the framebuffer
glPixelStorei(GL_PACK_ALIGNMENT, 4);
glReadPixels(x, y, width, height, GL_BGRA, GL_UNSIGNED_BYTE, data);
NSLog(#"data %d, %d, %d", (int)data[0], (int)data[1], (int)data[2]);
NSLog(#"backingWidth1 : %d, backingHeight1: %d", backingWidth1, backingHeight1);
if (transformData == NULL)
{
mycontext = [[EAGLContext alloc] initWithAPI:kEAGLRenderingAPIOpenGLES2];
[EAGLContext setCurrentContext: mycontext];
NSLog(#"transformData initial");
transformData = new loadDataFromeX();
transformData->setupOGL(backingWidth1, backingHeight1);
[EAGLContext setCurrentContext: mainDisplaySurface->context];
}
{
[EAGLContext setCurrentContext: mycontext];
transformData->drawingOGL(data);
[EAGLContext setCurrentContext: mainDisplaySurface->context];
}
}
When the resources used for the snapshot are to be released, the code looks like this:
if (transformData != NULL)
{
{
[EAGLContext setCurrentContext: mycontext];
transformData->destroy();
delete transformData;
transformData = NULL;
[EAGLContext setCurrentContext: mainDisplaySurface->context];
}
[mycontext release];
mycontext = nil;
}
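A slightly more defensive variant captures whatever context is current instead of assuming it is mainDisplaySurface->context, e.g.:
EAGLContext *previous = [EAGLContext currentContext];
[EAGLContext setCurrentContext: mycontext];
transformData->drawingOGL(data);
[EAGLContext setCurrentContext: previous];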
I was curious to see the performance of texture uploads with my configuration using OpenGL and noticed something I think is odd. I create a 4K texture using glTexStorage2D with a format of GL_RGBA8. Then, every frame, I use glTexSubImage2D to re-upload a static image buffer to the texture. Based on the frame rate, I get about 5.19 GB/s. Next, I changed the format of the texture to GL_SRGB8_ALPHA8 and re-ran the experiment. This time I am getting 2.81 GB/s, a significant decrease. This seems odd, because as far as I know there shouldn't be anything different about uploading sRGB data versus uploading RGB data, as no conversion should be taking place (sRGB conversion happens in the shader, during sampling).
Some additional information. For the first test I use GL_RGBA and GL_UNSIGNED_INT_8_8_8_8_REV in the call to glTexSubImage2D, as this is what the driver (through glGetInternalformativ) tells me is ideal. For the second test I use GL_UNSIGNED_INT_8_8_8_8, as per the driver's suggestion. A bit of testing confirms that these are the fastest formats to use, respectively. This is using an Nvidia GeForce GTX 760 on Windows 7 x64 with the 332.21 drivers.
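(For scale: each 4096×4096 RGBA8 upload moves 4096 * 4096 * 4 ≈ 67.1 MB, so 5.19 GB/s corresponds to roughly 77 uploads per second and 2.81 GB/s to roughly 42 — presumably how the GB/s figures were derived from the frame rate.) The test program: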
#include <GL/glew.h>
#include <GLFW/glfw3.h>
#include <vector>
#include <cstdlib>
#include <cstdio>
#define SCREEN_SIZE_X 1024
#define SCREEN_SIZE_Y 1024
#define GLSL(src) "#version 440 core\n" #src
const char* vertex_shader = GLSL(
const vec2 data[4] = vec2[]
(
vec2(-1.0, 1.0),
vec2(-1.0, -1.0),
vec2( 1.0, 1.0),
vec2( 1.0, -1.0)
);
void main()
{
gl_Position = vec4(data[gl_VertexID], 0.0, 1.0);
}
);
const char* fragment_shader = GLSL(
layout(location = 0) uniform sampler2D texture0;
layout(location = 1) uniform vec2 screenSize;
out vec4 frag_color;
void main()
{
frag_color = texture(texture0, gl_FragCoord.xy / screenSize);
}
);
int main(int argc, char *argv[])
{
if(!glfwInit())
exit(EXIT_FAILURE);
glfwWindowHint(GLFW_RESIZABLE, GL_FALSE);
glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 4);
glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 4);
glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE);
glfwWindowHint(GLFW_OPENGL_FORWARD_COMPAT, GL_TRUE);
GLFWwindow* window = glfwCreateWindow(SCREEN_SIZE_X, SCREEN_SIZE_Y, "OpenGL Texture Upload", nullptr, nullptr);
if(!window)
{
glfwTerminate();
exit(EXIT_FAILURE);
}
glfwMakeContextCurrent(window);
glfwSwapInterval(0);
glewExperimental = GL_TRUE;
if(glewInit() != GLEW_OK)
{
glfwTerminate();
exit(EXIT_FAILURE);
}
GLuint vao = 0;
glGenVertexArrays(1, &vao);
glBindVertexArray(vao);
GLuint vs = glCreateShader(GL_VERTEX_SHADER);
glShaderSource(vs, 1, &vertex_shader, nullptr);
glCompileShader(vs);
GLuint fs = glCreateShader(GL_FRAGMENT_SHADER);
glShaderSource(fs, 1, &fragment_shader, nullptr);
glCompileShader(fs);
GLuint shader_program = glCreateProgram();
glAttachShader(shader_program, fs);
glAttachShader(shader_program, vs);
glLinkProgram(shader_program);
glUseProgram(shader_program);
glProgramUniform2f(shader_program, 1, SCREEN_SIZE_X, SCREEN_SIZE_Y);
GLuint texture = 0;
glGenTextures(1, &texture);
#ifdef USE_SRGB
glTextureStorage2DEXT(texture, GL_TEXTURE_2D, 1, GL_SRGB8_ALPHA8, 4096, 4096);
#else
glTextureStorage2DEXT(texture, GL_TEXTURE_2D, 1, GL_RGBA8, 4096, 4096);
#endif
glTextureParameteriEXT(texture, GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTextureParameteriEXT(texture, GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTextureParameteriEXT(texture, GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTextureParameteriEXT(texture, GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glBindMultiTextureEXT(GL_TEXTURE0, GL_TEXTURE_2D, texture);
glProgramUniform1i(shader_program, 0, 0);
std::vector<unsigned int> image_buffer(4096*4096, 0xFF0000FFul);
double lastTime = glfwGetTime();
double nbFrames = 0;
while(!glfwWindowShouldClose(window))
{
double currentTime = glfwGetTime();
nbFrames++;
if (currentTime - lastTime >= 1.0)
{
char cbuffer[50];
snprintf(cbuffer, sizeof(cbuffer), "OpenGL Texture Upload [%.1f fps, %.3f ms]", nbFrames, 1000.0 / nbFrames);
glfwSetWindowTitle(window, cbuffer);
nbFrames = 0;
lastTime++;
}
#ifdef USE_SRGB
glTextureSubImage2DEXT(texture, GL_TEXTURE_2D, 0, 0, 0, 4096, 4096, GL_RGBA, GL_UNSIGNED_INT_8_8_8_8, image_buffer.data());
#else
glTextureSubImage2DEXT(texture, GL_TEXTURE_2D, 0, 0, 0, 4096, 4096, GL_RGBA, GL_UNSIGNED_INT_8_8_8_8_REV, image_buffer.data());
#endif
glClear(GL_COLOR_BUFFER_BIT);
glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);
glfwSwapBuffers(window);
glfwPollEvents();
}
glfwDestroyWindow(window);
glfwTerminate();
exit(EXIT_SUCCESS);
}
Apparently there is such a thing as a 'native pixel format'. Look at this link from Nvidia, especially section 32.1.3.
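For reference, the per-internal-format query mentioned in the question presumably looks something like this sketch (GL_TEXTURE_IMAGE_FORMAT and GL_TEXTURE_IMAGE_TYPE come from ARB_internalformat_query2 / GL 4.3):
GLint preferred_format = 0, preferred_type = 0;
glGetInternalformativ(GL_TEXTURE_2D, GL_SRGB8_ALPHA8, GL_TEXTURE_IMAGE_FORMAT, 1, &preferred_format);
glGetInternalformativ(GL_TEXTURE_2D, GL_SRGB8_ALPHA8, GL_TEXTURE_IMAGE_TYPE, 1, &preferred_type);
// e.g. the driver may report GL_UNSIGNED_INT_8_8_8_8 here rather than GL_UNSIGNED_INT_8_8_8_8_REV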
First I am creating texture data using this code:
#define checkImageWidth 64
#define checkImageHeight 64
GLint checkImage[checkImageHeight][checkImageWidth][3];
void makeCheckImage(void)
{
int i, j, c;
for (i = 0; i < checkImageHeight; i++) {
for (j = 0; j < checkImageWidth; j++) {
c = (((i&0x8)==0)^((j&0x8)==0))*255;
checkImage[i][j][0] = (GLubyte) c;
checkImage[i][j][1] = (GLubyte) c;
checkImage[i][j][2] = (GLubyte) c;
}
}
}
Then I draw the texture using this code:
glGenTextures( 1, &texture );
// select our current texture
glBindTexture( GL_TEXTURE_2D, texture );
glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT );
glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT );
glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MIN_FILTER,GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MAG_FILTER,GL_LINEAR);
glEnable(GL_TEXTURE_2D);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, checkImageWidth, checkImageHeight, 0, GL_RGB, GL_UNSIGNED_BYTE, checkImage);
glDrawTexiOES(0, 0, 0, checkImageWidth, checkImageHeight);
But I do not see any output. What am I doing wrong here?
I'm trying to port some OpenGL 3.2 code from Windows to OS X 10.8 (using the GLFW core profile), but I get an INVALID_OPERATION (from glGetError()) when I call glDrawElements. The glDrawArrays function works fine, so my shaders are initialized correctly.
The following snippet shows what I am doing. Any idea what I am doing wrong?
struct Vertex {
vec2 position;
vec3 color;
};
void display() {
glClearColor(0.0, 0.0, 0.0, 1.0);
glClear(GL_COLOR_BUFFER_BIT);
glUseProgram(shaderProgram);
mat4 projection = Ortho2D(-15.0f, 15.0f, -15.0f, 15.0f);
glUniformMatrix4fv(projectionUniform, 1, GL_TRUE, projection);
glBindVertexArray(shapeVertexArrayBuffer);
mat4 modelView;
// upper left
modelView = Translate(-7,+7,0);
glUniformMatrix4fv(modelViewUniform, 1, GL_TRUE, modelView);
glDrawArrays(GL_TRIANGLE_FAN, 0, rectangleSize); // renders correctly
// upper right
modelView = Translate(+7,+7,0);
glUniformMatrix4fv(modelViewUniform, 1, GL_TRUE, modelView);
GLuint indices[6] = {0,1,2,3,4,0};
glDrawElements(GL_TRIANGLES, 6, GL_UNSIGNED_INT, indices); //INVALID_OPERATION
glfwSwapBuffers();
}
void loadGeometry() {
vec3 color(1.0f, 1.0f, 0.0f);
Vertex rectangleData[rectangleSize] = {
{ vec2( 0.0, 0.0 ), color },
{ vec2( 5.0, -5.0 ), color },
{ vec2( 5.0, 0.0 ), color },
{ vec2( 0.0, 5.0 ), color },
{ vec2(-5.0, 0.0 ), color },
{ vec2(-5.0, -5.0 ), color }
};
shapeVertexArrayBuffer = loadBufferData(rectangleData, rectangleSize);
}
GLuint loadBufferData(Vertex* vertices, int vertexCount) {
GLuint vertexArrayObject;
glGenVertexArrays(1, &vertexArrayObject);
glBindVertexArray(vertexArrayObject);
GLuint vertexBuffer;
glGenBuffers(1, &vertexBuffer);
glBindBuffer(GL_ARRAY_BUFFER, vertexBuffer);
glBufferData(GL_ARRAY_BUFFER, vertexCount * sizeof(Vertex), vertices, GL_STATIC_DRAW);
glEnableVertexAttribArray(positionAttribute);
glEnableVertexAttribArray(colorAttribute);
glVertexAttribPointer(positionAttribute, 2, GL_FLOAT, GL_FALSE, sizeof(Vertex), (const GLvoid *)0);
glVertexAttribPointer(colorAttribute , 3, GL_FLOAT, GL_FALSE, sizeof(Vertex), (const GLvoid *)sizeof(vec2));
return vertexArrayObject;
}
You are supposed to create an index buffer after your vertex buffer. In a core profile, glDrawElements cannot source indices from client memory; the indices must come from a buffer bound to GL_ELEMENT_ARRAY_BUFFER.
GLuint elementBuffer;
glGenBuffers(1, &elementBuffer);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, elementBuffer);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, sizeof(indices), indices, GL_STATIC_DRAW);
Then you can call glDrawElements(GL_TRIANGLES, 6, GL_UNSIGNED_INT, 0);
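Since the GL_ELEMENT_ARRAY_BUFFER binding is recorded in the vertex array object, a natural place for this is the end of loadBufferData, while vertexArrayObject is still bound — a sketch:
// at the end of loadBufferData, with vertexArrayObject still bound:
static const GLuint indices[6] = {0,1,2,3,4,0};
GLuint elementBuffer;
glGenBuffers(1, &elementBuffer);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, elementBuffer);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, sizeof(indices), indices, GL_STATIC_DRAW);
// in display(), the indices then come from the bound buffer:
glDrawElements(GL_TRIANGLES, 6, GL_UNSIGNED_INT, 0);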
I'm rendering YUV frames from ffmpeg with the iOS 5.0 method CVOpenGLESTextureCacheCreateTextureFromImage, following the Apple example GLCameraRipple.
[Screenshot: my result on the iPhone screen]
I need to know what I'm doing wrong. I've included the relevant parts of my code to help find the errors.
The ffmpeg frame configuration:
ctx->p_sws_ctx = sws_getContext(ctx->p_video_ctx->width,
ctx->p_video_ctx->height,
ctx->p_video_ctx->pix_fmt,
ctx->p_video_ctx->width,
ctx->p_video_ctx->height,
PIX_FMT_YUV420P, SWS_FAST_BILINEAR, NULL, NULL, NULL);
// Buffer for the YUV420P data
ctx->p_frame_buffer = malloc(avpicture_get_size(PIX_FMT_YUV420P,
ctx->p_video_ctx->width,
ctx->p_video_ctx->height));
avpicture_fill((AVPicture*)ctx->p_picture_rgb, ctx->p_frame_buffer,PIX_FMT_YUV420P,
ctx->p_video_ctx->width,
ctx->p_video_ctx->height);
My render method:
if (NULL == videoTextureCache) {
NSLog(#"displayPixelBuffer error");
return;
}
CVPixelBufferRef pixelBuffer;
CVPixelBufferCreateWithBytes(kCFAllocatorDefault, mTexW, mTexH, kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange, buffer, mFrameW * 3, NULL, 0, NULL, &pixelBuffer);
CVReturn err;
// Y-plane
glActiveTexture(GL_TEXTURE0);
err = CVOpenGLESTextureCacheCreateTextureFromImage(kCFAllocatorDefault,
videoTextureCache,
pixelBuffer,
NULL,
GL_TEXTURE_2D,
GL_RED_EXT,
mTexW,
mTexH,
GL_RED_EXT,
GL_UNSIGNED_BYTE,
0,
&_lumaTexture);
if (err)
{
NSLog(#"Error at CVOpenGLESTextureCacheCreateTextureFromImage %d", err);
}
glBindTexture(CVOpenGLESTextureGetTarget(_lumaTexture), CVOpenGLESTextureGetName(_lumaTexture));
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
// UV-plane
glActiveTexture(GL_TEXTURE1);
err = CVOpenGLESTextureCacheCreateTextureFromImage(kCFAllocatorDefault,
videoTextureCache,
pixelBuffer,
NULL,
GL_TEXTURE_2D,
GL_RG_EXT,
mTexW/2,
mTexH/2,
GL_RG_EXT,
GL_UNSIGNED_BYTE,
1,
&_chromaTexture);
if (err)
{
NSLog(#"Error at CVOpenGLESTextureCacheCreateTextureFromImage %d", err);
}
glBindTexture(CVOpenGLESTextureGetTarget(_chromaTexture), CVOpenGLESTextureGetName(_chromaTexture));
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glBindFramebuffer(GL_FRAMEBUFFER, defaultFramebuffer);
// Set the view port to the entire view
glViewport(0, 0, backingWidth, backingHeight);
static const GLfloat squareVertices[] = {
1.0f, 1.0f,
-1.0f, 1.0f,
1.0f, -1.0f,
-1.0f, -1.0f,
};
GLfloat textureVertices[] = {
1, 1,
1, 0,
0, 1,
0, 0,
};
// Draw the texture on the screen with OpenGL ES 2
[self renderWithSquareVertices:squareVertices textureVertices:textureVertices];
// Flush the CVOpenGLESTexture cache and release the texture
CVOpenGLESTextureCacheFlush(videoTextureCache, 0);
CVPixelBufferRelease(pixelBuffer);
[moviePlayerDelegate bufferDone];
The renderWithSquareVertices method:
- (void)renderWithSquareVertices:(const GLfloat*)squareVertices textureVertices:(const GLfloat*)textureVertices
{
// Use shader program.
glUseProgram(shader.program);
// Update attribute values.
glVertexAttribPointer(ATTRIB_VERTEX, 2, GL_FLOAT, 0, 0, squareVertices);
glEnableVertexAttribArray(ATTRIB_VERTEX);
glVertexAttribPointer(ATTRIB_TEXTUREPOSITON, 2, GL_FLOAT, 0, 0, textureVertices);
glEnableVertexAttribArray(ATTRIB_TEXTUREPOSITON);
glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);
// Present
glBindRenderbuffer(GL_RENDERBUFFER, colorRenderbuffer);
[context presentRenderbuffer:GL_RENDERBUFFER];
}
My fragment shader:
uniform sampler2D SamplerY;
uniform sampler2D SamplerUV;
varying highp vec2 _texcoord;
void main()
{
mediump vec3 yuv;
lowp vec3 rgb;
yuv.x = texture2D(SamplerY, _texcoord).r;
yuv.yz = texture2D(SamplerUV, _texcoord).rg - vec2(0.5, 0.5);
// BT.601, which is the standard for SDTV is provided as a reference
/* rgb = mat3( 1, 1, 1,
0, -.34413, 1.772,
1.402, -.71414, 0) * yuv;*/
// Using BT.709 which is the standard for HDTV
rgb = mat3( 1, 1, 1,
0, -.18732, 1.8556,
1.57481, -.46813, 0) * yuv;
gl_FragColor = vec4(rgb, 1);
}
Many thanks.
I imagine the issue is that YUV420 (or I420) is a tri-planar image format: an 8-bit Y plane followed by 8-bit 2x2-subsampled U and V planes. The code from GLCameraRipple expects the NV12 format: an 8-bit Y plane followed by an interleaved U/V plane with 2x2 subsampling. Given this, I expect you will need three textures: luma_tex, u_chroma_tex, and v_chroma_tex.
Also note that GLCameraRipple may be expecting 'video range'; in other words, the values for the planar format are luma=[16,235] and chroma=[16,240].
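For reference, a three-texture fragment shader might look like the sketch below, reusing the BT.709 matrix from the question (the sampler names are illustrative):
uniform sampler2D SamplerY;
uniform sampler2D SamplerU;
uniform sampler2D SamplerV;
varying highp vec2 _texcoord;
void main()
{
    mediump vec3 yuv;
    yuv.x = texture2D(SamplerY, _texcoord).r;          // luma
    yuv.y = texture2D(SamplerU, _texcoord).r - 0.5;    // Cb
    yuv.z = texture2D(SamplerV, _texcoord).r - 0.5;    // Cr
    lowp vec3 rgb = mat3( 1.0,      1.0,      1.0,
                          0.0,     -0.18732,  1.8556,
                          1.57481, -0.46813,  0.0) * yuv;
    gl_FragColor = vec4(rgb, 1.0);
}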