How to display an image texture in GLES2?
The source below initializes the GLES2 display and surface, creates an offscreen framebuffer, loads an RGBA image into a texture, clears the screen to blue, tries to display the loaded image texture (I failed to find the correct API for GLES2), and finally reads back the FBO and writes it to a file.
For displaying, the glEnableClientState and glVertexPointer APIs are not supported in GLES2. How can I display the loaded image texture in GLES2? With the source below I get only blue in the buffer returned by glReadPixels.
unsigned char *video_raw = loadFile("./video.raw");//RGBA raw image
EGLint iConfigs;
EGLConfig eglConfig;
EGLint ai32ContextAttribs[] = { EGL_CONTEXT_CLIENT_VERSION, 2,EGL_NONE };
EGLDisplay eglDisplay = eglGetDisplay((EGLNativeDisplayType)0);
eglInitialize(eglDisplay, 0, 0);
eglBindAPI(EGL_OPENGL_ES_API);
EGLint pi32ConfigAttribs[5];
pi32ConfigAttribs[0] = EGL_SURFACE_TYPE;
pi32ConfigAttribs[1] = EGL_WINDOW_BIT;
pi32ConfigAttribs[2] = EGL_RENDERABLE_TYPE;
pi32ConfigAttribs[3] = EGL_OPENGL_ES2_BIT;
pi32ConfigAttribs[4] = EGL_NONE;
eglChooseConfig(eglDisplay, pi32ConfigAttribs, &eglConfig, 1, &iConfigs);
// note: the config above requests EGL_WINDOW_BIT; a pbuffer surface needs a config
// with EGL_PBUFFER_BIT, and with a NULL attribute list the pbuffer size defaults to 0x0
EGLSurface eglSurface = eglCreatePbufferSurface(eglDisplay, eglConfig, NULL);
EGLContext eglContext = eglCreateContext(eglDisplay, eglConfig, NULL, ai32ContextAttribs);
eglMakeCurrent(eglDisplay, eglSurface, eglSurface, eglContext);
GLuint fboId = 0;
GLuint renderBufferWidth = 960;
GLuint renderBufferHeight = 540;
glGenFramebuffers(1, &fboId);
glBindFramebuffer(GL_FRAMEBUFFER, fboId);
GLuint renderBuffer;
glGenRenderbuffers(1, &renderBuffer);
glBindRenderbuffer(GL_RENDERBUFFER, renderBuffer);
glRenderbufferStorage(GL_RENDERBUFFER, GL_RGB565, renderBufferWidth, renderBufferHeight);
glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_RENDERBUFFER, renderBuffer);
glClearColor(0.0,0.0,1.0,1.0);
glClear(GL_COLOR_BUFFER_BIT);
glEnable(GL_TEXTURE_2D); // fixed-function enable; invalid in GLES2 and generates GL_INVALID_ENUM
GLuint texture_object_id;
glGenTextures(1, &texture_object_id);
glBindTexture(GL_TEXTURE_2D, texture_object_id);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, renderBufferWidth, renderBufferHeight, 0, GL_RGBA, GL_UNSIGNED_BYTE, video_raw);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
GLfloat vtx1[] = { -1, -1, 0, -1, 1, 0, 1, 1, 0, 1, -1, 0 };
GLfloat tex1[] = { 0, 0, 0, 1, 1, 1, 1, 0 };
/*glEnableClientState(GL_VERTEX_ARRAY);
glEnableClientState(GL_TEXTURE_COORD_ARRAY);
glVertexPointer(3, GL_FLOAT, 0, vtx1);
glTexCoordPointer(2, GL_FLOAT, 0, tex1);
glDrawArrays(GL_TRIANGLE_FAN, 0, 4);
glDisableClientState(GL_VERTEX_ARRAY);
glDisableClientState(GL_TEXTURE_COORD_ARRAY);*/
eglSwapBuffers(eglDisplay, eglSurface); // note: this presents the surface, not the FBO currently bound
//read & write to a file
int size = 4 * renderBufferHeight * renderBufferWidth;
unsigned char *data2 = new unsigned char[size];
glReadPixels(0, 0, renderBufferWidth, renderBufferHeight, GL_RGBA, GL_UNSIGNED_BYTE, data2);
dumptoFile("./read1.raw", size, data2);
Edit 1:
@Rabbid76, thanks for the reply. When I used your vertex shader, the line "in vec3 inPos;\n" failed to compile, so I replaced "in" with "uniform". With your input added, the source below produces only a black screen.
static const GLuint WIDTH = 960;
static const GLuint HEIGHT = 540;
static const GLchar* vertex_shader_source =
"#version 100\n"
"precision mediump float;\n"
"uniform vec3 inPos;\n"
"uniform vec2 inUV;\n"
"varying vec2 vUV;\n"
"void main(){\n"
" vUV = inUV;\n"
" gl_Position = vec4(inPos, 1.0);\n"
"}\n";
static const GLchar* fragment_shader_source =
"#version 100\n"
"precision mediump float;\n"
"varying vec2 vUV;\n"
"uniform sampler2D u_texture;\n"
"void main(){\n"
" gl_FragColor = texture2D(u_texture, vUV);\n"
"}\n";
int main(int argc, char **argv)
{
unsigned char *video_raw = loadFile("./video.raw");
EGLint iConfigs;
EGLConfig eglConfig;
EGLint ai32ContextAttribs[] = { EGL_CONTEXT_CLIENT_VERSION, 2, EGL_NONE };
EGLDisplay eglDisplay = eglGetDisplay((EGLNativeDisplayType) 0);
eglInitialize(eglDisplay, 0, 0);
eglBindAPI(EGL_OPENGL_ES_API);
EGLint pi32ConfigAttribs[5];
pi32ConfigAttribs[0] = EGL_SURFACE_TYPE;
pi32ConfigAttribs[1] = EGL_WINDOW_BIT;
pi32ConfigAttribs[2] = EGL_RENDERABLE_TYPE;
pi32ConfigAttribs[3] = EGL_OPENGL_ES2_BIT;
pi32ConfigAttribs[4] = EGL_NONE;
eglChooseConfig(eglDisplay, pi32ConfigAttribs, &eglConfig, 1, &iConfigs);
EGLSurface eglSurface = eglCreatePbufferSurface(eglDisplay, eglConfig, NULL);
EGLContext eglContext = eglCreateContext(eglDisplay, eglConfig, NULL, ai32ContextAttribs);
eglMakeCurrent(eglDisplay, eglSurface, eglSurface, eglContext);
GLuint shader_program, framebuffer, renderBuffer;
glGenRenderbuffers(1, &renderBuffer);
glBindRenderbuffer(GL_RENDERBUFFER, renderBuffer);
glRenderbufferStorage(GL_RENDERBUFFER, GL_RGBA, WIDTH, HEIGHT); // note: unextended GLES2 only accepts GL_RGBA4, GL_RGB5_A1 or GL_RGB565 here
glGenFramebuffers(1, &framebuffer);
glBindFramebuffer(GL_FRAMEBUFFER, framebuffer);
glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_RENDERBUFFER, renderBuffer);
glClearColor(0.0f, 0.0f, 1.0f, 1.0f);
glClear(GL_COLOR_BUFFER_BIT);
glViewport(0, 0, WIDTH, HEIGHT);
glEnable(GL_TEXTURE_2D); // invalid in GLES2; generates GL_INVALID_ENUM
shader_program = common_get_shader_program(vertex_shader_source, fragment_shader_source);
GLint vert_inx = glGetAttribLocation(shader_program, "inPos");
GLint uv_inx = glGetAttribLocation(shader_program, "inUV");
GLint tex_loc = glGetUniformLocation(shader_program, "u_texture");
GLuint texture_object_id;
glGenTextures(1, &texture_object_id);
glBindTexture(GL_TEXTURE_2D, texture_object_id);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, WIDTH, HEIGHT, 0, GL_RGBA, GL_UNSIGNED_BYTE, video_raw);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
GLfloat vtx1[] = { -1, -1, 0, -1, 1, 0, 1, 1, 0, 1, -1, 0 };
GLfloat tex1[] = { 0, 0, 0, 1, 1, 1, 1, 0 };
glVertexAttribPointer(vert_inx, 3, GL_FLOAT, GL_FALSE, 0, vtx1);
glEnableVertexAttribArray(vert_inx);
glVertexAttribPointer(uv_inx, 2, GL_FLOAT, GL_FALSE, 0, tex1);
glEnableVertexAttribArray(uv_inx);
glViewport(0, 0, WIDTH, HEIGHT);
glUseProgram(shader_program);
glUniform1i(tex_loc, 0);
glDrawArrays( GL_TRIANGLE_FAN, 0, 4);
glFlush();
int size = 4 * WIDTH * HEIGHT;
unsigned char *data2 = new unsigned char[size];
glReadPixels(0, 0, WIDTH, HEIGHT, GL_RGBA, GL_UNSIGNED_BYTE, data2);
dumptoFile("./read1.raw", size, data2);
return EXIT_SUCCESS;
}
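As an aside, the original compile failure can be diagnosed directly: GLSL ES 1.00 ("#version 100") does not support the in/out qualifiers, so vertex inputs must be declared attribute. A minimal sketch, using standard GLES2 calls, for reading the compiler log:
GLuint sh = glCreateShader(GL_VERTEX_SHADER);
glShaderSource(sh, 1, &vertex_shader_source, NULL);
glCompileShader(sh);
GLint ok = GL_FALSE;
glGetShaderiv(sh, GL_COMPILE_STATUS, &ok);
if (ok == GL_FALSE)
{
    char log[1024];
    glGetShaderInfoLog(sh, sizeof(log), NULL, log); // reports e.g. the rejected 'in' qualifier
    fprintf(stderr, "vertex shader: %s\n", log);
}
Declaring the inputs uniform compiles, but then glGetAttribLocation returns -1 and the vertex data is never sourced, which is consistent with the black screen; the attribute qualifier used in the answer below is the correct form.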
You have to use a shader program and define the arrays of generic vertex attribute data. See also Vertex Specification.
Create, compile and link a very simple shader program like the following:
const char *sh_vert =
"#version 100\n"\
"precision mediump float;\n"\
"attribute vec3 inPos;\n"\
"attribute vec2 inUV;\n"\
"varying vec2 vUV;\n"\
"void main()\n"\
"{\n"\
" vUV = inUV;\n"\
" gl_Position = vec4(inPos, 1.0);\n"\
"}";
const char *sh_frag =
"#version 100\n"\
"precision mediump float;\n"\
"varying vec2 vUV;\n"\
"uniform sampler2D u_texture;\n"\
"void main()\n"\
"{\n"\
" gl_FragColor = texture2D(u_texture, vUV);\n"\
"}";
GLuint v_sh = glCreateShader( GL_VERTEX_SHADER );
glShaderSource( v_sh, 1, &sh_vert, nullptr );
glCompileShader( v_sh );
GLint status = GL_TRUE;
glGetShaderiv( v_sh, GL_COMPILE_STATUS, &status );
if ( status == GL_FALSE )
{
// compile error
}
GLuint f_sh = glCreateShader( GL_FRAGMENT_SHADER );
glShaderSource( f_sh, 1, &sh_frag, nullptr );
glCompileShader( f_sh );
status = GL_TRUE;
glGetShaderiv( f_sh, GL_COMPILE_STATUS, &status );
if ( status == GL_FALSE )
{
// compile error
}
GLuint prog = glCreateProgram();
glAttachShader( prog, v_sh );
glAttachShader( prog, f_sh );
glLinkProgram( prog );
status = GL_TRUE;
glGetProgramiv( prog, GL_LINK_STATUS, &status );
if ( status == GL_FALSE )
{
// link error
}
Get the attribute indices and the location of the texture sampler uniform:
GLint vert_inx = glGetAttribLocation( prog, "inPos" );
GLint uv_inx = glGetAttribLocation( prog, "inUV" );
GLint tex_loc = glGetUniformLocation( prog, "u_texture" );
Then define the arrays of generic vertex attribute data with glVertexAttribPointer and enable them with glEnableVertexAttribArray:
GLfloat vtx1[] = { -1, -1, 0, -1, 1, 0, 1, 1, 0, 1, -1, 0 };
GLfloat tex1[] = { 0, 0, 0, 1, 1, 1, 1, 0 };
glVertexAttribPointer( vert_inx, 3, GL_FLOAT, GL_FALSE, 0, vtx1);
glEnableVertexAttribArray( vert_inx );
glVertexAttribPointer( uv_inx, 2, GL_FLOAT, GL_FALSE, 0, tex1);
glEnableVertexAttribArray( uv_inx );
Setup the renderbuffer and the framebuffer and adjust the viewport:
glGenFramebuffers(1, &fboId);
glBindFramebuffer(GL_FRAMEBUFFER, fboId);
GLuint renderBuffer;
glGenRenderbuffers(1, &renderBuffer);
glBindRenderbuffer(GL_RENDERBUFFER, renderBuffer);
glRenderbufferStorage(GL_RENDERBUFFER, GL_RGBA, renderBufferWidth, renderBufferHeight); // note: unextended GLES2 guarantees only GL_RGBA4, GL_RGB5_A1 or GL_RGB565 here; GL_RGBA8_OES needs the OES_rgb8_rgba8 extension
glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_RENDERBUFFER, renderBuffer);
glViewport(0,0,renderBufferWidth,renderBufferHeight);
glClearColor(0.0f, 0.0f, 1.0f, 1.0f);
glClear(GL_COLOR_BUFFER_BIT);
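Before drawing, it is also worth verifying that the framebuffer is complete (an unsupported renderbuffer format, for instance, shows up here). A minimal check with the standard GLES2 call:
GLenum fbo_status = glCheckFramebufferStatus(GL_FRAMEBUFFER);
if (fbo_status != GL_FRAMEBUFFER_COMPLETE)
{
    // e.g. GL_FRAMEBUFFER_INCOMPLETE_ATTACHMENT or GL_FRAMEBUFFER_UNSUPPORTED
    fprintf(stderr, "framebuffer incomplete: 0x%04x\n", fbo_status);
}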
Use the program, set the texture sampler uniform and draw the geometry:
// use the program
glUseProgram( prog );
glUniform1i( tex_loc, 0 ); // 0 == texture unit 0
// draw the geometry
glDrawArrays( GL_TRIANGLE_FAN, 0, 4 );
glUseProgram( 0 );
Finally the image can be read:
int size = 4 * renderBufferHeight * renderBufferWidth;
unsigned char *data2 = new unsigned char[size];
glReadPixels(0, 0, renderBufferWidth, renderBufferHeight, GL_RGBA, GL_UNSIGNED_BYTE, data2);
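GL_RGBA with GL_UNSIGNED_BYTE is always a valid read-back combination in GLES2; the one additional implementation-defined pair can be queried if needed:
GLint read_fmt = 0, read_type = 0;
glGetIntegerv(GL_IMPLEMENTATION_COLOR_READ_FORMAT, &read_fmt);
glGetIntegerv(GL_IMPLEMENTATION_COLOR_READ_TYPE, &read_type);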
I was curious to see the performance of texture uploads with my configuration using OpenGL, and I noticed something I think is odd. I create a 4K texture using glTexStorage2D with a format of GL_RGBA8. Then, every frame, I use glTexSubImage2D to re-upload a static image buffer to the texture. Based on the frame rate, I get about 5.19 GB/s. Next, I changed the format of the texture to GL_SRGB8_ALPHA8 and retried the experiment. This time I get 2.81 GB/s, a significant decrease. This seems odd because, as far as I know, there shouldn't be anything different about uploading sRGB data versus RGB data, as no conversion should be taking place (sRGB conversion should happen in the shader, during sampling).
Some additional information: for the first test I use GL_RGBA and GL_UNSIGNED_INT_8_8_8_8_REV in the call to glTexSubImage2D, as this is what the driver (through glGetInternalformativ) tells me is ideal. For the second test I use GL_UNSIGNED_INT_8_8_8_8, again per the driver's suggestion. A bit of testing confirms that these are the fastest formats to use, respectively. This is on an NVIDIA GeForce GTX 760 on Windows 7 x64 with the 332.21 drivers.
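For reference, the driver's preferred upload format/type pair mentioned above can be queried with glGetInternalformativ from ARB_internalformat_query2 (core since GL 4.3); a minimal sketch:
GLint ideal_format = 0, ideal_type = 0;
glGetInternalformativ(GL_TEXTURE_2D, GL_SRGB8_ALPHA8, GL_TEXTURE_IMAGE_FORMAT, 1, &ideal_format);
glGetInternalformativ(GL_TEXTURE_2D, GL_SRGB8_ALPHA8, GL_TEXTURE_IMAGE_TYPE, 1, &ideal_type);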
#include <GL/glew.h>
#include <GLFW/glfw3.h>
#include <vector>
#include <cstdlib>
#include <cstdio>
#define SCREEN_SIZE_X 1024
#define SCREEN_SIZE_Y 1024
#define GLSL(src) "#version 440 core\n" #src
const char* vertex_shader = GLSL(
const vec2 data[4] = vec2[]
(
vec2(-1.0, 1.0),
vec2(-1.0, -1.0),
vec2( 1.0, 1.0),
vec2( 1.0, -1.0)
);
void main()
{
gl_Position = vec4(data[gl_VertexID], 0.0, 1.0);
}
);
const char* fragment_shader = GLSL(
layout(location = 0) uniform sampler2D texture0;
layout(location = 1) uniform vec2 screenSize;
out vec4 frag_color;
void main()
{
frag_color = texture(texture0, gl_FragCoord.xy / screenSize);
}
);
int main(int argc, char *argv[])
{
if(!glfwInit())
exit(EXIT_FAILURE);
glfwWindowHint(GLFW_RESIZABLE, GL_FALSE);
glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 4);
glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 4);
glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE);
glfwWindowHint(GLFW_OPENGL_FORWARD_COMPAT, GL_TRUE);
GLFWwindow* window = glfwCreateWindow(SCREEN_SIZE_X, SCREEN_SIZE_Y, "OpenGL Texture Upload", nullptr, nullptr);
if(!window)
{
glfwTerminate();
exit(EXIT_FAILURE);
}
glfwMakeContextCurrent(window);
glfwSwapInterval(0);
glewExperimental = GL_TRUE;
if(glewInit() != GLEW_OK)
{
glfwTerminate();
exit(EXIT_FAILURE);
}
GLuint vao = 0;
glGenVertexArrays(1, &vao);
glBindVertexArray(vao);
GLuint vs = glCreateShader(GL_VERTEX_SHADER);
glShaderSource(vs, 1, &vertex_shader, nullptr);
glCompileShader(vs);
GLuint fs = glCreateShader(GL_FRAGMENT_SHADER);
glShaderSource(fs, 1, &fragment_shader, nullptr);
glCompileShader(fs);
GLuint shader_program = glCreateProgram();
glAttachShader(shader_program, fs);
glAttachShader(shader_program, vs);
glLinkProgram(shader_program);
glUseProgram(shader_program);
glProgramUniform2f(shader_program, 1, SCREEN_SIZE_X, SCREEN_SIZE_Y);
GLuint texture = 0;
glGenTextures(1, &texture);
#ifdef USE_SRGB
glTextureStorage2DEXT(texture, GL_TEXTURE_2D, 1, GL_SRGB8_ALPHA8, 4096, 4096);
#else
glTextureStorage2DEXT(texture, GL_TEXTURE_2D, 1, GL_RGBA8, 4096, 4096);
#endif
glTextureParameteriEXT(texture, GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTextureParameteriEXT(texture, GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTextureParameteriEXT(texture, GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTextureParameteriEXT(texture, GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glBindMultiTextureEXT(GL_TEXTURE0, GL_TEXTURE_2D, texture);
glProgramUniform1i(shader_program, 0, 0);
std::vector<unsigned int> image_buffer(4096*4096, 0xFF0000FFul);
double lastTime = glfwGetTime();
double nbFrames = 0;
while(!glfwWindowShouldClose(window))
{
double currentTime = glfwGetTime();
nbFrames++;
if (currentTime - lastTime >= 1.0)
{
char cbuffer[50];
snprintf(cbuffer, sizeof(cbuffer), "OpenGL Texture Upload [%.1f fps, %.3f ms]", nbFrames, 1000.0 / nbFrames);
glfwSetWindowTitle(window, cbuffer);
nbFrames = 0;
lastTime++;
}
#ifdef USE_SRGB
glTextureSubImage2DEXT(texture, GL_TEXTURE_2D, 0, 0, 0, 4096, 4096, GL_RGBA, GL_UNSIGNED_INT_8_8_8_8, image_buffer.data());
#else
glTextureSubImage2DEXT(texture, GL_TEXTURE_2D, 0, 0, 0, 4096, 4096, GL_RGBA, GL_UNSIGNED_INT_8_8_8_8_REV, image_buffer.data());
#endif
glClear(GL_COLOR_BUFFER_BIT);
glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);
glfwSwapBuffers(window);
glfwPollEvents();
}
glfwDestroyWindow(window);
glfwTerminate();
exit(EXIT_SUCCESS);
}
Apparently there is such a thing as a 'native pixel format'. Look at this link from Nvidia, especially section 32.1.3.
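The practical consequence is to upload in the layout the GPU stores internally; on NVIDIA hardware the classic native pair for RGBA8-style textures is often GL_BGRA with GL_UNSIGNED_INT_8_8_8_8_REV. A hedged sketch, adapting the upload call from the code above (the image data would need to be in BGRA byte order):
glTextureSubImage2DEXT(texture, GL_TEXTURE_2D, 0, 0, 0, 4096, 4096,
                       GL_BGRA, GL_UNSIGNED_INT_8_8_8_8_REV, image_buffer.data());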
I'm looking to use OpenGL to blend 4 images on the screen. The first image is the background, and the other 3 contain a cartoon with transparency. My goal is to render those 4 images at once; on every call to render I update the top 3 images with new images to compose the next frame.
I'm new to OpenGL, and so far I'm able to blend the images, but I'm noticing some horrible issues when I render: sometimes some of the top 3 images are missing, or some are rendered but look as though they were cropped by the invisible man...
Each line represents a different image.
Image cropped issue:
Image cropped and one image missing:
How it should look:
I would greatly appreciate any help figuring out this issue! Below is the code I'm using.
Here is the code I use to render the images.
void MoviePreview::prepareTexture (GLuint texture, GLint format, int w, int h)
{
// Bind to generated texture
glBindTexture(GL_TEXTURE_2D, texture);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glShadeModel(GL_FLAT);
//glShadeModel(GL_SMOOTH);
glTexImage2D(GL_TEXTURE_2D, 0, format, w, h, 0, format, GL_UNSIGNED_BYTE, 0);
if (GL_RGBA == format)
{
//glColor4f(1.0f, 1.0f, 1.0f, 1.0f);
}
// Crop the texture rectangle
GLint rect[] = {0, h, w, -h};
glTexParameteriv(GL_TEXTURE_2D, GL_TEXTURE_CROP_RECT_OES, rect);
glDrawTexiOES(0, 0, 0, w, h);
}
void MoviePreview::resize (int w, int h)
{
LOGI("native_gl_resize %d %d", w, h);
// init open gl
//glClearColor(1.0, 1.0, 1.0, 1.0);
glEnable(GL_TEXTURE_2D);
glEnable(GL_BLEND);
glDisable(GL_DITHER);
glDisable(GL_LIGHTING);
glDisable(GL_DEPTH_TEST);
glDisable(GL_COLOR_MATERIAL);
glDisable(GL_FOG);
glDisable(GL_CULL_FACE);
glDisable(GL_STENCIL_TEST);
glDisable(GL_COLOR_LOGIC_OP);
glDisable(GL_ALPHA_TEST);
//glBlendFunc(GL_ONE, GL_ONE_MINUS_SRC_ALPHA);//NEW
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);//NEW
//glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_DST_ALPHA);
// Generate one texture object
glGenTextures(MAX_TEXTURES, mTexture);
check_gl_error("glGenTextures");
int frameHeight = h;//image.rows;
int frameWidth = w;//image.cols;
// first texture is our background texture
prepareTexture(mTexture[0], GL_RGBA, mBg.cols, mBg.rows);
prepareTexture(mTexture[1], GL_RGBA, frameWidth, frameHeight);
prepareTexture(mTexture[2], GL_RGBA, frameWidth, frameHeight);
prepareTexture(mTexture[3], GL_RGBA, frameWidth, frameHeight);
mSurfaceWidth = w;
mSurfaceHeight = h;
}
void MoviePreview::render (int64_t* imageIds, const int images)
{
int i = 0;
double sleepDuration = 0;
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
// TODO try to see if we can just get away from always loading the bg
// since it doesn't change often might be worth loading it once and that
// is it...
//glBindTexture(GL_TEXTURE_2D, mTexture[0]);
//glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, mBg.cols, mBg.rows, GL_RGBA, GL_UNSIGNED_BYTE, mBg.ptr());
//glDrawTexiOES(0, 0, 0, mSurfaceWidth, mSurfaceHeight);
// TODO pass the image batch loader
// load images
for (i=0; i<images; i++)
{
if (0 < imageIds[i])
{
sprintf(mTempPath, "%s/f1_%lld.png",mProjectPath.c_str(), imageIds[i]);
mImageLoader[i].loadImage(mTempPath);
}
}
if (0 < mFrameDuration)
{
// here we try to control the suggested frame rate
// set. We calculate since the last show image time
// if we should sleep or not...
if (0 < mLastDrawTimestamp) {
sleepDuration = mFrameDuration - (now_ms() - mLastDrawTimestamp);
}
if (0 < sleepDuration) {
usleep((long)sleepDuration*NANO_IN_MS); // caution: usleep takes microseconds; check that the NANO_IN_MS factor converts ms to µs here, not ns
}
}
// draw images
i = 0;
for (i=0; i<images; i++)
{
if (0 < imageIds[i])
{
cv::Mat img = mImageLoader[i].getImage();
if (!img.empty())
{
glBindTexture(GL_TEXTURE_2D, mTexture[i+1]);
glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, img.cols, img.rows, GL_RGBA, GL_UNSIGNED_BYTE, img.ptr());
glDrawTexiOES(0, 0, 0, img.cols, img.rows);
}
}
}
mLastDrawTimestamp = now_ms();
}
The issue ended up being that my image sources were getting modified by another thread while OpenGL was trying to draw them...
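A minimal sketch of one way to avoid this, assuming a hypothetical GuardedImage holder (the names are illustrative, not from the original code) that serializes access with a std::mutex and hands the renderer its own copy:
#include <mutex>
#include <opencv2/core/core.hpp>

class GuardedImage
{
public:
    // called by the loader thread
    void set(const cv::Mat& img)
    {
        std::lock_guard<std::mutex> lock(mMutex);
        img.copyTo(mImage); // deep copy, so the loader may reuse its buffer
    }
    // called by the render thread before glTexSubImage2D
    cv::Mat get()
    {
        std::lock_guard<std::mutex> lock(mMutex);
        return mImage.clone(); // private snapshot for uploading
    }
private:
    std::mutex mMutex;
    cv::Mat mImage;
};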
First I am creating texture data using this code:
#define checkImageWidth 64
#define checkImageHeight 64
GLint checkImage[checkImageHeight][checkImageWidth][3];
void makeCheckImage(void)
{
int i, j, c;
for (i = 0; i < checkImageHeight; i++) {
for (j = 0; j < checkImageWidth; j++) {
c = (((i&0x8)==0)^((j&0x8)==0))*255;
checkImage[i][j][0] = (GLubyte) c;
checkImage[i][j][1] = (GLubyte) c;
checkImage[i][j][2] = (GLubyte) c;
}
}
}
Then I draw the texture using this code:
glGenTextures( 1, &texture );
// select our current texture
glBindTexture( GL_TEXTURE_2D, texture );
glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT );
glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT );
glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MIN_FILTER,GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MAG_FILTER,GL_LINEAR);
glEnable(GL_TEXTURE_2D);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, checkImageWidth, checkImageHeight, 0, GL_RGB, GL_UNSIGNED_BYTE, checkImage);
glDrawTexiOES(0, 0, 0, checkImageWidth, checkImageHeight);
But I do not see any output. What am I doing wrong here?
I'm trying to use the NDK to display a bitmap on the full screen. The requirements force me to develop in completely native code. I use Skia to draw an SkBitmap, then display it with OpenGL APIs. The first time my application runs on a real Android device, it always works well. However, after I open and close the program several times, it shows a corrupted image. Why does this happen?
The function engine_init_display initializes OpenGL ES and EGL, creates the bitmap, and loads the texture:
static int engine_init_display(struct engine* engine){
// initialize OpenGL ES and EGL
/*
* Here specify the attributes of the desired configuration.
* Below, we select an EGLConfig with at least 8 bits per color
* component compatible with on-screen windows
*/
const EGLint attribs[] = {
EGL_SURFACE_TYPE, EGL_WINDOW_BIT,
EGL_DEPTH_SIZE, 16,
EGL_BLUE_SIZE, 8,
EGL_GREEN_SIZE, 8,
EGL_RED_SIZE, 8,
EGL_NONE
};
EGLint w, h, dummy, format;
EGLint numConfigs;
EGLConfig config;
EGLSurface surface;
EGLContext context;
EGLDisplay display = eglGetDisplay(EGL_DEFAULT_DISPLAY);
eglInitialize(display, 0, 0);
/* Here, the application chooses the configuration it desires. In this
* sample, we have a very simplified selection process, where we pick
* the first EGLConfig that matches our criteria */
eglChooseConfig(display, attribs, &config, 1, &numConfigs);
/* EGL_NATIVE_VISUAL_ID is an attribute of the EGLConfig that is
* guaranteed to be accepted by ANativeWindow_setBuffersGeometry().
* As soon as we picked a EGLConfig, we can safely reconfigure the
* ANativeWindow buffers to match, using EGL_NATIVE_VISUAL_ID. */
eglGetConfigAttrib(display, config, EGL_NATIVE_VISUAL_ID, &format);
ANativeWindow_setBuffersGeometry(engine->app->window, 0, 0, format);
surface = eglCreateWindowSurface(display, config, engine->app->window, NULL);
context = eglCreateContext(display, config, NULL, NULL);
if (eglMakeCurrent(display, surface, surface, context) == EGL_FALSE) {
LOGW("Unable to eglMakeCurrent");
return -1;
}
eglQuerySurface(display, surface, EGL_WIDTH, &w);
eglQuerySurface(display, surface, EGL_HEIGHT, &h);
engine->display = display;
engine->context = context;
engine->surface = surface;
engine->width = w;
engine->height = h;
engine->state.angle = 0;
engine->format=format;
// Initialize GL state.
glHint(GL_PERSPECTIVE_CORRECTION_HINT, GL_FASTEST);
glEnable(GL_CULL_FACE);
glShadeModel(GL_SMOOTH);
glDisable(GL_DEPTH_TEST);
SkBitmap bitmap;
GLvoid* bitmapBuffer;
createBitmap(bitmap,bitmapBuffer,width,height);
drawBitmap(bitmap,width,height);
glEnable(GL_TEXTURE_2D);
glGenTextures(1,&sTexture);
glBindTexture(GL_TEXTURE_2D,sTexture);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, width, height, 0, GL_RGBA, GL_UNSIGNED_BYTE, (GLvoid*)bitmapBuffer);
glDisable(GL_TEXTURE_2D);
glFinish();
clearBitmapBuffer(bitmap,bitmapBuffer);
//engine_draw_frame(engine);
return 0;
}
The function render displays the bitmap:
void render(struct engine* engine, int width, int height){
glViewport((engine->width-width)/2, (engine->height-height)/2, width, height);
glClearColorx((GLfixed)(0.1f * 65536),(GLfixed)(0.2f * 65536),(GLfixed)(0.3f * 65536), 0x10000);
glClear(GL_COLOR_BUFFER_BIT);
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
glMatrixMode(GL_TEXTURE);
glLoadIdentity();
glEnable(GL_TEXTURE_2D);
glTexParameterx(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameterx(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glEnableClientState(GL_VERTEX_ARRAY);
glEnableClientState(GL_TEXTURE_COORD_ARRAY);
glBindTexture(GL_TEXTURE_2D,sTexture);
glFrontFace(GL_CW);
glTexCoordPointer(2, GL_FLOAT, 0, textureCoords);
glVertexPointer(3, GL_FLOAT, 0, vertices);
glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);
glDisable(GL_TEXTURE_2D);
glDisableClientState(GL_VERTEX_ARRAY);
glDisableClientState(GL_TEXTURE_COORD_ARRAY);
glFlush();
eglSwapBuffers(engine->display, engine->surface);
}
And when the window is being closed, engine_term_display is called:
static void engine_term_display(struct engine* engine) {
if (engine->display != EGL_NO_DISPLAY) {
eglMakeCurrent(engine->display, EGL_NO_SURFACE, EGL_NO_SURFACE, EGL_NO_CONTEXT);
if (engine->context != EGL_NO_CONTEXT) {
eglDestroyContext(engine->display, engine->context);
}
if (engine->surface != EGL_NO_SURFACE) {
eglDestroySurface(engine->display, engine->surface);
}
eglTerminate(engine->display);
}
engine->display = EGL_NO_DISPLAY;
engine->context = EGL_NO_CONTEXT;
engine->surface = EGL_NO_SURFACE;
}
Update:
android_main is the entry point of my native application. I found that even after this main function returns, the whole application is still running.
void android_main(struct android_app* state) {
struct engine engine;
// Make sure glue isn't stripped.
app_dummy();
memset(&engine, 0, sizeof(engine));
state->userData = &engine;
state->onAppCmd = engine_handle_cmd;
state->onInputEvent = engine_handle_input;
engine.app = state;
ANativeActivity_setWindowFlags(engine.app->activity, AWINDOW_FLAG_FULLSCREEN, 0);
if (state->savedState != NULL) {
// We are starting with a previous saved state; restore from it.
engine.state = *(struct saved_state*)state->savedState;
}
while(1){
int ident;
int events;
struct android_poll_source* source;
if((ident=ALooper_pollAll(-1, NULL, &events,(void**)&source))>=0){
// Process this event.
if (source != NULL) {
source->process(state, source);
}
if (state->destroyRequested != 0) {
engine_term_display(&engine);
return;
}
}
}
}
I fixed it myself: using exit(0) instead of return terminates the process and finishes the activity completely.
if (state->destroyRequested != 0) {
engine_term_display(&engine);
exit(0);
}