Textures show black when using GL_LINEAR_MIPMAP_LINEAR in emscripten - opengl-es

I have a small program that just draws a full screen quad with a texture.
This program works fine when I run it on Windows.
However it seems to fail when compiling to WebGL2 (GLES3) using Emscripten. It just shows a black quad.
I have figured out that the problem happens only when I enable mipmapping.
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR);
This is the code that initializes the texture:
// Create and fill a 2x2 sRGB texture on texture unit 0.
glActiveTexture(GL_TEXTURE0);
glGenTextures(1, &tex);
glBindTexture(GL_TEXTURE_2D, tex);
// 3 bytes per texel; no alpha channel.
struct Color { u8 r, g, b; };
const Color pixels[] = {
{255, 0, 0}, {0, 255, 0},
{0, 0, 255}, {255, 255, 0}
};
// Rows are 6 bytes wide, so relax the default 4-byte unpack row alignment.
glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
glTexImage2D(GL_TEXTURE_2D, 0, GL_SRGB8, 2, 2, 0, GL_RGB, GL_UNSIGNED_BYTE, pixels);
glGenerateMipmap(GL_TEXTURE_2D);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
//glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
//glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
// NOTE(review): enabling the GL_LINEAR_MIPMAP_LINEAR line below (instead of
// GL_LINEAR) is what triggers the black quad under WebGL2, per the question.
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
//glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
EDIT:
I have found that, if instead of calling glGenerateMipmap, I specify the mip levels manually, it works!
// Upload level 0 (2x2) and an explicit level 1 (1x1) instead of calling
// glGenerateMipmap; only the first texel of `pixels` is read for the 1x1 level.
glTexImage2D(GL_TEXTURE_2D, 0, GL_SRGB8, 2, 2, 0, GL_RGB, GL_UNSIGNED_BYTE, pixels);
glTexImage2D(GL_TEXTURE_2D, 1, GL_SRGB8, 1, 1, 0, GL_RGB, GL_UNSIGNED_BYTE, pixels);
Repo with the full code: https://github.com/tuket/emscripten_problem_with_mipmap_textures
EDIT2:
Another interesting finding: if instead of using a 3-channel texture, I use a 4-channel texture, it works!
// 4-channel variant: 4 bytes per texel (RGBA) with the sized sRGB+alpha format.
struct Color { u8 r, g, b, a; };
const Color pixels[] = {
{255, 0, 0, 255}, {0, 255, 0, 255},
{0, 0, 255, 255}, {255, 255, 0, 255}
};
glPixelStorei(GL_UNPACK_ALIGNMENT, 1); // rows are 8 bytes here, so this is harmless
glTexImage2D(GL_TEXTURE_2D, 0, GL_SRGB8_ALPHA8, 2, 2, 0, GL_RGBA, GL_UNSIGNED_BYTE, pixels);
glGenerateMipmap(GL_TEXTURE_2D);

Related

Can you glBlitFramebuffer between different levels of the same texture?

Let's say I have a texture that I need to mipmap but I want the mipmapping done hardware accelerated. I decided the best route to take would be something like this:
// LWJGL-style Java: build a texture's mip chain by blitting each level into
// the next, using two FBOs (one read, one draw).
int read = glGenFramebuffers();
int draw = glGenFramebuffers();
int wh = 256; // edge length of mip level 0 (square texture), halved each pass
int glObject = glGenTextures();
glBindTexture(GL_TEXTURE_2D, glObject);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, wh, wh, 0, GL_RGBA, GL_UNSIGNED_BYTE, data);
for(int i = 0; i < mipLevels; ++i) {
// Attach level i of the texture as the blit source.
glBindFramebuffer(GL_FRAMEBUFFER, read);
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, glObject, i);
// Allocate blank storage for level i+1.
// NOTE(review): "(ByeBuffer)null" is presumably a typo for (ByteBuffer)null.
glTexImage2D(GL_TEXTURE_2D, i + 1, GL_RGBA8, wh / 2, wh / 2, 0, GL_RGBA, GL_UNSIGNED_BYTE, (ByeBuffer)null);
// Attach level i+1 as the blit destination and scale level i down into it.
glBindFramebuffer(GL_FRAMEBUFFER, draw);
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, glObject, i + 1);
glBindFramebuffer(GL_READ_FRAMEBUFFER, read);
glBindFramebuffer(GL_DRAW_FRAMEBUFFER, draw);
glBlitFramebuffer(0, 0, wh, wh, 0, 0, wh / 2, wh / 2, GL_COLOR_BUFFER_BIT, GL_LINEAR);
wh /= 2;
}
Both framebuffers return INCOMPLETE_ATTACHMENT in this block of code when glCheckFramebufferStatus is called. What am I doing wrong? Am I allocating the blank mipmap levels correctly?
Answer: I was using glTexImage2D to allocate blank mip levels when I should have used glTexStorage2D to allocate the blank levels under these circumstances. Here is a link to the manual page for glTexStorage2D for more details: https://www.khronos.org/registry/OpenGL-Refpages/gl4/html/glTexStorage2D.xhtml

how to set SbDecodeTargetInfo for cobalt decode to texture?

I implemented the cobalt spherical video feature. It works correctly for 1080p video, but if I play a video at another resolution, such as 480p, the video is reversed. The test video is from https://ytlr-cert.appspot.com/2019/main.html
the format is YUVNV12.
I attach my source code below; could anyone help point out what caused it?
Thanks.
The reversed video:
My Source:
// Plane 0: full-resolution Y plane uploaded as a single-channel GL_ALPHA
// texture. The buffer stride (not the visible image width) is used as the
// texture width; content_region crops the padding back out.
GL_CALL(glBindTexture(GL_TEXTURE_2D, out_info->planes[0].texture));
GL_CALL(glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR));
GL_CALL(glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR));
GL_CALL(glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE));
GL_CALL(glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE));
GL_CALL(glTexImage2D(GL_TEXTURE_2D, 0, GL_ALPHA,
decode_target->videoframe_info.Stride,
decode_target->videoframe_info.imageHeight, 0, GL_ALPHA,
GL_UNSIGNED_BYTE, decode_target->buffer_y));
out_info->planes[0].gl_texture_format = GL_ALPHA;
out_info->planes[0].width = decode_target->videoframe_info.Stride;
out_info->planes[0].height = decode_target->videoframe_info.imageHeight;
// NOTE(review): only content_region.right/bottom are assigned; top/left are
// never set here. If Starboard's content_region uses a bottom-up (GL-style)
// y convention, leaving top/bottom like this would flip the video vertically
// -- confirm against the SbDecodeTargetInfoContentRegion documentation.
out_info->planes[0].content_region.right = decode_target->videoframe_info.imageWidth;
out_info->planes[0].content_region.bottom = decode_target->videoframe_info.imageHeight;
GL_CALL(glBindTexture(GL_TEXTURE_2D, 0));
// Plane 1: interleaved UV plane at half resolution, two channels per texel
// (GL_LUMINANCE_ALPHA), matching NV12 chroma subsampling.
GL_CALL(glBindTexture(GL_TEXTURE_2D, out_info->planes[1].texture));
GL_CALL(glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR));
GL_CALL(glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR));
GL_CALL(glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE));
GL_CALL(glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE));
GL_CALL(glTexImage2D(GL_TEXTURE_2D, 0, GL_LUMINANCE_ALPHA,
decode_target->videoframe_info.Stride / 2,
decode_target->videoframe_info.imageHeight / 2, 0,
GL_LUMINANCE_ALPHA, GL_UNSIGNED_BYTE,
decode_target->buffer_uv));
GL_CALL(glBindTexture(GL_TEXTURE_2D, 0));
out_info->planes[1].gl_texture_format = GL_LUMINANCE_ALPHA;
out_info->planes[1].width = decode_target->videoframe_info.Stride / 2;
out_info->planes[1].height = decode_target->videoframe_info.imageHeight / 2;
out_info->planes[1].content_region.right = decode_target->videoframe_info.imageWidth / 2;
out_info->planes[1].content_region.bottom = decode_target->videoframe_info.imageHeight / 2;
out_info->width = decode_target->videoframe_info.imageWidth;
out_info->height = decode_target->videoframe_info.imageHeight;
out_info->format = kSbDecodeTargetFormat2PlaneYUVNV12;

Drawing YUV frames with OpenGL on mac

I'm decoding video with the ffmpeg libraries and storing the decoded frames in an array. Now I want to draw these frames on the screen with OpenGL. I googled and found that Apple offers the GL_APPLE_ycbcr_422 format for efficient drawing. So I switched decoding frames in ffmpeg to the PIX_FMT_YUYV422 format (packed YUV 4:2:2, 16bpp, Y0 Cb Y1 Cr), which seems to be the equivalent of GL_APPLE_ycbcr_422 in OpenGL. Now I'm trying to draw frames on the surface with this code:
// 2x2 test image in packed YUYV (4:2:2) layout: 2 bytes per pixel, 8 bytes total.
GLsizei width = 2, height = 2;
uint8_t data[8] = {128,200,123,10,5,13,54,180};
GLuint texture_name;
glEnable(GL_TEXTURE_RECTANGLE_ARB);
assert(glGetError() == GL_NO_ERROR);
glGenTextures (1,&texture_name);
assert(glGetError() == GL_NO_ERROR);
glBindTexture(GL_TEXTURE_RECTANGLE_ARB, texture_name);
assert(glGetError() == GL_NO_ERROR);
// Apple client-storage path: let GL read texel data directly from `data`.
glTextureRangeAPPLE(GL_TEXTURE_RECTANGLE_ARB, width * height * 2, (void*)data);
assert(glGetError() == GL_NO_ERROR);
glTexParameteri(GL_TEXTURE_RECTANGLE_ARB, GL_TEXTURE_STORAGE_HINT_APPLE , GL_STORAGE_SHARED_APPLE);
assert(glGetError() == GL_NO_ERROR);
glPixelStorei(GL_UNPACK_CLIENT_STORAGE_APPLE, GL_TRUE);
assert(glGetError() == GL_NO_ERROR);
// not sure about code above
glTexParameteri(GL_TEXTURE_RECTANGLE_ARB, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
assert(glGetError() == GL_NO_ERROR);
glTexParameteri(GL_TEXTURE_RECTANGLE_ARB, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
assert(glGetError() == GL_NO_ERROR);
glTexParameteri(GL_TEXTURE_RECTANGLE_ARB, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
assert(glGetError() == GL_NO_ERROR);
glTexParameteri(GL_TEXTURE_RECTANGLE_ARB, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
assert(glGetError() == GL_NO_ERROR);
glPixelStorei(GL_UNPACK_ROW_LENGTH, 0);
assert(glGetError() == GL_NO_ERROR);
// end
glViewport(0, 0, width,height);
assert(glGetError() == GL_NO_ERROR);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
assert(glGetError() == GL_NO_ERROR);
// NOTE(review): the image is uploaded twice -- glTexImage2D allocates and
// fills it, then glTexSubImage2D immediately re-uploads the same region.
glTexImage2D(GL_TEXTURE_RECTANGLE_ARB, 0, GL_RGB, width, height, 0,
GL_YCBCR_422_APPLE,GL_UNSIGNED_SHORT_8_8_APPLE,
(void*)data);
assert(glGetError() == GL_NO_ERROR);
glTexSubImage2D(GL_TEXTURE_RECTANGLE_ARB, 0, 0, 0, width, height,
GL_YCBCR_422_APPLE,GL_UNSIGNED_SHORT_8_8_APPLE,
(void*)data);
assert(glGetError() == GL_NO_ERROR);
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
glScalef(1.0f, -1.0f, 1.0f);
glOrtho(0, width, 0, height, -1.0, 1.0);
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
// NOTE(review): no glTexCoord calls are issued between glBegin/glEnd, so
// every vertex uses the current (unset) texture coordinate -- the quad
// cannot sample the texture correctly as written.
glBegin(GL_QUADS);
glVertex3f(0, 0, -1.0f);
glVertex3f(width, 0, -1.0f);
glVertex3f(width, height, -1.0f);
glVertex3f(0, height, -1.0f);
glEnd();
But I don't see correct frames. Instead I see corrupted pictures of my display. So I understand that my use of OpenGL is not correct, and maybe I don't understand some fundamental things.
Please help me to correct and understand my mistakes. If I should RTFM, please give me a link that will help, because I didn't find any useful info.
Here's what I use in one of my applications to set up a YUV 4:2:2 texture target for uploaded data from YUV frames pulled off of a CCD camera:
// Working setup from a CCD-camera app: rectangle texture plus Apple storage
// hints for fast YUV 4:2:2 uploads. Note GL_RGBA internal format and the
// _REV_ packed type, unlike the question's code.
glActiveTexture(GL_TEXTURE0);
glEnable(GL_TEXTURE_RECTANGLE_EXT);
glGenTextures(1, &textureName);
glBindTexture(GL_TEXTURE_RECTANGLE_EXT, textureName);
glTextureRangeAPPLE(GL_TEXTURE_RECTANGLE_EXT, videoImageSize.width * videoImageSize.height * 2, videoTexture);
glTexParameteri(GL_TEXTURE_RECTANGLE_EXT, GL_TEXTURE_STORAGE_HINT_APPLE , GL_STORAGE_SHARED_APPLE);
glTexParameteri(GL_TEXTURE_RECTANGLE_EXT, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_RECTANGLE_EXT, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_RECTANGLE_EXT, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_RECTANGLE_EXT, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexEnvi(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_REPLACE);
glTexImage2D(GL_TEXTURE_RECTANGLE_EXT, 0, GL_RGBA, videoImageSize.width, videoImageSize.height, 0, GL_YCBCR_422_APPLE, GL_UNSIGNED_SHORT_8_8_REV_APPLE, videoTexture);
glBindTexture(GL_TEXTURE_RECTANGLE_EXT, 0);
The actual upload to the texture is accomplished using the following:
// Per-frame upload: rewrite the full texture region in place from videoTexture.
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_RECTANGLE_EXT, textureName);
glTexSubImage2D (GL_TEXTURE_RECTANGLE_EXT, 0, 0, 0, videoImageSize.width, videoImageSize.height, GL_YCBCR_422_APPLE, GL_UNSIGNED_SHORT_8_8_REV_APPLE, videoTexture);
In the above, videoTexture is the memory location of the raw bytes from a captured YUV frame off of the camera. I'm also using Apple's storage hints in the above to accelerate the upload, although I never could get the GL_UNPACK_CLIENT_STORAGE_APPLE optimization to speed things up further.
This was copied and pasted out of a working application, and the only real differences I see are my use of GL_RGBA instead of GL_RGB in the glTexImage2D() call, and the use of GL_UNSIGNED_SHORT_8_8_REV_APPLE instead of GL_UNSIGNED_SHORT_8_8_APPLE. Both the ARB and EXT rectangle extensions resolve to the same thing on the Mac.

Whats the difference between these two OpenGL calls (Basic OpenGL Question)?

It's a really stupid, basic question, but: Can someone please tell me the difference between:
// Immediate-mode quad: texcoord t=0 is paired with vertex y=1 (and t=1 with
// y=0), i.e. the texture's vertical direction is opposite the geometry's.
glBegin(GL_QUADS);
glTexCoord3f(0, 0, 0); glVertex3f(0, 1, 0);
glTexCoord3f(1, 0, 0); glVertex3f(1, 1, 0);
glTexCoord3f(1, 1, 0); glVertex3f(1, 0, 0);
glTexCoord3f(0, 1, 0); glVertex3f(0, 0, 0);
glEnd();
and
// OpenGL-ES-compatible path: quad as a 4-vertex triangle strip.
// Here texcoord t=0 is paired with vertex y=0 -- the opposite pairing from
// the immediate-mode version above, which is why the texture appears flipped.
glEnableClientState(GL_VERTEX_ARRAY);
glEnableClientState(GL_TEXTURE_COORD_ARRAY);
const GLfloat vertexData[] = {
0, 0, 0,
width, 0, 0,
0, height, 0,
width, height, 0
};
const GLfloat texCoords[] = {
0, 0,
1, 0,
0, 1,
1, 1
};
glVertexPointer(3, GL_FLOAT, 0, &vertexData);
glTexCoordPointer(2, GL_FLOAT, 0, &texCoords);
glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);
glDisableClientState(GL_TEXTURE_COORD_ARRAY);
glDisableClientState(GL_VERTEX_ARRAY);
(width & height is the texture size in pixel)
I can't really understand why, but if I use the second code path (because it is OpenGL ES compatible), my texture is inverted on the y-axis. What am I doing wrong?
Edit:
I don't know if it's relevant — maybe I made a mistake when initializing the viewport for 2D drawing?
// 2D projection covering the current viewport. glOrtho's arguments are
// (left, right, bottom, top, near, far); bottom = y+height and top = y here,
// so window y grows downward in this projection.
GLint iViewport[4];
glGetIntegerv( GL_VIEWPORT, iViewport );
glMatrixMode(GL_PROJECTION);
glPushMatrix();
glLoadIdentity();
glOrtho( iViewport[0], iViewport[0]+iViewport[2], iViewport[1]+iViewport[3], iViewport[1], -1, 1 );
glMatrixMode(GL_MODELVIEW);
glPushMatrix();
glLoadIdentity();
Sticking to the simplest, you have 4 vertices with texcoords. Here they are put in parallel:
texcoord/vert
0, 0, / 0, 1
1, 0, / 1, 1
1, 1, / 1, 0
0, 1, / 0, 0
0, 0 / 0, 0
1, 0 / width, 0
0, 1 / 0, height
1, 1 / width, height
Now, if we reorder them by verts, you can see that the texcoords do not correspond
// sorted verts by "alphabetical" position.
0, 1, / 0, 0
0, 0, / 0, 1
1, 1, / 1, 0
1, 0, / 1, 1
0, 0 / 0, 0
0, 1 / 0, height
1, 0 / width, 0
1, 1 / width, height
As you can see, for each equivalent position, the second texture coordinate is inverted between the 2. That explains why the textures are y-flipped.
So if you want to fix it, simply flip them back on the second method:
// Corrected texcoords for the triangle-strip version: t is flipped so each
// vertex gets the same texcoord pairing as the immediate-mode quad.
const GLfloat texCoords[] = {
0, 1,
1, 1,
0, 0,
1, 0
};
Try using the same geometry in each case:
Immediate mode:
// Unit quad drawn as two triangles in immediate mode, with texcoord (s,t)
// matching vertex (x,y) at every corner.
// Fixed: glTexCoord2f takes exactly two coordinates; the original passed
// three arguments, which does not compile (glTexCoord3f would be needed
// for a third coordinate).
glBegin(GL_TRIANGLES);
glTexCoord2f(0, 0); glVertex3f(0, 0, 0);
glTexCoord2f(1, 0); glVertex3f(1, 0, 0);
glTexCoord2f(1, 1); glVertex3f(1, 1, 0);
glTexCoord2f(1, 1); glVertex3f(1, 1, 0);
glTexCoord2f(0, 1); glVertex3f(0, 1, 0);
glTexCoord2f(0, 0); glVertex3f(0, 0, 0);
glEnd();
Vertex arrays:
// The same two triangles via client-side vertex arrays (the OpenGL-ES-
// compatible path), with texcoords paired to vertices exactly as in the
// immediate-mode version above.
// Fixed: the original array initializers were missing commas between rows
// (after two "width, height, 0" rows and two "1, 1" rows), which does not
// compile.
glEnableClientState(GL_VERTEX_ARRAY);
glEnableClientState(GL_TEXTURE_COORD_ARRAY);
const GLfloat vertexData[] = {
0, 0, 0,
width, 0, 0,
width, height, 0,
width, height, 0,
0, height, 0,
0, 0, 0,
};
const GLfloat texCoords[] = {
0, 0,
1, 0,
1, 1,
1, 1,
0, 1,
0, 0,
};
glVertexPointer(3, GL_FLOAT, 0, &vertexData);
glTexCoordPointer(2, GL_FLOAT, 0, &texCoords);
glDrawArrays(GL_TRIANGLES, 0, 6);
glDisableClientState(GL_TEXTURE_COORD_ARRAY);
glDisableClientState(GL_VERTEX_ARRAY);
Ok, try this complete GLUT program. I actually tested it this time around :)
#include <vector>
#include <GL/glut.h>
using namespace std; // NOTE(review): file-scope using-directive; acceptable in a demo, avoid in headers
size_t win_w = 0;        // current window width in pixels (set in reshape)
size_t win_h = 0;        // current window height in pixels (set in reshape)
double aspect_ratio = 0; // win_w / win_h, recomputed on every reshape
GLuint tex_obj;          // the 2x2 RGB demo texture created in main()
// Draw one frame: an aspect-correct ortho view containing a textured quad,
// scaled 5x. The commented-out immediate-mode block and the active
// vertex-array block below draw identical geometry, demonstrating that the
// two paths agree when the texcoord/vertex pairing is the same.
void display(void)
{
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
glOrtho(-10*aspect_ratio, 10*aspect_ratio, -10, 10, -1, 1);
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
glEnable(GL_TEXTURE_2D);
glBindTexture(GL_TEXTURE_2D, tex_obj);
glPushMatrix();
// NOTE(review): z scale of 0 flattens the quad onto z=0; harmless for this
// 2D demo but degenerate for anything 3D.
glScalef(5,5,0);
/*
glBegin(GL_TRIANGLES);
glTexCoord2f(0, 0); glVertex3f(0, 0, 0);
glTexCoord2f(1, 0); glVertex3f(1, 0, 0);
glTexCoord2f(1, 1); glVertex3f(1, 1, 0);
glTexCoord2f(1, 1); glVertex3f(1, 1, 0);
glTexCoord2f(0, 1); glVertex3f(0, 1, 0);
glTexCoord2f(0, 0); glVertex3f(0, 0, 0);
glEnd();
*/
glEnableClientState(GL_VERTEX_ARRAY);
glEnableClientState(GL_TEXTURE_COORD_ARRAY);
// Two triangles covering the unit square; texcoords mirror the vertices.
const GLfloat vertexData[] = {
0, 0, 0,
1, 0, 0,
1, 1, 0,
1, 1, 0,
0, 1, 0,
0, 0, 0,
};
const GLfloat texCoords[] = {
0, 0,
1, 0,
1, 1,
1, 1,
0, 1,
0, 0,
};
glVertexPointer(3, GL_FLOAT, 0, &vertexData);
glTexCoordPointer(2, GL_FLOAT, 0, &texCoords);
glDrawArrays(GL_TRIANGLES, 0, 6);
glDisableClientState(GL_TEXTURE_COORD_ARRAY);
glDisableClientState(GL_VERTEX_ARRAY);
glPopMatrix();
glFlush();
glutSwapBuffers();
}
// GLUT reshape callback: remember the new window size, refresh the derived
// aspect ratio used by display()'s projection, and resize the GL viewport.
void reshape(int w, int h)
{
    win_w = w;
    win_h = h;
    aspect_ratio = static_cast<double>(win_w) / static_cast<double>(win_h);
    glViewport(0, 0, w, h);
}
// Entry point: create a double-buffered GLUT window, build a 2x2 RGB texture
// with NEAREST filtering (so no mipmaps are required), register callbacks,
// and enter the event loop.
int main(int argc, char **argv)
{
glutInit(&argc, argv);
glutInitDisplayMode(GLUT_RGBA | GLUT_DEPTH | GLUT_DOUBLE);
glutInitWindowSize(800,600);
glutCreateWindow("Aspect Ratio");
// Rows of the 2x2 RGB image are 6 bytes, so relax the 4-byte row alignment.
glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
glEnable(GL_TEXTURE_2D);
glGenTextures(1, &tex_obj);
glBindTexture(GL_TEXTURE_2D, tex_obj);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
// 4 texels x 3 bytes each: red, green, blue, white.
vector< unsigned char > pixels(4 * 3);
pixels[3*0+0] = 255;
pixels[3*0+1] = 0;
pixels[3*0+2] = 0;
pixels[3*1+0] = 0;
pixels[3*1+1] = 255;
pixels[3*1+2] = 0;
pixels[3*2+0] = 0;
pixels[3*2+1] = 0;
pixels[3*2+2] = 255;
pixels[3*3+0] = 255;
pixels[3*3+1] = 255;
pixels[3*3+2] = 255;
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, 2, 2, 0, GL_RGB, GL_UNSIGNED_BYTE, &pixels[0]);
glutDisplayFunc(display);
glutReshapeFunc(reshape);
glutMainLoop();
return 0;
}

Render to texture problem with alpha

When I render to texture, and then draw the same image, it seems to make everything darker. To get this image:
http://img24.imageshack.us/img24/8061/87993367.png
I'm rendering the upper-left square with color (1, 1, 1, .8) to a texture, then rendering that texture, plus the middle square (same color) to another texture, then finally that texture plus the lower-right square (same color) to the screen.
As you can see, each time I render to texture, everything gets a little darker.
My render-to-texture code looks like: (I'm using OpenGL ES on the iPhone)
// gen framebuffer
GLuint framebuffer;
glGenFramebuffersOES(1, &framebuffer);
glBindFramebufferOES(GL_FRAMEBUFFER_OES, framebuffer);
// gen texture
GLuint texture;
glGenTextures(1, &texture);
glEnable(GL_TEXTURE_2D);
glBindTexture(GL_TEXTURE_2D, texture);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, width, height, 0, GL_RGBA, GL_UNSIGNED_BYTE, 0);
glBindTexture(GL_TEXTURE_2D, 0);
glDisable(GL_TEXTURE_2D);
// hook it up
glFramebufferTexture2DOES(GL_FRAMEBUFFER_OES, GL_COLOR_ATTACHMENT0_OES, GL_TEXTURE_2D, texture, 0);
if(glCheckFramebufferStatusOES(GL_FRAMEBUFFER_OES) != GL_FRAMEBUFFER_COMPLETE_OES))
return false;
// set up drawing
glBindFramebufferOES(GL_FRAMEBUFFER_OES, framebuffer);
glViewport(0, 0, Screen::Width, Screen::Height);
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
glOrtho(0, Screen::Width, 0, Screen::Height, -1, 1);
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
glColor4f(1, 1, 1, 1);
// do whatever drawing we'll do here
Draw();
glBindFramebufferOES(GL_FRAMEBUFFER_OES, 0);
Is there anything that I'm doing wrong here? Do you need more code to figure it out? What might be going on here?
I'm only guessing:
Drawing the first texture gives you 204 (0.8 * 255) in the RGB and alpha channels. When you draw for the second time (with GL_BLEND enabled, I presume), you're getting the light gray 204 RGB with 80% alpha which gives you a medium gray.
Solution: use glBlendFunc(GL_ONE, GL_ONE_MINUS_SRC_ALPHA) and premultiply your colors.

Resources