Drawing YUV frames with OpenGL on Mac (Cocoa)

I'm decoding video with the ffmpeg libraries and storing the decoded frames in an array. Now I want to draw these frames on screen with OpenGL. I googled and found that Apple recommends the GL_APPLE_ycbcr_422 format for efficient drawing, so I switched ffmpeg decoding to PIX_FMT_YUYV422 (packed YUV 4:2:2, 16bpp, Y0 Cb Y1 Cr), which seems to be the equivalent of GL_APPLE_ycbcr_422 in OpenGL. Now I'm trying to draw frames to the surface with this code:
GLsizei width = 2, height = 2;
uint8_t data[8] = {128,200,123,10,5,13,54,180};
GLuint texture_name;
glEnable(GL_TEXTURE_RECTANGLE_ARB);
assert(glGetError() == GL_NO_ERROR);
glGenTextures (1,&texture_name);
assert(glGetError() == GL_NO_ERROR);
glBindTexture(GL_TEXTURE_RECTANGLE_ARB, texture_name);
assert(glGetError() == GL_NO_ERROR);
glTextureRangeAPPLE(GL_TEXTURE_RECTANGLE_ARB, width * height * 2, (void*)data);
assert(glGetError() == GL_NO_ERROR);
glTexParameteri(GL_TEXTURE_RECTANGLE_ARB, GL_TEXTURE_STORAGE_HINT_APPLE , GL_STORAGE_SHARED_APPLE);
assert(glGetError() == GL_NO_ERROR);
glPixelStorei(GL_UNPACK_CLIENT_STORAGE_APPLE, GL_TRUE);
assert(glGetError() == GL_NO_ERROR);
// not sure about code above
glTexParameteri(GL_TEXTURE_RECTANGLE_ARB, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
assert(glGetError() == GL_NO_ERROR);
glTexParameteri(GL_TEXTURE_RECTANGLE_ARB, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
assert(glGetError() == GL_NO_ERROR);
glTexParameteri(GL_TEXTURE_RECTANGLE_ARB, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
assert(glGetError() == GL_NO_ERROR);
glTexParameteri(GL_TEXTURE_RECTANGLE_ARB, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
assert(glGetError() == GL_NO_ERROR);
glPixelStorei(GL_UNPACK_ROW_LENGTH, 0);
assert(glGetError() == GL_NO_ERROR);
// end
glViewport(0, 0, width,height);
assert(glGetError() == GL_NO_ERROR);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
assert(glGetError() == GL_NO_ERROR);
glTexImage2D(GL_TEXTURE_RECTANGLE_ARB, 0, GL_RGB, width, height, 0,
GL_YCBCR_422_APPLE,GL_UNSIGNED_SHORT_8_8_APPLE,
(void*)data);
assert(glGetError() == GL_NO_ERROR);
glTexSubImage2D(GL_TEXTURE_RECTANGLE_ARB, 0, 0, 0, width, height,
GL_YCBCR_422_APPLE,GL_UNSIGNED_SHORT_8_8_APPLE,
(void*)data);
assert(glGetError() == GL_NO_ERROR);
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
glScalef(1.0f, -1.0f, 1.0f);
glOrtho(0, width, 0, height, -1.0, 1.0);
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
glBegin(GL_QUADS);
glVertex3f(0, 0, -1.0f);
glVertex3f(width, 0, -1.0f);
glVertex3f(width, height, -1.0f);
glVertex3f(0, height, -1.0f);
glEnd();
But I don't see correct frames. Instead I see corrupted pictures that look like pieces of my display, so I understand that my use of OpenGL is incorrect and that I'm probably missing something fundamental.
Please help me correct and understand my mistakes. If I should RTFM, please give me a link that will actually help, because I haven't found any useful info.

Here's what I use in one of my applications to set up a YUV 4:2:2 texture target for uploaded data from YUV frames pulled off of a CCD camera:
glActiveTexture(GL_TEXTURE0);
glEnable(GL_TEXTURE_RECTANGLE_EXT);
glGenTextures(1, &textureName);
glBindTexture(GL_TEXTURE_RECTANGLE_EXT, textureName);
glTextureRangeAPPLE(GL_TEXTURE_RECTANGLE_EXT, videoImageSize.width * videoImageSize.height * 2, videoTexture);
glTexParameteri(GL_TEXTURE_RECTANGLE_EXT, GL_TEXTURE_STORAGE_HINT_APPLE , GL_STORAGE_SHARED_APPLE);
glTexParameteri(GL_TEXTURE_RECTANGLE_EXT, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_RECTANGLE_EXT, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_RECTANGLE_EXT, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_RECTANGLE_EXT, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexEnvi(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_REPLACE);
glTexImage2D(GL_TEXTURE_RECTANGLE_EXT, 0, GL_RGBA, videoImageSize.width, videoImageSize.height, 0, GL_YCBCR_422_APPLE, GL_UNSIGNED_SHORT_8_8_REV_APPLE, videoTexture);
glBindTexture(GL_TEXTURE_RECTANGLE_EXT, 0);
The actual upload to the texture is accomplished using the following:
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_RECTANGLE_EXT, textureName);
glTexSubImage2D (GL_TEXTURE_RECTANGLE_EXT, 0, 0, 0, videoImageSize.width, videoImageSize.height, GL_YCBCR_422_APPLE, GL_UNSIGNED_SHORT_8_8_REV_APPLE, videoTexture);
In the above, videoTexture is the memory location of the raw bytes from a captured YUV frame off of the camera. I'm also using Apple's storage hints in the above to accelerate the upload, although I never could get the GL_UNPACK_CLIENT_STORAGE_APPLE optimization to speed things up further.
This was copied and pasted out of a working application, and the only real differences I see are my use of GL_RGBA instead of GL_RGB in glTexImage2D() and of GL_UNSIGNED_SHORT_8_8_REV_APPLE instead of GL_UNSIGNED_SHORT_8_8_APPLE. Both the ARB and EXT rectangle extensions resolve to the same thing on the Mac.
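One more difference worth noting that isn't part of the original answer (my observation): the draw code in the question never issues texture coordinates, and rectangle textures are sampled with non-normalized coordinates running from 0 to width/height rather than 0 to 1. A minimal draw pass for a rectangle texture might look like this sketch:
// Rectangle textures use texel (non-normalized) coordinates.
glBegin(GL_QUADS);
glTexCoord2f(0.0f, 0.0f); glVertex2f(0.0f, 0.0f);
glTexCoord2f((GLfloat)width, 0.0f); glVertex2f((GLfloat)width, 0.0f);
glTexCoord2f((GLfloat)width, (GLfloat)height); glVertex2f((GLfloat)width, (GLfloat)height);
glTexCoord2f(0.0f, (GLfloat)height); glVertex2f(0.0f, (GLfloat)height);
glEnd();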

Related

Read data from texture with any internal format on ES

I am trying to read back the data of a texture on OpenGL ES. The problem with my approach is that framebuffers do not accept textures with GL_ALPHA as their format; if the texture's format is GL_RGBA, everything works fine. I do not want to change the texture format to RGBA, so is there another way to read the texture data as GL_RGBA even though the texture's format is GL_ALPHA?
I have created a texture with this:
glTexImage2D(GL_TEXTURE_2D, 0, GL_ALPHA, width, height, 0, GL_ALPHA, GL_UNSIGNED_BYTE, null);
I am trying to read the data with a framebuffer with a texture attachment and glReadPixels
ByteBuffer pixels = memAlloc(4 * width * height); // Java-specific: allocates a raw buffer that works like a byte array
glBindTexture(GL_TEXTURE_2D, texture);
int fbo = glGenFramebuffers();
glBindFramebuffer(GL_FRAMEBUFFER, fbo);
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, texture, 0);
// glCheckFramebufferStatus(GL_FRAMEBUFFER) returns GL_FRAMEBUFFER_INCOMPLETE_ATTACHMENT after this line
glReadPixels(0, 0, width, height, GL_RGBA, GL_UNSIGNED_BYTE, pixels);
glBindFramebuffer(GL_FRAMEBUFFER, 0);
glDeleteFramebuffers(fbo);
glBindTexture(GL_TEXTURE_2D, 0);
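No answer was recorded for this one, but a common workaround (my suggestion, not from the thread) is to copy the GL_ALPHA texture into a color-renderable GL_RGBA texture by drawing a textured quad into an FBO and reading that back. A rough sketch in C-style GL, where drawFullscreenQuad() is a hypothetical helper that draws a quad with a shader sampling the bound alpha texture:
GLuint rgbaTex, fbo2;
glGenTextures(1, &rgbaTex);
glBindTexture(GL_TEXTURE_2D, rgbaTex);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, width, height, 0, GL_RGBA, GL_UNSIGNED_BYTE, NULL);
glGenFramebuffers(1, &fbo2);
glBindFramebuffer(GL_FRAMEBUFFER, fbo2);
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, rgbaTex, 0); // RGBA is color-renderable, so this attachment is complete
glViewport(0, 0, width, height);
glBindTexture(GL_TEXTURE_2D, texture); // the original GL_ALPHA texture
drawFullscreenQuad(); // hypothetical helper: writes the sampled alpha into the color output
glReadPixels(0, 0, width, height, GL_RGBA, GL_UNSIGNED_BYTE, pixels);
glBindFramebuffer(GL_FRAMEBUFFER, 0);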

Textures show black when using GL_LINEAR_MIPMAP_LINEAR in emscripten

I have a small program that just draws a full screen quad with a texture.
This program works fine when I run it on Windows.
However, it seems to fail when compiled to WebGL2 (GLES3) using Emscripten: it just shows a black quad.
I have figured out that the problem happens only when I enable mipmapping.
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR);
This is the code that initializes the texture:
glActiveTexture(GL_TEXTURE0);
glGenTextures(1, &tex);
glBindTexture(GL_TEXTURE_2D, tex);
struct Color { u8 r, g, b; };
const Color pixels[] = {
{255, 0, 0}, {0, 255, 0},
{0, 0, 255}, {255, 255, 0}
};
glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
glTexImage2D(GL_TEXTURE_2D, 0, GL_SRGB8, 2, 2, 0, GL_RGB, GL_UNSIGNED_BYTE, pixels);
glGenerateMipmap(GL_TEXTURE_2D);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
//glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
//glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
//glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
EDIT:
I have found that, if instead of calling glGenerateMipmap, I specify the mip levels manually, it works!
glTexImage2D(GL_TEXTURE_2D, 0, GL_SRGB8, 2, 2, 0, GL_RGB, GL_UNSIGNED_BYTE, pixels);
glTexImage2D(GL_TEXTURE_2D, 1, GL_SRGB8, 1, 1, 0, GL_RGB, GL_UNSIGNED_BYTE, pixels);
Repo with the full code: https://github.com/tuket/emscripten_problem_with_mipmap_textures
EDIT2:
Another interesting finding: if instead of using a 3-channel texture, I use a 4-channel texture, it works!
struct Color { u8 r, g, b, a; };
const Color pixels[] = {
{255, 0, 0, 255}, {0, 255, 0, 255},
{0, 0, 255, 255}, {255, 255, 0, 255}
};
glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
glTexImage2D(GL_TEXTURE_2D, 0, GL_SRGB8_ALPHA8, 2, 2, 0, GL_RGBA, GL_UNSIGNED_BYTE, pixels);
glGenerateMipmap(GL_TEXTURE_2D);
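A plausible explanation for both findings (my reading of the WebGL2 spec, not stated in the post): glGenerateMipmap in WebGL2 requires the texture's internal format to be color-renderable, because the smaller levels are produced by rendering. GL_SRGB8 is texture-filterable but not color-renderable in WebGL2, while GL_SRGB8_ALPHA8 is both, which would explain why uploading the levels manually or switching to four channels works. If the data is only available as RGB, a sketch of the workaround would be:
// Assumption: rgbaPixels is a copy of pixels expanded to RGBA (alpha = 255).
glTexImage2D(GL_TEXTURE_2D, 0, GL_SRGB8_ALPHA8, 2, 2, 0, GL_RGBA, GL_UNSIGNED_BYTE, rgbaPixels);
glGenerateMipmap(GL_TEXTURE_2D); // OK: GL_SRGB8_ALPHA8 is color-renderable in WebGL2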

Can you glBlitFramebuffer between different levels of the same texture?

Let's say I have a texture that I need to mipmap, but I want the mipmapping hardware-accelerated. I decided the best route would be something like this:
int read = glGenFramebuffers();
int draw = glGenFramebuffers();
int wh = 256;
int glObject = glGenTextures();
glBindTexture(GL_TEXTURE_2D, glObject);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, wh, wh, 0, GL_RGBA, GL_UNSIGNED_BYTE, data);
for(int i = 0; i < mipLevels; ++i) {
glBindFramebuffer(GL_FRAMEBUFFER, read);
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, glObject, i);
glTexImage2D(GL_TEXTURE_2D, i + 1, GL_RGBA8, wh / 2, wh / 2, 0, GL_RGBA, GL_UNSIGNED_BYTE, (ByteBuffer)null);
glBindFramebuffer(GL_FRAMEBUFFER, draw);
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, glObject, i + 1);
glBindFramebuffer(GL_READ_FRAMEBUFFER, read);
glBindFramebuffer(GL_DRAW_FRAMEBUFFER, draw);
glBlitFramebuffer(0, 0, wh, wh, 0, 0, wh / 2, wh / 2, GL_COLOR_BUFFER_BIT, GL_LINEAR);
wh /= 2;
}
Both framebuffers return INCOMPLETE_ATTACHMENT in this block of code when glCheckFramebufferStatus is called. What am I doing wrong? Am I allocating the blank mipmap levels correctly?
Answer: I was using glTexImage2D to allocate blank mip levels when I should have used glTexStorage2D to allocate the blank levels under these circumstances. Here is a link to the manual page for glTexStorage2D for more details: https://www.khronos.org/registry/OpenGL-Refpages/gl4/html/glTexStorage2D.xhtml
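For reference, a sketch of the corrected allocation (my reconstruction from the answer above, not the poster's code; glTexStorage2D needs GL 4.2 or ARB_texture_storage), written C-style:
int wh = 256;
GLuint read, draw, glObject;
glGenFramebuffers(1, &read);
glGenFramebuffers(1, &draw);
glGenTextures(1, &glObject);
glBindTexture(GL_TEXTURE_2D, glObject);
// Immutable storage: all mipLevels + 1 levels exist (and are complete) up front.
glTexStorage2D(GL_TEXTURE_2D, mipLevels + 1, GL_RGBA8, wh, wh);
glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, wh, wh, GL_RGBA, GL_UNSIGNED_BYTE, data);
for (int i = 0; i < mipLevels; ++i) {
    glBindFramebuffer(GL_READ_FRAMEBUFFER, read);
    glFramebufferTexture2D(GL_READ_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, glObject, i);
    glBindFramebuffer(GL_DRAW_FRAMEBUFFER, draw);
    glFramebufferTexture2D(GL_DRAW_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, glObject, i + 1);
    glBlitFramebuffer(0, 0, wh, wh, 0, 0, wh / 2, wh / 2, GL_COLOR_BUFFER_BIT, GL_LINEAR);
    wh /= 2;
}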

Scale OpenGL texture and return bitmap in CPU memory

I have a texture on the GPU defined by an OpenGL textureID and target.
For further processing I need a 300-pixel-wide bitmap in CPU memory (width 300 pixels, height proportional to the source dimensions).
The pixel format should be RGBA, ARGB or BGRA with float components.
How can this be done?
Thanks for your reply.
I tried the following, but I get only white pixels back:
glEnable(GL_TEXTURE_2D);
// create render texture
GLuint renderedTexture;
glGenTextures(1, &renderedTexture);
glBindTexture(GL_TEXTURE_2D, renderedTexture);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, (GLsizei)analyzeWidth, (GLsizei)analyzeHeight, 0, GL_RGBA, GL_FLOAT, 0);
unsigned int fbo;
glGenFramebuffers(1, &fbo);
glBindFramebuffer(GL_FRAMEBUFFER, fbo);
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, renderedTexture, 0);
// draw texture
glBindTexture(GL_TEXTURE_2D, inTextureId);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
// Draw a textured quad
glBegin(GL_QUADS);
glTexCoord2f(0, 0); glVertex3f(0, 0, 0);
glTexCoord2f(0, 1); glVertex3f(0, 1, 0);
glTexCoord2f(1, 1); glVertex3f(1, 1, 0);
glTexCoord2f(1, 0); glVertex3f(1, 0, 0);
glEnd();
GLenum status = glCheckFramebufferStatus(GL_FRAMEBUFFER);
if(status == GL_FRAMEBUFFER_COMPLETE)
{
}
unsigned char *buffer = CGBitmapContextGetData(mainCtx);
glReadPixels(0, 0, (GLsizei)analyzeWidth, (GLsizei)analyzeHeight, GL_RGBA, GL_FLOAT, buffer);
glDisable(GL_TEXTURE_2D);
glBindFramebuffer(GL_FRAMEBUFFER, 0);
glDeleteFramebuffers(1, &fbo);
Create a second texture with the target size: 300 pixels wide and 300 × height/width pixels tall.
Create a Framebuffer Object and attach the new texture as color buffer.
Set appropriate texture filters for the (unscaled) source texture. You have the choice between point sampling (GL_NEAREST) and bilinear filtering (GL_LINEAR). If you are downscaling by more than a factor of 2, you might also consider mipmapping: call glGenerateMipmap on the source texture first and use one of the GL_..._MIPMAP_... minification filters. However, the availability of mipmapping depends on how the source texture was created; if it is an immutable texture object without the mipmap pyramid, this won't work.
Render a textured object (with the original source texture) to the new texture. The most intuitive geometry is a viewport-filling rectangle; the most efficient is a single oversized triangle.
Read back the scaled texture with glReadPixels (via the FBO) or glGetTexImage (directly from the texture). For improved performance, you might consider asynchronous readbacks via Pixel Buffer Objects.
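A minimal fixed-function sketch of those steps (assumptions: inTextureId and the source size srcW × srcH come from the surrounding code, the projection and modelview matrices are identity, and float render targets are available via GL 3.0+ or ARB_texture_float):
int dstW = 300;
int dstH = (int)(300.0f * srcH / srcW + 0.5f); // height proportional to the source
GLuint dstTex, scaleFbo;
glGenTextures(1, &dstTex);
glBindTexture(GL_TEXTURE_2D, dstTex);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA32F, dstW, dstH, 0, GL_RGBA, GL_FLOAT, NULL);
glGenFramebuffers(1, &scaleFbo);
glBindFramebuffer(GL_FRAMEBUFFER, scaleFbo);
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, dstTex, 0);
glViewport(0, 0, dstW, dstH);
glEnable(GL_TEXTURE_2D);
glBindTexture(GL_TEXTURE_2D, inTextureId); // unscaled source texture
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
// With identity matrices, clip space [-1, 1] fills the whole viewport.
glBegin(GL_QUADS);
glTexCoord2f(0, 0); glVertex2f(-1, -1);
glTexCoord2f(1, 0); glVertex2f(1, -1);
glTexCoord2f(1, 1); glVertex2f(1, 1);
glTexCoord2f(0, 1); glVertex2f(-1, 1);
glEnd();
float *out = malloc(dstW * dstH * 4 * sizeof(float)); // RGBA float pixels
glReadPixels(0, 0, dstW, dstH, GL_RGBA, GL_FLOAT, out);
glBindFramebuffer(GL_FRAMEBUFFER, 0);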

Render to texture problem with alpha

When I render to texture and then draw the result, everything seems to get darker. To get this image:
http://img24.imageshack.us/img24/8061/87993367.png
I'm rendering the upper-left square with color (1, 1, 1, .8) to a texture, then rendering that texture plus the middle square (same color) to another texture, and finally that texture plus the lower-right square (same color) to the screen.
As you can see, each time I render to texture, everything gets a little darker.
My render-to-texture code looks like: (I'm using OpenGL ES on the iPhone)
// gen framebuffer
GLuint framebuffer;
glGenFramebuffersOES(1, &framebuffer);
glBindFramebufferOES(GL_FRAMEBUFFER_OES, framebuffer);
// gen texture
GLuint texture;
glGenTextures(1, &texture);
glEnable(GL_TEXTURE_2D);
glBindTexture(GL_TEXTURE_2D, texture);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, width, height, 0, GL_RGBA, GL_UNSIGNED_BYTE, 0);
glBindTexture(GL_TEXTURE_2D, 0);
glDisable(GL_TEXTURE_2D);
// hook it up
glFramebufferTexture2DOES(GL_FRAMEBUFFER_OES, GL_COLOR_ATTACHMENT0_OES, GL_TEXTURE_2D, texture, 0);
if(glCheckFramebufferStatusOES(GL_FRAMEBUFFER_OES) != GL_FRAMEBUFFER_COMPLETE_OES)
return false;
// set up drawing
glBindFramebufferOES(GL_FRAMEBUFFER_OES, framebuffer);
glViewport(0, 0, Screen::Width, Screen::Height);
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
glOrtho(0, Screen::Width, 0, Screen::Height, -1, 1);
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
glColor4f(1, 1, 1, 1);
// do whatever drawing we'll do here
Draw();
glBindFramebufferOES(GL_FRAMEBUFFER_OES, 0);
Is there anything that I'm doing wrong here? Do you need more code to figure it out? What might be going on here?
I'm only guessing:
Drawing the first texture gives you 204 (0.8 × 255) in the RGB and alpha channels. When you draw it a second time (with GL_BLEND enabled, I presume), you're blending that light gray (204) at 80% alpha, which darkens it to a medium gray (over black, 0.8 × 204 ≈ 163).
Solution: use glBlendFunc(GL_ONE, GL_ONE_MINUS_SRC_ALPHA) and premultiply your colors.
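In code, the suggested fix might look like this sketch (the (1, 1, 1, .8) quad color from the question, premultiplied by its alpha):
glEnable(GL_BLEND);
glBlendFunc(GL_ONE, GL_ONE_MINUS_SRC_ALPHA); // premultiplied-alpha blending
// (1, 1, 1, 0.8) premultiplied becomes (0.8, 0.8, 0.8, 0.8).
glColor4f(0.8f, 0.8f, 0.8f, 0.8f);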
