How to set SbDecodeTargetInfo for Cobalt decode-to-texture?

I am implementing the Cobalt spherical video feature. It works correctly for 1080p video, but if I play a video at another resolution, such as 480p, the video is reversed. The test video is from https://ytlr-cert.appspot.com/2019/main.html
The format is YUV NV12.
I have attached my source code; could anyone help point out what is causing it?
Thanks.
The reversed video:
My Source:
GL_CALL(glBindTexture(GL_TEXTURE_2D, out_info->planes[0].texture));
GL_CALL(glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR));
GL_CALL(glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR));
GL_CALL(glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE));
GL_CALL(glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE));
GL_CALL(glTexImage2D(GL_TEXTURE_2D, 0, GL_ALPHA,
decode_target->videoframe_info.Stride,
decode_target->videoframe_info.imageHeight, 0, GL_ALPHA,
GL_UNSIGNED_BYTE, decode_target->buffer_y));
out_info->planes[0].gl_texture_format = GL_ALPHA;
out_info->planes[0].width = decode_target->videoframe_info.Stride;
out_info->planes[0].height = decode_target->videoframe_info.imageHeight;
out_info->planes[0].content_region.right = decode_target->videoframe_info.imageWidth;
out_info->planes[0].content_region.bottom = decode_target->videoframe_info.imageHeight;
GL_CALL(glBindTexture(GL_TEXTURE_2D, 0));
GL_CALL(glBindTexture(GL_TEXTURE_2D, out_info->planes[1].texture));
GL_CALL(glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR));
GL_CALL(glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR));
GL_CALL(glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE));
GL_CALL(glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE));
GL_CALL(glTexImage2D(GL_TEXTURE_2D, 0, GL_LUMINANCE_ALPHA,
decode_target->videoframe_info.Stride / 2,
decode_target->videoframe_info.imageHeight / 2, 0,
GL_LUMINANCE_ALPHA, GL_UNSIGNED_BYTE,
decode_target->buffer_uv));
GL_CALL(glBindTexture(GL_TEXTURE_2D, 0));
out_info->planes[1].gl_texture_format = GL_LUMINANCE_ALPHA;
out_info->planes[1].width = decode_target->videoframe_info.Stride / 2;
out_info->planes[1].height = decode_target->videoframe_info.imageHeight / 2;
out_info->planes[1].content_region.right = decode_target->videoframe_info.imageWidth / 2;
out_info->planes[1].content_region.bottom = decode_target->videoframe_info.imageHeight / 2;
out_info->width = decode_target->videoframe_info.imageWidth;
out_info->height = decode_target->videoframe_info.imageHeight;
out_info->format = kSbDecodeTargetFormat2PlaneYUVNV12;
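For reference, here is a minimal sketch of how the stride, plane size, and content region are generally meant to relate for a stride-padded NV12 frame. It only restates the fields used above, plus an explicit left/top initialization that is my own assumption (the struct may already be zero-initialized), not confirmed Cobalt behavior:
// Plane width/height cover the full, stride-padded texture allocation;
// content_region marks the visible sub-rectangle inside it.
// left/top set explicitly here as an assumption (the struct may already be zeroed).
int stride = decode_target->videoframe_info.Stride;        // padded row width of the Y plane
int width  = decode_target->videoframe_info.imageWidth;    // visible width
int height = decode_target->videoframe_info.imageHeight;   // visible height
out_info->planes[0].width  = stride;          // Y plane
out_info->planes[0].height = height;
out_info->planes[0].content_region.left   = 0;
out_info->planes[0].content_region.top    = 0;
out_info->planes[0].content_region.right  = width;
out_info->planes[0].content_region.bottom = height;
out_info->planes[1].width  = stride / 2;      // interleaved UV plane, half resolution for NV12
out_info->planes[1].height = height / 2;
out_info->planes[1].content_region.left   = 0;
out_info->planes[1].content_region.top    = 0;
out_info->planes[1].content_region.right  = width / 2;
out_info->planes[1].content_region.bottom = height / 2;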

Related

Textures show black when using GL_LINEAR_MIPMAP_LINEAR in Emscripten

I have a small program that just draws a full screen quad with a texture.
This program works fine when I run it on Windows.
However it seems to fail when compiling to WebGL2 (GLES3) using Emscripten. It just shows a black quad.
I have figured out that the problem happens only when I enable mipmapping.
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR);
This is the code that initializes the texture:
glActiveTexture(GL_TEXTURE0);
glGenTextures(1, &tex);
glBindTexture(GL_TEXTURE_2D, tex);
struct Color { u8 r, g, b; };
const Color pixels[] = {
{255, 0, 0}, {0, 255, 0},
{0, 0, 255}, {255, 255, 0}
};
glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
glTexImage2D(GL_TEXTURE_2D, 0, GL_SRGB8, 2, 2, 0, GL_RGB, GL_UNSIGNED_BYTE, pixels);
glGenerateMipmap(GL_TEXTURE_2D);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
//glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
//glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
//glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
EDIT:
I have found that, if instead of calling glGenerateMipmap, I specify the mip levels manually, it works!
glTexImage2D(GL_TEXTURE_2D, 0, GL_SRGB8, 2, 2, 0, GL_RGB, GL_UNSIGNED_BYTE, pixels);
glTexImage2D(GL_TEXTURE_2D, 1, GL_SRGB8, 1, 1, 0, GL_RGB, GL_UNSIGNED_BYTE, pixels);
Repo with the full code: https://github.com/tuket/emscripten_problem_with_mipmap_textures
EDIT2:
Another interesting finding: if instead of using a 3-channel texture, I use a 4-channel texture, it works!
struct Color { u8 r, g, b, a; };
const Color pixels[] = {
{255, 0, 0, 255}, {0, 255, 0, 255},
{0, 0, 255, 255}, {255, 255, 0, 255}
};
glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
glTexImage2D(GL_TEXTURE_2D, 0, GL_SRGB8_ALPHA8, 2, 2, 0, GL_RGBA, GL_UNSIGNED_BYTE, pixels);
glGenerateMipmap(GL_TEXTURE_2D);
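A plausible reading of both findings: WebGL2 only allows glGenerateMipmap on internal formats that are both color-renderable and texture-filterable, and GL_SRGB8 is filterable but not color-renderable (GL_SRGB8_ALPHA8 is both), whereas desktop GL is more permissive. A minimal sketch, assuming that is indeed the cause, that makes the failure visible instead of silently leaving an incomplete mipmap chain (which samples as black with *_MIPMAP_* min filters):
glTexImage2D(GL_TEXTURE_2D, 0, GL_SRGB8_ALPHA8, 2, 2, 0, GL_RGBA, GL_UNSIGNED_BYTE, pixels);
glGenerateMipmap(GL_TEXTURE_2D);
// On WebGL2, GL_SRGB8 would make glGenerateMipmap raise GL_INVALID_OPERATION;
// either switch to a color-renderable format like GL_SRGB8_ALPHA8,
// or upload every mip level manually as in the first EDIT.
if (glGetError() != GL_NO_ERROR) {
    // handle / log: internal format not color-renderable and texture-filterable
}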

Can you glBlitFramebuffer between different levels of the same texture?

Let's say I have a texture that I need to mipmap but I want the mipmapping done hardware accelerated. I decided the best route to take would be something like this:
int read = glGenFramebuffers();
int draw = glGenFramebuffers();
int wh = 256;
int glObject = glGenTextures();
glBindTexture(GL_TEXTURE_2D, glObject);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, wh, wh, 0, GL_RGBA, GL_UNSIGNED_BYTE, data);
for(int i = 0; i < mipLevels; ++i) {
glBindFramebuffer(GL_FRAMEBUFFER, read);
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, glObject, i);
glTexImage2D(GL_TEXTURE_2D, i + 1, GL_RGBA8, wh / 2, wh / 2, 0, GL_RGBA, GL_UNSIGNED_BYTE, (ByteBuffer)null);
glBindFramebuffer(GL_FRAMEBUFFER, draw);
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, glObject, i + 1);
glBindFramebuffer(GL_READ_FRAMEBUFFER, read);
glBindFramebuffer(GL_DRAW_FRAMEBUFFER, draw);
glBlitFramebuffer(0, 0, wh, wh, 0, 0, wh / 2, wh / 2, GL_COLOR_BUFFER_BIT, GL_LINEAR);
wh /= 2;
}
Both framebuffers return INCOMPLETE_ATTACHMENT in this block of code when glCheckFramebufferStatus is called. What am I doing wrong? Am I allocating the blank mipmap levels correctly?
Answer: I was using glTexImage2D to allocate blank mip levels when I should have used glTexStorage2D to allocate the blank levels under these circumstances. Here is a link to the manual page for glTexStorage2D for more details: https://www.khronos.org/registry/OpenGL-Refpages/gl4/html/glTexStorage2D.xhtml
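For illustration, a sketch of that fix in C-style GL (the original snippet is Java/LWJGL-flavored; the variable names here are illustrative): allocate the full, immutable mip chain with glTexStorage2D up front, upload level 0, then attach and blit level by level.
// Immutable storage: every level of the chain exists and is sized correctly.
GLuint tex, readFbo, drawFbo;
glGenTextures(1, &tex);
glBindTexture(GL_TEXTURE_2D, tex);
glTexStorage2D(GL_TEXTURE_2D, mipLevels, GL_RGBA8, 256, 256);
glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, 256, 256, GL_RGBA, GL_UNSIGNED_BYTE, data);
glGenFramebuffers(1, &readFbo);
glGenFramebuffers(1, &drawFbo);
int wh = 256;
for (int i = 0; i + 1 < mipLevels; ++i) {
    // Read from level i, draw into level i + 1 of the same texture.
    glBindFramebuffer(GL_READ_FRAMEBUFFER, readFbo);
    glFramebufferTexture2D(GL_READ_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, tex, i);
    glBindFramebuffer(GL_DRAW_FRAMEBUFFER, drawFbo);
    glFramebufferTexture2D(GL_DRAW_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, tex, i + 1);
    glBlitFramebuffer(0, 0, wh, wh, 0, 0, wh / 2, wh / 2, GL_COLOR_BUFFER_BIT, GL_LINEAR);
    wh /= 2;
}
Note that glTexStorage2D requires OpenGL 4.2 or the ARB_texture_storage extension.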

OpenGL is rendering the color of the first pixel

I am trying to port my game engine from Windows/Linux to Mac OS X, and everything works fine except for the textures. I use SOIL to load the textures and GLSL shaders to render, and it's all done in C++.
I believe my shader is working properly:
gl_FragColor = texture2D( BSTextureUnit, textureCoordinates ) * BSTextureColor;
here is how I load the texture:
theTextureId = SOIL_load_OGL_texture
(
nameChar,
SOIL_LOAD_AUTO,
SOIL_CREATE_NEW_ID,
SOIL_FLAG_NTSC_SAFE_RGB | SOIL_FLAG_COMPRESS_TO_DXT
);
glBindTexture(GL_TEXTURE_2D, theTextureId);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
//glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT );
//glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT );
if(!theTextureId)
{
std::cout<<"texture NOT loaded successfully: "<<name<<'\n';
}
What it does is load the texture, but instead of rendering all the pixels, it gives the whole object the color of the first pixel in the image.
So, if the texture is this one:
it will render a green block.
EDIT:
the texture coordinates:
dataToSet[0].texCoord.s = texLeft;
dataToSet[0].texCoord.t = texTop;
dataToSet[1].texCoord.s = texLeft;
dataToSet[1].texCoord.t = texBottom;
dataToSet[2].texCoord.s = texRight;
dataToSet[2].texCoord.t = texBottom;
dataToSet[3].texCoord.s = texRight;
dataToSet[3].texCoord.t = texTop;
where: GLfloat texTop = 1, texBottom = 0, texLeft = 0, texRight = 1;
I also tried with the texture width and height instead of 0 and 1, and I got the same result. On Windows it renders correctly with these texture coordinates.

Drawing YUV frames with OpenGL on mac

I'm decoding video with the ffmpeg libraries and storing the decoded frames in an array. Now I want to draw these frames on the screen with OpenGL. I googled and found that Apple recommends the GL_APPLE_ycbcr_422 format for efficient drawing, so I switched frame decoding in ffmpeg to the PIX_FMT_YUYV422 format (packed YUV 4:2:2, 16bpp, Y0 Cb Y1 Cr), which seems to be the equivalent of GL_APPLE_ycbcr_422 in OpenGL. Now I'm trying to draw frames to the surface with this code:
GLsizei width = 2, height = 2;
uint8_t data[8] = {128,200,123,10,5,13,54,180};
GLuint texture_name;
glEnable(GL_TEXTURE_RECTANGLE_ARB);
assert(glGetError() == GL_NO_ERROR);
glGenTextures (1,&texture_name);
assert(glGetError() == GL_NO_ERROR);
glBindTexture(GL_TEXTURE_RECTANGLE_ARB, texture_name);
assert(glGetError() == GL_NO_ERROR);
glTextureRangeAPPLE(GL_TEXTURE_RECTANGLE_ARB, width * height * 2, (void*)data);
assert(glGetError() == GL_NO_ERROR);
glTexParameteri(GL_TEXTURE_RECTANGLE_ARB, GL_TEXTURE_STORAGE_HINT_APPLE , GL_STORAGE_SHARED_APPLE);
assert(glGetError() == GL_NO_ERROR);
glPixelStorei(GL_UNPACK_CLIENT_STORAGE_APPLE, GL_TRUE);
assert(glGetError() == GL_NO_ERROR);
// not sure about code above
glTexParameteri(GL_TEXTURE_RECTANGLE_ARB, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
assert(glGetError() == GL_NO_ERROR);
glTexParameteri(GL_TEXTURE_RECTANGLE_ARB, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
assert(glGetError() == GL_NO_ERROR);
glTexParameteri(GL_TEXTURE_RECTANGLE_ARB, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
assert(glGetError() == GL_NO_ERROR);
glTexParameteri(GL_TEXTURE_RECTANGLE_ARB, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
assert(glGetError() == GL_NO_ERROR);
glPixelStorei(GL_UNPACK_ROW_LENGTH, 0);
assert(glGetError() == GL_NO_ERROR);
// end
glViewport(0, 0, width,height);
assert(glGetError() == GL_NO_ERROR);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
assert(glGetError() == GL_NO_ERROR);
glTexImage2D(GL_TEXTURE_RECTANGLE_ARB, 0, GL_RGB, width, height, 0,
GL_YCBCR_422_APPLE,GL_UNSIGNED_SHORT_8_8_APPLE,
(void*)data);
assert(glGetError() == GL_NO_ERROR);
glTexSubImage2D(GL_TEXTURE_RECTANGLE_ARB, 0, 0, 0, width, height,
GL_YCBCR_422_APPLE,GL_UNSIGNED_SHORT_8_8_APPLE,
(void*)data);
assert(glGetError() == GL_NO_ERROR);
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
glScalef(1.0f, -1.0f, 1.0f);
glOrtho(0, width, 0, height, -1.0, 1.0);
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
glBegin(GL_QUADS);
glVertex3f(0, 0, -1.0f);
glVertex3f(width, 0, -1.0f);
glVertex3f(width, height, -1.0f);
glVertex3f(0, height, -1.0f);
glEnd();
But I don't see correct frames; instead I see corrupted pictures of my display. So I understand that my use of OpenGL is not correct and maybe I don't understand some fundamental things.
Please help me correct and understand my mistakes. If I should RTFM, please give me a link that will help, because I didn't find any useful info.
Here's what I use in one of my applications to set up a YUV 4:2:2 texture target for uploaded data from YUV frames pulled off of a CCD camera:
glActiveTexture(GL_TEXTURE0);
glEnable(GL_TEXTURE_RECTANGLE_EXT);
glGenTextures(1, &textureName);
glBindTexture(GL_TEXTURE_RECTANGLE_EXT, textureName);
glTextureRangeAPPLE(GL_TEXTURE_RECTANGLE_EXT, videoImageSize.width * videoImageSize.height * 2, videoTexture);
glTexParameteri(GL_TEXTURE_RECTANGLE_EXT, GL_TEXTURE_STORAGE_HINT_APPLE , GL_STORAGE_SHARED_APPLE);
glTexParameteri(GL_TEXTURE_RECTANGLE_EXT, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_RECTANGLE_EXT, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_RECTANGLE_EXT, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_RECTANGLE_EXT, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexEnvi(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_REPLACE);
glTexImage2D(GL_TEXTURE_RECTANGLE_EXT, 0, GL_RGBA, videoImageSize.width, videoImageSize.height, 0, GL_YCBCR_422_APPLE, GL_UNSIGNED_SHORT_8_8_REV_APPLE, videoTexture);
glBindTexture(GL_TEXTURE_RECTANGLE_EXT, 0);
The actual upload to the texture is accomplished using the following:
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_RECTANGLE_EXT, textureName);
glTexSubImage2D (GL_TEXTURE_RECTANGLE_EXT, 0, 0, 0, videoImageSize.width, videoImageSize.height, GL_YCBCR_422_APPLE, GL_UNSIGNED_SHORT_8_8_REV_APPLE, videoTexture);
In the above, videoTexture is the memory location of the raw bytes from a captured YUV frame off of the camera. I'm also using Apple's storage hints in the above to accelerate the upload, although I never could get the GL_UNPACK_CLIENT_STORAGE_APPLE optimization to speed things up further.
This was copied and pasted out of a working application, and the only real differences I see are my use of GL_RGBA instead of GL_RGB in the glTexImage2D() call and the use of GL_UNSIGNED_SHORT_8_8_REV_APPLE instead of GL_UNSIGNED_SHORT_8_8_APPLE. Both the ARB and EXT rectangle extensions resolve to the same thing on the Mac.
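One more detail worth flagging from the question's draw code, hedged as a guess since only a fragment of it is shown: the quad there is submitted without texture coordinates, and rectangle textures are sampled with non-normalized pixel coordinates (0..width, 0..height) rather than 0..1. A sketch of the fixed-function draw under that assumption:
glEnable(GL_TEXTURE_RECTANGLE_ARB);
glBindTexture(GL_TEXTURE_RECTANGLE_ARB, texture_name);
glBegin(GL_QUADS);
// Rectangle textures use pixel coordinates, so texcoords run 0..width / 0..height.
glTexCoord2f(0, 0);           glVertex3f(0, 0, -1.0f);
glTexCoord2f(width, 0);       glVertex3f(width, 0, -1.0f);
glTexCoord2f(width, height);  glVertex3f(width, height, -1.0f);
glTexCoord2f(0, height);      glVertex3f(0, height, -1.0f);
glEnd();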

Render to texture problem with alpha

When I render to texture, and then draw the same image, it seems to make everything darker. To get this image:
http://img24.imageshack.us/img24/8061/87993367.png
I'm rendering the upper-left square with color (1, 1, 1, .8) to a texture, then rendering that texture plus the middle square (same color) to another texture, then finally that texture plus the lower-right square (same color) to the screen.
As you can see, each time I render to texture, everything gets a little darker.
My render-to-texture code looks like: (I'm using OpenGL ES on the iPhone)
// gen framebuffer
GLuint framebuffer;
glGenFramebuffersOES(1, &framebuffer);
glBindFramebufferOES(GL_FRAMEBUFFER_OES, framebuffer);
// gen texture
GLuint texture;
glGenTextures(1, &texture);
glEnable(GL_TEXTURE_2D);
glBindTexture(GL_TEXTURE_2D, texture);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, width, height, 0, GL_RGBA, GL_UNSIGNED_BYTE, 0);
glBindTexture(GL_TEXTURE_2D, 0);
glDisable(GL_TEXTURE_2D);
// hook it up
glFramebufferTexture2DOES(GL_FRAMEBUFFER_OES, GL_COLOR_ATTACHMENT0_OES, GL_TEXTURE_2D, texture, 0);
if(glCheckFramebufferStatusOES(GL_FRAMEBUFFER_OES) != GL_FRAMEBUFFER_COMPLETE_OES)
return false;
// set up drawing
glBindFramebufferOES(GL_FRAMEBUFFER_OES, framebuffer);
glViewport(0, 0, Screen::Width, Screen::Height);
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
glOrtho(0, Screen::Width, 0, Screen::Height, -1, 1);
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
glColor4f(1, 1, 1, 1);
// do whatever drawing we'll do here
Draw();
glBindFramebufferOES(GL_FRAMEBUFFER_OES, 0);
Is there anything that I'm doing wrong here? Do you need more code to figure it out? What might be going on here?
I'm only guessing:
Drawing the first texture gives you 204 (0.8 * 255) in the RGB and alpha channels. When you draw for the second time (with GL_BLEND enabled, I presume), you're getting the light gray 204 RGB with 80% alpha which gives you a medium gray.
Solution: use glBlendFunc(GL_ONE, GL_ONE_MINUS_SRC_ALPHA) and premultiply your colors.
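A minimal sketch of that suggestion, assuming fixed-function OpenGL ES 1.x as in the question: premultiply the color by its alpha and use GL_ONE as the source factor so the alpha isn't applied twice on each pass.
glEnable(GL_BLEND);
glBlendFunc(GL_ONE, GL_ONE_MINUS_SRC_ALPHA);
// Premultiplied color: (r*a, g*a, b*a, a). The (1, 1, 1, 0.8) square becomes:
GLfloat a = 0.8f;
glColor4f(1.0f * a, 1.0f * a, 1.0f * a, a);
// ... draw the square / composite the intermediate texture as before ...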
