Scale an OpenGL texture and return a bitmap in CPU memory - macOS

I have a texture on the GPU defined by an OpenGL textureID and target.
For further processing I need a 300-pixel bitmap in CPU memory (width 300 pixels, height scaled proportionally to preserve the source aspect ratio).
The pixel format should be RGBA, ARGB or BGRA with float components.
How can this be done?
Thanks for your reply.
I tried the following, but I get only white pixels back:
glEnable(GL_TEXTURE_2D);
// create render texture
GLuint renderedTexture;
glGenTextures(1, &renderedTexture);
glBindTexture(GL_TEXTURE_2D, renderedTexture);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, (GLsizei)analyzeWidth, (GLsizei)analyzeHeight, 0, GL_RGBA, GL_FLOAT, 0);
unsigned int fbo;
glGenFramebuffers(1, &fbo);
glBindFramebuffer(GL_FRAMEBUFFER, fbo);
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, renderedTexture, 0);
// draw texture
glBindTexture(GL_TEXTURE_2D, inTextureId);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
// Draw a textured quad
glBegin(GL_QUADS);
glTexCoord2f(0, 0); glVertex3f(0, 0, 0);
glTexCoord2f(0, 1); glVertex3f(0, 1, 0);
glTexCoord2f(1, 1); glVertex3f(1, 1, 0);
glTexCoord2f(1, 0); glVertex3f(1, 0, 0);
glEnd();
GLenum status = glCheckFramebufferStatus(GL_FRAMEBUFFER);
if(status == GL_FRAMEBUFFER_COMPLETE)
{
}
unsigned char *buffer = CGBitmapContextGetData(mainCtx);
glReadPixels(0, 0, (GLsizei)analyzeWidth, (GLsizei)analyzeHeight, GL_RGBA, GL_FLOAT, buffer);
glDisable(GL_TEXTURE_2D);
glBindFramebuffer(GL_FRAMEBUFFER, 0);
glDeleteFramebuffers(1, &fbo);

1. Create a second texture sized 300 x (300 * height / width), i.e. the requested width of 300 pixels with the height scaled to preserve the source aspect ratio.
2. Create a framebuffer object and attach the new texture as its color buffer.
3. Set appropriate texture filters for the (unscaled) source texture. You have the choice between point sampling (GL_NEAREST) and bilinear filtering (GL_LINEAR). If you are downscaling by more than a factor of 2, you might also consider mipmapping: call glGenerateMipmap on the source texture first and use one of the GL_..._MIPMAP_... minification filters. However, the availability of mipmapping depends on how the source texture was created; if it is an immutable texture object without the mipmap pyramid, this won't work.
4. Render a textured object (with the original source texture) to the new texture. The most intuitive geometry is a viewport-filling rectangle; the most efficient is a single triangle.
5. Read back the scaled texture with glReadPixels (via the FBO) or glGetTexImage (directly from the texture), as sketched below. For improved performance, you might consider asynchronous readbacks via Pixel Buffer Objects.
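A minimal sketch of these steps, assuming a macOS compatibility-profile context to match the question's immediate-mode code; inTextureId, srcWidth and srcHeight are taken as given, and error handling is omitted:
GLsizei outW = 300;
GLsizei outH = (GLsizei)(300.0 * srcHeight / srcWidth); // keep the source aspect ratio
// 1. target texture with float components (RGBA, as requested)
GLuint dstTex;
glGenTextures(1, &dstTex);
glBindTexture(GL_TEXTURE_2D, dstTex);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA32F, outW, outH, 0, GL_RGBA, GL_FLOAT, NULL);
// 2. FBO with the target texture as its color buffer
GLuint fbo;
glGenFramebuffers(1, &fbo);
glBindFramebuffer(GL_FRAMEBUFFER, fbo);
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, dstTex, 0);
if (glCheckFramebufferStatus(GL_FRAMEBUFFER) != GL_FRAMEBUFFER_COMPLETE)
    return; // check completeness before drawing, not after
// 3. filters on the (unscaled) source texture, viewport matching the target size
glViewport(0, 0, outW, outH);
glEnable(GL_TEXTURE_2D);
glBindTexture(GL_TEXTURE_2D, inTextureId);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
// 4. viewport-filling quad in clip space (identity matrices assumed)
glBegin(GL_QUADS);
glTexCoord2f(0, 0); glVertex2f(-1, -1);
glTexCoord2f(1, 0); glVertex2f( 1, -1);
glTexCoord2f(1, 1); glVertex2f( 1,  1);
glTexCoord2f(0, 1); glVertex2f(-1,  1);
glEnd();
// 5. read the scaled image back to CPU memory as float RGBA
float *pixels = (float *)malloc((size_t)outW * outH * 4 * sizeof(float));
glReadPixels(0, 0, outW, outH, GL_RGBA, GL_FLOAT, pixels);
glBindFramebuffer(GL_FRAMEBUFFER, 0);
glDeleteFramebuffers(1, &fbo);
Compared to the code in the question, the framebuffer is checked before drawing, the viewport is set to the target size, and the quad spans the full clip space (the 0..1 vertices in the question cover only a quarter of it).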

Related

Read data from texture with any internal format on ES

I am trying to read the data of a texture on OpenGL ES. The problem with my method is that framebuffers do not accept textures with GL_ALPHA as format. If the texture has GL_RGBA as format, everything works fine. I do not want to change the texture format to RGBA, so is there another way to read the texture data as GL_RGBA format even if the texture has the GL_ALPHA format?
I have created a texture with this:
glTexImage2D(GL_TEXTURE_2D, 0, GL_ALPHA, width, height, 0, GL_ALPHA, GL_UNSIGNED_BYTE, null);
I am trying to read the data with a framebuffer with a texture attachment and glReadPixels
ByteBuffer pixels = memAlloc(4 * width * height); // This line is java specific, it works like a byte array
glBindTexture(GL_TEXTURE_2D, texture);
int fbo = glGenFramebuffers();
glBindFramebuffer(GL_FRAMEBUFFER, fbo);
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, texture, 0);
// glCheckFramebufferStatus(GL_FRAMEBUFFER) returns GL_FRAMEBUFFER_INCOMPLETE_ATTACHMENT after this line
glReadPixels(0, 0, width, height, GL_RGBA, GL_UNSIGNED_BYTE, pixels);
glBindFramebuffer(GL_FRAMEBUFFER, 0);
glDeleteFramebuffers(fbo);
glBindTexture(GL_TEXTURE_2D, 0);
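This excerpt has no answer attached, but a commonly used workaround is to draw the GL_ALPHA texture into a framebuffer whose color attachment is an RGBA texture, and read that back instead. A rough C sketch under that assumption (alphaTex stands in for the question's texture; the full-screen draw that samples it is elided):
// color-renderable RGBA target of the same size as the alpha texture
GLuint rgbaTex, fbo;
glGenTextures(1, &rgbaTex);
glBindTexture(GL_TEXTURE_2D, rgbaTex);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, width, height, 0, GL_RGBA, GL_UNSIGNED_BYTE, NULL);
glGenFramebuffers(1, &fbo);
glBindFramebuffer(GL_FRAMEBUFFER, fbo);
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, rgbaTex, 0);
// ... draw a full-screen quad here that samples alphaTex ...
// the FBO is now RGBA-backed, so this read is legal
glReadPixels(0, 0, width, height, GL_RGBA, GL_UNSIGNED_BYTE, pixels);
glBindFramebuffer(GL_FRAMEBUFFER, 0);
glDeleteFramebuffers(1, &fbo);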

OpenGL ES3 framebuffer draw depth in red scale

After much hard work to get a directional-light shadow map working, I can finally see the shadow map rendered on a quad, but only with depth format GL_DEPTH_COMPONENT16 and type GL_UNSIGNED_SHORT, or GL_DEPTH_COMPONENT32F and type GL_FLOAT, and it is in red scale, not gray scale.
The problem is that I have tried many methods of calculating the depth to draw the shadow, but no shadow appears.
glCullFace(GL_FRONT);
glGenFramebuffers(1, &depthMapFBO);
glBindFramebuffer(GL_FRAMEBUFFER, depthMapFBO);
glGenTextures(1, &depthMap);
glBindTexture(GL_TEXTURE_2D, depthMap);
glTexImage2D(GL_TEXTURE_2D, 0, GL_DEPTH_COMPONENT16, SHADOW_WIDTH, SHADOW_HEIGHT, 0, GL_DEPTH_COMPONENT, GL_UNSIGNED_SHORT, NULL);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_COMPARE_FUNC, GL_LEQUAL);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_COMPARE_MODE, GL_COMPARE_REF_TO_TEXTURE);
// a depth-only texture belongs on GL_DEPTH_ATTACHMENT;
// GL_DEPTH_STENCIL_ATTACHMENT requires a combined depth-stencil format
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_TEXTURE_2D, depthMap, 0);
const GLenum drawBuffers = GL_NONE; // glDrawBuffers expects a pointer to an array
glDrawBuffers(1, &drawBuffers);
glReadBuffer(GL_NONE);
// check completeness while the shadow FBO is still bound
if (glCheckFramebufferStatus(GL_FRAMEBUFFER) != GL_FRAMEBUFFER_COMPLETE) {
    LOGI("framebuffer incomplete");
}
glBindFramebuffer(GL_FRAMEBUFFER, 0);
glBindTexture(GL_TEXTURE_2D, 0);
glCullFace(GL_BACK);
and the fragment shader is:
uniform mediump sampler2DShadow shadowMap;
......
float bias = 0.005;
float visibility = 1.0;
for (int i = 0; i < 4; i++) {
    int index = i;
    visibility -= 0.2 * (1.0 - texture(shadowMap, vec3(FragPosLightSpace.xy + poissonDisk[index]/700.0, (FragPosLightSpace.z - bias)/FragPosLightSpace.w)));
}
result = light.intensity * (visibility * (diffuse + specular));
...but it is in red scale not gray scale
A texture with a depth component format, such as GL_DEPTH_COMPONENT16 or GL_DEPTH_COMPONENT32F, has only one color channel: the red channel.
If you read data through a texture sampler to which a depth component texture is bound, the green, blue and alpha channels are filled in automatically.
The Image Format specification of the Khronos group says:
Image formats do not have to store each component. When the shader samples such a texture, it will still resolve to a 4-value RGBA vector. The components not stored by the image format are filled in automatically. Zeros are used if R, G, or B is missing, while a missing Alpha always resolves to 1. Note: Texture swizzling can change what the missing values are.
Because of that, the red channel holds the depth value, green and blue are set to 0, and alpha is 1. This causes the opaque red surface.
If you want to read a grayscale color from a depth component texture, you have to read the red channel and apply it to the green and blue channels, too.
You have to adapt the code like this:
float depth = texture( shadowMap, ..... ).r;
vec3 depthGrayscale = vec3( depth );
or this:
vec3 depthGrayscale = texture( shadowMap, ..... ).rrr;
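If you only want the grayscale look when visualizing the depth map and would rather not change the shader, the texture swizzle mentioned in the quote above can do the same thing from the host side. A sketch, assuming a GL 3.3 or ES 3.0 context and the depthMap texture from the question:
// broadcast the red (depth) channel into green and blue, so any shader
// sampling this texture sees gray instead of red
glBindTexture(GL_TEXTURE_2D, depthMap);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_SWIZZLE_G, GL_RED);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_SWIZZLE_B, GL_RED);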

How to render to a depth texture in OpenGL ES on Android

I need to render to a depth texture in Android, and I'm having trouble figuring it out. I've searched the web for several days and pieced together the following code:
public void loadFrameBuffer(Context context, int resourceId) {
    final Bitmap bitmap = createBitmap(context, resourceId);

    // generate the framebuffer and texture object names
    glGenFramebuffers(1, frameBuffers, 0);
    glGenTextures(2, textures, 0);

    // bind color texture and load the texture mip level 0; texels are RGB565
    // no texels need to be specified as we are going to draw into the texture
    glBindTexture(GL_TEXTURE_2D, textures[COLOR_TEXTURE]);
    texImage2D(GL_TEXTURE_2D, 0, GL_RGB, bitmap, 0);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);

    // bind depth texture and load the texture mip level 0
    // no texels need to be specified as we are going to draw into the texture
    glBindTexture(GL_TEXTURE_2D, textures[DEPTH_TEXTURE]);
    texImage2D(GL_TEXTURE_2D, 0, GL_DEPTH_COMPONENT, bitmap, 0);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);

    // bind the framebuffer
    glBindFramebuffer(GL_FRAMEBUFFER, frameBuffers[0]);

    // specify texture as color attachment
    glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, textures[COLOR_TEXTURE], 0);
    checkStatus();

    // specify texture as depth attachment
    glFramebufferTexture2D(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_TEXTURE_2D, textures[DEPTH_TEXTURE], 0);
    checkStatus();
}
checkStatus() after the color attachment reports success: GL_FRAMEBUFFER_COMPLETE.
However, checkStatus() after the depth attachment reports GL_FRAMEBUFFER_INCOMPLETE_ATTACHMENT.
The documentation for the error states:
Not all framebuffer attachment points are framebuffer attachment complete. This means that at least one attachment point with a renderbuffer or texture attached has its attached object no longer in existence or has an attached image with a width or height of zero, or the color attachment point has a non-color-renderable image attached, or the depth attachment point has a non-depth-renderable image attached, or the stencil attachment point has a non-stencil-renderable image attached.
Color-renderable formats include GL_RGBA4, GL_RGB5_A1, and GL_RGB565.
GL_DEPTH_COMPONENT16 is the only depth-renderable format.
GL_STENCIL_INDEX8 is the only stencil-renderable format.
Even with this description, I'm at a loss as to what is wrong. The color attachment works just fine.
EDIT:
I'm seeing this in the log just before the previously mentioned status error:
<core_glTexImage2D:501>: GL_INVALID_OPERATION
It appears to be because of this line of code:
texImage2D(GL_TEXTURE_2D, 0, GL_DEPTH_COMPONENT, bitmap, 0);
EDIT:
I got past the errors. I switched from the call to:
texImage2D(GL_TEXTURE_2D, 0, GL_DEPTH_COMPONENT, bitmap, 0);
which is actually wrong, and now I'm calling:
glTexImage2D(GL_TEXTURE_2D, 0, GL_DEPTH_COMPONENT, texWidth, texHeight, 0,
GL_DEPTH_COMPONENT, GLES30.GL_FLOAT, null);
which works. I don't see any more errors. However, I'm still having a problem.
Here's the model rendered straight to the default buffer (straight to the window):
But when I render the texture attached to my FrameBuffer, mapped onto the following quad:
// x, y, z, S, T
new float[]{
    // left triangle
    -1f, -1f, 0f, 0f, 0f,
     1f,  1f, 0f, 1f, 1f,
    -1f,  1f, 0f, 0f, 1f,
    // right triangle
    -1f, -1f, 0f, 0f, 0f,
     1f, -1f, 0f, 1f, 0f,
     1f,  1f, 0f, 1f, 1f
}
);
It stretches the texture:
If I remove the left triangle and render I get this:
And lastly, rendering the depth texture gives me all red:
Everything is working correctly, there is nothing to fix
Your depth texture is correct. There is nothing left to fix.
The problem seems to be that you expect something with more contrast. I downloaded your picture and enhanced the contrast in an image editor:
You can see that there are details in there! A lot of data has been lost along the way, however. Your screen probably has an 8-bit framebuffer at best, and the image was then saved as a JPEG, which quantizes it even further; the original depth texture itself is probably 16 bits. So we've probably lost around 10 bits by the time I made the above image.
To put it another way: the depth buffer has 16 bits of precision, but the screen only has about 8, so of course you can't see the details. The data is fine.
Why is it red?
Of course it's red. It's an image with only one channel. If you want to color it differently, use texture swizzling, or apply a color in the fragment shader.

Is it possible that very small particles disappear when I use RTT?

I have developed more than 20 mobile apps using OpenGL ES 2.0. However, I am now writing a renderer so I can use my apps on OS X, so I am using OpenGL 3.3 with GLSL 1.30. Yesterday I ran into a problem: I can't use a texture (RTT) into which I drew particles on an off-screen FBO with GL_LINES at size 1.0 (why is that the maximum line width in OpenGL 3.3?).
When I draw other geometry on the off-screen FBO and use it as a texture on-screen, I can see it, and if I draw the small particles directly on-screen I can see them clearly. But if I draw those particle lines off-screen and try to use the result as a texture on the main screen, I see only a black texture.
I have already checked GL errors, the back FBO's status, and the GL blending options, but I am still struggling to solve it.
Does anyone have an idea how to solve it?
Even though I think my code is okay, I have attached a little of it below.
// AFTER generate and bind FBO, generate RTT
StarTexture fboTex;
fboTex.texture_width = texture_width;
fboTex.texture_height = texture_height;
glGenTextures(1, &fboTex.texture_id);
glBindTexture(GL_TEXTURE_2D, fboTex.texture_id);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, texture_width, texture_height, 0, GL_RGBA, GL_UNSIGNED_BYTE, 0);
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, fboTex.texture_id, 0);
and this draws the particles to the back FBO:
glUniformMatrix4fv(h_Uniforms[UNIFORMS_PROJECTION], 1, GL_FALSE, g_proxtrans.s);
glBindBuffer(GL_ARRAY_BUFFER, h_VBO[VBO_PARTICLE]);
glBufferSubData(GL_ARRAY_BUFFER, 0, sizeof(Vec3)*ParticleNumTotal*2, &p_particle_lc_xy[0]);
glVertexAttribPointer(h_Attributes[ATTRIBUTES_POSITION], 3, GL_FLOAT, 0, 0, 0);
glEnableVertexAttribArray(h_Attributes[ATTRIBUTES_POSITION]);
glBindBuffer(GL_ARRAY_BUFFER, h_VBO[VBO_COLOR]);
glVertexAttribPointer(h_Attributes[ATTRIBUTES_COLOR], 4, GL_FLOAT, 0, 0, 0);
glEnableVertexAttribArray(h_Attributes[ATTRIBUTES_COLOR]);
glLineWidth(Thickness); // 1.0 because it is the maximum
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, h_VBO[VBO_INDEX_OFF1]);
glDrawElements(GL_LINES, 400, GL_UNSIGNED_INT, 0); // 200 lines
and this is how I draw it on the main screen:
glClearColor(0.0, 0.0, 0.0, 1.0);
glClear(GL_COLOR_BUFFER_BIT);
starfbo->bindingVAO1();
glViewport(0, 0, ogl_Width, ogl_Height);
glUseProgram(h_Shader_Program[Shader_Program_FINAL]);
glBindBuffer(GL_ARRAY_BUFFER, h_VBO[VBO_TEXCOORD2]);
glVertexAttribPointer(h_Attributes[ATTRIBUTES_UV2], 2, GL_FLOAT, 0, 0, 0);
glEnableVertexAttribArray(h_Attributes[ATTRIBUTES_UV2]);
glBindBuffer(GL_ARRAY_BUFFER, h_VBO[VBO_SQCOORD2]);
glVertexAttribPointer(h_Attributes[ATTRIBUTES_POSITION3], 2, GL_FLOAT, 0, 0, 0);
glEnableVertexAttribArray(h_Attributes[ATTRIBUTES_POSITION3]);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, h_VBO[VBO_INDEX_ON]);
glDrawElements(GL_TRIANGLES, sizeof(squareIndices)/sizeof(squareIndices[0]), GL_UNSIGNED_INT, (void*)0);
glUniformMatrix4fv(h_Uniforms[UNIFORMS_PROJECTION], 1, GL_FALSE, g_proxtrans.s);
glBindBuffer(GL_ARRAY_BUFFER, h_VBO[VBO_PARTICLE]);
glBufferSubData(GL_ARRAY_BUFFER, 0, sizeof(Vec3)*ParticleNumTotal*2, &p_particle_lc_xy[0]);
glVertexAttribPointer(h_Attributes[ATTRIBUTES_POSITION], 3, GL_FLOAT, 0, 0, 0);
glEnableVertexAttribArray(h_Attributes[ATTRIBUTES_POSITION]);
glBindBuffer(GL_ARRAY_BUFFER, h_VBO[VBO_COLOR]);
glVertexAttribPointer(h_Attributes[ATTRIBUTES_COLOR], 4, GL_FLOAT, 0, 0, 0);
glEnableVertexAttribArray(h_Attributes[ATTRIBUTES_COLOR]);
glLineWidth(Thickness);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, h_VBO[VBO_INDEX_OFF1]);
glDrawElements(GL_LINES, 400, GL_UNSIGNED_INT, 0);
If the resolution of the rendered image is much larger than the size (in pixels) it ends up being rendered at, it's certainly possible that small features disappear entirely.
Picture an extreme case. Say you render a few thin lines into a 1000x1000 texture, lighting up a very small fraction of the total 1,000,000 pixels. Now you map this texture onto a quad that has a size of 10x10 pixels when displayed. The fragment shader is invoked once for each pixel (assuming no MSAA), which makes for 100 shader invocations. Each of these 100 invocations samples the texture. With linear sampling and no mipmapping, it will read 4 texels for each sample operation. In total, 100 * 4 = 400 texels are read while rendering the polygon. It's quite likely that reading these 400 texels out of the total 1,000,000 will completely miss all of the lines you rendered into the texture.
One way to reduce this problem is to use mipmapping. This will generally prevent the features from disappearing completely. But small features will still fade because more and more texels are averaged in higher mipmap levels, where most of the texels are black.
A better but slightly more complex approach is that instead of using automatically generated mipmaps, you create the mipmaps manually, by rendering the same content into each mipmap level.
It might be good enough to simply be careful not to make the texture too large, or to create your own wide lines by drawing them as polygons instead of using line primitives.
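A short sketch of the mipmapping suggestion (not from the original answer; fboTex comes from the question's code, and this assumes the render-target texture was created with a complete mipmap chain allowed):
// after rendering the particles into the FBO texture, rebuild its mipmap
// chain and sample with a mipmapped minification filter, so downscaling
// averages over all texels instead of skipping most of them
glBindTexture(GL_TEXTURE_2D, fboTex.texture_id);
glGenerateMipmap(GL_TEXTURE_2D);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR);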
glDrawElements(GL_LINES, 400, GL_UNSIGNED_INT, 0);
GL_UNSIGNED_INT cannot be used as the glDrawElements index type in OpenGL ES the way it can in desktop OpenGL. Oddly, it works on iOS but not on Android; iOS presumably exposes the OES_element_index_uint extension, which permits it.
Without that extension, the type parameter must be GL_UNSIGNED_BYTE or GL_UNSIGNED_SHORT in OpenGL ES.
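A hedged sketch of the portable fix, reusing the question's buffer names; it assumes the index values fit in 16 bits (fewer than 65536 vertices) and that intIndices stands in for the original 32-bit index array:
// convert the 32-bit indices to 16-bit so the draw call is valid on
// OpenGL ES without the OES_element_index_uint extension
GLushort indices16[400];
for (int i = 0; i < 400; ++i)
    indices16[i] = (GLushort)intIndices[i];
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, h_VBO[VBO_INDEX_OFF1]);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, sizeof(indices16), indices16, GL_STATIC_DRAW);
glDrawElements(GL_LINES, 400, GL_UNSIGNED_SHORT, 0);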

Render to texture problem with alpha

When I render to texture, and then draw the same image, it seems to make everything darker. To get this image:
http://img24.imageshack.us/img24/8061/87993367.png
I'm rendering the upper-left square with color (1, 1, 1, .8) to a texture, then rendering that texture, plus the middle square (same color) to another texture, then finally that texture plus the lower-right square (same color) to the screen.
As you can see, each time I render to texture, everything gets a little darker.
My render-to-texture code looks like: (I'm using OpenGL ES on the iPhone)
// gen framebuffer
GLuint framebuffer;
glGenFramebuffersOES(1, &framebuffer);
glBindFramebufferOES(GL_FRAMEBUFFER_OES, framebuffer);
// gen texture
GLuint texture;
glGenTextures(1, &texture);
glEnable(GL_TEXTURE_2D);
glBindTexture(GL_TEXTURE_2D, texture);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, width, height, 0, GL_RGBA, GL_UNSIGNED_BYTE, 0);
glBindTexture(GL_TEXTURE_2D, 0);
glDisable(GL_TEXTURE_2D);
// hook it up
glFramebufferTexture2DOES(GL_FRAMEBUFFER_OES, GL_COLOR_ATTACHMENT0_OES, GL_TEXTURE_2D, texture, 0);
if(glCheckFramebufferStatusOES(GL_FRAMEBUFFER_OES) != GL_FRAMEBUFFER_COMPLETE_OES)
    return false;
// set up drawing
glBindFramebufferOES(GL_FRAMEBUFFER_OES, framebuffer);
glViewport(0, 0, Screen::Width, Screen::Height);
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
glOrtho(0, Screen::Width, 0, Screen::Height, -1, 1);
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
glColor4f(1, 1, 1, 1);
// do whatever drawing we'll do here
Draw();
glBindFramebufferOES(GL_FRAMEBUFFER_OES, 0);
Is there anything that I'm doing wrong here? Do you need more code to figure it out? What might be going on here?
I'm only guessing:
Drawing the first texture gives you 204 (0.8 * 255) in the RGB and alpha channels. When you draw it the second time (with GL_BLEND enabled, I presume), you're blending the light gray 204 RGB at 80% alpha, which gives you a medium gray.
Solution: use glBlendFunc(GL_ONE, GL_ONE_MINUS_SRC_ALPHA) and premultiply your colors.
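A minimal sketch of that fix in the fixed-function style of the code above (the 0.8 alpha comes from the question; values are illustrative):
// premultiplied alpha: the source color is already multiplied by its own
// alpha, so each render-to-texture pass no longer darkens RGB a second time
// (with straight alpha, 255 -> 204 -> 163 -> 130 across the three passes)
glEnable(GL_BLEND);
glBlendFunc(GL_ONE, GL_ONE_MINUS_SRC_ALPHA);
glColor4f(0.8f, 0.8f, 0.8f, 0.8f); // (1, 1, 1, 0.8) premultiplied
Draw();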
