OpenGL ES3 framebuffer draw depth in red scale - opengl-es

So, after a lot of hard work to get a directional light shadow map working, I can finally see the shadow map rendered on a quad. It only works with depth GL_DEPTH_COMPONENT16 and type GL_UNSIGNED_SHORT, or GL_DEPTH_COMPONENT32F and type GL_FLOAT, but it is drawn in red scale, not gray scale.
The problem is that I have tried many methods to calculate the depth and draw the shadow, but no shadow appears.
glCullFace(GL_FRONT);
glGenFramebuffers(1, &depthMapFBO);
glBindFramebuffer(GL_FRAMEBUFFER, depthMapFBO);
glGenTextures(1, &depthMap);
glBindTexture(GL_TEXTURE_2D, depthMap);
glTexImage2D(GL_TEXTURE_2D, 0, GL_DEPTH_COMPONENT16, SHADOW_WIDTH, SHADOW_HEIGHT, 0, GL_DEPTH_COMPONENT, GL_UNSIGNED_SHORT, NULL);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_COMPARE_FUNC, GL_LEQUAL);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_COMPARE_MODE, GL_COMPARE_REF_TO_TEXTURE);
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_DEPTH_STENCIL_ATTACHMENT, GL_TEXTURE_2D, depthMap, 0);
glDrawBuffers(1, GL_NONE);
glReadBuffer(GL_NONE);
glBindFramebuffer(GL_FRAMEBUFFER, 0);
glBindTexture(GL_TEXTURE_2D, 0);
glCullFace(GL_BACK);
if (glCheckFramebufferStatus(GL_FRAMEBUFFER) != GL_FRAMEBUFFER_COMPLETE) {
    LOGI("framebuffer incomplete");
}
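A few things in this snippet are worth double-checking, independent of the red-scale question: a depth-only texture belongs on GL_DEPTH_ATTACHMENT rather than GL_DEPTH_STENCIL_ATTACHMENT, glDrawBuffers takes a pointer to an array of enums, and the completeness check only tests the framebuffer that is currently bound. A rearranged depth-only FBO sketch along those lines (a hypothetical cleanup based on the ES 3.0 spec, not code from the answer below):
GLuint depthMapFBO, depthMap;
glGenFramebuffers(1, &depthMapFBO);
glBindFramebuffer(GL_FRAMEBUFFER, depthMapFBO);

glGenTextures(1, &depthMap);
glBindTexture(GL_TEXTURE_2D, depthMap);
glTexImage2D(GL_TEXTURE_2D, 0, GL_DEPTH_COMPONENT16, SHADOW_WIDTH, SHADOW_HEIGHT,
             0, GL_DEPTH_COMPONENT, GL_UNSIGNED_SHORT, NULL);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);

// depth-only texture -> depth attachment, not depth-stencil
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_TEXTURE_2D, depthMap, 0);

// no color buffer is drawn to or read from
const GLenum none = GL_NONE;
glDrawBuffers(1, &none);
glReadBuffer(GL_NONE);

// check completeness while the FBO is still bound
if (glCheckFramebufferStatus(GL_FRAMEBUFFER) != GL_FRAMEBUFFER_COMPLETE) {
    LOGI("framebuffer incomplete");
}
glBindFramebuffer(GL_FRAMEBUFFER, 0);
glBindTexture(GL_TEXTURE_2D, 0);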
and the fragment shader is:
uniform mediump sampler2DShadow shadowMap;
......
float bias = 0.005;
float visibility = 1.0;
for (int i = 0; i < 4; i++) {
    int index = i;
    visibility -= 0.2 * (1.0 - texture(shadowMap, vec3(FragPosLightSpace.xy + poissonDisk[index] / 700.0, (FragPosLightSpace.z - bias) / FragPosLightSpace.w)));
}
result = light.intensity * (visibility * (diffuse + specular));

...but it is in red scale, not gray scale
A texture with a depth component format, such as GL_DEPTH_COMPONENT16 or GL_DEPTH_COMPONENT32F, has only one color channel: the red channel.
If you read from a texture sampler that a depth component texture is bound to, then the green, blue, and alpha channels are filled in automatically.
The Image Format specification of Khronos group says:
Image formats do not have to store each component. When the shader samples such a texture, it will still resolve to a 4-value RGBA vector. The components not stored by the image format are filled in automatically. Zeros are used if R, G, or B is missing, while a missing Alpha always resolves to 1.
Note: Texture swizzling can change what the missing values are.
Because of that, the red channel is set, the green and blue channels are set to 0, and the alpha channel is 1. This causes the opaque red surface.
If you want to read a grayscale color from a depth component texture, then you have to read the red channel and apply it to the green and blue channels, too.
You have to adapt the code like this (note that to read the stored depth values the texture has to be sampled through a regular sampler2D, with GL_TEXTURE_COMPARE_MODE set to GL_NONE; a sampler2DShadow returns a comparison result, not the depth itself):
float depth = texture( shadowMap, ..... ).r;
vec3 depthGrayscale = vec3( depth );
or this:
vec3 depthGrayscale = texture( shadowMap, ..... ).rrr;
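For reference, a complete fragment shader for displaying the depth map on the quad in grayscale could look like this. This is a minimal sketch: it assumes the depth texture is bound with GL_TEXTURE_COMPARE_MODE set to GL_NONE and declared as a plain sampler2D, and the uniform and varying names are placeholders:
#version 300 es
precision mediump float;

uniform sampler2D depthMap; // depth texture, compare mode disabled
in vec2 uv;                 // texture coordinates of the screen quad
out vec4 fragColor;

void main()
{
    // the depth value lives in the red channel; replicate it to G and B
    float depth = texture(depthMap, uv).r;
    fragColor = vec4(vec3(depth), 1.0);
}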

Related

How to render to a depth texture in OpenGL ES on Android

I need to render to a depth texture in Android, and I'm having trouble figuring it out. I've searched the web for several days and pieced together the following code:
public void loadFrameBuffer(Context context, int resourceId) {
    final Bitmap bitmap = createBitmap(context, resourceId);

    // generate the framebuffer and texture object names
    glGenFramebuffers(1, frameBuffers, 0);
    glGenTextures(2, textures, 0);

    // bind color texture and load the texture mip level 0; texels are RGB565
    // no texels need to be specified as we are going to draw into the texture
    glBindTexture(GL_TEXTURE_2D, textures[COLOR_TEXTURE]);
    texImage2D(GL_TEXTURE_2D, 0, GL_RGB, bitmap, 0);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);

    // bind depth texture and load the texture mip level 0
    // no texels need to be specified as we are going to draw into the texture
    glBindTexture(GL_TEXTURE_2D, textures[DEPTH_TEXTURE]);
    texImage2D(GL_TEXTURE_2D, 0, GL_DEPTH_COMPONENT, bitmap, 0);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);

    // bind the framebuffer
    glBindFramebuffer(GL_FRAMEBUFFER, frameBuffers[0]);

    // specify texture as color attachment
    glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0,
                           GL_TEXTURE_2D, textures[COLOR_TEXTURE], 0);
    checkStatus();

    // specify texture as depth attachment
    glFramebufferTexture2D(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT,
                           GL_TEXTURE_2D, textures[DEPTH_TEXTURE], 0);
    checkStatus();
}
checkStatus() for the color attachment is a success: GL_FRAMEBUFFER_COMPLETE.
However, checkStatus() for the depth attachment is: GL_FRAMEBUFFER_INCOMPLETE_ATTACHMENT.
The documentation for the error states:
Not all framebuffer attachment points are framebuffer attachment complete. This means that at least one attachment point with a renderbuffer or texture attached has its attached object no longer in existence or has an attached image with a width or height of zero, or the color attachment point has a non-color-renderable image attached, or the depth attachment point has a non-depth-renderable image attached, or the stencil attachment point has a non-stencil-renderable image attached.
Color-renderable formats include GL_RGBA4, GL_RGB5_A1, and GL_RGB565.
GL_DEPTH_COMPONENT16 is the only depth-renderable format.
GL_STENCIL_INDEX8 is the only stencil-renderable format.
I'm at a loss, even with this description, as to what is wrong. The color attachment works just fine.
EDIT:
I'm seeing this in the log just before the previously mentioned status error:
<core_glTexImage2D:501>: GL_INVALID_OPERATION
It appears to be because of this line of code:
texImage2D(GL_TEXTURE_2D, 0, GL_DEPTH_COMPONENT, bitmap, 0);
EDIT:
I got past the errors. I switched from the call to:
texImage2D(GL_TEXTURE_2D, 0, GL_DEPTH_COMPONENT, bitmap, 0);
which is actually wrong, and now I'm calling:
glTexImage2D(GL_TEXTURE_2D, 0, GL_DEPTH_COMPONENT, texWidth, texHeight, 0,
GL_DEPTH_COMPONENT, GLES30.GL_FLOAT, null);
which works. I don't see any more errors. However, I'm still having a problem.
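Side note: in ES 3.0 you can also make the storage explicit with a sized internal format. A sketch in C-style GL calls (the GLES30 Java bindings used above map one-to-one):
// allocate a 32-bit float depth texture with no initial data;
// it is filled by rendering into the framebuffer
glTexImage2D(GL_TEXTURE_2D, 0, GL_DEPTH_COMPONENT32F, texWidth, texHeight,
             0, GL_DEPTH_COMPONENT, GL_FLOAT, NULL);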
Here's the model rendered straight to the default buffer (straight to the window):
But when I try to render the texture attached to my FrameBuffer, mapped to the following vertices:
// x, y, z, S, T
new float[]{
        // left triangle
        -1f, -1f, 0f, 0f, 0f, //
         1f,  1f, 0f, 1f, 1f, //
        -1f,  1f, 0f, 0f, 1f, //
        // right triangle
        -1f, -1f, 0f, 0f, 0f, //
         1f, -1f, 0f, 1f, 0f, //
         1f,  1f, 0f, 1f, 1f  //
}
);
It stretches the texture:
If I remove the left triangle and render I get this:
And lastly, rendering the depth texture gives me all red:
Everything is working correctly, there is nothing to fix
Your depth texture is correct. There is nothing left to fix.
The problem seems to be that you expect something with more contrast. I downloaded your picture and enhanced the contrast in an image editor:
You can see that there are details in there! A lot of data has been lost along the way, however. Your screen probably has an 8-bit framebuffer at best, and then I downloaded the result as a JPEG, which quantizes it even further, but... the original depth texture itself is probably 16 bits. So I can guess that we've probably lost ~10 bits by the time I've made the above image.
To put it another way, the depth buffer has 16 bits of precision, but the screen only has about 8. So, of course, you can't see the details. The data is fine.
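If you want to see that detail on screen without an image editor, one option (my suggestion, not something from the question) is to stretch the interesting depth range in the fragment shader before displaying it. A sketch, with uMinDepth and uMaxDepth as hypothetical uniforms you tune by hand:
#version 300 es
precision mediump float;

uniform sampler2D depthMap;
uniform float uMinDepth;   // e.g. 0.95, found by inspection
uniform float uMaxDepth;   // e.g. 1.00
in vec2 uv;
out vec4 fragColor;

void main()
{
    float d = texture(depthMap, uv).r;
    // remap [uMinDepth, uMaxDepth] to [0, 1] to enhance contrast
    float shade = clamp((d - uMinDepth) / (uMaxDepth - uMinDepth), 0.0, 1.0);
    fragColor = vec4(vec3(shade), 1.0);
}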
Why is it red?
Of course it's red. It's an image with only one channel. If you want to color it differently, use texture swizzling, or apply a color in the fragment shader.
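For the swizzle route, OpenGL ES 3.0 exposes GL_TEXTURE_SWIZZLE_* as texture parameters, so you can broadcast the red channel without touching the shader. A minimal sketch (depthTexture is a placeholder name):
glBindTexture(GL_TEXTURE_2D, depthTexture);
// make green and blue reads return the red (depth) channel,
// so the texture samples as grayscale
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_SWIZZLE_G, GL_RED);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_SWIZZLE_B, GL_RED);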

GPUImage replace colors with colors from textures

Looking at GPUImagePosterizeFilter, it seems like an easy adaptation to replace colors with pixels from textures. Say I have an image that is made from 10 greyscale colors. I would like to replace each of the pixel ranges from the 10 colors with pixels from 10 different texture swatches.
What is the proper way to create the textures? I am using the code below (I am not sure about the alpha arguments sent to CGBitmapContextCreate).
CGImageRef spriteImage = [UIImage imageNamed:fileName].CGImage;
size_t width = CGImageGetWidth(spriteImage);
size_t height = CGImageGetHeight(spriteImage);
GLubyte * spriteData = (GLubyte *) calloc(width*height*4, sizeof(GLubyte));
CGContextRef spriteContext = CGBitmapContextCreate(spriteData, width, height, 8, width*4, CGImageGetColorSpace(spriteImage), kCGImageAlphaPremultipliedLast | kCGBitmapByteOrder32Big);
CGContextDrawImage(spriteContext, CGRectMake(0, 0, width, height), spriteImage);
CGContextRelease(spriteContext);
GLuint texName;
glGenTextures(1, &texName);
glBindTexture(GL_TEXTURE_2D, texName);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, width, height, 0, GL_RGBA, GL_UNSIGNED_BYTE, spriteData);
free(spriteData);
return texName;
What is the proper way to pass the texture to the filter? In my main I have added:
uniform sampler2D fill0Texture;
In the code below, texture is what's passed from the function above.
glActiveTexture(GL_TEXTURE1);
glBindTexture(GL_TEXTURE_2D, texture);
glUniform1i(fill0Uniform, 1);
Whenever I try to get an image from the spriteContext it's nil, and when I try using pixels from fill0Texture they are always black. I have thought about doing this with 10 chroma key iterations, but I think replacing all the pixels in a modified GPUImagePosterizeFilter is the way to go.
In order to match colors against the output from the PosterizeFilter, I am using the following code.
float testValue = 1.0 - (float(idx) / float(colorLevels));
vec4 keyColor = vec4(testValue, testValue, testValue, 1.0);
vec4 replacementColor = texture2D(tx0, textureCoord(idx));
float select = step(distance(keyColor, srcColor), 0.1);
return select * replacementColor;
If the (already posterized) color passed in matches, then the replacement color is returned. The textureCoord(idx) call looks up the replacement color from a GL texture.
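For context, here is how that matching code could sit in a complete replacement function. This is a sketch under assumptions: 10 posterize levels, and the 10 swatches packed into a single horizontal atlas sampled by a reimplemented textureCoord (the original helper and the real texture layout aren't shown in the question):
const int COLOR_LEVELS = 10;
uniform sampler2D tx0;   // assumed: 1x10 horizontal atlas of replacement swatches

vec2 textureCoord(int idx)
{
    // sample the center of the idx-th swatch in the atlas
    return vec2((float(idx) + 0.5) / float(COLOR_LEVELS), 0.5);
}

vec4 replacementFor(vec4 srcColor)
{
    vec4 result = srcColor;
    for (int idx = 0; idx < COLOR_LEVELS; idx++) {
        float testValue = 1.0 - (float(idx) / float(COLOR_LEVELS));
        vec4 keyColor = vec4(testValue, testValue, testValue, 1.0);
        // replace the pixel when it is within tolerance of the posterized key
        float select = step(distance(keyColor, srcColor), 0.1);
        result = mix(result, texture2D(tx0, textureCoord(idx)), select);
    }
    return result;
}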

OpenGL is rendering the color of the first pixel

I am trying to port my game engine from win/linux to Mac OS X, and everything works fine except for the textures. I use SOIL to load the textures and GLSL shaders to render, and it's all done in C++.
I believe my shader is working properly:
gl_FragColor = texture2D( BSTextureUnit, textureCoordinates ) * BSTextureColor;
here is how I load the texture:
theTextureId = SOIL_load_OGL_texture(
    nameChar,
    SOIL_LOAD_AUTO,
    SOIL_CREATE_NEW_ID,
    SOIL_FLAG_NTSC_SAFE_RGB | SOIL_FLAG_COMPRESS_TO_DXT
);
glBindTexture(GL_TEXTURE_2D, theTextureId);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
//glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT );
//glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT );
if (!theTextureId)
{
    std::cout << "texture NOT loaded successfully: " << name << '\n';
}
What it does is load the texture, but instead of rendering all the pixels, it gives the whole object the color of the first pixel in the image.
So, if the texture is this one:
it will render a green block.
EDIT:
the texture coordinates:
dataToSet[0].texCoord.s = texLeft;
dataToSet[0].texCoord.t = texTop;
dataToSet[1].texCoord.s = texLeft;
dataToSet[1].texCoord.t = texBottom;
dataToSet[2].texCoord.s = texRight;
dataToSet[2].texCoord.t = texBottom;
dataToSet[3].texCoord.s = texRight;
dataToSet[3].texCoord.t = texTop;
where: GLfloat texTop = 1, texBottom = 0, texLeft = 0, texRight = 1;
I also tried it with the texture width and height instead of 0 and 1, and I got the same result. On Windows it renders correctly with these texture coordinates.

Texture is all black

As far as I can tell, my first attempt to draw a texture on a triangle is being setup correctly, but it shows up as all black.
I am sending the image to OpenGL as such:
GLuint gridTexture;
glGenTextures(1, &gridTexture);
glBindTexture(GL_TEXTURE_2D, gridTexture);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, size.x,
size.y, 0, GL_RGBA, GL_UNSIGNED_BYTE, pixels);
While I'm not sure how to test that "pixels" holds what I'd expect, I do know that the size.x and size.y variables are logging correctly for the PNG I'm using, so I assume the pixel data is fine as well, since they are both extracted together in my resource loader.
My shaders are simple:
attribute vec4 Position;
attribute vec4 SourceColor;
attribute vec2 TextureCoordinate;
varying vec4 DestinationColor;
varying vec2 TextureCoordOut;
uniform mat4 Projection;
uniform mat4 Modelview;
void main(void)
{
DestinationColor = SourceColor;
gl_Position=Projection*Modelview*Position;
TextureCoordOut = TextureCoordinate;
}
fragment:
varying lowp vec4 DestinationColor;
varying mediump vec2 TextureCoordOut;
uniform sampler2D Sampler;
void main(void)
{
gl_FragColor = texture2D(Sampler, TextureCoordOut) * DestinationColor;
// gl_FragColor = DestinationColor; //this works and I see varied colors fine
}
I send texture coordinates from client memory like this:
glEnableVertexAttribArray(textCoordAttribute);
glVertexAttribPointer(textCoordAttribute, 2, GL_FLOAT, GL_FALSE, sizeof(vec2), &texs[0]);
The triangle and its vertices with texture coordinates are like this; I know the coordinates aren't polished, I just want to see something on the screen:
//omitting structures that I use to hold my vertex data, but you can at least see the vertices and the coordinates I am associating with them. The triangle draws fine, and if I disable the texture2D() function in the frag shader I can see the colors of the vertices so everything appears to be working except the texture itself.
top.Color=vec4(1,0,0,1);
top.Position=vec3(0,300,0);
texs.push_back(vec2(0,1));
right.Color=vec4(0,1,0,1);
right.Position=vec3(300,0,0);
texs.push_back(vec2(1,0));
left.Color=vec4(0,0,1,1);
left.Position=vec3(-300,0,0);
texs.push_back(vec2(0,0));
verts.push_back(top);
verts.push_back(right);
verts.push_back(left);
For good measure I tried binding the texture again with glBindTexture before drawing, to make sure it was "active", but that made no difference.
I think there is probably a very simple step I am not doing somewhere, but I can't find it anywhere.
For people suffering from textures showing up black: another cause I found is that if you don't set up these simple parameters when creating the texture (before glTexImage2D), the texture shows up black:
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_GENERATE_MIPMAP, GL_FALSE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
The issue was resolved by making my texture dimensions a power of 2 in width and height.
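That is consistent with the OpenGL ES 2.0 rules: a non-power-of-two texture is only "texture complete" when its wrap modes are GL_CLAMP_TO_EDGE and its minification filter does not use mipmaps. If you need to keep NPOT dimensions, a sketch of safe parameters:
glBindTexture(GL_TEXTURE_2D, gridTexture);
// NPOT-safe in ES 2.0: clamp instead of repeat, no mipmap filtering
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR); // not *_MIPMAP_*
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);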
You must have data bound to the texture before setting the filter; call glTexImage2D first:
GLuint gridTexture;
glGenTextures(1, &gridTexture);
glBindTexture(GL_TEXTURE_2D, gridTexture);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, size.x,
size.y, 0, GL_RGBA, GL_UNSIGNED_BYTE, pixels);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);

Render to texture problem with alpha

When I render to texture, and then draw the same image, it seems to make everything darker. To get this image:
http://img24.imageshack.us/img24/8061/87993367.png
I'm rendering the upper-left square with color (1, 1, 1, .8) to a texture, then rendering that texture, plus the middle square (same color) to another texture, then finally that texture plus the lower-right square (same color) to the screen.
As you can see, each time I render to texture, everything gets a little darker.
My render-to-texture code looks like: (I'm using OpenGL ES on the iPhone)
// gen framebuffer
GLuint framebuffer;
glGenFramebuffersOES(1, &framebuffer);
glBindFramebufferOES(GL_FRAMEBUFFER_OES, framebuffer);
// gen texture
GLuint texture;
glGenTextures(1, &texture);
glEnable(GL_TEXTURE_2D);
glBindTexture(GL_TEXTURE_2D, texture);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, width, height, 0, GL_RGBA, GL_UNSIGNED_BYTE, 0);
glBindTexture(GL_TEXTURE_2D, 0);
glDisable(GL_TEXTURE_2D);
// hook it up
glFramebufferTexture2DOES(GL_FRAMEBUFFER_OES, GL_COLOR_ATTACHMENT0_OES, GL_TEXTURE_2D, texture, 0);
if (glCheckFramebufferStatusOES(GL_FRAMEBUFFER_OES) != GL_FRAMEBUFFER_COMPLETE_OES)
    return false;
// set up drawing
glBindFramebufferOES(GL_FRAMEBUFFER_OES, framebuffer);
glViewport(0, 0, Screen::Width, Screen::Height);
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
glOrtho(0, Screen::Width, 0, Screen::Height, -1, 1);
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
glColor4f(1, 1, 1, 1);
// do whatever drawing we'll do here
Draw();
glBindFramebufferOES(GL_FRAMEBUFFER_OES, 0);
Is there anything that I'm doing wrong here? Do you need more code to figure it out? What might be going on here?
I'm only guessing:
Drawing the first texture gives you 204 (0.8 * 255) in the RGB and alpha channels. When you draw it the second time (with GL_BLEND enabled, I presume), you're getting the light gray 204 RGB with 80% alpha, which gives you a medium gray.
Solution: use glBlendFunc(GL_ONE, GL_ONE_MINUS_SRC_ALPHA) and premultiply your colors.
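A minimal sketch of that fix in the question's ES 1.x style (the 0.8 values correspond to the (1, 1, 1, .8) color from the question, premultiplied by its alpha):
// premultiplied alpha: scale RGB by A up front...
glColor4f(0.8f, 0.8f, 0.8f, 0.8f);
// ...and blend with GL_ONE instead of GL_SRC_ALPHA so the
// source color is not multiplied by alpha a second time
glEnable(GL_BLEND);
glBlendFunc(GL_ONE, GL_ONE_MINUS_SRC_ALPHA);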
