OpenGL ES depth framebuffer GL_FRAMEBUFFER_INCOMPLETE_MISSING_ATTACHMENT

I've been trying to add shadow mapping to my OpenGL ES project and I've just found out that my framebuffer status returns GL_FRAMEBUFFER_INCOMPLETE_MISSING_ATTACHMENT.
Here's my code to create the framebuffer:
// create fbo
int[] fboPtr = new int[1];
GLES30.glGenFramebuffers(1, fboPtr, 0);
fbo = fboPtr[0];
// use fbo
GLES30.glBindFramebuffer(GLES30.GL_FRAMEBUFFER, fbo);
// create depthMap
int[] depthMapPtr = new int[1];
GLES30.glGenTextures(1, depthMapPtr, 0);
depthMap = depthMapPtr[0];
// use depthMap
GLES30.glBindTexture(GLES30.GL_TEXTURE_2D, depthMap);
GLES30.glTexImage2D(GLES30.GL_TEXTURE_2D, 0, GLES30.GL_DEPTH_COMPONENT, size, size,
0, GLES30.GL_DEPTH_COMPONENT, GLES30.GL_FLOAT, null);
GLES30.glTexParameteri(GLES30.GL_TEXTURE_2D, GLES30.GL_TEXTURE_MIN_FILTER, GLES30.GL_NEAREST);
GLES30.glTexParameteri(GLES30.GL_TEXTURE_2D, GLES30.GL_TEXTURE_MAG_FILTER, GLES30.GL_NEAREST);
GLES30.glTexParameteri(GLES30.GL_TEXTURE_2D, GLES30.GL_TEXTURE_WRAP_S, GLES30.GL_REPEAT);
GLES30.glTexParameteri(GLES30.GL_TEXTURE_2D, GLES30.GL_TEXTURE_WRAP_T, GLES30.GL_REPEAT);
GLES30.glFramebufferTexture2D(GLES30.GL_FRAMEBUFFER, GLES30.GL_DEPTH_ATTACHMENT, GLES30.GL_TEXTURE_2D, depthMap, 0);
// draw buffer
int[] buffer = {GLES30.GL_NONE};
GLES30.glDrawBuffers(1, buffer, 0);
GLES30.glReadBuffer(GLES30.GL_NONE);
int status = GLES30.glCheckFramebufferStatus(GLES30.GL_FRAMEBUFFER);
I've bound the texture, so I don't know what can be causing the error.

Your internalFormat parameter for glTexImage2D isn't legal.
https://www.khronos.org/registry/OpenGL-Refpages/es3.0/html/glTexImage2D.xhtml
You need to use a sized internal format for depth. So I think this should work:
GLES30.glTexImage2D(GLES30.GL_TEXTURE_2D, 0, GLES30.GL_DEPTH_COMPONENT32F,
size, size, 0, GLES30.GL_DEPTH_COMPONENT, GLES30.GL_FLOAT, null);
Off topic: Learn to use the KHR_debug extension if you can - it gives you readable error messages, often with a precise reason why something failed.
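For reference, here is a minimal sketch of enabling KHR_debug, written in C against the GLES2/gl2ext.h headers (the question's code is Java, where the same functionality is exposed through android.opengl.GLES32; the callback and helper names below are made up for illustration):
#include <EGL/egl.h>
#include <GLES2/gl2.h>
#include <GLES2/gl2ext.h>
#include <stdio.h>
// Illustrative callback: just print every message the driver reports.
static void GL_APIENTRY onGlDebugMessage(GLenum source, GLenum type, GLuint id,
                                         GLenum severity, GLsizei length,
                                         const GLchar *message, const void *userParam)
{
    (void)source; (void)type; (void)id; (void)severity; (void)length; (void)userParam;
    printf("GL debug: %s\n", message);
}
static void enableKhrDebugIfAvailable(void)
{
    // On OpenGL ES the KHR_debug entry points are loaded through EGL.
    PFNGLDEBUGMESSAGECALLBACKKHRPROC debugMessageCallbackKHR =
        (PFNGLDEBUGMESSAGECALLBACKKHRPROC)eglGetProcAddress("glDebugMessageCallbackKHR");
    if (debugMessageCallbackKHR == NULL)
        return; // extension not available
    glEnable(GL_DEBUG_OUTPUT_KHR);
    glEnable(GL_DEBUG_OUTPUT_SYNCHRONOUS_KHR); // report the error on the offending call
    debugMessageCallbackKHR(onGlDebugMessage, NULL);
}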

Related

Why Compute Shader is slowing down the rendering API calls?

I am using a compute shader to process the input buffer data and store it in an output texture using imageStore().
After executing the compute shader, I make 3 render calls sequentially.
Compute Shader Code:
#version 310 es
precision mediump image2D;
layout(std430) buffer; // Sets the default layout for SSBOs
layout(local_size_x = 256) in; // 256 threads per work group
layout(binding = 0) readonly buffer InputBuf
{
uint input_buff[];
} inputbuff;
layout (rgba32f, binding = 1 ) uniform writeonly image2D out_teximg;
void main()
{
int idx = int(gl_GlobalInvocationID.x);
int idy = int(gl_GlobalInvocationID.y);
uint inputpix = inputbuff.input_buff[1024 * idy + idx];
// some calculation on inputpix and output is rcolor, bcolor, gcolor
imageStore(out_teximg, ivec2(idx , idy), vec4(rcolor, bcolor, gcolor, 1.0));
barrier();
}
Code:
void initCompute()
{
glGenTextures(1, &computeOutTex);
glGenBuffers(1, &inSSBOId);
}
uint inputBuffData[] = { .... }; // input buffer data
void execute_compute()
{
// compute shader code starts...
glUseProgram(computePgmId);
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, computeOutTex);
glTexStorage2D(GL_TEXTURE_2D, 1, GL_RGBA32F, width, height);
glBindImageTexture(1, computeOutTex, 0, GL_FALSE, 0, GL_WRITE_ONLY, GL_RGBA32F); // binding is 1
glUniform1i( glGetUniformLocation(computePgmId, "out_teximg"), 0);
uint inputBuffSize = 1024 * 512 * 3;
glBindBuffer(GL_SHADER_STORAGE_BUFFER, inSSBOId);
glBufferData(GL_SHADER_STORAGE_BUFFER, inputBuffSize, inputBuffData, GL_STATIC_DRAW);
glBindBufferBase(GL_SHADER_STORAGE_BUFFER, 0 , inSSBOId); // binding is 0
glDispatchCompute(width / 256, height, 1);
glMemoryBarrier(GL_SHADER_IMAGE_ACCESS_BARRIER_BIT);
// glFinish();
glBindBuffer(GL_SHADER_STORAGE_BUFFER, 0);
glBindImageTexture(1, 0, 0, GL_FALSE, 0, GL_WRITE_ONLY, GL_RGBA32F); // binding is 1
glBindBufferBase(GL_SHADER_STORAGE_BUFFER, 0, 0);// binding is 0
}
int draw()
{
glBindFramebuffer(GL_FRAMEBUFFER, m_FBOId); // Offscreen Rendering
glClear(GL_COLOR_BUFFER_BIT);
glUseProgram(compute_pgm);
execute_compute();
glUseProgram(render_pgm1);
glViewport(0,0,w,h);
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, computeOutTex);
glDrawElements(); // Render the texture data
// 2nd draw call
glUseProgram(render_pgm2);
....
....
glDrawElements();
// 3rd draw call
glUseProgram(render_pgm3);
....
....
glDrawElements();
glBindFramebuffer(GL_FRAMEBUFFER, 0); // unbind FBO
}
Here, only the 2nd draw call takes more time after using the compute shader.
If glFinish() is called after glMemoryBarrier(), then only the execute_compute() call is slowed down.
Why is the compute shader slowing down the subsequent draw calls?
Is glFinish() really needed?
The compute shader does not slow down the subsequent draw call. However, the compute shader itself takes some time to execute. Since you are setting a memory barrier, the subsequent draws have to wait.
OpenGL commands are queued and are not executed immediately when they are called. The GPU and CPU work in parallel: the CPU sends instructions to the GPU, and the GPU processes them as quickly as possible.
glFinish does not return until all previously issued commands have completed. glFinish itself is not "costly"; it only appears costly when you time it on the CPU, because you are then measuring how long the previously issued OpenGL commands take to finish.
In any case, glFinish is not needed here. All you need is the memory barrier. With the barrier in place, the following OpenGL commands that depend on it appear to take longer; they don't actually take longer, they just have to wait until the condition indicated by the barrier is met.
In your case you need GL_TEXTURE_FETCH_BARRIER_BIT (or GL_ALL_BARRIER_BITS), which makes incoherent memory writes issued before the barrier (e.g. image stores) visible to texture fetches issued after it, as sketched below.
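A minimal sketch of the ordering, reusing the names from the question (the rest of the state setup from execute_compute() is elided):
// The dispatch writes computeOutTex with imageStore(); the later draws read it with
// texture fetches, so the barrier that applies is the texture-fetch one.
glUseProgram(computePgmId);
glDispatchCompute(width / 256, height, 1);
glMemoryBarrier(GL_TEXTURE_FETCH_BARRIER_BIT); // GL_SHADER_IMAGE_ACCESS_BARRIER_BIT only covers later image loads/stores, not texture() reads
glUseProgram(render_pgm1);
glBindTexture(GL_TEXTURE_2D, computeOutTex);
// glDrawElements(...) as before; no glFinish() is required, the barrier alone orders the work.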

SwapBuffer Nvidia Crash

EDIT: I thought this was restricted to Attribute-Created GL contexts, but it isn't, so I rewrote the post.
Hey guys, whenever I call SwapBuffers(hDC), I get a crash. If I create the context with WGL_CONTEXT_DEBUG_BIT_ARB, I get a
Too many posts were made to a semaphore.
from Windows as I call SwapBuffers. What could be the cause of this?
Update: No crash occurs if I don't draw, just clear and swap.
Here's a bit of the code with the irrelevant bits cut out:
static PIXELFORMATDESCRIPTOR pfd = // pfd Tells Windows How We Want Things To Be
{
sizeof(PIXELFORMATDESCRIPTOR), // Size Of This Pixel Format Descriptor
1, // Version Number
PFD_DRAW_TO_WINDOW | // Format Must Support Window
PFD_SUPPORT_OPENGL | // Format Must Support OpenGL
PFD_DOUBLEBUFFER, // Must Support Double Buffering
PFD_TYPE_RGBA, // Request An RGBA Format
32, // Select Our Color Depth
0, 0, 0, 0, 0, 0, // Color Bits Ignored
0, // No Alpha Buffer
0, // Shift Bit Ignored
0, // No Accumulation Buffer
0, 0, 0, 0, // Accumulation Bits Ignored
24, // 24Bit Z-Buffer (Depth Buffer)
0, // No Stencil Buffer
0, // No Auxiliary Buffer
PFD_MAIN_PLANE, // Main Drawing Layer
0, // Reserved
0, 0, 0 // Layer Masks Ignored
};
if (!(hDC = GetDC(windowHandle)))
return false;
unsigned int PixelFormat;
if (!(PixelFormat = ChoosePixelFormat(hDC, &pfd)))
return false;
if (!SetPixelFormat(hDC, PixelFormat, &pfd))
return false;
hRC = wglCreateContext(hDC);
if (!hRC) {
std::cout << "wglCreateContext Failed!\n";
return false;
}
if (wglMakeCurrent(hDC, hRC) == NULL) {
std::cout << "Make Context Current Second Failed!\n";
return false;
}
... // OGL Buffer Initialization
glClear(GL_DEPTH_BUFFER_BIT | GL_COLOR_BUFFER_BIT);
glBindVertexArray(vao);
glUseProgram(myprogram);
glDrawElements(GL_TRIANGLES, indexCount, GL_UNSIGNED_SHORT, (void *)indexStart);
SwapBuffers(GetDC(window_handle));
I found the answer. The SwapBuffers(hDC) call was simply where the error surfaced; it was not the cause. I believe the error was due to something related to my indices, since if I draw only the first mesh in the model, everything works as intended. Nvidia crashed on this error; Intel carried on and disregarded it.
Nonetheless, thank you to Chris Becke for pointing out a future memory leak from using GetDC(hwnd).
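Since out-of-range indices passed to glDrawElements are undefined behaviour (some drivers crash, others silently skip the bad vertices), a small debug-only check like this hypothetical helper can catch them before the draw call:
// Returns 1 if every index stays inside the vertex buffer, 0 otherwise.
static int indicesInRange(const unsigned short *indices, size_t indexCount, size_t vertexCount)
{
    for (size_t i = 0; i < indexCount; ++i)
        if (indices[i] >= vertexCount)
            return 0; // this index points past the end of the vertex buffer
    return 1;
}
// e.g. assert(indicesInRange(indices, indexCount, vertexCount)); before glDrawElements(...).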

WebGL error when attempting to get color data from vec4

I'm having an issue while rendering a square in WebGL. When I run the program in Chrome, I'm getting the error:
GL ERROR :GL_INVALID_OPERATION : glDrawArrays: attempt to access out of range vertices in attribute 0
I've assumed this is because, at some point, the buffers are looking at the wrong arrays when trying to get data. I've pinpointed the issue to the
gl.vertexAttribPointer(pColorIndex, 4, gl.FLOAT, false, 0, 0);
line in the code below. That is, if I change the 4 to a 2, the code will run, but not properly (I'm looking at a vec4 for color data here). Is there an issue with the way my arrays are bound?
bindColorDisplayShaders();
// clear the framebuffer
gl.clear(gl.COLOR_BUFFER_BIT);
// bind the shader
gl.useProgram(shader);
// set the value of the uniform variable in the shader
var shift_loc = gl.getUniformLocation(shader, "shift");
gl.uniform1f(shift_loc, .5);
// bind the buffer
gl.bindBuffer(gl.ARRAY_BUFFER, vertexbuffer);
// get the index for the a_Position attribute defined in the vertex shader
var positionIndex = gl.getAttribLocation(shader, 'a_Position');
if (positionIndex < 0) {
console.log('Failed to get the storage location of a_Position');
return;
}
// "enable" the a_position attribute
gl.enableVertexAttribArray(positionIndex);
// associate the data in the currently bound buffer with the a_position attribute
// (The '2' specifies there are 2 floats per vertex in the buffer. Don't worry about
// the last three args just yet.)
gl.vertexAttribPointer(positionIndex, 2, gl.FLOAT, false, 0, 0);
gl.bindBuffer(gl.ARRAY_BUFFER, null);
// bind the buffer with the color data
gl.bindBuffer(gl.ARRAY_BUFFER, chosencolorbuffer);
var pColorIndex = gl.getUniformLocation(shader, 'a_ChosenColor');
if(pColorIndex < 0){
console.log('Failed to get the storage location of a_ChosenColor');
}
gl.enableVertexAttribArray(pColorIndex);
gl.vertexAttribPointer(pColorIndex, 4, gl.FLOAT, false, 0, 0);
gl.bindBuffer(gl.ARRAY_BUFFER, null);
// draw, specifying the type of primitive to assemble from the vertices
gl.drawArrays(gl.TRIANGLES, 0, numPoints);
You can only use either a uniform or a vertex attribute; these are two different things.
When using a vertex attribute, you have to supply one value per vertex to match the amount of vertices in your position buffer, and you get its location with gl.getAttribLocation.
When using a uniform, you don't supply its data via array buffers; you set its value with the gl.uniform* methods.
In your example: gl.uniform4fv(pColorIndex, yourColorVector).
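To see the two paths side by side, here is the same distinction written against the plain OpenGL ES C API, which the WebGL calls mirror one-to-one (the u_ChosenColor name and the color value are illustrative, not taken from the question's shaders):
// Attribute: one value per vertex, fed from a bound buffer.
GLint posLoc = glGetAttribLocation(program, "a_Position");
glBindBuffer(GL_ARRAY_BUFFER, vertexBuffer);
glEnableVertexAttribArray(posLoc);
glVertexAttribPointer(posLoc, 2, GL_FLOAT, GL_FALSE, 0, (const void *)0);
// Uniform: one value per draw, no buffer and no attrib pointer involved.
GLint colorLoc = glGetUniformLocation(program, "u_ChosenColor");
GLfloat chosenColor[4] = { 1.0f, 0.0f, 0.0f, 1.0f };
glUseProgram(program);
glUniform4fv(colorLoc, 1, chosenColor);
glDrawArrays(GL_TRIANGLES, 0, numPoints);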

Generating and updating 8-bit gray-scale texture in OpenGL ES 2.0

For an OpenGL texture cache I need to initialize a large (≥ 2048x2048) texture and then frequently update little sections of it.
The following (pseudo-)code works:
// Setup texture
int[] buffer = new int[2048*2048 / 4]; // Generate dummy buffer with 1 byte per pixel
int id = glGenTexture();
glBindTexture(GL_TEXTURE_2D, id);
glTexImage2D(GL_TEXTURE_2D, 0, GL_ALPHA, 2048, 2048, 0, GL_ALPHA, GL_UNSIGNED_BYTE, buffer);
// Perform update
glBindTexture(GL_TEXTURE_2D, id);
glTexSubImage2D(GL_TEXTURE_2D, 0, x, y, width, height, GL_ALPHA, GL_UNSIGNED_BYTE, data);
But I find the totally unnecessary creation of a 4MB int-buffer a bit undesirable to say the least. So, I tried the following instead:
// Setup texture
int id = glGenTexture();
glBindTexture(GL_TEXTURE_2D, id);
glCopyTexImage2D(GL_TEXTURE_2D, 0, GL_ALPHA, 0, 0, 2048, 2048, 0);
This gave me a GL_INVALID_OPERATION error, which I believe is caused by the fact that the frame-buffer does not contain an alpha value, so rather than just setting that to 1, the call fails.
Next attempt:
// Setup texture
int id = glGenTexture();
glBindTexture(GL_TEXTURE_2D, id);
glCopyTexImage2D(GL_TEXTURE_2D, 0, GL_LUMINANCE, 0, 0, 2048, 2048, 0);
This works, but now my glTexSubImage2D call fails with GL_INVALID_OPERATION because it specifies GL_ALPHA instead of GL_LUMINANCE. So, I changed that as well to get:
// Perform update
glBindTexture(GL_TEXTURE_2D, id);
glTexSubImage2D(GL_TEXTURE_2D, 0, x, y, width, height, GL_LUMINANCE, GL_UNSIGNED_BYTE, data);
And I changed my shader to read the value from the r rather than the a component.
This works on some devices, but on the iPhone 3GS, I still get the GL_INVALID_OPERATION error in the glTexSubImage2D call. Why? And how can I fix this? Is there some way, for example, to change the internal texture format? Or can I create some other framebuffer that does have an alpha component that I can use as the source for glCopyTexImage2D?
data can be NULL in your glTexImage2D() call if you just want to allocate an empty texture:
data may be a null pointer. In this case, texture memory is allocated to accommodate a texture of width and height. You can then download subtextures to initialize this texture memory. The image is undefined if the user tries to apply an uninitialized portion of the texture image to a primitive.
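A minimal sketch of that approach against the ES 2.0 C API, reusing the names from the question (the unpack-alignment call is an extra precaution for 1-byte-per-pixel rows whose width is not a multiple of 4):
// Setup: allocate the empty 2048x2048 GL_ALPHA texture with no client-side buffer.
GLuint id;
glGenTextures(1, &id);
glBindTexture(GL_TEXTURE_2D, id);
glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
glTexImage2D(GL_TEXTURE_2D, 0, GL_ALPHA, 2048, 2048, 0,
             GL_ALPHA, GL_UNSIGNED_BYTE, NULL); // NULL: allocate storage only
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
// Per update: upload just the dirty rectangle, with a format matching the allocation.
glBindTexture(GL_TEXTURE_2D, id);
glTexSubImage2D(GL_TEXTURE_2D, 0, x, y, width, height,
                GL_ALPHA, GL_UNSIGNED_BYTE, data);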

Render OpenGL ES 2.0 to image

I am trying to do some OpenGL ES 2.0 rendering to an image file, independent of the rendering being shown on the screen to the user. The image I'm rendering to is a different size than the user's screen. I just need a byte array of GL_RGB data. I'm familiar with glReadPixels, but I don't think it would do the trick in this case since I'm not pulling from an already-rendered user screen.
Pseudocode:
// Switch rendering to another buffer (framebuffer? renderbuffer?)
// Draw code here
// Save byte array of rendered data GL_RGB to file
// Switch rendering back to user's screen.
How can I do this without interrupting the user's display? I'd rather not have to flicker the user's screen, drawing my desired information for a single frame, glReadPixel-ing and then having it disappear.
Again, I don't want it to show anything to the user. Here's my code; it doesn't work. Am I missing something?
unsigned int canvasFrameBuffer;
bglGenFramebuffers(1, &canvasFrameBuffer);
bglBindFramebuffer(BGL_RENDERBUFFER, canvasFrameBuffer);
unsigned int canvasRenderBuffer;
bglGenRenderbuffers(1, &canvasRenderBuffer);
bglBindRenderbuffer(BGL_RENDERBUFFER, canvasRenderBuffer);
bglRenderbufferStorage(BGL_RENDERBUFFER, BGL_RGBA4, width, height);
bglFramebufferRenderbuffer(BGL_FRAMEBUFFER, BGL_COLOR_ATTACHMENT0, BGL_RENDERBUFFER, canvasRenderBuffer);
unsigned int canvasTexture;
bglGenTextures(1, &canvasTexture);
bglBindTexture(BGL_TEXTURE_2D, canvasTexture);
bglTexImage2D(BGL_TEXTURE_2D, 0, BGL_RGB, width, height, 0, BGL_RGB, BGL_UNSIGNED_BYTE, 0);
bglFramebufferTexture2D(BGL_FRAMEBUFFER, BGL_COLOR_ATTACHMENT0, BGL_TEXTURE_2D, canvasTexture, 0);
Matrix::matrix_t identity;
Matrix::LoadIdentity(&identity);
bglClearColor(1.0f, 1.0f, 1.0f, 1.0f);
bglClear(BGL_COLOR_BUFFER_BIT);
Draw(&identity, &identity, this);
bglFlush();
bglFinish();
byte *buffer = (byte*)Z_Malloc(width * height * 4, ZT_STATIC);
bglReadPixels(0, 0, width, height, BGL_RGB, BGL_UNSIGNED_BYTE, buffer);
SaveTGA("canvas.tga", buffer, width, height);
Z_Free(buffer);
// unbind frame buffer
bglBindRenderbuffer(BGL_RENDERBUFFER, 0);
bglBindFramebuffer(BGL_FRAMEBUFFER, 0);
bglDeleteTextures(1, &canvasTexture);
bglDeleteRenderbuffers(1, &canvasRenderBuffer);
bglDeleteFramebuffers(1, &canvasFrameBuffer);
Here's the solution, for anybody who needs it:
// Create framebuffer
unsigned int canvasFrameBuffer;
glGenFramebuffers(1, &canvasFrameBuffer);
glBindFramebuffer(GL_FRAMEBUFFER, canvasFrameBuffer);
// Attach renderbuffer
unsigned int canvasRenderBuffer;
glGenRenderbuffers(1, &canvasRenderBuffer);
glBindRenderbuffer(GL_RENDERBUFFER, canvasRenderBuffer);
glRenderbufferStorage(GL_RENDERBUFFER, GL_RGBA4, width, height);
glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_RENDERBUFFER, canvasRenderBuffer);
// Clear the target (optional)
glClearColor(1.0f, 1.0f, 1.0f, 1.0f);
glClear(GL_COLOR_BUFFER_BIT);
// Draw whatever you want here
char *buffer = (char*)malloc(width * height * 3);
glReadPixels(0, 0, width, height, GL_RGB, GL_UNSIGNED_BYTE, buffer);
SaveTGA("canvas.tga", buffer, width, height); // Your own function to save the image data to a file (in this case, a TGA)
free(buffer);
// unbind frame buffer
glBindRenderbuffer(GL_RENDERBUFFER, 0);
glBindFramebuffer(GL_FRAMEBUFFER, 0);
glDeleteRenderbuffers(1, &canvasRenderBuffer);
glDeleteFramebuffers(1, &canvasFrameBuffer);
You can render to a texture, read the pixels, and draw a quad with that texture (if you want to show it to the user). It should not flicker, but it obviously degrades performance; a sketch of the texture-backed variant follows the links below.
On iOS for example:
OpenGL ES Render to Texture
Reading a openGL ES texture to a raw array
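Roughly, the texture-backed variant looks like this in ES 2.0 C (width, height and the actual draw calls are placeholders):
GLuint fbo, tex;
// Color texture that the offscreen pass will render into.
glGenTextures(1, &tex);
glBindTexture(GL_TEXTURE_2D, tex);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, width, height, 0,
             GL_RGBA, GL_UNSIGNED_BYTE, NULL);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glGenFramebuffers(1, &fbo);
glBindFramebuffer(GL_FRAMEBUFFER, fbo); // note: GL_FRAMEBUFFER, not GL_RENDERBUFFER
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, tex, 0);
if (glCheckFramebufferStatus(GL_FRAMEBUFFER) == GL_FRAMEBUFFER_COMPLETE) {
    glViewport(0, 0, width, height);
    // ... draw the offscreen content here ...
    unsigned char *pixels = (unsigned char *)malloc(width * height * 4);
    glReadPixels(0, 0, width, height, GL_RGBA, GL_UNSIGNED_BYTE, pixels); // RGBA/UNSIGNED_BYTE is always a valid read format in ES 2.0
    // ... save or process pixels ...
    free(pixels);
}
glBindFramebuffer(GL_FRAMEBUFFER, 0); // back to the on-screen framebuffer
// tex can now also be drawn on a quad if the result should ever be shown to the user.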
