I ran into a problem using GL_LUMINANCE to define an FBO. Here is the call I used:
generateRenderToTexture(GL_RGBA, GL_RGBA, GL_UNSIGNED_BYTE, _maskTexture, _imageWidth, _imageHeight, false);
The related code is as follows:
TextureBuffer _maskTexture;
class TextureBuffer {
public:
    GLuint texture;
    GLuint frameBuffer;
    GLenum internalformat;
    GLenum format;
    GLenum type;
    int w, h;
    TextureBuffer() : texture(0), frameBuffer(0) {}
    void release()
    {
        if (texture)
        {
            glDeleteTextures(1, &texture);
            texture = 0;
        }
        if (frameBuffer)
        {
            glDeleteFramebuffers(1, &frameBuffer);
            frameBuffer = 0;
        }
    }
};
void generateRenderToTexture(GLint internalformat, GLenum format, GLenum type,
                             TextureBuffer &tb, int w, int h, bool linearInterp)
{
    glGenTextures(1, &tb.texture);
    glBindTexture(GL_TEXTURE_2D, tb.texture);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, linearInterp ? GL_LINEAR : GL_NEAREST);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, linearInterp ? GL_LINEAR : GL_NEAREST);
    glTexImage2D(GL_TEXTURE_2D, 0, internalformat, w, h, 0, format, type, NULL);
    glGenFramebuffers(1, &tb.frameBuffer);
    glBindFramebuffer(GL_FRAMEBUFFER, tb.frameBuffer);
    glClear(_glClearBits);
    glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, tb.texture, 0);
    GLenum status = glCheckFramebufferStatus(GL_FRAMEBUFFER);
    if (status != GL_FRAMEBUFFER_COMPLETE)
        printf("Framebuffer status: %x\n", (int)status);
    tb.internalformat = internalformat;
    tb.format = format;
    tb.type = type;
    tb.w = w;
    tb.h = h;
}
The problem is that when I call
generateRenderToTexture(GL_RGBA, GL_RGBA, GL_UNSIGNED_BYTE, _maskTexture, _imageWidth, _imageHeight, false);
the code works fine. But if I use GL_LUMINANCE instead,
generateRenderToTexture(GL_LUMINANCE, GL_LUMINANCE, GL_UNSIGNED_BYTE, _maskTexture, _imageWidthOriginal, _imageHeightOriginal, false);
it fails, and I don't know why I can't use GL_LUMINANCE to define the FBO. Does anyone have any suggestions?
The only formats that are guaranteed to work as color FBO attachments in ES 2.0 are, according to table 4.5 in the spec document:
GL_RGBA4
GL_RGB5_A1
GL_RGB565
Support for rendering to GL_RGBA, which works for you, is not required by the standard. Many implementations support it, though. The OES_rgb8_rgba8 extension adds support for GL_RGB8 and GL_RGBA8 formats as render targets.
GL_LUMINANCE is not supported as a color-renderable format by the standard, and I can't find an extension for it either. It's possible that some implementations could support it, but you certainly can't count on it.
ES 3.0 lists GL_R8 as a color-renderable format. In ES 3.0, the RED/R formats replace the LUMINANCE/ALPHA formats from ES 2.0. So if you can move to ES 3.0, you get guaranteed support for rendering to one-component texture formats.
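For illustration, creating a one-component render target under ES 3.0 could look something like this (a minimal sketch; tex, fbo, w and h are placeholder names):
GLuint tex, fbo;
glGenTextures(1, &tex);
glBindTexture(GL_TEXTURE_2D, tex);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
// The sized internal format GL_R8 with transfer format GL_RED replaces GL_LUMINANCE.
glTexImage2D(GL_TEXTURE_2D, 0, GL_R8, w, h, 0, GL_RED, GL_UNSIGNED_BYTE, NULL);
glGenFramebuffers(1, &fbo);
glBindFramebuffer(GL_FRAMEBUFFER, fbo);
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, tex, 0);
// GL_R8 is color-renderable in ES 3.0, so this should report complete.
if (glCheckFramebufferStatus(GL_FRAMEBUFFER) != GL_FRAMEBUFFER_COMPLETE)
    printf("FBO incomplete\n");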
You're using the non-extension FBO functions, which were only introduced with OpenGL 3. So unlike the FBO extension functions (the ones ending in EXT), they are only available with an OpenGL 3 context. In OpenGL 3 the GL_LUMINANCE and GL_ALPHA texture internal formats are deprecated and not available in the core profile; they have been replaced by the GL_RED texture format. You can use an appropriately written shader, or texture swizzle parameters, to make a GL_RED texture work just like GL_LUMINANCE (swizzle dst.rgb = texture.r) or GL_ALPHA (swizzle dst.a = texture.r).
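For example, on desktop GL 3.3 or with ARB_texture_swizzle, the old luminance behaviour can be emulated like this (a sketch, assuming the GL_RED texture is currently bound):
// Replicate GL_LUMINANCE sampling: .rgb all read the red channel, .a reads as 1.0.
GLint swizzle[4] = { GL_RED, GL_RED, GL_RED, GL_ONE };
glTexParameteriv(GL_TEXTURE_2D, GL_TEXTURE_SWIZZLE_RGBA, swizzle);
// For GL_ALPHA behaviour use { GL_ZERO, GL_ZERO, GL_ZERO, GL_RED } instead.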
I solved it by using GL_RG_EXT or GL_RED_EXT instead.
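(For anyone else landing here: on ES 2.0 those enums come from the EXT_texture_rg extension, so it is worth guarding their use. A sketch, reusing the helper from the question:)
// Only use single-channel render targets if the extension is actually present.
const char* exts = (const char*)glGetString(GL_EXTENSIONS);
if (exts && strstr(exts, "GL_EXT_texture_rg"))
    generateRenderToTexture(GL_RED_EXT, GL_RED_EXT, GL_UNSIGNED_BYTE, _maskTexture, _imageWidth, _imageHeight, false);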
Related
I have a problem with OpenGL on macOS. The sample below works well on my Windows system, but on macOS the glPushAttrib, glDrawArrays, and glPopAttrib calls each raise GL_INVALID_OPERATION. Any idea what is going on?
I have read some topics about this issue, but nothing I found works on macOS, even though the same code is fine on Windows.
My Mac runs OS X El Capitan 10.11.6, and the compiler is Xcode 6.4.
//gen a texture
glGenTextures(1, &FilteredTexture);
glBindTexture(GL_TEXTURE_2D, FilteredTexture);
printf("Start glTexImage2D FilteredTexture...");
glTexImage2D(GL_TEXTURE_2D, 0, TexInterFormat, cols, rows, 0, TexFormat, DataType, NULL);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glBindTexture(GL_TEXTURE_2D, 0);
//gen a Renderbuffer
glGenRenderbuffers(1, &GlRenderbuffer);
glBindRenderbuffer(GL_RENDERBUFFER, GlRenderbuffer);
glRenderbufferStorage(GL_RENDERBUFFER, GL_DEPTH_COMPONENT24, cols, rows);
glBindRenderbuffer(GL_RENDERBUFFER, 0);
//gen a Framebuffer & attach
glGenFramebuffers(1, &GlFramebuffer);
glBindFramebuffer(GL_FRAMEBUFFER, GlFramebuffer);
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, FilteredTexture, 0);
glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_RENDERBUFFER, GlRenderbuffer);
glBindFramebuffer(GL_FRAMEBUFFER, 0);
//gen shaders program (ignore detail here)
program = loadProgramFromFile("empty.vert", "depth.frag", "quad.geom");
//do the computation
glBindFramebuffer(GL_FRAMEBUFFER, GlFramebuffer);
const static GLuint attachment_bufferss[] = { static_cast<GLuint>(GL_COLOR_ATTACHMENT0) };
glDrawBuffers(1, attachment_bufferss);
glPushAttrib(GL_VIEWPORT_BIT); //get GL_INVALID_OPERATION here
glViewport(0, 0, cols, rows);
glClearColor(0, 0, 0, 0);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glUseProgram(1);
glDrawArrays(GL_POINTS, 0, 1); //get GL_INVALID_OPERATION here
glDrawBuffers(1, attachment_bufferss);
glBindFramebuffer(GL_FRAMEBUFFER, 0);
glUseProgram(0);
glPopAttrib(); //get GL_INVALID_OPERATION here
glFinish();
full code can be downloaded here: https://1drv.ms/u/s!Au9Donvb28A9hgeor0v94Q5hKc0e
The errors occur because you're trying to use deprecated functions (glPushAttrib, glPopAttrib) in a Core profile context. To fix this, replace the line
glPushAttrib(GL_VIEWPORT_BIT);
with
GLint vp [4];
glGetIntegerv (GL_VIEWPORT, vp);
and
glPopAttrib();
with
glViewport( vp[0], vp[1], vp[2], vp[3] );
The code first copies your current viewport into an array, then restores the viewport from the array.
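Put together, a core-profile-safe version of that part of the loop would look something like this (a sketch using your variable names):
GLint vp[4];
glGetIntegerv(GL_VIEWPORT, vp);          // save the current viewport (replaces glPushAttrib)
glViewport(0, 0, cols, rows);            // render into the FBO at its own size
// ... draw ...
glViewport(vp[0], vp[1], vp[2], vp[3]);  // restore it afterwards (replaces glPopAttrib)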
I'm working on a school project and I've come across an issue with my FBO.
The game is rendered in 2 passes:
1) I render to the shadow map texture using an FBO.
2) I render the scene normally to the default framebuffer.
The issue is that, for some reason, binding the FBO for the first pass slows my game down by roughly 200 fps. I really don't know what could be wrong with the FBO, since it's as barebones as possible.
If I render the shadow map without binding the FBO, i.e. directly to the screen, it runs about 200 fps faster, so it isn't an issue with the rendering itself; I believe it's an issue with binding the FBO.
Anyway, here's the first-pass function.
//renders to the depth buffer texture.
void firstPass()
{
    static GLuint shadowID = Resources::getInstance().shadowShader;
    shadowBuffer->bindForWriting(); //make the shadow buffer the active framebuffer
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
    glViewport(0, 0, shadowBuffer->TEXTURE_WIDTH, shadowBuffer->TEXTURE_HEIGHT);
    //render to the shadow map from the point of view of the sunlight.
    glUseProgram(shadowID);
    // ... set some uniforms
    scene->render(shadowID);
    shadowBuffer->unBindForWriting(); //set framebuffer back to default
    glUseProgram(0);
}
And here is my FBO class in its entirety.
class ShadowMapFBO
{
public:
    ShadowMapFBO()
    {
        init();
    }
    void bindForWriting()
    {
        glBindFramebuffer(GL_DRAW_FRAMEBUFFER, FBO);
    }
    void unBindForWriting()
    {
        glBindFramebuffer(GL_DRAW_FRAMEBUFFER, 0);
    }
    void bindForReading(GLenum TextureUnit, GLuint texture)
    {
        glActiveTexture(TextureUnit);
        glBindTexture(GL_TEXTURE_2D, texture);
    }
    void BindTexture(GLuint textureID, GLuint location)
    {
        glUniform1i(textureID, location);
    }
    void unBindForReading()
    {
        glBindTexture(GL_TEXTURE_2D, 0);
    }
    GLuint shadowTexture;
    int TEXTURE_WIDTH = 2048;
    int TEXTURE_HEIGHT = 2048;
    GLuint FBO;
    void init()
    {
        glGenFramebuffers(1, &FBO);
        glBindFramebuffer(GL_FRAMEBUFFER, FBO);
        shadowTexture = Util::createTexture(TEXTURE_WIDTH, TEXTURE_HEIGHT, true);
        glFramebufferTexture(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, shadowTexture, 0);
        glDrawBuffer(GL_NONE);
        glReadBuffer(GL_NONE);
        GLenum Status = glCheckFramebufferStatus(GL_FRAMEBUFFER);
        if (Status != GL_FRAMEBUFFER_COMPLETE)
            printf("FB error, status: 0x%x\n", Status);
        else
            printf("Shadow Buffer created successfully.\n");
        glBindFramebuffer(GL_FRAMEBUFFER, 0);
    }
};
Here's how I create my depth buffer texture.
static GLuint createTexture(int width, int height)
{
    GLuint textureId;
    glGenTextures(1, &textureId);
    glBindTexture(GL_TEXTURE_2D, textureId);
    glTexImage2D(GL_TEXTURE_2D, 0, GL_DEPTH_COMPONENT, width, height, 0, GL_DEPTH_COMPONENT, GL_FLOAT, NULL);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
    glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP);
    glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP);
    int i = glGetError();
    if (i != 0)
    {
        std::cout << "Error happened while loading the texture: " << i << std::endl;
    }
    glBindTexture(GL_TEXTURE_2D, 0);
    return textureId;
}
I'm trying to draw to a texture (and then use that texture on an object) with GLKit, but I'm receiving GL ERROR: 0x0502, which is GL_INVALID_OPERATION. The thing is, the error is fired somewhere inside the effect's prepareToDraw method. The vertex arrays seem to be set up correctly, since I can draw to the default framebuffer with no problem using the same setup. Is there something I'm missing?
GLint defaultFBO;
glGetIntegerv(GL_FRAMEBUFFER_BINDING, &defaultFBO);
GLenum status;
glBindFramebuffer(GL_FRAMEBUFFER, _boundsTextureFramebuffer);
glBindTexture(GL_TEXTURE_2D, self.backgroundTexture.name);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8_OES, 1024, 1024, 0, GL_RGBA, GL_UNSIGNED_BYTE, NULL);
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, self.backgroundTexture.name, 0);
status = glCheckFramebufferStatus(GL_FRAMEBUFFER);
if (status != GL_FRAMEBUFFER_COMPLETE)
{
    NSLog(@"Failed to initialize the texture framebuffer");
}
effect.constantColor = [self.color vectorValue];
[effect prepareToDraw];
glBindVertexArrayOES(self.vertexArray);
glDrawElements(_data.mode, (GLsizei)self.data.indicesCount, GL_UNSIGNED_INT, (void*)0);
glBindVertexArrayOES(0);
glBindFramebuffer(GL_FRAMEBUFFER, (GLuint)defaultFBO);
You're missing one important thing from the code you've posted: the render buffer. Adding one might solve your problem. Here is an example off the top of my head:
GLint defaultFBO;
glGetIntegerv(GL_FRAMEBUFFER_BINDING, &defaultFBO);
GLint defaultRBO;
glGetIntegerv(GL_RENDERBUFFER_BINDING, &defaultRBO);
glGenFramebuffers(1, &_boundsTextureFramebuffer);
glBindFramebuffer(GL_FRAMEBUFFER, _boundsTextureFramebuffer);
glGenRenderbuffers(1, &_boundsTextureRenderbuffer);
glBindRenderbuffer(GL_RENDERBUFFER, _boundsTextureRenderbuffer);
glRenderbufferStorage(GL_RENDERBUFFER, GL_RGBA4, width, height);
glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_RENDERBUFFER, _boundsTextureRenderbuffer);
...
glBindFramebuffer(GL_FRAMEBUFFER, (GLuint)defaultFBO);
glBindRenderbuffer(GL_RENDERBUFFER, (GLuint)defaultRBO);
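Note that the GL_RGBA4 used for the renderbuffer storage here is one of the formats ES 2.0 guarantees to be color-renderable (see the format list in the first answer above).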
I'm using C++/Marmalade to make a game for iOS, but sometimes textures render corrupted.
Here is the source texture file:
Example of a corrupted texture:
I'm loading the texture with this code:
VGTexture2D* VGTextureLoader::loadImage(std::string imagefile)
{
    CIwImage img;
    img.LoadFromFile(imagefile.c_str());
    // Convert to an OpenGL ES native format
    CIwImage nativeImg;
    nativeImg.SetFormat(CIwImage::ABGR_8888);
    img.ConvertToImage(&nativeImg);
    // Generate texture object
    GLuint texture;
    glGenTextures(1, &texture);
    glBindTexture(GL_TEXTURE_2D, texture);
    // Upload
    uint32 width = img.GetWidth();
    uint32 height = img.GetHeight();
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, width, height, 0, GL_RGBA, GL_UNSIGNED_BYTE, nativeImg.GetTexels());
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
    // Create and return texture
    VGTexture2D* tex = new VGTexture2D(texture, (float)width, (float)height);
    return tex;
}
Your textures aren't being corrupted, but the channels do seem to be flipped. Could it be because you are converting the image to ABGR_8888 but then uploading it as GL_RGBA?
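If Marmalade doesn't offer a conversion format that matches GL_RGBA's byte order, one option is to swap the channels yourself before uploading. A rough sketch, assuming GetTexels() returns tightly packed pixels whose bytes are literally in A,B,G,R order:
// Reorder ABGR bytes to RGBA in place before calling glTexImage2D.
uint8* p = (uint8*)nativeImg.GetTexels();
for (uint32 i = 0; i < width * height; ++i, p += 4)
{
    uint8 t = p[0]; p[0] = p[3]; p[3] = t; // swap A and R
    t = p[1]; p[1] = p[2]; p[2] = t;       // swap B and G
}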
Here's my render loop:
1) Bind a custom FBO.
2) Bind a texture (previously attached to the FBO as COLOR_ATTACHMENT0).
3) Render using a custom fragment shader that chooses fragment colors on the basis of a fractal algorithm. (If a fragment is not used by the algorithm, it is assigned the color black.)
4) Rebind the window-provided framebuffer and renderbuffer (on iOS 5, this is the [view bindDrawable] method).
5) Clear the screen to white.
6) Render the FBO texture into a frame that is substantially smaller than the window itself.
Expected result:
The fractal should appear in the smaller frame. The frame should have a black background. The rest of the screen should be white.
Current result:
The entire screen is taken up by the fractal, as if I were rendering both to the window-provided FBO and to my custom FBO/texture.
I don't really know what I'm doing wrong, so I'd be grateful for any help.
EDIT:
Fragment Shader:
void main()
{
    highp vec2 pix = gl_FragCoord.xy;
    lowp vec4 color = vec4(0.0, 0.0, 0.0, 0.0);
    //determine if the fragment is part of the fractal using the algorithm
    //if yes, change the value of the color vec4
    gl_FragColor = color;
}
FBO Initialization:
//First create and bind the frame buffer
glGenFramebuffers(1, &frameBuffer);
glBindFramebuffer(GL_FRAMEBUFFER, frameBuffer);
//Create a texture
glGenTextures(1, &texture);
glBindTexture(GL_TEXTURE_2D, texture);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, width, height, 0, GL_RGBA, GL_UNSIGNED_BYTE, 0);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
//Attach the texture to the framebuffer
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, texture, 0);
GLenum status = glCheckFramebufferStatus(GL_FRAMEBUFFER);
if (status != GL_FRAMEBUFFER_COMPLETE) {
    printf("\n Incomplete Frame Buffer!");
}
Render Loop:
{
    glBindFramebuffer(GL_FRAMEBUFFER, frameBuffer);
    glBindRenderbuffer(GL_RENDERBUFFER, texture);
    glViewport(0, 0, width, height);
    //Call the fractal render: a plane of 4 vertices drawn with GL_TRIANGLE_STRIP, which invokes the fragment shader above.
    [self.view bindDrawable];
    glClearColor(1.0, 1.0, 1.0, 1.0);
    matrix4x4 mvp = multiplyMatrices(projectionMatrix, modelView);
    glUseProgram(shaderId);
    glBindBuffer(GL_ARRAY_BUFFER, vertexBuffer);
    glVertexAttribPointer(positionLocation, 4, GL_FLOAT, GL_FALSE, 0, NULL);
    glBindBuffer(GL_ARRAY_BUFFER, texelBuffer);
    glVertexAttribPointer(texelLocation, 2, GL_FLOAT, GL_FALSE, 0, NULL);
    glUniformMatrix4fv(mvpLocation, 1, 0, mvp.val);
    glActiveTexture(GL_TEXTURE0);
    glBindTexture(GL_TEXTURE_2D, textureId);
    glUniform1i(textureLocation, 0);
    glDrawArrays(GL_TRIANGLE_STRIP, 0, tVertices);
}
I hope this helps. Please let me know if you'd like any more information on what I'm doing.
I also noticed something very strange happening.
If, after binding the FBO, I try
glClear(GL_COLOR_BUFFER_BIT);
glClearColor(1.0,0.0,0.0,0.5);
...it is the window-provided framebuffer on which the clear actually happens.
Thank you guys for helping.