Render OpenGL ES 2.0 to image

I am trying to do some OpenGL ES 2.0 rendering to an image file, independent of the rendering being shown on the screen to the user. The image I'm rendering to is a different size than the user's screen. I just need a byte array of GL_RGB data. I'm familiar with glReadPixels, but I don't think it would do the trick in this case since I'm not pulling from an already-rendered user screen.
Pseudocode:
// Switch rendering to another buffer (framebuffer? renderbuffer?)
// Draw code here
// Save byte array of rendered data GL_RGB to file
// Switch rendering back to user's screen.
How can I do this without interrupting the user's display? I'd rather not have to flicker the user's screen, drawing my desired information for a single frame, glReadPixel-ing and then having it disappear.
Again, I don't want it to show anything to the user. Here's my code; it doesn't work. Am I missing something?
unsigned int canvasFrameBuffer;
bglGenFramebuffers(1, &canvasFrameBuffer);
bglBindFramebuffer(BGL_RENDERBUFFER, canvasFrameBuffer);
unsigned int canvasRenderBuffer;
bglGenRenderbuffers(1, &canvasRenderBuffer);
bglBindRenderbuffer(BGL_RENDERBUFFER, canvasRenderBuffer);
bglRenderbufferStorage(BGL_RENDERBUFFER, BGL_RGBA4, width, height);
bglFramebufferRenderbuffer(BGL_FRAMEBUFFER, BGL_COLOR_ATTACHMENT0, BGL_RENDERBUFFER, canvasRenderBuffer);
unsigned int canvasTexture;
bglGenTextures(1, &canvasTexture);
bglBindTexture(BGL_TEXTURE_2D, canvasTexture);
bglTexImage2D(BGL_TEXTURE_2D, 0, BGL_RGB, width, height, 0, BGL_RGB, BGL_UNSIGNED_BYTE, 0);
bglFramebufferTexture2D(BGL_FRAMEBUFFER, BGL_COLOR_ATTACHMENT0, BGL_TEXTURE_2D, canvasTexture, 0);
Matrix::matrix_t identity;
Matrix::LoadIdentity(&identity);
bglClearColor(1.0f, 1.0f, 1.0f, 1.0f);
bglClear(BGL_COLOR_BUFFER_BIT);
Draw(&identity, &identity, this);
bglFlush();
bglFinish();
byte *buffer = (byte*)Z_Malloc(width * height * 4, ZT_STATIC);
bglReadPixels(0, 0, width, height, BGL_RGB, BGL_UNSIGNED_BYTE, buffer);
SaveTGA("canvas.tga", buffer, width, height);
Z_Free(buffer);
// unbind frame buffer
bglBindRenderbuffer(BGL_RENDERBUFFER, 0);
bglBindFramebuffer(BGL_FRAMEBUFFER, 0);
bglDeleteTextures(1, &canvasTexture);
bglDeleteRenderbuffers(1, &canvasRenderBuffer);
bglDeleteFramebuffers(1, &canvasFrameBuffer);

Here's the solution, for anybody who needs it:
// Create framebuffer
unsigned int canvasFrameBuffer;
glGenFramebuffers(1, &canvasFrameBuffer);
glBindFramebuffer(GL_FRAMEBUFFER, canvasFrameBuffer);
// Attach renderbuffer
unsigned int canvasRenderBuffer;
glGenRenderbuffers(1, &canvasRenderBuffer);
glBindRenderbuffer(GL_RENDERBUFFER, canvasRenderBuffer);
glRenderbufferStorage(GL_RENDERBUFFER, GL_RGBA4, width, height);
glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_RENDERBUFFER, canvasRenderBuffer);
// Clear the target (optional)
glClearColor(1.0f, 1.0f, 1.0f, 1.0f);
glClear(GL_COLOR_BUFFER_BIT);
// Draw whatever you want here
char *buffer = (char*)malloc(width * height * 3);
glReadPixels(0, 0, width, height, GL_RGB, GL_UNSIGNED_BYTE, buffer);
SaveTGA("canvas.tga", buffer, width, height); // Your own function to save the image data to a file (in this case, a TGA)
free(buffer);
// unbind frame buffer
glBindRenderbuffer(GL_RENDERBUFFER, 0);
glBindFramebuffer(GL_FRAMEBUFFER, 0);
glDeleteRenderbuffers(1, &canvasRenderBuffer);
glDeleteFramebuffers(1, &canvasFrameBuffer);
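One portability caveat with the readback above: core OpenGL ES 2.0 only guarantees glReadPixels with GL_RGBA / GL_UNSIGNED_BYTE plus one implementation-defined format/type pair, so the GL_RGB read may not be accepted on every device. A minimal sketch of checking first (reusing width, height and a large-enough buffer from the snippet above):
// Query which extra format/type pair this implementation supports for glReadPixels
GLint readFormat = 0, readType = 0;
glGetIntegerv(GL_IMPLEMENTATION_COLOR_READ_FORMAT, &readFormat);
glGetIntegerv(GL_IMPLEMENTATION_COLOR_READ_TYPE, &readType);
if (readFormat == GL_RGB && readType == GL_UNSIGNED_BYTE) {
    // 3 bytes per pixel, as in the code above
    glReadPixels(0, 0, width, height, GL_RGB, GL_UNSIGNED_BYTE, buffer);
} else {
    // GL_RGBA / GL_UNSIGNED_BYTE is always available; buffer must hold width * height * 4 bytes
    glReadPixels(0, 0, width, height, GL_RGBA, GL_UNSIGNED_BYTE, buffer);
}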

You can render to a texture, read the pixels back, and then draw a quad with that texture (if you want to show the result to the user). It should not flicker, but it obviously costs some performance.
On iOS for example:
OpenGL ES Render to Texture
Reading a openGL ES texture to a raw array
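For reference, a minimal ES 2.0 sketch of that texture-attachment variant (texture parameters matter here because non-power-of-two textures in ES 2.0 require CLAMP_TO_EDGE wrapping and non-mipmapped filtering; error handling is omitted):
GLuint fbo, colorTex;
glGenTextures(1, &colorTex);
glBindTexture(GL_TEXTURE_2D, colorTex);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, width, height, 0, GL_RGBA, GL_UNSIGNED_BYTE, NULL);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glGenFramebuffers(1, &fbo);
glBindFramebuffer(GL_FRAMEBUFFER, fbo);
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, colorTex, 0);
if (glCheckFramebufferStatus(GL_FRAMEBUFFER) == GL_FRAMEBUFFER_COMPLETE) {
    glViewport(0, 0, width, height);
    // ... draw the offscreen scene here, then glReadPixels as above if a CPU copy is needed ...
}
// restore the on-screen framebuffer (and viewport) before drawing the visible frame;
// colorTex can now be sampled by a quad drawn to the screen
glBindFramebuffer(GL_FRAMEBUFFER, 0);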

Related

Read data from texture with any internal format on ES

I am trying to read the data of a texture on OpenGL ES. The problem with my method is that framebuffers do not accept textures with GL_ALPHA as format. If the texture has GL_RGBA as format, everything works fine. I do not want to change the texture format to RGBA, so is there another way to read the texture data as GL_RGBA format even if the texture has the GL_ALPHA format?
I have created a texture with this:
glTexImage2D(GL_TEXTURE_2D, 0, GL_ALPHA, width, height, 0, GL_ALPHA, GL_UNSIGNED_BYTE, null);
I am trying to read the data with a framebuffer with a texture attachment and glReadPixels
ByteBuffer pixels = memAlloc(4 * width * height); // This line is Java-specific; it works like a byte array
glBindTexture(GL_TEXTURE_2D, texture);
int fbo = glGenFramebuffers();
glBindFramebuffer(GL_FRAMEBUFFER, fbo);
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, texture, 0);
// glCheckFramebufferStatus(GL_FRAMEBUFFER) returns GL_FRAMEBUFFER_INCOMPLETE_ATTACHMENT after this line
glReadPixels(0, 0, width, height, GL_RGBA, GL_UNSIGNED_BYTE, pixels);
glBindFramebuffer(GL_FRAMEBUFFER, 0);
glDeleteFramebuffers(fbo);
glBindTexture(GL_TEXTURE_2D, 0);
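One workaround that is sometimes used for this (sketched below in C rather than Java; texture, width, height and pixels correspond to the names in the question, and a trivial blit shader program is assumed to have been compiled and bound elsewhere) is to draw the GL_ALPHA texture into an RGBA-renderable color attachment and read that back; the alpha value then arrives through whichever channels the fragment shader writes:
// Attach an RGBA texture as the color buffer (RGBA attachments are reported as working above)
GLuint fbo, rgbaTex;
glGenTextures(1, &rgbaTex);
glBindTexture(GL_TEXTURE_2D, rgbaTex);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, width, height, 0, GL_RGBA, GL_UNSIGNED_BYTE, NULL);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glGenFramebuffers(1, &fbo);
glBindFramebuffer(GL_FRAMEBUFFER, fbo);
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, rgbaTex, 0);
glViewport(0, 0, width, height);
glBindTexture(GL_TEXTURE_2D, texture);   // the original GL_ALPHA texture, used as shader input
// ... draw a full-screen quad with a fragment shader along the lines of
//         gl_FragColor = vec4(texture2D(uTex, vUV).a);
//     so the alpha channel is written into every channel of the RGBA target ...
glReadPixels(0, 0, width, height, GL_RGBA, GL_UNSIGNED_BYTE, pixels);
glBindFramebuffer(GL_FRAMEBUFFER, 0);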

Scale OpenGL texture and return bitmap in CPU memory

I have a texture on the GPU defined by an OpenGL textureID and target.
For further processing I need a bitmap in CPU memory that is 300 pixels wide, with the height scaled proportionally to keep the source aspect ratio.
The pixel format should be RGBA, ARGB or BGRA with float components.
How can this be done?
Thanks for your reply.
I tried the following, but I get only white pixels back:
glEnable(GL_TEXTURE_2D);
// create render texture
GLuint renderedTexture;
glGenTextures(1, &renderedTexture);
glBindTexture(GL_TEXTURE_2D, renderedTexture);
glTexImage2D(GL_TEXTURE_2D, 0,GL_RGB, (GLsizei)analyzeWidth, (GLsizei)analyzeHeight, 0,GL_RGBA, GL_FLOAT, 0);
unsigned int fbo;
glGenFramebuffers(1, &fbo);
glBindFramebuffer(GL_FRAMEBUFFER, fbo);
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, renderedTexture, 0);
// draw texture
glBindTexture(GL_TEXTURE_2D, inTextureId);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
// Draw a textured quad
glBegin(GL_QUADS);
glTexCoord2f(0, 0); glVertex3f(0, 0, 0);
glTexCoord2f(0, 1); glVertex3f(0, 1, 0);
glTexCoord2f(1, 1); glVertex3f(1, 1, 0);
glTexCoord2f(1, 0); glVertex3f(1, 0, 0);
glEnd();
GLenum status = glCheckFramebufferStatus(GL_FRAMEBUFFER);
if(status == GL_FRAMEBUFFER_COMPLETE)
{
}
unsigned char *buffer = CGBitmapContextGetData(mainCtx);
glReadPixels(0, 0, (GLsizei)analyzeWidth, (GLsizei)analyzeHeight, GL_RGBA, GL_FLOAT, buffer);
glDisable(GL_TEXTURE_2D);
glBindFramebuffer(GL_FRAMEBUFFER, 0); //
glDeleteFramebuffers(1, &fbo);
Create a second texture with the size 300*width/height x 300
Create a Framebuffer Object and attach the new texture as color buffer.
Set appropriate texture filters for the (unscaled) source texture. You have the choice between point sampling (GL_NEAREST) and bilinear filtering (GL_LINEAR). If you are downscaling by more than a factor of 2, you might also consider mipmapping: call glGenerateMipmap on the source texture first and use one of the GL_..._MIPMAP_... minification filters. However, the availability of mipmapping depends on how the source texture was created; if it is an immutable texture object without the mipmap pyramid, this won't work.
Render a textured object (with the original source texture) to the new texture. The most intuitive geometry is a viewport-filling rectangle; the most efficient is a single triangle.
Read back the scaled texture with glReadPixels (via the FBO) or glGetTexImage (directly from the texture). For improved performance, you might consider asynchronous readbacks via Pixel Buffer Objects.
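Putting those steps together, a rough sketch in C (immediate-mode desktop GL to match the question's code; inTextureId is the source texture from the question, srcWidth/srcHeight are assumed to hold its dimensions, and completeness failure handling plus cleanup are omitted):
GLsizei outWidth  = 300;
GLsizei outHeight = (GLsizei)(300.0f * srcHeight / srcWidth);   // keep the source aspect ratio
// 1. the scaled target texture
GLuint dstTex;
glGenTextures(1, &dstTex);
glBindTexture(GL_TEXTURE_2D, dstTex);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, outWidth, outHeight, 0, GL_RGBA, GL_UNSIGNED_BYTE, NULL);
// 2. FBO with that texture as its color buffer
GLuint fbo;
glGenFramebuffers(1, &fbo);
glBindFramebuffer(GL_FRAMEBUFFER, fbo);
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, dstTex, 0);
if (glCheckFramebufferStatus(GL_FRAMEBUFFER) == GL_FRAMEBUFFER_COMPLETE)
{
    // 3. filtering is set on the *source* texture, which is what gets sampled
    glBindTexture(GL_TEXTURE_2D, inTextureId);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
    // 4. draw a viewport-filling quad; identity matrices so -1..1 covers the whole target
    glViewport(0, 0, outWidth, outHeight);
    glMatrixMode(GL_PROJECTION); glLoadIdentity();
    glMatrixMode(GL_MODELVIEW);  glLoadIdentity();
    glEnable(GL_TEXTURE_2D);
    glBegin(GL_QUADS);
    glTexCoord2f(0, 0); glVertex2f(-1, -1);
    glTexCoord2f(1, 0); glVertex2f( 1, -1);
    glTexCoord2f(1, 1); glVertex2f( 1,  1);
    glTexCoord2f(0, 1); glVertex2f(-1,  1);
    glEnd();
    // 5. read back; asking for GL_FLOAT makes GL convert the 8-bit values to floats in [0,1]
    float *pixels = (float*)malloc(outWidth * outHeight * 4 * sizeof(float));
    glReadPixels(0, 0, outWidth, outHeight, GL_RGBA, GL_FLOAT, pixels);
    // ... use pixels, then free(pixels) ...
}
glBindFramebuffer(GL_FRAMEBUFFER, 0);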

glReadPixels always returns a black image

Some time ago I wrote code to draw an OpenGL scene to a bitmap in Delphi RAD Studio XE7, and it worked well. The code draws and finalizes a scene, then gets the pixels using the glReadPixels function. I recently tried to compile exactly the same code in Lazarus, but I get only a black image.
Here is the code
// create main render buffer
glGenFramebuffers(1, @m_OverlayFrameBuffer);
glBindFramebuffer(GL_FRAMEBUFFER, m_OverlayFrameBuffer);
// create and link color buffer to render to
glGenRenderbuffers(1, @m_OverlayRenderBuffer);
glBindRenderbuffer(GL_RENDERBUFFER, m_OverlayRenderBuffer);
glRenderbufferStorage(GL_RENDERBUFFER, GL_RGBA8, width, height);
glFramebufferRenderbuffer(GL_FRAMEBUFFER,
GL_COLOR_ATTACHMENT0,
GL_RENDERBUFFER,
m_OverlayRenderBuffer);
// create and link depth buffer to use
glGenRenderbuffers(1, @m_OverlayDepthBuffer);
glBindRenderbuffer(GL_RENDERBUFFER, m_OverlayDepthBuffer);
glRenderbufferStorage(GL_RENDERBUFFER, GL_DEPTH_COMPONENT, width, height);
glFramebufferRenderbuffer(GL_FRAMEBUFFER,
GL_DEPTH_ATTACHMENT,
GL_RENDERBUFFER,
m_OverlayDepthBuffer);
// check if render buffers were created correctly and return result
Result := (glCheckFramebufferStatus(GL_FRAMEBUFFER) = GL_FRAMEBUFFER_COMPLETE);
...
// flush OpenGL
glFinish;
glPixelStorei(GL_PACK_ALIGNMENT, 4);
glPixelStorei(GL_PACK_ROW_LENGTH, 0);
glPixelStorei(GL_PACK_SKIP_ROWS, 0);
glPixelStorei(GL_PACK_SKIP_PIXELS, 0);
// create pixels buffer
SetLength(pixels, (m_pOwner.ClientWidth * m_Factor) * (m_pOwner.ClientHeight * m_Factor) * 4);
// is alpha blending or antialiasing enabled?
if (m_Transparent or (m_Factor <> 1)) then
// notify that pixels will be read from color buffer
glReadBuffer(GL_COLOR_ATTACHMENT0);
// copy scene from OpenGL to pixels buffer
glReadPixels(0,
0,
m_pOwner.ClientWidth * m_Factor,
m_pOwner.ClientHeight * m_Factor,
GL_RGBA,
GL_UNSIGNED_BYTE,
pixels);
I have already verified, and I am 100% sure, that something is really drawn in my scene (on the Lazarus side as well), that GLext is initialized correctly, and that the framebuffer is built correctly (the condition glCheckFramebufferStatus(GL_FRAMEBUFFER) = GL_FRAMEBUFFER_COMPLETE does return true). I would be very grateful if someone could point out what I'm doing wrong in my code, keeping in mind that on the same computer it works well in Delphi RAD Studio XE7 but not in Lazarus.
Regards

Save image from OpenGL (PyQt) in high resolution

I have a PyQt application in which a QtOpenGL.QGLWidget shows a wind turbine.
I would like to save the turbine as a high-resolution image, but so far I am only able to save the rendered screen image, i.e. in my case 1680x1050 minus borders, toolbars, etc.
glPixelStorei(GL_PACK_ALIGNMENT, 1)
data = glReadPixels(0, 0, self.width, self.height, GL_RGBA, GL_UNSIGNED_BYTE)
How can I get around this limitation?
EDIT
I have tried using a framebuffer,
from __future__ import division
import OpenGL
from OpenGL.GL import *
from OpenGL.GLU import *
from OpenGL.GLUT import *
from PIL import Image
import time, sys
import numpy as np
WIDTH = 400
HEIGHT = 300
def InitGL():
glMatrixMode(GL_PROJECTION)
gluPerspective(45.0, float(WIDTH) / float(HEIGHT), 0.1, 100.0)
glMatrixMode(GL_MODELVIEW)
def DrawGLScene():
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
glLoadIdentity()
glTranslatef(0, 0., -3)
glutWireTeapot(1)
glFlush()
def capture_screen():
DrawGLScene()
glPixelStorei(GL_PACK_ALIGNMENT, 1)
data = glReadPixels(0, 0, WIDTH, HEIGHT, GL_RGBA, GL_UNSIGNED_BYTE)
image = Image.fromstring("RGBA", (WIDTH, HEIGHT), data)
image.transpose(Image.FLIP_TOP_BOTTOM).show()
def capture_fbo(width=800, height=600):
fbo = glGenFramebuffers(1)
render_buf = glGenRenderbuffers(1)
glBindRenderbuffer(GL_RENDERBUFFER, render_buf)
glRenderbufferStorage(GL_RENDERBUFFER, GL_RGBA, width, height);
glBindFramebuffer(GL_FRAMEBUFFER, fbo)
glFramebufferRenderbuffer(GL_DRAW_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_RENDERBUFFER, render_buf);
glBindFramebuffer(GL_DRAW_FRAMEBUFFER, fbo);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
glMatrixMode(GL_MODELVIEW)
glLoadIdentity()
glTranslatef(1, 1., -3)
glScale(width / WIDTH, height / HEIGHT, 1)
glutWireTeapot(1.0)
glFlush()
glReadBuffer(GL_COLOR_ATTACHMENT0);
data = glReadPixels(0, 0, width, height, GL_RGBA, GL_UNSIGNED_BYTE)
glBindFramebuffer(GL_DRAW_FRAMEBUFFER, 0);
image = Image.fromstring("RGBA", (width, height), data)
image.transpose(Image.FLIP_TOP_BOTTOM).show()
glDeleteFramebuffers(1, [fbo]);
glDeleteRenderbuffers(1, [render_buf]);
glutInit(sys.argv)
glutInitDisplayMode(GLUT_RGBA | GLUT_DEPTH)
glutInitWindowSize(WIDTH, HEIGHT)
window = glutCreateWindow("")
glutDisplayFunc(DrawGLScene)
InitGL()
DrawGLScene()
capture_screen()
capture_fbo()
glutMainLoop()
but nothing is drawn in the areas outside the normal screen window area.
The way glReadPixels works is that it reads the pixels from the currently bound framebuffer.
Not sure what a framebuffer is? Here's the wiki page: http://www.opengl.org/wiki/Framebuffer_Object
A quick explanation: you can think of a framebuffer as a "canvas" that your graphics card draws on. You can have as many framebuffers as you want (sort of... most drivers define a limit somewhere), but only one is displayed. That is the default framebuffer, and it can only be as large as the window you are displaying on screen.
Here's a tutorial on making a framebuffer: http://www.lighthouse3d.com/tutorials/opengl-short-tutorials/opengl_framebuffer_objects/
Modify it so that the framebuffer you create has the resolution you want for the screenshot.
Once you have the framebuffer set up, you can draw to it like so:
glBindFramebuffer(GL_FRAMEBUFFER, buffer); // GL_FRAMEBUFFER binds it for both drawing and reading
/* drawing code */
glReadPixels(0, 0, width, height, GL_RGBA, GL_UNSIGNED_BYTE, pixels);
If you still want to see something on screen, you will need to draw to the default framebuffer after drawing to the new framebuffer:
glBindFramebuffer(GL_DRAW_FRAMEBUFFER, 0); //"0" means return to default framebuffer
/* same drawing code */
Note that this means you will be drawing your entire scene twice! Make sure your framerate can handle it.
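A compact sketch of that two-pass flow in plain C-style GL calls (fbo is assumed to have been created at hiWidth x hiHeight, which may be larger than the window; DrawScene, pixels and the window dimensions are placeholders):
glBindFramebuffer(GL_FRAMEBUFFER, fbo);          // bind the offscreen target for drawing and reading
glViewport(0, 0, hiWidth, hiHeight);             // the FBO is not limited to the window size
DrawScene();                                     // first pass: high resolution, offscreen
glReadPixels(0, 0, hiWidth, hiHeight, GL_RGBA, GL_UNSIGNED_BYTE, pixels);
glBindFramebuffer(GL_FRAMEBUFFER, 0);            // back to the default (window) framebuffer
glViewport(0, 0, windowWidth, windowHeight);
DrawScene();                                     // second pass: what the user actually sees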

How to efficiently copy depth buffer to texture on OpenGL ES

I'm trying to get some shadowing effects to work in OpenGL ES 2.0 on iOS by porting some code from standard GL. Part of the sample involves copying the depth buffer to a texture:
glBindTexture(GL_TEXTURE_2D, g_uiDepthBuffer);
glCopyTexImage2D(GL_TEXTURE_2D, 0, GL_DEPTH_COMPONENT, 0, 0, 800, 600, 0);
However, it appears that glCopyTexImage2D with GL_DEPTH_COMPONENT is not supported on ES. Reading a related thread, it seems I can use the framebuffer and fragment shaders to extract the depth data. So I'm trying to write the depth component to the color buffer, then copy it:
// clear everything
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT | GL_STENCIL_BUFFER_BIT);
// turn on depth rendering
glUseProgram(m_BaseShader.uiId);
// this is a switch to cause the fragment shader to just dump out the depth component
glUniform1i(uiBaseShaderRenderDepth, true);
// and for this, the color buffer needs to be on
glColorMask(GL_TRUE,GL_TRUE,GL_TRUE,GL_TRUE);
// and clear it to 1.0, like how the depth buffer starts
glClearColor(1.0f, 1.0f, 1.0f, 1.0f);
glClear(GL_COLOR_BUFFER_BIT);
// draw the scene
DrawScene();
// bind our texture
glBindTexture(GL_TEXTURE_2D, g_uiDepthBuffer);
glCopyTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, 0, 0, width, height, 0);
Here is the fragment shader:
uniform sampler2D sTexture;
uniform bool bRenderDepth;
varying lowp float LightIntensity;
varying mediump vec2 TexCoord;
void main()
{
if(bRenderDepth) {
gl_FragColor = vec4(vec3(gl_FragCoord.z), 1.0);
} else {
gl_FragColor = vec4(texture2D(sTexture, TexCoord).rgb * LightIntensity, 1.0);
}
}
I have experimented with not having the 'bRenderDepth' branch, and it doesn't speed it up significantly.
Right now, pretty much just doing this step, it's at 14 fps, which obviously is not acceptable. If I pull out the copy, it's way above 30 fps. I'm getting two suggestions from the Xcode OpenGL ES analyzer on the copy command:
file://localhost/Users/xxxx/Documents/Development/xxxx.mm: error:
Validation Error: glCopyTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, 0, 0,
960, 640, 0) : Height<640> is not a power of two
file://localhost/Users/xxxx/Documents/Development/xxxx.mm: warning:
GPU Wait on Texture: Your app updated a texture that is currently
used for rendering. This caused the CPU to wait for the GPU to
finish rendering.
I'll work on resolving the two issues above (perhaps they are the crux of it). In the meantime, can anyone suggest a more efficient way to pull that depth data into a texture?
Thanks in advance!
iOS devices generally support OES_depth_texture, so on devices where the extension is present, you can set up a framebuffer object with a depth texture as its only attachment:
GLuint g_uiDepthBuffer;
glGenTextures(1, &g_uiDepthBuffer);
glBindTexture(GL_TEXTURE_2D, g_uiDepthBuffer);
glTexImage2D(GL_TEXTURE_2D, 0, GL_DEPTH_COMPONENT, width, height, 0, GL_DEPTH_COMPONENT, GL_UNSIGNED_INT, NULL);
// glTexParameteri calls omitted for brevity
GLuint g_uiDepthFramebuffer;
glGenFramebuffers(1, &g_uiDepthFramebuffer);
glBindFramebuffer(GL_FRAMEBUFFER, g_uiDepthFramebuffer);
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_TEXTURE_2D, g_uiDepthBuffer, 0);
Your texture then receives all the values being written to the depth buffer when you draw your scene (you can use a trivial fragment shader for this), and you can texture from it directly without needing to call glCopyTexImage2D.
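For illustration, the shadow pass can then compare against that texture directly; a minimal fragment-shader sketch (uShadowMap, vShadowCoord and the 0.005 bias are illustrative names and values, and vShadowCoord is assumed to already be mapped into the 0..1 range by the vertex shader):
static const char *shadowFragSrc =
    "precision mediump float;\n"
    "uniform sampler2D uShadowMap;\n"                          // the depth texture attached above
    "varying vec4 vShadowCoord;\n"                             // fragment position in light space
    "void main() {\n"
    "    vec3 coord = vShadowCoord.xyz / vShadowCoord.w;\n"
    "    float storedDepth = texture2D(uShadowMap, coord.xy).r;\n"
    "    float lit = step(coord.z - 0.005, storedDepth);\n"    // 1.0 = lit, 0.0 = in shadow
    "    gl_FragColor = vec4(vec3(lit), 1.0);\n"
    "}\n";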
