Drawing text in an OpenGL view - Cocoa

I need to draw a counter onto an OpenGL view. Here is my source code for the video frame display.
cgl_ctx = (CGLContextObj)[[self openGLContext] CGLContextObj];
glEnable(GL_TEXTURE_RECTANGLE_ARB);
glGenTextures(1, &_surfaceTexture);
glDisable(GL_TEXTURE_RECTANGLE_ARB);
GLint swapInt = 1;//100;//1;
[[self openGLContext] setValues:&swapInt forParameter:NSOpenGLCPSwapInterval];
[[self openGLContext] makeCurrentContext];//make the current opengl the priority
glMatrixMode(GL_TEXTURE);
glGenTextures(1, &_surfaceTexture);
glBindTexture(GL_TEXTURE_2D, _surfaceTexture);
glTexParameteri(GL_TEXTURE_2D, GL_GENERATE_MIPMAP_SGIS, GL_TRUE);
glTexImage2D(GL_TEXTURE_2D,
             0,
             GL_RGBA,
             displayWidth,     //960,//1920,
             displayHeight,    //540,//1080,
             0,
             GL_BGRA,          //GL_YCBCR_422_APPLE,//GL_RGB,//GL_APPLE_ycbcr_422,
             GL_UNSIGNED_BYTE, //GL_BYTE,//GL_UNSIGNED_BYTE,//GL_UNSIGNED_SHORT_8_8_REV_APPLE,
             IOSurfaceGetBaseAddress(videoBuffer->ioSurfaceDataBuffer[bufferCounter].RGBDest));
glBegin(GL_QUADS);
glTexCoord2f(0.0, 0.0);
glVertex3f(-1.0, -1.0, 0.0);
glTexCoord2f(1.0, 0.0);
glVertex3f(1.0, -1.0, 0.0);
glTexCoord2f(1.0, 1.0);
glVertex3f(1.0, 1.0, 0.0);
glTexCoord2f(0.0, 1.0);
glVertex3f(-1.0, 1.0, 0.0);
glEnd();
glFlush();
threadTracking ("view 11\n");
printf("view 2.2) videoBuffer->ioSurfaceDataBuffer[bufferCounter].played %d\n",videoBuffer->ioSurfaceDataBuffer[bufferCounter].played);
[[self openGLContext] flushBuffer];

I got the source from another video player that displays video information. Here is what I added to draw text directly onto the displayed frame with OpenGL:
char frameNumberBuffer[256];
CGColorSpaceRef frameCounterColorSpace;
CGContextRef frameCounterCglCtx;
frameCounterColorSpace = CGColorSpaceCreateWithName(kCGColorSpaceSRGB);
frameCounterCglCtx = CGBitmapContextCreate(IOSurfaceGetBaseAddress(videoBuffer->ioSurfaceDataBuffer[bufferCounter].RGBDest),
                                           displayWidth, displayHeight, 8,
                                           IOSurfaceGetBytesPerRow(videoBuffer->ioSurfaceDataBuffer[bufferCounter].RGBDest),
                                           frameCounterColorSpace,
                                           kCGImageAlphaPremultipliedFirst | kCGBitmapByteOrder32Host);
CGContextSelectFont(frameCounterCglCtx, "Helvetica", 72.0, kCGEncodingMacRoman);
snprintf(frameNumberBuffer,256,"Frame Counter: %d",playCounter);
CGContextSetRGBFillColor(frameCounterCglCtx, 1.0f, 0.0f, 0.0f, 1.0f);
CGContextShowTextAtPoint(frameCounterCglCtx, 100.0f, 100.0f, frameNumberBuffer, strlen(frameNumberBuffer));
snprintf(frameNumberBuffer,256,"Buffer Counter: %d",bufferCounter);
CGContextSetRGBFillColor(frameCounterCglCtx, 0.0f, 0.0f, 1.0f, 1.0f);
CGContextShowTextAtPoint(frameCounterCglCtx, 100.0f, 200.0f, frameNumberBuffer, strlen(frameNumberBuffer));
Here, IOSurfaceGetBaseAddress(videoBuffer->ioSurfaceDataBuffer[bufferCounter].RGBDest) returns the pointer to my original frame data, which is stored on an IOSurface.
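If CGContextSelectFont/CGContextShowTextAtPoint are unavailable (they are deprecated in later SDKs), the same counter string can be drawn into the same bitmap context with Core Text instead. This is only an untested sketch that reuses the variables from the snippet above (frameCounterCglCtx, playCounter) and assumes CoreText is linked:
#import <CoreText/CoreText.h>
// Sketch: draw "Frame Counter: %d" in red Helvetica 72 at (100, 100)
// into the existing CGBitmapContext that is backed by the IOSurface.
CTFontRef counterFont = CTFontCreateWithName(CFSTR("Helvetica"), 72.0, NULL);
CGColorRef counterColor = CGColorCreateGenericRGB(1.0, 0.0, 0.0, 1.0);
const void *attrKeys[]   = { kCTFontAttributeName, kCTForegroundColorAttributeName };
const void *attrValues[] = { counterFont, counterColor };
CFDictionaryRef attrs = CFDictionaryCreate(kCFAllocatorDefault, attrKeys, attrValues, 2,
                                           &kCFTypeDictionaryKeyCallBacks,
                                           &kCFTypeDictionaryValueCallBacks);
CFStringRef counterText = CFStringCreateWithFormat(kCFAllocatorDefault, NULL,
                                                   CFSTR("Frame Counter: %d"), playCounter);
CFAttributedStringRef attrString = CFAttributedStringCreate(kCFAllocatorDefault, counterText, attrs);
CTLineRef line = CTLineCreateWithAttributedString(attrString);
CGContextSetTextPosition(frameCounterCglCtx, 100.0f, 100.0f);
CTLineDraw(line, frameCounterCglCtx);   // rasterizes straight into the IOSurface pixels
CFRelease(line); CFRelease(attrString); CFRelease(counterText);
CFRelease(attrs); CGColorRelease(counterColor); CFRelease(counterFont);
As long as the text is drawn before the glTexImage2D upload above reads the surface, the counter simply becomes part of the frame and no extra OpenGL work is needed.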

Related

OpenGL ES 2.0 Texture Mapping to Fill Quad on Raspberry Pi

I am trying to set up my vertices and texture coordinates so that a single quad (2 triangles) covers the entire OpenGL window. I then want to update the texture as I receive images from my camera.
It works, except that my streaming texture never fills the entire screen and the texture's edge pixels are repeated outside the intended area (see images below).
How do I set up my vertices and texture coordinates so that my texture fills the quad without repeating?
Here is how I generate my texture:
glGenTextures(1, &m_dataFrame);
glBindTexture(GL_TEXTURE_2D, m_dataFrame);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, WIDTH, HEIGHT, 0, GL_RGBA, GL_UNSIGNED_BYTE, frame.bits());
// CUSTOM
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR );
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR );
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
Initial Attempts:
// Setup the vertices and texture coordinates
float scaler = 1.0f;
static const GLfloat squareVertices[] = {
-(1.0f/scaler), -(1.0f/scaler), 0.0f, // First triangle bottom left
(1.0f/scaler), -(1.0f/scaler), 0.0f,
-(1.0f/scaler), (1.0f/scaler), 0.0f,
(1.0f/scaler), -(1.0f/scaler), 0.0f, // Second triangle bottom right
(1.0f/scaler), (1.0f/scaler), 0.0f,
-(1.0f/scaler), (1.0f/scaler), 0.0f,
};
static const GLfloat textureVertices[] = {
-1.0f, -1.0f,
1.0f, -1.0f,
-1.0f, 1.0f,
1.0f, -1.0f,
1.0f, 1.0f,
-1.0f, 1.0f,
};
This results in the following image:
// Setup the vertices and texture coordinates
float x = 1.0f;
float y = 1.0f;
static const GLfloat squareVertices[] = {
-(x*2), -(y*2), 0.0f, // First triangle bottom left
(x*2), -(y*2), 0.0f,
-(x*2), (y*2), 0.0f,
(x*2), -(y*2), 0.0f, // Second triangle bottom right
(x*2), (y*2), 0.0f,
-(x*2), (y*2), 0.0f,
};
static const GLfloat textureVertices[] = {
-1.0f, -1.0f,
1.0f, -1.0f,
-1.0f, 1.0f,
1.0f, -1.0f,
1.0f, 1.0f,
-1.0f, 1.0f,
};
This results in the following image:
This is what I've used for rendering a textured quad on the Raspberry Pi (in Swift, but the syntax is similar to what you'd use for C). Setting up the texture parameters:
glEnable(GLenum(GL_TEXTURE_2D))
glGenTextures(1, &cameraOutputTexture)
glActiveTexture(GLenum(GL_TEXTURE0));
glBindTexture(GLenum(GL_TEXTURE_2D), cameraOutputTexture)
glTexParameteri(GLenum(GL_TEXTURE_2D), GLenum(GL_TEXTURE_MIN_FILTER), GL_NEAREST)
glTexParameteri(GLenum(GL_TEXTURE_2D), GLenum(GL_TEXTURE_MAG_FILTER), GL_NEAREST)
uploading the texture:
glActiveTexture(GLenum(GL_TEXTURE0))
glBindTexture(GLenum(GL_TEXTURE_2D), cameraOutputTexture)
glTexImage2D(GLenum(GL_TEXTURE_2D), 0, GL_RGB, 1280, 720, 0, GLenum(GL_RGB), GLenum(GL_UNSIGNED_BYTE), buffers[Int(currentBuffer)].start)
and rendering it:
glClear(GLenum(GL_COLOR_BUFFER_BIT))
shader.use()
let squareVertices:[GLfloat] = [
-1.0, -1.0,
1.0, -1.0,
-1.0, 1.0,
1.0, 1.0,
]
let textureCoordinates:[GLfloat] = [
0.0, 1.0,
1.0, 1.0,
0.0, 0.0,
1.0, 0.0,
]
glVertexAttribPointer(positionAttribute, 2, GLenum(GL_FLOAT), 0, 0, squareVertices)
glVertexAttribPointer(textureCoordinateAttribute, 2, GLenum(GL_FLOAT), 0, 0, textureCoordinates)
glUniform1i(GLint(textureCoordinateAttribute), 1)
glDrawArrays(GLenum(GL_TRIANGLE_STRIP), 0, 4)
eglSwapBuffers(display, surface)
I might be rotating the image to account for rotation in the camera that I'm using as an input source, so you may need to tweak the vertices or coordinates if you see your image rendering upside-down.
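For reference, here is a rough C translation of the same idea, since the question's code is in C. The key difference from the attempts above is that texture coordinates are normalized to the 0..1 range (values outside that range are what produce the clamped, repeated edges), while the quad's clip-space positions span -1..1. This is a sketch only: positionAttribute, textureCoordinateAttribute and textureUniform are assumed to come from your own compiled shader program, and m_dataFrame is the texture from the question.
// Full-screen quad in clip space, drawn as a triangle strip.
static const GLfloat squareVertices[] = {
    -1.0f, -1.0f,
     1.0f, -1.0f,
    -1.0f,  1.0f,
     1.0f,  1.0f,
};
// Texture coordinates stay inside 0..1.
static const GLfloat textureCoordinates[] = {
    0.0f, 1.0f,
    1.0f, 1.0f,
    0.0f, 0.0f,
    1.0f, 0.0f,
};
glVertexAttribPointer(positionAttribute, 2, GL_FLOAT, GL_FALSE, 0, squareVertices);
glEnableVertexAttribArray(positionAttribute);
glVertexAttribPointer(textureCoordinateAttribute, 2, GL_FLOAT, GL_FALSE, 0, textureCoordinates);
glEnableVertexAttribArray(textureCoordinateAttribute);
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, m_dataFrame);
glUniform1i(textureUniform, 0);   // sampler reads from texture unit 0
glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);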

Snapshot via glReadPixels, then flip the pixel content - but the image quality is degraded

Hi all,
I am using Unity3D to develop a game, and I want to save the content of each frame as an mp4 file with AVFoundation. But I ran into a problem while processing the snapshot. After using glReadPixels to obtain the data stored in the render buffer, a vertex shader and fragment shader are used to flip the pixel content upside down. After flipping each frame, I found that the quality of each frame has decreased a lot. Has anyone run into this kind of case before?
Here is the code related.
The snapshot part,
- (void *)snapshot
{
    // NSLog(@"snapshot used here");
    GLint backingWidth1, backingHeight1;
    glBindRenderbufferOES(GL_RENDERBUFFER_OES, mainDisplaySurface->systemColorRB);
    // Get the size of the backing CAEAGLLayer
    glGetRenderbufferParameterivOES(GL_RENDERBUFFER_OES, GL_RENDERBUFFER_WIDTH_OES, &backingWidth1);
    glGetRenderbufferParameterivOES(GL_RENDERBUFFER_OES, GL_RENDERBUFFER_HEIGHT_OES, &backingHeight1);
    NSInteger x = 0, y = 0, width = backingWidth1, height = backingHeight1;
    NSInteger dataLength = width * height * 4;
    GLubyte *data = (GLubyte*)malloc(dataLength * sizeof(GLubyte));
    // Read pixel data from the framebuffer
    glPixelStorei(GL_PACK_ALIGNMENT, 4);
    glReadPixels(x, y, width, height, GL_RGBA, GL_UNSIGNED_BYTE, data);
    if (transformData == NULL)
    {
        NSLog(@"transformData initial");
        transformData = new loadDataFromeX();
        transformData->setupOGL(backingWidth1, backingHeight1);
    }
    NSLog(@"data %d, %d, %d", (int)data[0], (int)data[1], (int)data[2]);
    transformData->drawingOGL(data);
    return data;
}
Here, transformData is a C++ class that does the flipping work.
In setupOGL(), all of the textures and framebuffers are constructed.
In drawingOGL(), the flip is done by passing the data through the vertex shader and fragment shader. The details of this function are listed below:
int loadDataFromeX::drawingOGL(unsigned char* data)
{
    // load data into the texture
    glDisable(GL_DEPTH_TEST);
    glBindFramebuffer(GL_FRAMEBUFFER, transFBO.frameBuffer);
    glClear(GL_COLOR_BUFFER_BIT);
    glClearColor(1., 0., 0., 1.);
    glViewport(0, 0, imageWidth, imageHeight);
    GLfloat vertex_positions[] = {
        -1.0f, -1.0f, -10.0f,
         1.0f, -1.0f, -10.0f,
        -1.0f,  1.0f, -10.0f,
         1.0f,  1.0f, -10.0f
    };
    GLfloat texture_coords[] = { // left upper corner is (0,0)
        0.0f, 1.0f,
        1.0f, 1.0f,
        0.0f, 0.0f,
        1.0f, 0.0f,
    };
    glUseProgram(gl_program_id);
    glVertexAttribPointer(gl_attribute_position, 3, GL_FLOAT, GL_FALSE, 0, vertex_positions);
    glEnableVertexAttribArray(gl_attribute_position);
    glVertexAttribPointer(gl_attribute_texture_coordinate, 2, GL_FLOAT, GL_FALSE, 0, texture_coords);
    glEnableVertexAttribArray(gl_attribute_texture_coordinate);
    // Load textures
    glActiveTexture(GL_TEXTURE0);
    glBindTexture(GL_TEXTURE_2D, texture);
    if (flag)
    {
        flag = false;
        glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, imageWidth, imageHeight, 0, GL_RGBA, GL_UNSIGNED_BYTE, data);
    }
    else
    {
        glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, imageWidth, imageHeight, GL_RGBA, GL_UNSIGNED_BYTE, data);
    }
    glUniform1i(glGetUniformLocation(gl_program_id, "inputImageTexture"), 0);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
    glUniformMatrix4fv(mvpLocation, 1, 0, gComputeMVP);
    glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);
    glPixelStorei(GL_PACK_ALIGNMENT, 4);
    glReadPixels(0, 0, imageWidth, imageHeight, GL_RGBA, GL_UNSIGNED_BYTE, data);
    glFinish();
    cout << "data: " << (int)data[0] << "; " << (int)data[1] << ", " << (int)data[2] << endl;
    return 1;
}
The vertex shader and fragment shader are provided below:
static float l = -1.f, r = 1.f;
static float b = -1.f, t = 1.f;
static float n = 0.1f, f = 100.f;
static float gComputeMVP[16] = {
2.0f/(r-l), 0.0f, 0.0f, 0.0f,
0.0f, 2.0f/(t-b), 0.0f, 0.0f,
0.0f, 0.0f, -2.0f/(f-n), 0.0f,
-(r+l)/(r-l), -(t+b)/(t-b), -(f+n)/(f-n), 1.0f
};
// Shader sources
const GLchar* vertex_shader_str =
"attribute vec4 position;\n"
"attribute vec4 inputTextureCoordinate;\n"
"varying mediump vec2 textureCoordinate;\n"
"uniform mat4 mvpMatrix;\n"
"void main()\n"
"{\n"
" gl_Position = position;\n"
" gl_Position = mvpMatrix * position;\n"
" textureCoordinate = inputTextureCoordinate.xy;\n"
"}";
const char* fragment_shader_str = ""
" varying mediump vec2 textureCoordinate;\n"
"\n"
" uniform sampler2D inputImageTexture;\n"
" \n"
" void main()\n"
" {\n"
" mediump vec4 Color = texture2D(inputImageTexture, textureCoordinate);\n"
" gl_FragColor = vec4(Color.z, Color.y, Color.x, Color.w);\n"
" }";
I don't know why the quality of each frame has decreased. Also, when I compare the output of the variable data before and after calling drawingOGL, using these two lines:
cout << "data: " << (int)data[0] << "; " << (int)data[1] << ", " << (int)data[2] << endl;
NSLog(@"data %d, %d, %d", (int)data[0], (int)data[1], (int)data[2]);
the first line gives the right pixel values, but the second line always gives ZERO. It's really strange, right?
I have found the reason for this strange problem. It was caused by Unity3D's OpenGL ES context. I'm not familiar with Unity3D, so maybe only someone like me would run into this: Unity3D applies some special settings to its OpenGL ES context, so to make the snapshot work, a new context has to be created and made current only while the snapshot is running.
To solve the problem, I construct a separate context (EAGLContext*) for the snapshot task, like this:
- (void *)snapshot
{
    // NSLog(@"snapshot used here");
    GLint backingWidth1, backingHeight1;
    glBindRenderbufferOES(GL_RENDERBUFFER_OES, mainDisplaySurface->systemColorRB);
    // Get the size of the backing CAEAGLLayer
    glGetRenderbufferParameterivOES(GL_RENDERBUFFER_OES, GL_RENDERBUFFER_WIDTH_OES, &backingWidth1);
    glGetRenderbufferParameterivOES(GL_RENDERBUFFER_OES, GL_RENDERBUFFER_HEIGHT_OES, &backingHeight1);
    NSInteger x = 0, y = 0, width = backingWidth1, height = backingHeight1;
    NSInteger dataLength = width * height * 4;
    GLubyte *data = (GLubyte*)malloc(dataLength * sizeof(GLubyte));
    // Read pixel data from the framebuffer
    glPixelStorei(GL_PACK_ALIGNMENT, 4);
    glReadPixels(x, y, width, height, GL_BGRA, GL_UNSIGNED_BYTE, data);
    NSLog(@"data %d, %d, %d", (int)data[0], (int)data[1], (int)data[2]);
    NSLog(@"backingWidth1 : %d, backingHeight1: %d", backingWidth1, backingHeight1);
    if (transformData == NULL)
    {
        mycontext = [[EAGLContext alloc] initWithAPI:kEAGLRenderingAPIOpenGLES2];
        [EAGLContext setCurrentContext: mycontext];
        NSLog(@"transformData initial");
        transformData = new loadDataFromeX();
        transformData->setupOGL(backingWidth1, backingHeight1);
        [EAGLContext setCurrentContext: mainDisplaySurface->context];
    }
    {
        [EAGLContext setCurrentContext: mycontext];
        transformData->drawingOGL(data);
        [EAGLContext setCurrentContext: mainDisplaySurface->context];
    }
    return data; // the method is declared (void *), so return the buffer as in the first version
}
When the resources used for the snapshot are released, the code looks like this:
if (transformData != NULL)
{
{
[EAGLContext setCurrentContext: mycontext];
transformData->destroy();
delete transformData;
transformData = NULL;
[EAGLContext setCurrentContext: mainDisplaySurface->context];
}
[mycontext release];
mycontext = nil;
}
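If the only goal is to flip the frame vertically before handing it to AVFoundation, an alternative that avoids the extra GL pass entirely (and the R/B swizzle the fragment shader above performs) is to swap rows on the CPU. A minimal sketch, assuming a tightly packed RGBA buffer; flip_rgba_vertically is a hypothetical helper, not part of the original code:
#include <stdlib.h>
#include <string.h>

// Flip an RGBA image in place by exchanging its rows top-to-bottom.
static void flip_rgba_vertically(unsigned char *pixels, int width, int height)
{
    size_t rowBytes = (size_t)width * 4;              // 4 bytes per RGBA pixel
    unsigned char *tmp = (unsigned char *)malloc(rowBytes);
    if (tmp == NULL) return;
    for (int row = 0; row < height / 2; row++) {
        unsigned char *top    = pixels + (size_t)row * rowBytes;
        unsigned char *bottom = pixels + (size_t)(height - 1 - row) * rowBytes;
        memcpy(tmp, top, rowBytes);                   // swap the two rows
        memcpy(top, bottom, rowBytes);
        memcpy(bottom, tmp, rowBytes);
    }
    free(tmp);
}
Called as flip_rgba_vertically(data, backingWidth1, backingHeight1) right after glReadPixels, this keeps the pixel values byte-for-byte identical, so no quality is lost in the flip itself.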

How could I move image smoothly within QGLWidget window?

I am writing a small program to simulate animation. I use QGLWidget to display a dynamic image to mimic picture animation. The basic idea is to use a texture and update it through QGLFramebufferObject::blitFramebuffer: each time, the image array is changed and then the framebuffer is updated.
The code runs and the image moves forward, but the animation is choppy and flickery, not smooth.
Following is the code for a class:
GLWidget::GLWidget(QWidget *parent) : QGLWidget(QGLFormat(), parent)
{
makeCurrent();
if (QGLFramebufferObject::hasOpenGLFramebufferBlit()) {
QGLFramebufferObjectFormat format;
render_fbo = new QGLFramebufferObject(640, 480, format);
texture_fbo = new QGLFramebufferObject(640, 480);
} else {
render_fbo = new QGLFramebufferObject(640, 480);
texture_fbo = render_fbo;
}
tile_list = glGenLists(1);
glNewList(tile_list, GL_COMPILE);
glBegin(GL_QUADS);
{
glTexCoord2f(0.0f, 0.0f); glVertex3f(-1.0f, -1.0f, 1.0f);
glTexCoord2f(1.0f, 0.0f); glVertex3f( 1.0f, -1.0f, 1.0f);
glTexCoord2f(1.0f, 1.0f); glVertex3f( 1.0f, 1.0f, 1.0f);
glTexCoord2f(0.0f, 1.0f); glVertex3f(-1.0f, 1.0f, 1.0f);
}
glEnd();
glEndList();
backbufferImg = new unsigned char[640 * 480 * sizeof(unsigned char) * 3];
}
GLWidget::~GLWidget()
{
}
void GLWidget::initializeGL()
{
}
void GLWidget::resizeGL(int width, int height)
{
}
void GLWidget::paintGL()
{
}
void GLWidget::saveGLState()
{
glPushAttrib(GL_ALL_ATTRIB_BITS);
glMatrixMode(GL_PROJECTION);
glPushMatrix();
glMatrixMode(GL_MODELVIEW);
glPushMatrix();
}
void GLWidget::restoreGLState()
{
glMatrixMode(GL_PROJECTION);
glPopMatrix();
glMatrixMode(GL_MODELVIEW);
glPopMatrix();
glPopAttrib();
}
void GLWidget::changeTextureImage(int nextColumn, int direction)
{
// code about changing backbufferImg
// ...
// call draw
draw();
}
void GLWidget::draw()
{
makeCurrent();
QPainter p(this); // used for text overlay
// save the GL state set for QPainter
saveGLState();
QPainter fbo_painter(render_fbo); // the constructor already begins painting on the FBO
QImage renderImg(backbufferImg, 640, 480, QImage::Format_RGB888);
QRect rect(0, 0, 640, 480);
fbo_painter.drawImage(rect, renderImg, rect);
fbo_painter.end();
if (render_fbo != texture_fbo) {
QGLFramebufferObject::blitFramebuffer(texture_fbo, rect, render_fbo, rect);
} else {
// qDebug() << "render_fbo == texture_fbo";
}
// draw into the GL widget
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
glViewport(0, 0, width(), height());
glEnable(GL_BLEND);
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
glBindTexture(GL_TEXTURE_2D, texture_fbo->texture());
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glEnable(GL_TEXTURE_2D);
glPushMatrix();
glScalef(1.0f, 1.0f, 1.0f);
glColor4f(1.0f, 1.0f, 1.0f, 1.0f);
glCallList(tile_list);
glPopMatrix();
// restore the GL state that QPainter expects
restoreGLState();
glFinish();
}
Qt's QGLWidget expects you to perform all drawing operations from its overloaded paintGL function so that it interacts nicely with Qt's double-buffer management.
Also, why are you taking that detour over an FBO to update a texture? This is highly inefficient and introduces a lot of synchronization points. Use a Pixel Buffer Object and update the texture with glTexSubImage2D.
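A rough sketch of the suggested approach (desktop OpenGL 2.1+, assuming the needed GL entry points are available, e.g. through GLEW): create the texture and a pixel buffer object once, then each frame copy the new pixels into the PBO and let glTexSubImage2D source from it. The 640x480 size and backbufferImg come from the post; everything else is an assumption.
// One-time setup: a 640x480 RGB texture plus a streaming PBO of matching size.
GLuint tex, pbo;
const int W = 640, H = 480;
glGenTextures(1, &tex);
glBindTexture(GL_TEXTURE_2D, tex);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, W, H, 0, GL_RGB, GL_UNSIGNED_BYTE, NULL);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glGenBuffers(1, &pbo);
glBindBuffer(GL_PIXEL_UNPACK_BUFFER, pbo);
glBufferData(GL_PIXEL_UNPACK_BUFFER, W * H * 3, NULL, GL_STREAM_DRAW);

// Per frame (e.g. at the top of paintGL): upload the new image through the PBO.
glBindBuffer(GL_PIXEL_UNPACK_BUFFER, pbo);
glBufferData(GL_PIXEL_UNPACK_BUFFER, W * H * 3, NULL, GL_STREAM_DRAW);    // orphan the old storage
glBufferSubData(GL_PIXEL_UNPACK_BUFFER, 0, W * H * 3, backbufferImg);     // copy the new frame in
glBindTexture(GL_TEXTURE_2D, tex);
// With a PBO bound, the last argument is a byte offset into the buffer, not a pointer.
glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, W, H, GL_RGB, GL_UNSIGNED_BYTE, 0);
glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
The upload then happens asynchronously on the driver's side instead of stalling the paint path the way the QPainter-into-FBO plus blit detour does.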
Thank you very much! I have changed the code, put the drawing code inside paintGL, and stopped using the FBO's blit method. Much better, but the code still needs improvement. The following is the code:
GLWidget::GLWidget(QWidget *parent) :
QGLWidget(QGLFormat(), parent)
{
makeCurrent();
fbo = new QGLFramebufferObject(WIDTH, HEIGHT);
tile_list = glGenLists(1);
glNewList(tile_list, GL_COMPILE);
glBegin(GL_QUADS);
{
glTexCoord2f(0.0f, 0.0f); glVertex3f(-1.0f, -1.0f, 1.0f);
glTexCoord2f(1.0f, 0.0f); glVertex3f( 1.0f, -1.0f, 1.0f);
glTexCoord2f(1.0f, 1.0f); glVertex3f( 1.0f, 1.0f, 1.0f);
glTexCoord2f(0.0f, 1.0f); glVertex3f(-1.0f, 1.0f, 1.0f);
}
glEnd();
glEndList();
backbufferImg = new unsigned char[WIDTH * HEIGHT * sizeof(unsigned char) * 3];
}
GLWidget::~GLWidget()
{
glDeleteLists(tile_list, 1);
delete fbo;
delete[] backbufferImg;
}
void GLWidget::initializeGL()
{
}
void GLWidget::resizeGL(int width, int height)
{
}
void GLWidget::paintGL()
{
glViewport(0, 0, fbo->size().width(), fbo->size().height());
glMatrixMode(GL_MODELVIEW);
// render to the framebuffer object
fbo->bind();
glBindTexture(GL_TEXTURE_2D, refreshTexture);
glCallList(tile_list);
fbo->release();
//////////////////////////////////////////////////////////////////////
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
glViewport(0, 0, width(), height());
glEnable(GL_BLEND);
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
glBindTexture(GL_TEXTURE_2D, fbo->texture());
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glEnable(GL_TEXTURE_2D);
glPushMatrix();
glScalef(1.0f, 1.0f, 1.0f);
glColor4f(1.0f, 1.0f, 1.0f, 1.0f);
glCallList(tile_list);
glPopMatrix();
// restore the GL state that QPainter expects
restoreGLState();
glFinish();
}
void GLWidget::saveGLState()
{
glPushAttrib(GL_ALL_ATTRIB_BITS);
glMatrixMode(GL_PROJECTION);
glPushMatrix();
glMatrixMode(GL_MODELVIEW);
glPushMatrix();
}
void GLWidget::restoreGLState()
{
glMatrixMode(GL_PROJECTION);
glPopMatrix();
glMatrixMode(GL_MODELVIEW);
glPopMatrix();
glPopAttrib();
}
void GLWidget::refreshTextureImage(int nextColumn, int direction)
{
// update backbufferImg
// add code...
QImage renderImg(backbufferImg, WIDTH, HEIGHT, QImage::Format_RGB888);
refreshTexture = bindTexture(renderImg);
updateGL();
}
I will try the method you suggested, but I think the framebuffer object and the pixel buffer object are at the same level, so there shouldn't be much performance difference. As for glTexSubImage2D, I'm not quite sure; I need to check it.
Thank you so much.

iOS OpenGL ES - Rotate the camera around the scene

This is my code:
- (void)drawView {
// Define square for the flower's head (vertices)
const GLfloat flowerHead[] = {
    -1.0,  1.0, 1.0,  // top left
    -1.0, -1.0, 1.0,  // bottom left
     1.0, -1.0, 1.0,  // bottom right
     1.0,  1.0, 1.0   // top right
};
// Define the texture coordinates --> matching the flowerHead vertices
const GLshort flowerHeadTextureCoord[] = {
0, 1, // top left
0, 0, // bottom left
1, 0, // bottom right
1, 1 // top right
};
rota = 1.0;
[EAGLContext setCurrentContext:context];
glBindFramebufferOES(GL_FRAMEBUFFER_OES, viewFramebuffer);
glViewport(0, 0, backingWidth, backingHeight);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glMatrixMode(GL_MODELVIEW);
glTexCoordPointer(2, GL_SHORT, 0, flowerHeadTextureCoord);
glEnableClientState(GL_TEXTURE_COORD_ARRAY);
// flower #1
glPushMatrix();
{
glEnable(GL_ALPHA_TEST); // enable alpha -> transparency state
glTranslatef(0.0, 0.0, -6.0); // positioning the flower
glRotatef(55.0, 1.0, 0.0, 0.0); // rotate flower on (x) axis
glVertexPointer(3, GL_FLOAT, 0, flowerHead); // pointer to the flowerHead vertices array
glEnableClientState(GL_VERTEX_ARRAY); // enable the vertex array state
// Draw the front face
// Bind texture
glBindTexture(GL_TEXTURE_2D, textures[0]);
glDrawArrays(GL_TRIANGLE_FAN, 0, 4);
}
glPopMatrix();
glBindRenderbufferOES(GL_RENDERBUFFER_OES, viewRenderbuffer);
[context presentRenderbuffer:GL_RENDERBUFFER_OES];
[self checkGLError:NO];
}
This is my drawing code; all I need now is to rotate the camera around the object (and not the object itself).
My setupView function:
const GLfloat zNear = 0.1, zFar = 1000.0, fieldOfView = 60.0;
GLfloat size;
// Set the general polygon properties - for transparency!!!
glBlendFunc(GL_SRC_ALPHA,GL_ONE_MINUS_SRC_ALPHA);
glAlphaFunc(GL_GREATER,0.1f);
glEnable(GL_DEPTH_TEST);
glMatrixMode(GL_PROJECTION);
size = zNear * tanf(DEGREES_TO_RADIANS(fieldOfView) / 2.0);
// This give us the size of the iPhone display
CGRect rect = self.bounds;
glFrustumf(-size, size, -size / (rect.size.width / rect.size.height), size / (rect.size.width / rect.size.height), zNear, zFar);
glViewport(0, 0, rect.size.width, rect.size.height);
glClearColor(0.0f, 0.0f, 0.0f, 0.5f);
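With the fixed-function ES 1.1 pipeline used above, a common way to orbit the eye around an object is to put the "camera" transform first on the modelview stack (push the eye back, then rotate the whole scene by the orbit angle) and only apply each object's own transform afterwards. A minimal sketch along those lines for drawView, where cameraAngle is an assumed variable you advance each frame; this is not from the original thread:
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
// Camera part: back the eye 6 units away, then orbit the whole scene around
// its y axis. Rotating here is equivalent to moving the eye around the object
// while the object itself stays put.
glTranslatef(0.0f, 0.0f, -6.0f);
glRotatef(cameraAngle, 0.0f, 1.0f, 0.0f);
// Object part: only the flower's own transform stays inside push/pop.
glPushMatrix();
glRotatef(55.0f, 1.0f, 0.0f, 0.0f);          // the flower's own tilt, as in drawView
glVertexPointer(3, GL_FLOAT, 0, flowerHead);
glEnableClientState(GL_VERTEX_ARRAY);
glBindTexture(GL_TEXTURE_2D, textures[0]);
glDrawArrays(GL_TRIANGLE_FAN, 0, 4);
glPopMatrix();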

"MouseDragged" but NSOpenGlView not refreshed until "MouseUp"

I have a curious problem with an app whose main view inherits from NSOpenGLView.
I am able to draw many things in it, like a basic OpenGL tutorial, but when I try to implement a basic trackball, it looks like the app waits until I release the mouse button before it refreshes the view. Even if I call the drawRect: callback directly from the mouseDragged: callback, it behaves the same. I really can't figure out how to do such a basic thing, or what is wrong with my approach.
Any idea?
My NSOpenGLView has a double buffer and an additional depth buffer.
Here are the main messages.
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
- (void)mouseDragged:(NSEvent *)theEvent {
//[[self openGLContext] makeCurrentContext];
float curPos[3], dx, dy, dz;
dx = curPos[0] - lastPos[0];
dy = curPos[1] - lastPos[1];
dz = curPos[2] - lastPos[2];
angle = 90.0 * sqrt(dx*dx + dy*dy + dz*dz);
axis[0] = lastPos[1]*curPos[2] - lastPos[2]*curPos[1];
axis[1] = lastPos[2]*curPos[0] - lastPos[0]*curPos[2];
axis[2] = lastPos[0]*curPos[1] - lastPos[1]*curPos[0];
//glutPostRedisplay();
//[self drawRect:self.bounds]; // different attempts
[self setNeedsDisplay:YES]; //
//[self display];
}
#pragma mark Mouse Handling
- (void)mouseDown:(NSEvent *)theEvent {
m_PreviousMouseLoc = [self convertPoint:[theEvent locationInWindow] fromView:nil];
ptov(m_PreviousMouseLoc.x,m_PreviousMouseLoc.y,m_width,m_height,lastPos);
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
- (void)mouseUp:(NSEvent *)theEvent {
[self setNeedsDisplay:YES];
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
void ptov(int x, int y, int width, int height, float v[3]){
float a,d;
// project x,y onto 1/2 sphere centered within width and height
v[0] = (2.0*x - width)/ width;
v[1] = (height - 2.0*y)/ height;
d = sqrt(v[0]*v[0] + v[1]*v[1]);
v[2] = cos((PI/2)*((d < 1.0) ? d : 1.0));
a = 1.0 / sqrt(v[0]*v[0] + v[1]*v[1] + v[2]*v[2]);
v[0] *=a;
v[1] *=a;
v[2] *=a;
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
-(void)prepareOpenGL{
if( ![ self loadGLTextures ] ){
NSLog(#"Failed to load GL Textures");
}
GLint swapInt = 1;
[[self openGLContext] setValues:&swapInt forParameter:NSOpenGLCPSwapInterval]; // set to vbl sync
//glEnable( GL_TEXTURE_2D ); // Enable texture mapping
glShadeModel( GL_SMOOTH ); // Enable smooth shading
glClearColor( 0.0f, 0.0f, 0.0f, 0.5f ); // Black background
glClearDepth( 1.0f ); // Depth buffer setup
glEnable(GL_DEPTH_TEST); // Enables Depth Testing
glEnable (GL_BLEND);
glDepthFunc(GL_LEQUAL); // The Type Of Depth Testing To Do
glHint(GL_PERSPECTIVE_CORRECTION_HINT, GL_NICEST); // Really Nice Perspective Calculations
glBlendFunc (GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
glPolygonOffset (1.0f, 1.0f);
firstOccurenceOfDrawRect = NO;
/* // set start values...
rVel[0] = 0.3; rVel[1] = 0.1; rVel[2] = 0.2;
rAccel[0] = 0.003; rAccel[1] = -0.005; rAccel[2] = 0.004;*/
[[self window] setAcceptsMouseMovedEvents: YES];
glViewport(0, 0, (GLsizei) self.bounds.size.width, (GLsizei) self.bounds.size.height);
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
- (void) drawRect: (NSRect) rect
{
/// if (firstOccurenceOfDrawRect)
// [self initGL];
// Load the projection matrix to define what we are going to see and how
glMatrixMode(GL_PROJECTION);
// Center
glLoadIdentity();
// Step back
glOrtho(-1.0f, 1.0f, -1.5f, 1.5f, -10.0f, 10.0f);
// Clear what was drawn previously
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
// Now define what we want to draw
glMatrixMode(GL_MODELVIEW);
// Before starting to draw, apply the two rotations to the cube's coordinate frame to make it "move"
glLoadIdentity();
glRotatef(angle, axis[0], axis[1], axis[2]);
// [self resizeGL]; // forces projection matrix update (does test for size changes)
// [self updateModelView];
glColor3f (1.0, 1.0, 1.0);
glDisable(GL_CULL_FACE);
glFrontFace(GL_CW);
glutSolidTeapot(200.0);
glFrontFace(GL_CCW);
glEnable(GL_TEXTURE_2D);
glBindTexture( GL_TEXTURE_2D, texture[ 0 ] ); // Select our texture
glBegin( GL_QUADS );
// Front
glTexCoord2f( 0.0f, 0.0f );
glVertex3f( -1.0f, -1.0f, 0.0f ); // Bottom left
glTexCoord2f( 1.0f, 0.0f );
glVertex3f( 1.0f, -1.0f, 0.0f ); // Bottom right
glTexCoord2f( 1.0f, 1.0f );
glVertex3f( 1.0f, 1.0f, 0.0f ); // Top right
glTexCoord2f( 0.0f, 1.0f );
glVertex3f( -1.0f, 1.0f, 0.0f ); // Top left
glEnd();
glBindTexture( GL_TEXTURE_2D, texture[ 1 ] ); // Select our texture
glBegin( GL_QUADS );
// side
glTexCoord2f( 1.0f, 0.0f );
glVertex3f( 0.0f, -1.0f, -1.0f ); // Bottom right
glTexCoord2f( 1.0f, 1.0f );
glVertex3f( 0.0f, 1.0f, -1.0f ); // Top right
glTexCoord2f( 0.0f, 1.0f );
glVertex3f( 0.0f, 1.0f, 1.0f ); // Top left
glTexCoord2f( 0.0f, 0.0f );
glVertex3f( 0.0f, -1.0f, 1.0f ); // Bottom left
glEnd();
glBindTexture( GL_TEXTURE_2D, texture[ 2 ] ); // Select our texture
glBegin( GL_QUADS );
// Top
glTexCoord2f( 0.0f, 1.0f );
glVertex3f( -1.0f, 0.0f, -1.0f ); // Top left
glTexCoord2f( 0.0f, 0.0f );
glVertex3f( -1.0f, 0.0f, 1.0f ); // Bottom left
glTexCoord2f( 1.0f, 0.0f );
glVertex3f( 1.0f, 0.0f, 1.0f ); // Bottom right
glTexCoord2f( 1.0f, 1.0f );
glVertex3f( 1.0f, 0.0f, -1.0f ); // Top right
glEnd();
glDisable(GL_TEXTURE_2D);
glColor3f (1.0, 1.0, 1.0);
glDisable(GL_CULL_FACE);
glFrontFace(GL_CW);
glutSolidTeapot(200.0);
[self postRemoteImage];
[[self openGLContext] flushBuffer];
}
Is this the entire code for the relevant methods? If so, you're never grabbing the mouse's position while you're dragging. curPos[] is uninitialized and may contain the same values each time you get a mouseDragged: message. That could make your object always rotate to the same position, making it appear as if it's not moving.
Try inserting an NSLog in your mouseDragged: method to make sure that it's being called for your mouse drags (it probably is, but it's good to check).
There's a slight possibility that you're not giving the main run loop a chance to refresh the display, but I think that's unlikely. I've done something similar in my code and had it work just fine with everything on the main thread.
Rather than rolling your own trackball implementation, I suggest looking at Bill Dudney's trackball example code. While it is for the iPhone and deals with Core Animation, the trackball object he uses should be applicable to your case.
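Building on the point above about curPos[] never being filled in, a minimal sketch of a mouseDragged: that mirrors the existing mouseDown: (reusing the poster's ptov(), lastPos, m_width and m_height) might look like this; it is untested and only illustrates the missing step:
- (void)mouseDragged:(NSEvent *)theEvent {
    NSPoint loc = [self convertPoint:[theEvent locationInWindow] fromView:nil];
    float curPos[3], dx, dy, dz;
    ptov(loc.x, loc.y, m_width, m_height, curPos);   // actually sample the drag position
    dx = curPos[0] - lastPos[0];
    dy = curPos[1] - lastPos[1];
    dz = curPos[2] - lastPos[2];
    angle = 90.0 * sqrt(dx*dx + dy*dy + dz*dz);
    axis[0] = lastPos[1]*curPos[2] - lastPos[2]*curPos[1];
    axis[1] = lastPos[2]*curPos[0] - lastPos[0]*curPos[2];
    axis[2] = lastPos[0]*curPos[1] - lastPos[1]*curPos[0];
    memcpy(lastPos, curPos, sizeof(curPos));         // make the next event relative to this one
    [self setNeedsDisplay:YES];
}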