OpenGL ES - Coloring a triangle

I can currently draw the triangle and set the background to red, but I can't color the triangle. What obvious step am I missing here?
Current progress: red background and black triangle.
const GLfloat vertices[] = {
vertex1_s, vertex1_t, 0,
vertex2_s, vertex2_t, 0,
vertex3_s, vertex3_t, 0
};
glClearColor(1,0,0,0);
glClear(GL_COLOR_BUFFER_BIT);
const GLfloat colors[] = {
0, 0, 1,
0, 0, 1,
0, 0, 1
};
glEnableClientState(GL_VERTEX_ARRAY);
glVertexPointer(3, GL_FLOAT, 0, vertices);
glEnableClientState(GL_COLOR_ARRAY);
glColorPointer(3, GL_FLOAT, 0, colors);
glDrawArrays(GL_TRIANGLES, 0, 3);
eglSwapBuffers(dpy, surface);
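One thing worth checking, though it is only an assumption here: on OpenGL ES 1.x, glColorPointer accepts only 4 components per color, so a 3-component array generates GL_INVALID_VALUE and the color data is ignored. A minimal sketch of the same draw with RGBA colors:
// Hedged sketch (OpenGL ES 1.x): glColorPointer requires 4 components per color
const GLfloat rgbaColors[] = {
0, 0, 1, 1,
0, 0, 1, 1,
0, 0, 1, 1
};
glEnableClientState(GL_VERTEX_ARRAY);
glVertexPointer(3, GL_FLOAT, 0, vertices);
glEnableClientState(GL_COLOR_ARRAY);
glColorPointer(4, GL_FLOAT, 0, rgbaColors); // size must be 4 on ES 1.x
glDrawArrays(GL_TRIANGLES, 0, 3);
eglSwapBuffers(dpy, surface);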

Related

Enabling light0 in GLKit results in loss of color

I've been modifying some sample code from OpenGL ES 2 tutorials. I can set the GLKBaseEffect to use a constant colour. Now, when I go to add light to the scene, the colour is lost and I am left with 2 black cubes.
Below is the draw method:
- (void)draw
{
GLKMatrix4 xRotationMatrix = GLKMatrix4MakeXRotation(rotation.x);
GLKMatrix4 yRotationMatrix = GLKMatrix4MakeYRotation(rotation.y);
GLKMatrix4 zRotationMatrix = GLKMatrix4MakeZRotation(rotation.z);
GLKMatrix4 scaleMatrix = GLKMatrix4MakeScale(scale.x, scale.y, scale.z);
GLKMatrix4 translateMatrix = GLKMatrix4MakeTranslation(position.x, position.y, position.z);
GLKMatrix4 modelMatrix =
GLKMatrix4Multiply(translateMatrix,
GLKMatrix4Multiply(scaleMatrix,
GLKMatrix4Multiply(zRotationMatrix,
GLKMatrix4Multiply(yRotationMatrix, xRotationMatrix))));
GLKMatrix4 viewMatrix = GLKMatrix4MakeLookAt(0, 0, 3, 0, 0, 0, 0, 1, 0);
effect.transform.modelviewMatrix = GLKMatrix4Multiply(viewMatrix, modelMatrix);
effect.transform.projectionMatrix = GLKMatrix4MakePerspective(0.125*M_TAU, 2.0/3.0, 2, -1);
effect.useConstantColor = YES;
effect.constantColor = redColor;
[effect prepareToDraw];
glEnable(GL_DEPTH_TEST);
glEnable(GL_CULL_FACE);
glEnableVertexAttribArray(GLKVertexAttribPosition);
glVertexAttribPointer(GLKVertexAttribPosition, 3, GL_FLOAT, GL_FALSE, 0, triangleVertices);
// Sticking with constant color as opposed to per-vertex coloring for now...
// glEnableVertexAttribArray(GLKVertexAttribColor);
// glVertexAttribPointer(GLKVertexAttribColor, 4, GL_FLOAT, GL_FALSE, 0, triangleColors);
glDrawArrays(GL_TRIANGLES, 0, 36);
glDisableVertexAttribArray(GLKVertexAttribPosition);
glDisableVertexAttribArray(GLKVertexAttribColor);
}
I enable the light in my static initialize method:
+ (void)initialize {
if (!initialised) {
vertices[0] = GLKVector3Make(-0.5, -0.5, 0.5); // Left bottom front
vertices[1] = GLKVector3Make( 0.5, -0.5, 0.5); // Right bottom front
vertices[2] = GLKVector3Make( 0.5, 0.5, 0.5); // Right top front
vertices[3] = GLKVector3Make(-0.5, 0.5, 0.5); // Left top front
vertices[4] = GLKVector3Make(-0.5, -0.5, -0.5); // Left bottom back
vertices[5] = GLKVector3Make( 0.5, -0.5, -0.5); // Right bottom back
vertices[6] = GLKVector3Make( 0.5, 0.5, -0.5); // Right top back
vertices[7] = GLKVector3Make(-0.5, 0.5, -0.5); // Left top back
colors[0] = GLKVector4Make(1.0, 0.0, 0.0, 1.0); // Red
colors[1] = GLKVector4Make(0.0, 1.0, 0.0, 1.0); // Green
colors[2] = GLKVector4Make(0.0, 0.0, 1.0, 1.0); // Blue
colors[3] = GLKVector4Make(0.0, 0.0, 0.0, 1.0); // Black
colors[4] = GLKVector4Make(0.0, 0.0, 1.0, 1.0); // Blue
colors[5] = GLKVector4Make(0.0, 0.0, 0.0, 1.0); // Black
colors[6] = GLKVector4Make(1.0, 0.0, 0.0, 1.0); // Red
colors[7] = GLKVector4Make(0.0, 1.0, 0.0, 1.0); // Green
int vertexIndices[36] = {
// Front
0, 1, 2,
0, 2, 3,
// Right
1, 5, 6,
1, 6, 2,
// Back
5, 4, 7,
5, 7, 6,
// Left
4, 0, 3,
4, 3, 7,
// Top
3, 2, 6,
3, 6, 7,
// Bottom
4, 5, 1,
4, 1, 0,
};
for (int i = 0; i < 36; i++) {
triangleVertices[i] = vertices[vertexIndices[i]];
triangleColors[i] = colors[vertexIndices[i]];
}
effect = [[GLKBaseEffect alloc] init];
effect.lightingType = GLKLightingTypePerVertex;
effect.lightModelAmbientColor = GLKVector4Make(0.3f, 0.3f, 0.33f, 1.0f);
effect.light0.enabled = GL_TRUE;
effect.light0.diffuseColor = GLKVector4Make(1.0f, 0.4f, 0.4f, 1.0f);
initialised = YES;
}
}
The following screenshots show the result with lighting disabled (red colour) and enabled (black colour).
Lighting requires your vertices to have both position and normal attributes -- you appear to only have position.
You may also need to enable the color material property:
self.effect = [[GLKBaseEffect alloc] init];
self.effect.colorMaterialEnabled = GL_TRUE;
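For a cube built from per-face triangles like this one, a minimal sketch of supplying normals, assuming a triangleNormals array (hypothetical, filled in +initialize alongside triangleVertices with one face normal per vertex):
// Hedged sketch: triangleNormals is a hypothetical GLKVector3 array, not in the posted code
glEnableVertexAttribArray(GLKVertexAttribNormal);
glVertexAttribPointer(GLKVertexAttribNormal, 3, GL_FLOAT, GL_FALSE, 0, triangleNormals);
glDrawArrays(GL_TRIANGLES, 0, 36);
glDisableVertexAttribArray(GLKVertexAttribNormal);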

OpenGL: set image in a texture

Here is my code:
-(void) mergeWithImage:(UIImage*) image{
if(image==nil){
return;
}
glPushMatrix();
glColor4f(1.0, 1.0, 1.0, 1.0); // color components are specified in the [0, 1] range
glBlendFunc(GL_ONE, GL_ONE_MINUS_SRC_ALPHA);
glGenTextures(1, &stampTexture);
glBindTexture(GL_TEXTURE_2D, stampTexture);
glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MIN_FILTER,GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MAG_FILTER,GL_LINEAR);
GLuint imgwidth = CGImageGetWidth(image.CGImage);
GLuint imgheight = CGImageGetHeight(image.CGImage);
CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
void *imageData = malloc( imgheight * imgwidth * 4 );
CGContextRef context2 = CGBitmapContextCreate( imageData, imgwidth, imgheight, 8, 4 * imgwidth, colorSpace, kCGImageAlphaPremultipliedLast | kCGBitmapByteOrder32Big );
CGContextTranslateCTM (context2, 0, imgheight);
CGContextScaleCTM (context2, 1.0, -1.0);
CGColorSpaceRelease( colorSpace );
CGContextClearRect( context2, CGRectMake( 0, 0, imgwidth, imgheight ) );
CGContextTranslateCTM( context2, 0, imgheight - imgheight );
CGContextDrawImage( context2, CGRectMake( 0, 0, imgwidth, imgheight ), image.CGImage );
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, imgwidth, imgheight, 0, GL_RGBA, GL_UNSIGNED_BYTE, imageData);
CGContextRelease(context2);
free(imageData);
static const GLfloat texCoords[] = {
0.0, 1.0,
1.0, 1.0,
0.0, 0.0,
1.0, 0.0
};
glEnableClientState(GL_TEXTURE_COORD_ARRAY);
glTexCoordPointer(2, GL_FLOAT, 0, texCoords);
glEnableClientState(GL_VERTEX_ARRAY);
glEnableClientState(GL_NORMAL_ARRAY);
glEnableClientState(GL_TEXTURE_COORD_ARRAY);
/*
These arrays would need to be changed if the size of the paint view changes. You must make sure that all image input is 64x64, 256x256, 512x512 or 1024x1024. In this example we are using 512, but you can use 1024 as follows:
use the numbers:
{
0.0, height, 0.0,
1024, height, 0.0,
0.0, height-1024, 0.0,
1024, height-1024, 0.0
}
*/
static const GLfloat vertices[] = {
0.0, 1024, 0.0,
1024, 1024, 0.0,
0.0, 0, 0.0,
1024, 0, 0.0
};
static const GLfloat normals[] = {
0.0, 0.0, 1024,
0.0, 0.0, 1024,
0.0, 0.0, 1024,
0.0, 0.0, 1024
};
glBindTexture(GL_TEXTURE_2D, stampTexture);
glVertexPointer(3, GL_FLOAT, 0, vertices);
glNormalPointer(GL_FLOAT, 0, normals);
glTexCoordPointer(2, GL_FLOAT, 0, texCoords);
glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);
glPopMatrix();
glDeleteTextures( 1, &stampTexture );
//set back the brush
glBindTexture(GL_TEXTURE_2D, brushTexture);
glColor4f(lastSetRed,
lastSetGreen,
lastSetBlue,
1.0);
// Display the buffer
glBindRenderbufferOES(GL_RENDERBUFFER_OES, viewRenderbuffer);
[context presentRenderbuffer:GL_RENDERBUFFER_OES];
It works fine if my image is 1024x1024, but if I have an image with size 1024x768, what values should I assign to the vertices and normals?
"what values should I assign to the vertices and normals?"
Those don't matter, as they do not (directly) interfere with texture coordinates. BTW, if you do not want to apply lighting you don't need normals at all; if you do apply lighting, the normals must be unit length.
Texture coordinates for regular textures are always in the range [0;1] no matter what the aspect ratio of your image is. The vertex positions should be chosen in accordance with the projection you use. You could for example use a
glOrtho(0, texture_width, 0, texture_height, …)
projection, and then your vertices would span {0, texture_width}×{0, texture_height}. There's no single definitive answer to your problem.
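As a minimal sketch of that suggestion, assuming a 1024x768 image and OpenGL ES 1.x (glOrthof is the ES variant of glOrtho; texWidth and texHeight are illustrative values, not from the question):
const GLfloat texWidth = 1024.0f, texHeight = 768.0f; // illustrative image size
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
glOrthof(0.0f, texWidth, 0.0f, texHeight, -1.0f, 1.0f); // project in texel units
glMatrixMode(GL_MODELVIEW); // switch back before drawing
const GLfloat quad[] = {
0.0f, texHeight, 0.0f, // top left
texWidth, texHeight, 0.0f, // top right
0.0f, 0.0f, 0.0f, // bottom left
texWidth, 0.0f, 0.0f // bottom right
};
// texture coordinates stay in [0;1] no matter what the image aspect ratio is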
Add these lines when you bind your texture:
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE); //IMPORTANT FOR NON POWER OF 2 TEXTURES
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);

Render texture with the right ratio in opengl

I'm trying to render a texture while keeping its aspect ratio in OpenGL ES 2.0.
I suppose I need to calculate the ratio and change the frustum dimensions; is that the right way?
Right now I'm rendering with a frustum of -1,1 for width and height, so the texture gets stretched when it doesn't match the screen size.
How do I render a texture of, say, width = 400 and height = 800 at that ratio?
This is my code:
@Override
public void onDrawFrame(GL10 glUnused) {
GLES20.glClearColor(0.9f, 0.9f, 0.9f, .5f);
GLES20.glClear(GLES20.GL_DEPTH_BUFFER_BIT | GLES20.GL_COLOR_BUFFER_BIT);
GLES20.glEnable( GLES20.GL_BLEND );
GLES20.glBlendFunc( GLES20.GL_SRC_ALPHA, GLES20.GL_ONE_MINUS_SRC_ALPHA );
Matrix.setIdentityM(mMMatrix, 0);
Matrix.rotateM(mMMatrix, 0, 270.0f, 0, 0, 1);
Matrix.multiplyMM(mMVPMatrix, 0, mVMatrix, 0, mMMatrix, 0);
Matrix.multiplyMM(mMVPMatrix, 0, mProjMatrix, 0, mMVPMatrix, 0);
GLES20.glUseProgram(programTextured);
GLES20.glActiveTexture(GLES20.GL_TEXTURE0);
GLES20.glBindTexture(GLES20.GL_TEXTURE_2D, mTextureID);
sqTex.getVertexBuffer().position(sqTex.VERT_OFFSET);
GLES20.glVertexAttribPointer(
GLES20.glGetAttribLocation(programTextured, "aPosition"), 3,
GLES20.GL_FLOAT, false, 5 * 4, sqTex.getVertexBuffer());
GLES20.glEnableVertexAttribArray(GLES20.glGetAttribLocation(programTextured, "aPosition"));
sqTex.getVertexBuffer().position(sqTex.TEXT_OFFSET);
GLES20.glVertexAttribPointer(
GLES20.glGetAttribLocation(programTextured, "aTextureCoord"), 2,
GLES20.GL_FLOAT, false, 5 * 4, sqTex.getVertexBuffer());
GLES20.glEnableVertexAttribArray(GLES20.glGetAttribLocation(programTextured, "aTextureCoord"));
GLES20.glUniformMatrix4fv(
GLES20.glGetUniformLocation(programTextured, "uMVPMatrix"), 1, false,
mMVPMatrix, 0);
GLES20.glVertexAttrib4f(
GLES20.glGetAttribLocation(programTextured, "acolor"),
.6f,0.3f,0.9f,.5f);
GLES20.glDrawElements(GLES20.GL_TRIANGLES, 6, GLES20.GL_UNSIGNED_SHORT, sqTex.getIndexBuffer());
GLES20.glDisableVertexAttribArray(GLES20.glGetAttribLocation(programTextured, "aTextureCoord"));
}
@Override
public void onSurfaceChanged(GL10 glUnused, int width, int height) {
GLES20.glViewport(0, 0, width, height);
float ratio = (float) width / height;
//flaot ratio2 = (float)height /
//Matrix.frustumM(mProjMatrix, 0, -ratio, ratio, -1, 1, 3f, 17);
Matrix.frustumM(mProjMatrix, 0, -1, 1, -1, 1, 3, 17);
}
int mTextureID;
@Override
public void onSurfaceCreated(GL10 glUnused, EGLConfig config) {
program = createProgram(mVertexShader, mFragmentShader);
programTextured = createProgram(mVertexShaderTextured, mFragmentShaderTextured);
Matrix.setLookAtM(mVMatrix, 0, 0, 0, -5, 0f, 0f, 0f, 0f, 1.0f, 0.0f);
int[] textures = new int[1];
GLES20.glGenTextures(1, textures, 0);
mTextureID = textures[0];
GLES20.glBindTexture(GLES20.GL_TEXTURE_2D, mTextureID);
GLES20.glTexParameterf(GLES20.GL_TEXTURE_2D, GLES20.GL_TEXTURE_MIN_FILTER,
GLES20.GL_NEAREST);
GLES20.glTexParameterf(GLES20.GL_TEXTURE_2D,
GLES20.GL_TEXTURE_MAG_FILTER,
GLES20.GL_LINEAR);
GLES20.glTexParameteri(GLES20.GL_TEXTURE_2D, GLES20.GL_TEXTURE_WRAP_S,
GLES20.GL_REPEAT);
GLES20.glTexParameteri(GLES20.GL_TEXTURE_2D, GLES20.GL_TEXTURE_WRAP_T,
GLES20.GL_REPEAT);
InputStream is = mContext.getResources()
.openRawResource(R.drawable.image0003);
Bitmap bitmap;
try {
bitmap = BitmapFactory.decodeStream(is);
} finally {
try {
is.close();
} catch(IOException e) {
// Ignore.
}
}
GLUtils.texImage2D(GLES20.GL_TEXTURE_2D, 0, bitmap, 0);
bitmap.recycle();
}
I would leave the frustum alone and use a scale instead:
You need two factors: screenRatio = screenWidth / screenHeight and imageRatio = imageWidth / imageHeight (computed with float division).
Now if imageRatio < screenRatio you will need to scale the quad down in width:
Matrix.scaleM(mMMatrix, 0, imageRatio / screenRatio, 1f, 1f);
Or if imageRatio > screenRatio you will need to scale it down in height:
Matrix.scaleM(mMMatrix, 0, 1f, screenRatio / imageRatio, 1f);

How to adjust/remove toolbar bounding rectangle

I have created my toolbar in the application with only 4 buttons; each button is 80 by 64 pixels. The whole bitmap is 320 by 64.
const int numButtons = 4;
const SIZE bitmapSize={80,64};
HIMAGELIST hImageList = ImageList_Create(bitmapSize.cx, bitmapSize.cy, ILC_COLOR16 | ILC_MASK,numButtons, 0);
HBITMAP hBitmap = LoadBitmap(hInst, MAKEINTRESOURCE(IDB_HOME));
int iImageList = ImageList_AddMasked(hImageList, hBitmap, 0);
SendMessage(hWndToolbar, TB_SETIMAGELIST, 0, (LPARAM)hImageList);
TBBUTTON tbButtons[numButtons] =
{
{ 0, WM_BBBB1, TBSTATE_ENABLED, TBSTYLE_BUTTON, {0}, 0, 0},
{ 1, WM_BBBB2, TBSTATE_ENABLED, TBSTYLE_BUTTON, {0}, 0, 0},
{ 2, WM_BBBB3, TBSTATE_ENABLED, TBSTYLE_BUTTON, {0}, 0, 0},
{ 3, WM_BBBB4, TBSTATE_ENABLED, TBSTYLE_BUTTON, {0}, 0, 0}
};
// Add buttons.
SendMessage(hWndToolbar, TB_BUTTONSTRUCTSIZE, (WPARAM)sizeof(TBBUTTON), 0);
SendMessage(hWndToolbar, TB_ADDBUTTONS,(WPARAM)numButtons,(LPARAM)&tbButtons);
SendMessage(hWndToolbar, TB_AUTOSIZE, 0, 0);
RECT _tbRect;
SendMessage(hWndToolbar, TB_GETITEMRECT, 0, (LPARAM)&_tbRect);//0,0,87,70
It says the button is 87 pixels wide and 70 high. My bitmap already has its own size, and I don't want the control to add a bounding rectangle around the toolbar buttons; how can I do that?
Thank you in advance!
Try to call
SendMessage(hWndToolbar, TB_SETBUTTONSIZE, 0, (LPARAM)MAKELONG(80, 64)); // or whatever size you want
after TB_ADDBUTTONS.
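If the extra 7 and 6 pixels come from the toolbar's default padding, a further sketch (an assumption, not part of the answer above) is to set the bitmap size and remove the padding as well:
SendMessage(hWndToolbar, TB_SETBITMAPSIZE, 0, MAKELPARAM(80, 64)); // exact image size
SendMessage(hWndToolbar, TB_SETBUTTONSIZE, 0, MAKELPARAM(80, 64)); // exact button size
SendMessage(hWndToolbar, TB_SETPADDING, 0, MAKELPARAM(0, 0)); // no padding around the image
SendMessage(hWndToolbar, TB_AUTOSIZE, 0, 0);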

iOS OpenGL ES - Rotate camera around the scene

This is my code:
- (void)drawView {
// Define square for the flower's head (vertices)
const GLfloat flowerHead[] = {
-1.0, 1.0, 1.0, // top left
-1.0, -1.0, 1.0, // bottom left
1.0, -1.0, 1.0, // bottom right
1.0, 1.0, 1.0 // top right
};
// Define the texture coordinates --> same order as the flowerHead vertices
const GLshort flowerHeadTextureCoord[] = {
0, 1, // top left
0, 0, // bottom left
1, 0, // bottom right
1, 1 // top right
};
rota = 1.0;
[EAGLContext setCurrentContext:context];
glBindFramebufferOES(GL_FRAMEBUFFER_OES, viewFramebuffer);
glViewport(0, 0, backingWidth, backingHeight);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glMatrixMode(GL_MODELVIEW);
glTexCoordPointer(2, GL_SHORT, 0, flowerHeadTextureCoord);
glEnableClientState(GL_TEXTURE_COORD_ARRAY);
// flower #1
glPushMatrix();
{
glEnable(GL_ALPHA_TEST); // enable alpha -> transparency state
glTranslatef(0.0, 0.0, -6.0); // positioning the flower
glRotatef(55.0, 1.0, 0.0, 0.0); // rotate flower on (x) axis
glVertexPointer(3, GL_FLOAT, 0, flowerHead); // pointer to the flowerHead vertices array
glEnableClientState(GL_VERTEX_ARRAY); // enable the vertex array state
// Draw the front face
// Bind texture
glBindTexture(GL_TEXTURE_2D, textures[0]);
glDrawArrays(GL_TRIANGLE_FAN, 0, 4);
}
glPopMatrix();
glBindRenderbufferOES(GL_RENDERBUFFER_OES, viewRenderbuffer);
[context presentRenderbuffer:GL_RENDERBUFFER_OES];
[self checkGLError:NO];
}
Now this is my drawing code. All I need is to rotate the camera around the object (and not the object itself).
My setupView function:
const GLfloat zNear = 0.1, zFar = 1000.0, fieldOfView = 60.0;
GLfloat size;
// Set the general polygon properties - for transparency!!!
glBlendFunc(GL_SRC_ALPHA,GL_ONE_MINUS_SRC_ALPHA);
glAlphaFunc(GL_GREATER,0.1f);
glEnable(GL_DEPTH_TEST);
glMatrixMode(GL_PROJECTION);
size = zNear * tanf(DEGREES_TO_RADIANS(fieldOfView) / 2.0);
// This give us the size of the iPhone display
CGRect rect = self.bounds;
glFrustumf(-size, size, -size / (rect.size.width / rect.size.height), size / (rect.size.width / rect.size.height), zNear, zFar);
glViewport(0, 0, rect.size.width, rect.size.height);
glClearColor(0.0f, 0.0f, 0.0f, 0.5f);
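A minimal sketch of one common approach, assuming OpenGL ES 1.1 and a hypothetical cameraAngle variable updated each frame: apply the camera transform at the top of the modelview stack, before any per-object transforms, so the viewpoint orbits the scene instead of the object rotating:
// Hedged sketch: cameraAngle (degrees) is a hypothetical variable, not in the posted code
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
glTranslatef(0.0f, 0.0f, -6.0f); // keep the viewpoint 6 units away from the scene
glRotatef(cameraAngle, 0.0f, 1.0f, 0.0f); // orbit around the scene's Y axis
// then draw each flower inside its own glPushMatrix()/glPopMatrix() pair,
// using only its local transforms (no extra -6.0 translation per object)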
