I want to draw a simple cube made of lines in OpenGL with Qt 5.5.
The problem is that when I draw a line that spans two Z coordinates (from z = -1.0f to z = 1.0f), the line looks as if it runs off to z = infinity.
Vertex stackVerts[] = {
{QVector3D(-1.0f, -1.0f, 1.0f), // 0
QVector3D(+1.0f, +0.0f, +0.0f)}, // Colour
{QVector3D(1.0f, -1.0f, 1.0f), // 1
QVector3D(+0.0f, +1.0f, +0.0f)}, // Colour
{QVector3D(-1.0f, 1.0f, 1.0f), // 2
QVector3D(+0.0f, +0.0f, +1.0f)}, // Colour
{QVector3D(1.0f, 1.0f, 1.0f), // 3
QVector3D(+1.0f, +1.0f, +1.0f)}, // Colour
{QVector3D(-1.0f, -1.0f, -1.0f), // 4
QVector3D(+1.0f, +0.0f, +0.0f)}, // Colour
{QVector3D(1.0f, -1.0f, -1.0f), // 5
QVector3D(+0.0f, +1.0f, +0.0f)}, // Colour
{QVector3D(-1.0f, 1.0f, -1.0f), // 6
QVector3D(+0.0f, +0.0f, +1.0f)}, // Colour
{QVector3D(1.0f, 1.0f, -1.0f), // 7
QVector3D(+1.0f, +1.0f, +1.0f)}, // Colour
};
GLushort stackIndices[] = {
0,1,
1,3,
3,2,
2,0,
0,4,
};
...
glDrawElements(GL_LINES, 10, GL_UNSIGNED_SHORT, 0);
The matrix is set as a uniform:
_transMatrix.setToIdentity();
_transMatrix.perspective(60.0f, (float)(width()/height()), 0.1f, 100.0f);
_transMatrix.translate(0, 0, -3);
_transMatrix.rotate(64,0,1,0);
_program.setUniformValue("transMatrix", _transMatrix);
The Vertex.glsl:
#ifdef GL_ES
// Set default precision to medium
precision mediump int;
precision mediump float;
#endif
attribute vec3 a_position;
attribute vec3 a_color;
uniform mat4 transMatrix;
varying vec3 theColor;
void main()
{
vec4 v = vec4 (a_position, 1.0);
gl_Position = v * transMatrix;
theColor = a_color;
}
GL_DEPTH_TEST is enabled, and if I change the z translation to -1000, the line still appears to run off to infinity.
Screenshot: translate z = -3
Screenshot: translate z = -1000
I am trying to render the ArFrame into a render target using OpenGL ES 3.2. The ArCore example shows how to do this with GLES2, but in GLES3 that extension is not available. I have now found the extension GL_OES_EGL_image_external_essl3, which allows using samplerExternalOES, so I have included gl3.h and gl2ext.h.
The creation of the texture is similar to the ArCore example:
glGenTextures(1, &g_TextureID);
glBindTexture(GL_TEXTURE_EXTERNAL_OES, g_TextureID);
glTexParameteri(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
ArSession_setCameraTextureName(m_pARSession, g_TextureID);
Inside the rendering loop:
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_EXTERNAL_OES, g_TextureID);
My shaders look like this:
constexpr char kVertexShader[] = R"(
#version 320 es
layout(location = 0) in vec2 in_UV;
layout(location = 0) out vec2 out_UV;
void main()
{
vec2 Vertices[4];
Vertices[0] = vec2(-1.0f, -1.0f);
Vertices[1] = vec2( 1.0f, -1.0f);
Vertices[2] = vec2(-1.0f, 1.0f);
Vertices[3] = vec2( 1.0f, 1.0f);
out_UV = in_UV;
gl_Position = vec4(Vertices[gl_VertexID], 0.0f, 1.0f);
}
)";
constexpr char kFragmentShader[] = R"(
#version 320 es
#extension GL_OES_EGL_image_external_essl3 : require
precision mediump float;
layout(location = 0) uniform samplerExternalOES in_ExtOESTexture;
layout(location = 0) in vec2 in_UV;
layout(location = 0) out vec4 out_Output;
void main()
{
out_Output = texture(in_ExtOESTexture, in_UV); //vec4(in_UV, 0, 1);
}
)";
If I render only the UVs with out_Output = vec4(in_UV, 0, 1);, the result is a perfectly fine UV gradient on the screen.
But when I sample the texture, everything is black. The size of the texture (queried with textureSize(in_ExtOESTexture, 0)) is zero in both dimensions.
Any idea how to solve this problem?
The problem was that "arcore-preview2.apk" was not installed on the device. No error is raised if this APK is missing; the screen simply stays black. Oddly, the Google examples on GitHub work without this APK. I have opened a new issue on GitHub because of this unexpected behavior.
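As a side note, here is a hedged sketch of how one might detect the missing APK up front. It assumes the current ArCore NDK API (arcore_c_api.h with ArCoreApk_checkAvailability / ArCoreApk_requestInstall), which may postdate the preview SDK used here; env, application_context and activity stand for the usual JNI handles.
// Hypothetical guard: ask ArCore whether the APK is installed before
// creating the session, instead of silently rendering a black screen.
ArAvailability availability = AR_AVAILABILITY_UNKNOWN_ERROR;
ArCoreApk_checkAvailability(env, application_context, &availability);
if (availability != AR_AVAILABILITY_SUPPORTED_INSTALLED)
{
    // Prompt the user to install or update the ArCore APK.
    ArInstallStatus install_status;
    ArCoreApk_requestInstall(env, activity, /*user_requested_install=*/true, &install_status);
    return;
}
ArSession_create(env, application_context, &m_pARSession);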
I was trying to add some basic lighting to an animating cube, but for some reason it just displays a blank colored window. I'm totally lost as to where the problem is; the code worked fine before I added the lighting. The first thing I did to narrow the problem down was to add
gl_FragColor = vec4(1.0, 1.0, 0.0, 1.0);
to the end of the fragment shader, in order to bypass any lighting calculation that might go wrong. But it only changed the color shown in the window. I'm not sure whether the problem is the camera settings, the normal calculation, or something else. Here is the OBJ file data that I'm using to load the cube:
v -1.0 -1.0 1.0
v 1.0 -1.0 1.0
v 1.0 1.0 1.0
v -1.0 1.0 1.0
v -1.0 -1.0 -1.0
v 1.0 -1.0 -1.0
v 1.0 1.0 -1.0
v -1.0 1.0 -1.0
vn 0.577350 0.577350 -0.577350
vn 0.577350 -0.577350 -0.577350
vn -0.577350 -0.577350 -0.577350
vn -0.577350 0.577350 -0.577350
vn 0.577350 0.577350 0.577350
vn 0.577350 -0.577350 0.577350
vn -0.577350 -0.577350 0.577350
vn -0.577350 0.577350 0.577350
f 0 1 2
f 2 3 0
f 1 5 6
f 6 2 1
f 7 6 5
f 5 4 7
f 4 0 3
f 3 7 4
f 4 5 1
f 1 0 4
f 3 2 6
f 6 7 3
These are the camera settings and the matrix calculations:
float move = -4.0f;// sinf(SDL_GetTicks() / 1000.0 * (2 * 3.14) / 5); // -1<->+1 every 5 seconds
float angle = 0.0;// SDL_GetTicks() / 1000.0 * 45; // 45° per second
glm::vec3 axis_y(0, 1, 0);
glm::mat4 translation = glm::translate(glm::mat4(1.0f), glm::vec3(0, 0, move));
glm::mat4 rotation = glm::rotate(glm::mat4(1.0f), glm::radians(angle), axis_y);
glm::mat4 scaling = glm::scale(glm::mat4(1.0f), glm::vec3(1.0f, 1.0f, 1.0f));
glm::mat4 model = translation * rotation * scaling;
glm::mat4 view = glm::lookAt(glm::vec3(0.0, 2.0, 0.0), glm::vec3(0.0, 0.0, -4.0), glm::vec3(0.0, 1.0, 0.0));
glm::mat4 projection = glm::perspective(45.0f, 1.0f*screen_width / screen_height, 0.1f, 10.0f);
glm::mat4 mvp = projection * view * model;
glm::mat4 mv = view * model;
glm::mat4 normalMatrix = glm::transpose(glm::inverse(mv));
glUniformMatrix4fv(uniform_mvp, 1, GL_FALSE, glm::value_ptr(mv));
glUniformMatrix4fv(uniform_mvp, 1, GL_FALSE, glm::value_ptr(mvp));
glUniformMatrix4fv(uniform_mvp, 1, GL_FALSE, glm::value_ptr(normalMatrix));
and the vertex shader includes:
gl_Position = mvp * vec4(positions, fade);
I have defined the lighting properties as follows:
float shininess = 5.0f;
glm::vec3 lightDirection = glm::vec3(0.0f, 1.0f, 2.0f);
glm::vec4 lightAmbient = glm::vec4(1, 0.83, 0.75, 1.0f);
glm::vec4 lightDiffuse = glm::vec4(1.0f, 1.0f, 0.4f, 1.0f);
glm::vec4 lightSpecular = glm::vec4(1.0f, 1.0f, 1.0f, 1.0f);
glm::vec4 materialAmbient = glm::vec4(1, 0.83, 0.75, 1.0f);
glm::vec4 materialDiffuse = glm::vec4(1.0f, 1.0f, 0.4f, 1.0f);
glm::vec4 materialSpecular = glm::vec4(1.0f, 1.0f, 1.0f, 1.0f);
glUniform1f(uniform_shininess, shininess);
glUniform3fv(uniform_lightDirection, 1, glm::value_ptr(lightDirection));
glUniform4fv(uniform_lightAmbient, 1, glm::value_ptr(lightAmbient));
glUniform4fv(uniform_lightDiffuse, 1, glm::value_ptr(lightDiffuse));
glUniform4fv(uniform_lightSpecular, 1, glm::value_ptr(lightSpecular));
glUniform4fv(uniform_materialAmbient, 1, glm::value_ptr(materialAmbient));
glUniform4fv(uniform_materialDiffuse, 1, glm::value_ptr(materialDiffuse));
glUniform4fv(uniform_materialSpecular, 1, glm::value_ptr(materialSpecular));
and the fragment shader is:
uniform float shininess;
uniform vec3 lightDirection;
uniform vec4 lightAmbient;
uniform vec4 lightDiffuse;
uniform vec4 lightSpecular;
uniform vec4 materialAmbient;
uniform vec4 materialDiffuse;
uniform vec4 materialSpecular;
varying vec3 vNormal;
varying vec3 vEyeVec;
void main(void){
vec4 Ia = lightAmbient * materialAmbient;
vec4 Id = vec4(0.0,0.0,0.0,1.0);
vec4 Is = vec4(0.0,0.0,0.0,1.0);
vec3 L = normalize(lightDirection);
vec3 N = normalize(vNormal);
float lambertTerm = dot(N,-L);
if(lambertTerm > 0.0){
Id = lightDiffuse * materialDiffuse * lambertTerm;
vec3 E = normalize(vEyeVec);
vec3 R = reflect(L, N);
float specular = pow( max(dot(R, E), 0.0), shininess);
Is = lightSpecular * materialSpecular * specular;
}
vec4 finalColor = Ia + Id + Is;
finalColor.a = 1.0;
gl_FragColor = finalColor;
//gl_FragColor = vec4(1.0, 1.0, 0.0, 1.0);
}
The two varyings come from the vertex shader:
vNormal = vec3(normalMatrix * vec4(normals, 1.0));
vec4 vertex = mv * vec4(positions, 1.0);
vEyeVec = -vec3(vertex.xyz);
My reasoning was that if the normals or the lighting calculations went wrong, the final line in the fragment shader, gl_FragColor = vec4(1.0, 1.0, 0.0, 1.0);, should override them anyway. However, that didn't help. I also tried different camera positions, but that made no difference.
I am writing a shader to draw lines with a configurable width, as an alternative to glLineWidth, which doesn't work above 1.0 with ANGLE, and I'd like my lines to have the same thickness on Windows. For now, though, I am running on desktop OpenGL.
The vertex shader source is as follows
attribute vec3 a_startPosition;
attribute vec3 a_endPosition;
attribute float a_choice;
attribute float a_dir;
uniform mat4 u_mvpMatrix;
uniform float u_width;
uniform vec2 u_viewDims;
void main()
{
vec4 start = u_mvpMatrix*vec4(a_startPosition, 1.0);
vec4 end = u_mvpMatrix*vec4(a_endPosition, 1.0);
//gl_Vertex;
vec2 slope = normalize(end.xy - start.xy);
slope = vec2(slope.y, -slope.x);
vec2 scale = u_width/u_viewDims;
if (a_choice == 0.0)
gl_Position = vec4(start.xy + a_dir*scale*slope.xy*start.w, start.zw);
else
gl_Position = vec4(end.xy + a_dir*scale*slope.xy*end.w, end.zw);
}
Note that gl_Vertex is commented out and therefore not referenced.
int width, height;
glfwGetFramebufferSize(m_window, &width, &height);
glUseProgram(m_shaders[Shader_WideLine]->id());
GLint shaderid = m_shaders[Shader_WideLine]->id();
GLint coloc = glGetUniformLocation(shaderid, "Color");
GLint dimloc = glGetUniformLocation(shaderid, "u_viewDims");
GLint widthloc = glGetUniformLocation(shaderid, "u_width");
GLint mvploc = glGetUniformLocation(shaderid, "u_mvpMatrix");
GLint modelviewloc = glGetUniformLocation(shaderid, "u_modelview");
GLint projloc = glGetUniformLocation(shaderid, "u_projection");
GLint dirloc = glGetAttribLocation(shaderid, "a_dir");
GLint startloc = glGetAttribLocation(shaderid, "a_startPosition");
GLint endloc = glGetAttribLocation(shaderid, "a_endPosition");
GLint chloc = glGetAttribLocation(shaderid, "a_choice");
//////////
//Set Uniforms
//////////
glUniform1f(widthloc, 10);
glUniform2f(dimloc, width, height);
glUniform4f(coloc, 0.101f, 0.558f, 0.109f, 1.f);
glm::mat4 modelview;
glm::mat4 projection;
glGetFloatv(GL_MODELVIEW_MATRIX, glm::value_ptr(modelview));
glGetFloatv(GL_PROJECTION_MATRIX, glm::value_ptr(projection));
glm::mat4 mvp = projection * modelview;
glUniformMatrix4fv(mvploc, 1, GL_FALSE, glm::value_ptr(mvp));
int numpts = 4;
GLfloat v[4][3] = {
{0,1,0},
{0,0,0},
{1,0,0},
{1,1,0}
};
//////////
// Draw (attributes)
//////////
glBegin( GL_TRIANGLE_STRIP );
glNormal3d(0.0, 0.0, 1.0);
for(int i=0; i<numpts-1; i++)
{
glVertexAttrib3fv(startloc, v[i]);
glVertexAttrib3fv(endloc, v[i+1]);
glVertexAttrib1f(chloc, 0);
glVertexAttrib1f(dirloc, -1.0f);
glVertex3d(0,0,0);
glVertexAttrib1f(dirloc, 1.0f);
glVertex3d(0,0,0);
glVertexAttrib1f(chloc, -1);
glVertexAttrib1f(dirloc, -1.0f);
glVertex3d(0,0,0);
glVertexAttrib1f(dirloc, 1.0f);
glVertex3d(0,0,0);
}
glEnd();
glUseProgram(0);
So I am trying to draw lines from (0,1,0) to (0,0,0) to (1,0,0) to (1,1,0) with a width of 10 pixels. In the following images, a 2x2x2 wire cube centered on the origin is drawn for reference.
When called as presented, I get this unexpected result:
If I uncomment gl_Vertex; in the shader, so that it is referenced but still unused, I get this expected result:
What could be the reason for this behavior?
gl_ModelViewProjectionMatrix is not a valid ES 2.0 vertex shader built-in variable.
You'll have to pass your MVP in via a uniform.
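A minimal sketch of that pattern (a_position is an illustrative attribute name; u_mvpMatrix, shaderid and the glm matrices match the code above):
// ES 2.0-style vertex shader: the MVP arrives as a uniform instead of the
// desktop-only gl_ModelViewProjectionMatrix built-in.
attribute vec3 a_position;
uniform mat4 u_mvpMatrix;
void main()
{
    gl_Position = u_mvpMatrix * vec4(a_position, 1.0);
}
and on the application side:
// Build the matrix yourself with glm and upload it before drawing.
glm::mat4 mvp = projection * modelview;                        // column-major glm matrices
GLint mvploc = glGetUniformLocation(shaderid, "u_mvpMatrix");
glUniformMatrix4fv(mvploc, 1, GL_FALSE, glm::value_ptr(mvp));  // glm is already column-major, so no transpose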
Can I bind a vec2 array to a vec4 variable in the shading language?
For example, in the code below the variable "position" is a vec4, and I tried to bind the variable "squareVertices" to it, but squareVertices is a vec2 array containing only (x, y) coordinates. Do the remaining components default to (x, y, z, w) = (x, y, 0, 1)?
My vertex shader and attribute binding code:
attribute vec4 position;
attribute vec2 textureCoordinate;
varying vec2 coordinate;
void main()
{
gl_Position = position;
coordinate = textureCoordinate;
}
glBindAttribLocation(gProgram, ATTRIB_VERTEX, "position");
glBindAttribLocation(gProgram, ATTRIB_TEXTURE, "textureCoordinate");
// bind attribute values
glVertexAttribPointer(ATTRIB_VERTEX, 2, GL_FLOAT, 0, 0, squareVertices);
glEnableVertexAttribArray(ATTRIB_VERTEX);
glVertexAttribPointer(ATTRIB_TEXTURE, 2, GL_FLOAT, 0, 0, coordVertices);
glEnableVertexAttribArray(ATTRIB_TEXTURE);
// squareVertices definition
static const GLfloat squareVertices[] = {
-1.0f, -1.0f,
0.0f, -1.0f,
-1.0f, 0.0f,
0.0f, 0.0f,
};
Yes, this is legal. And you're correct that the default values of z and w are 0 and 1, respectively. This is covered in section 2.7 of the OpenGL ES 2.0 specification.
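As a quick illustration of what that expansion means for the code above (the comments are mine):
// With size = 2 in glVertexAttribPointer, GL reads two floats per vertex
// and fills the missing components of the vec4 attribute with 0 and 1.
static const GLfloat squareVertices[] = {
    -1.0f, -1.0f,   // arrives in "position" as vec4(-1.0, -1.0, 0.0, 1.0)
     0.0f, -1.0f,   // arrives in "position" as vec4( 0.0, -1.0, 0.0, 1.0)
    -1.0f,  0.0f,   // arrives in "position" as vec4(-1.0,  0.0, 0.0, 1.0)
     0.0f,  0.0f,   // arrives in "position" as vec4( 0.0,  0.0, 0.0, 1.0)
};
glVertexAttribPointer(ATTRIB_VERTEX, 2, GL_FLOAT, GL_FALSE, 0, squareVertices);
glEnableVertexAttribArray(ATTRIB_VERTEX);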
How do I move an object in the world via glUniform*?
I tried glUniform3f(_positionSlot, 6.0f, 6.0f, -2.0f);
but my object does not move.
shader.vs:
attribute vec4 Position; // 1
attribute vec4 SourceColor; // 2
varying vec4 DestinationColor; // 3
uniform mat4 Projection;
uniform mat4 Modelview;
void main(void) { // 4
DestinationColor = SourceColor; // 5
gl_Position = Projection * Modelview *Position;
}
Render:
- (void)render:(CADisplayLink*)displayLink {
glClearColor(0, 104.0/255.0, 55.0/255.0, 1.0);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glEnable(GL_DEPTH_TEST);
glBindBuffer(GL_ARRAY_BUFFER, vertexBuffer);
CC3GLMatrix *projection = [CC3GLMatrix matrix];
float h = 4.0f * self.frame.size.height / self.frame.size.width;
[projection populateFromFrustumLeft:-2 andRight:2 andBottom:-h/2 andTop:h/2 andNear:4 andFar:10];
glUniformMatrix4fv(_projectionUniform, 1, 0, projection.glMatrix);
CC3GLMatrix *modelView = [CC3GLMatrix matrix];
[modelView populateFromTranslation:CC3VectorMake(sin(CACurrentMediaTime()), 0, -7)];
_currentRotation += displayLink.duration * 90;
[modelView rotateBy:CC3VectorMake(_currentRotation, _currentRotation, 0)];
glUniformMatrix4fv(_modelViewUniform, 1, 0, modelView.glMatrix);
// 1
glViewport(0, 0, self.frame.size.width, self.frame.size.height);
// 2
glVertexAttribPointer(_positionSlot, 3, GL_FLOAT, GL_FALSE, 0, 0);
glVertexAttribPointer(_colorSlot, 4, GL_FLOAT, GL_FALSE, sizeof(Vertex), (GLvoid*) (sizeof(float) * 3));
glUniform4f(_colorSlot, 1.0f, 0.0f, 0.0f,1);
glDrawElements(GL_TRIANGLE_STRIP, 14, GL_UNSIGNED_SHORT, indices);
glUniform3f(_modelViewUniform, 6.0f, 6.0f, -2.0f);
glBindBuffer(GL_ARRAY_BUFFER, vertexBuffer2);
glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);
[_context presentRenderbuffer:GL_RENDERBUFFER];
}
EDIT:
_positionLocation=glGetAttribLocation(programHandle, "Translation");
glUniform3f(_positionLocation, -1.0f, 6.0f, -2.0f);
glDrawElements(GL_TRIANGLE_STRIP, 14, GL_UNSIGNED_SHORT, indices);
glUniform3f(_positionLocation, 1.0f, 6.0f, -2.0f);
glBindBuffer(GL_ARRAY_BUFFER, vertexBuffer2);
glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);
If I draw only the cube or only the square, everything is fine. But it seems that both of them are rendered at the same position, so I only see the cube. It seems this method doesn't work.
gl_Position = Projection * Modelview * Position;
gl_Position is the vertex shader output that receives the transformed position of each vertex, not of the whole model. The "position slot" is an attribute; attributes pass per-vertex data, namely each vertex position, to the shader. You cannot set an attribute or varying with glUniform*. You need an additional uniform (let's call it "Translation"):
attribute vec4 Position; // 1
attribute vec4 SourceColor; // 2
varying vec4 DestinationColor; // 3
uniform mat4 Projection;
uniform mat4 Modelview;
uniform vec4 Translation;
void main(void) { // 4
DestinationColor = SourceColor; // 5
gl_Position = Projection * Modelview * (Position + Translation);
}
which you can then set with something like
glUniform4f(translationLocation, 6.0f, 6.0f, -2.0f, 0.0f);
Since Translation is a vec4, the matching call is glUniform4f, not glUniform3f. BTW: those are not called "slots"; they're called "locations".
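A minimal usage sketch (the offsets and variable names are illustrative, not from the post): look the location up once after linking, then change the uniform between the two draw calls so the cube and the square land in different places.
// Query the uniform location once after glLinkProgram.
GLint translationLoc = glGetUniformLocation(programHandle, "Translation");
// Per frame: draw the cube at one offset...
glUniform4f(translationLoc, 6.0f, 6.0f, -2.0f, 0.0f);
glDrawElements(GL_TRIANGLE_STRIP, 14, GL_UNSIGNED_SHORT, indices);
// ...then rebind the second buffer and draw the square at another offset
// (the attribute pointers for vertexBuffer2 still need to be set up as before).
glUniform4f(translationLoc, -1.0f, 0.0f, 0.0f, 0.0f);
glBindBuffer(GL_ARRAY_BUFFER, vertexBuffer2);
glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);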