Using GL_TEXTURE_EXTERNAL_OES in OpenGLES 3.x for ArCore

I'm trying to render the ArFrame into a render target using OpenGL ES 3.2. The ArCore example shows the GLES2 usage, but that extension is not available under GLES3. I have now found the GL_OES_EGL_image_external_essl3 extension, which makes samplerExternalOES usable in ESSL 3.x, so I include gl3.h and gl2ext.h.
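For reference, the includes look roughly like this, assuming the Android NDK header layout (the GL_TEXTURE_EXTERNAL_OES token itself comes from gl2ext.h):
#include <GLES3/gl3.h>    // core OpenGL ES 3.x entry points
#include <GLES2/gl2ext.h> // OES extension tokens, including GL_TEXTURE_EXTERNAL_OES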
The creation of the texture is similar to the ArCore example:
glGenTextures(1, &g_TextureID);
glBindTexture(GL_TEXTURE_EXTERNAL_OES, g_TextureID);
glTexParameteri(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
ArSession_setCameraTextureName(m_pARSession, g_TextureID);
Inside the rendering loop:
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_EXTERNAL_OES, g_TextureID);
My shaders look like this:
constexpr char kVertexShader[] = R"(
#version 320 es
layout(location = 0) in vec2 in_UV;
layout(location = 0) out vec2 out_UV;
void main()
{
vec2 Vertices[4];
Vertices[0] = vec2(-1.0f, -1.0f);
Vertices[1] = vec2( 1.0f, -1.0f);
Vertices[2] = vec2(-1.0f, 1.0f);
Vertices[3] = vec2( 1.0f, 1.0f);
out_UV = in_UV;
gl_Position = vec4(Vertices[gl_VertexID], 0.0f, 1.0f);
}
)";
constexpr char kFragmentShader[] = R"(
#version 320 es
#extension GL_OES_EGL_image_external_essl3 : require
precision mediump float;
layout(location = 0) uniform samplerExternalOES in_ExtOESTexture;
layout(location = 0) in vec2 in_UV;
layout(location = 0) out vec4 out_Output;
void main()
{
out_Output = texture(in_ExtOESTexture, in_UV); //vec4(in_UV, 0, 1);
}
)";
Rendering only the UVs with out_Output = vec4(in_UV, 0, 1);, the result is a perfect-looking UV image on the screen.
But when sampling the texture, everything is black. The size of the texture (queried with textureSize(in_ExtOESTexture, 0)) is zero in both dimensions.
Any idea how to solve this problem?

The problem was that the "arcore-preview2.apk" was not installed on the device. No error is raised if this APK is missing; the screen just stays black. Moreover, the Google examples on GitHub work without this APK. I have created a new issue on GitHub because of this unexpected behavior.
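On newer ArCore releases, the NDK's C API can report this situation before a session is created. A minimal sketch, assuming a current arcore_c_api.h (this call did not exist in the preview SDK) and that env/context are passed in from the Java side:
#include <android/log.h>
#include "arcore_c_api.h"

ArAvailability availability = AR_AVAILABILITY_UNKNOWN_ERROR;
ArCoreApk_checkAvailability(env, context, &availability);
if (availability != AR_AVAILABILITY_SUPPORTED_INSTALLED) {
    // ARCore is missing, too old, or unsupported: the camera texture is never
    // filled, so the output stays black just as described above.
    __android_log_print(ANDROID_LOG_WARN, "ArDemo",
                        "ARCore availability: %d", (int)availability);
}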

Related

QT OpenGL is drawing my Vector to infinity

I want to create a simple cube of lines in OpenGL and Qt 5.5.
The problem is that if I draw a simple line to a different Z coordinate (from z = -1.0f to z = 1.0f), the line looks like it goes to z = infinity.
Vertex stackVerts[] = {
{QVector3D(-1.0f, -1.0f, 1.0f), // 0
QVector3D(+1.0f, +0.0f, +0.0f)}, // Colour
{QVector3D(1.0f, -1.0f, 1.0f), // 1
QVector3D(+0.0f, +1.0f, +0.0f)}, // Colour
{QVector3D(-1.0f, 1.0f, 1.0f), // 2
QVector3D(+0.0f, +0.0f, +1.0f)}, // Colour
{QVector3D(1.0f, 1.0f, 1.0f), // 3
QVector3D(+1.0f, +1.0f, +1.0f)}, // Colour
{QVector3D(-1.0f, -1.0f, -1.0f), // 4
QVector3D(+1.0f, +0.0f, +0.0f)}, // Colour
{QVector3D(1.0f, -1.0f, -1.0f), // 5
QVector3D(+0.0f, +1.0f, +0.0f)}, // Colour
{QVector3D(-1.0f, 1.0f, -1.0f), // 6
QVector3D(+0.0f, +0.0f, +1.0f)}, // Colour
{QVector3D(1.0f, 1.0f, -1.0f), // 7
QVector3D(+1.0f, +1.0f, +1.0f)}, // Colour
};
GLushort stackIndices[] = {
0,1,
1,3,
3,2,
2,0,
0,4,
};
...
glDrawElements(GL_LINES, 10, GL_UNSIGNED_SHORT, 0);
The matrix is set as a uniform:
_transMatrix.setToIdentity();
_transMatrix.perspective(60.0f, (float)(width()/height()), 0.1f, 100.0f);
_transMatrix.translate(0, 0, -3);
_transMatrix.rotate(64,0,1,0);
_program.setUniformValue("transMatrix", _transMatrix);
The Vertex.glsl:
#ifdef GL_ES
// Set default precision to medium
precision mediump int;
precision mediump float;
#endif
attribute vec3 a_position;
attribute vec3 a_color;
uniform mat4 transMatrix;
varying vec3 theColor;
void main()
{
vec4 v = vec4 (a_position, 1.0);
gl_Position = v * transMatrix;
theColor = a_color;
}
GL_DEPTH_TEST is enabled, and if I change the Z translation to -1000, the line still goes to infinity.
[Screenshots: translate z = -3 and translate z = -1000]

GLSL Vertex Shader gives wrong results if I do not mention gl_Vertex

I am writing a shader to draw lines with a width, as an alternative to glLineWidth, which doesn't work above 1.0 with ANGLE, and I'd like my lines to have the same thickness on Windows. I am running on desktop OpenGL for now, though.
The vertex shader source is as follows
attribute vec3 a_startPosition;
attribute vec3 a_endPosition;
attribute float a_choice;
attribute float a_dir;
uniform mat4 u_mvpMatrix;
uniform float u_width;
uniform vec2 u_viewDims;
void main()
{
vec4 start = u_mvpMatrix*vec4(a_startPosition, 1.0);
vec4 end = u_mvpMatrix*vec4(a_endPosition, 1.0);
//gl_Vertex;
vec2 slope = normalize(end.xy - start.xy);
slope = vec2(slope.y, -slope.x);
vec2 scale = u_width/u_viewDims;
if (a_choice == 0.0)
gl_Position = vec4(start.xy + a_dir*scale*slope.xy*start.w, start.zw);
else
gl_Position = vec4(end.xy + a_dir*scale*slope.xy*end.w, end.zw);
}
Note that I have gl_Vertex commented out, so it is unused. The draw code is:
int width, height;
glfwGetFramebufferSize(m_window, &width, &height);
glUseProgram(m_shaders[Shader_WideLine]->id());
GLint shaderid = m_shaders[Shader_WideLine]->id();
GLint coloc = glGetUniformLocation(shaderid, "Color");
GLint dimloc = glGetUniformLocation(shaderid, "u_viewDims");
GLint widthloc = glGetUniformLocation(shaderid, "u_width");
GLint mvploc = glGetUniformLocation(shaderid, "u_mvpMatrix");
GLint modelviewloc = glGetUniformLocation(shaderid, "u_modelview");
GLint projloc = glGetUniformLocation(shaderid, "u_projection");
GLint dirloc = glGetAttribLocation(shaderid, "a_dir");
GLint startloc = glGetAttribLocation(shaderid, "a_startPosition");
GLint endloc = glGetAttribLocation(shaderid, "a_endPosition");
GLint chloc = glGetAttribLocation(shaderid, "a_choice");
//////////
//Set Uniforms
//////////
glUniform1f(widthloc, 10);
glUniform2f(dimloc, width, height);
glUniform4f(coloc, 0.101f, 0.558f, 0.109f, 1.f);
glm::mat4 modelview;
glm::mat4 projection;
glGetFloatv(GL_MODELVIEW_MATRIX, glm::value_ptr(modelview));
glGetFloatv(GL_PROJECTION_MATRIX, glm::value_ptr(projection));
glm::mat4 mvp = projection * modelview;
glUniformMatrix4fv(mvploc, 1, GL_FALSE, glm::value_ptr(mvp));
int numpts = 4;
GLfloat v[4][3] = {
{0,1,0},
{0,0,0},
{1,0,0},
{1,1,0}
};
//////////
// Draw (attributes)
//////////
glBegin( GL_TRIANGLE_STRIP );
glNormal3d(0.0, 0.0, 1.0);
for(int i=0; i<numpts-1; i++)
{
glVertexAttrib3fv(startloc, v[i]);
glVertexAttrib3fv(endloc, v[i+1]);
glVertexAttrib1f(chloc, 0);
glVertexAttrib1f(dirloc, -1.0f);
glVertex3d(0,0,0);
glVertexAttrib1f(dirloc, 1.0f);
glVertex3d(0,0,0);
glVertexAttrib1f(chloc, -1);
glVertexAttrib1f(dirloc, -1.0f);
glVertex3d(0,0,0);
glVertexAttrib1f(dirloc, 1.0f);
glVertex3d(0,0,0);
}
glEnd();
glUseProgram(0);
So I am trying to draw lines from (0,1,0) to (0,0,0) to (1,0,0) to (1,1,0) with a width of 10 pixels. The following images include a 2x2x2 wire cube centered on the origin for reference.
When called as presented, I get an unexpected result (first screenshot).
If I uncomment gl_Vertex; in the shader, so that it is referenced but unused, I get the expected result (second screenshot).
What could be the reason for this?
gl_ModelViewProjectionMatrix is not a valid ES 2.0 vertex shader built-in variable.
You'll have to pass in your MVP via uniform.
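For completeness, a minimal sketch of building the MVP with GLM on the CPU and uploading it through the existing u_mvpMatrix uniform, instead of reading the fixed-function matrices back with glGetFloatv (the camera values here are placeholders):
#include <glm/glm.hpp>
#include <glm/gtc/matrix_transform.hpp>
#include <glm/gtc/type_ptr.hpp>

// Placeholder camera; adapt to the application's actual view and projection.
glm::mat4 model(1.0f);
glm::mat4 view = glm::lookAt(glm::vec3(0.0f, 0.0f, 5.0f),   // eye
                             glm::vec3(0.0f, 0.0f, 0.0f),   // target
                             glm::vec3(0.0f, 1.0f, 0.0f));  // up
glm::mat4 projection = glm::perspective(glm::radians(45.0f),
                                        width / (float)height, 0.1f, 100.0f);
glm::mat4 mvp = projection * view * model;
glUniformMatrix4fv(mvploc, 1, GL_FALSE, glm::value_ptr(mvp));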

Texture mapping of non-rectangle textures - how to get the color of a specific texel

I'm trying to convert a shader written for GLSL version 120 to OpenGL ES 2.0, and I ran into a problem with how to map this shader correctly.
The original shader uses texture2DRect, which is not supported on OpenGL ES (at least not on the target hardware I'm aiming for).
The shader has two textures. One is mapped to a quad; the other is simply a kind of lookup I use to multiply the texel with. The lookup is a 128x128 bitmap.
The original shader looks like this:
float xx = mod(gl_FragCoord.x, 5.0);
float yy = mod(gl_FragCoord.y, 15.0);
vec4 color = texture2DRect(tex0, coord0);
vec4 lookup1 = texture2DRect(tex1, vec2(xx, yy));
gl_FragColor = color * lookup1;
I've changed it to:
float xx = mod(gl_FragCoord.x, 5.0) * (1.0/textsize.x);
float yy = mod(gl_FragCoord.y, 15.0) * (1.0/textsize.y);
vec4 color = texture2D(tex0, coord0);
vec4 lookup1 = texture2D(tex1, vec2(xx, yy));
gl_FragColor = color * lookup1;
where textsize is a vec2 containing the lookup texture size (128x128).
I need the exact value of the texel at, say, (50, 127) in the lookup. I imagined that this should be (50/128, 127/128). However, it is not working as I expected. What am I missing here?
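For reference, with GL_LINEAR filtering an exact texel value is only returned when sampling at the texel centre, which for texel i out of N is (i + 0.5)/N rather than i/N; this is exactly what the (2.0*coord + 1.0)/(2.0*texWidth) mapping in the update below computes. A quick sketch of the arithmetic for the example texel:
// Centre of texel (50, 127) in a 128x128 lookup texture:
float u = (50.0 + 0.5) / 128.0;   // 0.39453125
float v = (127.0 + 0.5) / 128.0;  // 0.99609375
// (50/128, 127/128) = (0.390625, 0.9921875) is that texel's lower-left corner,
// where GL_LINEAR blends it with its neighbours.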
Additional Information:
Texture parameters:
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
Texture declarations:
uniform samplerExternalOES tex0;
uniform sampler2D tex1;
Update 2013.03.13:
I've now implemented the following function:
vec4 texelFetch(sampler2D texsampler, vec2 coord) {
vec2 mult=(2.0*coord + 1.0)/(2.0* texWidth); // 128.0
vec4 texel = texture2D(texsampler,mult);
return texel;
}
And changed the code to:
float xx = mod(gl_FragCoord.x, 5.0);
float yy = mod(gl_FragCoord.y, 15.0);
vec4 lookup1 = texelFetch(tex1, vec2(xx, yy));
Still, it does not seem to work.

Could I bind a vec2 array to a vec4 variable in shader language?

Could I bind a vec2 array to a vec4 variable in shader language?
For example, in the code below, the variable "position" is a vec4, and I tried to bind the variable "squareVertices" to it, but squareVertices is a vec2 array with only (x, y) coordinates. Maybe the defaults are (x, y, z, w) = (x, y, 0, 1)?
My vertex shader and attribute binding code:
attribute vec4 position;
attribute vec2 textureCoordinate;
varying vec2 coordinate;
void main()
{
gl_Position = position;
coordinate = textureCoordinate;
}
glBindAttribLocation(gProgram, ATTRIB_VERTEX, "position");
glBindAttribLocation(gProgram, ATTRIB_TEXTURE, "textureCoordinate");
// bind attribute values
glVertexAttribPointer(ATTRIB_VERTEX, 2, GL_FLOAT, 0, 0, squareVertices);
glEnableVertexAttribArray(ATTRIB_VERTEX);
glVertexAttribPointer(ATTRIB_TEXTURE, 2, GL_FLOAT, 0, 0, coordVertices);
glEnableVertexAttribArray(ATTRIB_TEXTURE);
// squareVertices definition
static const GLfloat squareVertices[] = {
-1.0f, -1.0f,
0.0f, -1.0f,
-1.0f, 0.0f,
0.0f, 0.0f,
};
Yes, this is legal. And, you're correct that the default values of z and w are 0 and 1, respectively. This is covered in section 2.7 of the OpenGL ES 2.0 specification.
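As an illustration (a sketch based on the question's own data, not from the original answer), the size-2 setup above is equivalent to uploading the already-expanded vec4 data yourself:
static const GLfloat squareVerticesVec4[] = {
    -1.0f, -1.0f, 0.0f, 1.0f,
     0.0f, -1.0f, 0.0f, 1.0f,
    -1.0f,  0.0f, 0.0f, 1.0f,
     0.0f,  0.0f, 0.0f, 1.0f,
};
// size = 4 now, because every component is supplied explicitly
glVertexAttribPointer(ATTRIB_VERTEX, 4, GL_FLOAT, 0, 0, squareVerticesVec4);
glEnableVertexAttribArray(ATTRIB_VERTEX);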

How does sampler2DArrayShadow work in GLSL

I am not able to understand how sampler2DArrayShadow works and how to use it. I got the part that we need depth texture values (GL_DEPTH_COMPONENT), which are compared against a reference depth.
But then how is the returned float value used to get the texel?
I got my reference from:
http://www.khronos.org/opengles/sdk/docs/manglsl/xhtml/texture.xml
I have written some small code to test it, shown below.
My fragment shader:
in highp vec2 texcoord;
in highp vec4 basecolor;
out highp vec4 myFragColor;
uniform highp sampler2DArrayShadow basetexture;
void main(void)
{
highp vec3 coords = (vec3(texcoord, 0.0) + vec3(1.0, 1.0, 1.0)) / 2.0;
highp float depth = texture(basetexture, vec4(coords.x, coords.y, coords.z, -0.5));
myFragColor = vec4(depth * basecolor);
}
I created the texture (texdata) as a 2D array and set it up as follows:
glTexParameteri(GL_TEXTURE_2D_ARRAY, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR);
glTexParameteri(GL_TEXTURE_2D_ARRAY, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D_ARRAY, GL_TEXTURE_WRAP_S, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D_ARRAY, GL_TEXTURE_WRAP_T, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D_ARRAY, GL_TEXTURE_COMPARE_MODE, GL_COMPARE_REF_TO_TEXTURE);
glTexImage3D(GL_TEXTURE_2D_ARRAY, 0, GL_DEPTH_COMPONENT16, TEX_SIZE, TEX_SIZE, TEX_SIZE, 0, GL_DEPTH_COMPONENT, GL_UNSIGNED_SHORT, texdata);
Maybe this could help you:
depth cubemap make tutorial
And to be frank, you're supposed to make a framebuffer object that binds a 2D depth texture instead of a cubemap one. If you want to use sampler2DArrayShadow, you just need to use the corresponding overload of the GLSL texture function, float texture(sampler2DArrayShadow sampler, vec4 P): P.z is the index of the 2D texture layer, and P.w is the reference depth value that gets compared against the stored depth. (Note that there is no bias variant of texture() for sampler2DArrayShadow.)
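A rough sketch of what the comparison returns, assuming the GL_COMPARE_REF_TO_TEXTURE setup from the question and the default compare function:
// Depth texture array with the comparison enabled (GL_LEQUAL is the default):
glTexParameteri(GL_TEXTURE_2D_ARRAY, GL_TEXTURE_COMPARE_MODE, GL_COMPARE_REF_TO_TEXTURE);
glTexParameteri(GL_TEXTURE_2D_ARRAY, GL_TEXTURE_COMPARE_FUNC, GL_LEQUAL);
// In the shader, texture(basetexture, vec4(s, t, layer, refDepth)) returns
// 1.0 where refDepth <= storedDepth, 0.0 where the test fails, and a filtered
// value in between when GL_LINEAR averages several comparisons.
// The result is not a texel to fetch; it is used directly as a visibility
// factor, e.g. multiplied into basecolor as the shader above does.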
