I want to render an IOSurface texture with mipmapping.
I am generating an OpenGL texture from an IOSurface like so:
let error = CGLTexImageIOSurface2D(context.cglContextObj!,
                                   GLenum(GL_TEXTURE_RECTANGLE_ARB), GLenum(GL_RGBA),
                                   GLsizei(width), GLsizei(height),
                                   GLenum(GL_BGRA), GLenum(GL_UNSIGNED_INT_8_8_8_8_REV),
                                   surface!, 0)
And rendering it with a simple fragment shader:
#version 120
uniform sampler2DRect texture;
varying vec2 texCoord;
uniform vec2 size;
void main(){
vec4 tex = texture2DRect(texture, texCoord*size);
gl_FragColor = tex;
}
This works fine. Unfortunately, the texture is not mipmapped and is very grainy/pixelated when viewed at a smaller size. Upon further research, it appears that not only are IOSurface-backed textures limited to a GL_TEXTURE_RECTANGLE_ARB target, but they also do not support mipmapping.
All right. So my best alternative here seems to be to:
1. Copy the GL_TEXTURE_RECTANGLE_ARB texture to a GL_TEXTURE_2D texture
2. Generate mipmaps for this new texture
Do note that I am developing on a Mac, which does not support OpenGL 4.3, so I cannot use the convenient glCopyImageSubData here. Instead, I am basing my approach for step 1 on this answer: How to copy texture1 to texture2 efficiently?
My full code now looks like this:
if !hasCreatedTexture {
    glGenTextures(1, &texture);
    glGenTextures(1, &arbTexture);
    glGenFramebuffers(1, &fbo)
    hasCreatedTexture = true
}
glEnable(GLenum(GL_TEXTURE_RECTANGLE_ARB))
glBindTexture(GLenum(GL_TEXTURE_RECTANGLE_ARB), arbTexture)
let error = CGLTexImageIOSurface2D(context.cglContextObj!, GLenum(GL_TEXTURE_RECTANGLE_ARB), GLenum(GL_RGBA), GLsizei(width), GLsizei(height), GLenum(GL_BGRA), GLenum(GL_UNSIGNED_INT_8_8_8_8_REV), surface!, 0)
glBindFramebuffer(GLenum(GL_FRAMEBUFFER), fbo);
glFramebufferTexture2D(GLenum(GL_READ_FRAMEBUFFER), GLenum(GL_COLOR_ATTACHMENT0),
                       GLenum(GL_TEXTURE_RECTANGLE_ARB), arbTexture, 0);
glFramebufferTexture2D(GLenum(GL_DRAW_FRAMEBUFFER), GLenum(GL_COLOR_ATTACHMENT1),
                       GLenum(GL_TEXTURE_2D), texture, 0);
glDrawBuffer(GLenum(GL_COLOR_ATTACHMENT1));
glBlitFramebuffer(0, 0, GLint(width), GLint(height), 0, 0, GLint(width), GLint(height), GLbitfield(GL_COLOR_BUFFER_BIT), GLenum(GL_NEAREST));
glBindTexture(GLenum(GL_TEXTURE_2D), texture)
And I changed my shader to support GL_TEXTURE_2D:
#version 120
uniform sampler2D texture;
varying vec2 texCoord;
void main(){
vec4 tex = texture2D(texture, texCoord);
gl_FragColor = tex;
}
And here is where I'm stuck. The image is all black and I have no clue why it's not working.
I tried looking at the pixel data like so:
let pixelData = UnsafeMutablePointer<GLubyte>.allocate(capacity: Int(width * height * 4))
glGetTexImage(GLenum(GL_TEXTURE_2D), 0, GLenum(GL_BGRA),
GLenum(GL_UNSIGNED_INT_8_8_8_8_REV), pixelData)
print(NSData(bytes: pixelData, length: 50000).description)
And it's all zeroes. Is something wrong with my approach, or could the issue be specifically related to dealing with IOSurface-backed textures?
Finally got this working using the following:
func createMipTexture() {
    guard let surface = self.surface else { return }
    glBindTexture(GLenum(GL_TEXTURE_2D), texture);
    glEnable(GLenum(GL_TEXTURE_2D))
    guard let address = IOSurfaceGetBaseAddress(surface) as UnsafeMutableRawPointer? else { return }
    IOSurfaceLock(surface, .readOnly, nil)
    glTexImage2D(GLenum(GL_TEXTURE_2D), 0, GL_RGBA, GLsizei(width), GLsizei(height), 0, GLenum(GL_BGRA), GLenum(GL_UNSIGNED_INT_8_8_8_8_REV), address)
    IOSurfaceUnlock(surface, .readOnly, nil);
    glTexParameteri(GLenum(GL_TEXTURE_2D), GLenum(GL_TEXTURE_MAX_LEVEL), 100)
    glHint(GLenum(GL_GENERATE_MIPMAP_HINT), GLenum(GL_NICEST));
    glGenerateMipmap(GLenum(GL_TEXTURE_2D))
    glTexParameteri(GLenum(GL_TEXTURE_2D), GLenum(GL_TEXTURE_WRAP_S), GL_REPEAT);
    glTexParameteri(GLenum(GL_TEXTURE_2D), GLenum(GL_TEXTURE_WRAP_T), GL_REPEAT);
    glTexParameteri(GLenum(GL_TEXTURE_2D), GLenum(GL_TEXTURE_MAG_FILTER), GL_LINEAR);
    glTexParameteri(GLenum(GL_TEXTURE_2D), GLenum(GL_TEXTURE_MIN_FILTER), GL_LINEAR_MIPMAP_LINEAR);
}
Related
I'm trying to use multiple textures in Expo, but for some reason whenever I try to bind more than one texture, nothing renders to the screen, even if the extra texture is unused in the shader.
Code that binds textures:
gl.useProgram(program);
gl.bindVertexArray(vao);
// Set texture sampler uniform
const TEXTURE_UNIT = 0;
const TEXTURE_UNIT2 = 1;
gl.uniform1i(uniformLocations.get('cameraTexture'), TEXTURE_UNIT);
gl.uniform1i(uniformLocations.get('segmentationTexture'), TEXTURE_UNIT2);
gl.activeTexture(gl.TEXTURE0);
gl.bindTexture(gl.TEXTURE_2D, cameraTexture);
gl.activeTexture(gl.TEXTURE1);
gl.bindTexture(gl.TEXTURE_2D, segmentationTexture);
gl.bindFramebuffer(gl.FRAMEBUFFER, null);
gl.viewport(0, 0, dims.width, dims.height);
gl.drawArrays(gl.TRIANGLES, 0, vertices.length / 2);
//console.log("draws")
gl.bindVertexArray(null);
gl.bindTexture(gl.TEXTURE_2D, null);
gl.useProgram(null);
Fragment shader:
#version 300 es
precision highp float;
uniform sampler2D cameraTexture;
uniform sampler2D segmentationTexture;
in vec2 uv;
out vec4 fragColor;
void main() {
fragColor = texture(cameraTexture, uv); //segmentationTexture.r *
}
Vertex Shader:
const horizontalScale = flipHorizontal ? -1 : 1;
return `#version 300 es
precision highp float;
in vec2 position;
in vec2 texCoords;
out vec2 uv;
void main() {
// Invert geometry to match the image orientation from the camera.
gl_Position = vec4(position * vec2(${horizontalScale}., -1.), 0, 1);
uv = texCoords;
}`
I have checked both textures by commenting out the code that binds the other texture, e.g.
const TEXTURE_UNIT = 0;
//const TEXTURE_UNIT2 = 1;
gl.uniform1i(uniformLocations.get('cameraTexture'), TEXTURE_UNIT);
//gl.uniform1i(uniformLocations.get('segmentationTexture'), TEXTURE_UNIT2);
gl.activeTexture(gl.TEXTURE0);
gl.bindTexture(gl.TEXTURE_2D, cameraTexture);
//gl.activeTexture(gl.TEXTURE1);
//gl.bindTexture(gl.TEXTURE_2D, segmentationTexture);
or
//const TEXTURE_UNIT = 0;
const TEXTURE_UNIT2 = 1;
//gl.uniform1i(uniformLocations.get('cameraTexture'), TEXTURE_UNIT);
gl.uniform1i(uniformLocations.get('segmentationTexture'), TEXTURE_UNIT2);
//gl.activeTexture(gl.TEXTURE0);
//gl.bindTexture(gl.TEXTURE_2D, cameraTexture);
gl.activeTexture(gl.TEXTURE1);
gl.bindTexture(gl.TEXTURE_2D, segmentationTexture);
and both render fine individually.
However, the act of binding multiple textures, even though only one is used in the fragment shader, leaves me with only a black screen.
I am trying to bind one texture to one object and another texture to a second object. I asked almost the same question here on Stack Overflow before, but the difference that time was that I had just one texture and multiple objects.
How to bind texture just to one object in OpenGLES?
It worked for one texture and multiple objects, but now I have multiple textures and multiple objects. I followed the advice in the answers, but I strongly believe that this time the problem lies in the fragment shader.
@Override
public void onDrawFrame(GL10 gl) {
    GLES20.glClear(GL10.GL_COLOR_BUFFER_BIT);

    synchronized (mediaPlayerObject) {
        surfaceTextureMediaPlayer.updateTexImage();
        mediaPlayeUpdate = false;
    }

    vertexBuffer.position(0);
    GLES20.glVertexAttribPointer(aPositionLocation, POSITION_COMPONENT_COUNT, GLES20.GL_FLOAT, false, STRIDETEXTURE, vertexBuffer);
    vertexBuffer.position(TEXTURE_COMPONENT_COUNT);
    GLES20.glVertexAttribPointer(aTextureCoordinatesLocation, TEXTURE_COMPONENT_COUNT, GLES20.GL_FLOAT, false, STRIDETEXTURE, vertexBuffer);
    GLES20.glEnableVertexAttribArray(aPositionLocation);
    GLES20.glEnableVertexAttribArray(aTextureCoordinatesLocation);

    GLES20.glUniform1i(textureUnitLocation2, 1);
    GLES20.glActiveTexture(GLES20.GL_TEXTURE1);
    GLES20.glBindTexture(GL_TEXTURE_EXTERNAL_OES, textures[1]);
    GLES20.glDrawArrays(GLES20.GL_TRIANGLE_FAN, 0, 6);
    GLES20.glBindTexture(GL_TEXTURE_EXTERNAL_OES, 0);
    GLES20.glDisableVertexAttribArray(aPositionLocation);
    GLES20.glDisableVertexAttribArray(aTextureCoordinatesLocation);

    synchronized (camerObject) {
        surfaceTextureCamera.updateTexImage();
        cameraUpdate = false;
    }

    vertexBuffer2.position(0);
    GLES20.glVertexAttribPointer(aPositionLocation, POSITION_COMPONENT_COUNT, GLES20.GL_FLOAT, false, STRIDETEXTURE, vertexBuffer2);
    vertexBuffer2.position(TEXTURE_COMPONENT_COUNT);
    GLES20.glVertexAttribPointer(aTextureCoordinatesLocation, TEXTURE_COMPONENT_COUNT, GLES20.GL_FLOAT, false, STRIDETEXTURE, vertexBuffer2);
    GLES20.glEnableVertexAttribArray(aPositionLocation);
    GLES20.glEnableVertexAttribArray(aTextureCoordinatesLocation);

    GLES20.glUniform1i(textureUnitLocation, 0);
    GLES20.glActiveTexture(GLES20.GL_TEXTURE0);
    GLES20.glBindTexture(GL_TEXTURE_EXTERNAL_OES, textures[0]);
    GLES20.glDrawArrays(GLES20.GL_TRIANGLE_FAN, 0, 6);
    GLES20.glBindTexture(GL_TEXTURE_EXTERNAL_OES, 0);
    GLES20.glDisableVertexAttribArray(aPositionLocation);
    GLES20.glDisableVertexAttribArray(aTextureCoordinatesLocation);
}
Here is the fragment shader:
#extension GL_OES_EGL_image_external : require
precision mediump float;
uniform samplerExternalOES u_Texture;
uniform samplerExternalOES b_Texture;
varying vec2 v_TextureCoordinates;
void main() {
gl_FragColor = texture2D(u_Texture, v_TextureCoordinates) * texture2D(b_Texture, v_TextureCoordinates);
}
There is no need for a second texture sampler in the fragment shader, so delete b_Texture. It is sufficient to bind the texture object to the texture unit and to set the index of the texture unit to the texture sampler uniform:
#extension GL_OES_EGL_image_external : require
precision mediump float;
uniform samplerExternalOES u_Texture;
varying vec2 v_TextureCoordinates;
void main() {
gl_FragColor = texture2D(u_Texture, v_TextureCoordinates);
}
Draw 1st object:
GLES20.glUniform1i(textureUnitLocation, 1);
GLES20.glActiveTexture(GLES20.GL_TEXTURE1);
GLES20.glBindTexture(GL_TEXTURE_EXTERNAL_OES, textures[1]);
GLES20.glDrawArrays(GLES20.GL_TRIANGLE_FAN, 0, 6);
Draw 2nd object:
GLES20.glUniform1i(textureUnitLocation, 0);
GLES20.glActiveTexture(GLES20.GL_TEXTURE0);
GLES20.glBindTexture(GL_TEXTURE_EXTERNAL_OES, textures[0]);
GLES20.glDrawArrays(GLES20.GL_TRIANGLE_FAN, 0, 6);
Note that it is not necessary to use two different texture units; you can use GL_TEXTURE0 in both cases.
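To illustrate that single-unit variant, here is a minimal sketch in C-style GL calls (the GLES20 Java methods above map onto them one-to-one); the names textureUnitLocation and textures come from the question's code, the function name is hypothetical, and the per-object vertex attribute setup is omitted:

#include <GLES2/gl2.h>
#include <GLES2/gl2ext.h>   // for GL_TEXTURE_EXTERNAL_OES

// Sketch only: both objects are drawn through texture unit 0.
void drawBothObjects(GLint textureUnitLocation, const GLuint textures[2])
{
    glUniform1i(textureUnitLocation, 0);   // the sampler always reads from unit 0
    glActiveTexture(GL_TEXTURE0);

    // 1st object: bind its texture to unit 0, then draw.
    glBindTexture(GL_TEXTURE_EXTERNAL_OES, textures[1]);
    glDrawArrays(GL_TRIANGLE_FAN, 0, 6);

    // 2nd object: rebind unit 0 to the other texture, then draw.
    glBindTexture(GL_TEXTURE_EXTERNAL_OES, textures[0]);
    glDrawArrays(GL_TRIANGLE_FAN, 0, 6);
}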
I am trying to use a single cubemap texture for a skybox and reflections, but I just end up with a black texture for both. I am using a temporary array of pixels for the cubemap to make sure the image wasn't the problem. So just ignore the SDL stuff in this function; I commented some of it out. Also the texture functions and shaders are used for both 2d and cubemap textures so I commented that out as well.
Texture loading (the zTexture* variables are class members):
bool loadCubeMap(std::vector<const char*> path)
{
    //Texture loading success
    bool textureLoaded = false;

    glEnable(GL_TEXTURE_CUBE_MAP); //probably not needed

    for(int j = 0; j < path.size(); j++)
    {
        //SDL_Surface* cubFacSurf = IMG_Load(path[j]);
        if(cubFacSurf != NULL)
        {
            //SDL_LockSurface(cubFacSurf);
            zTextureW = cubFacSurf->w;
            zTextureH = cubFacSurf->h;

            textureLoaded = loadFromPixels((GLuint*)cubFacSurf->pixels, zTextureW, zTextureH, GL_TEXTURE_CUBE_MAP, GL_RGB, GL_TEXTURE_CUBE_MAP_POSITIVE_X+j);

            //SDL_UnlockSurface(cubFacSurf);
            //SDL_FreeSurface(cubFacSurf);
        }
        if(textureLoaded == false)
        {
            SDL_Log("Unable to load %s\n", path[j]);
        }
    }

    glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
    glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
    glTexParameterf(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
    glTexParameterf(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);

    return textureLoaded;
}
Main pixel loader:
bool loadFromPixels(void* pixels, GLuint width, GLuint height, const GLenum tType = GL_TEXTURE_2D, const GLenum glPix = GL_RGB, const GLenum tFace = GL_TEXTURE_2D)
{
    glGenTextures(1, &zTextureID);
    glBindTexture(tType, zTextureID);

    GLfloat checkerboard[] = {1.f,1.f,1.f, 0.f,0.f,0.f, 0.f,0.f,0.f, 1.f,0.f,1.f};
    if(tType == GL_TEXTURE_CUBE_MAP)
        glTexImage2D(tFace, 0, glPix, 2, 2, 0, glPix, GL_FLOAT, &checkerboard);
    //else
    //    glTexImage2D(tFace, 0, glPix, width, height, 0, glPix, GL_UNSIGNED_BYTE, pixels);

    //Check for error
    GLenum error = glGetError();
    if( error != GL_NO_ERROR )
    {
        SDL_Log( "Error loading texture from %p pixels! %d\n", pixels, error);
        return false;
    }

    return true;
}
Texture Binding:
void apply(const GLenum typ = GL_TEXTURE_2D)
{
    glEnable(typ); //Probably not necessary; doesn't change anything if left out
    if(typ == GL_TEXTURE_2D)
    {
        glDisable(GL_TEXTURE_CUBE_MAP); //same here
        glUniform1i(mainTxtrID, 0); //mainTxtrID = Textr in frag
        glActiveTexture(GL_TEXTURE0);
    }
    else
    {
        glDisable(GL_TEXTURE_2D); //and here
        glUniform1i(cubeID, 1); //cubeID = TextCub in frag
        glActiveTexture(GL_TEXTURE1);
    }
    glBindTexture(typ, zTextureID);
}
"Uber" Shaders:
vertex:
#version 100
precision mediump float;
uniform mat4 ModelMat;
uniform mat4 ViewMat;
uniform mat4 ProjMat;
uniform mat4 OrthMat;
uniform bool world;
attribute vec4 vPosition;
attribute vec2 UVCoordAt;
attribute vec3 nPosition;
varying vec2 UVCoord;
varying vec3 vPos;
varying vec3 vNor;
varying vec3 vRefl;
void main()
{
UVCoord = UVCoordAt;
vPos = vec3(vPosition); //skybox coords
vNor = normalize(vec3(ModelMat * vec4(nPosition,0.0)));
vRefl = reflect(normalize(vPos - vec3(ViewMat[3][0], ViewMat[3][1], ViewMat[3][2])), vNor); //reflection direction vector
if(world)
gl_Position = ProjMat * ViewMat * ModelMat * vPosition;
else gl_Position = OrthMat * ModelMat * vPosition;
}
fragment:
#version 100
precision mediump float;
uniform samplerCube TextCub;
uniform sampler2D Textr;
uniform vec3 LiPos;
uniform vec4 fragCol;
uniform bool lighting;
uniform bool dimen;
uniform bool isRefl;
varying vec2 UVCoord;
varying vec3 vPos;
varying vec3 vNor;
varying vec3 vRefl;
void main()
{
vec4 textVect = texture2D(Textr, UVCoord); //default texturing
if(dimen){ textVect = textureCube(TextCub, vPos); } //skybox
else if(isRefl){ textVect = mix(textVect, textureCube(TextCub, vRefl), 0.7); } //reflections mixed with default textr
if(lighting){
float diffuse = clamp(dot(vNor, LiPos), 0.0, 1.0);
gl_FragColor = clamp(diffuse*2.0, 0.6, 1.1) * fragCol * textVect;
}
else{ gl_FragColor = fragCol * textVect; }
}
I am using GL_DEPTH_TEST, but I doubt this affects anything. I am guessing the problem is in the apply() function or something else I left out. There are extensions for cubemaps, but I assume the default OpenGL ES 2 cubemaps work without them.
You're creating a new texture for each cubemap face. In the loadFromPixels() function, which you call for each face:
glGenTextures(1, &zTextureID);
glBindTexture(tType, zTextureID);
...
glTexImage2D(...);
This means that you will end up with 6 textures that each have only data for one face specified, which makes them incomplete.
You need to create one cubemap texture, and then specify data for all 6 sides of that cubemap.
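For reference, a minimal sketch of that pattern (names are hypothetical, not the asker's code): one cubemap texture object is generated and bound once, and all six faces are uploaded into it. The 2x2 test pattern mirrors the checkerboard used in loadFromPixels(), but is uploaded as bytes so it works on plain OpenGL ES 2 without any float-texture extension.

#include <GLES2/gl2.h>

// Sketch only: one cubemap texture, six glTexImage2D calls into it.
GLuint createCheckerCubeMap()
{
    GLuint cubeTex = 0;
    glGenTextures(1, &cubeTex);                    // generate ONCE
    glBindTexture(GL_TEXTURE_CUBE_MAP, cubeTex);   // bind ONCE

    // 2x2 RGB test pattern, one byte per channel.
    const GLubyte checker[] = { 255,255,255,   0,  0,  0,
                                  0,  0,  0, 255,  0,255 };
    glPixelStorei(GL_UNPACK_ALIGNMENT, 1);         // rows are 6 bytes, not a multiple of 4

    for (int face = 0; face < 6; ++face)
    {
        // The six face targets are consecutive enums starting at +X.
        glTexImage2D(GL_TEXTURE_CUBE_MAP_POSITIVE_X + face,
                     0, GL_RGB, 2, 2, 0, GL_RGB, GL_UNSIGNED_BYTE, checker);
    }

    glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
    glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
    glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
    glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
    return cubeTex;
}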
I have a cube, and I want to bind a computed fragment shader to a cube face. Any help?
My vertex shader is:
precision highp float;
attribute vec3 position;
attribute vec3 normal;
uniform mat3 normalMatrix;
uniform mat4 modelViewMatrix;
uniform mat4 projectionMatrix;
varying vec3 fNormal;
varying vec3 fPosition;
void main()
{
fNormal = normalize(normalMatrix * normal);
vec4 pos = modelViewMatrix * vec4(position, 1.0);
fPosition = pos.xyz;
gl_Position = projectionMatrix * pos;
}
And my fragment shader:
precision highp float;
uniform float time;
uniform vec2 resolution;
varying vec3 fPosition;
varying vec3 fNormal;
void main()
{
float minDimension = min(resolution.x, resolution.y);
vec2 bounds = vec2(resolution.x / minDimension, resolution.y / minDimension);
vec2 uv = gl_FragCoord.xy / minDimension;
vec2 midUV = vec2(bounds.x * 0.5, bounds.y * 0.5);
uv.xy-=midUV;
float dist = sqrt(dot(uv, uv));
float t = smoothstep(0.2+0.1*cos(time), 0.1-0.01, dist);
vec4 disc_color=vec4(1.0,0.,0.,1.);
vec4 bkg_color=vec4(0.0, 0.0, 0.0, 1.0);
vec4 resultColor;
resultColor = mix(bkg_color, disc_color, t);
gl_FragColor = resultColor;
}
I get a cube with a single dot over the whole image.
But what I actually want is to have the dot on each face.
You can test the code directly at http://shdr.bkcore.com/
The standard approach to do this in OpenGL is to first render your content to a FBO (Frame Buffer Object). This allows you to render to a texture. You can then use this texture for one of the sides when rendering the cube.
The following is not complete code, but just an outline to give you a starting point:
// create a texture, and bind it
GLuint texId = 0;
glGenTextures(1, &texId);
glBindTexture(GL_TEXTURE_2D, texId);
// set texture parameters (filters, wrap mode, etc)
glTexParameteri(GL_TEXTURE_2D, ...);
// allocate texture data (no data provided)
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, width, height, 0, GL_RGBA, GL_UNSIGNED_BYTE, 0);
glBindTexture(GL_TEXTURE_2D, texId);
// create and set up FBO with texture as attachment
GLuint fboId = 0;
glGenFramebuffers(1, &fboId);
glBindFramebuffer(GL_FRAMEBUFFER, fboId);
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, texId, 0);
// create a depth renderbuffer and attach it if depth testing is needed
// start rendering
glViewport(0, 0, width, height);
glClear(GL_COLOR_BUFFER_BIT);
glDraw...(...);
// back to default framebuffer
glBindFramebuffer(GL_FRAMEBUFFER, 0);
glViewport(0, 0, windowWidth, windowHeight);
glClear(GL_COLOR_BUFFER_BIT);
// render cube using texture we rendered to
glBindTexture(GL_TEXTURE_2D, texId);
// rest of state setup and rendering
Thanks for the answer; this is right, but I found a better way than rendering to a texture: I passed the texture coordinates and used them instead of gl_FragCoord in the fragment shader.
I know I have done something wrong, obviously. Anyway, I have written some code to try to get my textures to animate, and I'm not really sure what has gone wrong or what I have done wrong.
Here is the code that loads my textures:
if(PVRTTextureLoadFromPVR(c_szTextureFile, &m_uiTexture[0]) != PVR_SUCCESS)
{
PVRShellSet(prefExitMessage, "ERROR: Cannot load the texture\n");
return false;
}
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
//This loads in the second texture
if(PVRTTextureLoadFromPVR(c_szTextureFile2, &m_uiTexture[1]) != PVR_SUCCESS)
{
PVRShellSet(prefExitMessage, "ERROR: Cannot load the texture2\n");
return false;
}
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
And here is my timer function that attempts to update the texture coordinates:
int time_Loc = glGetUniformLocation(m_ShaderProgram.uiId, "Time"); //This stores the location of the uniform
float updateTexture;
float timeElapsed = difftime(time(NULL), Timer) * 1000;
if(timeElapsed > 16.0f)
{
glUniform1f(time_Loc, updateTexture++); // passes the updated value to shader
}
And here is the shader that I am passing the data to:
uniform sampler2D sTexture;
uniform sampler2D sTexture2;
varying mediump vec2 TexCoord;
varying mediump vec2 TexCoord2;
//This gets updated within the main code
uniform highp float Time;
void main()
{
mediump vec4 tex1 = texture2D(sTexture, TexCoord + Time);
mediump vec4 tex2 = texture2D(sTexture2, TexCoord2 + Time);
gl_FragColor = tex2 * tex1;
}
The second parameter of texture2D is a normalized coordinate that runs from 0 to 1; it is not in absolute units or pixels. You have a TexCoord, which I'll assume is also in the (0 to 1, 0 to 1) range. You're then "adding" a float value that keeps getting incremented in your C code via updateTexture++. It's also never initialized!
Try something more like this:
float updateTexture = 0.0f;
...
if(timeElapsed > 16.0f)
{
updateTexture += 0.01; // arbitrary increment
if(updateTexture > 1.0) updateTexture = 0.0f; // wrap, if you want
glUniform1f(time_Loc, updateTexture); // passes the updated value to shader
}
Then in your shader do something more explicit than adding a float to a vec2:
...
mediump vec4 tex1 = texture2D(sTexture, vec2(TexCoord.x + Time, TexCoord.y + Time));
...