glsl bind computed fragment shader to cube face - opengl-es

I have a cube, and I want to bind a computed fragment shader to one of its faces. Any help?
my vertex shader is :
precision highp float;
attribute vec3 position;
attribute vec3 normal;
uniform mat3 normalMatrix;
uniform mat4 modelViewMatrix;
uniform mat4 projectionMatrix;
varying vec3 fNormal;
varying vec3 fPosition;
// Transforms each vertex into clip space and passes the eye-space
// normal and eye-space position on to the fragment shader.
void main()
{
fNormal = normalize(normalMatrix * normal);
vec4 pos = modelViewMatrix * vec4(position, 1.0);
// eye-space position of the vertex
fPosition = pos.xyz;
gl_Position = projectionMatrix * pos;
}
and my fragment shader:
precision highp float;
uniform float time;
uniform vec2 resolution;
varying vec3 fPosition;
varying vec3 fNormal;
// Draws a red disc whose radius pulses with `time`, on a black background.
// NOTE(review): the coordinates are derived from gl_FragCoord (window
// space), which is why the disc appears once over the whole image rather
// than once per cube face; per-face texture coordinates would fix that.
void main()
{
// normalize coordinates by the smaller screen dimension so the disc is round
float minDimension = min(resolution.x, resolution.y);
vec2 bounds = vec2(resolution.x / minDimension, resolution.y / minDimension);
vec2 uv = gl_FragCoord.xy / minDimension;
// recenter so (0,0) is the middle of the screen
vec2 midUV = vec2(bounds.x * 0.5, bounds.y * 0.5);
uv.xy-=midUV;
// distance from screen center
float dist = sqrt(dot(uv, uv));
// soft-edged disc whose outer radius oscillates with time
float t = smoothstep(0.2+0.1*cos(time), 0.1-0.01, dist);
vec4 disc_color=vec4(1.0,0.,0.,1.);
vec4 bkg_color=vec4(0.0, 0.0, 0.0, 1.0);
vec4 resultColor;
resultColor = mix(bkg_color, disc_color, t);
gl_FragColor = resultColor;
}
I get a cube with a single dot over the whole image.
But what I actually want is to have the dot on each face.
you can test the code directly under http://shdr.bkcore.com/

The standard approach to do this in OpenGL is to first render your content to a FBO (Frame Buffer Object). This allows you to render to a texture. You can then use this texture for one of the sides when rendering the cube.
The following is not complete code, but just an outline to give you a starting point:
// Outline: render-to-texture via an FBO, then use that texture on the cube.
// create a texture, and bind it
GLuint texId = 0;
glGenTextures(1, &texId);
glBindTexture(GL_TEXTURE_2D, texId);
// set texture parameters (filters, wrap mode, etc)
glTexParameteri(GL_TEXTURE_2D, ...);
// allocate texture data (no data provided)
// FIX: the type argument must be GL_UNSIGNED_BYTE — GL_UNSIGNED_FLOAT
// does not exist in OpenGL (for float data the token is GL_FLOAT).
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, width, height, 0, GL_RGBA, GL_UNSIGNED_BYTE, 0);
// create and set up FBO with texture as attachment
GLuint fboId = 0;
glGenFramebuffers(1, &fboId);
glBindFramebuffer(GL_FRAMEBUFFER, fboId);
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, texId, 0);
// create a depth renderbuffer and attach it if depth testing is needed
// (optionally verify setup with glCheckFramebufferStatus(GL_FRAMEBUFFER))
// start rendering to the FBO
glViewport(0, 0, width, height);
glClear(GL_COLOR_BUFFER_BIT);
glDraw...(...);
// back to default framebuffer
glBindFramebuffer(GL_FRAMEBUFFER, 0);
glViewport(0, 0, windowWidth, windowHeight);
glClear(GL_COLOR_BUFFER_BIT);
// render cube using texture we rendered to
glBindTexture(GL_TEXTURE_2D, texId);
// rest of state setup and rendering

Thanks for the answer — this is right, but I found a better way than rendering to a texture: I passed the texture coordinates and used them instead of gl_FragCoord in the fragment shader.

Related

Black screen when trying to use multiple textures in Expo

I'm trying to use multiple textures in expo, but for some reason whenever I try to bind more than one texture, even if the texture is unused in the shader, nothing renders to the screen.
Code that binds textures:
gl.useProgram(program);
gl.bindVertexArray(vao);
// Set texture sampler uniform
const TEXTURE_UNIT = 0;
const TEXTURE_UNIT2 = 1;
gl.uniform1i(uniformLocations.get('cameraTexture'), TEXTURE_UNIT);
gl.uniform1i(uniformLocations.get('segmentationTexture'), TEXTURE_UNIT2);
// bind each texture to its own texture unit
gl.activeTexture(gl.TEXTURE0);
gl.bindTexture(gl.TEXTURE_2D, cameraTexture);
gl.activeTexture(gl.TEXTURE1);
gl.bindTexture(gl.TEXTURE_2D, segmentationTexture);
// draw to the default framebuffer
gl.bindFramebuffer(gl.FRAMEBUFFER, null);
gl.viewport(0, 0, dims.width, dims.height);
gl.drawArrays(gl.TRIANGLES, 0, vertices.length / 2);
//console.log("draws")
// reset state
// NOTE(review): this unbinds only from the currently active unit
// (TEXTURE1 after the calls above); unit 0 keeps its binding.
gl.bindVertexArray(null);
gl.bindTexture(gl.TEXTURE_2D, null);
gl.useProgram(null);
Fragment shader:
#version 300 es
precision highp float;
uniform sampler2D cameraTexture;
uniform sampler2D segmentationTexture;
in vec2 uv;
out vec4 fragColor;
// Outputs the camera texture; the segmentation sampler is declared but
// currently unused (the masking multiply is commented out).
void main() {
fragColor = texture(cameraTexture, uv); //segmentationTexture.r *
}
Vertex Shader:
// -1 mirrors the geometry horizontally; interpolated into the shader below.
const horizontalScale = flipHorizontal ? -1 : 1;
// Vertex shader source built as a template literal so the flip factor is
// baked into the GLSL as a constant.
return `#version 300 es
precision highp float;
in vec2 position;
in vec2 texCoords;
out vec2 uv;
void main() {
// Invert geometry to match the image orientation from the camera.
gl_Position = vec4(position * vec2(${horizontalScale}., -1.), 0, 1);
uv = texCoords;
}`
I have checked both textures by commenting out the code the binds the other texture, e.g.
// Debug variant: bind only the camera texture on unit 0.
const TEXTURE_UNIT = 0;
//const TEXTURE_UNIT2 = 1;
gl.uniform1i(uniformLocations.get('cameraTexture'), TEXTURE_UNIT);
//gl.uniform1i(uniformLocations.get('segmentationTexture'), TEXTURE_UNIT2);
gl.activeTexture(gl.TEXTURE0);
gl.bindTexture(gl.TEXTURE_2D, cameraTexture);
//gl.activeTexture(gl.TEXTURE1);
//gl.bindTexture(gl.TEXTURE_2D, segmentationTexture);
or
// Debug variant: bind only the segmentation texture on unit 1.
//const TEXTURE_UNIT = 0;
const TEXTURE_UNIT2 = 1;
//gl.uniform1i(uniformLocations.get('cameraTexture'), TEXTURE_UNIT);
gl.uniform1i(uniformLocations.get('segmentationTexture'), TEXTURE_UNIT2);
//gl.activeTexture(gl.TEXTURE0);
//gl.bindTexture(gl.TEXTURE_2D, cameraTexture);
gl.activeTexture(gl.TEXTURE1);
gl.bindTexture(gl.TEXTURE_2D, segmentationTexture);
and both render fine individually.
However, the act of trying to bind multiple textures, even though only one is used in frag shader, leaves me with only a black screen.

Opengl black/blank cubemap texture

I am trying to use a single cubemap texture for a skybox and reflections, but I just end up with a black texture for both. I am using a temporary array of pixels for the cubemap to make sure the image wasn't the problem. So just ignore the SDL stuff in this function; I commented some of it out. Also the texture functions and shaders are used for both 2d and cubemap textures so I commented that out as well.
Texture loading: zTexture* vars are class vars
// Loads six images as the faces of a cubemap texture; returns whether the
// most recently processed face loaded successfully.
bool loadCubeMap(std::vector<const char*> path)
{
//Texture loading success
bool textureLoaded = false;
glEnable(GL_TEXTURE_CUBE_MAP); //probably not needed
for(int j=0; j<path.size(); j++)
{
// NOTE(review): with IMG_Load commented out, cubFacSurf is never
// declared or assigned in this snippet — it cannot compile as shown.
//SDL_Surface* cubFacSurf = IMG_Load(path[j]);
if(cubFacSurf != NULL)
{
//SDL_LockSurface(cubFacSurf);
zTextureW = cubFacSurf->w;
zTextureH = cubFacSurf->h;
// NOTE(review): loadFromPixels calls glGenTextures on every call, so
// each face ends up in a different texture object and no single
// cubemap ever receives all six faces.
textureLoaded = loadFromPixels((GLuint*)cubFacSurf->pixels, zTextureW, zTextureH, GL_TEXTURE_CUBE_MAP, GL_RGB, GL_TEXTURE_CUBE_MAP_POSITIVE_X+j);
//SDL_UnlockSurface(cubFacSurf);
//SDL_FreeSurface(cubFacSurf);
}
if(textureLoaded == false)
{
SDL_Log("Unable to load %s\n", path[j]);
}
}
// these apply to whichever cubemap texture is currently bound
// (the one created for the last face)
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameterf(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameterf(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
return textureLoaded;
}
main pixel loader
// Creates a texture object and uploads pixel data for the given target/face.
// For cubemap targets a hard-coded 2x2 float checkerboard is uploaded
// instead of `pixels` (a debugging aid). Returns false on a GL error.
// NOTE(review): glGenTextures runs on every invocation, so when called once
// per cubemap face each face lands in a brand-new, incomplete texture — the
// texture should be generated and bound once, outside the per-face loop.
bool loadFromPixels(void* pixels, GLuint width, GLuint height, const GLenum tType = GL_TEXTURE_2D, const GLenum glPix = GL_RGB, const GLenum tFace = GL_TEXTURE_2D)
{
glGenTextures(1, &zTextureID);
glBindTexture(tType, zTextureID);
// 4 RGB texels (white, black, black, magenta) for a 2x2 debug image
GLfloat checkerboard[] = {1.f,1.f,1.f, 0.f,0.f,0.f, 0.f,0.f,0.f, 1.f,0.f,1.f};
if(tType == GL_TEXTURE_CUBE_MAP)
glTexImage2D(tFace, 0, glPix, 2, 2, 0, glPix, GL_FLOAT, &checkerboard);
//else
//glTexImage2D(tFace, 0, glPix, width, height, 0, glPix, GL_UNSIGNED_BYTE, pixels);
//Check for error
GLenum error = glGetError();
if( error != GL_NO_ERROR )
{
SDL_Log( "Error loading texture from %p pixels! %d\n", pixels, error);
return false;
}
return true;
}
Texture Binding:
// Binds the texture for drawing: 2D textures go on unit 0 via sampler
// `mainTxtrID`, cubemaps on unit 1 via sampler `cubeID`.
// NOTE(review): glUniform1i affects the currently bound program — this
// assumes the shader program is already in use when apply() runs; confirm.
void apply(const GLenum typ = GL_TEXTURE_2D)
{
glEnable(typ); //Probably not necessary doesnt change anything if left out
if(typ == GL_TEXTURE_2D)
{
glDisable(GL_TEXTURE_CUBE_MAP); //same here
glUniform1i(mainTxtrID, 0); //mainTxtrID = Textr in frag
glActiveTexture(GL_TEXTURE0);
}
else
{
glDisable(GL_TEXTURE_2D); //and here
glUniform1i(cubeID, 1); //cubeID = TextCub in frag
glActiveTexture(GL_TEXTURE1);
}
// bind to whichever unit was just made active
glBindTexture(typ, zTextureID);
}
"Uber" Shaders:
vertex:
#version 100
precision mediump float;
// "Uber" vertex shader: handles both world-space (perspective) geometry
// and orthographic geometry, selected by the `world` flag.
uniform mat4 ModelMat;
uniform mat4 ViewMat;
uniform mat4 ProjMat;
uniform mat4 OrthMat;
uniform bool world;
attribute vec4 vPosition;
attribute vec2 UVCoordAt;
attribute vec3 nPosition;
varying vec2 UVCoord;
varying vec3 vPos;
varying vec3 vNor;
varying vec3 vRefl;
void main()
{
UVCoord = UVCoordAt;
vPos = vec3(vPosition); //skybox coords
// world-space normal (w = 0.0 so translation is ignored)
vNor = normalize(vec3(ModelMat * vec4(nPosition,0.0)));
// NOTE(review): ViewMat[3] is the view matrix's translation column, not
// the camera's world position — verify the eye vector used here.
vRefl = reflect(normalize(vPos - vec3(ViewMat[3][0], ViewMat[3][1], ViewMat[3][2])), vNor); //reflection direction vector
if(world)
gl_Position = ProjMat * ViewMat * ModelMat * vPosition;
else gl_Position = OrthMat * ModelMat * vPosition;
}
fragment:
#version 100
precision mediump float;
// "Uber" fragment shader: default 2D texturing, skybox cubemap sampling
// (dimen), cubemap reflection mixing (isRefl), optional diffuse lighting.
uniform samplerCube TextCub;
uniform sampler2D Textr;
uniform vec3 LiPos;
uniform vec4 fragCol;
uniform bool lighting;
uniform bool dimen;
uniform bool isRefl;
varying vec2 UVCoord;
varying vec3 vPos;
varying vec3 vNor;
varying vec3 vRefl;
void main()
{
vec4 textVect = texture2D(Textr, UVCoord); //default texturing
if(dimen){ textVect = textureCube(TextCub, vPos); } //skybox
else if(isRefl){ textVect = mix(textVect, textureCube(TextCub, vRefl), 0.7); } //reflections mixed with default textr
if(lighting){
// NOTE(review): LiPos is used directly as a light direction and is not
// normalized here — confirm it is pre-normalized by the caller.
float diffuse = clamp(dot(vNor, LiPos), 0.0, 1.0);
gl_FragColor = clamp(diffuse*2.0, 0.6, 1.1) * fragCol * textVect;
}
else{ gl_FragColor = fragCol * textVect; }
}
I am using GL_DEPTH_TEST, I doubt this affects anything. I am guessing the problem is in the apply() function or something else I left out. There are extensions for cubemaps but I assume the default opengles 2 cubemaps work without them.
You're creating a new texture for each cubemap face. In the loadFromPixels() function, which you call for each face:
glGenTextures(1, &zTextureID);
glBindTexture(tType, zTextureID);
...
glTexImage2D(...);
This means that you will end up with 6 textures that each have only data for one face specified, which makes them incomplete.
You need to create one cubemap texture, and then specify data for all 6 sides of that cubemap.

Convert IOSurface-backed texture to GL_TEXTURE_2D

I want to render an IOSurface texture with mipmapping.
I am generating an OpenGL texture from an IOSurface like so:
// Wraps the IOSurface as a GL texture; IOSurface-backed textures require
// the GL_TEXTURE_RECTANGLE_ARB target (see the discussion below the code).
let error = CGLTexImageIOSurface2D(context.cglContextObj!,
GLenum(GL_TEXTURE_RECTANGLE_ARB),GLenum(GL_RGBA), GLsizei(width), GLsizei(height),
GLenum(GL_BGRA), GLenum(GL_UNSIGNED_INT_8_8_8_8_REV), surface!, 0)
And rendering it with a simple fragment shader:
#version 120
// Rectangle textures are sampled with pixel coordinates, hence the
// multiplication by `size` to scale the 0..1 texCoord.
uniform sampler2DRect texture;
varying vec2 texCoord;
uniform vec2 size;
void main(){
vec4 tex = texture2DRect(texture, texCoord*size);
gl_FragColor = tex;
}
This works fine. But unfortunately, the texture is not mipmapped and is very grainy/pixellated when viewed at a smaller size. Upon further research, it appears that not only are IOSurface backed textures limited to a GL_TEXTURE_RECTANGLE_ARB target, they also do not support mipmapping.
Allright. So my best alternative here seems to be to:
Copy the GL_TEXTURE_RECTANGLE_ARB texture to a GL_TEXTURE_2D texture
Generate mipmaps for this new texture
Do note that I am developing on Mac, which does not support OpenGL 4.3, so I cannot use the convenient glCopyImageSubData here. Instead I am basing my approach for Step 1 here on this answer: How to copy texture1 to texture2 efficiently?
My full code now looks like this:
if !hasCreatedTexture {
// one-time creation of the destination 2D texture, the IOSurface-backed
// rectangle texture, and the FBO used for the blit
glGenTextures(1, &texture);
glGenTextures(1, &arbTexture);
glGenFramebuffers(1, &fbo)
hasCreatedTexture = true
}
glEnable(GLenum(GL_TEXTURE_RECTANGLE_ARB))
glBindTexture(GLenum(GL_TEXTURE_RECTANGLE_ARB), arbTexture)
// attach the IOSurface contents to the rectangle texture
let error = CGLTexImageIOSurface2D(context.cglContextObj!, GLenum(GL_TEXTURE_RECTANGLE_ARB), GLenum(GL_RGBA), GLsizei(width), GLsizei(height), GLenum(GL_BGRA), GLenum(GL_UNSIGNED_INT_8_8_8_8_REV), surface!, 0)
// blit: rectangle texture (read, attachment 0) -> 2D texture (draw, attachment 1)
glBindFramebuffer(GLenum(GL_FRAMEBUFFER), fbo);
glFramebufferTexture2D(GLenum(GL_READ_FRAMEBUFFER), GLenum(GL_COLOR_ATTACHMENT0),
GLenum(GL_TEXTURE_RECTANGLE_ARB), arbTexture, 0);
// NOTE(review): `texture` has no storage allocated (no glTexImage2D before
// this attach), which leaves the draw attachment incomplete — likely why
// the readback is all zeros; confirm with glCheckFramebufferStatus.
glFramebufferTexture2D(GLenum(GL_DRAW_FRAMEBUFFER), GLenum(GL_COLOR_ATTACHMENT1),
GLenum(GL_TEXTURE_2D), texture, 0);
glDrawBuffer(GLenum(GL_COLOR_ATTACHMENT1));
glBlitFramebuffer(0, 0, GLint(width), GLint(height), 0, 0, GLint(width), GLint(height), GLbitfield(GL_COLOR_BUFFER_BIT), GLenum(GL_NEAREST));
glBindTexture(GLenum(GL_TEXTURE_2D), texture)
And I changed my shader to support GL_TEXTURE_2D:
#version 120
// Plain 2D sampling with normalized coordinates (contrast with the
// rectangle-texture version, which needs pixel coordinates).
uniform sampler2D texture;
varying vec2 texCoord;
void main(){
vec4 tex = texture2D(texture, texCoord);
gl_FragColor = tex;
}
And here is where I'm stuck. The image is all black and I have no clue why it's not working.
I tried looking at the pixel data like so:
// Debug readback of the 2D texture's level-0 pixels (reported to print
// all zeroes, i.e. the blit produced no data).
var pixelData = UnsafeMutablePointer<GLubyte>(allocatingCapacity: Int(width * height * 4))
glGetTexImage(GLenum(GL_TEXTURE_2D), 0, GLenum(GL_BGRA),
GLenum(GL_UNSIGNED_INT_8_8_8_8_REV), pixelData)
print(NSData(bytes: pixelData, length: 50000).description)
And it's all zeroes. Is it something wrong with my approach or could the issue be specifically related to dealing with IOSurface backed textures?
Finally got this working using the following:
// Builds a mipmapped GL_TEXTURE_2D copy of the IOSurface contents by
// reading the surface's base address on the CPU and re-uploading it.
func createMipTexture() {
guard let surface = self.surface else { return }
glBindTexture(GLenum(GL_TEXTURE_2D), texture);
glEnable(GLenum(GL_TEXTURE_2D))
// NOTE(review): the base address is fetched before IOSurfaceLock is
// taken — Apple's docs say to lock before accessing; confirm ordering.
guard let address = IOSurfaceGetBaseAddress(surface) as UnsafeMutableRawPointer? else { return }
IOSurfaceLock(surface, .readOnly, nil)
// CPU upload of the surface pixels into the 2D texture
glTexImage2D(GLenum(GL_TEXTURE_2D), 0, GL_RGBA, GLsizei(width), GLsizei(height), 0, GLenum(GL_BGRA), GLenum(GL_UNSIGNED_INT_8_8_8_8_REV), address)
IOSurfaceUnlock(surface, .readOnly, nil);
glTexParameteri(GLenum(GL_TEXTURE_2D), GLenum(GL_TEXTURE_MAX_LEVEL), 100)
glHint(GLenum(GL_GENERATE_MIPMAP_HINT), GLenum(GL_NICEST));
// build the full mipmap chain from level 0
glGenerateMipmap(GLenum(GL_TEXTURE_2D))
glTexParameteri(GLenum(GL_TEXTURE_2D), GLenum(GL_TEXTURE_WRAP_S), GL_REPEAT);
glTexParameteri(GLenum(GL_TEXTURE_2D), GLenum(GL_TEXTURE_WRAP_T), GL_REPEAT);
glTexParameteri(GLenum(GL_TEXTURE_2D), GLenum(GL_TEXTURE_MAG_FILTER), GL_LINEAR);
glTexParameteri(GLenum(GL_TEXTURE_2D), GLenum(GL_TEXTURE_MIN_FILTER), GL_LINEAR_MIPMAP_LINEAR);
}

Same texture offseting different position in the fragment shader using threejs rendering engine

My vertex shader:
// Passes the three.js-supplied `uv` attribute through to the fragment shader.
varying vec2 texCoord;
void main() {
texCoord = uv;
gl_Position = projectionMatrix * modelViewMatrix * vec4(position, 1.0);
}
My fragment shader:
varying vec2 texCoord;
uniform sampler2D texture1;
uniform sampler2D texture2;
uniform float multiplier;
// Cross-fades between two textures: multiplier = 0.0 shows texture1,
// 1.0 shows texture2.
void main( void ) {
vec3 tex1 = texture2D(texture1, texCoord).xyz ;
vec3 tex2 = texture2D(texture2, texCoord).xyz ;
vec3 finaltex = mix( tex1, tex2, multiplier) ;
gl_FragColor = vec4(finaltex , 1.0);
}
Now this work very well when i run using the two texture.check http://shaderfrog.com/app/view/68 for the multiplier action.
But now what I want is I am having texture like this:
So with the single texture I want to offset my texCoord so that I only need to sample one texture and can still get three representations from it, like:
// three.js material wiring: a single texture uniform is supplied; the
// shader sources are pulled from <script> tags in the page.
var shaderMaterial = new THREE.ShaderMaterial({
uniforms:{
texture1: { type: "t", value: texture1 }
},
vertexShader: document.getElementById( 'vertexShader' ).textContent,
fragmentShader: document.getElementById( 'fragmentShader' ).textContent
});
Can offset my tri-color in the fragment shader. or Some one can help me modifying the fragment shader so that I can pass uniform to index my tri-color into individual Yellow,pink,red.
So either from shader or threejs I can get help regarding same.
I wrote the reference version using two textures because I want to interpolate with a cross-fade effect on the texture; I want the same cross-fade using the fragment shader. (Independently of this, I have already achieved the offsetting via texture.offset.x = currentColoum / horizontal and texture.offset.y = currentRow / Vertical.)
I found the answer to this question, even implemented into application :D.
vertexShader:
// FIX: the varying written in main() was never declared, which fails to
// compile; declare it so it can be passed to the fragment shader.
varying vec2 texCoord;
// Passes the three.js-supplied `uv` attribute through to the fragment shader.
void main() {
texCoord = uv;
gl_Position = projectionMatrix * modelViewMatrix * vec4(position, 1.0);
}
FragmentShader:
varying vec2 texCoord;
// FIX: the sampler was declared as `texture` but sampled as `texture1`
// below, which fails to compile; declare the name that is actually used
// (it also matches the `texture1` uniform supplied from three.js).
uniform sampler2D texture1;
uniform vec2 offset_current;
uniform vec2 offset_next;
uniform float multiplier;
// Cross-fades between two sub-images of a vertical 3-cell texture atlas,
// selected by the offset uniforms.
void main( void ) {
// each sub-image occupies one third of the atlas vertically (1/3 ~ 0.333333)
vec2 offset1 = offset_current + vec2( texCoord.x* 1.0, texCoord.y * 0.333333);
vec2 offset2 = offset_next + vec2( texCoord.x* 1.0, texCoord.y * 0.333333);
vec3 tex1 = texture2D(texture1,offset1).xyz ;
vec3 tex2 = texture2D(texture1, offset2).xyz ;
vec3 mixCol = mix( tex1, tex2, multiplier );
vec4 fragColor = vec4(mixCol, 1.0);
// NOTE: alpha is hard-coded to 1.0 above, so this discard can never fire
if(fragColor.a == 0.0)
discard;
gl_FragColor = fragColor;
}
Explanation:
Since my texture stacks three different types vertically, I set the offset in the y direction to 0.333333, because texture coordinates run over [0, 1]. I have extended this code the same way for the horizontal direction.
If someone wants to make this dynamic, then instead of hard-coding it we can pass 0.3333 as a calculated value, taking inspiration from the link.

WebGL lighting isn't applied to some edges?

No idea what this is called, but when viewing my model at a distance, it doesn't look like the lighting is being applied to some of the edges, which makes them stand out. Any ideas? I'm not using any textures.
Vertex Shader:
attribute vec3 aPosition;
attribute vec3 aNormal;
uniform mat4 uMVMatrix;
uniform mat4 uPMatrix;
uniform mat4 uNormalMatrix;
varying vec3 vLighting;
// Per-vertex (Gouraud) lighting: ambient plus one directional light.
void main(void) {
vec3 ambientLight = vec3(0.2, 0.2, 0.2);
vec3 directionalLight = vec3(1.0, 0.5, 0.5);
// FIX: normalize the light direction — the raw vector's length (~1.22)
// otherwise scales the diffuse term.
vec3 directionalLightDirection = normalize(vec3(0.8, 0.7, 0.6));
// FIX: a normal is a direction, so transform with w = 0.0; w = 1.0 adds
// the matrix's translation column into the normal. Re-normalize after
// the transform in case uNormalMatrix scales.
vec4 transformedNormal = uNormalMatrix * vec4(aNormal, 0.0);
float directional = max(dot(normalize(transformedNormal.xyz), directionalLightDirection), 0.0);
vLighting = ambientLight + (directionalLight * directional);
gl_Position = uPMatrix * uMVMatrix * vec4(aPosition / 80.0, 1.0);
}
Fragment Shader:
precision mediump float;
varying vec3 vLighting;
// Modulates a constant skin color by the per-vertex lighting interpolated
// from the vertex shader.
void main(void) {
vec4 skin = vec4(0.8, 0.6, 0.4, 1.0);
gl_FragColor = vec4(skin.rgb * vLighting, skin.a);
}
Picture:

Resources