OpenGL ES: Combine two Frame Buffer Objects

I render two FBOs offscreen. Now I want to overlay them depending on the depth buffer values.
How can I do this?
I found a solution:
First, I render the depth buffer into a texture which is attached to the FBO.
Second, I load that texture in the fragment shader to compare the two depth buffers.
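For the first step, if depth-texture attachments (OES_depth_texture) are not available, one way to get the depth into a sampleable texture is a first pass that writes the depth into the red channel of a color attachment. A minimal sketch of such a pass (an assumption about the setup, not necessarily what the asker does):
precision mediump float;

void main() {
    // Store window-space depth (0.0 = near, 1.0 = far) in the red channel
    // so the compositing pass can compare it through a sampler2D.
    gl_FragColor = vec4(gl_FragCoord.z, 0.0, 0.0, 1.0);
}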
My fragment shader looks like this:
precision mediump float;

varying vec2 v_texCoord;
uniform sampler2D s_texture1;
uniform sampler2D s_texture2;
uniform sampler2D s_depth1;
uniform sampler2D s_depth2;

void main() {
    vec4 depth1 = texture2D(s_depth1, v_texCoord);
    vec4 depth2 = texture2D(s_depth2, v_texCoord);
    vec4 tex1 = texture2D(s_texture1, v_texCoord);
    vec4 tex2 = texture2D(s_texture2, v_texCoord);
    if (depth1.r < depth2.r) {
        gl_FragColor = tex1;
    } else {
        gl_FragColor = tex2;
    }
}
But this doesn't seem to perform very well.
Is it possible to attach a renderbuffer to the FBO and access the renderbuffer's data in the fragment shader the way I do now?
Would that improve performance?
Thank you very much.
Regards, Mat

Related

How to place a texture in a specific location?

In my code, I'm mixing two textures. I want to position a texture anywhere on the plane, but when I add an offset to the texture's UV coordinates the image just gets stretched.
offsetText1 = vec2(0.1, 0.1);
vec4 displacement = texture2D(utexture1, vUv + offsetText1);
How do I move the texture to any position without stretching it?
VERTEX SHADER:
varying vec2 vUv;
uniform sampler2D utexture1;
uniform sampler2D utexture2;
varying vec2 offsetText1;

void main() {
    offsetText1 = vec2(0.1, 0.1);
    vUv = uv;
    vec4 modelPosition = modelMatrix * vec4(position, 1.0);
    vec4 displacement = texture2D(utexture1, vUv + offsetText1);
    vec4 displacement2 = texture2D(utexture2, vUv);
    modelPosition.z += displacement.r * 1.0;
    modelPosition.z += displacement2.r * 40.0;
    gl_Position = projectionMatrix * viewMatrix * modelPosition;
}
FRAGMENT SHADER:
#ifdef GL_ES
precision highp float;
#endif

uniform sampler2D utexture1;
uniform sampler2D utexture2;
varying vec2 vUv;
varying vec2 offsetText1;

void main() {
    vec4 Ca = texture2D(utexture1, vUv + offsetText1);
    vec4 Cb = texture2D(utexture2, vUv);
    vec3 c = Ca.rgb * Ca.a + Cb.rgb * Cb.a * (2.0 - Ca.a);
    gl_FragColor = vec4(c, 1.0);
}
[Image: with offsetText1 = vec2(0.0, 0.0) there is no stretching]
[Image: with offsetText1 = vec2(0.1, 0.1) the image is stretched from the top right corner]
That's the behavior of textures. They are defined over the range [0, 1], so when you go beyond 1 or below 0, they'll "wrap". You need to tell it what to do when wrapping. Do you want it to repeat, stretch, or mirror?
You can control this with the texture.wrapS and .wrapT properties, which accept one of three values:
THREE.RepeatWrapping
THREE.ClampToEdgeWrapping
THREE.MirroredRepeatWrapping
If you want to just show white where the texture extends out of bounds, then you'd have to do that programmatically in your shader code. Here's some pseudocode:
if (uv.x < 0.0 || uv.x > 1.0 || uv.y < 0.0 || uv.y > 1.0)
    color = white
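In GLSL that check could look like the sketch below, reusing the names from the question's fragment shader (utexture1, vUv, offsetText1) and assuming the sampled color ends up in a local variable named color:
vec2 uv = vUv + offsetText1;
vec4 color = texture2D(utexture1, uv);
// Paint white wherever the offset UV falls outside [0, 1],
// instead of letting the wrap mode decide what to sample.
if (any(lessThan(uv, vec2(0.0))) || any(greaterThan(uv, vec2(1.0)))) {
    color = vec4(1.0); // opaque white
}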

Three.js renders unprocessed png image for texture

With three.js, I am trying to create a scene where a plane becomes transparent as the camera moves away from it.
I textured the plane object with a round map tile, which was edited from the square image below.
When I load the round image through a ShaderMaterial, the texture appears square, like the original image.
The weird thing is that it is rendered as intended when the image is loaded onto a regular mesh material.
Could you tell me why three.js behaves this way? Also, how might I render the round tile with a shader while keeping its ability to fade based on distance?
the full code is available here: https://codesandbox.io/s/tile-with-shader-7kw5v?file=/src/index.js
Here is an option that takes into account only the x and z coordinates of the plane and the camera.
vertex.glsl:
varying vec4 vPosition;
varying vec2 vUv;

void main() {
    vPosition = modelMatrix * vec4(position, 1.);
    vUv = uv;
    gl_Position = projectionMatrix * viewMatrix * vPosition;
}
frag.glsl:
uniform vec3 u_color;
uniform vec3 u_camera;
uniform vec3 u_plane;
uniform float u_rad;
uniform sampler2D u_texture;
varying vec4 vPosition;
varying vec2 vUv;

void main() {
    vec4 textureCol = texture2D(u_texture, vUv);
    float rad = distance(vPosition.xz, u_camera.xz); // xz-plane
    textureCol.a = 1.0 - step(1., rad / u_rad);
    gl_FragColor = textureCol;
}
and the u_rad uniform is
u_rad: { value: 50 },

Three.js THREE.InstancedBufferGeometry multiple sampler2D vertical tearings

I have created a RawShaderMaterial for an InstancedBufferGeometry object that renders well with one sampler2D uniform. As soon as it uses a second sampler2D uniform, it renders with a lot of vertical tearing.
Here is the fragment shader:
precision highp float;

uniform sampler2D utexture1;
uniform sampler2D utexture2;
varying float vindex;
varying vec2 vUv;
varying vec3 mapcolor;

vec4 gettexture() {
    vec4 color;
    if (vindex == 0.) {
        color = texture2D(utexture1, vUv) * vec4(mapcolor, 1.);
    } else if (vindex == 1.) {
        color = texture2D(utexture1, vUv) * vec4(mapcolor, 1.);
    } else if (vindex == 2.) {
        color = texture2D(utexture2, vUv) * vec4(mapcolor, 1.);
    } else if (vindex == 3.) {
        color = texture2D(utexture2, vUv) * vec4(mapcolor, 1.);
    }
    return color;
}

void main() {
    gl_FragColor = gettexture();
}
Notes: the two textures used for the sampler2D uniforms are the same size (512x512), and they are loaded before the material is created.
Does anyone know where this vertical tearing comes from?
Thank you in advance for your help!

How to write only alpha > 0 to the stencil buffer?

I am trying to render a texture to the stencil buffer. I only need the pixels whose alpha is > 0, but my code renders every pixel of my quad, even the ones with 0 alpha. How can I avoid this?
Here's my code:
GL.StencilOp(StencilOp.Keep, StencilOp.Keep, StencilOp.Incr);
GL.ColorMask(false, false, false, false);
GL.DepthMask(false);
RenderMask(mask);
GL.StencilFunc(StencilFunction.Equal, 1, 0xFF);
GL.StencilOp(StencilOp.Keep, StencilOp.Keep, StencilOp.Keep);
GL.ColorMask(true, true, true, true);
GL.DepthMask(true);
When debugging with RenderDoc I can see that the stencil buffer contains 1s where my texture is... but it's a rectangle; it does not take alpha into account.
Here's my fragment shader (it works fine for normal rendering):
varying lowp vec4 vColor;
varying lowp vec2 vTexCoords;
uniform lowp sampler2D uTexture;

void main() {
    gl_FragColor = texture2D(uTexture, vTexCoords) * vColor;
}
Use a "discard" statement in the shader to drop the fragments you don't want to keep.
varying lowp vec4 vColor;
varying lowp vec2 vTexCoords;
uniform lowp sampler2D uTexture;

void main() {
    vec4 color = texture2D(uTexture, vTexCoords) * vColor;
    if (color.a == 0.0) {
        discard;
    }
    gl_FragColor = color;
}

How to use a 3x3 homography matrix in OpenGL ES shaders?

I have a 3x3 homography matrix that works correctly with OpenCV's warpPerspective, but I need to do the warping on the GPU for performance reasons. What is the best approach? I tried multiplying in the vertex shader to get the texture coordinates and then rendering a quad, but I get strange distortions. I'm not sure if it's the interpolation not working as I expect. I'm attaching output for comparison (it involves two different, but close enough, shots).
[Image: absolute difference of the warp and the other image, computed on the GPU]
[Image: composite of the warp and the other image in OpenCV]
EDIT:
Following are my shaders; the task is image rectification (making epilines become scanlines) plus absolute difference.
// Vertex Shader
static const char* warpVS = STRINGIFY
(
    uniform highp mat3 homography1;
    uniform highp mat3 homography2;
    uniform highp int width;
    uniform highp int height;
    attribute highp vec2 position;
    varying highp vec2 refTexCoords;
    varying highp vec2 curTexCoords;

    highp vec2 convertToTexture(highp vec3 pixelCoords) {
        pixelCoords /= pixelCoords.z; // need to project
        pixelCoords /= vec3(float(width), float(height), 1.0);
        pixelCoords.y = 1.0 - pixelCoords.y; // origin is in the bottom left corner for textures
        return pixelCoords.xy;
    }

    void main(void)
    {
        gl_Position = vec4(position / vec2(float(width) / 2.0, float(height) / 2.0) - vec2(1.0), 0.0, 1.0);
        gl_Position.y = -gl_Position.y;
        highp vec3 initialCoords = vec3(position, 1.0);
        refTexCoords = convertToTexture(homography1 * initialCoords);
        curTexCoords = convertToTexture(homography2 * initialCoords);
    }
);
// Fragment Shader
static const char* warpFS = STRINGIFY
(
    varying highp vec2 refTexCoords;
    varying highp vec2 curTexCoords;
    uniform mediump sampler2D refTex;
    uniform mediump sampler2D curTex;
    uniform mediump sampler2D maskTex;

    void main(void)
    {
        if (texture2D(maskTex, refTexCoords).r == 0.0) {
            discard;
        }
        if (any(bvec4(curTexCoords[0] < 0.0, curTexCoords[1] < 0.0, curTexCoords[0] > 1.0, curTexCoords[1] > 1.0))) {
            discard;
        }
        mediump vec4 referenceColor = texture2D(refTex, refTexCoords);
        mediump vec4 currentColor = texture2D(curTex, curTexCoords);
        gl_FragColor = vec4(abs(referenceColor.r - currentColor.r), 1.0, 0.0, 1.0);
    }
);
I think you just need to do the projection per pixel. Make refTexCoords and curTexCoords at least vec3, then do the divide by z in the fragment shader before the texture lookup. Even better, use the textureProj GLSL instruction.
You want to do everything that is linear in the vertex shader, but things like projection need to be done in the fragment shader per pixel.
This link might help with some background: http://www.reedbeta.com/blog/2012/05/26/quadrilateral-interpolation-part-1/
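A minimal sketch of that change, reusing the names from the question's shaders; it is an illustration of the idea rather than a drop-in replacement. The varyings become vec3 and carry the undivided homogeneous coordinates, and the divide by z happens per fragment (in OpenGL ES 2.0 the projective lookup built-in is texture2DProj):
// Vertex shader: keep the coordinates homogeneous; only the linear part
// (pixel -> [0,1] scaling and the y flip) is folded in here.
varying highp vec3 refTexCoords;
varying highp vec3 curTexCoords;

highp vec3 toHomogeneousTexture(highp vec3 p) {
    // After the per-fragment divide this yields (x/(w*z), 1.0 - y/(h*z)),
    // matching what convertToTexture produced per vertex.
    return vec3(p.x / float(width), p.z - p.y / float(height), p.z);
}

// in main():
refTexCoords = toHomogeneousTexture(homography1 * initialCoords);
curTexCoords = toHomogeneousTexture(homography2 * initialCoords);

// Fragment shader: perspective-correct lookup, dividing by z per pixel.
varying highp vec3 refTexCoords;
varying highp vec3 curTexCoords;

// in main():
mediump vec4 referenceColor = texture2DProj(refTex, refTexCoords);
mediump vec4 currentColor = texture2DProj(curTex, curTexCoords);
// equivalent to texture2D(refTex, refTexCoords.xy / refTexCoords.z), etc.
The mask lookup and the bounds test would likewise need the divided coordinates, e.g. refTexCoords.xy / refTexCoords.z.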
