I want to implement a horizontal image slider with three.js

I want to implement a horizontal image slider in three.js.
Below is my example of a vertical slider.
I want to implement the following image instead (a horizontal slider).
vertexShader() {
return `
varying vec2 vUv;
varying vec3 vPosition;
void main() {
vUv = uv;
vPosition = position;
gl_Position = projectionMatrix * modelViewMatrix * vec4(position, 1.0);
}
`
}
fragmentShader() {
return `
varying vec2 vUv;
varying vec3 vPosition;
uniform sampler2D tex0;
uniform sampler2D tex1;
uniform float divider;
uniform float zoomFactor;
uniform bool hidden;
void main() {
float dividerWidth;
if (hidden) {
dividerWidth = 0.0;
} else {
dividerWidth = 0.03 / zoomFactor;
}
if (vPosition.x > divider + dividerWidth) {
gl_FragColor = texture2D(tex1, vUv);
} else if (vPosition.x < divider - dividerWidth) {
gl_FragColor = texture2D(tex0, vUv);
} else {
gl_FragColor = vec4(0.5, 0.5, 1.0, 1.0);
}
}
`
}

You have to test the y component of the texture coordinate rather than the x component of the vertex coordinate. The components of the texture coordinate are in the range [0.0, 1.0], so divider has to be a value in the range [0.0, 1.0], too:
vec4 texColor0 = texture2D(tex0, vUv);
vec4 texColor1 = texture2D(tex1, vUv);
vec4 sliderColor = vec4(0.5, 0.5, 1.0, 1.0);
float limit0 = divider - dividerWidth;
float limit1 = divider + dividerWidth;
gl_FragColor = vUv.y > limit1 ? texColor1 : (vUv.y < limit0 ? texColor0 : sliderColor);
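Putting that together, a minimal sketch of the complete horizontal-slider fragment shader (same uniform names as in the question; the 0.03 divider width and the slider color are kept as-is, and divider is now expected in the range [0.0, 1.0]):
fragmentShader() {
return `
    varying vec2 vUv;
    uniform sampler2D tex0;
    uniform sampler2D tex1;
    uniform float divider;    // now in [0.0, 1.0]
    uniform float zoomFactor;
    uniform bool hidden;
    void main() {
        float dividerWidth = hidden ? 0.0 : 0.03 / zoomFactor;
        vec4 texColor0 = texture2D(tex0, vUv);
        vec4 texColor1 = texture2D(tex1, vUv);
        vec4 sliderColor = vec4(0.5, 0.5, 1.0, 1.0);
        float limit0 = divider - dividerWidth;
        float limit1 = divider + dividerWidth;
        // test the texture coordinate's y component to split along a horizontal line
        gl_FragColor = vUv.y > limit1 ? texColor1 : (vUv.y < limit0 ? texColor0 : sliderColor);
    }
`
}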

Related

Why does the color set by vertex and fragment shaders in three js depend on the camera position of the scene?

I made 1000 points and gave them coordinates to create a ring shape. I then assigned a ShaderMaterial to the points and referenced the vertex and fragment shaders below.
Vertex Shader:
const vertexShader = `
uniform float uTime;
uniform float uRadius;
varying vec3 vColor;
varying float vDistance;
void main() {
vDistance = distance(position, vec3(0.0));
// Do Not Touch
gl_Position = projectionMatrix * viewMatrix * modelMatrix * vec4( position, 1.0 );
gl_PointSize = 5.0;
}
`
export default vertexShader
Fragment Shader:
const fragmentShader = `
uniform float uDistance[1000];
uniform float uResolutionWidth;
uniform float uResolutionHeight;
varying float vDistance;
void main() {
vec2 resolution = vec2(uResolutionWidth, uResolutionHeight);
vec2 st = gl_FragCoord.xy/resolution;
float pct = distance(st, vec2(1.0));
vec3 color = vec3(mix(vec3(1.0, 0.0, 0.0), vec3(0.0, 0.0, 1.0), pct));
gl_FragColor = vec4( color, 1.0 );
}
`
export default fragmentShader
What I wanted to do was assign a color to each point based on its distance to the origin. However, I realized that what I actually did was assign a color based on the point's distance to the camera, or at least that's what it looks like.
EDIT:
I tried to pass along a varying vDistance like so
varying vec3 vRealPosition;
void main() {
vDistance = distance(position, vec3(0.0));
vColor = mix(vec3(1.0, 0.0, 0.0), vec3(0.0, 0.0, 1.0), vDistance);
vRealPosition = position;
// Do Not Touch
gl_Position = projectionMatrix * viewMatrix * modelMatrix * vec4( position, 1.0 );
gl_PointSize = 5.0;
}
But when I used it in the fragment shader, all points are just blue:
varying vec3 vRealPosition;
void main() {
vec2 resolution = vec2(uResolutionWidth, uResolutionHeight);
vec2 st = gl_FragCoord.xy/resolution;
float pct = distance(vRealPosition, vec3(0.0));
vec3 color = vec3(mix(vec3(1.0, 0.0, 0.0), vec3(0.0, 0.0, 1.0), pct));
gl_FragColor = vec4( vColor, 1.0 );
}
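One likely explanation, sketched here as an assumption since the ring radius isn't shown: vDistance (and pct) is the raw distance from the origin, which for points on a ring is roughly uRadius and therefore greater than 1.0, so the mix() result lands past its blue endpoint and clamps to blue; the fragment shader also outputs vColor instead of the color it just computed. Normalizing the distance by uRadius and outputting that color gives a per-point gradient:
// Vertex shader (sketch)
uniform float uRadius;
varying float vDistance;
void main() {
    // 0.0 at the origin, about 1.0 at the ring radius
    vDistance = distance(position, vec3(0.0)) / uRadius;
    gl_Position = projectionMatrix * viewMatrix * modelMatrix * vec4(position, 1.0);
    gl_PointSize = 5.0;
}
// Fragment shader (sketch)
varying float vDistance;
void main() {
    vec3 color = mix(vec3(1.0, 0.0, 0.0), vec3(0.0, 0.0, 1.0), clamp(vDistance, 0.0, 1.0));
    gl_FragColor = vec4(color, 1.0);
}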

reconstruct worldposition.xyz from depth

I want to restore worldPosition.xyz for any pixel of a rendered image for post-processing. With the help of the depth texture example from three.js, I reconstructed the depth value, so I think I am close to my goal. Does anyone know how I can reconstruct the world position from vUv and the depth value?
depthShader = {
uniforms: {
'tDiffuse': { value: null },
'tDepth': { value: null },
'cameraNear': { value: 0 },
'cameraFar': { value: 0 },
},
vertexShader:`
varying vec2 vUv;
void main() {
vUv = uv;
vec4 modelViewPosition = modelViewMatrix * vec4(position, 1.0);
gl_Position = projectionMatrix * modelViewPosition;
}`,
fragmentShader:`
#include <packing>
uniform sampler2D tDiffuse;
uniform sampler2D tDepth;
uniform float cameraNear;
uniform float cameraFar;
varying vec2 vUv;
float readDepth( sampler2D depthSampler, vec2 coord ) {
float fragCoordZ = texture2D( depthSampler, coord ).x;
float viewZ = perspectiveDepthToViewZ( fragCoordZ, cameraNear, cameraFar );
return viewZToOrthographicDepth( viewZ, cameraNear, cameraFar );
}
void main() {
float depth = readDepth(tDepth, vUv);
vec4 color = texture2D(tDiffuse, vUv);
gl_FragColor.rgb = 1.0 - vec3( depth );
}`
};
float clipW = cameraProjection[2][3] * viewZ + cameraProjection[3][3];
vec4 clipPosition = vec4( ( vec3( gl_FragCoord.xy / viewport.zw, depth ) - 0.5 ) * 2.0, 1.0 );
clipPosition *= clipW;
vec4 viewPosition = inverseProjection * clipPosition;
vec4 worldPosition = cameraMatrixWorld * vec4( viewPosition.xyz, 1.0 );
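A sketch of how those lines could fit into the fragment shader above. It assumes three extra uniforms fed from JavaScript (cameraProjection = camera.projectionMatrix, inverseProjection = camera.projectionMatrixInverse, cameraMatrixWorld = camera.matrixWorld), uses vUv in place of gl_FragCoord.xy / viewport.zw since this is a full-screen pass, and feeds the raw depth-buffer value (not the orthographic depth returned by readDepth) into the reconstruction:
fragmentShader:`
    #include <packing>
    uniform sampler2D tDiffuse;
    uniform sampler2D tDepth;
    uniform float cameraNear;
    uniform float cameraFar;
    uniform mat4 cameraProjection;   // camera.projectionMatrix
    uniform mat4 inverseProjection;  // camera.projectionMatrixInverse
    uniform mat4 cameraMatrixWorld;  // camera.matrixWorld
    varying vec2 vUv;
    vec3 worldPositionFromDepth( vec2 uv ) {
        float fragCoordZ = texture2D( tDepth, uv ).x;                               // raw depth in [0, 1]
        float viewZ = perspectiveDepthToViewZ( fragCoordZ, cameraNear, cameraFar ); // negative view-space z
        float clipW = cameraProjection[2][3] * viewZ + cameraProjection[3][3];
        vec4 clipPosition = vec4( ( vec3( uv, fragCoordZ ) - 0.5 ) * 2.0, 1.0 );    // NDC
        clipPosition *= clipW;                                                      // back to clip space
        vec4 viewPosition = inverseProjection * clipPosition;                       // unproject to view space
        vec4 worldPosition = cameraMatrixWorld * vec4( viewPosition.xyz, 1.0 );     // view -> world
        return worldPosition.xyz;
    }
    void main() {
        vec3 worldPosition = worldPositionFromDepth( vUv );
        // Visualize the reconstruction (scaled down); replace with the actual post-processing.
        gl_FragColor = vec4( worldPosition * 0.01, 1.0 );
    }`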

Shader wireframe of an object

I want to see a wireframe of an object without the diagonals (i.e. only the quad edges, not the triangulation).
Currently I add lines based on the vertices; the problem is that after I have several of those, I experience major performance degradation.
The examples here are either too new for my version of Three or don't work (I commented there about it).
So I want to try to implement a shader instead.
I tried to use this shader: https://stackoverflow.com/a/31610464/4279201 but it breaks the shape into parts and I'm getting WebGL errors.
This is how I use it:
const vertexShader = `
varying vec2 vUv;
void main() {
vUv = uv;
gl_Position = projectionMatrix * modelViewMatrix * vec4(position,1.0);
}
`
const fragmentShader = `
#version 150 compatibility
flat in float diffuse;
flat in float specular;
flat in vec3 edge_mask;
in vec2 bary;
uniform float mesh_width = 1.0;
uniform vec3 mesh_color = vec3(0.0, 0.0, 0.0);
uniform bool lighting = true;
out vec4 frag_color ;
float edge_factor(){
vec3 bary3 = vec3(bary.x, bary.y, 1.0 - bary.x - bary.y);
vec3 d = fwidth(bary3);
vec3 a3 = smoothstep(vec3(0.0, 0.0, 0.0), d * mesh_width, bary3);
a3 = vec3(1.0, 1.0, 1.0) - edge_mask + edge_mask * a3;
return min(min(a3.x, a3.y), a3.z);
}
void main() {
float s = (lighting && gl_FrontFacing) ? 1.0 : -1.0;
vec4 Kdiff = gl_FrontFacing ?
gl_FrontMaterial.diffuse : gl_BackMaterial.diffuse;
float sdiffuse = s * diffuse;
vec4 result = vec4(0.1, 0.1, 0.1, 1.0);
if (sdiffuse > 0.0) {
result += sdiffuse * Kdiff +
specular * gl_FrontMaterial.specular;
}
frag_color = (mesh_width != 0.0) ?
mix(vec4(mesh_color, 1.0), result, edge_factor()) :
result;
}`
...
const uniforms = {
color: {
value: new THREE.Vector4(0, 0, 1, 1),
type: 'v4'
}
}
const material = new THREE.ShaderMaterial({
fragmentShader: data.fragmentShader,
vertexShader: data.vertexShader,
uniforms
})
this._viewer.impl.matman().addMaterial(
data.name, material, true)
const fragList = this._viewer.model.getFragmentList()
this.toArray(fragIds).forEach((fragId) => {
fragList.setMaterial(fragId, material)
})
So to implement this shader, would the right approach basically be to check the angle between every two vertices, and draw a line if the angle is 90 degrees?
How can I access all the vertices of the shape from the vertex shader?
And how can I tell the fragment shader to draw a line between two vertices that match the above condition (while leaving the default shading for everything else as is)?
I'm using the Autodesk viewer, which uses Three.js rev 71.
// -- Vertex Shader --
precision mediump float;
// Input from buffers
attribute vec3 aPosition;
attribute vec3 aBaryCoord;
// Value interpolated accross pixels and passed to the fragment shader
varying vec3 vBaryCoord;
// Uniforms
uniform mat4 uModelMatrix;
uniform mat4 uViewMatrix;
uniform mat4 uProjMatrix;
void main() {
vBaryCoord = aBaryCoord;
gl_Position = uProjMatrix * uViewMatrix * uModelMatrix * vec4(aPosition,1.0);
}
// ---------------------
// -- Fragment Shader --
// This shader doesn't perform any lighting
precision mediump float;
varying vec3 vBaryCoord;
uniform vec3 uMeshColour;
float edgeFactor() {
vec3 d = fwidth(vBaryCoord);
vec3 a3 = smoothstep(vec3(0.0,0.0,0.0),d * 1.5,vBaryCoord);
return min(min(a3.x,a3.y),a3.z);
}
void main() {
gl_FragColor = vec4(uMeshColour,(1.0 - edgeFactor()) * 0.95);
}
// ---------------------
/*
This code isn't tested so take it with a grain of salt
Idea taken from
http://codeflow.org/entries/2012/aug/02/easy-wireframe-display-with-barycentric-coordinates/
*/
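The shader above assumes every triangle corner carries a barycentric coordinate. A minimal sketch of how that attribute could be generated with a plain three.js BufferGeometry (modern API names; an r71-era build such as the Autodesk viewer would use geometry.addAttribute instead of setAttribute, and may not have toNonIndexed):
// Adds a per-vertex barycentric attribute so edgeFactor() can detect triangle edges.
function addBarycentricCoordinates(geometry) {
    // Only works on non-indexed geometry, where each triangle owns its three vertices.
    const nonIndexed = geometry.index ? geometry.toNonIndexed() : geometry;
    const vertexCount = nonIndexed.attributes.position.count;
    const bary = new Float32Array(vertexCount * 3);
    for (let i = 0; i < vertexCount; i += 3) {
        // corner 0 -> (1,0,0), corner 1 -> (0,1,0), corner 2 -> (0,0,1)
        bary.set([1, 0, 0, 0, 1, 0, 0, 0, 1], i * 3);
    }
    nonIndexed.setAttribute('aBaryCoord', new THREE.BufferAttribute(bary, 3));
    return nonIndexed;
}
Note that fwidth() requires the OES_standard_derivatives extension under WebGL 1; in newer three.js this is enabled via material.extensions.derivatives = true.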

Three.JS: Gaussian blur in GLSL shader

I have this vertex/fragment shader pair, which uses vertex data and two textures.
I am trying to apply a post-processing blur effect, but I only get rectangles after applying it.
vert:
attribute float type;
attribute float size;
attribute float phase;
attribute float increment;
uniform float time;
uniform vec2 resolution;
uniform sampler2D textureA;
uniform sampler2D textureB;
varying float t;
void main() {
t = type;
vec4 mvPosition = modelViewMatrix * vec4(position, 1.0 );
if(t == 0.) {
gl_PointSize = size * 0.8;
} else {
gl_PointSize = size * sin(phase + time * increment) * 12.;
}
gl_Position = projectionMatrix * mvPosition;
}
frag:
uniform float time;
uniform vec2 resolution;
uniform sampler2D textureA;
uniform sampler2D textureB;
varying float t;
uniform sampler2D texture;
vec4 blur2D(sampler2D image, vec2 uv, vec2 resolution, vec2 direction) {
vec4 color = vec4(0.0);
vec2 off1 = vec2(1.3846153846) * direction;
vec2 off2 = vec2(3.2307692308) * direction;
color += texture2D(image, uv) * 0.2270270270;
color += texture2D(image, uv + (off1 / resolution)) * 0.3162162162;
color += texture2D(image, uv - (off1 / resolution)) * 0.3162162162;
color += texture2D(image, uv + (off2 / resolution)) * 0.0702702703;
color += texture2D(image, uv - (off2 / resolution)) * 0.0702702703;
return color;
}
void main() {
vec2 direction = vec2(1., 0.);
vec2 uv = vec2(gl_FragCoord.xy / resolution.xy);
gl_FragColor = vec4(vec3(1.0, 1.0, 1.0), 1.);
if(t == 0.){
gl_FragColor = gl_FragColor * texture2D(textureA, gl_PointCoord);
} else {
gl_FragColor = gl_FragColor * texture2D(textureB, gl_PointCoord);
}
gl_FragColor = blur2D(texture, uv, resolution.xy, direction);
}
How could I 'bake' everything into a texture2D/sampler2D before applying the blur?
Maybe I need to create another blur shader and pass that texture to it?
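The usual approach is exactly that: render the scene into an off-screen render target first, then run a second full-screen pass that samples the result and blurs it. A minimal sketch using a WebGLRenderTarget (renderer, scene and camera are assumed to be your existing objects, and setRenderTarget is the API of recent three.js versions):
// Pass 1 target: the whole point cloud is "baked" into this texture.
const renderTarget = new THREE.WebGLRenderTarget(window.innerWidth, window.innerHeight);
// Pass 2 material: full-screen blur of the baked texture.
const blurMaterial = new THREE.ShaderMaterial({
    uniforms: {
        tInput: { value: renderTarget.texture },
        resolution: { value: new THREE.Vector2(window.innerWidth, window.innerHeight) },
        direction: { value: new THREE.Vector2(1, 0) },
    },
    vertexShader: `
        varying vec2 vUv;
        void main() {
            vUv = uv;
            gl_Position = vec4(position, 1.0); // full-screen quad, no camera transform
        }`,
    fragmentShader: `
        uniform sampler2D tInput;
        uniform vec2 resolution;
        uniform vec2 direction;
        varying vec2 vUv;
        void main() {
            vec2 off1 = vec2(1.3846153846) * direction / resolution;
            vec2 off2 = vec2(3.2307692308) * direction / resolution;
            vec4 color = texture2D(tInput, vUv) * 0.2270270270;
            color += texture2D(tInput, vUv + off1) * 0.3162162162;
            color += texture2D(tInput, vUv - off1) * 0.3162162162;
            color += texture2D(tInput, vUv + off2) * 0.0702702703;
            color += texture2D(tInput, vUv - off2) * 0.0702702703;
            gl_FragColor = color;
        }`,
});
const quadScene = new THREE.Scene();
const quadCamera = new THREE.OrthographicCamera(-1, 1, 1, -1, 0, 1);
quadScene.add(new THREE.Mesh(new THREE.PlaneGeometry(2, 2), blurMaterial));
function render() {
    renderer.setRenderTarget(renderTarget);  // pass 1: draw the points off-screen
    renderer.render(scene, camera);
    renderer.setRenderTarget(null);          // pass 2: blur the baked texture to the canvas
    renderer.render(quadScene, quadCamera);
}
For a proper Gaussian you would run the blur pass twice, once with direction (1, 0) into a second render target and once with (0, 1) to the screen; three.js's EffectComposer can also manage these passes for you.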

Shadow artifacts in opengl

I am trying to render an object and two lights; one of the lights casts shadows. Everything works OK, but I noticed some obvious artifacts, as shown in the image below: some shadows seem to overflow into bright areas.
Below are the shaders that render depth information into a framebuffer:
<script id="shadow-shader-vertex" type="x-shader/x-vertex">
attribute vec4 aVertexPosition;
uniform mat4 uObjMVP;
void main() {
gl_Position = uObjMVP * aVertexPosition;
}
</script>
<script id="shadow-shader-fragment" type="x-shader/x-vertex">
precision mediump float;
void main() {
//pack gl_FragCoord.z
const vec4 bitShift = vec4(1.0, 256.0, 256.0 * 256.0, 256.0 * 256.0 * 256.0);
const vec4 bitMask = vec4(1.0/256.0, 1.0/256.0, 1.0/256.0, 0.0);
vec4 rgbaDepth = fract(gl_FragCoord.z * bitShift);
rgbaDepth -= rgbaDepth.gbaa * bitMask;
gl_FragColor = rgbaDepth;
}
</script>
In the above shaders, uObjMVP is the MVP matrix used when looking from the position of the light that casts shadows (the warm light; the cold light does not cast shadows).
And here are the shaders to draw everything:
<script id="shader-vertex" type="x-shader/x-vertex">
//position of a vertex.
attribute vec4 aVertexPosition;
//vertex normal.
attribute vec3 aNormal;
//mvp matrix
uniform mat4 uObjMVP;
uniform mat3 uNormalMV;
//shadow mvp matrix
uniform mat4 uShadowMVP;
//interplate normals
varying vec3 vNormal;
//for shadow calculation
varying vec4 vShadowPositionFromLight;
void main() {
gl_Position = uObjMVP * aVertexPosition;
//convert normal direction from object space to view space
vNormal = uNormalMV * aNormal;
vShadowPositionFromLight = uShadowMVP * aVertexPosition;
}
</script>
<script id="shader-fragment" type="x-shader/x-fragment">
precision mediump float;
uniform sampler2D uShadowMap;
varying vec3 vNormal;
varying vec4 vShadowPositionFromLight;
struct baseColor {
vec3 ambient;
vec3 diffuse;
};
struct directLight {
vec3 direction;
vec3 color;
};
baseColor mysObjBaseColor = baseColor(
vec3(1.0, 1.0, 1.0),
vec3(1.0, 1.0, 1.0)
);
directLight warmLight = directLight(
normalize(vec3(-83.064, -1.99, -173.467)),
vec3(0.831, 0.976, 0.243)
);
directLight coldLight = directLight(
normalize(vec3(37.889, 47.864, -207.187)),
vec3(0.196, 0.361, 0.608)
);
vec3 ambientLightColor = vec3(0.3, 0.3, 0.3);
float unpackDepth(const in vec4 rgbaDepth) {
const vec4 bitShift = vec4(1.0, 1.0/256.0, 1.0/(256.0*256.0), 1.0/(256.0*256.0*256.0));
float depth = dot(rgbaDepth, bitShift);
return depth;
}
float calVisibility() {
vec3 shadowCoord = (vShadowPositionFromLight.xyz/vShadowPositionFromLight.w)/2.0 + 0.5;
float depth = unpackDepth(texture2D(uShadowMap, shadowCoord.xy));
return (shadowCoord.z > depth + 0.005) ? 0.4 : 1.0;
}
vec3 calAmbientLight(){
return ambientLightColor * mysObjBaseColor.ambient;
}
vec3 calDiffuseLight(const in directLight light, const in float visibility){
vec3 inverseLightDir = light.direction * -1.0;
float dot = max(dot(inverseLightDir, normalize(vNormal)), 0.0);
return light.color * mysObjBaseColor.diffuse * dot * visibility;
}
void main() {
vec3 ambientLight = calAmbientLight();
float visibility = calVisibility();
vec3 warmDiffuseLight = calDiffuseLight(warmLight, visibility);
// cold light does not cast shadows and hence visibility is always 1.0
vec3 coldDiffuseLight = calDiffuseLight(coldLight, 1.0);
gl_FragColor = vec4(coldDiffuseLight + warmDiffuseLight + ambientLight, 1.0);
}
</script>
If I simply draw the depth information onto the canvas,
void main() {
// vec3 ambientLight = calAmbientLight();
// float visibility = calVisibility();
// vec3 warmDiffuseLight = calDiffuseLight(warmLight, visibility);
// // cold light does not cast shadows and hence visibility is always 1.0
// vec3 coldDiffuseLight = calDiffuseLight(coldLight, 1.0);
// gl_FragColor = vec4(coldDiffuseLight + warmDiffuseLight + ambientLight, 1.0);
vec3 shadowCoord = (vShadowPositionFromLight.xyz/vShadowPositionFromLight.w)/2.0 + 0.5;
gl_FragColor = vec4(unpackDepth(texture2D(uShadowMap, shadowCoord.xy)), 0.0, 0.0, 1.0);
}
I would get this image
Thanks in advance.
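A common mitigation for acne like this, sketched here only as an assumption rather than a confirmed fix for this scene, is to scale the depth bias with the angle between the surface normal and the light direction, so surfaces nearly parallel to the light get a larger bias than the fixed 0.005:
float calVisibility(const in vec3 normal, const in vec3 lightDir) {
    vec3 shadowCoord = (vShadowPositionFromLight.xyz/vShadowPositionFromLight.w)/2.0 + 0.5;
    float depth = unpackDepth(texture2D(uShadowMap, shadowCoord.xy));
    // Slope-scaled bias: grows as the surface turns away from the light.
    float cosTheta = clamp(dot(normalize(normal), -lightDir), 0.0, 1.0);
    float bias = clamp(0.005 * (1.0 - cosTheta) + 0.0015, 0.0015, 0.01);
    return (shadowCoord.z > depth + bias) ? 0.4 : 1.0;
}
It would then be called as calVisibility(vNormal, warmLight.direction) in main().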
