I have this vert/frag shader, which is using vertex data and two textures.
I am trying to apply a post-processing blur effect, but I get only rectangles after applying it.
vert:
attribute float type;
attribute float size;
attribute float phase;
attribute float increment;
uniform float time;
uniform vec2 resolution;
uniform sampler2D textureA;
uniform sampler2D textureB;
varying float t;
// Vertex shader entry point: sizes each point sprite according to its per-vertex `type`.
void main() {
// Forward the per-vertex type to the fragment shader (it selects textureA vs textureB).
t = type;
vec4 mvPosition = modelViewMatrix * vec4(position, 1.0 );
// NOTE(review): exact float comparison on an attribute value; works when `type`
// is authored as exactly 0.0, but an epsilon test (abs(t) < 0.5) would be safer.
if(t == 0.) {
gl_PointSize = size * 0.8;
} else {
// Animated points: pulse the sprite size with a per-point phase and speed.
gl_PointSize = size * sin(phase + time * increment) * 12.;
}
gl_Position = projectionMatrix * mvPosition;
}
frag:
uniform float time;
uniform vec2 resolution;
uniform sampler2D textureA;
uniform sampler2D textureB;
varying float t;
uniform sampler2D texture;
// Separable 9-tap Gaussian blur performed with 5 texture fetches by exploiting
// bilinear filtering (linear-sampling offsets/weights).
// `direction` selects the axis — e.g. (1,0) horizontal, (0,1) vertical;
// `resolution` converts the pixel-space tap offsets into texture coordinates.
vec4 blur2D(sampler2D image, vec2 uv, vec2 resolution, vec2 direction) {
vec2 nearTap = vec2(1.3846153846) * direction;
vec2 farTap = vec2(3.2307692308) * direction;
// Center tap plus two symmetric pairs; the weights sum to 1.0.
vec4 sum = texture2D(image, uv) * 0.2270270270;
sum += texture2D(image, uv + (nearTap / resolution)) * 0.3162162162;
sum += texture2D(image, uv - (nearTap / resolution)) * 0.3162162162;
sum += texture2D(image, uv + (farTap / resolution)) * 0.0702702703;
sum += texture2D(image, uv - (farTap / resolution)) * 0.0702702703;
return sum;
}
// Fragment shader entry point: textures each point sprite and blurs it.
void main() {
// Horizontal blur axis; run a second pass with vec2(0., 1.) for a full 2D blur.
vec2 direction = vec2(1., 0.);
// BUG FIX: the original ended with `gl_FragColor = blur2D(texture, uv, ...)`,
// which (a) discarded the textureA/textureB sprite shading computed just above
// and (b) sampled the unbound `texture` uniform at *screen* coordinates — every
// point came out as a flat rectangle. Instead, blur the texture this point
// actually uses, sampled at the sprite-local gl_PointCoord.
if(t == 0.) {
gl_FragColor = blur2D(textureA, gl_PointCoord, resolution.xy, direction);
} else {
gl_FragColor = blur2D(textureB, gl_PointCoord, resolution.xy, direction);
}
// NOTE(review): to blur the *composed* image ("bake everything first"), render
// the points into a WebGLRenderTarget and blur that texture in a separate
// full-screen post-processing pass instead of blurring per sprite.
}
How could I 'bake' everything before applying blurring to texture2D/sampler2D?
Maybe I need to create another blur shader and pass texture2D to it?
Related
I made 1000 points and gave them coordinates to create a ring shape. I then gave a shader material to the points and pointed to the vertex and fragment shaders.
Vertex Shader:
// GLSL vertex shader for the ring particle system: fixed 5px point size, and
// computes each vertex's distance from the object-space origin for the
// fragment stage.
const vertexShader = `
uniform float uTime;
uniform float uRadius;
varying vec3 vColor;
varying float vDistance;
void main() {
vDistance = distance(position, vec3(0.0));
// Do Not Touch
gl_Position = projectionMatrix * viewMatrix * modelMatrix * vec4( position, 1.0 );
gl_PointSize = 5.0;
}
`
// NOTE(review): vColor is declared but never assigned here, and the fragment
// shader shown below never reads vDistance — confirm which varying is intended.
export default vertexShader
Fragment Shader:
// GLSL fragment shader for the ring particle system.
// NOTE(review): `pct` is derived from gl_FragCoord (the fragment's *screen*
// position), not from the point's position — that is why the color appears to
// follow the camera/screen instead of the distance from the origin.
const fragmentShader = `
uniform float uDistance[1000];
uniform float uResolutionWidth;
uniform float uResolutionHeight;
varying float vDistance;
void main() {
vec2 resolution = vec2(uResolutionWidth, uResolutionHeight);
vec2 st = gl_FragCoord.xy/resolution;
float pct = distance(st, vec2(1.0));
vec3 color = vec3(mix(vec3(1.0, 0.0, 0.0), vec3(0.0, 0.0, 1.0), pct));
gl_FragColor = vec4( color, 1.0 );
}
`
export default fragmentShader
What I had wanted to do was assign a color to each point based on its distance from the origin. However, I realized that what I actually did was assign a color based on the point's distance from the camera — or at least that's what it looks like.
EDIT:
I tried to pass along a varying vDistance like so
// Second attempt: also forward the raw object-space position to the fragment
// shader so distance can be computed per fragment there.
varying vec3 vRealPosition;
void main() {
vDistance = distance(position, vec3(0.0));
// NOTE(review): mix() expects an interpolant in [0,1]; any vDistance > 1.0
// saturates at the second color (pure blue). Presumably dividing by the ring
// radius is needed — confirm against the geometry.
vColor = mix(vec3(1.0, 0.0, 0.0), vec3(0.0, 0.0, 1.0), vDistance);
vRealPosition = position;
// Do Not Touch
gl_Position = projectionMatrix * viewMatrix * modelMatrix * vec4( position, 1.0 );
gl_PointSize = 5.0;
}
But when I used it in the fragment shader, all the points are just blue:
// Fragment shader: color each point by its object-space distance from the origin.
varying vec3 vRealPosition;
void main() {
vec2 resolution = vec2(uResolutionWidth, uResolutionHeight);
vec2 st = gl_FragCoord.xy/resolution;
// Distance of this fragment's (interpolated) object-space position from the origin.
float pct = distance(vRealPosition, vec3(0.0));
vec3 color = vec3(mix(vec3(1.0, 0.0, 0.0), vec3(0.0, 0.0, 1.0), pct));
// BUG FIX: the original wrote vColor (the vertex-stage value, which saturates
// to blue for distances > 1.0) instead of the locally computed `color`, so the
// pct/color computation above was dead code. NOTE(review): mix() still expects
// its interpolant in [0,1]; normalize pct by the ring radius if distances
// exceed 1 — confirm against the geometry.
gl_FragColor = vec4( color, 1.0 );
}
I want to restore the worldPosition.xyz of any pixel of a rendered image for postprocessing. With the help of the example from three.js I reconstructed the depth value, so I think I am close to my goal. Does anyone know how I can reconstruct the world position from the vUv and the depth value?
// Post-processing pass definition: reads the scene's depth texture, linearizes
// it with the camera's near/far planes, and outputs an inverted grayscale
// depth visualization. NOTE(review): the fragment shader never writes
// gl_FragColor.a — presumably it should also set the alpha channel; confirm.
depthShader = {
uniforms: {
'tDiffuse': { value: null }, // rendered scene color (sampled but unused in the output)
'tDepth': { value: null }, // depth texture attached to the render target
'cameraNear': { value: 0 }, // camera near plane, required to linearize depth
'cameraFar': { value: 0 }, // camera far plane
},
vertexShader:`
varying vec2 vUv;
void main() {
vUv = uv;
vec4 modelViewPosition = modelViewMatrix * vec4(position, 1.0);
gl_Position = projectionMatrix * modelViewPosition;
}`,
fragmentShader:`
#include <packing>
uniform sampler2D tDiffuse;
uniform sampler2D tDepth;
uniform float cameraNear;
uniform float cameraFar;
varying vec2 vUv;
float readDepth( sampler2D depthSampler, vec2 coord ) {
float fragCoordZ = texture2D( depthSampler, coord ).x;
float viewZ = perspectiveDepthToViewZ( fragCoordZ, cameraNear, cameraFar );
return viewZToOrthographicDepth( viewZ, cameraNear, cameraFar );
}
void main() {
float depth = readDepth(tDepth, vUv);
vec4 color = texture2D(tDiffuse, vUv);
gl_FragColor.rgb = 1.0 - vec3( depth );
}`
};
// Reconstruct the world-space position of a fragment from its screen coordinate
// and sampled depth.
// Recover the clip-space w the projection produced for this view-space depth.
float clipW = cameraProjection[2][3] * viewZ + cameraProjection[3][3];
// Rebuild the NDC position in [-1,1]^3 from the screen coordinate and depth.
vec4 clipPosition = vec4( ( vec3( gl_FragCoord.xy / viewport.zw, depth ) - 0.5 ) * 2.0, 1.0 );
clipPosition *= clipW;
// Undo the projection, then the view transform.
vec4 viewPosition = inverseProjection * clipPosition;
// FIX: renamed the misspelled `vorldPosition` to `worldPosition`.
vec4 worldPosition = cameraMatrixWorld * vec4( viewPosition.xyz, 1.0 );
I imported a simple animated object from Blender on three bones. I have a problem with lighting of skinning object. I set a light position above the object:
const vec3 lightPosition = vec3(0.0, 15.0, 0.0);
You can see that lighting is affected under object too:
precision mediump float;
attribute vec3 aPosition;
// NOTE(review): aNormal is a vec4 — if its w component is 1.0, multiplying by a
// joint matrix also applies the matrix's translation column to the normal.
attribute vec4 aNormal;
attribute vec2 aTexCoord;
// Up to three joint indices and matching blend weights per vertex.
attribute vec3 aJoints;
attribute vec3 aWeights;
uniform mat4 uMvpMatrix;
uniform mat4 uModelMatrix;
uniform mat4 uNormalMatrix;
// One full transform per bone (three bones).
uniform mat4 uTransforms[3];
varying vec3 vPosition;
varying vec3 vNormal;
varying vec2 vTexCoord;
// Linear-blend skinning: accumulate the position and normal transformed by each
// influencing joint, weighted by aWeights.
void main()
{
vec4 totalLocalPos = vec4(0.0);
vec4 totalNormal = vec4(0.0);
for (int i = 0; i < 3; i++)
{
int jointIndex = int(aJoints[i]);
mat4 jointTransform = uTransforms[jointIndex];
vec4 posePosition = jointTransform * vec4(aPosition, 1.0);
totalLocalPos += posePosition * aWeights[i];
// NOTE(review): the normal is transformed by the FULL joint matrix here; any
// translation in jointTransform (with aNormal.w != 0) skews the normal, which
// is the lighting bug this post describes — normals should only be rotated
// (see the uRotations fix below).
vec4 worldNormal = jointTransform * aNormal;
totalNormal += worldNormal * aWeights[i];
}
gl_Position = uMvpMatrix * totalLocalPos;
// NOTE(review): vPosition is computed from the un-skinned aPosition; presumably
// it should use totalLocalPos so the lighting follows the animated pose — confirm.
vPosition = vec3(uModelMatrix * vec4(aPosition, 1.0));
vNormal = totalNormal.xyz;
vTexCoord = aTexCoord;
}
precision mediump float;
const vec3 lightColor = vec3(0.8, 0.8, 0.8);
const vec3 lightPosition = vec3(0.0, 15.0, 0.0);
const vec3 ambientLight = vec3(0.3, 0.3, 0.3);
uniform sampler2D uSampler;
varying vec3 vPosition;
varying vec3 vNormal;
varying vec2 vTexCoord;
// Per-fragment shading: Lambertian diffuse from a single point light at
// lightPosition, plus a constant ambient term, modulated by the texture color.
void main()
{
vec4 baseColor = texture2D(uSampler, vTexCoord);
// Direction from this surface point toward the light.
vec3 toLight = normalize(lightPosition - vPosition);
// Re-normalize the interpolated normal before use.
vec3 unitNormal = normalize(vNormal);
// Clamp the Lambert factor so surfaces facing away receive no diffuse light.
float lambert = max(dot(toLight, unitNormal), 0.0);
vec3 ambientTerm = ambientLight * baseColor.rgb;
vec3 diffuseTerm = lightColor * baseColor.rgb * lambert;
gl_FragColor = vec4(diffuseTerm + ambientTerm, baseColor.a);
}
Aroch helped me on Russian forum here:
the normal only needs to be rotated.
Now it works as it should. I took the shaders (except the lighting model) from the source for a video tutorial series by ThinMatrix. It works fine for ThinMatrix, which means that for some reason his lighting model is not affected by the translation component of the normal transform; I will need to figure out later why he does not have this problem. I moved the array of rotation matrices into a separate uniform:
// The fix: keep two uniform arrays — full joint transforms for positions, and
// rotation-only matrices for normals, so translation never contaminates normals.
uniform mat4 uTransforms[3];
uniform mat4 uRotations[3];
...
for (int i = 0; i < 3; i++)
{
int jointIndex = int(aJoints[i]);
mat4 jointTransform = uTransforms[jointIndex];
vec4 posePosition = jointTransform * vec4(aPosition, 1.0);
totalLocalPos += posePosition * aWeights[i];
// Normals are transformed by the rotation-only matrix of the same joint.
mat4 rotation = uRotations[jointIndex];
vec4 worldNormal = rotation * aNormal;
totalNormal += worldNormal * aWeights[i];
}
Gif Animation: https://gamedev.ru/files/images/solution-with-lighting.gif
I want to implement a horizontal image slider in three.js.
This is the example of a vertical slider.
I want to implement the following image. (horizontal slider). This is the example of a horizontal slider.
// Returns the GLSL vertex shader for the slider plane: forwards the UVs and the
// object-space vertex position to the fragment stage, with a standard
// projection * modelView transform for the output position.
vertexShader() {
return `
varying vec2 vUv;
varying vec3 vPosition;
void main() {
vUv = uv;
vPosition = position;
gl_Position = projectionMatrix * modelViewMatrix * vec4(position, 1.0);
}
`
}
// Returns the GLSL fragment shader for the before/after slider: shows tex1 on
// one side of `divider`, tex0 on the other, and a solid divider bar of width
// 2 * dividerWidth in between (hidden collapses the bar to zero width).
// NOTE(review): the split tests vPosition.x, which is in the plane's
// object-space units — so `divider` must be expressed in those units, not in
// [0,1] texture coordinates; the answer below switches to vUv for that reason.
fragmentShader() {
return `
varying vec2 vUv;
varying vec3 vPosition;
uniform sampler2D tex0;
uniform sampler2D tex1;
uniform float divider;
uniform float zoomFactor;
uniform bool hidden;
void main() {
float dividerWidth;
if (hidden) {
dividerWidth = 0.0;
} else {
dividerWidth = 0.03 / zoomFactor;
}
if (vPosition.x > divider + dividerWidth) {
gl_FragColor = texture2D(tex1, vUv);
} else if (vPosition.x < divider - dividerWidth) {
gl_FragColor = texture2D(tex0, vUv);
} else {
gl_FragColor = vec4(0.5, 0.5, 1.0, 1.0);
}
}
`
}
You have to test the y component of the texture coordinate rather than the x component of the vertex coordinate. The components of the texture coordinates are in the range [0.0, 1.0]. Hence divider has to be a value in the range [0.0, 1.0], too:
// Sample both images once, then select per fragment using the V texture
// coordinate (always in [0,1]) instead of the object-space vertex position.
vec4 texColor0 = texture2D(tex0, vUv);
vec4 texColor1 = texture2D(tex1, vUv);
vec4 sliderColor = vec4(0.5, 0.5, 1.0, 1.0);
// Edges of the divider bar, in [0,1] texture space.
float limit0 = divider - dividerWidth;
float limit1 = divider + dividerWidth;
gl_FragColor = vUv.y > limit1 ? texColor1 : (vUv.y < limit0 ? texColor0 : sliderColor);
I am trying to render an object and two lights, one of the lights cast shadows. Everything works ok but I noticed that there are some obvious artifacts, as shown in the below image, some shadows seem to overflow to bright areas.
Below are the shaders used to render depth information into a framebuffer:
<script id="shadow-shader-vertex" type="x-shader/x-vertex">
attribute vec4 aVertexPosition;
// MVP matrix built from the shadow-casting (warm) light's point of view.
uniform mat4 uObjMVP;
void main() {
gl_Position = uObjMVP * aVertexPosition;
}
</script>
<!-- FIX: this is a fragment shader, but the tag was labeled x-shader/x-vertex;
     shader loaders that dispatch on the type attribute would miscompile it. -->
<script id="shadow-shader-fragment" type="x-shader/x-fragment">
precision mediump float;
void main() {
//pack gl_FragCoord.z
// Spread the [0,1) depth across four 8-bit RGBA channels so it survives
// storage in a standard color framebuffer (no float-texture support needed).
const vec4 bitShift = vec4(1.0, 256.0, 256.0 * 256.0, 256.0 * 256.0 * 256.0);
const vec4 bitMask = vec4(1.0/256.0, 1.0/256.0, 1.0/256.0, 0.0);
vec4 rgbaDepth = fract(gl_FragCoord.z * bitShift);
// Subtract each channel's spill-over so the channels hold disjoint byte slices.
rgbaDepth -= rgbaDepth.gbaa * bitMask;
gl_FragColor = rgbaDepth;
}
</script>
In the above shaders, uObjMVP is the MVP matrix used when looking from the position of the light that cast shadow (the warm light, the cold light does not cast shadow)
And here are the shaders to draw everything:
<script id="shader-vertex" type="x-shader/x-vertex">
//position of a vertex.
attribute vec4 aVertexPosition;
//vertex normal.
attribute vec3 aNormal;
//mvp matrix
uniform mat4 uObjMVP;
// Normal matrix for transforming normals into view space.
uniform mat3 uNormalMV;
//shadow mvp matrix
uniform mat4 uShadowMVP;
//interplate normals
varying vec3 vNormal;
//for shadow calculation
varying vec4 vShadowPositionFromLight;
void main() {
gl_Position = uObjMVP * aVertexPosition;
//convert normal direction from object space to view space
vNormal = uNormalMV * aNormal;
// Same vertex projected from the light's viewpoint; the fragment shader uses
// this to look up the shadow map.
vShadowPositionFromLight = uShadowMVP * aVertexPosition;
}
</script>
<script id="shader-fragment" type="x-shader/x-fragment">
precision mediump float;
// Depth map rendered from the shadow-casting (warm) light's point of view.
uniform sampler2D uShadowMap;
varying vec3 vNormal;
varying vec4 vShadowPositionFromLight;
// Material reflectances for the ambient and diffuse terms.
struct baseColor {
vec3 ambient;
vec3 diffuse;
};
// A directional light: the direction it shines in, and its color.
struct directLight {
vec3 direction;
vec3 color;
};
baseColor mysObjBaseColor = baseColor(
vec3(1.0, 1.0, 1.0),
vec3(1.0, 1.0, 1.0)
);
// The warm light casts the shadow; the cold light does not.
directLight warmLight = directLight(
normalize(vec3(-83.064, -1.99, -173.467)),
vec3(0.831, 0.976, 0.243)
);
directLight coldLight = directLight(
normalize(vec3(37.889, 47.864, -207.187)),
vec3(0.196, 0.361, 0.608)
);
vec3 ambientLightColor = vec3(0.3, 0.3, 0.3);
// Inverse of the shadow pass's packing: reassemble a scalar depth from the
// four RGBA byte slices.
float unpackDepth(const in vec4 rgbaDepth) {
const vec4 bitShift = vec4(1.0, 1.0/256.0, 1.0/(256.0*256.0), 1.0/(256.0*256.0*256.0));
float depth = dot(rgbaDepth, bitShift);
return depth;
}
// Shadow test for this fragment: returns 1.0 when lit, 0.4 when in shadow.
float calVisibility() {
// Perspective divide, then remap NDC [-1,1] to [0,1] texture space.
vec3 shadowCoord = (vShadowPositionFromLight.xyz/vShadowPositionFromLight.w)/2.0 + 0.5;
float depth = unpackDepth(texture2D(uShadowMap, shadowCoord.xy));
// NOTE(review): the fixed 0.005 bias is a likely cause of the artifacts in the
// screenshot — a constant bias is too small at grazing angles (shadow acne);
// a slope-scaled bias or PCF filtering would be worth trying. Confirm.
return (shadowCoord.z > depth + 0.005) ? 0.4 : 1.0;
}
vec3 calAmbientLight(){
return ambientLightColor * mysObjBaseColor.ambient;
}
// Lambertian diffuse term for one light, attenuated by shadow visibility.
vec3 calDiffuseLight(const in directLight light, const in float visibility){
vec3 inverseLightDir = light.direction * -1.0;
float dot = max(dot(inverseLightDir, normalize(vNormal)), 0.0);
return light.color * mysObjBaseColor.diffuse * dot * visibility;
}
void main() {
vec3 ambientLight = calAmbientLight();
float visibility = calVisibility();
vec3 warmDiffuseLight = calDiffuseLight(warmLight, visibility);
// cold light does not cast shadow and hence visilibility is always 1.0
vec3 coldDiffuseLight = calDiffuseLight(coldLight, 1.0);
gl_FragColor = vec4(coldDiffuseLight + warmDiffuseLight + ambientLight, 1.0);
}
</script>
If I simply draw the depth information out on to the canvas,
void main() {
// vec3 ambientLight = calAmbientLight();
// float visibility = calVisibility();
// vec3 warmDiffuseLight = calDiffuseLight(warmLight, visibility);
// // cold light does not cast shadow and hence visilibility is always 1.0
// vec3 coldDiffuseLight = calDiffuseLight(coldLight, 1.0);
// gl_FragColor = vec4(coldDiffuseLight + warmDiffuseLight + ambientLight, 1.0);
vec3 shadowCoord = (vShadowPositionFromLight.xyz/vShadowPositionFromLight.w)/2.0 + 0.5;
gl_FragColor = vec4(unpackDepth(texture2D(uShadowMap, shadowCoord.xy)), 0.0, 0.0, 1.0);
}
I would get this image
Thanks in advance.