How can you change the position of instanced vertices from a square to a circle?
I'm attempting to control the positioning of the vertices, specifically to constrain them into the shape of a circle rather than a square. I can't use the fragment shader (which I'm more comfortable moulding shapes in) because I'm instancing the vertices. Here is the relevant part of the vertex shader:
//UV for texture
vUv = uv;
vec3 pos;
vec3 globalPos;
vec3 tile;
globalPos.x = offset.x-posX*delta;
globalPos.z = offset.z-posZ*delta;
tile.x = floor((globalPos.x + 0.5 * width) / width);
tile.z = floor((globalPos.z + 0.5 * width) / width);
pos.x = globalPos.x - tile.x * width;
pos.z = globalPos.z - tile.z * width;
pos.y = max(0.0, placeOnSphere(pos)) - radius;
pos.y += getYPosition(vec2(pos.x+delta*posX, pos.z+delta*posZ));
//Position of the blade in the visible patch [0->1]
vec2 fractionalPos = 0.5 + offset.xz / width;
Can anyone point me in the right direction for how I would do this?
Picture for illustration purposes:
The entire vertex shader:
precision mediump float;
attribute vec3 position;
attribute vec3 normal;
attribute vec3 offset;
attribute vec2 uv;
attribute vec2 halfRootAngle;
attribute float scale;
attribute float index;
uniform float time;
uniform float delta;
uniform float posX;
uniform float posZ;
uniform float radius;
uniform float width;
uniform float bladeHeight;
uniform mat4 modelViewMatrix;
uniform mat4 projectionMatrix;
varying vec2 vUv;
varying vec3 vNormal;
varying vec3 vPosition;
varying float frc;
varying float idx;
const float PI = 3.1415;
const float TWO_PI = 2.0 * PI;
//https://www.geeks3d.com/20141201/how-to-rotate-a-vertex-by-a-quaternion-in-glsl/
vec3 rotateVectorByQuaternion(vec3 v, vec4 q){
return 2.0 * cross(q.xyz, v * q.w + cross(q.xyz, v)) + v;
}
float placeOnSphere(vec3 v){
float theta = acos(v.z/radius);
float phi = acos(v.x/(radius * sin(theta)));
float sV = radius * sin(theta) * sin(phi);
//If undefined, set to default value
if(sV != sV){
sV = v.y;
}
return sV;
}
void main() {
//Vertex height in blade geometry
frc = position.y / float(bladeHeight);
//Scale vertices
vec3 vPosition = position;
vPosition.y *= scale;
//Invert scaling for normals
vNormal = normal;
vNormal.y /= scale;
//Rotate blade around Y axis
vec4 direction = vec4(0.0, halfRootAngle.x, 0.0, halfRootAngle.y);
vPosition = rotateVectorByQuaternion(vPosition, direction);
vNormal = rotateVectorByQuaternion(vNormal, direction);
//UV for texture
vUv = uv;
vec3 pos;
vec3 globalPos;
vec3 tile;
globalPos.x = offset.x-posX*delta;
globalPos.z = offset.z-posZ*delta;
tile.x = floor((globalPos.x + 0.5 * width) / width);
tile.z = floor((globalPos.z + 0.5 * width) / width);
pos.x = globalPos.x - tile.x * width;
pos.z = globalPos.z - tile.z * width;
pos.y = max(0.0, placeOnSphere(pos)) - radius;
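// (assumption: getYPosition() is defined elsewhere in the full source and returns the terrain height for this XZ position)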
pos.y += getYPosition(vec2(pos.x+delta*posX, pos.z+delta*posZ));
//Position of the blade in the visible patch [0->1]
vec2 fractionalPos = 0.5 + offset.xz / width;
//To make it seamless, make it a multiple of 2*PI
fractionalPos *= TWO_PI;
//Wind is sine waves in time.
float noise = sin(fractionalPos.x + time);
float halfAngle = noise * 0.1;
noise = 0.5 + 0.5 * cos(fractionalPos.y + 0.25 * time);
halfAngle -= noise * 0.2;
direction = normalize(vec4(sin(halfAngle), 0.0, -sin(halfAngle), cos(halfAngle)));
//Rotate blade and normals according to the wind
vPosition = rotateVectorByQuaternion(vPosition, direction);
vNormal = rotateVectorByQuaternion(vNormal, direction);
//Move vertex to global location
vPosition += pos;
//Index of instance for varying colour in fragment shader
idx = index;
gl_Position = projectionMatrix * modelViewMatrix * vec4(vPosition, 1.0);
}
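One possible direction (my sketch, not from the original post): pos.x and pos.z are wrapped into a square tile of side width centred on the viewer, so that square can be remapped onto a disc of diameter width before the blade is placed. Assuming width is the patch-width uniform from the shader above:
// Sketch: remap a point in the square tile onto the inscribed disc.
// A point on the square "ring" of Chebyshev radius s lands on the
// circle of Euclidean radius s, so the visible patch becomes round.
vec2 squareToDisc(vec2 p) {
    vec2 n = p / (0.5 * width);           // normalize the square to [-1, 1]
    float side = max(abs(n.x), abs(n.y)); // Chebyshev radius inside the square
    float len = length(n);
    if (len > 0.0) {
        n *= side / len;                  // pull the point onto the circle of radius side
    }
    return n * (0.5 * width);
}
// ...then, after the tiling code in main():
// pos.xz = squareToDisc(pos.xz);
This pulls blades near the tile corners inward, so density rises slightly along the diagonals; a simpler alternative is to scale any blade with length(pos.xz) > 0.5 * width down to zero so it is never seen.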
I'm trying to implement Pure Depth SSAO, following this tutorial, in an OpenGL ES 2.0 engine.
Now I'm experiencing flickering, which looks as if I'm reading from somewhere where there is no data.
Can you see where I made a mistake, or do you have an idea how to solve the flickering problem? I need this to run on mobile and HTML5 with forward rendering, which is why I use the depth-only version of SSAO.
Many thanks
Video: Youtube
GLSL Code:
uniform sampler2D texture0;
uniform sampler2D texture1;
varying vec2 uvVarying;
vec3 GetNormalFromDepth(float depth, vec2 uv);
uniform mediump vec2 agk_resolution;
uniform float ssaoStrength;
uniform float ssaoBase;
uniform float ssaoArea;
uniform float ssaoFalloff;
uniform float ssaoRadius;
const int samples = 16;
vec3 sampleSphere[samples];
void main()
{
highp float depth = texture2D(texture0, uvVarying).r;
vec3 random = normalize( texture2D(texture1, uvVarying * agk_resolution / 64.0).rgb );
vec3 position = vec3(uvVarying, depth);
vec3 normal = GetNormalFromDepth(depth, uvVarying);
sampleSphere[0] = vec3( 0.5381, 0.1856,-0.4319);
sampleSphere[1] = vec3( 0.1379, 0.2486, 0.4430);
sampleSphere[2] = vec3( 0.3371, 0.5679,-0.0057);
sampleSphere[3] = vec3(-0.6999,-0.0451,-0.0019);
sampleSphere[4] = vec3( 0.0689,-0.1598,-0.8547);
sampleSphere[5] = vec3( 0.0560, 0.0069,-0.1843);
sampleSphere[6] = vec3(-0.0146, 0.1402, 0.0762);
sampleSphere[7] = vec3( 0.0100,-0.1924,-0.0344);
sampleSphere[8] = vec3(-0.3577,-0.5301,-0.4358);
sampleSphere[9] = vec3(-0.3169, 0.1063, 0.0158);
sampleSphere[10] = vec3( 0.0103,-0.5869, 0.0046);
sampleSphere[11] = vec3(-0.0897,-0.4940, 0.3287);
sampleSphere[12] = vec3( 0.7119,-0.0154,-0.0918);
sampleSphere[13] = vec3(-0.0533, 0.0596,-0.5411);
sampleSphere[14] = vec3( 0.0352,-0.0631, 0.5460);
sampleSphere[15] = vec3(-0.4776, 0.2847,-0.0271);
float radiusDepth = ssaoRadius/depth;
float occlusion = 0.0;
for(int i=0; i < samples; i++)
{
vec3 ray = radiusDepth * reflect(sampleSphere[i], random);
vec3 hemiRay = position + sign(dot(ray, normal)) * ray;
float occDepth = texture2D(texture0, clamp(hemiRay.xy, 0.0, 1.0)).r;
float difference = depth - occDepth;
occlusion += step(ssaoFalloff, difference) * (1.0 - smoothstep(ssaoFalloff, ssaoArea, difference));
// float rangeCheck = abs(difference) < radiusDepth ? 1.0 : 0.0;
// occlusion += (occDepth <= position.z ? 1.0 : 0.0) * rangeCheck;
}
float ao = 1.0 - ssaoStrength * occlusion * (1.0 / float(samples));
gl_FragColor = vec4(clamp(ao + ssaoBase, 0.0, 1.0));
}
vec3 GetNormalFromDepth(float depth, vec2 uv)
{
vec2 offset1 = vec2(0.0,1.0/agk_resolution.y);
vec2 offset2 = vec2(1.0/agk_resolution.x,0.0);
float depth1 = texture2D(texture0, uv + offset1).r;
float depth2 = texture2D(texture0, uv + offset2).r;
vec3 p1 = vec3(offset1, depth1 - depth);
vec3 p2 = vec3(offset2, depth2 - depth);
vec3 normal = cross(p1, p2);
normal.z = -normal.z;
return normalize(normal);
}
I carefully checked my code against the code you (Rabbid76) created for the JSFiddle and came across the if (depth > 0.0) statement, which solved the problem... so you effectively answered my question, and I would like to thank you and mark your answer for that.
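For reference, a minimal sketch of that guard (my reconstruction, not the actual JSFiddle; it assumes a depth of 0.0 marks texels that were never written, which is exactly where the shader reads garbage):
highp float depth = texture2D(texture0, uvVarying).r;
if (depth > 0.0)
{
    // ... run the sampling loop from above and compute ao ...
    gl_FragColor = vec4(clamp(ao + ssaoBase, 0.0, 1.0));
}
else
{
    gl_FragColor = vec4(1.0); // no depth data here, so leave the pixel unoccluded
}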
I'm changing the z coordinates of the vertices on my geometry but find that the mesh stays the same size, when I'm expecting it to get smaller. Tweening between vertex positions works as expected in X,Y space, however.
This is how I'm calculating my gl_Position by tweening the amplitude uniform in my render function:
<script type="x-shader/x-vertex" id="vertexshader">
uniform float amplitude;
uniform float direction;
uniform vec3 cameraPos;
uniform float time;
attribute vec3 tweenPosition;
varying vec2 vUv;
void main() {
vec3 pos = position;
vec3 morphed = vec3( 0.0, 0.0, 0.0 );
morphed += ( tweenPosition - position ) * amplitude;
morphed += pos;
vec4 mvPosition = modelViewMatrix * vec4( morphed * vec3(1, -1, 0), 1.0 );
vUv = uv;
gl_Position = projectionMatrix * mvPosition;
}
</script>
I also tried something like this, based on the perspective calculation on webglfundamentals:
vec4 newPos = projectionMatrix * mvPosition;
float zToDivideBy = 1.0 + newPos.z * 1.0;
gl_Position = vec4(newPos.xyz, zToDivideBy);
This is my loop to calculate another vertex set that I'm tweening between:
for (var i = 0; i < positions.length; i++) {
if ((i+1) % 3 === 0) {
// subtracting from z coord of each vertex
tweenPositions[i] = positions[i] - (Math.random() * 2000);
} else {
tweenPositions[i] = positions[i];
}
}
I get the same results with this: objects further away in Z space do not scale, attenuate, or do anything different. What gives?
morphed * vec3(1, -1, 0)
z is always zero in your code. In GLSL, multiplying two vectors works component-wise:
[x,y,z] * [1,-1,0] = [x,-y,0]
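So if the intent was only to flip Y, a sketch of the corrected line would leave z alone:
vec4 mvPosition = modelViewMatrix * vec4( morphed * vec3(1.0, -1.0, 1.0), 1.0 );
With z preserved, the tweened z offsets reach the projection matrix, and distant vertices are foreshortened as expected.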
In my WebGL shader I would like to map the U value of my texture based on the output of a function (atan) whose range is [0,2*PI). But the range of U (as expected by texture2D) is [0,1]. So I'm trying to map a half-open interval onto a closed interval.
This shows the problem:
The horizontal red gradient is the U axis and goes from Red=1 to Red=0 as my atan goes from 0 to 2*PI. But atan treats 2*PI as zero, so there is a red band on the right after the gradient has gone black. (There are red bands on the top and bottom too, but that is a similar problem having to do with the V value, which I'm ignoring for the purposes of this question.)
See this image using three.js' ability to show the vertices:
You can see how the right-most vertices (U=1) are red, corresponding again to atan=0 instead of 2*PI.
Any suggestions on how to accomplish this? I can't force atan to return 2*PI. I don't want to tile the texture. Can I map the U value to an open interval somehow?
I keep thinking there must be an easy solution, but I have tried every fix I can think of.
Here is my vertex shader:
void main()
{
vec4 mvPosition = modelViewMatrix * vec4(position, 1.0 );
gl_Position = projectionMatrix * mvPosition;
// convert from uv to polar coords
vec2 tempuv = uv;
float theta = (1.0-tempuv[1]) * PI;
float phi = PI * 2.0 * tempuv[0];
// convert polar to cartesian. Theta is polar, phi is azimuth.
float x = sin(theta)*cos(phi);
float y = sin(theta)*sin(phi);
float z = cos(theta);
// and convert back again to demonstrate problem.
// problem: the phi above is [0,2*PI]. this phi is [0,2*PI)
phi = atan(y, x); // GLSL's two-argument atan is the atan2 equivalent
if (phi < 0.0) {
phi = phi + PI*2.0;
}
if (phi > (2.0 * PI)) { // allow 2PI since we gen uv over [0,1]
phi = phi - 2.0 * PI;
}
theta = acos(z);
// now get uv in new chart.
float newv = 1.0 - theta/PI;
float newu = phi/(2.0 * PI);
vec2 newuv = vec2(newu, newv);
vUv = newuv;
}
Here is my fragment shader:
void main() {
vec2 uv = vUv;
gl_FragColor = vec4(1.0- uv[0],0.,0.,1.);
}
One way of looking at the problem is, as you mentioned, that 1 becomes 0 at the edge. But another way of looking at it: if you changed the uv to go from 0 to 2 instead of 0 to 1 and then used fract(uv), you'd get the same problem several times over. You're effectively sampling a function, and each point can only choose one color, whereas to map it correctly each point would somehow have to pick two colors: one for interpolating toward the vertices on its left and another for interpolating toward the ones on its right.
Example with fract(uv * 2.)
var vs = `
#define PI radians(180.)
attribute vec4 position;
attribute vec2 texcoord;
varying vec2 vUv;
void main() {
gl_Position = position;
// convert from uv to polar coords
vec2 tempuv = fract(texcoord * 2.);
float theta = (1.0-tempuv[1]) * PI;
float phi = PI * 2.0 * tempuv[0];
// convert polar to cartesian. Theta is polar, phi is azimuth.
float x = sin(theta)*cos(phi);
float y = sin(theta)*sin(phi);
float z = cos(theta);
// and convert back again to demonstrate problem.
// problem: the phi above is [0,2*PI]. this phi is [0,2*PI)
phi = atan(y, x);
if (phi < 0.0) {
phi = phi + PI * 2.0;
}
if (phi > (2.0 * PI)) { // allow 2PI since we gen uv over [0,1]
phi = phi - 2.0 * PI;
}
theta = acos(z);
// now get uv in new chart.
float newv = 1.0 - theta/PI;
float newu = phi/(2.0 * PI);
vec2 newuv = vec2(newu, newv);
vUv = newuv;
}
`;
var fs = `
precision mediump float;
varying vec2 vUv;
void main() {
vec2 uv = vUv;
gl_FragColor = vec4(1.0- uv[0],0.,0.,1.);
}
`;
var gl = document.querySelector("canvas").getContext("webgl");
var m4 = twgl.m4;
var programInfo = twgl.createProgramInfo(gl, [vs, fs]);
var bufferInfo = twgl.primitives.createPlaneBufferInfo(
gl, 2, 2, 20, 20, m4.rotationX(Math.PI * .5));
twgl.resizeCanvasToDisplaySize(gl.canvas);
gl.viewport(0, 0, gl.canvas.width, gl.canvas.height);
gl.useProgram(programInfo.program);
twgl.setBuffersAndAttributes(gl, programInfo, bufferInfo);
twgl.drawBufferInfo(gl, bufferInfo);
body { margin: 0 }
canvas { width: 100vw; height: 100vh; display: block; }
<script src="https://twgljs.org/dist/2.x/twgl-full.min.js"></script>
<canvas></canvas>
Moving the code to the fragment shader effectively solves it.
Example with code moved to fragment shader
var vs = `
attribute vec4 position;
attribute vec2 texcoord;
varying vec2 vUv;
void main() {
gl_Position = position;
vUv = texcoord;
}
`;
var fs = `
precision mediump float;
varying vec2 vUv;
#define PI radians(180.)
void main() {
// convert from uv to polar coords
vec2 tempuv = vUv;
float theta = (1.0-tempuv[1]) * PI;
float phi = PI * 2.0 * tempuv[0];
// convert polar to cartesian. Theta is polar, phi is azimuth.
float x = sin(theta)*cos(phi);
float y = sin(theta)*sin(phi);
float z = cos(theta);
// and convert back again to demonstrate problem.
// problem: the phi above is [0,2*PI]. this phi is [0,2*PI)
phi = atan(y, x);
if (phi < 0.0) {
phi = phi + PI * 2.0;
}
if (phi > (2.0 * PI)) { // allow 2PI since we gen uv over [0,1]
phi = phi - 2.0 * PI;
}
theta = acos(z);
// now get uv in new chart.
float newv = 1.0 - theta/PI;
float newu = phi/(2.0 * PI);
vec2 newuv = vec2(newu, newv);
gl_FragColor = vec4(1.0- newuv[0],0.,0.,1.);
}
`;
var gl = document.querySelector("canvas").getContext("webgl");
var m4 = twgl.m4;
var programInfo = twgl.createProgramInfo(gl, [vs, fs]);
var bufferInfo = twgl.primitives.createPlaneBufferInfo(
gl, 2, 2, 20, 20, m4.rotationX(Math.PI * .5));
twgl.resizeCanvasToDisplaySize(gl.canvas);
gl.viewport(0, 0, gl.canvas.width, gl.canvas.height);
gl.useProgram(programInfo.program);
twgl.setBuffersAndAttributes(gl, programInfo, bufferInfo);
twgl.drawBufferInfo(gl, bufferInfo);
body { margin: 0 }
canvas { width: 100vw; height: 100vh; display: block; }
<script src="https://twgljs.org/dist/2.x/twgl-full.min.js"></script>
<canvas></canvas>
Keeping it in the vertex shader, one solution is just to fudge the numbers so they're between, say, 0.00005 and 0.99995.
var vs = `
#define PI radians(180.)
attribute vec4 position;
attribute vec2 texcoord;
varying vec2 vUv;
void main() {
gl_Position = position;
// convert from uv to polar coords
vec2 tempuv = texcoord * 0.9999 + 0.00005;
float theta = (1.0-tempuv[1]) * PI;
float phi = PI * 2.0 * tempuv[0];
// convert polar to cartesian. Theta is polar, phi is azimuth.
float x = sin(theta)*cos(phi);
float y = sin(theta)*sin(phi);
float z = cos(theta);
// and convert back again to demonstrate problem.
// problem: the phi above is [0,2*PI]. this phi is [0,2*PI)
phi = atan(y, x);
if (phi < 0.0) {
phi = phi + PI * 2.0;
}
if (phi > (2.0 * PI)) { // allow 2PI since we gen uv over [0,1]
phi = phi - 2.0 * PI;
}
theta = acos(z);
// now get uv in new chart.
float newv = 1.0 - theta/PI;
float newu = phi/(2.0 * PI);
vec2 newuv = vec2(newu, newv);
vUv = newuv;
}
`;
var fs = `
precision mediump float;
varying vec2 vUv;
void main() {
vec2 uv = vUv;
gl_FragColor = vec4(1.0- uv[0],0.,0.,1.);
}
`;
var gl = document.querySelector("canvas").getContext("webgl");
var m4 = twgl.m4;
var programInfo = twgl.createProgramInfo(gl, [vs, fs]);
var bufferInfo = twgl.primitives.createPlaneBufferInfo(
gl, 2, 2, 20, 20, m4.rotationX(Math.PI * .5));
twgl.resizeCanvasToDisplaySize(gl.canvas);
gl.viewport(0, 0, gl.canvas.width, gl.canvas.height);
gl.useProgram(programInfo.program);
twgl.setBuffersAndAttributes(gl, programInfo, bufferInfo);
twgl.drawBufferInfo(gl, bufferInfo);
body { margin: 0 }
canvas { width: 100vw; height: 100vh; display: block; }
<script src="https://twgljs.org/dist/2.x/twgl-full.min.js"></script>
<canvas></canvas>
This only works, though, because the texcoords go from 0 to 1. If they went from 0 to more than 1 (or below 0), you'd run into the same problem as above: certain vertices would need more than one color. You'd basically need to use the fragment-shader solution.
So I've recently gotten into using WebGL, and more specifically writing GLSL shaders, and I have run into a snag while writing the fragment shader for my "water" shader, which is derived from this tutorial.
What I'm trying to achieve is a stepped-shading (toon shading, cel shading...) effect on the waves generated by my vertex shader, but the fragment shader seems to treat the waves as though they were still a flat plane, and the entire mesh is drawn as one solid color.
What am I missing here? The sphere works perfectly, but flat surfaces are all shaded uniformly. I have the same problem if I use a cube: each face of the cube is shaded independently, but the entire face is given a solid color.
The Scene
This is how I have my test scene set up: two meshes using the same material (a sphere and a plane) plus a light source.
The Problem
As you can see the shader is working as expected on the sphere.
I enabled wireframe for this shot to show that the vertex shader (Perlin noise) is working beautifully on the plane.
But when I turn the wireframe off, you can see that the fragment shader seems to receive the same level of light uniformly across the entire plane, creating this...
Rotating the plane to face the light source changes the color of the material, but again the color is applied uniformly over the entire surface of the plane.
The Fragment Shader
In all its script-kid glory, lol.
uniform vec3 uMaterialColor;
uniform vec3 uDirLightPos;
uniform vec3 uDirLightColor;
uniform float uKd;
uniform float uBorder;
varying vec3 vNormal;
varying vec3 vViewPosition;
void main() {
vec4 color;
// compute direction to light
vec4 lDirection = viewMatrix * vec4( uDirLightPos, 0.0 );
vec3 lVector = normalize( lDirection.xyz );
// N * L. Normal must be normalized, since it's interpolated.
vec3 normal = normalize( vNormal );
// check the diffuse dot product against uBorder and adjust
// this diffuse value accordingly.
float diffuse = max( dot( normal, lVector ), 0.0);
if (diffuse > 0.95)
color = vec4(1.0,0.0,0.0,1.0);
else if (diffuse > 0.85)
color = vec4(0.9,0.0,0.0,1.0);
else if (diffuse > 0.75)
color = vec4(0.8,0.0,0.0,1.0);
else if (diffuse > 0.65)
color = vec4(0.7,0.0,0.0,1.0);
else if (diffuse > 0.55)
color = vec4(0.6,0.0,0.0,1.0);
else if (diffuse > 0.45)
color = vec4(0.5,0.0,0.0,1.0);
else if (diffuse > 0.35)
color = vec4(0.4,0.0,0.0,1.0);
else if (diffuse > 0.25)
color = vec4(0.3,0.0,0.0,1.0);
else if (diffuse > 0.15)
color = vec4(0.2,0.0,0.0,1.0);
else if (diffuse > 0.05)
color = vec4(0.1,0.0,0.0,1.0);
else
color = vec4(0.05,0.0,0.0,1.0);
gl_FragColor = color;
}
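As an aside, the ladder above just quantizes diffuse into 0.1-wide bands, so it can be collapsed into arithmetic. A rough sketch (mine, not from the post; it matches the ladder everywhere except exactly at the band boundaries):
// round diffuse to the nearest 0.1, with a floor of 0.05 for the darkest band
float band = max(floor(diffuse * 10.0 + 0.5) / 10.0, 0.05);
color = vec4(band, 0.0, 0.0, 1.0);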
The Vertex Shader
vec3 mod289(vec3 x)
{
return x - floor(x * (1.0 / 289.0)) * 289.0;
}
vec4 mod289(vec4 x)
{
return x - floor(x * (1.0 / 289.0)) * 289.0;
}
vec4 permute(vec4 x)
{
return mod289(((x*34.0)+1.0)*x);
}
vec4 taylorInvSqrt(vec4 r)
{
return 1.79284291400159 - 0.85373472095314 * r;
}
vec3 fade(vec3 t) {
return t*t*t*(t*(t*6.0-15.0)+10.0);
}
// Classic Perlin noise
float cnoise(vec3 P)
{
vec3 Pi0 = floor(P); // Integer part for indexing
vec3 Pi1 = Pi0 + vec3(1.0); // Integer part + 1
Pi0 = mod289(Pi0);
Pi1 = mod289(Pi1);
vec3 Pf0 = fract(P); // Fractional part for interpolation
vec3 Pf1 = Pf0 - vec3(1.0); // Fractional part - 1.0
vec4 ix = vec4(Pi0.x, Pi1.x, Pi0.x, Pi1.x);
vec4 iy = vec4(Pi0.yy, Pi1.yy);
vec4 iz0 = Pi0.zzzz;
vec4 iz1 = Pi1.zzzz;
vec4 ixy = permute(permute(ix) + iy);
vec4 ixy0 = permute(ixy + iz0);
vec4 ixy1 = permute(ixy + iz1);
vec4 gx0 = ixy0 * (1.0 / 7.0);
vec4 gy0 = fract(floor(gx0) * (1.0 / 7.0)) - 0.5;
gx0 = fract(gx0);
vec4 gz0 = vec4(0.5) - abs(gx0) - abs(gy0);
vec4 sz0 = step(gz0, vec4(0.0));
gx0 -= sz0 * (step(0.0, gx0) - 0.5);
gy0 -= sz0 * (step(0.0, gy0) - 0.5);
vec4 gx1 = ixy1 * (1.0 / 7.0);
vec4 gy1 = fract(floor(gx1) * (1.0 / 7.0)) - 0.5;
gx1 = fract(gx1);
vec4 gz1 = vec4(0.5) - abs(gx1) - abs(gy1);
vec4 sz1 = step(gz1, vec4(0.0));
gx1 -= sz1 * (step(0.0, gx1) - 0.5);
gy1 -= sz1 * (step(0.0, gy1) - 0.5);
vec3 g000 = vec3(gx0.x,gy0.x,gz0.x);
vec3 g100 = vec3(gx0.y,gy0.y,gz0.y);
vec3 g010 = vec3(gx0.z,gy0.z,gz0.z);
vec3 g110 = vec3(gx0.w,gy0.w,gz0.w);
vec3 g001 = vec3(gx1.x,gy1.x,gz1.x);
vec3 g101 = vec3(gx1.y,gy1.y,gz1.y);
vec3 g011 = vec3(gx1.z,gy1.z,gz1.z);
vec3 g111 = vec3(gx1.w,gy1.w,gz1.w);
vec4 norm0 = taylorInvSqrt(vec4(dot(g000, g000), dot(g010, g010), dot(g100, g100), dot(g110, g110)));
g000 *= norm0.x;
g010 *= norm0.y;
g100 *= norm0.z;
g110 *= norm0.w;
vec4 norm1 = taylorInvSqrt(vec4(dot(g001, g001), dot(g011, g011), dot(g101, g101), dot(g111, g111)));
g001 *= norm1.x;
g011 *= norm1.y;
g101 *= norm1.z;
g111 *= norm1.w;
float n000 = dot(g000, Pf0);
float n100 = dot(g100, vec3(Pf1.x, Pf0.yz));
float n010 = dot(g010, vec3(Pf0.x, Pf1.y, Pf0.z));
float n110 = dot(g110, vec3(Pf1.xy, Pf0.z));
float n001 = dot(g001, vec3(Pf0.xy, Pf1.z));
float n101 = dot(g101, vec3(Pf1.x, Pf0.y, Pf1.z));
float n011 = dot(g011, vec3(Pf0.x, Pf1.yz));
float n111 = dot(g111, Pf1);
vec3 fade_xyz = fade(Pf0);
vec4 n_z = mix(vec4(n000, n100, n010, n110), vec4(n001, n101, n011, n111), fade_xyz.z);
vec2 n_yz = mix(n_z.xy, n_z.zw, fade_xyz.y);
float n_xyz = mix(n_yz.x, n_yz.y, fade_xyz.x);
return 2.2 * n_xyz;
}
// Classic Perlin noise, periodic variant
float pnoise(vec3 P, vec3 rep)
{
vec3 Pi0 = mod(floor(P), rep); // Integer part, modulo period
vec3 Pi1 = mod(Pi0 + vec3(1.0), rep); // Integer part + 1, mod period
Pi0 = mod289(Pi0);
Pi1 = mod289(Pi1);
vec3 Pf0 = fract(P); // Fractional part for interpolation
vec3 Pf1 = Pf0 - vec3(1.0); // Fractional part - 1.0
vec4 ix = vec4(Pi0.x, Pi1.x, Pi0.x, Pi1.x);
vec4 iy = vec4(Pi0.yy, Pi1.yy);
vec4 iz0 = Pi0.zzzz;
vec4 iz1 = Pi1.zzzz;
vec4 ixy = permute(permute(ix) + iy);
vec4 ixy0 = permute(ixy + iz0);
vec4 ixy1 = permute(ixy + iz1);
vec4 gx0 = ixy0 * (1.0 / 7.0);
vec4 gy0 = fract(floor(gx0) * (1.0 / 7.0)) - 0.5;
gx0 = fract(gx0);
vec4 gz0 = vec4(0.5) - abs(gx0) - abs(gy0);
vec4 sz0 = step(gz0, vec4(0.0));
gx0 -= sz0 * (step(0.0, gx0) - 0.5);
gy0 -= sz0 * (step(0.0, gy0) - 0.5);
vec4 gx1 = ixy1 * (1.0 / 7.0);
vec4 gy1 = fract(floor(gx1) * (1.0 / 7.0)) - 0.5;
gx1 = fract(gx1);
vec4 gz1 = vec4(0.5) - abs(gx1) - abs(gy1);
vec4 sz1 = step(gz1, vec4(0.0));
gx1 -= sz1 * (step(0.0, gx1) - 0.5);
gy1 -= sz1 * (step(0.0, gy1) - 0.5);
vec3 g000 = vec3(gx0.x,gy0.x,gz0.x);
vec3 g100 = vec3(gx0.y,gy0.y,gz0.y);
vec3 g010 = vec3(gx0.z,gy0.z,gz0.z);
vec3 g110 = vec3(gx0.w,gy0.w,gz0.w);
vec3 g001 = vec3(gx1.x,gy1.x,gz1.x);
vec3 g101 = vec3(gx1.y,gy1.y,gz1.y);
vec3 g011 = vec3(gx1.z,gy1.z,gz1.z);
vec3 g111 = vec3(gx1.w,gy1.w,gz1.w);
vec4 norm0 = taylorInvSqrt(vec4(dot(g000, g000), dot(g010, g010), dot(g100, g100), dot(g110, g110)));
g000 *= norm0.x;
g010 *= norm0.y;
g100 *= norm0.z;
g110 *= norm0.w;
vec4 norm1 = taylorInvSqrt(vec4(dot(g001, g001), dot(g011, g011), dot(g101, g101), dot(g111, g111)));
g001 *= norm1.x;
g011 *= norm1.y;
g101 *= norm1.z;
g111 *= norm1.w;
float n000 = dot(g000, Pf0);
float n100 = dot(g100, vec3(Pf1.x, Pf0.yz));
float n010 = dot(g010, vec3(Pf0.x, Pf1.y, Pf0.z));
float n110 = dot(g110, vec3(Pf1.xy, Pf0.z));
float n001 = dot(g001, vec3(Pf0.xy, Pf1.z));
float n101 = dot(g101, vec3(Pf1.x, Pf0.y, Pf1.z));
float n011 = dot(g011, vec3(Pf0.x, Pf1.yz));
float n111 = dot(g111, Pf1);
vec3 fade_xyz = fade(Pf0);
vec4 n_z = mix(vec4(n000, n100, n010, n110), vec4(n001, n101, n011, n111), fade_xyz.z);
vec2 n_yz = mix(n_z.xy, n_z.zw, fade_xyz.y);
float n_xyz = mix(n_yz.x, n_yz.y, fade_xyz.x);
return 2.2 * n_xyz;
}
varying vec2 vUv;
varying float noise;
uniform float time;
// for the cell shader
varying vec3 vNormal;
varying vec3 vViewPosition;
float turbulence( vec3 p ) {
float w = 100.0;
float t = -.5;
for (float f = 1.0 ; f <= 10.0 ; f++ ){
float power = pow( 2.0, f );
t += abs( pnoise( vec3( power * p ), vec3( 10.0, 10.0, 10.0 ) ) / power );
}
return t;
}
varying vec3 vertexWorldPos;
void main() {
vUv = uv;
// add time to the noise parameters so it's animated
noise = 10.0 * -.10 * turbulence( .5 * normal + time );
float b = 25.0 * pnoise( 0.05 * position + vec3( 2.0 * time ), vec3( 100.0 ) );
float displacement = - 10. - noise + b;
vec3 newPosition = position + normal * displacement;
gl_Position = projectionMatrix * modelViewMatrix * vec4( newPosition, 1.0 );
// for the cell shader effect
vNormal = normalize( normalMatrix * normal );
vec4 mvPosition = modelViewMatrix * vec4( position, 1.0 );
vViewPosition = -mvPosition.xyz;
}
Worth mentioning:
I am using the Three.js library.
My light source is an instance of THREE.SpotLight.
First of all, shadows are completely different. Your problem here is a lack of change in the per-vertex normal after displacement. Correcting this is not going to get you shadows, but your lighting will at least vary across your displaced geometry.
If you have access to partial derivatives, you can do this in the fragment shader. Otherwise, you are kind of out of luck in GL ES, due to a lack of vertex adjacency information. You could also compute per-face normals with a Geometry Shader, but that is not an option in WebGL.
These should be all of the necessary changes to implement this; note that it requires partial-derivative support (an optional extension in OpenGL ES 2.0).
Vertex Shader:
varying vec3 vertexViewPos; // NEW
void main() {
...
vec3 newPosition = position + normal * displacement;
vertexViewPos = (modelViewMatrix * vec4 (newPosition, 1.0)).xyz; // NEW
...
}
Fragment Shader:
#extension GL_OES_standard_derivatives : require
uniform vec3 uMaterialColor;
uniform vec3 uDirLightPos;
uniform vec3 uDirLightColor;
uniform float uKd;
uniform float uBorder;
varying vec3 vNormal;
varying vec3 vViewPosition;
varying vec3 vertexViewPos; // NEW
void main() {
vec4 color;
// compute direction to light
vec4 lDirection = viewMatrix * vec4( uDirLightPos, 0.0 );
vec3 lVector = normalize( lDirection.xyz );
// N * L. Normal must be normalized, since it's interpolated.
vec3 normal = normalize(cross (dFdx (vertexViewPos), dFdy (vertexViewPos))); // UPDATED
...
}
To enable partial-derivative support in WebGL you need to check for the extension like this:
var ext = gl.getExtension("OES_standard_derivatives");
if (!ext) {
alert("OES_standard_derivatives does not exist on this machine");
return;
}
// proceed with the shaders above.
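Since the question uses the Three.js library, note that with THREE.ShaderMaterial the extension can also be requested on the material. A sketch (the variable names are placeholders, and it assumes a WebGL 1 context; WebGL 2 / GLSL ES 3.0 has derivatives built in):
var material = new THREE.ShaderMaterial({
    uniforms: uniforms,                // your existing uniforms
    vertexShader: vertexShaderSrc,     // the displacement shader above
    fragmentShader: fragmentShaderSrc,
    extensions: { derivatives: true }  // asks three.js to inject the OES_standard_derivatives directive
});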