Should you hardcode the texture coordinate array in the vertex shader when possible? - performance

So I have made a voxel engine where, instead of passing a texture coordinate array to the VAO, I hardcode that array in the vertex shader:
const vec2 texcoords[4] = vec2[4](
    vec2(1.0, 1.0),
    vec2(1.0, 0.0),
    vec2(0.0, 0.0),
    vec2(0.0, 1.0)
);
Instead, I pass a vertexIndex to the VAO (so vertexIndex determines which entry of this array to use).
This saves me a LOT of memory; I can fit all the information about each voxel vertex into a single uint. But I want to know what downsides this method has. Should I use it even when the memory saving is not as large?
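For context, passing a single packed uint per vertex implies an integer vertex attribute on the client side. Below is a minimal sketch of what that setup could look like (the names, attribute location 0, and the helper itself are illustrative assumptions, not code from the engine); note that the integer variant glVertexAttribIPointer is required, since the plain glVertexAttribPointer would convert the value to float and corrupt the bitfield:
#include <stdint.h>
/* Assumes an OpenGL loader header (e.g. glad) is included elsewhere. */
void setup_chunk_vertex_format(GLuint vao, GLuint vbo)
{
    glBindVertexArray(vao);
    glBindBuffer(GL_ARRAY_BUFFER, vbo);
    /* One packed uint per vertex; glVertexAttribIPointer keeps the raw bits. */
    glVertexAttribIPointer(0, 1, GL_UNSIGNED_INT, sizeof(uint32_t), (void *)0);
    glEnableVertexAttribArray(0);
}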
EDIT:
Here is the code for chunk.vert:
#version 460 core

in uint vertex; // packed per-vertex data; bit layout documented below

out vec2 pass_texcoord;
out float pass_faceLight;

uniform mat4 projection;
uniform mat4 view;
uniform uint texAtlasRowsCount;
uniform vec3 chunkPos;

// Bit layout of 'vertex':
//   bits  0-5  x          bits  6-11 y              bits 12-17 z
//   bits 18-20 face light bits 21-22 corner index   bits 23-29 texture index + 1
//   bits 30-31 ambient occlusion (currently unused)
#define vertexIndex ((vertex & 0x600000u) >> 21)
#define xc (vertex & 0x3Fu)
#define yc ((vertex & 0xFC0u) >> 6)
#define zc ((vertex & 0x3F000u) >> 12)
#define light (((vertex & 0x1C0000u) >> 18) * 0.2)
#define pos vec4(vec3(xc, yc, zc) + chunkPos, 1)
#define position (projection * view * pos)

const vec2 texcoords[4] = vec2[4](
    vec2(1.0, 1.0),
    vec2(1.0, 0.0),
    vec2(0.0, 0.0),
    vec2(0.0, 1.0)
);

void main() {
    uint row = ((vertex & 0x3F800000u) >> 23) - 1u; // texture index is stored +1
    const uint col = row / texAtlasRowsCount;
    row %= texAtlasRowsCount;
    pass_texcoord = texcoords[vertexIndex];
    pass_texcoord.x += row;
    pass_texcoord.y += col;
    pass_texcoord /= texAtlasRowsCount;
    //float ambientOcclusion = float((vertex & 0xC0000000u) >> 30);
    pass_faceLight = light;
    gl_Position = position;
}
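For completeness, here is a sketch of the CPU-side packing this layout implies; the helper is hypothetical (not from the original post), its shifts and masks simply mirror the #defines above, and the texture index is stored +1 because the shader subtracts 1:
#include <stdint.h>
/* Hypothetical packer mirroring the shader's bit layout. */
uint32_t pack_vertex(uint32_t x, uint32_t y, uint32_t z,
                     uint32_t light, uint32_t corner, uint32_t tex_index)
{
    return  (x       & 0x3Fu)                   /* bits  0-5  */
          | ((y      & 0x3Fu) << 6)             /* bits  6-11 */
          | ((z      & 0x3Fu) << 12)            /* bits 12-17 */
          | ((light  & 0x07u) << 18)            /* bits 18-20 */
          | ((corner & 0x03u) << 21)            /* bits 21-22 */
          | (((tex_index + 1u) & 0x7Fu) << 23); /* bits 23-29 */
}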

Related

Implement antialiasing logic for line segments and triangles in GLSL shaders

I'm building a 2D graph structure based on Three.js; all elements of the graph (nodes, edges, triangles for arrows) are calculated in shaders. I was able to reach a good level of antialiasing for nodes (circles) but am stuck with the same task for lines and triangles.
I reached good antialiasing results for nodes (circles), with and without stroke, by following this question: How can I add a uniform width outline to WebGL shader drawn circles/ellipses (drawn using edge/distance antialiasing). My code responsible for the antialiasing alpha:
float strokeWidth = 0.09;
float outerEdgeCenter = 0.5 - strokeWidth;
float d = distance(vUV, vec2(.5, .5));
float delta = fwidth(d);
float alpha = 1.0 - smoothstep(0.45 - delta, 0.45, d);
float stroke = 1.0 - smoothstep(outerEdgeCenter - delta, outerEdgeCenter + delta, d);
But now I'm completely stuck doing the same for edges and triangles.
Here is an example of the shapes I have now (on non-retina displays):
To reduce under-sampling artifacts I want to apply similar algorithms (as for circles) directly in the shaders by manipulating alpha, and I have already found some material related to this topic:
https://thebookofshaders.com/glossary/?search=smoothstep - seems to be the closest solution, but unfortunately I wasn't able to implement it properly or figure out how to set up the y equation for segmented lines.
https://discourse.threejs.org/t/shader-to-create-an-offset-inward-growing-stroke/6060/12 - the last answer looks promising but does not give me the proper result.
https://www.shadertoy.com/view/4dcfW8 - also does not give the proper result.
Here are examples of my shaders for lines and triangles:
Line vertex shader (a slightly adapted version of WestLangley's LineMaterial shader):
precision highp float;
#include <common>
#include <color_pars_vertex>
#include <fog_pars_vertex>
#include <logdepthbuf_pars_vertex>
#include <clipping_planes_pars_vertex>
uniform float linewidth;
uniform vec2 resolution;
attribute vec3 instanceStart;
attribute vec3 instanceEnd;
attribute vec3 instanceColorStart;
attribute vec3 instanceColorEnd;
attribute float alphaStart;
attribute float alphaEnd;
attribute float widthStart;
attribute float widthEnd;
varying vec2 vUv;
varying float alphaTest;
void trimSegment( const in vec4 start, inout vec4 end ) {
// trim end segment so it terminates between the camera plane and the near plane
// conservative estimate of the near plane
float a = projectionMatrix[ 2 ][ 2 ]; // 3rd entry in the 3rd column
float b = projectionMatrix[ 3 ][ 2 ]; // 3rd entry in the 4th column
float nearEstimate = - 0.5 * b / a;
float alpha = ( nearEstimate - start.z ) / ( end.z - start.z );
end.xyz = mix( start.xyz, end.xyz, alpha );
}
void main() {
#ifdef USE_COLOR
vColor.xyz = ( position.y < 0.5 ) ? instanceColorStart : instanceColorEnd;
alphaTest = ( position.y < 0.5 ) ? alphaStart : alphaEnd;
#endif
float aspect = resolution.x / resolution.y;
vUv = uv;
// camera space
vec4 start = modelViewMatrix * vec4( instanceStart, 1.0 );
vec4 end = modelViewMatrix * vec4( instanceEnd, 1.0 );
// special case for perspective projection, and segments that terminate either in, or behind, the camera plane
// clearly the gpu firmware has a way of addressing this issue when projecting into ndc space
// but we need to perform ndc-space calculations in the shader, so we must address this issue directly
// perhaps there is a more elegant solution -- WestLangley
bool perspective = ( projectionMatrix[ 2 ][ 3 ] == - 1.0 ); // 4th entry in the 3rd column
if (perspective) {
if (start.z < 0.0 && end.z >= 0.0) {
trimSegment( start, end );
} else if (end.z < 0.0 && start.z >= 0.0) {
trimSegment( end, start );
}
}
// clip space
vec4 clipStart = projectionMatrix * start;
vec4 clipEnd = projectionMatrix * end;
// ndc space
vec2 ndcStart = clipStart.xy / clipStart.w;
vec2 ndcEnd = clipEnd.xy / clipEnd.w;
// direction
vec2 dir = ndcEnd - ndcStart;
// account for clip-space aspect ratio
dir.x *= aspect;
dir = normalize( dir );
// perpendicular to dir
vec2 offset = vec2( dir.y, - dir.x );
// undo aspect ratio adjustment
dir.x /= aspect;
offset.x /= aspect;
// sign flip
if ( position.x < 0.0 ) offset *= - 1.0;
// endcaps, to round line corners
if ( position.y < 0.0 ) {
// offset += - dir;
} else if ( position.y > 1.0 ) {
// offset += dir;
}
// adjust for linewidth
offset *= (linewidth * widthStart);
// adjust for clip-space to screen-space conversion // maybe resolution should be based on viewport ...
offset /= resolution.y;
// select end
vec4 clip = ( position.y < 0.5 ) ? clipStart : clipEnd;
// back to clip space
offset *= clip.w;
clip.xy += offset;
gl_Position = clip;
vec4 mvPosition = ( position.y < 0.5 ) ? start : end; // this is an approximation
#include <logdepthbuf_vertex>
#include <clipping_planes_vertex>
#include <fog_vertex>
}
Line fragment shader:
precision highp float;
#include <common>
#include <color_pars_fragment>
#include <fog_pars_fragment>
#include <logdepthbuf_pars_fragment>
#include <clipping_planes_pars_fragment>
uniform vec3 diffuse;
uniform float opacity;
varying vec2 vUv;
varying float alphaTest;
void main() {
if ( abs( vUv.y ) > 1.0 ) {
float a = vUv.x;
float b = ( vUv.y > 0.0 ) ? vUv.y - 1.0 : vUv.y + 1.0;
float len2 = a * a + b * b;
if ( len2 > 1.0 ) discard;
}
vec4 diffuseColor = vec4( diffuse, alphaTest );
#include <logdepthbuf_fragment>
#include <color_fragment>
gl_FragColor = vec4( diffuseColor.rgb, diffuseColor.a );
#include <premultiplied_alpha_fragment>
#include <tonemapping_fragment>
#include <encodings_fragment>
#include <fog_fragment>
}
Triangle vertex shader:
precision highp float;
uniform mat4 modelViewMatrix;
uniform mat4 projectionMatrix;
uniform float zoomLevel;
attribute vec3 position;
attribute vec3 vertexPos;
attribute vec3 color;
attribute float alpha;
attribute float xAngle;
attribute float yAngle;
attribute float xScale;
attribute float yScale;
varying vec4 vColor;
// transforms the 'positions' geometry with instance attributes
vec3 transform( inout vec3 position, vec3 T) {
position.x *= xScale;
position.y *= yScale;
// Rotate the position
vec3 rotatedPosition = vec3(
position.x * yAngle + position.y * xAngle,
position.y * yAngle - position.x * xAngle, 0);
position = rotatedPosition + T;
// return the transformed position
return position;
}
void main() {
vec3 pos = position;
vColor = vec4(color, alpha);
// transform it
transform(pos, vertexPos);
gl_Position = projectionMatrix * modelViewMatrix * vec4( pos, 1.0 );
}
Triangle fragment shader:
precision highp float;
varying vec4 vColor;
void main() {
gl_FragColor = vColor;
}
I will really appreciate any help on how to do this, or a suggestion of the right direction for further investigation. Thank you!
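For reference, a minimal sketch of the distance-based approach the linked materials describe, applied to a line segment in a fragment shader. All names here (vUv, segStart, segEnd, halfWidth, color) are illustrative assumptions rather than names from the shaders above, and fwidth needs the OES_standard_derivatives extension in WebGL 1:
#extension GL_OES_standard_derivatives : enable
precision highp float;
varying vec2 vUv;        // assumed: fragment coordinate in the quad's 2D space
uniform vec2 segStart;   // assumed: segment endpoints in that same space
uniform vec2 segEnd;
uniform float halfWidth; // assumed: half the line width in that space
uniform vec3 color;
// Distance from point p to segment ab (Inigo Quilez's well-known formulation).
float sdSegment(vec2 p, vec2 a, vec2 b) {
    vec2 pa = p - a, ba = b - a;
    float h = clamp(dot(pa, ba) / dot(ba, ba), 0.0, 1.0);
    return length(pa - ba * h);
}
void main() {
    float d = sdSegment(vUv, segStart, segEnd);
    float delta = fwidth(d); // how fast the distance changes per screen pixel
    float alpha = 1.0 - smoothstep(halfWidth - delta, halfWidth + delta, d);
    gl_FragColor = vec4(color, alpha);
}
The same pattern extends to triangles and arrowheads by swapping in a triangle distance function.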

Pure Depth SSAO flickering

I'm trying to implement pure-depth SSAO, using this tutorial, in an OpenGL ES 2.0 engine.
Now I experience flickering, which looks as if I'm reading from somewhere I have no data.
Can you see where I made a mistake, or do you have an idea how to solve the flickering problem? I need this to run on mobile and HTML5 with forward rendering; that's why I use the depth-only version of SSAO.
Many thanks.
Video: YouTube
GLSL Code:
uniform sampler2D texture0;
uniform sampler2D texture1;
varying vec2 uvVarying;
vec3 GetNormalFromDepth(float depth, vec2 uv);
uniform mediump vec2 agk_resolution;
uniform float ssaoStrength;
uniform float ssaoBase;
uniform float ssaoArea;
uniform float ssaoFalloff;
uniform float ssaoRadius;
const int samples = 16;
vec3 sampleSphere[samples];
void main()
{
highp float depth = texture2D(texture0, uvVarying).r;
vec3 random = normalize( texture2D(texture1, uvVarying * agk_resolution / 64.0).rgb );
vec3 position = vec3(uvVarying, depth);
vec3 normal = GetNormalFromDepth(depth, uvVarying);
sampleSphere[0] = vec3( 0.5381, 0.1856,-0.4319);
sampleSphere[1] = vec3( 0.1379, 0.2486, 0.4430);
sampleSphere[2] = vec3( 0.3371, 0.5679,-0.0057);
sampleSphere[3] = vec3(-0.6999,-0.0451,-0.0019);
sampleSphere[4] = vec3( 0.0689,-0.1598,-0.8547); // was written to index 3 twice, leaving index 4 uninitialized
sampleSphere[5] = vec3( 0.0560, 0.0069,-0.1843);
sampleSphere[6] = vec3(-0.0146, 0.1402, 0.0762);
sampleSphere[7] = vec3( 0.0100,-0.1924,-0.0344);
sampleSphere[8] = vec3(-0.3577,-0.5301,-0.4358);
sampleSphere[9] = vec3(-0.3169, 0.1063, 0.0158);
sampleSphere[10] = vec3( 0.0103,-0.5869, 0.0046);
sampleSphere[11] = vec3(-0.0897,-0.4940, 0.3287);
sampleSphere[12] = vec3( 0.7119,-0.0154,-0.0918);
sampleSphere[13] = vec3(-0.0533, 0.0596,-0.5411);
sampleSphere[14] = vec3( 0.0352,-0.0631, 0.5460);
sampleSphere[15] = vec3(-0.4776, 0.2847,-0.0271);
float radiusDepth = ssaoRadius/depth;
float occlusion = 0.0;
for(int i=0; i < samples; i++)
{
vec3 ray = radiusDepth * reflect(sampleSphere[i], random);
vec3 hemiRay = position + sign(dot(ray, normal)) * ray;
float occDepth = texture2D(texture0, clamp(hemiRay.xy, 0.0, 1.0)).r;
float difference = depth - occDepth;
occlusion += step(ssaoFalloff, difference) * (1.0 - smoothstep(ssaoFalloff, ssaoArea, difference));
// float rangeCheck = abs(difference) < radiusDepth ? 1.0 : 0.0;
// occlusion += (occDepth <= position.z ? 1.0 : 0.0) * rangeCheck;
}
float ao = 1.0 - ssaoStrength * occlusion * (1.0 / float(samples));
gl_FragColor = vec4(clamp(ao + ssaoBase, 0.0, 1.0));
}
vec3 GetNormalFromDepth(float depth, vec2 uv)
{
vec2 offset1 = vec2(0.0,1.0/agk_resolution.y);
vec2 offset2 = vec2(1.0/agk_resolution.x,0.0);
float depth1 = texture2D(texture0, uv + offset1).r;
float depth2 = texture2D(texture0, uv + offset2).r;
vec3 p1 = vec3(offset1, depth1 - depth);
vec3 p2 = vec3(offset2, depth2 - depth);
vec3 normal = cross(p1, p2);
normal.z = -normal.z;
return normalize(normal);
}
I carefully checked my code against the code you (Rabbid76) created for the JSFiddle and came across the if (depth > 0.0) statement, which solved the problem... so you somehow answered my question, and I would like to thank you and mark you for that.
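For readers hitting the same flickering, a sketch of where such a guard would sit in the fragment shader above; it assumes the depth texture is cleared so that 0.0 marks pixels with no geometry data behind them (the exact convention depends on how the depth pass is set up):
highp float depth = texture2D(texture0, uvVarying).r;
if (depth > 0.0)
{
    // ... run the sampling loop from the shader above and compute 'ao' ...
    // gl_FragColor = vec4(clamp(ao + ssaoBase, 0.0, 1.0));
}
else
{
    gl_FragColor = vec4(1.0); // no depth data here: leave the pixel unoccluded
}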

Atmosphere Scattering for Earth from space and on the ground

Please advise how to render the Earth's atmosphere so that it is visible both from space and from the ground (as shown in the image).
a model of the earth:
Earth = new THREE.Mesh(new THREE.SphereGeometry(6700,32,32),ShaderMaterialEarth);
model of the cosmos:
cosmos= new THREE.Mesh(new THREE.SphereGeometry(50000,32,32),ShaderMaterialCosmos);
and a light source:
sun = new THREE.DirectionalLight();
I just do not know where to start. Perhaps ShaderMaterialCosmos should do this, receiving the camera position and computing how each pixel should be shaded. But how?
I tried using the following, but I get zero vectors at the input of the fragment shader:
http://http.developer.nvidia.com/GPUGems2/gpugems2_chapter16.html
vertexShader:
#define M_PI 3.1415926535897932384626433832795
const float ESun=1.0;
const float Kr = 0.0025;
const float Km = 0.0015;
const int nSamples = 2;
const float fSamples = 2.0; // must match nSamples: the sample length is divided by this
const float fScaleDepth = 0.25;
varying vec2 vUv;
varying vec3 wPosition;
varying vec4 c0;
varying vec4 c1;
varying vec3 t0;
uniform vec3 v3CameraPos; // The camera's current position
uniform vec3 v3LightDir; // Direction vector to the light source
uniform vec3 v3InvWavelength; // 1 / pow(wavelength, 4) for RGB
uniform float fCameraHeight; // The camera's current height
const float fOuterRadius=6500.0; // The outer (atmosphere) radius
const float fInnerRadius=6371.0; // The inner (planetary) radius
const float fKrESun=Kr*ESun; // Kr * ESun
const float fKmESun=Km*ESun; // Km * ESun
const float fKr4PI=Kr*4.0*M_PI; // Kr * 4 * PI
const float fKm4PI=Km*4.0*M_PI; // Km * 4 * PI
const float fScale=1.0/(fOuterRadius-fInnerRadius); // 1 / (fOuterRadius - fInnerRadius)
const float fScaleOverScaleDepth= fScale / fScaleDepth; // fScale / fScaleDepth
const float fInvScaleDepth=1.0/0.25;
float getNearIntersection(vec3 v3Pos, vec3 v3Ray, float fDistance2, float fRadius2)
{
float B = 2.0 * dot(v3Pos, v3Ray);
float C = fDistance2 - fRadius2;
float fDet = max(0.0, B*B - 4.0 * C);
return 0.5 * (-B - sqrt(fDet));
}
float scale(float fCos)
{
float x = 1.0 - fCos;
return fScaleDepth * exp(-0.00287 + x*(0.459 + x*(3.83 + x*(-6.80 + x*5.25))));
}
void main() {
// Get the ray from the camera to the vertex and its length (which
// is the far point of the ray passing through the atmosphere)
vec3 v3Pos = position.xyz;
vec3 v3Ray = v3Pos - v3CameraPos;
float fFar = length(v3Ray);
v3Ray /= fFar;
// Calculate the closest intersection of the ray with
// the outer atmosphere (point A in Figure 16-3)
float fNear = getNearIntersection(v3CameraPos, v3Ray, fCameraHeight*fCameraHeight, fOuterRadius*fOuterRadius);
// Calculate the ray's start and end positions in the atmosphere,
// then calculate its scattering offset
vec3 v3Start = v3CameraPos + v3Ray * fNear;
fFar -= fNear;
float fStartAngle = dot(v3Ray, v3Start) / fOuterRadius;
float fStartDepth = exp(-fInvScaleDepth);
float fStartOffset = fStartDepth * scale(fStartAngle);
// Initialize the scattering loop variables
float fSampleLength = fFar / fSamples;
float fScaledLength = fSampleLength * fScale;
vec3 v3SampleRay = v3Ray * fSampleLength;
vec3 v3SamplePoint = v3Start + v3SampleRay * 0.5;
// Now loop through the sample points
vec3 v3FrontColor = vec3(0.0, 0.0, 0.0);
for(int i=0; i<nSamples; i++) {
float fHeight = length(v3SamplePoint);
float fDepth = exp(fScaleOverScaleDepth * (fInnerRadius - fHeight));
float fLightAngle = dot(v3LightDir, v3SamplePoint) / fHeight;
float fCameraAngle = dot(v3Ray, v3SamplePoint) / fHeight;
float fScatter = (fStartOffset + fDepth * (scale(fLightAngle) * scale(fCameraAngle)));
vec3 v3Attenuate = exp(-fScatter * (v3InvWavelength * fKr4PI + fKm4PI));
v3FrontColor += v3Attenuate * (fDepth * fScaledLength);
v3SamplePoint += v3SampleRay;
}
wPosition = (modelMatrix * vec4(position,1.0)).xyz;
c0.rgb = v3FrontColor * (v3InvWavelength * fKrESun);
c1.rgb = v3FrontColor * fKmESun;
t0 = v3CameraPos - v3Pos;
vUv = uv;
gl_Position = projectionMatrix * modelViewMatrix * vec4(position, 1.0); // clip-space position; without this write nothing is drawn
}
fragmentShader:
float getMiePhase(float fCos, float fCos2, float g, float g2){
return 1.5 * ((1.0 - g2) / (2.0 + g2)) * (1.0 + fCos2) / pow(1.0 + g2 - 2.0*g*fCos, 1.5);
}
// Rayleigh phase function
float getRayleighPhase(float fCos2){
//return 0.75 + 0.75 * fCos2;
return 0.75 * (2.0 + 0.5 * fCos2);
}
varying vec2 vUv;
varying vec3 wPosition;
varying vec4 c0;
varying vec4 c1;
varying vec3 t0;
uniform vec3 v3LightDir;
uniform float g;
uniform float g2;
void main() {
float fCos = dot(v3LightDir, t0) / length(t0);
float fCos2 = fCos * fCos;
gl_FragColor = getRayleighPhase(fCos2) * c0 + getMiePhase(fCos, fCos2, g, g2) * c1;
gl_FragColor = c1; // note: this overwrites the line above, leaving only the Mie term
}
Chapter 16 of GPU Gems 2 has a nice explanation and illustrations for achieving your goal in real time.
Basically you need to perform ray casting through the atmosphere layer and evaluate the light scattering.

GLSL Shadows with Perlin Noise

So I've recently gotten into using WebGL, and more specifically writing GLSL shaders, and I have run into a snag while writing the fragment shader for my "water" shader, which is derived from this tutorial.
What I'm trying to achieve is a stepped shading (toon shading, cel shading...) effect on waves generated by my vertex shader, but the fragment shader seems to treat the waves as though they were still a flat plane, and the entire mesh is drawn as one solid color.
What am I missing here? The sphere works perfectly, but flat surfaces are all shaded uniformly. I have the same problem if I use a cube: each face of the cube is shaded independently, but every face is given a single solid color.
The Scene
This is how I have my test scene set up. I have two meshes using the same material - a sphere and a plane - and a light source.
The Problem
As you can see, the shader is working as expected on the sphere.
I enabled wireframe for this shot to show that the vertex shader (Perlin noise) is working beautifully on the plane.
But when I turn the wireframe off, you can see that the fragment shader seems to receive the same level of light uniformly across the entire plane, creating this...
Rotating the plane to face the light source changes the color of the material, but again the color is applied uniformly over the entire surface of the plane.
The Fragment Shader
In all its script-kid glory, lol.
uniform vec3 uMaterialColor;
uniform vec3 uDirLightPos;
uniform vec3 uDirLightColor;
uniform float uKd;
uniform float uBorder;
varying vec3 vNormal;
varying vec3 vViewPosition;
void main() {
vec4 color;
// compute direction to light
vec4 lDirection = viewMatrix * vec4( uDirLightPos, 0.0 );
vec3 lVector = normalize( lDirection.xyz );
// N * L. Normal must be normalized, since it's interpolated.
vec3 normal = normalize( vNormal );
// check the diffuse dot product against uBorder and adjust
// this diffuse value accordingly.
float diffuse = max( dot( normal, lVector ), 0.0);
if (diffuse > 0.95)
color = vec4(1.0,0.0,0.0,1.0);
else if (diffuse > 0.85)
color = vec4(0.9,0.0,0.0,1.0);
else if (diffuse > 0.75)
color = vec4(0.8,0.0,0.0,1.0);
else if (diffuse > 0.65)
color = vec4(0.7,0.0,0.0,1.0);
else if (diffuse > 0.55)
color = vec4(0.6,0.0,0.0,1.0);
else if (diffuse > 0.45)
color = vec4(0.5,0.0,0.0,1.0);
else if (diffuse > 0.35)
color = vec4(0.4,0.0,0.0,1.0);
else if (diffuse > 0.25)
color = vec4(0.3,0.0,0.0,1.0);
else if (diffuse > 0.15)
color = vec4(0.2,0.0,0.0,1.0);
else if (diffuse > 0.05)
color = vec4(0.1,0.0,0.0,1.0);
else
color = vec4(0.05,0.0,0.0,1.0);
gl_FragColor = color;
}
The Vertex Shader
vec3 mod289(vec3 x)
{
return x - floor(x * (1.0 / 289.0)) * 289.0;
}
vec4 mod289(vec4 x)
{
return x - floor(x * (1.0 / 289.0)) * 289.0;
}
vec4 permute(vec4 x)
{
return mod289(((x*34.0)+1.0)*x);
}
vec4 taylorInvSqrt(vec4 r)
{
return 1.79284291400159 - 0.85373472095314 * r;
}
vec3 fade(vec3 t) {
return t*t*t*(t*(t*6.0-15.0)+10.0);
}
// Classic Perlin noise
float cnoise(vec3 P)
{
vec3 Pi0 = floor(P); // Integer part for indexing
vec3 Pi1 = Pi0 + vec3(1.0); // Integer part + 1
Pi0 = mod289(Pi0);
Pi1 = mod289(Pi1);
vec3 Pf0 = fract(P); // Fractional part for interpolation
vec3 Pf1 = Pf0 - vec3(1.0); // Fractional part - 1.0
vec4 ix = vec4(Pi0.x, Pi1.x, Pi0.x, Pi1.x);
vec4 iy = vec4(Pi0.yy, Pi1.yy);
vec4 iz0 = Pi0.zzzz;
vec4 iz1 = Pi1.zzzz;
vec4 ixy = permute(permute(ix) + iy);
vec4 ixy0 = permute(ixy + iz0);
vec4 ixy1 = permute(ixy + iz1);
vec4 gx0 = ixy0 * (1.0 / 7.0);
vec4 gy0 = fract(floor(gx0) * (1.0 / 7.0)) - 0.5;
gx0 = fract(gx0);
vec4 gz0 = vec4(0.5) - abs(gx0) - abs(gy0);
vec4 sz0 = step(gz0, vec4(0.0));
gx0 -= sz0 * (step(0.0, gx0) - 0.5);
gy0 -= sz0 * (step(0.0, gy0) - 0.5);
vec4 gx1 = ixy1 * (1.0 / 7.0);
vec4 gy1 = fract(floor(gx1) * (1.0 / 7.0)) - 0.5;
gx1 = fract(gx1);
vec4 gz1 = vec4(0.5) - abs(gx1) - abs(gy1);
vec4 sz1 = step(gz1, vec4(0.0));
gx1 -= sz1 * (step(0.0, gx1) - 0.5);
gy1 -= sz1 * (step(0.0, gy1) - 0.5);
vec3 g000 = vec3(gx0.x,gy0.x,gz0.x);
vec3 g100 = vec3(gx0.y,gy0.y,gz0.y);
vec3 g010 = vec3(gx0.z,gy0.z,gz0.z);
vec3 g110 = vec3(gx0.w,gy0.w,gz0.w);
vec3 g001 = vec3(gx1.x,gy1.x,gz1.x);
vec3 g101 = vec3(gx1.y,gy1.y,gz1.y);
vec3 g011 = vec3(gx1.z,gy1.z,gz1.z);
vec3 g111 = vec3(gx1.w,gy1.w,gz1.w);
vec4 norm0 = taylorInvSqrt(vec4(dot(g000, g000), dot(g010, g010), dot(g100, g100), dot(g110, g110)));
g000 *= norm0.x;
g010 *= norm0.y;
g100 *= norm0.z;
g110 *= norm0.w;
vec4 norm1 = taylorInvSqrt(vec4(dot(g001, g001), dot(g011, g011), dot(g101, g101), dot(g111, g111)));
g001 *= norm1.x;
g011 *= norm1.y;
g101 *= norm1.z;
g111 *= norm1.w;
float n000 = dot(g000, Pf0);
float n100 = dot(g100, vec3(Pf1.x, Pf0.yz));
float n010 = dot(g010, vec3(Pf0.x, Pf1.y, Pf0.z));
float n110 = dot(g110, vec3(Pf1.xy, Pf0.z));
float n001 = dot(g001, vec3(Pf0.xy, Pf1.z));
float n101 = dot(g101, vec3(Pf1.x, Pf0.y, Pf1.z));
float n011 = dot(g011, vec3(Pf0.x, Pf1.yz));
float n111 = dot(g111, Pf1);
vec3 fade_xyz = fade(Pf0);
vec4 n_z = mix(vec4(n000, n100, n010, n110), vec4(n001, n101, n011, n111), fade_xyz.z);
vec2 n_yz = mix(n_z.xy, n_z.zw, fade_xyz.y);
float n_xyz = mix(n_yz.x, n_yz.y, fade_xyz.x);
return 2.2 * n_xyz;
}
// Classic Perlin noise, periodic variant
float pnoise(vec3 P, vec3 rep)
{
vec3 Pi0 = mod(floor(P), rep); // Integer part, modulo period
vec3 Pi1 = mod(Pi0 + vec3(1.0), rep); // Integer part + 1, mod period
Pi0 = mod289(Pi0);
Pi1 = mod289(Pi1);
vec3 Pf0 = fract(P); // Fractional part for interpolation
vec3 Pf1 = Pf0 - vec3(1.0); // Fractional part - 1.0
vec4 ix = vec4(Pi0.x, Pi1.x, Pi0.x, Pi1.x);
vec4 iy = vec4(Pi0.yy, Pi1.yy);
vec4 iz0 = Pi0.zzzz;
vec4 iz1 = Pi1.zzzz;
vec4 ixy = permute(permute(ix) + iy);
vec4 ixy0 = permute(ixy + iz0);
vec4 ixy1 = permute(ixy + iz1);
vec4 gx0 = ixy0 * (1.0 / 7.0);
vec4 gy0 = fract(floor(gx0) * (1.0 / 7.0)) - 0.5;
gx0 = fract(gx0);
vec4 gz0 = vec4(0.5) - abs(gx0) - abs(gy0);
vec4 sz0 = step(gz0, vec4(0.0));
gx0 -= sz0 * (step(0.0, gx0) - 0.5);
gy0 -= sz0 * (step(0.0, gy0) - 0.5);
vec4 gx1 = ixy1 * (1.0 / 7.0);
vec4 gy1 = fract(floor(gx1) * (1.0 / 7.0)) - 0.5;
gx1 = fract(gx1);
vec4 gz1 = vec4(0.5) - abs(gx1) - abs(gy1);
vec4 sz1 = step(gz1, vec4(0.0));
gx1 -= sz1 * (step(0.0, gx1) - 0.5);
gy1 -= sz1 * (step(0.0, gy1) - 0.5);
vec3 g000 = vec3(gx0.x,gy0.x,gz0.x);
vec3 g100 = vec3(gx0.y,gy0.y,gz0.y);
vec3 g010 = vec3(gx0.z,gy0.z,gz0.z);
vec3 g110 = vec3(gx0.w,gy0.w,gz0.w);
vec3 g001 = vec3(gx1.x,gy1.x,gz1.x);
vec3 g101 = vec3(gx1.y,gy1.y,gz1.y);
vec3 g011 = vec3(gx1.z,gy1.z,gz1.z);
vec3 g111 = vec3(gx1.w,gy1.w,gz1.w);
vec4 norm0 = taylorInvSqrt(vec4(dot(g000, g000), dot(g010, g010), dot(g100, g100), dot(g110, g110)));
g000 *= norm0.x;
g010 *= norm0.y;
g100 *= norm0.z;
g110 *= norm0.w;
vec4 norm1 = taylorInvSqrt(vec4(dot(g001, g001), dot(g011, g011), dot(g101, g101), dot(g111, g111)));
g001 *= norm1.x;
g011 *= norm1.y;
g101 *= norm1.z;
g111 *= norm1.w;
float n000 = dot(g000, Pf0);
float n100 = dot(g100, vec3(Pf1.x, Pf0.yz));
float n010 = dot(g010, vec3(Pf0.x, Pf1.y, Pf0.z));
float n110 = dot(g110, vec3(Pf1.xy, Pf0.z));
float n001 = dot(g001, vec3(Pf0.xy, Pf1.z));
float n101 = dot(g101, vec3(Pf1.x, Pf0.y, Pf1.z));
float n011 = dot(g011, vec3(Pf0.x, Pf1.yz));
float n111 = dot(g111, Pf1);
vec3 fade_xyz = fade(Pf0);
vec4 n_z = mix(vec4(n000, n100, n010, n110), vec4(n001, n101, n011, n111), fade_xyz.z);
vec2 n_yz = mix(n_z.xy, n_z.zw, fade_xyz.y);
float n_xyz = mix(n_yz.x, n_yz.y, fade_xyz.x);
return 2.2 * n_xyz;
}
varying vec2 vUv;
varying float noise;
uniform float time;
// for the cell shader
varying vec3 vNormal;
varying vec3 vViewPosition;
float turbulence( vec3 p ) {
float w = 100.0;
float t = -.5;
for (float f = 1.0 ; f <= 10.0 ; f++ ){
float power = pow( 2.0, f );
t += abs( pnoise( vec3( power * p ), vec3( 10.0, 10.0, 10.0 ) ) / power );
}
return t;
}
varying vec3 vertexWorldPos;
void main() {
vUv = uv;
// add time to the noise parameters so it's animated
noise = 10.0 * -.10 * turbulence( .5 * normal + time );
float b = 25.0 * pnoise( 0.05 * position + vec3( 2.0 * time ), vec3( 100.0 ) );
float displacement = - 10. - noise + b;
vec3 newPosition = position + normal * displacement;
gl_Position = projectionMatrix * modelViewMatrix * vec4( newPosition, 1.0 );
// for the cell shader effect
vNormal = normalize( normalMatrix * normal );
vec4 mvPosition = modelViewMatrix * vec4( position, 1.0 );
vViewPosition = -mvPosition.xyz;
}
Worth mentioning:
I am using the Three.js library.
My light source is an instance of THREE.SpotLight.
First of all, shadows are completely different. Your problem here is a lack of change in the per-vertex normal after displacement. Correcting this is not going to get you shadows, but your lighting will at least vary across your displaced geometry.
If you have access to partial derivatives, you can do this in the fragment shader. Otherwise, you are kind of out of luck in GL ES, due to a lack of vertex adjacency information. You could also compute per-face normals with a Geometry Shader, but that is not an option in WebGL.
This should be all of the necessary changes to implement this; note that it requires partial-derivative support (an optional extension in OpenGL ES 2.0).
Vertex Shader:
varying vec3 vertexViewPos; // NEW
void main() {
...
vec3 newPosition = position + normal * displacement;
vertexViewPos = (modelViewMatrix * vec4 (newPosition, 1.0)).xyz; // NEW
...
}
Fragment Shader:
#extension GL_OES_standard_derivatives : require
uniform vec3 uMaterialColor;
uniform vec3 uDirLightPos;
uniform vec3 uDirLightColor;
uniform float uKd;
uniform float uBorder;
varying vec3 vNormal;
varying vec3 vViewPosition;
varying vec3 vertexViewPos; // NEW
void main() {
vec4 color;
// compute direction to light
vec4 lDirection = viewMatrix * vec4( uDirLightPos, 0.0 );
vec3 lVector = normalize( lDirection.xyz );
// N * L. Normal must be normalized, since it's interpolated.
vec3 normal = normalize(cross (dFdx (vertexViewPos), dFdy (vertexViewPos))); // UPDATED
...
}
To enable partial derivative support in WebGL you need to check the extension like this:
var ext = gl.getExtension("OES_standard_derivatives");
if (!ext) {
alert("OES_standard_derivatives does not exist on this machine");
return;
}
// proceed with the shaders above.

WebGL GLSL / OpenGL ES 2.0

I'm currently porting an engine, written for Android and iOS devices using OpenGL ES 2.0, to WebGL, and I stumbled upon a problem with shaders. Most of the shaders written for the mobile app don't work on WebGL. Example:
(the $ placeholders are replaced with values by a preprocessor before compilation)
Vertex Shader :
uniform mat4 u_mvpMatrix;
attribute vec4 a_position;
attribute vec2 TexCoordIn;
varying vec2 TexCoordOut;
varying highp vec2 MCPosition;
varying float radius1;
varying float radius2;
uniform int time;
const int delay = ($2*60)/1000;
const int animDuration = ($3*60)/1000;
void main()
{
gl_Position = u_mvpMatrix * a_position;
MCPosition = a_position.xy;
TexCoordOut = TexCoordIn;
int timer = time - delay;
radius1 = float(240 * (timer-26) / animDuration);
radius2 = float(240 * (timer-1) / animDuration);
}
Fragment Shader :
precision mediump float;
uniform vec2 spriteSize;
uniform vec2 spritePosition;
uniform lowp float alpha;
uniform lowp float brightness;
uniform sampler2D Texture;
uniform int time;
varying vec2 MCPosition;
varying vec2 TexCoordOut;
varying float radius1;
varying float radius2;
const float hexRadius = 7.0;
const float hexWidth = hexRadius*sqrt(3.0);
const float cos30 = hexRadius/hexWidth;
const float midHexRadius = hexRadius/2.0;
const float midHexWidth = hexWidth/2.0;
const vec2 centerEffect = vec2($1,$0);
const float SIDE = hexRadius * 3. / 2.;
const float HEIGHT = hexWidth;
const float SEMI_HEIGHT = HEIGHT / 2.;
const float RADIUS = hexRadius;
vec2 cellIndex(float i, float j)
{
float mX = i * SIDE;
float mY = SEMI_HEIGHT * (2. * j + mod(i, 2.));
return vec2(mX, mY);
}
void main()
{
vec2 center;
bool isOnEdge = false;
float x = MCPosition.y;
float y = MCPosition.x;
float ci = floor(x/SIDE);
float cx = mod(x, SIDE);
float isCiImpair = mod(ci, 2.);
float ty = y - isCiImpair * SEMI_HEIGHT;
float cj = floor( ty / HEIGHT);
float cy = ty - HEIGHT * cj;
float border = abs(RADIUS / 2. - RADIUS * cy / HEIGHT);
if (cx > border)
{
center = cellIndex(ci , cj);
if( cy < 1. || cy > (HEIGHT-1.) || (cx- border) < 1.0)
{
isOnEdge = true;
}
}
else
{
center = cellIndex(ci - 1., cj + isCiImpair - ((cy < SEMI_HEIGHT) ? 1. : 0.));
if( (border - cx ) < 1.0)
{
isOnEdge = true;
}
}
float distFromCenter = distance(centerEffect, center);
if(distFromCenter > radius2)
{
gl_FragColor = vec4(0.);
}
else
{
vec4 texColor = texture2D(Texture, TexCoordOut);
//filling is over
if(distFromCenter < radius1)
{
gl_FragColor = texColor;
}
else
{
float ratio = (radius2 - distFromCenter)/(radius2 - radius1);
gl_FragColor = vec4(texColor.a * ratio);
if(!isOnEdge)
{
gl_FragColor.rgb *= texColor.rgb;
}
}
gl_FragColor.rgb *= brightness;
}
}
It works like a charm on OpenGL ES, but on WebGL I'm getting reports about const floats being initialized with non-constant values. The operations I do always return the same result, which is why I declared them const.
An error occurred compiling the shaders: ERROR: 0:17: '=' : assigning non-constant to 'const mediump float'
ERROR: 0:18: '=' : assigning non-constant to 'const mediump float'
ERROR: 0:20: '=' : assigning non-constant to 'const mediump float'
ERROR: 0:24: '=' : assigning non-constant to 'const mediump float'
ERROR: 0:25: '=' : assigning non-constant to 'const mediump float'
Is there something I can do about it, or do I have to rewrite all the shaders to match WebGL GLSL specifics?
When I was working on WebGL shaders, the only consts that were allowed were pure numbers, and I still haven't found a way around it. It seems that rewriting is the only option. :P
Why not use defines for your ints/floats (#define right in the shader code)?
For me, "const vec2" works fine. If it doesn't for you, move them to uniforms. (By the way, it seems that NVIDIA drivers treat consts as uniforms.)
