When i use TbloomEffect and TgloomEffect (Delphi component) under android/iOS (so with openGL) then it's produce only black & white output :( under windows (DX11) then it's work ok. I guess it's a bug in the delphi source code but i can't find what wrong in the original GLSL code below I extracted from the original delphi source:
TgloomEffect use :
varying vec4 TEX0;
vec4 _ret_0;
vec3 _TMP4;
float _TMP3;
vec3 _x0014;
vec3 _TMP15;
float _grey0022;
float _grey0028;
vec3 _TMP35;
uniform float _GloomIntensity;
uniform float _BaseIntensity;
uniform float _GloomSaturation;
uniform float _BaseSaturation;
uniform sampler2D _Input;
void main()
{
vec4 _color1;
vec3 _base;
vec3 _gloom;
vec3 _TMP10;
_color1 = texture2D(_Input, TEX0.xy);
_base = 1.0 - _color1.xyz/_color1.w;
_x0014 = (_base - 2.50000000E-001)/7.50000000E-001;
_TMP4 = min(vec3( 1.0, 1.0, 1.0), _x0014);
_TMP15 = max(vec3( 0.0, 0.0, 0.0), _TMP4);
_grey0022 = dot(_TMP15, vec3( 3.00000012E-001, 5.89999974E-001, 1.09999999E-001));
_TMP3 = _grey0022 + _GloomSaturation*(_TMP15.x - _grey0022);
_gloom = vec3(_TMP3, _TMP3, _TMP3)*_GloomIntensity;
_grey0028 = dot(_base, vec3( 3.00000012E-001, 5.89999974E-001, 1.09999999E-001));
_TMP3 = _grey0028 + _BaseSaturation*(_base.x - _grey0028);
_base = vec3(_TMP3, _TMP3, _TMP3)*_BaseIntensity;
_TMP4 = min(vec3( 1.0, 1.0, 1.0), _gloom);
_TMP35 = max(vec3( 0.0, 0.0, 0.0), _TMP4);
_base = _base*(1.0 - _TMP35);
_TMP10 = (1.0 - (_base + _gloom))*_color1.w;
_ret_0 = vec4(_TMP10.x, _TMP10.y, _TMP10.z, _color1.w);
gl_FragColor = _ret_0;
return;
}
and TBloomEffect use :
varying vec4 TEX0;
vec4 _ret_0;
vec3 _TMP5;
float _TMP4;
vec3 _TMP3;
vec3 _TMP14;
vec3 _x0015;
float _grey0021;
float _grey0027;
vec3 _TMP34;
uniform float _BloomIntensity;
uniform float _BaseIntensity;
uniform float _BloomSaturation;
uniform float _BaseSaturation;
uniform sampler2D _Input;
void main()
{
vec4 _color1;
vec3 _base;
vec3 _bloom;
vec3 _TMP11;
_color1 = texture2D(_Input, TEX0.xy);
_base = _color1.xyz/_color1.w;
_x0015 = (_base - 2.50000000E-001)/7.50000000E-001;
_TMP3 = min(vec3( 1.0, 1.0, 1.0), _x0015);
_TMP14 = max(vec3( 0.0, 0.0, 0.0), _TMP3);
_grey0021 = dot(_TMP14, vec3( 3.00000012E-001, 5.89999974E-001, 1.09999999E-001));
_TMP4 = _grey0021 + _BloomSaturation*(_TMP14.x - _grey0021);
_bloom = vec3(_TMP4, _TMP4, _TMP4)*_BloomIntensity;
_grey0027 = dot(_base, vec3( 3.00000012E-001, 5.89999974E-001, 1.09999999E-001));
_TMP4 = _grey0027 + _BaseSaturation*(_base.x - _grey0027);
_base = vec3(_TMP4, _TMP4, _TMP4)*_BaseIntensity;
_TMP5 = min(vec3( 1.0, 1.0, 1.0), _bloom);
_TMP34 = max(vec3( 0.0, 0.0, 0.0), _TMP5);
_base = _base*(1.0 - _TMP34);
_TMP11 = (_base + _bloom)*_color1.w;
_ret_0 = vec4(_TMP11.x, _TMP11.y, _TMP11.z, _color1.w);
gl_FragColor = _ret_0;
return;
}
What wrong in those 2 GLSL codes that make output only in black & white ?
because of a bug in delphi ...
good code is :
varying vec4 TEX0;
vec4 _ret_0;
vec3 _TMP4;
vec3 _TMP3;
vec3 _x0014;
vec3 _TMP15;
float _grey0022;
float _grey0028;
vec3 _TMP35;
uniform float _GloomIntensity;
uniform float _BaseIntensity;
uniform float _GloomSaturation;
uniform float _BaseSaturation;
uniform sampler2D _Input;
void main()
{
vec4 _color1;
vec3 _base;
vec3 _gloom;
vec3 _TMP10;
_color1 = texture2D(_Input, TEX0.xy);
_base = 1.0 - _color1.xyz/_color1.w;
_x0014 = (_base - 0.25)/0.75;
_TMP4 = min(vec3( 1.0, 1.0, 1.0), _x0014);
_TMP15 = max(vec3( 0.0, 0.0, 0.0), _TMP4);
_grey0022 = dot(_TMP15, vec3( 0.3, 0.59, 0.11));
_TMP3 = vec3(_grey0022, _grey0022, _grey0022) + _GloomSaturation*(_TMP15 - vec3(_grey0022, _grey0022, _grey0022));
_gloom = _TMP3*_GloomIntensity;
_grey0028 = dot(_base, vec3( 0.3, 0.59, 0.11));
_TMP3 = vec3(_grey0028, _grey0028, _grey0028) + _BaseSaturation*(_base - vec3(_grey0028, _grey0028, _grey0028));
_base = _TMP3*_BaseIntensity;
_TMP4 = min(vec3( 1.0, 1.0, 1.0), _gloom);
_TMP35 = max(vec3( 0.0, 0.0, 0.0), _TMP4);
_base = _base*(1.0 - _TMP35);
_TMP10 = (1.0 - (_base + _gloom))*_color1.w;
_ret_0 = vec4(_TMP10.x, _TMP10.y, _TMP10.z, _color1.w);
gl_FragColor = _ret_0;
return;
}
Related
I am completely new to opengl es shader and cook-torrance,
the following shader only output red, no texture color processed for ambient light and point light.
the cube is for testing,no shader applied to it, the right side rocket with texture with no output,just red color. Any opengl es guy can help?
no texture only red
original texture before apply cook-torrance and output by gl_FragColor =texture2D(texture,vertTexCoord.st).rgb*vertColor;
But what make CookTorance() below fail?
fragment shader
#ifdef GL_ES
precision mediump float;
precision mediump int;
#endif
#define PI 3.14159265
//copper
uniform vec3 diffuseColor = vec3(0.75164,0.60648,0.22648);
uniform vec3 specularColor = vec3(0.256777,0.137622,0.086014);
uniform float F0 = 0.8;
uniform float roughness = 0.1;
uniform float k = 0.2;
//uniform vec3 lightColor = vec3(1, 1, 1);
uniform sampler2D texture;
varying vec4 varyvertColor;
varying vec3 ecNormal; //eye normal
varying vec3 lightDir;
varying vec4 varyambient;
varying vec4 varyspecular;
varying float varyshininess; //0.1
varying vec4 vertTexCoord;
varying vec4 vposition;
vec3 CookTorrance(vec3 materialDiffuseColor,
vec3 materialSpecularColor,
vec3 normal,
vec3 lightDir,
vec3 viewDir,
vec3 lightColor)
{
float NdotL = max(0, dot(normal, lightDir));
float Rs = 0.0;
if (NdotL > 0)
{
vec3 H = normalize(lightDir + viewDir);
float NdotH = max(0, dot(normal, H));
float NdotV = max(0, dot(normal, viewDir));
float VdotH = max(0, dot(lightDir, H));
// Fresnel reflectance
float F = pow(1.0 - VdotH, 5.0);
F *= (1.0 - F0);
F += F0;
// Microfacet distribution by Beckmann
float m_squared = roughness * roughness;
float r1 = 1.0 / (4.0 * m_squared * pow(NdotH, 4.0));
float r2 = (NdotH * NdotH - 1.0) / (m_squared * NdotH * NdotH);
float D = r1 * exp(r2);
// Geometric shadowing
float two_NdotH = 2.0 * NdotH;
float g1 = (two_NdotH * NdotV) / VdotH;
float g2 = (two_NdotH * NdotL) / VdotH;
float G = min(1.0, min(g1, g2));
Rs = (F * D * G) / (PI * NdotL * NdotV);
}
return materialDiffuseColor * lightColor * NdotL + lightColor * materialSpecularColor * NdotL * (k + Rs * (1.0 - k));
}
void main(){
vec3 direction = normalize(lightDir);
//vec3 normal = normalize(ecNormal);
vec3 normal = -normalize(ecNormal);
vec4 vertColor = varyambient +
vec4(CookTorrance(texture2D(texture,vertTexCoord.st).rgb*varyvertColor.xyz,
varyspecular.xyz*specularColor,
vposition.xyz,
direction,
normal,
varyvertColor.rgb),1.0);
gl_FragColor = vertColor;
}
is there any error in the above cook-torrance?
please help on it.
the vertex shader
uniform mat4 modelviewMatrix;
uniform mat4 transformMatrix;
uniform mat3 normalMatrix;
uniform mat4 texMatrix;
attribute vec4 position;
attribute vec4 color;
attribute vec3 normal;
attribute vec4 ambient;
attribute vec4 specular;
attribute float shininess;
attribute vec2 texCoord;
varying vec4 vposition;
varying vec4 vertColor;
varying vec3 ecNormal;
varying vec3 lightDir;
varying vec4 varyambient;
varying vec4 varyspecular;
varying float varyshininess;
varying vec4 varyvertColor;
varying vec4 vertTexCoord;
void main() {
// Vertex in clip coordinates
gl_Position = transformMatrix * position;
vposition = transformMatrix * position;
// Vertex in eye coordinates
vec3 ecVertex = vec3(modelviewMatrix * position);
// Normal vector in eye coordinates
ecNormal = normalize(normalMatrix * normal);
varyambient=ambient;
varyspecular=specular;
varyshininess=shininess;
varyvertColor=color;
vertTexCoord = texMatrix * vec4(texCoord, 1.0, 1.0);
}
I'm trying to rotate an image in webgl. If the texture has the same width as height there is no problem, but if width is for example 256px and height only 32px the image gets skewed.
It seems as if only the texture is rotating and not the vertices. However usually when only the texture is rotating it's corners gets clipped as they move outside the vertices. That doesn't happen here so I'm a bit confused.
Here is my vertex shader code:
precision lowp float;
attribute vec3 vertPosition;
attribute vec3 vertColor;
attribute vec2 aTextureCoord;
varying vec3 fragColor;
varying lowp vec2 vTextureCoord;
varying lowp vec2 vTextureCoordBg;
uniform vec2 uvOffsetBg;
uniform vec2 uvScaleBg;
uniform mat4 uPMatrix;
uniform vec2 uvOffset;
uniform vec2 uvScale;
uniform vec3 translation;
uniform vec3 scale;
uniform float rotateZ;
uniform vec2 vertPosFixAfterRotate;
void main()
{
fragColor = vertColor;
vTextureCoord = (vec4(aTextureCoord.x, aTextureCoord.y, 0, 1)).xy * uvScale + uvOffset;
vTextureCoordBg = (vec4(aTextureCoord, 0, 1)).xy * uvScaleBg + uvOffsetBg;
mat4 worldPosTrans = mat4(
vec4(scale.x*cos(rotateZ), scale.y*-sin(rotateZ), 0, 0),
vec4(scale.x*sin(rotateZ), scale.y*cos(rotateZ), 0, 0),
vec4(0, 0, scale.z, 0),
vec4(translation.x, translation.y, translation.z, 1));
gl_Position = (uPMatrix * worldPosTrans) * vec4(vertPosition.x + vertPosFixAfterRotate.x, vertPosition.y + vertPosFixAfterRotate.y, vertPosition.z, 1.0);
}
The rotation is sent from javascript to the shader through the rotateZ uniform.
You have to do the scaling before the rotation:
Scale matrix:
mat4 sm = mat4(
vec4(scale.x, 0.0, 0.0, 0.0),
vec4(0.0, scale.y, 0.0, 0.0),
vec4(0.0, 0.0, scale.z, 0.0),
vec4(0.0, 0.0, 0.0, 1.0));
Rotation matrix:
mat4 rm = mat4(
vec4(cos(rotateZ), -sin(rotateZ), 0.0, 0.0),
vec4(sin(rotateZ), cos(rotateZ), 0.0, 0.0),
vec4(0.0, 0.0, 1.0, 0.0),
vec4(0.0, 0.0, 0.0, 1.0));
Translation matrix:
mat4 tm = mat4(
vec4(1.0, 0.0, 0.0, 0.0),
vec4(0.0, 1.0, 0.0, 0.0),
vec4(0.0, 0.0, 0.0, 0.0),
vec4(translation.x, translation.y, translation.z, 1.0));
Model transformtion:
mat4 worldPosTrans = tm * rm * sm;
See the result and focus on scale.x and scale.y, in compare to the code snippet in your question:
mat4 worldPosTrans = mat4(
vec4(scale.x * cos(rotateZ), scale.x * -sin(rotateZ), 0.0, 0.0),
vec4(scale.y * sin(rotateZ), scale.y * cos(rotateZ), 0.0, 0.0),
vec4(0.0, 0.0, scale.z, 0.0),
vec4(translation.x, translation.y, translation.z, 1.0));
I try to implement Pure Depth SSAO, using this tutorial, into an OpenGL ES 2.0 engine.
Now I experience flickering, which looks like I read from somewhere, where I have no data.
Can you see where I made a mistake or do you have an idea how to solve the flickering problem ? I need this to run on mobile and html5 with forward rendering, thats why I use the depth only version of SSAO.
Many thanks
Video: Youtube
GLSL Code:
uniform sampler2D texture0;
uniform sampler2D texture1;
varying vec2 uvVarying;
vec3 GetNormalFromDepth(float depth, vec2 uv);
uniform mediump vec2 agk_resolution;
uniform float ssaoStrength;
uniform float ssaoBase;
uniform float ssaoArea;
uniform float ssaoFalloff;
uniform float ssaoRadius;
const int samples = 16;
vec3 sampleSphere[samples];
void main()
{
highp float depth = texture2D(texture0, uvVarying).r;
vec3 random = normalize( texture2D(texture1, uvVarying * agk_resolution / 64.0).rgb );
vec3 position = vec3(uvVarying, depth);
vec3 normal = GetNormalFromDepth(depth, uvVarying);
sampleSphere[0] = vec3( 0.5381, 0.1856,-0.4319);
sampleSphere[1] = vec3( 0.1379, 0.2486, 0.4430);
sampleSphere[2] = vec3( 0.3371, 0.5679,-0.0057);
sampleSphere[3] = vec3(-0.6999,-0.0451,-0.0019);
sampleSphere[3] = vec3( 0.0689,-0.1598,-0.8547);
sampleSphere[5] = vec3( 0.0560, 0.0069,-0.1843);
sampleSphere[6] = vec3(-0.0146, 0.1402, 0.0762);
sampleSphere[7] = vec3( 0.0100,-0.1924,-0.0344);
sampleSphere[8] = vec3(-0.3577,-0.5301,-0.4358);
sampleSphere[9] = vec3(-0.3169, 0.1063, 0.0158);
sampleSphere[10] = vec3( 0.0103,-0.5869, 0.0046);
sampleSphere[11] = vec3(-0.0897,-0.4940, 0.3287);
sampleSphere[12] = vec3( 0.7119,-0.0154,-0.0918);
sampleSphere[13] = vec3(-0.0533, 0.0596,-0.5411);
sampleSphere[14] = vec3( 0.0352,-0.0631, 0.5460);
sampleSphere[15] = vec3(-0.4776, 0.2847,-0.0271);
float radiusDepth = ssaoRadius/depth;
float occlusion = 0.0;
for(int i=0; i < samples; i++)
{
vec3 ray = radiusDepth * reflect(sampleSphere[i], random);
vec3 hemiRay = position + sign(dot(ray, normal)) * ray;
float occDepth = texture2D(texture0, clamp(hemiRay.xy, 0.0, 1.0)).r;
float difference = depth - occDepth;
occlusion += step(ssaoFalloff, difference) * (1.0 - smoothstep(ssaoFalloff, ssaoArea, difference));
// float rangeCheck = abs(difference) < radiusDepth ? 1.0 : 0.0;
// occlusion += (occDepth <= position.z ? 1.0 : 0.0) * rangeCheck;
}
float ao = 1.0 - ssaoStrength * occlusion * (1.0 / float(samples));
gl_FragColor = vec4(clamp(ao + ssaoBase, 0.0, 1.0));
}
vec3 GetNormalFromDepth(float depth, vec2 uv)
{
vec2 offset1 = vec2(0.0,1.0/agk_resolution.y);
vec2 offset2 = vec2(1.0/agk_resolution.x,0.0);
float depth1 = texture2D(texture0, uv + offset1).r;
float depth2 = texture2D(texture0, uv + offset2).r;
vec3 p1 = vec3(offset1, depth1 - depth);
vec3 p2 = vec3(offset2, depth2 - depth);
vec3 normal = cross(p1, p2);
normal.z = -normal.z;
return normalize(normal);
}
I carefully checked my code and the code you (Rabbid76) created for JSFiddle and came across the if (depth > 0.0) statement which solved the problem... so you somehow answered my question and I would like to thank you and mark you for that
So I've recently gotten into using WebGL and more specifically writing GLSL Shaders and I have run into a snag while writing the fragment shader for my "water" shader which is derived from this tutorial.
What I'm trying to achieve is a stepped shading (Toon shading, cell shading...) effect on waves generated by my vertex shader but the fragment shader seems to treat the waves as though they are still a flat plane and the entire mesh is drawn as one solid color.
What am I missing here? The sphere works perfectly but flat surfaces are all shaded uniformly. I have the same problem if I use a cube. Each face on the cube is shaded independently but the entire face is given a solid color.
The Scene
This is how I have my test scene set up. I have two meshes using the same material - a sphere and a plane and a light source.
The Problem
As you can see the shader is working as expected on the sphere.
I enabled wireframe for this shot to show that the vertex shader (perlin noise) is working beautifully on the plane.
But when I turn the wireframe off you can see that the fragment shader seems to be receiving the same level of light uniformly across the entire plane creating this...
Rotating the plane to face the light source will change the color of the material but again the color is applied uniformly over the entire surface of the plane.
The Fragment Shader
In all it's script kid glory lol.
uniform vec3 uMaterialColor;
uniform vec3 uDirLightPos;
uniform vec3 uDirLightColor;
uniform float uKd;
uniform float uBorder;
varying vec3 vNormal;
varying vec3 vViewPosition;
void main() {
vec4 color;
// compute direction to light
vec4 lDirection = viewMatrix * vec4( uDirLightPos, 0.0 );
vec3 lVector = normalize( lDirection.xyz );
// N * L. Normal must be normalized, since it's interpolated.
vec3 normal = normalize( vNormal );
// check the diffuse dot product against uBorder and adjust
// this diffuse value accordingly.
float diffuse = max( dot( normal, lVector ), 0.0);
if (diffuse > 0.95)
color = vec4(1.0,0.0,0.0,1.0);
else if (diffuse > 0.85)
color = vec4(0.9,0.0,0.0,1.0);
else if (diffuse > 0.75)
color = vec4(0.8,0.0,0.0,1.0);
else if (diffuse > 0.65)
color = vec4(0.7,0.0,0.0,1.0);
else if (diffuse > 0.55)
color = vec4(0.6,0.0,0.0,1.0);
else if (diffuse > 0.45)
color = vec4(0.5,0.0,0.0,1.0);
else if (diffuse > 0.35)
color = vec4(0.4,0.0,0.0,1.0);
else if (diffuse > 0.25)
color = vec4(0.3,0.0,0.0,1.0);
else if (diffuse > 0.15)
color = vec4(0.2,0.0,0.0,1.0);
else if (diffuse > 0.05)
color = vec4(0.1,0.0,0.0,1.0);
else
color = vec4(0.05,0.0,0.0,1.0);
gl_FragColor = color;
The Vertex Shader
vec3 mod289(vec3 x)
{
return x - floor(x * (1.0 / 289.0)) * 289.0;
}
vec4 mod289(vec4 x)
{
return x - floor(x * (1.0 / 289.0)) * 289.0;
}
vec4 permute(vec4 x)
{
return mod289(((x*34.0)+1.0)*x);
}
vec4 taylorInvSqrt(vec4 r)
{
return 1.79284291400159 - 0.85373472095314 * r;
}
vec3 fade(vec3 t) {
return t*t*t*(t*(t*6.0-15.0)+10.0);
}
// Classic Perlin noise
float cnoise(vec3 P)
{
vec3 Pi0 = floor(P); // Integer part for indexing
vec3 Pi1 = Pi0 + vec3(1.0); // Integer part + 1
Pi0 = mod289(Pi0);
Pi1 = mod289(Pi1);
vec3 Pf0 = fract(P); // Fractional part for interpolation
vec3 Pf1 = Pf0 - vec3(1.0); // Fractional part - 1.0
vec4 ix = vec4(Pi0.x, Pi1.x, Pi0.x, Pi1.x);
vec4 iy = vec4(Pi0.yy, Pi1.yy);
vec4 iz0 = Pi0.zzzz;
vec4 iz1 = Pi1.zzzz;
vec4 ixy = permute(permute(ix) + iy);
vec4 ixy0 = permute(ixy + iz0);
vec4 ixy1 = permute(ixy + iz1);
vec4 gx0 = ixy0 * (1.0 / 7.0);
vec4 gy0 = fract(floor(gx0) * (1.0 / 7.0)) - 0.5;
gx0 = fract(gx0);
vec4 gz0 = vec4(0.5) - abs(gx0) - abs(gy0);
vec4 sz0 = step(gz0, vec4(0.0));
gx0 -= sz0 * (step(0.0, gx0) - 0.5);
gy0 -= sz0 * (step(0.0, gy0) - 0.5);
vec4 gx1 = ixy1 * (1.0 / 7.0);
vec4 gy1 = fract(floor(gx1) * (1.0 / 7.0)) - 0.5;
gx1 = fract(gx1);
vec4 gz1 = vec4(0.5) - abs(gx1) - abs(gy1);
vec4 sz1 = step(gz1, vec4(0.0));
gx1 -= sz1 * (step(0.0, gx1) - 0.5);
gy1 -= sz1 * (step(0.0, gy1) - 0.5);
vec3 g000 = vec3(gx0.x,gy0.x,gz0.x);
vec3 g100 = vec3(gx0.y,gy0.y,gz0.y);
vec3 g010 = vec3(gx0.z,gy0.z,gz0.z);
vec3 g110 = vec3(gx0.w,gy0.w,gz0.w);
vec3 g001 = vec3(gx1.x,gy1.x,gz1.x);
vec3 g101 = vec3(gx1.y,gy1.y,gz1.y);
vec3 g011 = vec3(gx1.z,gy1.z,gz1.z);
vec3 g111 = vec3(gx1.w,gy1.w,gz1.w);
vec4 norm0 = taylorInvSqrt(vec4(dot(g000, g000), dot(g010, g010), dot(g100, g100), dot(g110, g110)));
g000 *= norm0.x;
g010 *= norm0.y;
g100 *= norm0.z;
g110 *= norm0.w;
vec4 norm1 = taylorInvSqrt(vec4(dot(g001, g001), dot(g011, g011), dot(g101, g101), dot(g111, g111)));
g001 *= norm1.x;
g011 *= norm1.y;
g101 *= norm1.z;
g111 *= norm1.w;
float n000 = dot(g000, Pf0);
float n100 = dot(g100, vec3(Pf1.x, Pf0.yz));
float n010 = dot(g010, vec3(Pf0.x, Pf1.y, Pf0.z));
float n110 = dot(g110, vec3(Pf1.xy, Pf0.z));
float n001 = dot(g001, vec3(Pf0.xy, Pf1.z));
float n101 = dot(g101, vec3(Pf1.x, Pf0.y, Pf1.z));
float n011 = dot(g011, vec3(Pf0.x, Pf1.yz));
float n111 = dot(g111, Pf1);
vec3 fade_xyz = fade(Pf0);
vec4 n_z = mix(vec4(n000, n100, n010, n110), vec4(n001, n101, n011, n111), fade_xyz.z);
vec2 n_yz = mix(n_z.xy, n_z.zw, fade_xyz.y);
float n_xyz = mix(n_yz.x, n_yz.y, fade_xyz.x);
return 2.2 * n_xyz;
}
// Classic Perlin noise, periodic variant
float pnoise(vec3 P, vec3 rep)
{
vec3 Pi0 = mod(floor(P), rep); // Integer part, modulo period
vec3 Pi1 = mod(Pi0 + vec3(1.0), rep); // Integer part + 1, mod period
Pi0 = mod289(Pi0);
Pi1 = mod289(Pi1);
vec3 Pf0 = fract(P); // Fractional part for interpolation
vec3 Pf1 = Pf0 - vec3(1.0); // Fractional part - 1.0
vec4 ix = vec4(Pi0.x, Pi1.x, Pi0.x, Pi1.x);
vec4 iy = vec4(Pi0.yy, Pi1.yy);
vec4 iz0 = Pi0.zzzz;
vec4 iz1 = Pi1.zzzz;
vec4 ixy = permute(permute(ix) + iy);
vec4 ixy0 = permute(ixy + iz0);
vec4 ixy1 = permute(ixy + iz1);
vec4 gx0 = ixy0 * (1.0 / 7.0);
vec4 gy0 = fract(floor(gx0) * (1.0 / 7.0)) - 0.5;
gx0 = fract(gx0);
vec4 gz0 = vec4(0.5) - abs(gx0) - abs(gy0);
vec4 sz0 = step(gz0, vec4(0.0));
gx0 -= sz0 * (step(0.0, gx0) - 0.5);
gy0 -= sz0 * (step(0.0, gy0) - 0.5);
vec4 gx1 = ixy1 * (1.0 / 7.0);
vec4 gy1 = fract(floor(gx1) * (1.0 / 7.0)) - 0.5;
gx1 = fract(gx1);
vec4 gz1 = vec4(0.5) - abs(gx1) - abs(gy1);
vec4 sz1 = step(gz1, vec4(0.0));
gx1 -= sz1 * (step(0.0, gx1) - 0.5);
gy1 -= sz1 * (step(0.0, gy1) - 0.5);
vec3 g000 = vec3(gx0.x,gy0.x,gz0.x);
vec3 g100 = vec3(gx0.y,gy0.y,gz0.y);
vec3 g010 = vec3(gx0.z,gy0.z,gz0.z);
vec3 g110 = vec3(gx0.w,gy0.w,gz0.w);
vec3 g001 = vec3(gx1.x,gy1.x,gz1.x);
vec3 g101 = vec3(gx1.y,gy1.y,gz1.y);
vec3 g011 = vec3(gx1.z,gy1.z,gz1.z);
vec3 g111 = vec3(gx1.w,gy1.w,gz1.w);
vec4 norm0 = taylorInvSqrt(vec4(dot(g000, g000), dot(g010, g010), dot(g100, g100), dot(g110, g110)));
g000 *= norm0.x;
g010 *= norm0.y;
g100 *= norm0.z;
g110 *= norm0.w;
vec4 norm1 = taylorInvSqrt(vec4(dot(g001, g001), dot(g011, g011), dot(g101, g101), dot(g111, g111)));
g001 *= norm1.x;
g011 *= norm1.y;
g101 *= norm1.z;
g111 *= norm1.w;
float n000 = dot(g000, Pf0);
float n100 = dot(g100, vec3(Pf1.x, Pf0.yz));
float n010 = dot(g010, vec3(Pf0.x, Pf1.y, Pf0.z));
float n110 = dot(g110, vec3(Pf1.xy, Pf0.z));
float n001 = dot(g001, vec3(Pf0.xy, Pf1.z));
float n101 = dot(g101, vec3(Pf1.x, Pf0.y, Pf1.z));
float n011 = dot(g011, vec3(Pf0.x, Pf1.yz));
float n111 = dot(g111, Pf1);
vec3 fade_xyz = fade(Pf0);
vec4 n_z = mix(vec4(n000, n100, n010, n110), vec4(n001, n101, n011, n111), fade_xyz.z);
vec2 n_yz = mix(n_z.xy, n_z.zw, fade_xyz.y);
float n_xyz = mix(n_yz.x, n_yz.y, fade_xyz.x);
return 2.2 * n_xyz;
}
varying vec2 vUv;
varying float noise;
uniform float time;
// for the cell shader
varying vec3 vNormal;
varying vec3 vViewPosition;
float turbulence( vec3 p ) {
float w = 100.0;
float t = -.5;
for (float f = 1.0 ; f <= 10.0 ; f++ ){
float power = pow( 2.0, f );
t += abs( pnoise( vec3( power * p ), vec3( 10.0, 10.0, 10.0 ) ) / power );
}
return t;
}
varying vec3 vertexWorldPos;
void main() {
vUv = uv;
// add time to the noise parameters so it's animated
noise = 10.0 * -.10 * turbulence( .5 * normal + time );
float b = 25.0 * pnoise( 0.05 * position + vec3( 2.0 * time ), vec3( 100.0 ) );
float displacement = - 10. - noise + b;
vec3 newPosition = position + normal * displacement;
gl_Position = projectionMatrix * modelViewMatrix * vec4( newPosition, 1.0 );
// for the cell shader effect
vNormal = normalize( normalMatrix * normal );
vec4 mvPosition = modelViewMatrix * vec4( position, 1.0 );
vViewPosition = -mvPosition.xyz;
}
Worth Mention
I am using the Three.js library
My light source is an instance of THREE.SpotLight
First of all, shadows are completely different. Your problem here is a lack of change in the per-vertex normal after displacement. Correcting this is not going to get you shadows, but your lighting will at least vary across your displaced geometry.
If you have access to partial derivatives, you can do this in the fragment shader. Otherwise, you are kind of out of luck in GL ES, due to a lack of vertex adjacency information. You could also compute per-face normals with a Geometry Shader, but that is not an option in WebGL.
This should be all of the necessary changes to implement this, note that it requires partial derivative support (optional extension in OpenGL ES 2.0).
Vertex Shader:
varying vec3 vertexViewPos; // NEW
void main() {
...
vec3 newPosition = position + normal * displacement;
vertexViewPos = (modelViewMatrix * vec4 (newPosition, 1.0)).xyz; // NEW
...
}
Fragment Shader:
#extension GL_OES_standard_derivatives : require
uniform vec3 uMaterialColor;
uniform vec3 uDirLightPos;
uniform vec3 uDirLightColor;
uniform float uKd;
uniform float uBorder;
varying vec3 vNormal;
varying vec3 vViewPosition;
varying vec3 vertexViewPos; // NEW
void main() {
vec4 color;
// compute direction to light
vec4 lDirection = viewMatrix * vec4( uDirLightPos, 0.0 );
vec3 lVector = normalize( lDirection.xyz );
// N * L. Normal must be normalized, since it's interpolated.
vec3 normal = normalize(cross (dFdx (vertexViewPos), dFdy (vertexViewPos))); // UPDATED
...
}
To enable partial derivative support in WebGL you need to check the extension like this:
var ext = gl.getExtension("OES_standard_derivatives");
if (!ext) {
alert("OES_standard_derivatives does not exist on this machine");
return;
}
// proceed with the shaders above.
I want to write a shader that creates a reflection of an image similiar to the ones used for coverflows.
// Vertex Shader
uniform highp mat4 u_modelViewMatrix;
uniform highp mat4 u_projectionMatrix;
attribute highp vec4 a_position;
attribute lowp vec4 a_color;
attribute highp vec2 a_texcoord;
varying lowp vec4 v_color;
varying highp vec2 v_texCoord;
mat4 rot = mat4( -1.0, 0.0, 0.0, 0.0,
0.0, -1.0, 0.0, 0.0,
0.0, 0.0, 1.0, 0.0,
0.0, 0.0, 0.0, 1.0 );
void main()
{
gl_Position = (u_projectionMatrix * u_modelViewMatrix) * a_position * rot;
v_color = a_color;
v_texCoord = a_texcoord;
}
// Fragment Shader
varying highp vec2 v_texCoord;
uniform sampler2D u_texture0;
uniform int slices;
void main()
{
lowp vec3 w = vec3(1.0,1.0,1.0);
lowp vec3 b = vec3(0.0,0.0,0.0);
lowp vec3 mix = mix(b, w, (v_texCoord.y-(float(slices)/10.0)));
gl_FragColor = texture2D(u_texture0,v_texCoord) * vec4(mix, 1.0);
}
But this shader is creating the following:
current result
And I dont know how to "flip" the image horizontally and I tried so many different parameters in the rotation matrix (I even tried to use a so called "mirror matrix") but I dont know how to reflect the image on the bottom of original image.
If you're talking about what images.google.com returns for "coverflow" result, then you don't need rotation matrix at all.
void main()
{
gl_Position = (u_projectionMatrix * u_modelViewMatrix) * a_position;
v_color = a_color;
v_texCoord = vec2(a_texcoord.x, 1.0 - a_texcoord.y);
}
Simply flip it vertically.
If you insist on using matrix and want to make a "mirror" shader (the one that takes it object, and puts it under "floor" to make reflection) then you need mirror matrix (don't forget to adjust frontface/backface culling):
mat4(1.0, 0.0, 0.0, 0.0,
0.0, -1.0, 0.0, 0.0,
0.0, 0.0, 1.0, 0.0,
0.0, 0.0, 0.0, 1.0 );
AND you must know where the floor is.
gl_Position = (u_projectionMatrix * u_modelViewMatrix) * (a_position * mirrorMatrix - floor);
Alternatively you could put floor translation into same matrix. Basically, to mirror against arbitrary height, you need to combine three transforms (pseudocode).
translate(0, -floorHeight, 0) * scale(1, -1, 1) * translate(0, floorHeight, 0).
and put them into your matrix.
Also it might make sense to split modelView matrix into "model"(object/world) and "view" matrices. This way it'll be easier to perform transformations like these.