Three.js: Black artifacts using SubSurface Scattering shader

I would like to use a ShaderMaterial in Three.js that allows some light to pass through. As I learned recently, the effect I need is called "subsurface scattering". I found several examples, but only a few allow real-time calculation (without additional mapping). The closest one is shown in this snippet:
var container;
var camera, scene, renderer;
var sssMesh;
var lightSourceMesh;
var sssUniforms;
var clock = new THREE.Clock();

init();
animate();

function init() {
  container = document.getElementById('container');
  camera = new THREE.PerspectiveCamera(40, window.innerWidth / window.innerHeight, 1, 3000);
  camera.position.z = 4;
  camera.position.y = 2;
  camera.rotation.x = -0.45;
  scene = new THREE.Scene();
  var boxGeometry = new THREE.CubeGeometry(0.75, 0.75, 0.75);
  var lightSourceGeometry = new THREE.CubeGeometry(0.1, 0.1, 0.1);
  sssUniforms = {
    u_lightPos: {
      type: "v3",
      value: new THREE.Vector3()
    }
  };
  var sssMaterial = new THREE.ShaderMaterial({
    uniforms: sssUniforms,
    vertexShader: document.getElementById('vertexShader').textContent,
    fragmentShader: document.getElementById('fragment_shader').textContent
  });
  var lightSourceMaterial = new THREE.MeshBasicMaterial();
  sssMesh = new THREE.Mesh(boxGeometry, sssMaterial);
  sssMesh.position.x = 0;
  sssMesh.position.y = 0;
  scene.add(sssMesh);
  lightSourceMesh = new THREE.Mesh(lightSourceGeometry, lightSourceMaterial);
  lightSourceMesh.position.x = 0;
  lightSourceMesh.position.y = 0;
  scene.add(lightSourceMesh);
  renderer = new THREE.WebGLRenderer();
  container.appendChild(renderer.domElement);
  onWindowResize();
  window.addEventListener('resize', onWindowResize, false);
}

function onWindowResize(event) {
  camera.aspect = window.innerWidth / window.innerHeight;
  camera.updateProjectionMatrix();
  renderer.setSize(window.innerWidth, window.innerHeight);
}

function animate() {
  requestAnimationFrame(animate);
  render();
}

function render() {
  var delta = clock.getDelta();
  var lightHeight = Math.sin(clock.elapsedTime * 1.0) * 0.5 + 0.7;
  lightSourceMesh.position.y = lightHeight;
  sssUniforms.u_lightPos.value.y = lightHeight;
  sssMesh.rotation.y += delta * 0.5;
  renderer.render(scene, camera);
}
body {
  color: #ffffff;
  background-color: #050505;
  margin: 0px;
  overflow: hidden;
}
<script src="http://threejs.org/build/three.min.js"></script>
<div id="container"></div>
<script id="fragment_shader" type="x-shader/x-fragment">
  varying vec3 v_fragmentPos;
  varying vec3 v_normal;
  uniform vec3 u_lightPos;

  void main(void)
  {
    vec3 _LightColor0 = vec3(1.0, 0.5, 0.5);
    float _LightIntensity0 = 0.2;
    vec3 translucencyColor = vec3(0.8, 0.2, 0.2);
    vec3 toLightVector = u_lightPos - v_fragmentPos;
    float lightDistanceSQ = dot(toLightVector, toLightVector);
    vec3 lightDir = normalize(toLightVector);
    float ndotl = max(0.0, dot(v_normal, lightDir));
    float inversendotl = step(0.0, dot(v_normal, -lightDir));
    vec3 lightColor = _LightColor0.rgb * ndotl / lightDistanceSQ * _LightIntensity0;
    vec3 subsurfacecolor = translucencyColor.rgb * inversendotl / lightDistanceSQ * _LightIntensity0;
    vec3 final = subsurfacecolor + lightColor;
    gl_FragColor = vec4(final, 1.0);
  }
</script>
<script id="vertexShader" type="x-shader/x-vertex">
  varying vec3 v_fragmentPos;
  varying vec3 v_normal;

  void main()
  {
    vec4 mvPosition = modelViewMatrix * vec4( position, 1.0 );
    v_fragmentPos = (modelMatrix * vec4( position, 1.0 )).xyz;
    v_normal = (modelMatrix * vec4( normal, 0.0 )).xyz;
    gl_Position = projectionMatrix * mvPosition;
  }
</script>
The code above works perfectly on simple geometry (a cube in this case), but it draws some strange black faces on more complex meshes. The final objective is to "illuminate" heightmap terrains, but applying this same code, I get this:
As you can see, the "glass effect" is very nice in the lit areas, but in the darker ones (where the light doesn't reach as well) it draws some fully black faces that I don't know how to avoid.
I consider myself a medium-level Three.js user, but this is the first time I have played with shaders, and it is not proving easy, to be fair. Every time I change something in the shader code, the result is visual garbage. The only thing I have managed to change properly is the light colors (both front and back). I also tried increasing the light intensity, but that doesn't work; it only "burns" the shader more, making even more black faces appear.
Can anyone point me in the right direction? What is this effect called in the first place? I don't even know how to search for it in WebGL resources. Any help would be appreciated!
EDIT: It seems that making the terrain thicker (with a higher Z scale) solves the issue. Maybe this has something to do with the angle of the light against the faces? This also happens in the original snippet just as the light enters the cube (you can see a fully black face for a frame). Or maybe I'm just talking nonsense. This is easily the hardest piece of code I have faced in years, and it's just 10 lines! I just want the shader to look as good at the original scale as it does at the thicker one, but the complexity of the physics involved in those formulas is beyond me.

There are a few places where you potentially divide by zero in your fragment shader, which often shows up as black (lines 3 and 4 of the excerpt below):
float ndotl = max(0.0, dot(v_normal, lightDir));
float inversendotl = step(0.0, dot(v_normal, -lightDir));
vec3 lightColor = _LightColor0.rgb * ndotl / lightDistanceSQ * _LightIntensity0;
vec3 subsurfacecolor = translucencyColor.rgb * inversendotl / lightDistanceSQ * _LightIntensity0;
vec3 final = subsurfacecolor + lightColor;
Both ndotl and inversendotl can have a value of 0 (and pretty often do, since you clamp them in both cases), and lightDistanceSQ approaches zero wherever the light sits right on the surface, so either of the two terms can end up dividing by zero or going fully black. Try commenting out either subsurfacecolor or lightColor to isolate the faulty one (it might be both).
The max() can be kept away from zero with a tiny delta: max(0.0001, x).
The step() can be replaced with static branching, which also has the benefit of getting rid of that extra dot product: since you are just negating one of the vectors, the result is simply the opposite of the dot product you already have.
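As a minimal sketch of both fixes (using the variable names from the question's shader; the 0.0001 epsilon is an arbitrary guard, not a physically derived value), the distance and lighting part of the fragment shader could become:
  vec3 toLightVector = u_lightPos - v_fragmentPos;
  // Guard the denominator so it can never reach zero.
  float lightDistanceSQ = max(0.0001, dot(toLightVector, toLightVector));
  vec3 lightDir = normalize(toLightVector);
  // One dot product, reused for both the front and the back term.
  float ndl = dot(v_normal, lightDir);
  vec3 lightColor = vec3(0.0);
  vec3 subsurfacecolor = vec3(0.0);
  if (ndl > 0.0) {
    // Front-facing: regular diffuse contribution.
    lightColor = _LightColor0.rgb * ndl / lightDistanceSQ * _LightIntensity0;
  } else {
    // Back-facing: the translucency term replaces the step().
    subsurfacecolor = translucencyColor.rgb / lightDistanceSQ * _LightIntensity0;
  }
  gl_FragColor = vec4(subsurfacecolor + lightColor, 1.0);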

Related

How can I make waves from the center of a Plane Geometry in Three.JS using the vertex shader?

I've been learning Three.js and I can't seem to wrap my head around shaders. I have an idea of what I want, and I know the mathematical tools within the GLSL language and what they do in simple terms, but I don't understand how they work together.
I have a plane geometry with a shader material, I want to be able to create waves from the center of the vertex shader, but I am unsure how to accomplish this.
Also, if there is a course or documentation you can provide that explains simple concepts regarding vertex and fragment shaders, that would be great!
This is what I have done so far:
varying vec2 vUv;
varying float vuTime;
varying float vElevation;
uniform float uTime;

void main() {
  vec4 modelPosition = modelMatrix * vec4(position, 1.0);
  float elevation = sin(modelPosition.x * 10.0 - uTime) * 0.1;
  modelPosition.y += elevation;
  vec4 viewPosition = viewMatrix * modelPosition;
  vec4 projectedPosition = projectionMatrix * viewPosition;
  gl_Position = projectedPosition;
  vuTime = uTime;
  vUv = uv;
  vElevation = elevation;
}
I have set up a simple animation using the sin function and a time variable passed to the shader, which creates a simple wave effect without the use of noise. I am trying to create a circular wave stemming from the center of the plane geometry.
What I THINK I have to do is use PI to offset the position away from the center while the wave moves with uTime. To get to the center of the plane geometry I need to offset the position by a 0.5 float.
That is my understanding right now, and I would love to know if I'm correct in my thinking, or what the correct way of accomplishing this is.
I am also passing the varying variable to the fragment shader to control the color at the elevation.
Thanks for any help you guys provide; I appreciate it!
In your shader code, try to change this line
float elevation = sin(modelPosition.x * 10.0 - uTime) * 0.1;
to this
float elevation = sin(length(modelPosition.xz) * 10.0 - uTime) * 0.1;
You can use either UV coords or position.
let scene = new THREE.Scene();
let camera = new THREE.PerspectiveCamera(60, innerWidth / innerHeight, 1, 1000);
camera.position.set(0, 10, 10).setLength(10);
let renderer = new THREE.WebGLRenderer();
renderer.setSize(innerWidth, innerHeight);
document.body.appendChild(renderer.domElement);

let controls = new THREE.OrbitControls(camera, renderer.domElement);

scene.add(new THREE.GridHelper(10, 10, "magenta", "yellow"));

let g = new THREE.PlaneGeometry(10, 10, 50, 50);
let m = new THREE.ShaderMaterial({
  wireframe: true,
  uniforms: {
    time: {value: 0},
    color: {value: new THREE.Color("aqua")}
  },
  vertexShader: `
    #define PI 3.1415926
    #define PI2 PI*2.
    uniform float time;
    void main() {
      vec3 pos = position;
      // Radial wave: the distance from the UV center (0.5, 0.5) drives the phase.
      pos.z = sin((length(uv - 0.5) - time) * 6. * PI2);
      gl_Position = projectionMatrix * modelViewMatrix * vec4(pos, 1.);
    }
  `,
  fragmentShader: `
    uniform vec3 color;
    void main() {
      gl_FragColor = vec4(color, 1.);
    }
  `
});
let o = new THREE.Mesh(g, m);
o.rotation.x = -Math.PI * 0.5;
scene.add(o);

let clock = new THREE.Clock();
renderer.setAnimationLoop(() => {
  let t = clock.getElapsedTime();
  m.uniforms.time.value = t * 0.1;
  renderer.render(scene, camera);
});
body {
  overflow: hidden;
  margin: 0;
}
<script src="https://threejs.org/build/three.min.js"></script>
<script src="https://threejs.org/examples/js/controls/OrbitControls.js"></script>

Push particles away from the mouse position in GLSL and three.js

I have the following setup for my THREE.Points Object:
this.particleGeometry = new THREE.BufferGeometry()
this.particleMaterial = new THREE.ShaderMaterial({
  vertexShader: vshader,
  fragmentShader: fshader,
  blending: THREE.AdditiveBlending,
  depthWrite: false,
  uniforms: {
    uTime: new THREE.Uniform(0),
    uMousePosition: this.mousePosition
  }
})
and then some code to place points in the BufferGeometry on a sphere. That is working fine.
I also set up a Raycaster to track the mouse position intersecting a hidden plane and then update the uniform uMousePosition accordingly. That also works fine, I get the mouse position sent to my vertex shader.
Now I am trying to make the particles within a certain distance d of the mouse push away from it (the closest ones pushed the most, of course), and also to apply a gravity back towards their original positions, restoring everything over time.
So here is what I have in my vertex shader:
uniform vec3 mousePosition; // assumed declaration (the JS side calls this uniform uMousePosition)

float lerp(float a, float b, float amount) {
  return a + (b - a) * amount;
}

void main() {
  vec3 p = position;
  float dist = min(distance(p, mousePosition), 1.);
  float lerpFactor = .2;
  p.x = lerp(p.x, position.x * dist, lerpFactor);
  p.y = lerp(p.y, position.y * dist, lerpFactor);
  p.z = lerp(p.z, position.z * dist, lerpFactor); // mouse is always at z = 0
  vec4 mvPosition = modelViewMatrix * vec4(p, 1.);
  gl_PointSize = 30. * (1. / -mvPosition.z);
  gl_Position = projectionMatrix * mvPosition;
}
And here is what it looks like when the mouse is outside the sphere (I added a small sphere that moves with the mouse position to indicate it):
And here when the mouse is inside:
The outside case already looks kind of correct, but with the mouse inside, the particles only move closer to their original positions, where it should push them further outside instead. I guess I somehow have to determine the direction, not just the distance.
Also, the lerp method does not lerp; the particles jump directly to their positions.
So I wonder how I get the correct distance to the mouse so the particles always move within a certain area, and also how to animate the lerp / gravity effect.
That's how you could do it as a first approximation:
body {
  overflow: hidden;
  margin: 0;
}
<script type="module">
import * as THREE from "https://cdn.skypack.dev/three@0.136.0";
import {OrbitControls} from "https://cdn.skypack.dev/three@0.136.0/examples/jsm/controls/OrbitControls.js";
import * as BufferGeometryUtils from "https://cdn.skypack.dev/three@0.136.0/examples/jsm/utils/BufferGeometryUtils.js";

let scene = new THREE.Scene();
let camera = new THREE.PerspectiveCamera(60, innerWidth / innerHeight, 1, 100);
camera.position.set(0, 0, 10);
let renderer = new THREE.WebGLRenderer();
renderer.setSize(innerWidth, innerHeight);
document.body.appendChild(renderer.domElement);

let controls = new OrbitControls(camera, renderer.domElement);

let marker = new THREE.Mesh(new THREE.SphereGeometry(0.5, 16, 8), new THREE.MeshBasicMaterial({color: "red", wireframe: true}));
scene.add(marker);

let g = new THREE.IcosahedronGeometry(4, 20);
g = BufferGeometryUtils.mergeVertices(g);
let uniforms = {
  mousePos: {value: new THREE.Vector3()}
}
let m = new THREE.PointsMaterial({
  size: 0.1,
  onBeforeCompile: shader => {
    shader.uniforms.mousePos = uniforms.mousePos;
    shader.vertexShader = `
      uniform vec3 mousePos;
      ${shader.vertexShader}
    `.replace(
      `#include <begin_vertex>`,
      `#include <begin_vertex>
        vec3 seg = position - mousePos;
        vec3 dir = normalize(seg);
        float dist = length(seg);
        if (dist < 2.){
          float force = clamp(1. / (dist * dist), 0., 1.);
          transformed += dir * force;
        }
      `
    );
    console.log(shader.vertexShader);
  }
});
let p = new THREE.Points(g, m);
scene.add(p);

let clock = new THREE.Clock();
renderer.setAnimationLoop( _ => {
  let t = clock.getElapsedTime();
  marker.position.x = Math.sin(t * 0.5) * 5;
  marker.position.y = Math.cos(t * 0.3) * 5;
  uniforms.mousePos.value.copy(marker.position);
  renderer.render(scene, camera);
})
</script>
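The push above is applied instantaneously every frame. To get the eased lerp / gravity behaviour from the question, one option (a sketch, not the only approach; uStrength is a made-up uniform name that you would declare next to mousePos and register in onBeforeCompile) is to scale the displacement by a strength value that the JavaScript loop eases towards a target each frame, e.g. strength += (target - strength) * 0.1. The injected chunk would then become:
  #include <begin_vertex>
  vec3 seg = position - mousePos;
  vec3 dir = normalize(seg);
  float dist = length(seg);
  if (dist < 2.) {
    float force = clamp(1. / (dist * dist), 0., 1.);
    // uStrength is eased between 0 and 1 on the CPU each frame,
    // so the push fades in and out instead of snapping.
    transformed += dir * force * uStrength;
  }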

Particle system design using Three.js and shaders

I'm very new to this community, so if there is something I claim that is not right, please correct me.
Now to the point: I'm designing a particle system using the Three.js library; in particular, I'm using THREE.Geometry() and controlling the vertices using a shader. I want my particle movement restricted to the inside of a box, which means that when a particle crosses a face of the box, its new position will be on the opposite side of that face.
Here's my approach, in the vertex shader:
uniform float elapsedTime;

void main() {
  gl_PointSize = 3.2;
  vec3 pos = position;
  pos.y -= elapsedTime * 2.1;
  if (pos.y < -100.0) {
    pos.y = 100.0;
  }
  gl_Position = projectionMatrix * modelViewMatrix * vec4(pos, 1.0);
}
The elapsedTime is sent from the JavaScript animation loop via a uniform, and the y position of each vertex is updated according to the time. As a test, I want a particle that drops below the bottom plane (y = -100) to move to the top plane. That was my plan. And this is the result after they all reach the bottom:
Start to fall
After reaching the bottom
So, what am I missing here?
You can achieve it using the mod function:
var scene = new THREE.Scene();
var camera = new THREE.PerspectiveCamera(60, innerWidth / innerHeight, 1, 1000);
camera.position.set(0, 0, 300);
var renderer = new THREE.WebGLRenderer({
  antialias: true
});
renderer.setSize(innerWidth, innerHeight);
document.body.appendChild(renderer.domElement);

var controls = new THREE.OrbitControls(camera, renderer.domElement);

var gridTop = new THREE.GridHelper(200, 10);
gridTop.position.y = 100;
var gridBottom = new THREE.GridHelper(200, 10);
gridBottom.position.y = -100;
scene.add(gridTop, gridBottom);

var pts = [];
for (let i = 0; i < 1000; i++) {
  pts.push(new THREE.Vector3(Math.random() - 0.5, Math.random() - 0.5, Math.random() - 0.5).multiplyScalar(100));
}
var geom = new THREE.BufferGeometry().setFromPoints(pts);
var mat = new THREE.PointsMaterial({
  size: 2,
  color: "aqua"
});
var uniforms = {
  time: {
    value: 0
  },
  highY: {
    value: 100
  },
  lowY: {
    value: -100
  }
}
mat.onBeforeCompile = shader => {
  shader.uniforms.time = uniforms.time;
  shader.uniforms.highY = uniforms.highY;
  shader.uniforms.lowY = uniforms.lowY;
  console.log(shader.vertexShader);
  shader.vertexShader = `
    uniform float time;
    uniform float highY;
    uniform float lowY;
  ` + shader.vertexShader;
  shader.vertexShader = shader.vertexShader.replace(
    `#include <begin_vertex>`,
    `#include <begin_vertex>
      // wrap y into [lowY, highY) as the particles fall over time
      float totalY = highY - lowY;
      transformed.y = highY - mod(highY - (transformed.y - time * 20.), totalY);
    `
  );
}
var points = new THREE.Points(geom, mat);
scene.add(points);

var clock = new THREE.Clock();
renderer.setAnimationLoop(() => {
  uniforms.time.value = clock.getElapsedTime();
  renderer.render(scene, camera);
});
body {
  overflow: hidden;
  margin: 0;
}
<script src="https://threejs.org/build/three.min.js"></script>
<script src="https://threejs.org/examples/js/controls/OrbitControls.js"></script>
You cannot change state in a shader. A vertex shader's only outputs are gl_Position (to generate points/lines/triangles) and the varyings that get passed to the fragment shader. A fragment shader's only output is gl_FragColor (in general). So trying to change pos.y will do nothing: the moment the shader exits, your change is forgotten.
For your particle code, though, you could make the position a repeating function of the time:
const float duration = 5.0;
float t = fract(elapsedTime / duration);
pos.y = mix(-100.0, 100.0, t);
Assuming elapsedTime is in seconds, pos.y will go from -100 to 100 over 5 seconds and repeat.
Note that in this case all the particles will fall at the same time. You could add an attribute to give each of them a different time offset, or you could work their positions into your own formula; a sketch of the attribute approach is shown below. Related to that, you might find this article useful.
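A minimal sketch of that attribute approach (the attribute name timeOffset is hypothetical; on the JavaScript side you would fill it with one random value per particle via geometry.setAttribute and a Float32Array):
  attribute float timeOffset; // hypothetical per-particle offset, filled from JS
  uniform float elapsedTime;
  const float duration = 5.0;

  void main() {
    gl_PointSize = 3.2;
    vec3 pos = position;
    // Each particle runs the same repeating cycle, shifted by its own offset.
    float t = fract((elapsedTime + timeOffset) / duration);
    pos.y = mix(-100.0, 100.0, t);
    gl_Position = projectionMatrix * modelViewMatrix * vec4(pos, 1.0);
  }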
You could also do the particle movement in JavaScript, like this example and this one, updating the positions in the Geometry (or better, BufferGeometry).
Yet another solution is to do the movement in a separate shader pass, storing the positions in a texture and updating them into a new texture, then using that texture as input to another set of shaders that draws the particles.
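As a rough sketch of that texture-based approach (the uniform names here are assumptions; three.js ships a GPUComputationRenderer helper in its examples that wires this up), the update pass is a full-screen fragment shader rendered into a floating-point target that is swapped with the previous one each frame:
  uniform sampler2D positionsTex; // previous positions, one particle per texel
  uniform float deltaTime;
  varying vec2 vUv;

  void main() {
    vec3 pos = texture2D(positionsTex, vUv).xyz;
    pos.y -= deltaTime * 2.1;      // same fall as in the question
    if (pos.y < -100.0) {
      pos.y = 100.0;               // wrap back to the top plane
    }
    gl_FragColor = vec4(pos, 1.0); // becomes the next frame's input
  }
The particle vertex shader then fetches its position from this texture instead of reading the position attribute.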

How to mix / blend between 2 vertex positions based on distance from camera?

I'm trying to mix / blend between 2 different vertex positions depending on the distance from the camera. Specifically, I'm trying to create an effect that blends between a horizontal plane closer to the camera and a vertical plane in the distance. The result should be a curved plane going away and up from the current camera position.
I want to blend from this (a plane flat on the ground):
To this (the same plane, just rotated 90 degrees):
The implementation I have so far feels close, but I just can't put my finger on what pieces I need to finish it. I took an approach from a similar Tangram demo (shader code); however, I'm unable to get results anywhere near this. The Tangram example also uses a completely different setup to what I'm using in Three.js, so I've not been able to replicate everything.
This is what I have so far: https://jsfiddle.net/robhawkes/a97tu864/
varying float distance;

mat4 rotateX(float rotationX) {
  return mat4(
    vec4(1.0, 0.0, 0.0, 0.0),
    vec4(0.0, cos(rotationX), -sin(rotationX), 0.0),
    vec4(0.0, sin(rotationX), cos(rotationX), 0.0),
    vec4(0.0, 0.0, 0.0, 1.0)
  );
}

void main()
{
  vec4 vPosition = vec4(position, 1.0);
  vec4 modelViewPosition = modelViewMatrix * vPosition;

  float bend = radians(-90.0);
  vec4 newPos = rotateX(bend) * vPosition;

  distance = -modelViewPosition.z;

  // Show bent position
  //gl_Position = projectionMatrix * modelViewMatrix * newPos;

  float factor = 0.0;
  //if (vPosition.x > 0.0) {
  //  factor = 1.0;
  //}
  //factor = clamp(0.0, 1.0, distance / 2000.0);

  vPosition = mix(vPosition, newPos, factor);
  gl_Position = projectionMatrix * modelViewMatrix * vPosition;
}
I'm doing the following:
1. Calculate the rotated position of the vertex (the vertical version).
2. Find the distance from the vertex to the camera.
3. Use mix to blend between the horizontal position and the vertical position depending on the distance.
I've tried multiple approaches and I just can't seem to get it to work correctly.
Any ideas? Even pointing me down the right path will be immensely helpful as my shader/matrix knowledge is limited.
The major issue is that you tessellate the THREE.PlaneBufferGeometry in width segments, but not in height segments:
groundGeometry = new THREE.PlaneBufferGeometry(
1000, 10000,
100, // <----- widthSegments
100 ); // <----- heightSegments is missing
Now you can use the z coordinate of the view space for the interpolation:
float factor = -modelViewPosition.z / 2000.0;
var camera, controls, scene, renderer;
var groundGeometry, groundMaterial, groundMesh;
var ambientLight;

init();
initLight();
initGround();
animate();

function init() {
  camera = new THREE.PerspectiveCamera( 70, window.innerWidth / window.innerHeight, 0.01, 10000 );
  camera.position.y = 500;
  camera.position.z = 1000;
  controls = new THREE.MapControls( camera );
  controls.maxPolarAngle = Math.PI / 2;
  scene = new THREE.Scene();
  scene.add(camera);
  var axesHelper = new THREE.AxesHelper( 500 );
  scene.add( axesHelper );
  renderer = new THREE.WebGLRenderer( { antialias: true } );
  renderer.setSize( window.innerWidth, window.innerHeight );
  document.body.appendChild( renderer.domElement );
}

function initLight() {
  ambientLight = new THREE.AmbientLight( 0x404040 );
  scene.add( ambientLight );
}

function initGround() {
  groundMaterial = new THREE.ShaderMaterial({
    vertexShader: document.getElementById( 'vertexShader' ).textContent,
    fragmentShader: document.getElementById( 'fragmentShader' ).textContent,
    transparent: true
  });
  groundGeometry = new THREE.PlaneBufferGeometry( 1000, 10000, 100, 100 );
  groundMesh = new THREE.Mesh( groundGeometry, groundMaterial );
  groundMesh.position.z = -3000;
  groundMesh.position.y = -100;
  groundMesh.rotateX(-Math.PI / 2)
  scene.add( groundMesh );
}

function animate() {
  requestAnimationFrame( animate );
  controls.update();
  renderer.render( scene, camera );
}
<script type="x-shader/x-vertex" id="vertexShader">
  varying float distance;

  mat4 rotateX(float rotationX) {
    return mat4(
      vec4(1.0, 0.0, 0.0, 0.0),
      vec4(0.0, cos(rotationX), -sin(rotationX), 0.0),
      vec4(0.0, sin(rotationX), cos(rotationX), 0.0),
      vec4(0.0, 0.0, 0.0, 1.0)
    );
  }

  void main()
  {
    vec4 vPosition = vec4(position, 1.0);
    vec4 modelViewPosition = modelViewMatrix * vPosition;
    float bend = radians(-90.0);
    vec4 newPos = rotateX(bend) * vPosition;
    distance = -modelViewPosition.z;
    float factor = -modelViewPosition.z / 2000.0;
    vPosition = mix(vPosition, newPos, factor);
    gl_Position = projectionMatrix * modelViewMatrix * vPosition;
  }
</script>
<script type="x-shader/x-fragment" id="fragmentShader">
  varying float distance;
  void main() {
    if (distance < 3000.0) {
      gl_FragColor = vec4(1.0, 0.0, 0.0, 1.0);
    } else {
      gl_FragColor = vec4(0.0, 1.0, 0.0, 1.0);
    }
  }
</script>
<script src="https://threejs.org/build/three.min.js"></script>
<script src="https://rawgit.com/mrdoob/three.js/dev/examples/js/controls/MapControls.js"></script>

Removing moire patterns produced by GLSL shaders

I have set up this minimal test case, in which you can easily see the moire patterns produced by undersampling the oscillating red colour using a custom fragment shader (jsfiddle).
What is the general technique for removing such patterns using GLSL? I assume it involves the derivatives extension, but I've never quite understood how to implement it. I basically have to do anti-aliasing, I think?
var canvas = document.getElementById('canvas');
var scene = new THREE.Scene();
var renderer = new THREE.WebGLRenderer({canvas: canvas, antialias: true});
var camera = new THREE.PerspectiveCamera(75, canvas.clientWidth / canvas.clientHeight, 1, 1000);

var geometry = new THREE.SphereGeometry(50, 50, 50);
var material = new THREE.ShaderMaterial({
  vertexShader: document.getElementById('vertex-shader').textContent,
  fragmentShader: document.getElementById('fragment-shader').textContent
});
var sphere = new THREE.Mesh(geometry, material);
scene.add(sphere);
camera.position.z = 100;

var period = 30;
var clock = new THREE.Clock();
render();

function render() {
  requestAnimationFrame(render);
  if (canvas.width !== canvas.clientWidth || canvas.height !== canvas.clientHeight) {
    renderer.setSize(canvas.clientWidth, canvas.clientHeight, false);
    camera.aspect = canvas.clientWidth / canvas.clientHeight;
    camera.updateProjectionMatrix();
  }
  sphere.rotation.y -= clock.getDelta() * 2 * Math.PI / period;
  renderer.render(scene, camera);
}
html, body, #canvas {
  margin: 0;
  padding: 0;
  width: 100%;
  height: 100%;
  overflow: hidden;
}
<script src="https://cdnjs.cloudflare.com/ajax/libs/three.js/r73/three.min.js"></script>
<canvas id="canvas"></canvas>
<script id="vertex-shader" type="x-shader/x-vertex">
  varying vec2 vUv;
  void main() {
    vUv = uv;
    gl_Position = projectionMatrix * modelViewMatrix * vec4(position, 1.0);
  }
</script>
<script id="fragment-shader" type="x-shader/x-fragment">
  #define M_TAU 6.2831853071795864769252867665590
  varying vec2 vUv;
  void main() {
    float w = sin(500.0 * M_TAU * vUv.x) / 2.0 + 0.5;
    vec3 color = vec3(w, 0.0, 0.0);
    gl_FragColor = vec4(color, 1.0);
  }
</script>
Update: I've tried to implement super-sampling; I'm not sure I have implemented it correctly, but it doesn't seem to help much.
Unfortunately, the moire pattern here is a result of the high-contrast lines approaching the Nyquist Frequency. In other words, there's no good way to have a 1- or 2-pixel-wide high-contrast line smoothly shift to the next pixel over, without either introducing such artifacts, or blurring the lines to be indistinguishable.
You mentioned the derivatives extension, and indeed that extension can be used to figure out how quickly your UVs are changing in screen space, and thus, figure out how much blurring is needed to sort of sweep this problem under the rug. In the modified version of your own example below, I attempt to use fwidth to turn the sphere red where the noise gets bad. Try playing with some of the floats that are defined to constants here, see what you can find.
var canvas = document.getElementById('canvas');
var scene = new THREE.Scene();
var renderer = new THREE.WebGLRenderer({canvas: canvas, antialias: true});
var camera = new THREE.PerspectiveCamera(75, canvas.clientWidth / canvas.clientHeight, 1, 1000);

var geometry = new THREE.SphereGeometry(50, 50, 50);
var material = new THREE.ShaderMaterial({
  vertexShader: document.getElementById('vertex-shader').textContent,
  fragmentShader: document.getElementById('fragment-shader').textContent
});
var sphere = new THREE.Mesh(geometry, material);
scene.add(sphere);
camera.position.z = 100;

var period = 30;
var clock = new THREE.Clock();
render();

function render() {
  requestAnimationFrame(render);
  if (canvas.width !== canvas.clientWidth || canvas.height !== canvas.clientHeight) {
    renderer.setSize(canvas.clientWidth, canvas.clientHeight, false);
    camera.aspect = canvas.clientWidth / canvas.clientHeight;
    camera.updateProjectionMatrix();
  }
  sphere.rotation.y -= clock.getDelta() * 2 * Math.PI / period;
  renderer.render(scene, camera);
}
html, body, #canvas {
  margin: 0;
  padding: 0;
  width: 100%;
  height: 100%;
  overflow: hidden;
}
<script src="https://cdnjs.cloudflare.com/ajax/libs/three.js/r73/three.min.js"></script>
<canvas id="canvas"></canvas>
<script id="vertex-shader" type="x-shader/x-vertex">
  varying vec2 vUv;
  void main() {
    vUv = uv;
    gl_Position = projectionMatrix * modelViewMatrix * vec4(position, 1.0);
  }
</script>
<script id="fragment-shader" type="x-shader/x-fragment">
  #extension GL_OES_standard_derivatives : enable
  #define M_TAU 6.2831853071795864769252867665590
  varying vec2 vUv;
  void main() {
    float linecount = 200.0;
    float thickness = 0.0;
    float blendregion = 2.8;
    // Loosely based on https://github.com/AnalyticalGraphicsInc/cesium/blob/1.16/Source/Shaders/Materials/GridMaterial.glsl#L17-L34
    float scaledWidth = fract(linecount * vUv.s);
    scaledWidth = abs(scaledWidth - floor(scaledWidth + 0.5));
    vec2 dF = fwidth(vUv) * linecount;
    float value = 1.0 - smoothstep(dF.s * thickness, dF.s * (thickness + blendregion), scaledWidth);
    gl_FragColor = vec4(value, 0.0, 0.0, 1.0);
  }
</script>
