Link to the three.js discourse thread: https://discourse.threejs.org/t/fbo-particles-with-cumulative-movement/7221
This is difficult for me to explain because of my limited knowledge on the subject, but I'm gonna do my best...
At this point, I have a basic FBO particle system in place that works. The following is how it's set up:
var FBO = function( exports ){
var scene, orthoCamera, rtt;
exports.init = function( width, height, renderer, simulationMaterial, renderMaterial ){
var gl = renderer.getContext();
//1 we need FLOAT Textures to store positions
//https://github.com/KhronosGroup/WebGL/blob/master/sdk/tests/conformance/extensions/oes-texture-float.html
if (!gl.getExtension("OES_texture_float")){
throw new Error( "float textures not supported" );
}
//2 we need to access textures from within the vertex shader
//https://github.com/KhronosGroup/WebGL/blob/90ceaac0c4546b1aad634a6a5c4d2dfae9f4d124/conformance-suites/1.0.0/extra/webgl-info.html
if( gl.getParameter(gl.MAX_VERTEX_TEXTURE_IMAGE_UNITS) == 0 ) {
throw new Error( "vertex shader cannot read textures" );
}
//3 rtt setup
scene = new THREE.Scene();
orthoCamera = new THREE.OrthographicCamera(-1,1,1,-1,1/Math.pow( 2, 53 ),1 );
//4 create a target texture
var options = {
minFilter: THREE.NearestFilter,//important as we want to sample square pixels
magFilter: THREE.NearestFilter,//
format: THREE.RGBAFormat,//180407 changed to RGBAFormat
type:THREE.FloatType//important as we need precise coordinates (not ints)
};
rtt = new THREE.WebGLRenderTarget( width,height, options);
//5 the simulation:
//create a bi-unit quadrilateral and uses the simulation material to update the Float Texture
var geom = new THREE.BufferGeometry();
geom.addAttribute( 'position', new THREE.BufferAttribute( new Float32Array([ -1,-1,0, 1,-1,0, 1,1,0, -1,-1, 0, 1, 1, 0, -1,1,0 ]), 3 ) );
geom.addAttribute( 'uv', new THREE.BufferAttribute( new Float32Array([ 0,1, 1,1, 1,0, 0,1, 1,0, 0,0 ]), 2 ) );
scene.add( new THREE.Mesh( geom, simulationMaterial ) );
//6 the particles:
//create a vertex buffer of size width * height with normalized coordinates
var l = (width * height );
var vertices = new Float32Array( l * 3 );
for ( var i = 0; i < l; i++ ) {
var i3 = i * 3;
vertices[ i3 ] = ( i % width ) / width ;
vertices[ i3 + 1 ] = ( i / width ) / height;
}
//create the particles geometry
var geometry = new THREE.BufferGeometry();
geometry.addAttribute( 'position', new THREE.BufferAttribute( vertices, 3 ) );
//the rendermaterial is used to render the particles
exports.particles = new THREE.Points( geometry, renderMaterial );
exports.particles.frustumCulled = false;
exports.renderer = renderer;
};
//7 update loop
exports.update = function(){
//1 update the simulation and render the result in a target texture
// exports.renderer.render( scene, orthoCamera, rtt, true );
exports.renderer.setRenderTarget( rtt );
exports.renderer.render( scene, orthoCamera );
exports.renderer.setRenderTarget( null );
//2 use the simulation result as the new positions for the particles' render material
// had to add .texture on the end of rtt for r103
exports.particles.material.uniforms.positions.value = rtt.texture;
};
return exports;
}({});
The following are the shaders it uses:
<script type="x-shader/x-vertex" id="simulation_vs">
//vertex shader
varying vec2 vUv;
void main() {
vUv = vec2(uv.x, uv.y);
gl_Position = projectionMatrix * modelViewMatrix * vec4( position, 1.0 );
}
</script>
<script type="x-shader/x-fragment" id="simulation_fs">
//fragment Shader
uniform sampler2D positions;//DATA Texture containing original positions
varying vec2 vUv;
void main() {
//basic simulation: displays the particles in place.
vec3 pos = texture2D( positions, vUv ).rgb;
// we can move the particle here
gl_FragColor = vec4( pos,1.0 );
}
</script>
<script type="x-shader/x-vertex" id="render_vs">
//vertex shader
uniform sampler2D positions;//RenderTarget containing the transformed positions
uniform float pointSize;//size
void main() {
//the mesh is a normalized square so the uvs = the xy positions of the vertices
vec3 pos = texture2D( positions, position.xy ).xyz;
//pos now contains a 3D position in space, we can use it as a regular vertex
//regular projection of our position
gl_Position = projectionMatrix * modelViewMatrix * vec4( pos, 1.0 );
//sets the point size
gl_PointSize = pointSize;
}
</script>
<script type="x-shader/x-fragment" id="render_fs">
//fragment shader
void main()
{
gl_FragColor = vec4( vec3( 1. ), .25 );
}
</script>
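For context, this is roughly how the module and shaders above get wired together. A sketch only: the startPositions seed texture and the 256x256 size are my assumptions, not part of the original post.
// hypothetical wiring of the FBO module above
var simulationMaterial = new THREE.ShaderMaterial({
  uniforms: { positions: { value: startPositions } }, // assumed: a float DataTexture of starting xyz values
  vertexShader: document.getElementById('simulation_vs').textContent,
  fragmentShader: document.getElementById('simulation_fs').textContent
});
var renderMaterial = new THREE.ShaderMaterial({
  uniforms: { positions: { value: null }, pointSize: { value: 2 } },
  vertexShader: document.getElementById('render_vs').textContent,
  fragmentShader: document.getElementById('render_fs').textContent
});
FBO.init(256, 256, renderer, simulationMaterial, renderMaterial);
scene.add(FBO.particles);
(function loop() {
  FBO.update();                   // run the simulation into the render target
  renderer.render(scene, camera); // draw the points with the updated positions
  requestAnimationFrame(loop);
})();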
I understand that I would move the particles in the "simulation_fs", but if I move a particle in that shader with something like this,
pos.x += 1.0;
it will still only shift it one unit from the original texture position. I want the movement to be cumulative.
Would using a 2nd set of simulation shaders allow me to move the particles in a cumulative way? Is that a practical solution?
For cumulative movement, you need to use uniforms:
Look into passing a uniform named time to your vertex shader. Then you can update the time once per frame, and you can use that to animate your vertex positions. For example:
position.x = 2.0 * time; // Increment linearly
position.x = sin(time); // Sin wave back-forth animation
Without a changing variable, your vertex animations will be static from one frame to the next.
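A minimal sketch of that wiring, assuming a ShaderMaterial whose vertex shader declares uniform float time; the myVertexShader/myFragmentShader names are illustrative:
var clock = new THREE.Clock();
var material = new THREE.ShaderMaterial({
  uniforms: { time: { value: 0 } },
  vertexShader: myVertexShader,   // assumed to declare `uniform float time;`
  fragmentShader: myFragmentShader
});
function animate() {
  material.uniforms.time.value = clock.getElapsedTime(); // seconds since start
  renderer.render(scene, camera);
  requestAnimationFrame(animate);
}
animate();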
I needed to accomplish something like this, and set up an absolutely minimal example that I can tweak in the future. You'll see the positional changes are cumulative.
The following was simplified from a wonderful discussion of FBOs by Nicolas Barradeau (a WebGL wizard):
// specify the container where we'll render the scene
var elem = document.querySelector('body'),
elemW = elem.clientWidth,
elemH = elem.clientHeight
// generate a scene object
var scene = new THREE.Scene();
// generate a camera
var camera = new THREE.PerspectiveCamera(75, elemW/elemH, 0.001, 100);
// generate a renderer
var renderer = new THREE.WebGLRenderer({antialias: true, alpha: true});
renderer.setPixelRatio(window.devicePixelRatio);
renderer.setSize(elemW, elemH);
elem.appendChild(renderer.domElement);
// generate controls
var controls = new THREE.TrackballControls(camera, renderer.domElement);
// position camera and controls
camera.position.set(0.5, 0.5, -5);
controls.target = new THREE.Vector3(0.5, 0.5, 0);
/**
* FBO
**/
// verify browser agent supports "frame buffer object" features
gl = renderer.getContext();
if (!gl.getExtension('OES_texture_float') ||
gl.getParameter(gl.MAX_VERTEX_TEXTURE_IMAGE_UNITS) == 0) {
alert(' * Cannot create FBO :(');
}
// set initial positions of `w*h` particles
var w = h = 256,
i = 0,
data = new Float32Array(w*h*3);
for (var x=0; x<w; x++) {
for (var y=0; y<h; y++) {
data[i++] = x/w;
data[i++] = y/h;
data[i++] = 0;
}
}
// feed those positions into a data texture
var dataTex = new THREE.DataTexture(data, w, h, THREE.RGBFormat, THREE.FloatType);
dataTex.minFilter = THREE.NearestFilter;
dataTex.magFilter = THREE.NearestFilter;
dataTex.needsUpdate = true;
// add the data texture with positions to a material for the simulation
var simMaterial = new THREE.RawShaderMaterial({
uniforms: { posTex: { type: 't', value: dataTex }, },
vertexShader: document.querySelector('#sim-vs').textContent,
fragmentShader: document.querySelector('#sim-fs').textContent,
});
// release dataTex; it isn't used after initializing point positions
// (note: `delete` has no effect on declared variables, so null the reference instead)
dataTex = null;
THREE.FBO = function(w, simMat) {
this.scene = new THREE.Scene();
this.camera = new THREE.OrthographicCamera(-w/2, w/2, w/2, -w/2, -1, 1);
this.scene.add(new THREE.Mesh(new THREE.PlaneGeometry(w, w), simMat));
};
// create a scene where we'll render the positional attributes
var fbo = new THREE.FBO(w, simMaterial);
// create render targets a + b to which the simulation will be rendered
var renderTargetA = new THREE.WebGLRenderTarget(w, h, {
wrapS: THREE.RepeatWrapping,
wrapT: THREE.RepeatWrapping,
minFilter: THREE.NearestFilter,
magFilter: THREE.NearestFilter,
format: THREE.RGBFormat,
type: THREE.FloatType,
stencilBuffer: false,
});
// a second render target lets us store input + output positional states
var renderTargetB = renderTargetA.clone();
// render the positions to the render targets
renderer.render(fbo.scene, fbo.camera, renderTargetA, false);
renderer.render(fbo.scene, fbo.camera, renderTargetB, false);
// store the uv attrs; each is x,y and identifies a given point's
// position data within the positional texture; must be scaled 0:1!
var geo = new THREE.BufferGeometry(),
arr = new Float32Array(w*h*3);
for (var i=0; i<arr.length; i++) {
arr[i++] = (i%w)/w;
arr[i++] = Math.floor(i/w)/h;
arr[i] = 0; // the loop's own i++ advances to the next vertex
}
geo.addAttribute('position', new THREE.BufferAttribute(arr, 3, true))
// create material the user sees
var material = new THREE.RawShaderMaterial({
uniforms: {
posMap: { type: 't', value: null }, // `posMap` is set each render
},
vertexShader: document.querySelector('#ui-vert').textContent,
fragmentShader: document.querySelector('#ui-frag').textContent,
transparent: true,
});
// add the points the user sees to the scene
var mesh = new THREE.Points(geo, material);
scene.add(mesh);
function render() {
// at the start of the render block, A is one frame behind B
var oldA = renderTargetA; // store A, the penultimate state
renderTargetA = renderTargetB; // advance A to the updated state
renderTargetB = oldA; // set B to the penultimate state
// pass the updated positional values to the simulation
simMaterial.uniforms.posTex.value = renderTargetA.texture;
// run a frame and store the new positional values in renderTargetB
renderer.render(fbo.scene, fbo.camera, renderTargetB, false);
// pass the new positional values to the scene users see
material.uniforms.posMap.value = renderTargetB.texture;
// render the scene users see as normal
renderer.render(scene, camera);
controls.update();
requestAnimationFrame(render);
};
render();
html, body { width: 100%; height: 100%; background: #000; }
body { margin: 0; overflow: hidden; }
canvas { width: 100%; height: 100%; }
<script src="https://cdnjs.cloudflare.com/ajax/libs/three.js/101/three.min.js"></script>
<script src="https://threejs.org/examples/js/controls/TrackballControls.js"></script>
<!-- The simulation shaders update positional attributes -->
<script id='sim-vs' type='x-shader/x-vert'>
precision mediump float;
uniform mat4 projectionMatrix;
uniform mat4 modelViewMatrix;
attribute vec2 uv; // x,y offsets of each point in texture
attribute vec3 position;
varying vec2 vUv;
void main() {
vUv = vec2(uv.x, 1.0 - uv.y);
gl_Position = projectionMatrix * modelViewMatrix * vec4(position, 1.0);
}
</script>
<script id='sim-fs' type='x-shader/x-frag'>
precision mediump float;
uniform sampler2D posTex;
varying vec2 vUv;
void main() {
// read the supplied x,y,z vert positions
vec3 pos = texture2D(posTex, vUv).xyz;
// update the positional attributes here!
pos.x += cos(pos.y) / 100.0;
pos.y += tan(pos.x) / 100.0;
// render the new positional attributes
gl_FragColor = vec4(pos, 1.0);
}
</script>
<!-- The ui shaders render what the user sees -->
<script id='ui-vert' type='x-shader/x-vert'>
precision mediump float;
uniform sampler2D posMap; // contains positional data read from sim-fs
uniform mat4 projectionMatrix;
uniform mat4 modelViewMatrix;
attribute vec2 position;
void main() {
// read this particle's position, which is stored as a pixel color
vec3 pos = texture2D(posMap, position.xy).xyz;
// project this particle
vec4 mvPosition = modelViewMatrix * vec4(pos, 1.0);
gl_Position = projectionMatrix * mvPosition;
// set the size of each particle
gl_PointSize = 0.3 / -mvPosition.z;
}
</script>
<script id='ui-frag' type='x-shader/x-frag'>
precision mediump float;
void main() {
gl_FragColor = vec4(0.0, 0.5, 1.5, 1.0);
}
</script>
Related
I've read that WebGL2 gives us access to 3d textures. I'm trying to use this to perform some GPU-side computations and then store the output in a 64x64x64 3D texture. The render flow is
compute shader -> render to 3dTexture -> read shader -> render to screen
This is my simple compute shader; the texture's RGB channels should correspond to the XYZ fragment coordinates.
#version 300 es
precision mediump sampler3D;
precision highp float;
layout(location = 0) out highp vec4 pc_fragColor;
void main() {
vec3 color = vec3(gl_FragCoord.x / 64.0, gl_FragCoord.y / 64.0, gl_FragDepth);
pc_fragColor.rgb = color;
pc_fragColor.a = 1.0;
}
However, this only seems to be rendering to a single "slice" of the 3D texture, where depth is 0.0. All subsequent depth slices, from 1 to 63, remain black.
I've created a working demo below to demonstrate this issue.
var renderer, target3d, camera;
const SIDE = 64;
var computeMaterial, computeMesh;
var readDataMaterial, readDataMesh,
read3dTargetMaterial, read3dTargetMesh;
var textField = document.querySelector("#textField");
function init() {
// Three.js boilerplate
renderer = new THREE.WebGLRenderer({antialias: true});
renderer.setSize(window.innerWidth, window.innerHeight);
renderer.setClearColor(new THREE.Color(0x000000), 1.0);
document.body.appendChild(renderer.domElement);
camera = new THREE.Camera();
// Create volume material to render to 3dTexture
computeMaterial = new THREE.RawShaderMaterial({
vertexShader: SIMPLE_VERTEX,
fragmentShader: COMPUTE_FRAGMENT,
uniforms: {
uZCoord: { value: 0.0 },
},
depthTest: false,
});
computeMaterial.type = "VolumeShader";
computeMesh = new THREE.Mesh(new THREE.PlaneGeometry(2, 2), computeMaterial);
// Left material, reads Data3DTexture
readDataMaterial = new THREE.RawShaderMaterial({
vertexShader: SIMPLE_VERTEX,
fragmentShader: READ_FRAGMENT,
uniforms: {
uZCoord: { value: 0.0 },
tDiffuse: { value: create3dDataTexture() }
},
depthTest: false
});
readDataMaterial.type = "DebugShader";
readDataMesh = new THREE.Mesh(new THREE.PlaneGeometry(2, 2), readDataMaterial);
// Right material, reads 3DRenderTarget texture
target3d = new THREE.WebGL3DRenderTarget(SIDE, SIDE, SIDE);
target3d.depthBuffer = false;
read3dTargetMaterial = readDataMaterial.clone();
read3dTargetMaterial.uniforms.tDiffuse.value = target3d.texture;
read3dTargetMesh = new THREE.Mesh(new THREE.PlaneGeometry(2, 2), read3dTargetMaterial);
}
// Creates 3D texture with RGB gradient along the XYZ axes
function create3dDataTexture() {
const d = new Uint8Array( SIDE * SIDE * SIDE * 4 );
window.dat = d;
let i4 = 0;
for ( let z = 0; z < SIDE; z ++ ) {
for ( let y = 0; y < SIDE; y ++ ) {
for ( let x = 0; x < SIDE; x ++ ) {
d[i4 + 0] = (x / SIDE) * 255;
d[i4 + 1] = (y / SIDE) * 255;
d[i4 + 2] = (z / SIDE) * 255;
d[i4 + 3] = 255; // full alpha; this is a Uint8Array, so the range is 0-255
i4 += 4;
}
}
}
const texture = new THREE.Data3DTexture( d, SIDE, SIDE, SIDE );
texture.format = THREE.RGBAFormat;
texture.minFilter = THREE.NearestFilter;
texture.magFilter = THREE.NearestFilter;
texture.unpackAlignment = 1;
texture.needsUpdate = true;
return texture;
}
function onResize() {
renderer.setSize(window.innerWidth, window.innerHeight);
}
function animate(t) {
// Render volume shader to target3d buffer
renderer.setRenderTarget(target3d);
renderer.render(computeMesh, camera);
// Update z texture coordinate along sine wave
renderer.autoClear = false;
const sinZCoord = Math.sin(t / 1000);
readDataMaterial.uniforms.uZCoord.value = sinZCoord;
read3dTargetMaterial.uniforms.uZCoord.value = sinZCoord;
textField.innerText = sinZCoord.toFixed(4);
// Render data3D texture to screen
renderer.setViewport(0, window.innerHeight - SIDE*4, SIDE * 4, SIDE * 4);
renderer.setRenderTarget(null);
renderer.render(readDataMesh, camera);
// Render 3dRenderTarget texture to screen
renderer.setViewport(SIDE * 4, window.innerHeight - SIDE*4, SIDE * 4, SIDE * 4);
renderer.setRenderTarget(null);
renderer.render(read3dTargetMesh, camera);
renderer.autoClear = true;
requestAnimationFrame(animate);
}
init();
window.addEventListener("resize", onResize);
requestAnimationFrame(animate);
html, body {
width: 100%;
height: 100%;
margin: 0;
overflow: hidden;
}
#title {
position: absolute;
top: 0;
left: 0;
color: white;
font-family: sans-serif;
}
h3 {
margin: 2px;
}
<div id="title">
<h3>texDepth</h3><h3 id="textField"></h3>
</div>
<script src="https://threejs.org/build/three.js"></script>
<script>
/////////////////////////////////////////////////////////////////////////////////////
// Compute frag shader
// It should output an RGB gradient in the XYZ axes to the 3DRenderTarget
// But gl_FragCoord.z is always 0.5 and gl_FragDepth is always 0.0
const COMPUTE_FRAGMENT = `#version 300 es
precision mediump sampler3D;
precision highp float;
precision highp int;
layout(location = 0) out highp vec4 pc_fragColor;
void main() {
vec3 color = vec3(gl_FragCoord.x / 64.0, gl_FragCoord.y / 64.0, gl_FragDepth);
pc_fragColor.rgb = color;
pc_fragColor.a = 1.0;
}`;
/////////////////////////////////////////////////////////////////////////////////////
// Reader frag shader
// Samples the 3D texture along uv.x, uv.y, and uniform Z coordinate
const READ_FRAGMENT = `#version 300 es
precision mediump sampler3D;
precision highp float;
precision highp int;
layout(location = 0) out highp vec4 pc_fragColor;
in vec2 vUv;
uniform sampler3D tDiffuse;
uniform float uZCoord;
void main() {
vec3 UV3 = vec3(vUv.x, vUv.y, uZCoord);
vec3 diffuse = texture(tDiffuse, UV3).rgb;
pc_fragColor.rgb = diffuse;
pc_fragColor.a = 1.0;
}
`;
/////////////////////////////////////////////////////////////////////////////////////
// Simple vertex shader,
// renders a full-screen quad with UVs without any transformations
const SIMPLE_VERTEX = `#version 300 es
precision highp float;
precision highp int;
in vec2 uv;
in vec3 position;
out vec2 vUv;
void main() {
vUv = uv;
gl_Position = vec4(position, 1.0);
}`;
/////////////////////////////////////////////////////////////////////////////////////
</script>
On the left side, I’m sampling a Data3DTexture that I created via JavaScript. The blue channel smoothly transitions as I move up and down the depth axis, as expected.
On the right side I’m sampling the WebGL3DRenderTarget texture rendered in the frag shader I showed above. As you can see, it's only rendering to the texture when the depth coordinate is 0.0. All the other “slices” are black.
How can I render my computations to all 64 depth slices? I'm using Three.js for this demo, but I could use any other library like TWGL or vanilla WebGL to achieve the same results.
This doesn't look documented, but you can use a second argument to setRenderTarget to set the "layer" of the 3D render target to render into. Here are the changes to make:
When rendering to the render target, perform a new render for every layer:
for ( let i = 0; i < SIDE; i ++ ) {
// set the uZCoord color value for the shader
computeMesh.material.uniforms.uZCoord.value = i / (SIDE - 1);
// Set the 3d target "layer" to render into before rendering
renderer.setRenderTarget(target3d, i);
renderer.render(computeMesh, camera);
}
Use the "uZCoord" uniform in the compute fragment shader:
uniform float uZCoord;
void main() {
vec3 color = vec3(gl_FragCoord.x / 64.0, gl_FragCoord.y / 64.0, uZCoord);
pc_fragColor.rgb = color;
pc_fragColor.a = 1.0;
}
Other than that, I don't believe there's a way to render to the full 3D volume of the target in a single draw call. This three.js example shows how to do the same thing with render target arrays:
https://threejs.org/examples/?q=array#webgl2_rendertarget_texture2darray
var renderer, target3d, camera;
const SIDE = 64;
var computeMaterial, computeMesh;
var readDataMaterial, readDataMesh,
read3dTargetMaterial, read3dTargetMesh;
var textField = document.querySelector("#textField");
function init() {
// Three.js boilerplate
renderer = new THREE.WebGLRenderer({antialias: true});
renderer.setSize(window.innerWidth, window.innerHeight);
renderer.setClearColor(new THREE.Color(0x000000), 1.0);
document.body.appendChild(renderer.domElement);
camera = new THREE.Camera();
// Create volume material to render to 3dTexture
computeMaterial = new THREE.RawShaderMaterial({
vertexShader: SIMPLE_VERTEX,
fragmentShader: COMPUTE_FRAGMENT,
uniforms: {
uZCoord: { value: 0.0 },
},
depthTest: false,
});
computeMaterial.type = "VolumeShader";
computeMesh = new THREE.Mesh(new THREE.PlaneGeometry(2, 2), computeMaterial);
// Left material, reads Data3DTexture
readDataMaterial = new THREE.RawShaderMaterial({
vertexShader: SIMPLE_VERTEX,
fragmentShader: READ_FRAGMENT,
uniforms: {
uZCoord: { value: 0.0 },
tDiffuse: { value: create3dDataTexture() }
},
depthTest: false
});
readDataMaterial.type = "DebugShader";
readDataMesh = new THREE.Mesh(new THREE.PlaneGeometry(2, 2), readDataMaterial);
// Right material, reads 3DRenderTarget texture
target3d = new THREE.WebGL3DRenderTarget(SIDE, SIDE, SIDE);
target3d.depthBuffer = false;
read3dTargetMaterial = readDataMaterial.clone();
read3dTargetMaterial.uniforms.tDiffuse.value = target3d.texture;
read3dTargetMesh = new THREE.Mesh(new THREE.PlaneGeometry(2, 2), read3dTargetMaterial);
}
// Creates 3D texture with RGB gradient along the XYZ axes
function create3dDataTexture() {
const d = new Uint8Array( SIDE * SIDE * SIDE * 4 );
window.dat = d;
let i4 = 0;
for ( let z = 0; z < SIDE; z ++ ) {
for ( let y = 0; y < SIDE; y ++ ) {
for ( let x = 0; x < SIDE; x ++ ) {
d[i4 + 0] = (x / SIDE) * 255;
d[i4 + 1] = (y / SIDE) * 255;
d[i4 + 2] = (z / SIDE) * 255;
d[i4 + 3] = 255; // full alpha; this is a Uint8Array, so the range is 0-255
i4 += 4;
}
}
}
const texture = new THREE.Data3DTexture( d, SIDE, SIDE, SIDE );
texture.format = THREE.RGBAFormat;
texture.minFilter = THREE.NearestFilter;
texture.magFilter = THREE.NearestFilter;
texture.unpackAlignment = 1;
texture.needsUpdate = true;
return texture;
}
function onResize() {
renderer.setSize(window.innerWidth, window.innerHeight);
}
function animate(t) {
for ( let i = 0; i < SIDE; i ++ ) {
// Render volume shader to target3d buffer
computeMesh.material.uniforms.uZCoord.value = i / ( SIDE - 1 );
renderer.setRenderTarget(target3d, i);
renderer.render(computeMesh, camera);
}
// Update z texture coordinate along sine wave
renderer.autoClear = false;
const sinZCoord = Math.sin(t / 1000);
readDataMaterial.uniforms.uZCoord.value = sinZCoord;
read3dTargetMaterial.uniforms.uZCoord.value = sinZCoord;
textField.innerText = sinZCoord.toFixed(4);
// Render data3D texture to screen
renderer.setViewport(0, window.innerHeight - SIDE*4, SIDE * 4, SIDE * 4);
renderer.setRenderTarget(null);
renderer.render(readDataMesh, camera);
// Render 3dRenderTarget texture to screen
renderer.setViewport(SIDE * 4, window.innerHeight - SIDE*4, SIDE * 4, SIDE * 4);
renderer.setRenderTarget(null);
renderer.render(read3dTargetMesh, camera);
renderer.autoClear = true;
requestAnimationFrame(animate);
}
init();
window.addEventListener("resize", onResize);
requestAnimationFrame(animate);
html, body {
width: 100%;
height: 100%;
margin: 0;
overflow: hidden;
}
#title {
position: absolute;
top: 0;
left: 0;
color: white;
font-family: sans-serif;
}
h3 {
margin: 2px;
}
<div id="title">
<h3>texDepth</h3><h3 id="textField"></h3>
</div>
<script src="https://threejs.org/build/three.js"></script>
<script>
/////////////////////////////////////////////////////////////////////////////////////
// Compute frag shader
// Outputs an RGB gradient across the XYZ axes of the 3DRenderTarget,
// with the uZCoord uniform supplying the Z coordinate of the current slice
const COMPUTE_FRAGMENT = `#version 300 es
precision mediump sampler3D;
precision highp float;
precision highp int;
layout(location = 0) out highp vec4 pc_fragColor;
uniform float uZCoord;
void main() {
vec3 color = vec3(gl_FragCoord.x / 64.0, gl_FragCoord.y / 64.0, uZCoord);
pc_fragColor.rgb = color;
pc_fragColor.a = 1.0;
}`;
/////////////////////////////////////////////////////////////////////////////////////
// Reader frag shader
// Samples the 3D texture along uv.x, uv.y, and uniform Z coordinate
const READ_FRAGMENT = `#version 300 es
precision mediump sampler3D;
precision highp float;
precision highp int;
layout(location = 0) out highp vec4 pc_fragColor;
in vec2 vUv;
uniform sampler3D tDiffuse;
uniform float uZCoord;
void main() {
vec3 UV3 = vec3(vUv.x, vUv.y, uZCoord);
vec3 diffuse = texture(tDiffuse, UV3).rgb;
pc_fragColor.rgb = diffuse;
pc_fragColor.a = 1.0;
}
`;
/////////////////////////////////////////////////////////////////////////////////////
// Simple vertex shader,
// renders a full-screen quad with UVs without any transformations
const SIMPLE_VERTEX = `#version 300 es
precision highp float;
precision highp int;
in vec2 uv;
in vec3 position;
out vec2 vUv;
void main() {
vUv = uv;
gl_Position = vec4(position, 1.0);
}`;
/////////////////////////////////////////////////////////////////////////////////////
</script>
I have been learning WebGL from webglfundamentals, where I came across a simple shader example that paints triangles with solid colors. Here is a link to the tutorial and the original demo.
I tried to create the same effect in three.js using a plane geometry, but I can't manage to achieve the solid color shader. When I use almost the same setup in three.js, I get more of a gradient effect. What am I doing wrong here? (I'm noticing that my shader isn't consistent either, as it renders differently on refresh.) Also, is there a place to learn shaders specifically for three.js?
var vShader = `
precision mediump float;
precision mediump int;
attribute vec4 a_color;
varying vec4 vColor;
void main() {
gl_Position = projectionMatrix * modelViewMatrix * vec4( position, 1.0 );
vColor = a_color;
}`;
var fShader = ` precision mediump float;
precision mediump int;
varying vec4 vColor;
void main() {
vec4 color = vec4( vColor );
gl_FragColor = vColor;
}`;
var row = 1;
var col = 1;
var w = 600;
var h = 400;
scene = new THREE.Scene();
camera = new THREE.PerspectiveCamera(45, w / h, 0.1, 100);
renderer = new THREE.WebGLRenderer();
camera.position.z = 5;
var viewSize = getViewSize(camera);
document.body.appendChild(renderer.domElement);
renderer.setSize(w, h);
var geometry = new THREE.PlaneBufferGeometry(viewSize.width, viewSize.height, col, row);
var color = new THREE.Color();
const blossomPalette = [0xff0000, 0xff0000, 0xff0000, 0x0000ff, 0x0000ff, 0x0000ff];
var colors = new Float32Array(4 * 2 * 3 * col * row);
for (let i = 0; i < 4; i++) {
color.setHex(blossomPalette[Math.floor(Math.random() * blossomPalette.length)]);
color.toArray(colors, i * 3);
}
geometry.setAttribute('a_color', new THREE.BufferAttribute(colors, 4, false));
var material = new THREE.ShaderMaterial({
vertexShader: vShader,
fragmentShader: fShader,
transparent: true,
blending: THREE.AdditiveBlending,
depthTest: false,
vertexColors: true,
flatShading: true
});
var plane = new THREE.Mesh(geometry, material);
scene.add(plane);
function animate() {
renderer.render(scene, camera);
requestAnimationFrame(animate)
}
animate();
function getViewSize(camera) {
var fovInRadians = (camera.fov * Math.PI) / 180;
var height = Math.abs(camera.position.z * Math.tan(fovInRadians / 2) * 2);
return {
width: height * camera.aspect,
height: height
}
}
<script src="https://threejsfundamentals.org/threejs/resources/threejs/r122/build/three.min.js"></script>
The PlaneBufferGeometry in three.js uses indexed vertices to share vertices, so there are only 4 vertices, with 6 indices that use those 4 vertices to make 2 triangles. That means you can't give the triangles different solid colors, because they share 2 vertices and a vertex can only have 1 color.
Further, the code chooses random colors for each vertex, so even if you used 6 vertices so that the 2 triangles didn't share any, you still wouldn't get the result you linked to; instead you'd get this result, which is further down the page in the same tutorial.
Finally, the code only generates 3 floats per color, so you need to set the number of components for the color attribute to 3 instead of 4.
If you want to reproduce the WebGL sample, you'll need to provide your own 6 vertices, as in the full rewrite below.
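As an aside, three.js can also de-index an existing geometry for you; a brief sketch, using the same viewSize helper as the original code:
// Alternative: expand the plane's 4 shared vertices + 6 indices into
// 6 independent vertices, so each triangle can take its own solid color.
var geometry = new THREE.PlaneBufferGeometry(viewSize.width, viewSize.height).toNonIndexed();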
var vShader = `
precision mediump float;
precision mediump int;
attribute vec4 a_color;
varying vec4 vColor;
void main() {
gl_Position = projectionMatrix * modelViewMatrix * vec4( position, 1.0 );
vColor = a_color;
}`;
var fShader = ` precision mediump float;
precision mediump int;
varying vec4 vColor;
void main() {
vec4 color = vec4( vColor );
gl_FragColor = vColor;
}`;
var row = 1;
var col = 1;
var w = 600;
var h = 400;
scene = new THREE.Scene();
camera = new THREE.PerspectiveCamera(45, w / h, 0.1, 100);
renderer = new THREE.WebGLRenderer();
camera.position.z = 5;
var viewSize = getViewSize(camera);
document.body.appendChild(renderer.domElement);
renderer.setSize(w, h);
var geometry = new THREE.BufferGeometry();
var x = viewSize.width / 2;
var y = viewSize.height / 2;
var positions = new Float32Array([
-x, -y, 0,
x, -y, 0,
-x, y, 0,
-x, y, 0,
x, -y, 0,
x, y, 0,
]);
geometry.setAttribute('position', new THREE.BufferAttribute(positions, 3, false));
var color = new THREE.Color();
const blossomPalette = [
0xff0000, 0xff0000, 0xff0000,
0x0000ff, 0x0000ff, 0x0000ff,
];
var colors = new Float32Array(2 * 3 * 3 * col * row);
for (let i = 0; i < 6; i++) {
color.setHex(blossomPalette[i]);
color.toArray(colors, i * 3);
}
geometry.setAttribute('a_color', new THREE.BufferAttribute(colors, 3, false));
var material = new THREE.ShaderMaterial({
vertexShader: vShader,
fragmentShader: fShader,
transparent: true,
blending: THREE.AdditiveBlending,
depthTest: false,
vertexColors: true,
flatShading: true
});
var plane = new THREE.Mesh(geometry, material);
scene.add(plane);
function animate() {
renderer.render(scene, camera);
requestAnimationFrame(animate)
}
animate();
function getViewSize(camera) {
var fovInRadians = (camera.fov * Math.PI) / 180;
var height = Math.abs(camera.position.z * Math.tan(fovInRadians / 2) * 2);
return {
width: height * camera.aspect,
height: height
}
}
<script src="https://threejsfundamentals.org/threejs/resources/threejs/r122/build/three.min.js"></script>
See this article
I discovered this solution for creating post-processing effects with three.js:
https://medium.com/@luruke/simple-postprocessing-in-three-js-91936ecadfb7
(made by Luigi De Rosa)
It's a great way to do it. Unfortunately, I can't manage to add transparency to my final render. Should I add a transparency component inside my post-processing fragment shader?
const fragmentShader = `precision highp float;
uniform sampler2D uScene;
uniform vec2 uResolution;
uniform float uTime;
void main() {
vec2 uv = gl_FragCoord.xy / uResolution.xy;
vec3 color = vec3(uv, 1.0);
//simple distortion effect
uv.y += sin(uv.x*30.0+uTime*10.0)/40.0;
uv.x -= sin(uv.y*10.0-uTime)/40.0;
color = texture2D(uScene, uv).rgb;
gl_FragColor = vec4(color, 1.0);
}
`;
Thank you
EDIT 1:
I added the attribute transparent: true to the RawShaderMaterial.
I changed the format of the new THREE.WebGLRenderTarget to THREE.RGBAFormat instead of THREE.RGBFormat.
I also added these lines at the end of my fragment shader:
gl_FragColor = vec4(color, 1.0);
vec4 tex = texture2D( uScene, uv );
if(tex.a < 0.0) {
gl_FragColor.a = 1.0;
}
But I still can't see through my canvas.
EDIT 2:
Here's a snippet with the post-processing class:
let renderer, camera, scene, W = window.innerWidth, H = window.innerHeight, geometry, material, mesh;
initWebgl();
function initWebgl(){
renderer = new THREE.WebGLRenderer( { alpha: true, antialias: true } );
renderer.setPixelRatio( window.devicePixelRatio );
renderer.setSize( W, H );
document.querySelector('.innerCanvas').appendChild( renderer.domElement );
camera = new THREE.OrthographicCamera(-W/H/2, W/H/2, 1/2, -1/2, -0.1, 0.1);
scene = new THREE.Scene();
geometry = new THREE.PlaneBufferGeometry(0.5, 0.5);
material = new THREE.MeshNormalMaterial();
mesh = new THREE.Mesh( geometry , material );
scene.add(mesh);
}
function rafP(){
requestAnimationFrame(rafP);
// renderer.render(scene, camera);
post.render(scene, camera);
}
const vertexShader = `precision highp float;
attribute vec2 position;
void main() {
// Look ma! no projection matrix multiplication,
// because we pass the values directly in clip space coordinates.
gl_Position = vec4(position, 1.0, 1.0);
}`;
const fragmentShader = `precision highp float;
uniform sampler2D uScene;
uniform vec2 uResolution;
uniform float uTime;
void main() {
vec2 uv = gl_FragCoord.xy / uResolution.xy;
vec3 color = vec3(uv, 1.0);
uv.y += sin(uv.x*20.0)/10.0;
color = texture2D(uScene, uv).rgb;
gl_FragColor = vec4(color, 1.0);
vec4 tex = texture2D( uScene, uv );
// if(tex.a - percent < 0.0) {
if(tex.a < 0.0) {
gl_FragColor.a = 1.0;
//or without transparent = true use
// discard;
}
}`;
//PostProcessing
class PostFX {
constructor(renderer) {
this.renderer = renderer;
this.scene = new THREE.Scene();
// three.js for .render() wants a camera, even if we're not using it :(
this.dummyCamera = new THREE.OrthographicCamera();
this.geometry = new THREE.BufferGeometry();
// Triangle expressed in clip space coordinates
const vertices = new Float32Array([
-1.0, -1.0,
3.0, -1.0,
-1.0, 3.0
]);
this.geometry.addAttribute('position', new THREE.BufferAttribute(vertices, 2));
this.resolution = new THREE.Vector2();
this.renderer.getDrawingBufferSize(this.resolution);
this.target = new THREE.WebGLRenderTarget(this.resolution.x, this.resolution.y, {
format: THREE.RGBAFormat, //THREE.RGBFormat
stencilBuffer: false,
depthBuffer: true
});
this.material = new THREE.RawShaderMaterial({
fragmentShader,
vertexShader,
uniforms: {
uScene: { value: this.target.texture },
uResolution: { value: this.resolution }
},
transparent:true
});
// TODO: handle the resize -> update uResolution uniform and this.target.setSize()
this.triangle = new THREE.Mesh(this.geometry, this.material);
// Our triangle will be always on screen, so avoid frustum culling checking
this.triangle.frustumCulled = false;
this.scene.add(this.triangle);
}
render(scene, camera) {
this.renderer.setRenderTarget(this.target);
this.renderer.render(scene, camera);
this.renderer.setRenderTarget(null);
this.renderer.render(this.scene, this.dummyCamera);
console.log(this.renderer);
}
}
post = new PostFX(renderer);
rafP();
body{
margin:0;
padding:0;
background:#00F;
}
.innerCanvas{
position:fixed;
top:0;
left:0;
width:100%;
height:100%;
}
<script src="https://cdnjs.cloudflare.com/ajax/libs/three.js/105/three.js"></script>
<div class="innerCanvas"></div>
On the alpha channel, 0 means fully transparent and 1 means fully opaque.
The only thing you need to do, in this case, is pass gl_FragColor the result of your texture sample directly. You don't even need to worry about its value.
gl_FragColor = texture2D(uScene, uv);
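Put together with the distortion from the question, the post-processing fragment shader might look like this; a sketch, still assuming transparent: true on the material and an RGBA render target as in the edits above:
const fragmentShader = `precision highp float;
uniform sampler2D uScene;
uniform vec2 uResolution;
void main() {
  vec2 uv = gl_FragCoord.xy / uResolution.xy;
  // same distortion as before
  uv.y += sin(uv.x * 20.0) / 10.0;
  // pass the sampled color AND its alpha straight through
  gl_FragColor = texture2D(uScene, uv);
}`;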
JSFiddle
I'm learning to manipulate position values in the GPU using textures, a.k.a. framebuffer objects (FBOs), while using Three.js. I've been using this question as a starting place, along with this example by @mrdoob and @zz85, as well as this old thread.
However, the examples are quite dated (they use three.js rev. 55 vs. the current rev. 80), so I'm needing to make a fair number of revisions and reworks to the code. Before I get in too deep, I wanted to pause and ask if any way of handling FBOs has already been written into the Three.js code base, or if I've overlooked an updated script somewhere. Thanks!
If not, I'll do my darnedest and perhaps post the result here if it seems generally useful.
In case you like to "pop the hood", I wanted to share an absolutely minimal example of an FBO scene in THREE.js. It's the same commented minimal example reproduced in full in the cumulative-movement answer earlier in this document, so the code isn't repeated here.
I just discovered that there is a way in Three.js to handle Frame Buffer Objects (FBOs) to calculate things like changing position data using the GPU; it is called the THREE.GPUComputationRenderer. There is an excellent flock of birds example here that demonstrates how to pass a number of variables by rendering their values into textures to be used in the final shader.
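For a taste of the API, here is a minimal sketch; the positionShader compute shader, the fillWithStartPositions helper, and the posMap uniform are placeholders of mine, not code from the birds example:
// minimal sketch of THREE.GPUComputationRenderer (ships with the three.js examples)
var gpuCompute = new THREE.GPUComputationRenderer(256, 256, renderer);
var pos0 = gpuCompute.createTexture();   // blank float DataTexture to seed
fillWithStartPositions(pos0.image.data); // hypothetical helper writing initial xyz values
var posVar = gpuCompute.addVariable('texturePosition', positionShader, pos0);
gpuCompute.setVariableDependencies(posVar, [posVar]); // reads its own previous frame
var error = gpuCompute.init();
if (error !== null) console.error(error);
// then, once per frame:
gpuCompute.compute();
material.uniforms.posMap.value = gpuCompute.getCurrentRenderTarget(posVar).texture;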
I am using a texture atlas to hold a sequence of images. When mapping to a mesh with MeshLambertMaterial, using Texture.offset and Texture.repeat works beautifully to cut the subtexture out of the entire image.
However, using the exact same texture instance for a PointCloudMaterial renders the particles with the entire atlas, not just the selected subimage.
I tried to follow the three.js source code, but the documentation is scarce.
Is there a workaround for this better than using canvases to chop up the image?
Edit: As requested, a work-in-progress is available at http://jnm2.com/minesweeper/.
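For reference, the cropping that works on the mesh looks roughly like this; the file name and tile choice are illustrative:
// crop one tile of a 4x4 atlas for MeshLambertMaterial via offset/repeat
var texture = new THREE.TextureLoader().load('atlas.png'); // hypothetical path
texture.repeat.set(0.25, 0.25); // each tile covers 1/4 of the atlas per axis
texture.offset.set(0.5, 0.75);  // select the tile in the 3rd column, top row
var material = new THREE.MeshLambertMaterial({ map: texture });
// As the question notes, the same texture on a point cloud renders the whole
// atlas, which is why the answer below moves the tile math into a custom shader.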
THREE.PointCloudMaterial has been renamed THREE.PointsMaterial.
THREE.PointCloud has been renamed THREE.Points.
You want to use a sprite sheet with your point cloud.
Instead of using PointsMaterial with your Points, you can create a custom ShaderMaterial instead.
The custom ShaderMaterial can access your sprite sheet and use a different sub-image for each particle.
To do so, use a shader like this one:
<script type="x-shader/x-vertex" id="vertexshader">
attribute vec2 offset;
varying vec2 vOffset;
void main() {
vOffset = offset;
gl_PointSize = 25.0;
gl_Position = projectionMatrix * modelViewMatrix * vec4( position, 1.0 );
}
</script>
<script type="x-shader/x-fragment" id="fragmentshader">
uniform sampler2D spriteSheet;
uniform vec2 repeat;
varying vec2 vOffset;
void main() {
vec2 uv = vec2( gl_PointCoord.x, 1.0 - gl_PointCoord.y );
vec4 tex = texture2D( spriteSheet, uv * repeat + vOffset );
if ( tex.a < 0.5 ) discard;
gl_FragColor = tex;
}
</script>
Then
// geometry
geometry = new THREE.BufferGeometry();
// attributes
var numVertices = 20;
var positions = new Float32Array( numVertices * 3 ); // 3 coordinates per point
var offsets = new Float32Array( numVertices * 2 ); // 2 coordinates per point
geometry.setAttribute( 'position', new THREE.BufferAttribute( positions, 3 ) );
geometry.setAttribute( 'offset', new THREE.BufferAttribute( offsets, 2 ) );
// populate offsets
var offset = new THREE.Vector2();
for ( var i = 0, index = 0, l = numVertices; i < l; i ++, index += 3 ) {
positions[ index ] = 100 * Math.random() - 50;
positions[ index + 1 ] = 100 * Math.random() - 50;
positions[ index + 2 ] = 100 * Math.random() - 50;
}
for ( var i = 0, index = 0, l = numVertices; i < l; i ++, index += 2 ) {
offset.set( THREE.Math.randInt( 1, 3 ), THREE.Math.randInt( 2, 3 ) ).multiplyScalar( 0.25 ); // sprite sheet: 4 rows x 4 cols
offsets[ index ] = offset.x;
offsets[ index + 1 ] = offset.y;
}
// image
image = document.createElement( 'img' );
image.addEventListener( 'load', function ( event ) { texture.needsUpdate = true; } );
// texture
var texture = new THREE.Texture( image );
// uniforms
uniforms = {
spriteSheet: { value: texture },
repeat: { value: new THREE.Vector2( 0.25, 0.25 ) }
};
// material
var material = new THREE.ShaderMaterial( {
uniforms: uniforms,
vertexShader: document.getElementById( 'vertexshader' ).textContent,
fragmentShader: document.getElementById( 'fragmentshader' ).textContent,
transparent: true
} );
// point cloud
pointCloud = new THREE.Points( geometry, material );
scene.add( pointCloud );
Fiddle: https://jsfiddle.net/nL0b6hco/
three.js r.137.4