Transparent points are not transparent at some angles in THREE.js

demo link
//vertexShader
attribute float percent;
varying float vPercent;
void main() {
vec4 mvPosition = modelViewMatrix * vec4( position, 1.0 );
gl_Position = projectionMatrix * mvPosition;
gl_PointSize = 10.0;
vPercent = percent;
}
//fragmentShader
varying float vPercent;
void main() {
gl_FragColor = vec4( vec3(0.0), vPercent );
}
let points = [];
for(let i = 0; i < 1000; i++) {
points.push(new THREE.Vector3(i / 1000, 1, 0));
}
let geometry = new THREE.BufferGeometry().setFromPoints(points);
let percents = new Float32Array(1000);
for(let i = 0; i < 1000; i++) {
percents[i] = i / 1000;
}
geometry.addAttribute('percent', new THREE.BufferAttribute(percents, 1));
let line = new THREE.Points(geometry,
new THREE.ShaderMaterial({
vertexShader: shader_content["vertexShader"],
fragmentShader: shader_content["fragmentShader"],
vertexColors: THREE.VertexColors,
transparent: true
})
);
scene.add(line);
Rotate the scene and the points all turn black at some angles (three.js r109).
Is it a bug, or is it just how it's supposed to be?
Screenshots may explain it better:
all black picture
normal picture

What you see is a depth sorting issue that can be avoided by setting depthWrite of your shader material to false. The points then no longer flicker, and you always see the expected result (which actually corresponds to your black picture).
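For illustration, here is a minimal sketch of the material from the question with depth writes disabled (same shaders, one extra flag):
let material = new THREE.ShaderMaterial({
    vertexShader: shader_content["vertexShader"],
    fragmentShader: shader_content["fragmentShader"],
    transparent: true,
    depthWrite: false // transparent points no longer write depth, so draw order cannot black them out
});
let points = new THREE.Points(geometry, material);
scene.add(points);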

Related

Decompose a GLSL mat4 to original RTS values within vertex shader to calculate a View UV Offset

I need to get the rotation difference between the model and the camera, convert the values to radians/degrees, and pass it to the fragment shader.
For that I need to decompose the model rotation matrix, and maybe the camera view matrix as well. I cannot seem to find a decomposition mechanism suitable for use within a shader.
The rotation details go into the fragment shader to calculate the UV offset.
original_rotation + viewing_angles is used to calculate a final sprite-like offset of the following texture, shown as billboards.
Ultimately the UV should offset downwards when looking from below (e.g. H3 to A3), upwards when looking from above (e.g. A3 to H3), and left to right and vice versa when looking from the sides (e.g. D1 to D8 and vice versa).
const vertex_shader = `
precision highp float;
uniform mat4 modelViewMatrix;
uniform mat4 projectionMatrix;
attribute vec3 position;
attribute vec2 uv;
attribute mat4 instanceMatrix;
attribute float index;
attribute float texture_index;
uniform vec2 rows_cols;
uniform vec3 camera_location;
varying float vTexIndex;
varying vec2 vUv;
varying vec4 transformed_normal;
float normal_to_orbit(vec3 rotation_vector, vec3 view_vector){
rotation_vector = normalize(rotation_vector);
view_vector = normalize(view_vector);
vec3 x_direction = vec3(1.0,0,0);
vec3 y_direction = vec3(0,1.0,0);
vec3 z_direction = vec3(0,0,1.0);
float rotation_x_length = dot(rotation_vector, x_direction);
float rotation_y_length = dot(rotation_vector, y_direction);
float rotation_z_length = dot(rotation_vector, z_direction);
float view_x_length = dot(view_vector, x_direction);
float view_y_length = dot(view_vector, y_direction);
float view_z_length = dot(view_vector, z_direction);
//TOP
float top_rotation = degrees(atan(rotation_x_length, rotation_z_length));
float top_view = degrees(atan(view_x_length, view_z_length));
float top_final = top_view-top_rotation;
float top_idx = floor(top_final/(360.0/rows_cols.x));
//FRONT
float front_rotation = degrees(atan(rotation_x_length, rotation_z_length));
float front_view = degrees(atan(view_x_length, view_z_length));
float front_final = front_view-front_rotation;
float front_idx = floor(front_final/(360.0/rows_cols.y));
return abs((front_idx*rows_cols.x)+top_idx);
}
vec3 extractEulerAngleXYZ(mat4 mat) {
vec3 rotangles = vec3(0,0,0);
rotangles.x = atan(mat[2].z, -mat[1].z);
float cosYangle = sqrt(pow(mat[0].x, 2.0) + pow(mat[0].y, 2.0));
rotangles.y = atan(cosYangle, mat[0].z);
float sinXangle = sin(rotangles.x);
float cosXangle = cos(rotangles.x);
rotangles.z = atan(cosXangle * mat[1].y + sinXangle * mat[2].y, cosXangle * mat[1].x + sinXangle * mat[2].x);
return rotangles;
}
float view_index(vec3 position, mat4 mv_matrix, mat4 rot_matrix){
vec4 posInView = mv_matrix * vec4(0.0, 0.0, 0.0, 1.0);
// posInView /= posInView[3];
vec3 VinView = normalize(-posInView.xyz); // (0, 0, 0) - posInView
// vec4 NinView = normalize(rot_matrix * vec4(0.0, 0.0, 1.0, 1.0));
// float NdotV = dot(NinView, VinView);
vec4 view_normal = rot_matrix * vec4(VinView.xyz, 1.0);
float view_x_length = dot(view_normal.xyz, vec3(1.0,0,0));
float view_y_length = dot(view_normal.xyz, vec3(0,1.0,0));
float view_z_length = dot(view_normal.xyz, vec3(0,0,1.0));
// float radians = atan(-view_x_length, -view_z_length);
float radians = atan(view_x_length, view_z_length);
// float angle = radians/PI*180.0 + 180.0;
float angle = degrees(radians);
if (radians < 0.0) { angle += 360.0; }
if (0.0<=angle && angle<=360.0){
return floor(angle/(360.0/rows_cols.x));
}
return 0.0;
}
void main(){
vec4 original_normal = vec4(0.0, 0.0, 1.0, 1.0);
// transformed_normal = modelViewMatrix * instanceMatrix * original_normal;
vec3 rotangles = extractEulerAngleXYZ(modelViewMatrix * instanceMatrix);
// transformed_normal = vec4(rotangles.xyz, 1.0);
transformed_normal = vec4(camera_location.xyz, 1.0);
vec4 v = (modelViewMatrix* instanceMatrix* vec4(0.0, 0.0, 0.0, 1.0)) + vec4(position.x, position.y, 0.0, 0.0) * vec4(1.0, 1.0, 1.0, 1.0);
vec4 model_center = (modelViewMatrix* instanceMatrix* vec4(0.0, 0.0, 0.0, 1.0));
vec4 model_normal = (modelViewMatrix* instanceMatrix* vec4(0.0, 0.0, 1.0, 1.0));
vec4 cam_loc = vec4(camera_location.xyz, 1.0);
vec4 view_vector = normalize((cam_loc-model_center));
//float findex = normal_to_orbit(model_normal.xyz, view_vector.xyz);
float findex = view_index(position, base_matrix, combined_rot);
vTexIndex = texture_index;
vUv = vec2(mod(findex,rows_cols.x)/rows_cols.x, floor(findex/rows_cols.x)/rows_cols.y) + (uv / rows_cols);
//vUv = vec2(mod(index,rows_cols.x)/rows_cols.x, floor(index/rows_cols.x)/rows_cols.y) + (uv / rows_cols);
gl_Position = projectionMatrix * v;
// gl_Position = projectionMatrix * modelViewMatrix * instanceMatrix * vec4(position, 1.0);
}
`
const fragment_shader = (texture_count) => {
var fragShader = `
precision highp float;
uniform sampler2D textures[${texture_count}];
varying float vTexIndex;
varying vec2 vUv;
varying vec4 transformed_normal;
void main() {
vec4 finalColor;
`;
for (var i = 0; i < texture_count; i++) {
if (i == 0) {
fragShader += `if (vTexIndex < ${i}.5) {
finalColor = texture2D(textures[${i}], vUv);
}
`
} else {
fragShader += `else if (vTexIndex < ${i}.5) {
finalColor = texture2D(textures[${i}], vUv);
}
`
}
}
//fragShader += `gl_FragColor = finalColor * transformed_normal; }`;
fragShader += `gl_FragColor = finalColor; }`;
// fragShader += `gl_FragColor = startColor * finalColor; }`;
// int index = int(v_TexIndex+0.5); //https://stackoverflow.com/questions/60896915/texture-slot-not-getting-picked-properly-in-shader-issue
//console.log('frag shader: ', fragShader)
return fragShader;
}
function reset_instance_positions() {
const dummy = new THREE.Object3D();
const offset = 500*4
for (var i = 0; i < max_instances; i++) {
dummy.position.set(offset-(Math.floor(i % 8)*500), offset-(Math.floor(i / 8)*500), 0);
dummy.updateMatrix();
mesh.setMatrixAt(i, dummy.matrix);
}
mesh.instanceMatrix.needsUpdate = true;
}
function setup_geometry() {
const geometry = new THREE.InstancedBufferGeometry().copy(new THREE.PlaneBufferGeometry(400, 400));
const index = new Float32Array(max_instances * 1); // index
for (let i = 0; i < max_instances; i++) {
index[i] = (i % max_instances) * 1.0 /* index[i] = 0.0 */
}
geometry.setAttribute("index", new THREE.InstancedBufferAttribute(index, 1));
const texture_index = new Float32Array(max_instances * 1); // texture_index
const max_maps = 1
for (let i = 0; i < max_instances; i++) {
texture_index[i] = (Math.floor(i / max_instances) % max_maps) * 1.0 /* index[i] = 0.0 */
}
geometry.setAttribute("texture_index", new THREE.InstancedBufferAttribute(texture_index, 1));
const textures = [texture]
const grid_xy = new THREE.Vector2(8, 8)
mesh = new THREE.InstancedMesh(geometry,
new THREE.RawShaderMaterial({
uniforms: {
textures: {
type: 'tv',
value: textures
},
rows_cols: {
value: new THREE.Vector2(grid_xy.x * 1.0, grid_xy.y * 1.0)
},
camera_location: {
value: camera.position
}
},
vertexShader: vertex_shader,
fragmentShader: fragment_shader(textures.length),
side: THREE.DoubleSide,
// transparent: true,
}), max_instances);
scene.add(mesh);
reset_instance_positions()
}
var camera, scene, mesh, renderer;
const max_instances = 64
function init() {
camera = new THREE.PerspectiveCamera(60, window.innerWidth / window.innerHeight,1, 10000 );
camera.position.z = 1024;
scene = new THREE.Scene();
scene.background = new THREE.Color(0xffffff);
setup_geometry()
var canvas = document.createElement('canvas');
var context = canvas.getContext('webgl2');
renderer = new THREE.WebGLRenderer({
canvas: canvas,
context: context
});
renderer.setPixelRatio(window.devicePixelRatio);
renderer.setSize(window.innerWidth, window.innerHeight);
document.body.appendChild(renderer.domElement);
window.addEventListener('resize', onWindowResize, false);
var controls = new THREE.OrbitControls(camera, renderer.domElement);
}
function onWindowResize() {
camera.aspect = window.innerWidth / window.innerHeight;
camera.updateProjectionMatrix();
renderer.setSize(window.innerWidth, window.innerHeight);
}
function animate() {
requestAnimationFrame(animate);
renderer.render(scene, camera);
}
var dataurl = "https://i.stack.imgur.com/accaU.png"
var texture;
var imageElement = document.createElement('img');
imageElement.onload = function(e) {
texture = new THREE.Texture(this);
texture.needsUpdate = true;
init();
animate();
};
imageElement.src = dataurl;
JSFiddle of work so far
So you have a 4x4 transform matrix M used on an xy-plane QUAD and want to map its 4 corners (p0,p1,p2,p3) to your texture in a "repeat"-like manner (crossing the border from left/right/up/down will return from right/left/down/up) based on the direction of the Z axis of the matrix.
You face 2 problems...
M rotation has 3 DOF and you want just 2 DOF (yaw, pitch), so if roll is present the result might be questionable.
If the texture crosses borders you need to handle this in GLSL to avoid seams,
so either do this in a geometry shader and divide the quad into more pieces if needed, or use an enlarged texture that has the needed overlaps ...
Now if I did not miss something, the conversion is like this:
const float pi = 3.1415926535897932384626433832795;
vec3 d = normalize(z_axis_from_M); // Z axis of M, see below
vec2 dd = normalize(d.xy);
u = atan(dd.y, dd.x); // GLSL's two-argument atan is atan2
v = acos(d.z);
u = (u + pi) / (2.0 * pi);
v = v / pi;
The Z axis extraction is just a simple copy of the 3rd column/row (depending on your notation) of your matrix M, or transforming (0,0,1,0) by it. For more info see:
Understanding 4x4 homogenous transform matrices
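Putting the pieces together, here is a minimal GLSL sketch of the direction-to-UV mapping (a hypothetical helper of mine, assuming column-major GLSL matrices so the Z axis is the 3rd column, and pi as defined above):
vec2 dir_to_uv(mat4 M) {
    vec3 d = normalize((M * vec4(0.0, 0.0, 1.0, 0.0)).xyz); // Z axis of M; w = 0 ignores translation
    vec2 dd = normalize(d.xy);
    float u = atan(dd.y, dd.x);            // azimuth around Z
    float v = acos(clamp(d.z, -1.0, 1.0)); // inclination from Z
    return vec2((u + pi) / (2.0 * pi), v / pi);
}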
In case of an overlapped texture you also need to add this:
const float ov = 1.0/8.0; // overlap size
u = ov + (u/(ov+ov+1.0));
v = ov + (v/(ov+ov+1.0));
And the texture would look like:
In case your quads cover more than 1/8 of your original texture, you need to enlarge the overlap ...
Now, to handle the corners of the QUAD instead of just the axis, you could translate the quad by distance l in the Z+ direction in mesh local coordinates, apply M to them, and use those 4 points as directions to compute u,v in the vertex shader. The l will affect how much of the texture area is used for the quad ... This approach might even handle roll, but I have not tested any of this yet...
After implementing it, my fears were well founded, as any 2 Euler angles affect each other, so the result is OK in most directions, but in edge cases the stuff gets mirrored and/or jumps in one or both axes, probably due to the area coverage difference between 3 DOF and 2 DOF (unless I made a bug in my code, or the math was not computed correctly in the vertex shader, which has happened to me before due to a bug in drivers).
If you are going for azimuth/elevation, that should be fine, as it is 2 DOF too; the equations above should work for it, +/- some range conversion if needed.

Push particles away from mouseposition in glsl and three js

I have the following setup for my THREE.Points Object:
this.particleGeometry = new THREE.BufferGeometry()
this.particleMaterial = new THREE.ShaderMaterial(
{
vertexShader: vshader,
fragmentShader: fshader,
blending: THREE.AdditiveBlending,
depthWrite: false,
uniforms: {
uTime: new THREE.Uniform(0),
uMousePosition: this.mousePosition
}
}
)
and then some code to place points in the BufferGeometry on a sphere. That is working fine.
I also set up a Raycaster to track the mouse position intersecting a hidden plane, and I update the uniform uMousePosition accordingly. That also works fine; the mouse position reaches my vertex shader.
Now I am trying to make the particles within a certain distance d of the mouse push away from it, with the closest ones pushed the most of course, and also to apply a gravity back towards their original positions to restore everything over time.
So here is what I have in my vertex shader:
float lerp(float a, float b, float amount) {
    return a + (b - a) * amount;
}
void main() {
    vec3 p = position;
    float dist = min(distance(p, mousePosition), 1.);
    float lerpFactor = .2;
    p.x = lerp(p.x, position.x * dist, lerpFactor);
    p.y = lerp(p.y, position.y * dist, lerpFactor);
    p.z = lerp(p.z, position.z * dist, lerpFactor); // mouse is always at z = 0
    vec4 mvPosition = modelViewMatrix * vec4(p, 1.);
    gl_PointSize = 30. * (1. / -mvPosition.z);
    gl_Position = projectionMatrix * mvPosition;
}
And here is what it looks like when the mouse is outside the sphere (I added a small sphere that moves with the mouse position to indicate it):
And here when the mouse is inside:
Outside already looks kind of correct, but with the mouse inside, the particles only move closer to their original positions, where they should be pushed further outside instead. I guess I somehow have to determine the direction away from the mouse.
Also, the lerp method does not lerp; the particles jump directly to their positions.
So I wonder how I get the correct distance to the mouse to always move the particles within a certain area, and also how to animate the lerp / gravity effect.
Here is how you could do it as a first approximation:
body{
overflow: hidden;
margin: 0;
}
<script type="module">
import * as THREE from "https://cdn.skypack.dev/three@0.136.0";
import {OrbitControls} from "https://cdn.skypack.dev/three@0.136.0/examples/jsm/controls/OrbitControls.js";
import * as BufferGeometryUtils from "https://cdn.skypack.dev/three@0.136.0/examples/jsm/utils/BufferGeometryUtils.js";
let scene = new THREE.Scene();
let camera = new THREE.PerspectiveCamera(60, innerWidth / innerHeight, 1, 100);
camera.position.set(0, 0, 10);
let renderer = new THREE.WebGLRenderer();
renderer.setSize(innerWidth, innerHeight);
document.body.appendChild(renderer.domElement);
let controls = new OrbitControls(camera, renderer.domElement);
let marker = new THREE.Mesh(new THREE.SphereGeometry(0.5, 16, 8), new THREE.MeshBasicMaterial({color: "red", wireframe: true}));
scene.add(marker);
let g = new THREE.IcosahedronGeometry(4, 20);
g = BufferGeometryUtils.mergeVertices(g);
let uniforms = {
mousePos: {value: new THREE.Vector3()}
}
let m = new THREE.PointsMaterial({
size: 0.1,
onBeforeCompile: shader => {
shader.uniforms.mousePos = uniforms.mousePos;
shader.vertexShader = `
uniform vec3 mousePos;
${shader.vertexShader}
`.replace(
`#include <begin_vertex>`,
`#include <begin_vertex>
vec3 seg = position - mousePos;
vec3 dir = normalize(seg);
float dist = length(seg);
if (dist < 2.){
float force = clamp(1. / (dist * dist), 0., 1.);
transformed += dir * force;
}
`
);
console.log(shader.vertexShader);
}
});
let p = new THREE.Points(g, m);
scene.add(p);
let clock = new THREE.Clock();
renderer.setAnimationLoop( _ => {
let t = clock.getElapsedTime();
marker.position.x = Math.sin(t * 0.5) * 5;
marker.position.y = Math.cos(t * 0.3) * 5;
uniforms.mousePos.value.copy(marker.position);
renderer.render(scene, camera);
})
</script>
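A note on the force falloff above: clamp(1. / (dist * dist), 0., 1.) caps the force so particles right next to the marker are not flung away to infinity. If you prefer a falloff that fades smoothly to zero at the edge of the interaction radius, a smoothstep variant is a common alternative (a sketch of mine, replacing only the body of the if block):
float force = 1. - smoothstep(0., 2., dist); // 1 at the marker, fading to 0 at radius 2
transformed += dir * force;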

Shader for solid color for each triangle of plane geometry

I have been learning WebGL from webglfundamentals, where I came across a simple shader example where you can paint triangles with solid colors. Here is a link to the tutorial and the original demo.
I tried to create the same effect in three.js using a plane geometry, but I can't manage to achieve the solid color shader. When I use almost the same setup in three.js, I get more of a gradient effect. What am I doing wrong here? (I am noticing that my shader isn't consistent either, as it renders differently on refresh.) Also, is there a place to learn shaders specifically for three.js?
var vShader = `
precision mediump float;
precision mediump int;
attribute vec4 a_color;
varying vec4 vColor;
void main() {
gl_Position = projectionMatrix * modelViewMatrix * vec4( position, 1.0 );
vColor = a_color;
}`;
var fShader = ` precision mediump float;
precision mediump int;
varying vec4 vColor;
void main() {
vec4 color = vec4( vColor );
gl_FragColor = vColor;
}`;
var row = 1;
var col = 1;
var w = 600;
var h = 400;
scene = new THREE.Scene();
camera = new THREE.PerspectiveCamera(45, w / h, 0.1, 100);
renderer = new THREE.WebGLRenderer();
camera.position.z = 5;
var viewSize = getViewSize(camera);
document.body.appendChild(renderer.domElement);
renderer.setSize(w, h);
var geometry = new THREE.PlaneBufferGeometry(viewSize.width, viewSize.height, col, row);
var color = new THREE.Color();
const blossomPalette = [0xff0000, 0xff0000, 0xff0000, 0x0000ff, 0x0000ff, 0x0000ff];
var colors = new Float32Array(4 * 2 * 3 * col * row);
for (let i = 0; i < 4; i++) {
color.setHex(blossomPalette[Math.floor(Math.random() * blossomPalette.length)]);
color.toArray(colors, i * 3);
}
geometry.setAttribute('a_color', new THREE.BufferAttribute(colors, 4, false));
var material = new THREE.ShaderMaterial({
vertexShader: vShader,
fragmentShader: fShader,
transparent: true,
blending: THREE.AdditiveBlending,
depthTest: false,
vertexColors: true,
flatShading: true
});
var plane = new THREE.Mesh(geometry, material);
scene.add(plane);
function animate() {
renderer.render(scene, camera);
requestAnimationFrame(animate)
}
animate();
function getViewSize(camera) {
var fovInRadians = (camera.fov * Math.PI) / 180;
var height = Math.abs(camera.position.z * Math.tan(fovInRadians / 2) * 2);
return {
width: height * camera.aspect,
height: height
}
}
<script src="https://threejsfundamentals.org/threejs/resources/threejs/r122/build/three.min.js"></script>
The PlaneBufferGeometry in three.js uses indexed vertices to share vertices, so there are only 4 vertices, and then 6 indices that use those 4 vertices to make 2 triangles. That means you can't give the triangles different solid colors, because they share 2 vertices and a vertex can only have 1 color.
Further, the code chooses random colors for each vertex, so even if you used 6 vertices so that the 2 triangles didn't share any, you still wouldn't get the result you linked to; instead you'd get this result, which is further down the page in the same tutorial. (The random choice is also why your render changes on refresh.)
Finally, the code only generates 3 floats per color, so you need to set the number of components for the color attribute to 3 instead of 4.
If you want to reproduce the WebGL sample, you'll need to provide your own 6 vertices:
var vShader = `
precision mediump float;
precision mediump int;
attribute vec4 a_color;
varying vec4 vColor;
void main() {
gl_Position = projectionMatrix * modelViewMatrix * vec4( position, 1.0 );
vColor = a_color;
}`;
var fShader = ` precision mediump float;
precision mediump int;
varying vec4 vColor;
void main() {
vec4 color = vec4( vColor );
gl_FragColor = vColor;
}`;
var row = 1;
var col = 1;
var w = 600;
var h = 400;
scene = new THREE.Scene();
camera = new THREE.PerspectiveCamera(45, w / h, 0.1, 100);
renderer = new THREE.WebGLRenderer();
camera.position.z = 5;
var viewSize = getViewSize(camera);
document.body.appendChild(renderer.domElement);
renderer.setSize(w, h);
var geometry = new THREE.BufferGeometry();
var x = viewSize.width / 2;
var y = viewSize.height / 2;
var positions = new Float32Array([
-x, -y, 0,
x, -y, 0,
-x, y, 0,
-x, y, 0,
x, -y, 0,
x, y, 0,
]);
geometry.setAttribute('position', new THREE.BufferAttribute(positions, 3, false));
var color = new THREE.Color();
const blossomPalette = [
0xff0000, 0xff0000, 0xff0000,
0x0000ff, 0x0000ff, 0x0000ff,
];
var colors = new Float32Array(2 * 3 * 3 * col * row);
for (let i = 0; i < 6; i++) {
color.setHex(blossomPalette[i]);
color.toArray(colors, i * 3);
}
geometry.setAttribute('a_color', new THREE.BufferAttribute(colors, 3, false));
var material = new THREE.ShaderMaterial({
vertexShader: vShader,
fragmentShader: fShader,
transparent: true,
blending: THREE.AdditiveBlending,
depthTest: false,
vertexColors: true,
flatShading: true
});
var plane = new THREE.Mesh(geometry, material);
scene.add(plane);
function animate() {
renderer.render(scene, camera);
requestAnimationFrame(animate)
}
animate();
function getViewSize(camera) {
var fovInRadians = (camera.fov * Math.PI) / 180;
var height = Math.abs(camera.position.z * Math.tan(fovInRadians / 2) * 2);
return {
width: height * camera.aspect,
height: height
}
}
<script src="https://threejsfundamentals.org/threejs/resources/threejs/r122/build/three.min.js"></script>
See this article

FBO Particles with Cumulative Movement

Link to thread threejs discourse: https://discourse.threejs.org/t/fbo-particles-with-cumulative-movement/7221
This is difficult for me to explain because of my limited knowledge on the subject, but I'm gonna do my best...
At this point, I have a basic FBO particle system in place that works. The following is how it's set up:
var FBO = function( exports ){
var scene, orthoCamera, rtt;
exports.init = function( width, height, renderer, simulationMaterial, renderMaterial ){
var gl = renderer.getContext();
//1 we need FLOAT Textures to store positions
//https://github.com/KhronosGroup/WebGL/blob/master/sdk/tests/conformance/extensions/oes-texture-float.html
if (!gl.getExtension("OES_texture_float")){
throw new Error( "float textures not supported" );
}
//2 we need to access textures from within the vertex shader
//https://github.com/KhronosGroup/WebGL/blob/90ceaac0c4546b1aad634a6a5c4d2dfae9f4d124/conformance-suites/1.0.0/extra/webgl-info.html
if( gl.getParameter(gl.MAX_VERTEX_TEXTURE_IMAGE_UNITS) == 0 ) {
throw new Error( "vertex shader cannot read textures" );
}
//3 rtt setup
scene = new THREE.Scene();
orthoCamera = new THREE.OrthographicCamera(-1,1,1,-1,1/Math.pow( 2, 53 ),1 );
//4 create a target texture
var options = {
minFilter: THREE.NearestFilter,//important as we want to sample square pixels
magFilter: THREE.NearestFilter,//
format: THREE.RGBAFormat,//180407 changed to RGBAFormat
type:THREE.FloatType//important as we need precise coordinates (not ints)
};
rtt = new THREE.WebGLRenderTarget( width,height, options);
//5 the simulation:
//create a bi-unit quadrilateral and use the simulation material to update the Float Texture
var geom = new THREE.BufferGeometry();
geom.addAttribute( 'position', new THREE.BufferAttribute( new Float32Array([ -1,-1,0, 1,-1,0, 1,1,0, -1,-1, 0, 1, 1, 0, -1,1,0 ]), 3 ) );
geom.addAttribute( 'uv', new THREE.BufferAttribute( new Float32Array([ 0,1, 1,1, 1,0, 0,1, 1,0, 0,0 ]), 2 ) );
scene.add( new THREE.Mesh( geom, simulationMaterial ) );
//6 the particles:
//create a vertex buffer of size width * height with normalized coordinates
var l = (width * height );
var vertices = new Float32Array( l * 3 );
for ( var i = 0; i < l; i++ ) {
var i3 = i * 3;
vertices[ i3 ] = ( i % width ) / width ;
vertices[ i3 + 1 ] = ( i / width ) / height;
}
//create the particles geometry
var geometry = new THREE.BufferGeometry();
geometry.addAttribute( 'position', new THREE.BufferAttribute( vertices, 3 ) );
//the rendermaterial is used to render the particles
exports.particles = new THREE.Points( geometry, renderMaterial );
exports.particles.frustumCulled = false;
exports.renderer = renderer;
};
//7 update loop
exports.update = function(){
//1 update the simulation and render the result in a target texture
// exports.renderer.render( scene, orthoCamera, rtt, true );
exports.renderer.setRenderTarget( rtt );
exports.renderer.render( scene, orthoCamera );
exports.renderer.setRenderTarget( null );
//2 use the result of the swap as the new position for the particles' renderer
// had to add .texture on the end of rtt for r103
exports.particles.material.uniforms.positions.value = rtt.texture;
};
return exports;
}({});
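For context, a minimal usage sketch of this module (my own illustration; it assumes renderer, scene, camera, and the two materials built from the shaders below already exist):
FBO.init(256, 256, renderer, simulationMaterial, renderMaterial);
scene.add(FBO.particles);
(function loop() {
  requestAnimationFrame(loop);
  FBO.update();                   // run the simulation pass into the render target
  renderer.render(scene, camera); // draw the points with the updated positions
})();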
The following are the shaders it uses:
<script type="x-shader/x-vertex" id="simulation_vs">
//vertex shader
varying vec2 vUv;
void main() {
vUv = vec2(uv.x, uv.y);
gl_Position = projectionMatrix * modelViewMatrix * vec4( position, 1.0 );
}
</script>
<script type="x-shader/x-fragment" id="simulation_fs">
//fragment Shader
uniform sampler2D positions;//DATA Texture containing original positions
varying vec2 vUv;
void main() {
//basic simulation: displays the particles in place.
vec3 pos = texture2D( positions, vUv ).rgb;
// we can move the particle here
gl_FragColor = vec4( pos,1.0 );
}
</script>
<script type="x-shader/x-vertex" id="render_vs">
//vertex shader
uniform sampler2D positions;//RenderTarget containing the transformed positions
uniform float pointSize;//size
void main() {
//the mesh is a normalized square so the uvs = the xy positions of the vertices
vec3 pos = texture2D( positions, position.xy ).xyz;
//pos now contains a 3D position in space, we can use it as a regular vertex
//regular projection of our position
gl_Position = projectionMatrix * modelViewMatrix * vec4( pos, 1.0 );
//sets the point size
gl_PointSize = pointSize;
}
</script>
<script type="x-shader/x-fragment" id="render_fs">
//fragment shader
void main()
{
gl_FragColor = vec4( vec3( 1. ), .25 );
}
</script>
I understand that I would move the particles in the "simulation_fs", but if I try to do something like this in that shader,
pos.x += 1.0;
it will still only shift them one unit from the original texture position. I want the movement to be cumulative.
Would using a 2nd set of simulation shaders allow me to move the particles in a cumulative way? Is that a practical solution?
For cumulative movement, you need to use uniforms:
Look into passing a uniform named time to your vertex shader. Then you can update the time once per frame, and you can use that to animate your vertex positions. For example:
position.x = 2.0 * time; // Increment linearly
position.x = sin(time); // Sine wave back-and-forth animation
Without a changing variable, your vertex animations will be static from one frame to the next.
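A minimal sketch of the JavaScript side of that (my illustration; it assumes material is a ShaderMaterial whose vertex shader declares uniform float time, and that renderer, scene, and camera already exist):
var clock = new THREE.Clock();
material.uniforms.time = { value: 0 };
function animate() {
  requestAnimationFrame(animate);
  material.uniforms.time.value = clock.getElapsedTime(); // monotonically increasing seconds
  renderer.render(scene, camera);
}
animate();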
I needed to accomplish something like this, and I set up an absolutely minimal example that I can tweak in the future. You'll see the positional changes are cumulative.
The following was simplified from a wonderful discussion of FBOs by Nicolas Barradeau (a WebGL wizard):
// specify the container where we'll render the scene
var elem = document.querySelector('body'),
elemW = elem.clientWidth,
elemH = elem.clientHeight
// generate a scene object
var scene = new THREE.Scene();
// generate a camera
var camera = new THREE.PerspectiveCamera(75, elemW/elemH, 0.001, 100);
// generate a renderer
var renderer = new THREE.WebGLRenderer({antialias: true, alpha: true});
renderer.setPixelRatio(window.devicePixelRatio);
renderer.setSize(elemW, elemH);
elem.appendChild(renderer.domElement);
// generate controls
var controls = new THREE.TrackballControls(camera, renderer.domElement);
// position camera and controls
camera.position.set(0.5, 0.5, -5);
controls.target = new THREE.Vector3(0.5, 0.5, 0);
/**
* FBO
**/
// verify browser agent supports "frame buffer object" features
gl = renderer.getContext();
if (!gl.getExtension('OES_texture_float') ||
gl.getParameter(gl.MAX_VERTEX_TEXTURE_IMAGE_UNITS) == 0) {
alert(' * Cannot create FBO :(');
}
// set initial positions of `w*h` particles
var w = 256, h = 256,
i = 0,
data = new Float32Array(w*h*3);
for (var x=0; x<w; x++) {
for (var y=0; y<h; y++) {
data[i++] = x/w;
data[i++] = y/h;
data[i++] = 0;
}
}
// feed those positions into a data texture
var dataTex = new THREE.DataTexture(data, w, h, THREE.RGBFormat, THREE.FloatType);
dataTex.minFilter = THREE.NearestFilter;
dataTex.magFilter = THREE.NearestFilter;
dataTex.needsUpdate = true;
// add the data texture with positions to a material for the simulation
var simMaterial = new THREE.RawShaderMaterial({
uniforms: { posTex: { type: 't', value: dataTex }, },
vertexShader: document.querySelector('#sim-vs').textContent,
fragmentShader: document.querySelector('#sim-fs').textContent,
});
// delete dataTex; it isn't used after initializing point positions
delete dataTex;
THREE.FBO = function(w, simMat) {
this.scene = new THREE.Scene();
this.camera = new THREE.OrthographicCamera(-w/2, w/2, w/2, -w/2, -1, 1);
this.scene.add(new THREE.Mesh(new THREE.PlaneGeometry(w, w), simMat));
};
// create a scene where we'll render the positional attributes
var fbo = new THREE.FBO(w, simMaterial);
// create render targets a + b to which the simulation will be rendered
var renderTargetA = new THREE.WebGLRenderTarget(w, h, {
wrapS: THREE.RepeatWrapping,
wrapT: THREE.RepeatWrapping,
minFilter: THREE.NearestFilter,
magFilter: THREE.NearestFilter,
format: THREE.RGBFormat,
type: THREE.FloatType,
stencilBuffer: false,
});
// a second render target lets us store input + output positional states
renderTargetB = renderTargetA.clone();
// render the positions to the render targets
renderer.render(fbo.scene, fbo.camera, renderTargetA, false);
renderer.render(fbo.scene, fbo.camera, renderTargetB, false);
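// note: this 4-argument render(scene, camera, target, forceClear) signature matches
// the three.js r101 build included below; in newer releases you would instead call
// renderer.setRenderTarget(renderTargetA); renderer.render(fbo.scene, fbo.camera);
// as shown in the FBO module earlier in this thread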
// store the uv attrs; each is x,y and identifies a given point's
// position data within the positional texture; must be scaled 0:1!
var geo = new THREE.BufferGeometry(),
arr = new Float32Array(w*h*3);
for (var i=0; i<arr.length; i++) {
arr[i++] = (i%w)/w;
arr[i++] = Math.floor(i/w)/h;
arr[i] = 0;
}
geo.addAttribute('position', new THREE.BufferAttribute(arr, 3, true))
// create material the user sees
var material = new THREE.RawShaderMaterial({
uniforms: {
posMap: { type: 't', value: null }, // `posMap` is set each render
},
vertexShader: document.querySelector('#ui-vert').textContent,
fragmentShader: document.querySelector('#ui-frag').textContent,
transparent: true,
});
// add the points the user sees to the scene
var mesh = new THREE.Points(geo, material);
scene.add(mesh);
function render() {
// at the start of the render block, A is one frame behind B
var oldA = renderTargetA; // store A, the penultimate state
renderTargetA = renderTargetB; // advance A to the updated state
renderTargetB = oldA; // set B to the penultimate state
// pass the updated positional values to the simulation
simMaterial.uniforms.posTex.value = renderTargetA.texture;
// run a frame and store the new positional values in renderTargetB
renderer.render(fbo.scene, fbo.camera, renderTargetB, false);
// pass the new positional values to the scene users see
material.uniforms.posMap.value = renderTargetB.texture;
// render the scene users see as normal
renderer.render(scene, camera);
controls.update();
requestAnimationFrame(render);
};
render();
html, body { width: 100%; height: 100%; background: #000; }
body { margin: 0; overflow: hidden; }
canvas { width: 100%; height: 100%; }
<script src="https://cdnjs.cloudflare.com/ajax/libs/three.js/101/three.min.js"></script>
<script src="https://threejs.org/examples/js/controls/TrackballControls.js"></script>
<!-- The simulation shaders update positional attributes -->
<script id='sim-vs' type='x-shader/x-vert'>
precision mediump float;
uniform mat4 projectionMatrix;
uniform mat4 modelViewMatrix;
attribute vec2 uv; // x,y offsets of each point in texture
attribute vec3 position;
varying vec2 vUv;
void main() {
vUv = vec2(uv.x, 1.0 - uv.y);
gl_Position = projectionMatrix * modelViewMatrix * vec4(position, 1.0);
}
</script>
<script id='sim-fs' type='x-shader/x-frag'>
precision mediump float;
uniform sampler2D posTex;
varying vec2 vUv;
void main() {
// read the supplied x,y,z vert positions
vec3 pos = texture2D(posTex, vUv).xyz;
// update the positional attributes here!
pos.x += cos(pos.y) / 100.0;
pos.y += tan(pos.x) / 100.0;
// render the new positional attributes
gl_FragColor = vec4(pos, 1.0);
}
</script>
<!-- The ui shaders render what the user sees -->
<script id='ui-vert' type='x-shader/x-vert'>
precision mediump float;
uniform sampler2D posMap; // contains positional data read from sim-fs
uniform mat4 projectionMatrix;
uniform mat4 modelViewMatrix;
attribute vec2 position;
void main() {
// read this particle's position, which is stored as a pixel color
vec3 pos = texture2D(posMap, position.xy).xyz;
// project this particle
vec4 mvPosition = modelViewMatrix * vec4(pos, 1.0);
gl_Position = projectionMatrix * mvPosition;
// set the size of each particle
gl_PointSize = 0.3 / -mvPosition.z;
}
</script>
<script id='ui-frag' type='x-shader/x-frag'>
precision mediump float;
void main() {
gl_FragColor = vec4(0.0, 0.5, 1.5, 1.0);
}
</script>

How to mix / blend between 2 vertex positions based on distance from camera?

I'm trying to mix / blend between 2 different vertex positions depending on the distance from the camera. Specifically, I'm trying to create an effect that blends between a horizontal plane closer to the camera and a vertical plane in the distance. The result should be a curved plane going away and up from the current camera position.
I want to blend from this (a plane flat on the ground):
To this (the same plane, just rotated 90 degrees):
The implementation I have so far feels close, but I just can't put my finger on what pieces I need to finish it. I took an approach from a similar Tangram demo (shader code); however, I'm unable to get results anywhere near it. The Tangram example also uses a completely different setup to what I'm using in three.js, so I've not been able to replicate everything.
This is what I have so far: https://jsfiddle.net/robhawkes/a97tu864/
varying float distance;
mat4 rotateX(float rotationX) {
return mat4(
vec4(1.0,0.0,0.0,0.0),
vec4(0.0,cos(rotationX),-sin(rotationX),0.0),
vec4(0.0,sin(rotationX),cos(rotationX),0.0),
vec4(0.0,0.0,0.0,1.0)
);
}
void main()
{
vec4 vPosition = vec4(position, 1.0);
vec4 modelViewPosition = modelViewMatrix * vPosition;
float bend = radians(-90.0);
vec4 newPos = rotateX(bend) * vPosition;
distance = -modelViewPosition.z;
// Show bent position
//gl_Position = projectionMatrix * modelViewMatrix * newPos;
float factor = 0.0;
//if (vPosition.x > 0.0) {
// factor = 1.0;
//}
//factor = clamp(0.0, 1.0, distance / 2000.0);
vPosition = mix(vPosition, newPos, factor);
gl_Position = projectionMatrix * modelViewMatrix * vPosition;
}
I'm doing the following:
Calculate the rotated position of the vertex (the vertical version)
Find the distance from the vertex to the camera
Use mix to blend between the horizontal position and vertical position depending on the distance
I've tried multiple approaches and I just can't seem to get it to work correctly.
Any ideas? Even pointing me down the right path will be immensely helpful as my shader/matrix knowledge is limited.
The major issue is that you tessellate the THREE.PlaneBufferGeometry in width segments, but not in height segments:
groundGeometry = new THREE.PlaneBufferGeometry(
1000, 10000,
100, // <----- widthSegments
100 ); // <----- heightSegments is missing
Now you can use the z coordinate of the view space for the interpolation:
float factor = -modelViewPosition.z / 2000.0;
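One refinement to consider (my addition, not part of the original answer): mix extrapolates once the factor exceeds 1.0, so beyond 2000 units the plane would keep rotating past vertical. Clamping keeps the blend inside the intended range:
float factor = clamp(-modelViewPosition.z / 2000.0, 0.0, 1.0);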
var camera, controls, scene, renderer;
var groundGeometry, groundMaterial, groundMesh;
var ambientLight;
init();
initLight();
initGround();
animate();
function init() {
camera = new THREE.PerspectiveCamera( 70, window.innerWidth / window.innerHeight, 0.01, 10000 );
camera.position.y = 500;
camera.position.z = 1000;
controls = new THREE.MapControls( camera );
controls.maxPolarAngle = Math.PI / 2;
scene = new THREE.Scene();
scene.add(camera);
var axesHelper = new THREE.AxesHelper( 500 );
scene.add( axesHelper );
renderer = new THREE.WebGLRenderer( { antialias: true } );
renderer.setSize( window.innerWidth, window.innerHeight );
document.body.appendChild( renderer.domElement );
}
function initLight() {
ambientLight = new THREE.AmbientLight( 0x404040 );
scene.add( ambientLight );
}
function initGround() {
groundMaterial = new THREE.ShaderMaterial({
vertexShader: document.getElementById( 'vertexShader' ).textContent,
fragmentShader: document.getElementById( 'fragmentShader' ).textContent,
transparent: true
});
groundGeometry = new THREE.PlaneBufferGeometry( 1000, 10000, 100, 100 );
groundMesh = new THREE.Mesh( groundGeometry, groundMaterial );
groundMesh.position.z = -3000;
groundMesh.position.y = -100;
groundMesh.rotateX(-Math.PI / 2)
scene.add( groundMesh );
}
function animate() {
requestAnimationFrame( animate );
controls.update();
renderer.render( scene, camera );
}
<script type="x-shader/x-vertex" id="vertexShader">
varying float distance;
mat4 rotateX(float rotationX) {
return mat4(
vec4(1.0,0.0,0.0,0.0),
vec4(0.0,cos(rotationX),-sin(rotationX),0.0),
vec4(0.0,sin(rotationX),cos(rotationX),0.0),
vec4(0.0,0.0,0.0,1.0)
);
}
void main()
{
vec4 vPosition = vec4(position, 1.0);
vec4 modelViewPosition = modelViewMatrix * vPosition;
float bend = radians(-90.0);
vec4 newPos = rotateX(bend) * vPosition;
distance = -modelViewPosition.z;
float factor = -modelViewPosition.z / 2000.0;
vPosition = mix(vPosition, newPos, factor);
gl_Position = projectionMatrix * modelViewMatrix * vPosition;
}
</script>
<script type="x-shader/x-fragment" id="fragmentShader">
varying float distance;
void main() {
if (distance < 3000.0) {
gl_FragColor = vec4(1.0, 0.0, 0.0, 1.0);
} else {
gl_FragColor = vec4(0.0, 1.0, 0.0, 1.0);
}
}
</script>
<script src="https://threejs.org/build/three.min.js"></script>
<script src="https://rawgit.com/mrdoob/three.js/dev/examples/js/controls/MapControls.js"></script>
