How to change animation speed during render in Three.js

I've created a simple PlaneBufferGeometry in THREE.js
Geometry
const geometry = new THREE.PlaneBufferGeometry(10, 10, 10, 10);
const material = new THREE.MeshBasicMaterial( { side: THREE.DoubleSide, wireframe: true } );
const mesh = new THREE.Mesh( geometry, material );
const positions = geometry.attributes.position.array;
scene.add(mesh);
I'm then looping through each vertex of the position attribute in order to randomly change its z position during the render.
function animate() {
    render();
    requestAnimationFrame(animate);
}

function render() {
    mesh.rotateZ(0.001);
    const geoArray = geometry.attributes.position;
    for (var i = 0; i < geoArray.count; i++) {
        geoArray.setZ(i, Math.round(Math.random() * -2));
    }
    mesh.geometry.attributes.position.needsUpdate = true;
    camera.updateProjectionMatrix();
    renderer.render(scene, camera);
}
This works fine, however the animation speed is way too fast. How can I slow down the time during render so that the z positions change smoothly?
I have tried setting a setTimeout inside the animate function, however this just slows the framerate down and not the animation itself.
Thank you.

Right now, you are animating with fixed scalar values, which is normally something you don't want to do in production apps. You should always take a time delta into account so your animation is framerate independent. This also makes it easier to scale the time of your animation. So create an instance of THREE.Clock in your app like so:
const clock = new THREE.Clock();
In your animation loop, retrieve the time delta value for the current frame and use it:
let timeDelta = clock.getDelta();
mesh.rotateZ(timeDelta);
If you want to change the animation speed, multiply the delta value by another scalar representing your time scale. For example:
timeDelta *= 0.1; // this will slow down the animation
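To tie this back to the question's plane animation, here is a minimal sketch (the target/speed scheme is illustrative, not part of the original answer) that eases each vertex toward a random z target at a framerate-independent rate, assuming the geometry, mesh, renderer, scene and camera from the question are in scope:
const clock = new THREE.Clock();
const position = geometry.attributes.position;
const speed = 0.5; // world units per second; tune to taste

// One random z target per vertex; refreshed whenever it is reached.
const targets = new Float32Array(position.count);
for (let i = 0; i < position.count; i++) {
    targets[i] = Math.random() * -2;
}

function animate() {
    const delta = clock.getDelta();
    mesh.rotateZ(delta * 0.1);
    for (let i = 0; i < position.count; i++) {
        const z = position.getZ(i);
        const step = speed * delta * Math.sign(targets[i] - z);
        if (Math.abs(targets[i] - z) <= Math.abs(step)) {
            // Arrived: snap to the target and pick a new one.
            position.setZ(i, targets[i]);
            targets[i] = Math.random() * -2;
        } else {
            position.setZ(i, z + step);
        }
    }
    position.needsUpdate = true;
    renderer.render(scene, camera);
    requestAnimationFrame(animate);
}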

Related

Mangled rendering when transforming scene coordinates instead of camera coordinates

I've been learning how to integrate ThreeJS with Mapbox, using this example. It struck me as weird that the approach is to leave the loaded model in its own coordinate system, and transform the camera location on render. So I attempted to rewrite the code, so that the GLTF model is transformed when loaded, then the ThreeJS camera is just synchronised with the Mapbox camera, with no further modifications.
The code now looks like this:
function newScene() {
    const scene = new THREE.Scene();
    // create two three.js lights to illuminate the model
    const directionalLight = new THREE.DirectionalLight(0xffffff);
    directionalLight.position.set(0, -70, 100).normalize();
    scene.add(directionalLight);
    const directionalLight2 = new THREE.DirectionalLight(0xffffff);
    directionalLight2.position.set(0, 70, 100).normalize();
    scene.add(directionalLight2);
    return scene;
}

function newRenderer(map, gl) {
    // use the Mapbox GL JS map canvas for three.js
    const renderer = new THREE.WebGLRenderer({
        canvas: map.getCanvas(),
        context: gl,
        antialias: true
    });
    renderer.autoClear = false;
    return renderer;
}

// create a custom layer for a 3D model per the CustomLayerInterface
export function addModel(modelPath, origin, altitude = 0, orientation = [Math.PI / 2, 0, 0]) {
    const coords = mapboxgl.MercatorCoordinate.fromLngLat(origin, altitude);
    // transformation parameters to position, rotate and scale the 3D model onto the map
    const modelTransform = {
        translateX: coords.x,
        translateY: coords.y,
        translateZ: coords.z,
        rotateX: orientation[0],
        rotateY: orientation[1],
        rotateZ: orientation[2],
        /* Since our 3D model is in real world meters, a scale transform needs to be
         * applied since the CustomLayerInterface expects units in MercatorCoordinates.
         */
        scale: coords.meterInMercatorCoordinateUnits()
    };
    const scaleVector = new THREE.Vector3(modelTransform.scale, -modelTransform.scale, modelTransform.scale);
    return {
        id: "3d-model",
        type: "custom",
        renderingMode: "3d",
        onAdd: function(map, gl) {
            this.map = map;
            this.camera = new THREE.Camera();
            this.scene = newScene();
            this.renderer = newRenderer(map, gl);
            // use the three.js GLTF loader to add the 3D model to the three.js scene
            new THREE.GLTFLoader()
                .load(modelPath, gltf => {
                    gltf.scene.position.fromArray([coords.x, coords.y, coords.z]);
                    gltf.scene.setRotationFromEuler(new THREE.Euler().fromArray(orientation));
                    gltf.scene.scale.copy(scaleVector);
                    this.scene.add(gltf.scene);
                    const bbox = new THREE.Box3().setFromObject(gltf.scene);
                    console.log(bbox);
                    this.scene.add(new THREE.Box3Helper(bbox, 'blue'));
                });
        },
        render: function(gl, matrix) {
            this.camera.projectionMatrix = new THREE.Matrix4().fromArray(matrix);
            this.renderer.state.reset();
            this.renderer.render(this.scene, this.camera);
            // this.map.triggerRepaint();
        }
    };
}
It basically works, in that a model is loaded and drawn in the right location in the Mapbox world. However, instead of rendering correctly as it did before, the model now looks like a mangled mess that jitters around chaotically as the camera moves.
I'm not yet familiar enough with ThreeJS to have any idea what I did wrong.
Here's a side-by-side comparison of the old, functional code on the right, vs the new broken code on the left.
Further investigation
I suspect the cause may be to do with shrinking all the coordinates down to within the [0..1] range of the projected coordinate system and losing floating-point precision. When I scale the model up by 100 times, it renders messy and glitchy, but at least recognisable as something.
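For comparison, the Mapbox example the question links to avoids this precision problem by keeping the model in its own local coordinates and folding the Mercator translate/rotate/scale into the camera's projection matrix on every frame. A condensed sketch of that render function, reusing the modelTransform from above (only the X rotation is shown, matching the default orientation):
render: function(gl, matrix) {
    // Fold the model's Mercator placement into the projection matrix,
    // so the model itself stays near the origin at full float precision.
    const rotationX = new THREE.Matrix4().makeRotationAxis(new THREE.Vector3(1, 0, 0), modelTransform.rotateX);
    const m = new THREE.Matrix4().fromArray(matrix);
    const l = new THREE.Matrix4()
        .makeTranslation(modelTransform.translateX, modelTransform.translateY, modelTransform.translateZ)
        .scale(new THREE.Vector3(modelTransform.scale, -modelTransform.scale, modelTransform.scale))
        .multiply(rotationX);
    this.camera.projectionMatrix = m.multiply(l);
    this.renderer.state.reset();
    this.renderer.render(this.scene, this.camera);
}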

Using OutlinePass (THREE.js r102) with skinned mesh

/examples/js/postprocessing/OutlinePass.js from THREE.js r102 does not appear to work with skinned meshes. Specifically, the rendered outline always stays in the mesh's rest position.
Is there some way to get this working (that is, to update the outline to reflect the current pose of an animated mesh)? OutlinePass does not appear to be documented (apart from the comments in the code itself).
Is there some other accepted method of outlining animated meshes? I'm in the process of migrating some code from r7x, where I ended up accomplishing this by manually creating a copy of the mesh and applying a shader material that scales along the normals. I can do that again, but if there's a simpler, better-supported method to accomplish the same effect, I'd rather use it instead of reproducing a method that breaks with every new major release.
A simple jsfiddle illustrating the issue:
https://jsfiddle.net/L69pe5q2/3/
This is the code from the jsfiddle. The mesh I use is the SimpleSkinning.gltf example from the three.js distribution. In the jsfiddle I load it from a dataURI so it doesn't complain about XSS loading, and I've edited the base64-encoded data out (and replaced it with [FOO]) in the code below, purely for readability.
The OutlinePass is created and added to the composer in initComposer().
var camera, light, renderer, composer, mixer, loader, clock;
var scene, mesh, outlinePass;
var height = 480,
    width = 640;
var clearColor = '#666666';

load();

function load() {
    loader = new THREE.GLTFLoader();
    clock = new THREE.Clock();
    scene = new THREE.Scene();
    loader.load('data:text/plain;base64,[FOO]', function(obj) {
        scene.add(obj.scene);
        mixer = new THREE.AnimationMixer(obj.scene);
        var clip = THREE.AnimationClip.findByName(obj.animations, 'Take 01');
        var a = mixer.clipAction(clip);
        a.reset();
        a.play();
        mesh = obj.scene;
        mesh.position.set(-7, 2.5, -7);
        init();
        animate();
    });
}

function init() {
    initCamera();
    initScene();
    initRenderer();
    initComposer();
    outlinePass.selectedObjects = [mesh];
}

function initCamera() {
    camera = new THREE.PerspectiveCamera(30, width / height, 1, 10000);
    camera.position.set(7, 0, 7);
    camera.lookAt(0, 0, 0);
}

function initScene() {
    light = new THREE.AmbientLight(0xffffff);
    scene.add(light);
}

function initRenderer() {
    renderer = new THREE.WebGLRenderer({
        width: width,
        height: height,
        antialias: false,
    });
    renderer.setSize(width, height);
    renderer.setClearColor(clearColor);
    document.body.appendChild(renderer.domElement);
}

function initComposer() {
    var renderPass, copyPass;
    composer = new THREE.EffectComposer(renderer);
    renderPass = new THREE.RenderPass(scene, camera);
    composer.addPass(renderPass);
    outlinePass = new THREE.OutlinePass(new THREE.Vector2(width, height), scene, camera);
    composer.addPass(outlinePass);
    outlinePass.edgeStrength = 10;
    outlinePass.edgeThickness = 4;
    outlinePass.visibleEdgeColor.set('#ff0000');
    copyPass = new THREE.ShaderPass(THREE.CopyShader);
    copyPass.renderToScreen = true;
    composer.addPass(copyPass);
}

function animate() {
    var delta = clock.getDelta();
    requestAnimationFrame(animate);
    update(delta);
    render(delta);
}

function update(delta) {
    if (mixer) mixer.update(delta);
}

function render(delta) {
    composer.render();
}
According to Mugen87, in January 2019:
With this small patch, it's now possible to use the outline pass with animated meshes. The only thing users have to do at app level is to set morphTargets or skinning to true for OutlinePass.depthMaterial and OutlinePass.prepareMaskMaterial. That's of course still a manual effort but at least the more complicated shader enhancement is already done.
Take this example:
https://jsfiddle.net/2ybks7rd/
Reference link on GitHub
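Based on that comment, the app-level change would presumably look something like this (a sketch against the r102 API; property names may differ in later releases):
outlinePass = new THREE.OutlinePass(new THREE.Vector2(width, height), scene, camera);
// Opt the pass's internal materials into skinning so the mask and depth
// renders follow the animated pose, per the comment quoted above.
outlinePass.depthMaterial.skinning = true;
outlinePass.prepareMaskMaterial.skinning = true;
composer.addPass(outlinePass);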

Three.js: Apply SSAO (Screen Space Ambient Occlusion) to Displacement map

I've implemented Screen Space Ambient Occlusion in my Three.js project, and it runs perfectly, like this:
//Setup SSAO pass
depthMaterial = new THREE.MeshDepthMaterial();
depthMaterial.depthPacking = THREE.RGBADepthPacking;
depthMaterial.blending = THREE.NoBlending;
var pars = { minFilter: THREE.LinearFilter, magFilter: THREE.LinearFilter, format: THREE.RGBAFormat, stencilBuffer: true }; // stencilBuffer: true so transparent objects are not affected
depthRenderTarget = new THREE.WebGLRenderTarget(window.innerWidth, window.innerHeight, pars);
depthRenderTarget.texture.name = "SSAOShader.rt";
ssaoPass = new THREE.ShaderPass(THREE.SSAOShader);
// ssaoPass.uniforms["tDiffuse"].value will be set by ShaderPass
ssaoPass.uniforms["tDepth"].value = depthRenderTarget.texture;
ssaoPass.uniforms['size'].value.set(window.innerWidth, window.innerHeight);
ssaoPass.uniforms['cameraNear'].value = camera.near;
ssaoPass.uniforms['cameraFar'].value = camera.far;
ssaoPass.uniforms['radius'].value = radius;
ssaoPass.uniforms['aoClamp'].value = aoClamp;
ssaoPass.uniforms['lumInfluence'].value = lumInfluence;
But when I set a material with a displacementMap (which runs correctly with SSAO disabled), this is the result. Notice that the SSAO is applied "correctly" to the original sphere (with a strange transparent artifact), but I need it applied to the displaced vertices of the sphere.
These are my composer passes:
//Main render scene pass
postprocessingComposer.addPass(renderScene);
//Post processing pass
if (ssaoPass) {
    postprocessingComposer.addPass(ssaoPass);
}
And this is the rendering loop with the composer:
if (postprocessingComposer) {
    if (ssaoPass) {
        //Render depth into depthRenderTarget
        scene.overrideMaterial = depthMaterial;
        renderer.render(scene, camera, depthRenderTarget, true);
        //Render composer
        scene.overrideMaterial = null;
        postprocessingComposer.render();
        renderer.clearDepth();
        renderer.render(sceneOrtho, cameraOrtho);
    }
    else {
        //Render loop with post processing (no SSAO, because it needs more checks, see above)
        renderer.clear();
        postprocessingComposer.render();
        renderer.clearDepth();
        renderer.render(sceneOrtho, cameraOrtho);
    }
}
else {
    //Simple render loop (no post-processing)
    renderer.clear();
    renderer.render(scene, camera);
    renderer.clearDepth();
    renderer.render(sceneOrtho, cameraOrtho);
}
How can I achieve correct Screen Space Ambient Occlusion applied to a mesh with a displacement map? Thanks.
[UPDATE]:
After some work, I tried the following procedure for every child in the scene with a displacement map: define a new overrideMaterial for the scene, equal to a depth material with the displacement map parameters of the child's material.
var myDepthMaterial = new THREE.MeshDepthMaterial({
    depthPacking: THREE.RGBADepthPacking,
    displacementMap: child.material.displacementMap,
    displacementScale: child.material.displacementScale,
    displacementBias: child.material.displacementBias
});

child.onBeforeRender = function (renderer, scene, camera, geometry, material, group) {
    scene.overrideMaterial = myDepthMaterial;
};
This solution sounds good, but it doesn't work.
You are using SSAO with a displacement map. You need to specify the displacement map when you instantiate the depth material.
depthMaterial = new THREE.MeshDepthMaterial({
    depthPacking: THREE.RGBADepthPacking,
    displacementMap: displacementMap,
    displacementScale: displacementScale,
    displacementBias: displacementBias
});
three.js r.87
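Wired into the depth prepass from the question, that displacement-aware material then replaces the plain one as the scene override. A sketch; note that scene.overrideMaterial applies a single material to everything, so this assumes one set of displacement parameters:
//Render depth with displacement applied, so the SSAO pass samples
//the displaced surface rather than the original sphere
scene.overrideMaterial = depthMaterial; //the MeshDepthMaterial created above
renderer.render(scene, camera, depthRenderTarget, true);
scene.overrideMaterial = null;
postprocessingComposer.render();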

how to rotate a sprite object in three.js?

I need to rotate a sprite object, but it seems this is not feasible. If not, is there a way to achieve the rotation effect, maybe through the UV coordinates of the SpriteMaterial, or a custom shader? What would be the best way to go?
The rotation of a Sprite is set by its material's rotation parameter. For example, like so:
var material = new THREE.SpriteMaterial({
    color: 0xffffff,
    map: texture,
    rotation: Math.PI / 4
});
var sprite = new THREE.Sprite(material);
three.js r.67
It took me forever, but it turns out you need to rotate the sprite material and not the sprite itself. Like so:
var scale = 1;

function animate() {
    requestAnimationFrame(animate);
    // Rotate the sprite:
    sprite.material.rotation += 0.01;
    // Resize the sprite:
    scale += 0.01;
    sprite.scale.set(scale, scale, 0);
    // Move the sprite:
    sprite.position.x += 0.01;
    renderer.render(scene, camera);
}
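As a side note, in newer three.js releases the rotation pivot is Sprite.center (a Vector2 in UV space, defaulting to (0.5, 0.5)), so you can also rotate about a corner instead of the middle:
// Rotate about the sprite's bottom-left corner instead of its centre.
sprite.center.set(0, 0);
sprite.material.rotation += 0.01;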

Specifying the range for Depth of Field (MeshDepthMaterial range?)

I have adapted this post processing example http://mrdoob.github.com/three.js/examples/webgl_postprocessing_dof.html to apply a Depth of Field / Bokeh effect. How can I specify the focus range (or whatever it could be called)?
If the camera far plane is at 10000 and the model size is 10, it was impossible to focus on individual parts of the model, because the focus adjusts from 1 to 10000 (camera near to camera far) instead of 1 to 10 (between the camera and the back of my model), the actual area of interest.
It did work fine after I realised I should set the camera far plane as low as possible (to about the same as the scene size), so the focus can be adjusted to where the actual model is.
Now I can't do the camera far plane trick anymore, because I added a skybox, so the camera needs to have its far plane quite far away relative to the model size. That messes up the Depth of Field: I can focus very close or very far, but the whole model is either completely blurred or not blurred at all, as the adjustable distance is way too big (all the way to the skybox).
If I know the area I want to be able to focus at, how can I specify it in my code?
Here is my setup code:
dof_material_depth = new THREE.MeshDepthMaterial();

dof_scene = new THREE.Scene();
dof_camera = new THREE.OrthographicCamera(SCREEN_WIDTH / -2, SCREEN_WIDTH / 2, SCREEN_HEIGHT / 2, SCREEN_HEIGHT / -2, -10000, 10000);
dof_camera.position.z = 100;
dof_scene.add(dof_camera);

var pars = { minFilter: THREE.LinearFilter, magFilter: THREE.LinearFilter, format: THREE.RGBAFormat };
dof_rtTextureDepth = new THREE.WebGLRenderTarget(SCREEN_WIDTH, SCREEN_HEIGHT, pars);
dof_rtTextureColor = new THREE.WebGLRenderTarget(SCREEN_WIDTH, SCREEN_HEIGHT, pars);

dof_bokeh_shader = THREE.BokehShader;
dof_bokeh_uniforms = THREE.UniformsUtils.clone(dof_bokeh_shader.uniforms);
dof_bokeh_uniforms["tColor"].value = dof_rtTextureColor;
dof_bokeh_uniforms["tDepth"].value = dof_rtTextureDepth;
dof_bokeh_uniforms["focus"].value = 1.1;
dof_bokeh_uniforms["aspect"].value = SCREEN_WIDTH / SCREEN_HEIGHT;

dof_materialBokeh = new THREE.ShaderMaterial({
    uniforms: dof_bokeh_uniforms,
    vertexShader: dof_bokeh_shader.vertexShader,
    fragmentShader: dof_bokeh_shader.fragmentShader
});

dof_quad = new THREE.Mesh(new THREE.PlaneGeometry(SCREEN_WIDTH, SCREEN_HEIGHT), dof_materialBokeh);
dof_quad.position.z = -500;
dof_scene.add(dof_quad);
And here is the rendering part:
renderer.render(scene, camera, dof_rtTextureColor, true);
scene.overrideMaterial = dof_material_depth;
renderer.render(scene, camera, dof_rtTextureDepth, true);
dof_scene.overrideMaterial = null;
render(dof_scene, dof_camera);
var delta = 0.01;
composerScene.render(delta);
EDIT:
I did manage to get desired results by setting a low far plane for the camera just before rendering the depth material, then reverting back to normal before rendering the composite:
renderer.render(scene, camera, dof_rtTextureColor, true);

var oldfar = camera.far; // this goes to the skybox
camera.far = scenesize;  // this goes to just behind the model
scene.overrideMaterial = dof_material_depth;
renderer.render(scene, camera, dof_rtTextureDepth, true);
camera.far = oldfar;

dof_scene.overrideMaterial = null;
render(dof_scene, dof_camera);
var delta = 0.01;
composerScene.render(delta);
This works perfectly. I will leave the question open though, as I'm quite new to WebGL / 3D programming in general, want to learn, and would like to know if it's possible to do this in the shader/material setup phase.
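One way to move most of this out of the render loop (a sketch, not a definitive answer; dof_depthCamera is a name introduced here for illustration) is to keep a dedicated depth camera whose far plane hugs the model, set up once and merely synchronised with the main camera each frame, so the per-frame save/restore of camera.far disappears:
// Setup phase: a clone of the main camera with a tight far plane.
var dof_depthCamera = camera.clone();
dof_depthCamera.far = scenesize; // just behind the model, as in the edit above
dof_depthCamera.updateProjectionMatrix();

// Render loop: follow the main camera, no far-plane juggling needed.
dof_depthCamera.position.copy(camera.position);
dof_depthCamera.quaternion.copy(camera.quaternion);
renderer.render(scene, camera, dof_rtTextureColor, true);
scene.overrideMaterial = dof_material_depth;
renderer.render(scene, dof_depthCamera, dof_rtTextureDepth, true);
scene.overrideMaterial = null;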
