I have a threejs animation mixer set up as follows:
// Set up a one-shot "pull" animation that holds its last frame when done.
this.mixer = new THREE.AnimationMixer(this.object);
this.mixer.timeScale = 2; // play twice as fast
this.mixer.addEventListener('finished', this.handlePullAnimFinished);
this.pullAnim = this.mixer.clipAction(this.animations[0]);
this.pullAnim.clampWhenFinished = true; // hold the final pose instead of resetting
this.pullAnim.setLoop(THREE.LoopOnce);
// Bug fix: the AnimationAction property is `enabled`, not `enable`.
// Assigning `enable` just creates an unused property and leaves the
// action in its default state.
this.pullAnim.enabled = true;
but if I try to do something like this.mixer.setTime(0.5), followed, optionally, by this.mixer.update() nothing happens
How do I programmatically set the mixer to a specific point in an animation (and not have it autoplay)?
I've seen the documentation here on setting up animations to autoplay (and successfully gotten that to work)
At first glance it looks like your mistake is that .update() does not have any arguments in it.
According to the docs, the mixer expects the update method to receive a change in seconds on each frame.
According to this demo you can do something like this:
// Advance the animation mixer by the real elapsed time on every frame.
const clock = new THREE.Clock();

function animate() {
  const elapsed = clock.getDelta(); // seconds since the previous frame
  mixer.update(elapsed);
  renderer.render(scene, camera);
  requestAnimationFrame(animate);
}
you could try this:
// r3f version
// Start the action once on mount, then pin the mixer to a fixed time on
// every frame — setTime() overrides the action's own playback position.
useEffect(()=>{
action.play()
}, [])
useFrame(()=>{
mixer.setTime(.3)
})
// vanilla version
actions['EmptyAction.001'].play();
let time = .3;
// Bug fix: `animate(){ ... }` is only valid shorthand inside an object
// literal or class body; as a statement it is a syntax error, so the
// `function` keyword is required here.
function animate() {
  mixer.setTime(time); // pin the mixer to the desired moment each frame
  requestAnimationFrame(animate);
}
animate();
Related
I want to reset my camera which has an active damping. I've tried different ways but I don't know if they are correct.
The goal is to stop the damping and set the initial position of the camera and then activate the damping again. I want to avoid that the model/camera is rotating a bit after I reset the controls with a button. I would do it like this:
// Temporarily disable damping so the update below doesn't keep easing
// toward the previous camera state.
controls.enableDamping = false;
controls.update();
// Snap the camera back to its initial pose.
camera.position.set( 10, 13, 10 );
camera.lookAt( 0, 0, 0 );
// Re-enable damping for normal interaction afterwards.
controls.enableDamping = true;
controls.update();
My rendering function is called by a EventListener:
controls.addEventListener( "change", requestRenderer );
And the render function:
const renderer = new THREE.WebGLRenderer( { canvas: canvas, antialias: true, alpha: true } );

// Render-on-demand: coalesce bursts of "change" events into one frame.
let renderRequested = false;

/**
 * Renders a single frame. `time` is the requestAnimationFrame timestamp;
 * the original `time *= 0.001` seconds conversion was dead code (the
 * value was never read), so it has been removed.
 */
function render( time ) {
  renderRequested = false;
  resizeRenderer( renderer, camera );
  controls.update(); // required every frame while damping is enabled
  renderer.render( scene, camera );
}

/**
 * Schedules a render unless one is already pending.
 */
function requestRenderer() {
  if ( !renderRequested ) {
    renderRequested = true;
    requestAnimationFrame( render );
  }
}
This works pretty well. The question is if this is the correct way and when do I have to update the controls? I think the first update is necessary to tell the controls that the damping isn't active anymore (but what does the update do?) and I think, that I don't need the second update.
From your code, it seems like you don't need the second update. You aren't changing a key property about the controls, in your render() loop:
controls.update();
Which covers every other case.
/examples/js/postprocessing/OutlinePass.js from THREE.js r102 does not appear to work with skinned meshes. Specifically, the rendered outline always stays in the mesh's rest position.
Is there some way to get this working (that is, to update the outline to reflect the current pose of an animated mesh)? OutlinePass does not appear to be documented (mod the comments in the code itself).
Is there some other accepted method of outlining animated meshes? I'm in the process of migrating some code from r7x, where I ended up accomplishing this by manually creating a copy of the mesh and applying a shader material that scales along the normals. I can do that again, but if there's a simpler/better supported method to accomplish the same effect I'd rather use it instead of reproducing a method that breaks every new major release.
A simple jsfiddle illustrating the issue:
https://jsfiddle.net/L69pe5q2/3/
This is the code from the jsfiddle. The mesh I use is the SimpleSkinning.gltf example from the three.js distribution. In the jsfiddle I load it from a dataURI so it doesn't complain about XSS loading, and I've edited the base64-encoded data out (and replaced it with [FOO]) in the code below, purely for readability.
The OutlinePass is created and added to the composer in initComposer().
// Shared demo state: populated by load()/init(), consumed by animate().
var camera, light, renderer, composer, mixer, loader, clock;
var scene, mesh, outlinePass;
// Fixed canvas size for the demo.
var height = 480,
width = 640;
var clearColor = '#666666';
// Entry point: everything waits on the async glTF load.
load();
// Loads the skinned glTF (embedded as a data URI), starts its animation,
// and only then runs init()/animate() so that `mesh` exists before the
// OutlinePass selection is assigned.
function load() {
loader = new THREE.GLTFLoader();
clock = new THREE.Clock();
scene = new THREE.Scene();
loader.load('data:text/plain;base64,[FOO]', function(obj) {
scene.add(obj.scene);
// The mixer drives the skinned animation; it is stepped in update().
mixer = new THREE.AnimationMixer(obj.scene);
var clip = THREE.AnimationClip.findByName(obj.animations,
'Take 01');
var a = mixer.clipAction(clip);
a.reset();
a.play();
mesh = obj.scene;
mesh.position.set(-7, 2.5, -7);
init();
animate();
});
}
// Wires everything together once the model is loaded. initComposer()
// must run before selectedObjects is assigned, since it creates outlinePass.
function init() {
initCamera();
initScene();
initRenderer();
initComposer();
outlinePass.selectedObjects = [mesh];
}
// Creates a narrow-FOV perspective camera looking back at the origin.
function initCamera() {
  const aspect = width / height;
  camera = new THREE.PerspectiveCamera(30, aspect, 1, 10000);
  camera.position.set(7, 0, 7);
  camera.lookAt(0, 0, 0);
}
// Flat, uniform lighting — an ambient light is enough for this demo.
function initScene() {
  light = new THREE.AmbientLight(0xffffff);
  scene.add(light);
}
// Creates the WebGL renderer at the fixed demo size and attaches its
// canvas to the document.
function initRenderer() {
  // Bug fix: `width`/`height` are not WebGLRenderer constructor options
  // in three.js — they were silently ignored here. setSize() below is
  // what actually sizes the drawing buffer and canvas.
  renderer = new THREE.WebGLRenderer({
    antialias: false,
  });
  renderer.setSize(width, height);
  renderer.setClearColor(clearColor);
  document.body.appendChild(renderer.domElement);
}
// Assembles the post-processing chain. Pass order matters:
// base scene render -> outline overlay -> copy to the screen.
function initComposer() {
var renderPass, copyPass;
composer = new THREE.EffectComposer(renderer);
renderPass = new THREE.RenderPass(scene, camera);
composer.addPass(renderPass);
outlinePass = new THREE.OutlinePass(new THREE.Vector2(width, height),
scene, camera);
composer.addPass(outlinePass);
outlinePass.edgeStrength = 10;
outlinePass.edgeThickness = 4;
outlinePass.visibleEdgeColor.set('#ff0000');
// The final CopyShader pass writes the composed result to the canvas.
copyPass = new THREE.ShaderPass(THREE.CopyShader);
copyPass.renderToScreen = true;
composer.addPass(copyPass);
}
// Main loop: sample the clock once, schedule the next frame, then step
// the animation and draw through the composer.
function animate() {
  const dt = clock.getDelta();
  requestAnimationFrame(animate);
  update(dt);
  render(dt);
}
// Steps the animation mixer once it exists (the glTF loads asynchronously,
// so the first few frames may run before the mixer is created).
function update(delta) {
  if (!mixer) return;
  mixer.update(delta);
}
// Draws one frame through the effect composer. `delta` is unused but the
// signature is kept for symmetry with update().
function render(delta) {
composer.render();
}
according to Mugen87 in Jan 2019 he said:
With this small patch, it's now possible to use the outline pass with animated meshes. The only thing users have to do at app level is to set morphTargets or skinning to true for OutlinePass.depthMaterial and OutlinePass.prepareMaskMaterial. That's of course still a manual effort but at least the more complicated shader enhancement is already done.
take this example:
https://jsfiddle.net/2ybks7rd/
reference link on github
As part of a project, I have to rotate the camera around an object (at position 0, 0, 0) while the object itself stays still. I want to know whether the lookAt function is the one best suited for this, and also how it works.
Integrating OrbitControls should be done with a few lines of code. So, the basic lines of code should be:
// init
var controls = new THREE.OrbitControls( camera, renderer.domElement );
controls.enableZoom = false; // optional
controls.enablePan = false; // optional
// NOTE(review): newer three.js revisions expose this as `controls.target`
// rather than `controls.center` — confirm against the version in use.
controls.center.set(0,0,0); // should be initially 0,0,0
controls.addEventListener( 'change', render ); // if you are not using requestAnimationFrame()
camera.position.z = 500; // should be bigger than the radius of your sphere
// render
function render() {
renderer.render(scene, camera);
}
<script src="js/controls/OrbitControls.js"></script>
Now, you should be able to rotate the camera around your sphere using your mouse.
All the other essential stuff (camera, renderer) can be found at the example: https://threejs.org/examples/#misc_controls_orbit
1.
I am trying to set up multiple canvases on a page like the given examples on threejs.org.
My basic code is like this:
// NOTE(review): all of these are shared by every App instance; each call
// to new App() overwrites them, which is why only the last canvas keeps
// animating (the reported symptom).
var scene, camera, controls, renderer, pointLight, geometry, material;
var container, position, dimensions, apps = [];
var windowWidth = window.innerWidth;
var windowHeight = window.innerHeight;
// Creates one preview App per canvas id and registers it for animation.
function preView( id ){
apps.push( new App( id ) );
//animate(); // if i call this here, all canvases renders once
// Builds camera/scene/renderer for a single canvas.
// NOTE(review): every assignment below writes to the shared globals, so
// each new App clobbers the previous one's camera/scene/renderer — only
// the data from the last-created App survives when animate() later runs.
function App( id ) {
container = $('#preView_' + id);
dimensions = { width: container.width(), height: container.height()};
// NOTE(review): `radius` is not defined in this snippet — presumably a
// global describing the mesh size; verify against the full source.
camera = new THREE.PerspectiveCamera(45, dimensions.width/dimensions.height, 1, 5 * radius);
camera.position.x = 0;
camera.position.y = 0;
camera.position.z = 100;
scene = new THREE.Scene();
/* add meshes */
/* ======================= */
renderer = new THREE.WebGLRenderer();
renderer.setSize(dimensions.width, dimensions.height);
container.append(renderer.domElement);
// Per-instance fly-through; note it calls the global render(), which in
// turn uses the shared renderer/scene/camera globals.
this.animate = function() {
if( camera.position.z > -(1/3) * 100 )
{
/* simple fly through the scene */
camera.position.x += 0.05;
camera.position.y += 0.05;
camera.position.z -= 0.1;
}
camera.lookAt(new THREE.Vector3(0,0,0));
render();
};
}
}
// Advances every registered preview, then schedules the next frame.
function animate(){
  for (const app of apps) {
    app.animate();
  }
  requestAnimationFrame(animate);
}
// Draws using the shared globals — so regardless of which App calls it,
// only the most recently created renderer/scene/camera are used.
function render(){
renderer.render(scene, camera);
}
The strange thing that happens is that only the last canvas renders (at all) if I call animate() after all canvases are drawn. And if I call animate() in the preView() function, all scenes are rendered once, but only the last canvas renders the 'camera fly through'. A console.log(apps[i]) in the animate() function goes through all apps, but the scenes don't render.
What do i do wrong here?
2.
Also, I try to achieve this shader effect for every object which I declare as 'light', no matter which position it has in the scene.
I tried to play a little with all the position values in the shaders, with absolutely no effect.
The only effect was in the VolumetericLightShader on line 333.
I hope for any Hints here.
Put all the variables, except apps=[], in App( id ) function. Thus you'll make them local for App. In your case now, every time you call
new App( id )
you put information in global variables which you created once. So in those variables you have the data you've stored there since last call of App( id ).
It means that you re-write the data in global variables. The same about the render() method. Put it inside the App() function too. As you mentioned about the example from Threejs.org, you had to notice where this method is stored. It's inside the App() function there. Sample jsfiddle
Maybe it would be easier to use the technique of lens flares. https://threejs.org/examples/webgl_lensflares.html
I've loaded a Blender model using the three.js library and want to allow the users to change the texture of some faces through an input field in a form. I don't have any problem when I use the WebGLRenderer, and it works fine in Chrome, but it doesn't work with the canvas renderer when the texture coming from the input is in data:image... format. Seems if I load a full path image from the server it works fine. Does anybody know if there's a way to load textures this way and render them with the canvasrenderer?
Thank you.
I add here the code that runs after I set up the camera and lights and detect whether the browser supports WebGL, in order to use either the WebGLRenderer or the CanvasRenderer.
First I load the model from blender:
// Load the Blender-exported JSON model; materials arrive as an array.
// NOTE(review): THREE.JSONLoader and MeshFaceMaterial were removed in
// later three.js releases — this code targets an old revision.
var loader = new THREE.JSONLoader();
loader.load('assets/models/mimaquina9.js', function (geometry, mat) {
//I set the overdraw property to 1 for each material like i show here for 16
mat[16].overdraw = 1;
mesh = new THREE.Mesh(geometry, new THREE.MeshFaceMaterial(mat) );
mesh.scale.x = 5;
mesh.scale.y = 5;
mesh.scale.z = 5;
scene.add(mesh);
}, 'assets/images');
// NOTE(review): render() runs before the async load above completes, so
// the mesh only appears on a later frame (or after the canvas-path timeout).
render();
//To render, I try to make an animation in case WebGL is available and just render one frame in case of using the canvas renderer.
// `webgl`/`canvas` are capability flags set earlier (outside this snippet).
function render() {
if(webgl){
if (mesh) {
mesh.rotation.y += 0.02;
}
// render using requestAnimationFrame
requestAnimationFrame(render);
webGLRenderer.render(scene, camera);
}
else if(canvas){
camera.position.x = 30;
camera.position.y = 20;
camera.position.z = 40;
camera.lookAt(new THREE.Vector3(0, 10, 0));
// Delay one second so the async model load has (hopefully) finished.
// NOTE(review): a load-complete callback would be more reliable than a
// fixed timeout.
setTimeout(function (){
//something you want delayed
webGLRenderer.render(scene, camera);
}, 1000);
}
}
// When the user picks a file, read it as a data: URL and install it as
// the texture map on material slot 16.
$('#datafile').change(function(e)
{
e.preventDefault();
var f = e.target.files[0];
if(f && window.FileReader)
{
var reader = new FileReader();
reader.onload = function(evt) {
console.log(evt);
// evt.target.result is a data: URL; this works under WebGLRenderer but
// reportedly fails under CanvasRenderer (the question being asked).
mesh.material.materials[16].map = THREE.ImageUtils.loadTexture(evt.target.result);
if(canvas && !webgl){
//I read that might be a problem of using Lambert materials, so I tried this commented line without success
//mesh.material.materials[16] = new THREE.MeshBasicMaterial( { map: THREE.ImageUtils.loadTexture(evt.target.result)});
//If I uncomment the next line, it displays the texture fine when rendering after.
//mesh.material.materials[16].map = THREE.ImageUtils.loadTexture("assets/images/foto.jpg");
render();
}
}
reader.readAsDataURL(f);
}
});
Thanks once more.
evt.target.result is a DataURL so you should assign that to a image.src. Something like this should work:
// Build the texture from an <img> whose src is the DataURL. The texture
// must be flagged for (re-)upload once the image has actually decoded —
// without `needsUpdate` a manually constructed THREE.Texture never
// uploads its pixel data and renders blank.
var image = document.createElement( 'img' );
var texture = new THREE.Texture( image );
image.onload = function () {
  texture.needsUpdate = true; // upload now that pixel data exists
};
image.src = evt.target.result;
mesh.material.materials[16].map = texture;