Three.js multiple layers and selective lighting with effect composer

I have a scene involving several planets rotating around a sun. In order to get the lighting right (and allow planets to cast a shadow on their own rings) I have each planet in its own layer, each with its own directional light shining from the direction of the sun. Now, I want to add an effects composer to the scene, but I can't figure out how to render it correctly so that all the layers show up. This is my current animation loop, which works:
renderer.autoClear = false

animate() {
  // render each layer on top of the previous one
  this.camera.layers.set(0)
  renderer.render(scene, this.camera)
  this.camera.layers.set(1)
  renderer.render(scene, this.camera)
  this.camera.layers.set(2)
  renderer.render(scene, this.camera)
  this.camera.layers.set(3)
  renderer.render(scene, this.camera)
  requestAnimationFrame(animate)
}
But I can't figure out how to add the effects composer on top of all that. I have tried adding a separate camera for each layer and a renderPass for each camera:
for (let i = 0; i < 4; i++) {
  this.camera[i] = new THREE.PerspectiveCamera()
  this.camera[i].layers.set(i)
  let renderPass = new RenderPass(scene, this.camera[i])
  composer.addPass(renderPass)
}

animate() {
  composer.render()
  requestAnimationFrame(animate)
}
But when I do this, only the final layer is visible. How do I avoid erasing the previous layers?
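For what it's worth, here is a sketch of one commonly suggested fix (not from the original thread): RenderPass clears the buffer by default, which is why each pass erases the previous layer. Mirroring the working loop above, leave clearing enabled only on the first pass:

for (let i = 0; i < 4; i++) {
  this.camera[i] = new THREE.PerspectiveCamera()
  this.camera[i].layers.set(i)
  const renderPass = new RenderPass(scene, this.camera[i])
  renderPass.clear = (i === 0) // only the first pass clears; later layers draw on top
  composer.addPass(renderPass)
}

Depending on your three.js revision, you may also need to set renderToScreen = true on the final pass so the composited result actually reaches the canvas.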

Related

Attempts to load a texture show no error but the texture does not display

I have a model, a background sky, and a ground surface. Texturing the surface results in no surface at all.
I've tried the basic approach and concluded that the scene is probably being rendered before the texture has finished loading. Having searched and found various possible solutions, I have tried several of them, without really understanding how they are supposed to work. None of them has worked. One problem is that this is an old question, and most of the suggestions involve outdated versions of the three.js library.
// Ground
// Create a textured ground based on an answer on Stack Overflow.
var loader = new THREE.TextureLoader();
loader.load('Textures/Ground128.jpg',
  function (texture) {
    var groundGeometry = new THREE.PlaneBufferGeometry(2000, 2000, 100, 100);
    const groundMaterial = new THREE.MeshLambertMaterial({map: texture});
    var ground = new THREE.Mesh(groundGeometry, groundMaterial);
    ground.receiveShadow = true; // illumination addition
    ground.rotation.x = -0.5 * Math.PI; // rotate into the horizontal
    scene.add(ground);
  }
);
// This variation does not work either
http://lhodges.users37.interdns.co.uk/me/downloads/Aphaia/Temple.htm
http://lhodges.users37.interdns.co.uk/me/downloads/Aphaia/Temple7jsV0.15b.htm
The first of the links above is the complete page, in which the ground is a plain billiard-table green. The second is the page containing the above code.
There appear to be no errors (last time I tried).
By the time your texture loads and you add the ground, your scene has already rendered (and there is no other render call).
You need to call renderer.render(scene, camera); after adding the ground to the scene.
// Ground
// Create a textured ground based on an answer on Stack Overflow.
var loader = new THREE.TextureLoader();
loader.load('Textures/Ground128.jpg',
  function (texture) {
    var groundGeometry = new THREE.PlaneBufferGeometry(2000, 2000, 100, 100);
    const groundMaterial = new THREE.MeshLambertMaterial({map: texture});
    var ground = new THREE.Mesh(groundGeometry, groundMaterial);
    ground.receiveShadow = true; // illumination addition
    ground.rotation.x = -0.5 * Math.PI; // rotate into the horizontal
    scene.add(ground);
    renderer.render(scene, camera); // <--- add this line
  }
);
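An alternative worth noting (my addition, not part of the original answer): if the page runs a continuous requestAnimationFrame loop, anything added to the scene later, including late-loading textures, shows up on the next frame without any manual render call. A minimal sketch:

function animate() {
  requestAnimationFrame(animate);
  renderer.render(scene, camera);
}
animate();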

Function LookAt to turn around an object?

As part of a project, I have to rotate the camera around an object (at position 0,0,0) that itself stays still. For this, I want to know whether the lookAt function is the one best suited, and also how it works.
Integrating OrbitControls takes only a few lines of code. The basic setup looks like this:
<script src="js/controls/OrbitControls.js"></script>

// init
var controls = new THREE.OrbitControls( camera, renderer.domElement );
controls.enableZoom = false; // optional
controls.enablePan = false; // optional
controls.center.set( 0, 0, 0 ); // should initially be 0,0,0
controls.addEventListener( 'change', render ); // if you are not using requestAnimationFrame()
camera.position.z = 500; // should be bigger than the radius of your sphere

// render
function render() {
  renderer.render( scene, camera );
}
Now, you should be able to rotate the camera around your sphere using your mouse.
All the other essential stuff (camera, renderer) can be found at the example: https://threejs.org/examples/#misc_controls_orbit
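One caveat (my note, based on current three.js, not part of the original answer): in recent revisions the property is controls.target rather than controls.center, and OrbitControls ships as an ES module:

import { OrbitControls } from 'three/examples/jsm/controls/OrbitControls.js';

const controls = new OrbitControls( camera, renderer.domElement );
controls.target.set( 0, 0, 0 ); // point the orbit at the object's position
controls.update(); // required after changing the target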

Why does this ThreeJs plane appear to get a kink in it as the camera moves down the y-axis?

I have an instance of THREE.PlaneBufferGeometry that I apply an image texture to like this:
var camera, scene, renderer;
var geometry, material, mesh, light, floor;

scene = new THREE.Scene();
THREE.ImageUtils.loadTexture( "someImage.png", undefined, handleLoaded, handleError );

function handleLoaded(texture) {
  var geometry = new THREE.PlaneBufferGeometry(
    texture.image.naturalWidth,
    texture.image.naturalHeight,
    1,
    1
  );
  var material = new THREE.MeshBasicMaterial({
    map: texture,
    overdraw: true
  });
  floor = new THREE.Mesh( geometry, material );
  floor.material.side = THREE.DoubleSide;
  scene.add( floor );

  camera = new THREE.PerspectiveCamera( 75, window.innerWidth / window.innerHeight, 1, texture.image.naturalHeight * A_BUNCH );
  camera.position.z = texture.image.naturalWidth * 0.5;
  camera.position.y = SOME_INT;
  camera.lookAt(floor.position);

  renderer = new THREE.CanvasRenderer();
  renderer.setSize(window.innerWidth, window.innerHeight);
  appendToDom();
  animate();
}

function handleError() {
  console.log(arguments);
}

function appendToDom() {
  document.body.appendChild(renderer.domElement);
}

function animate() {
  requestAnimationFrame(animate);
  renderer.render(scene, camera);
}
Here's the code pen: http://codepen.io/anon/pen/qELxvj?editors=001
( Note: three.js "pollutes" the global scope, to use a harsh term, and then decorates THREE using a decorator pattern, relying on scripts loading in the correct order rather than on a module loader. So, for brevity's sake, I simply copy-pasted the source code of a few required decorators into the code pen to ensure they load in the right order. You'll have to scroll down several thousand lines to the bottom of the code pen to play with the code that instantiates the plane, paints it, and moves the camera. )
In the code pen, I simply lay the plane flat against the x-y axis, looking straight up the z-axis, as it were. Then, I slowly pan the camera down along the y-axis, continuously pointing it at the plane.
As you can see in the code pen, as the camera moves along the y-axis in the negative direction, the texture on the plane appears to develop a kink in it around West Texas.
Why? How can I prevent this from happening?
I've seen similar behaviour, not in three.js or in a browser with WebGL, but with DirectX and vvvv. Still, I think you just have to set the widthSegments/heightSegments of your PlaneBufferGeometry to a higher value (> 4) and you're set!
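Some context on why that works (my addition, not part of the original answer): CanvasRenderer maps textures affinely, per triangle, in screen space. A 1x1-segment plane is just two triangles, so under perspective the interpolation error shows up as a crease along their shared diagonal; subdividing the plane shrinks each triangle until the error becomes invisible. A minimal sketch using the same texture-sized plane as above:

var geometry = new THREE.PlaneBufferGeometry(
  texture.image.naturalWidth,
  texture.image.naturalHeight,
  16, // widthSegments: more subdivisions...
  16  // ...hide the affine-mapping crease
);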

three.js: Sprite renderDepth attribute is not supported

This is my code:
var sprite = new THREE.Sprite(material);
sprite.renderDepth = 10;
The renderDepth setting above has no effect; it does not work for sprites.
How can I solve this problem?
You want one sprite to always be on top.
Since SpriteMaterial does not support a user-specified renderDepth, you have to implement a work-around.
Sprites are rendered last when using WebGLRenderer.
The easiest way to do what you want is to have two scenes and two render passes, with one sprite in the second scene like so:
renderer.autoClear = false;
scene2.add( sprite2 );
Then, in the render loop:
renderer.render( scene, camera );
renderer.clearDepth();
renderer.render( scene2, camera );
three.js r.64
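Assembled into a single loop (a sketch based on the snippets above; scene2, sprite2, and camera are assumed from context):

renderer.autoClear = false;
scene2.add( sprite2 ); // the sprite that must always be on top

function render() {
  requestAnimationFrame( render );
  renderer.clear();                  // autoClear is off, so clear manually once per frame
  renderer.render( scene, camera );  // main scene
  renderer.clearDepth();             // discard depth so the overlay sprite always wins
  renderer.render( scene2, camera ); // overlay scene
}
render();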

Three.js raycaster intersection empty when objects not part of scene

I've tried improving rendering time on my project by creating meshes and putting them as part of a larger geometry, and having just that single geometry as the object I add to the scene. I thought that I would still be able to manage picking of objects by having an array of the original meshes, and pass those to the raycaster. I used the following code:
var vector = new THREE.Vector3(
  ( loc_x / window.innerWidth ) * 2 - 1,
  - ( loc_y / window.innerHeight ) * 2 + 1,
  0.5
);
projector.unprojectVector( vector, camera );
var raycaster = new THREE.Raycaster( camera.position, vector.sub( camera.position ).normalize() );

var objects = [];
var i = active_regions.length;
while (i--) {
  objects = objects.concat( active_regions[i].mesh_entities );
}

var intersects = raycaster.intersectObjects( objects );
if ( intersects.length > 0 ) {
  console.log("Intersection: " + intersects);
}
So in the above code, active_regions contains the original individual meshes, and I create an array on the fly to specify which objects I want to select from. Unfortunately intersects comes up empty.
If I modify my project slightly so that I have all those mesh_entities added to the scene individually, then the above code works and I can successfully select objects. Unfortunately, the whole scene then renders slowly.
What's a good way (or some good ways) for me to successfully check for intersection with the ray, without slowing down my rendering?
Thanks!
You need to update the matrices for objects that are not in the rendered scene manually, since this normally happens as part of the render process. So if you are using a ghost scene, you don't need to render it; just update its matrices before doing the intersection:
scene_ghost.updateMatrixWorld(true);
I solved this by having a ghost scene. Essentially, I added all objects to the ghost scene as their individual meshes, and then when I use raycaster it works.
However, I had to use functions along these lines:
function flip_render_ghost(yes) {
  if (yes == true) {
    scene_ghost.add(camera);
    render_ghost = true;
  } else {
    scene.add(camera);
    render_ghost = false;
  }
  render();
}

function render() {
  if (render_ghost == true) {
    renderer.render( scene_ghost, camera );
  } else {
    renderer.render( scene, camera );
  }
}
Whenever I am about to check for collisions, I flip to rendering ghost scene, check for hits, then flip back to normal rendering.
Edit: I have since discovered that objects cannot belong to multiple scenes (though geometries can be shared). So what I have done is created simple meshes for the picking scene. This requires more memory, but gives the option of having a simpler mesh to use for selection for faster picking. Also, it seemed to work for me to send the children of the ghost scene itself to the raycaster. You may need to, like me, add a property to each object in the ghost scene that references the main object you are trying to pick.
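A sketch of that idea (my illustration; the proxy names and the userData link are assumptions, not from the original answer): give each simplified picking mesh a reference back to the real object, raycast against the ghost scene's children, and follow the reference:

// Build a simplified proxy for each real mesh and link it back
var proxyMaterial = new THREE.MeshBasicMaterial(); // the ghost scene is never shown, so any material works
var proxyGeometry = new THREE.BoxGeometry( 10, 10, 10 ); // stand-in low-poly shape
var proxy = new THREE.Mesh( proxyGeometry, proxyMaterial );
proxy.position.copy( realMesh.position );
proxy.userData.target = realMesh; // reference back to the object we actually want to pick
scene_ghost.add( proxy );

// Picking
scene_ghost.updateMatrixWorld( true ); // matrices are otherwise only updated during render
var intersects = raycaster.intersectObjects( scene_ghost.children );
if ( intersects.length > 0 ) {
  var picked = intersects[0].object.userData.target; // back to the real object
}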
I am doing something similar and have verified that you need to render the scene to do proper raycasting. It is easy to optimize this, however, by just rendering both scenes and having one draw over the other. You should be able to change your code to this, granted that the second render call will draw over the first scene:
function render() {
  renderer.render( scene_ghost, camera );
  renderer.render( scene, camera );
}
