Threejs 'view crop' using viewport and scissors - three.js

Overview:
I have a scene with a positioned perspective camera and multiple assets that produces the output that I want.
I don't want to move the camera or change its fov, as this would require dynamically adjusting all elements in the scene (see the debug camera view image).
I want a 'view crop' of the scene from whatever the full size is down to a smaller size (not always in the same proportion; see the resultant canvas image).
I have tried combinations of:
renderer.setScissor(0, 0, 320, 240)
renderer.setScissorTest(true)
renderer.setViewport(0, 0, 320, 240)
renderer.setSize(320, 240)
renderer.getContext().canvas.width = 320
renderer.getContext().canvas.height = 240
renderer.getContext().canvas.style.width = '320px'
renderer.getContext().canvas.style.height = '240px'
Applying the scissor gives me an example of what I want to see, because only those items are rendered, but the whole view is still the same size.
Applying the viewport scales the WHOLE image down.
Adjusting the canvas crops relative to the whole image rather than the points I want to crop from.
I CAN do blitting (copying the exact pixels I want onto a separate canvas), but I was hoping there was a simpler way.
Any ideas?
I've added a codepen example here:
https://codepen.io/faysvas/pen/KKzPQpa
const makeCube = (scene, color, x) => {
const material = new THREE.MeshPhongMaterial({ color })
const cube = new THREE.Mesh(new THREE.BoxGeometry(1, 1, 1), material)
scene.add(cube)
cube.position.x = x
return cube;
}
const addSceneContents = (scene) => {
const light = new THREE.DirectionalLight(0xffffff, 1)
light.position.set(-1, 2, 4)
scene.add(light)
return [
makeCube(scene, 0x44aa88, 0),
makeCube(scene, 0x8844aa, -2),
makeCube(scene, 0xaa8844, 2)
]
}
const main = () => {
const canvas = document.querySelector("#c")
const renderer = new THREE.WebGLRenderer({ canvas })
renderer.setSize(512, 512, false)
const fov = 75
const aspect = 1
const near = 0.1
const far = 5
const camera = new THREE.PerspectiveCamera(fov, aspect, near, far)
camera.position.z = 2
const scene = new THREE.Scene()
scene.background = new THREE.Color( 0xff0000 )
let cubes = addSceneContents(scene)
resizeTo320x240(renderer, canvas)
const render = (time) => {
time *= 0.001
cubes.forEach((cube, ndx) => {
const speed = 1 + ndx * 0.1
const rot = time * speed
cube.rotation.x = rot
cube.rotation.y = rot
})
renderer.render(scene, camera)
requestAnimationFrame(render)
}
requestAnimationFrame(render)
}
/*
This function should 'crop' from the whole scene without
distorting the perspective of the camera, ensuring the
canvas is 320x240.
e.g. I want the canvas to be the same size and output as the
red cropped view below, i.e. no black, and the canvas (and
its red contents) should be in the top left corner
of the screen.
*/
const resizeTo320x240 = (renderer, canvas) => {
console.log('code goes here')
const desiredWidth = 320
const desiredHeight = 240
const currentSize = renderer.getSize(new THREE.Vector2())
const x = (currentSize.x / 2) - (desiredWidth/2)
const y = (currentSize.y / 2) - (desiredHeight/2)
renderer.setScissor(x, y, desiredWidth, desiredHeight)
renderer.setScissorTest(true)
//renderer.setViewport(x, y, desiredWidth, desiredHeight)
}
main()
html, body {
margin: 0;
height: 100%;
background-color: grey;
}
#c {
display: block;
background-color: red;
}
<script src="https://cdnjs.cloudflare.com/ajax/libs/three.js/r119/three.min.js"></script>
<canvas id="c"></canvas>

I figured it out. The renderer and the viewport are not the place to solve this; instead, the camera itself has the ability to offset or crop its own output.
const makeCube = (scene, color, x) => {
const material = new THREE.MeshPhongMaterial({ color })
const cube = new THREE.Mesh(new THREE.BoxGeometry(1, 1, 1), material)
scene.add(cube)
cube.position.x = x
return cube;
}
const addSceneContents = (scene) => {
const light = new THREE.DirectionalLight(0xffffff, 1)
light.position.set(-1, 2, 4)
scene.add(light)
return [
makeCube(scene, 0x44aa88, 0),
makeCube(scene, 0x8844aa, -2),
makeCube(scene, 0xaa8844, 2)
]
}
const main = () => {
const canvas = document.querySelector("#c")
const renderer = new THREE.WebGLRenderer({ canvas })
renderer.setSize(512, 512, false)
const fov = 75
const aspect = 1
const near = 0.1
const far = 5
const camera = new THREE.PerspectiveCamera(fov, aspect, near, far)
camera.position.z = 2
const scene = new THREE.Scene()
scene.background = new THREE.Color( 0xff0000 )
let cubes = addSceneContents(scene)
resizeTo320x240(renderer, canvas, camera)
const render = (time) => {
time *= 0.001
cubes.forEach((cube, ndx) => {
const speed = 1 + ndx * 0.1
const rot = time * speed
cube.rotation.x = rot
cube.rotation.y = rot
})
renderer.render(scene, camera)
requestAnimationFrame(render)
}
requestAnimationFrame(render)
}
/*
This function should 'crop' from the whole scene without
distorting the perspective of the camera, ensuring the
canvas is 320x240.
e.g. I want the canvas to be the same size and output as the
red cropped view below, i.e. no black, and the canvas (and
its red contents) should be in the top left corner
of the screen.
*/
const resizeTo320x240 = (renderer, canvas, camera) => {
const desiredWidth = 320
const desiredHeight = 240
const currentSize = renderer.getSize(new THREE.Vector2())
const x = (currentSize.x / 2) - (desiredWidth/2)
const y = (currentSize.y / 2) - (desiredHeight/2)
// Set the size of the renderer to the correct desired output size
renderer.setSize(desiredWidth, desiredHeight, false)
// The camera itself is the one that should be cropped
// This is referred to as the view offset in three.js
camera.setViewOffset(currentSize.x, currentSize.y, x, y, desiredWidth, desiredHeight)
}
main()
html, body {
margin: 0;
height: 100%;
background-color: grey;
}
#c {
display: block;
background-color: red;
}
<script src="https://cdnjs.cloudflare.com/ajax/libs/three.js/r119/three.min.js"></script>
<canvas id="c"></canvas>
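For reference, the call used above is PerspectiveCamera.setViewOffset(fullWidth, fullHeight, x, y, width, height): the first two arguments describe the full, uncropped view, and the last four describe the sub-rectangle of that view to render. Here is a minimal sketch of the same idea in isolation (the 512/320/240 values are simply the numbers from the snippet above, not anything special):
// Render only a 320x240 window taken from the middle of a 512x512 view,
// without moving the camera or changing its fov.
const fullWidth = 512, fullHeight = 512
const cropWidth = 320, cropHeight = 240
const cropX = (fullWidth - cropWidth) / 2
const cropY = (fullHeight - cropHeight) / 2
renderer.setSize(cropWidth, cropHeight, false)
camera.setViewOffset(fullWidth, fullHeight, cropX, cropY, cropWidth, cropHeight)
// camera.clearViewOffset() restores the full view later if needed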

Related

GLTF model and interaction in Three.js

My JS skills could be improved, to say the least! But I'm struggling with this.
I can get my model to load OK into the scene, but I cannot seem to get the interaction working.
It's like I need to tie the GLTF file into the raycaster; the code below is part of it. The full CodePen link is below this code.
class PickHelper {
constructor() {
this.raycaster = new THREE.Raycaster();
this.pickedObject = null;
this.pickedObjectSavedColor = 0;
}
pick(normalizedPosition, scene, camera, time) {
if (this.pickedObject) {
this.pickedObject.material.emissive.setHex(this.pickedObjectSavedColor);
this.pickedObject = undefined;
}
this.raycaster.setFromCamera(normalizedPosition, camera);
const intersectedObjects = this.raycaster.intersectObjects(scene.children);
if (intersectedObjects.length) {
this.pickedObject = intersectedObjects[0].object;
this.pickedObjectSavedColor = this.pickedObject.material.emissive.getHex();
this.pickedObject.material.emissive.setHex((time * 8) % 2 > 1 ? 0xFFFF00 : 0xFF0000);
this.pickedObject.rotation.y += 0.1 ;
}
}
https://codepen.io/johneemac/pen/abzqdye << FULL Code
Sorry: Cross origin issue with the gltf file on CodePen though! It won't load but you get the idea hopefully.
Super appreciate any help, thanks!
You have to perform the intersection test like so:
const intersectedObjects = this.raycaster.intersectObjects(scene.children, true);
Notice the second argument of intersectObjects(). It indicates that the raycaster should process the entire hierarchy of objects, which is necessary in the context of a loaded glTF asset.
three.js R112
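Here is a minimal sketch of that change in context; apart from the added true argument and the comments, these lines are the ones already in the PickHelper.pick() method above:
// cast a ray and recurse into children (true), which is what picks up
// the meshes nested inside a loaded glTF scene graph
this.raycaster.setFromCamera(normalizedPosition, camera);
const intersectedObjects = this.raycaster.intersectObjects(scene.children, true);
if (intersectedObjects.length) {
  // intersections are sorted by distance, so [0] is the closest hit
  this.pickedObject = intersectedObjects[0].object;
}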
It's not clear what you're trying to do. GLTF files are collections of materials, animations, geometries, meshes, etc., so you can't "pick" a GLTF file. You can "pick" individual elements inside. You could write some code so that, if something is picked, it checks whether the thing that was picked is one of the meshes loaded in the GLTF scene, and then picks everything else that was loaded in the GLTF scene.
In any case,
You need to give the Raycaster a list of objects to select from. In the original example that was scene.children, which is just the list of Boxes added to the root of the scene. But when loading a GLTF, unless you already know the structure of the GLTF because you created the scene yourself, you'll need to go find the things you want to be able to select and add them to some list that you can pass to Raycaster.intersectObjects.
This code gets all the Mesh objects from the loaded GLTF file
let pickableMeshes = [];
// this is run after loading the glTF
// get a list of all the meshes in the scene
root.traverse((node) => {
if (node instanceof THREE.Mesh) {
pickableMeshes.push(node);
}
});
Note that you could also pass true as the second argument to Raycaster.intersectObjects, as in raycaster.intersectObjects(scene.children, true). That's probably rarely what you want though, as you likely have things in the scene you don't want the user to be able to select. For example, if you only wanted the user to be able to select the cars, then something like this:
// get a list of all the meshes in the scene whose names start with "car"
root.traverse((node) => {
if (node instanceof THREE.Mesh && (/^car/i).test(node.name)) {
pickableMeshes.push(node);
}
});
Then, the PickHelper class you used was changing the color of the material on each Box, but that only works because each Box has its own material. If the Boxes shared materials, then changing the material color would change all the boxes.
In the glTF loaded below, most of the objects share the same material, so to be able to highlight one requires changing the material used with that object, or choosing some other method of highlighting the selected thing.
function main() {
const canvas = document.querySelector('#c');
const renderer = new THREE.WebGLRenderer({canvas});
const fov = 60;
const aspect = 2; // the canvas default
const near = 0.1;
const far = 200;
const camera = new THREE.PerspectiveCamera(fov, aspect, near, far);
camera.position.z = 30;
const scene = new THREE.Scene();
scene.background = new THREE.Color('white');
// put the camera on a pole (parent it to an object)
// so we can spin the pole to move the camera around the scene
const cameraPole = new THREE.Object3D();
scene.add(cameraPole);
cameraPole.add(camera);
{
const color = 0xFFFFFF;
const intensity = 1;
const light = new THREE.DirectionalLight(color, intensity);
light.position.set(-1, 2, 4);
camera.add(light);
}
function frameArea(sizeToFitOnScreen, boxSize, boxCenter, camera) {
const halfSizeToFitOnScreen = sizeToFitOnScreen * 0.5;
const halfFovY = THREE.Math.degToRad(camera.fov * .5);
const distance = halfSizeToFitOnScreen / Math.tan(halfFovY);
// compute a unit vector that points in the direction the camera is now
// in the xz plane from the center of the box
const direction = (new THREE.Vector3())
.subVectors(camera.position, boxCenter)
.multiply(new THREE.Vector3(1, 0, 1))
.normalize();
// move the camera to a position distance units way from the center
// in whatever direction the camera was from the center already
camera.position.copy(direction.multiplyScalar(distance).add(boxCenter));
// pick some near and far values for the frustum that
// will contain the box.
camera.near = boxSize / 100;
camera.far = boxSize * 100;
camera.updateProjectionMatrix();
// point the camera to look at the center of the box
camera.lookAt(boxCenter.x, boxCenter.y, boxCenter.z);
}
let pickableMeshes = [];
{
const gltfLoader = new THREE.GLTFLoader();
gltfLoader.load('https://threejsfundamentals.org/threejs/resources/models/cartoon_lowpoly_small_city_free_pack/scene.gltf', (gltf) => {
const root = gltf.scene;
scene.add(root);
// compute the box that contains all the stuff
// from root and below
const box = new THREE.Box3().setFromObject(root);
const boxSize = box.getSize(new THREE.Vector3()).length();
const boxCenter = box.getCenter(new THREE.Vector3());
// set the camera to frame the box
frameArea(boxSize * 0.7, boxSize, boxCenter, camera);
// get a list of all the meshes in the scene
root.traverse((node) => {
if (node instanceof THREE.Mesh) {
pickableMeshes.push(node);
}
});
});
}
function resizeRendererToDisplaySize(renderer) {
const canvas = renderer.domElement;
const width = canvas.clientWidth;
const height = canvas.clientHeight;
const needResize = canvas.width !== width || canvas.height !== height;
if (needResize) {
renderer.setSize(width, height, false);
}
return needResize;
}
class PickHelper {
constructor() {
this.raycaster = new THREE.Raycaster();
this.pickedObject = null;
this.pickedObjectSavedMaterial = null;
this.selectMaterial = new THREE.MeshBasicMaterial();
this.infoElem = document.querySelector('#info');
}
pick(normalizedPosition, scene, camera, time) {
// restore the color if there is a picked object
if (this.pickedObject) {
this.pickedObject.material = this.pickedObjectSavedMaterial;
this.pickedObject = undefined;
this.infoElem.textContent = '';
}
// cast a ray through the frustum
this.raycaster.setFromCamera(normalizedPosition, camera);
// get the list of objects the ray intersected
const intersectedObjects = this.raycaster.intersectObjects(pickableMeshes);
if (intersectedObjects.length) {
// pick the first object. It's the closest one
this.pickedObject = intersectedObjects[0].object;
// save its color
this.pickedObjectSavedMaterial = this.pickedObject.material;
this.pickedObject.material = this.selectMaterial;
// flash select material color to flashing red/yellow
this.selectMaterial.color.setHex((time * 8) % 2 > 1 ? 0xFFFF00 : 0xFF0000);
this.infoElem.textContent = this.pickedObject.name;
}
}
}
const pickPosition = {x: 0, y: 0};
const pickHelper = new PickHelper();
clearPickPosition();
function render(time) {
time *= 0.001; // convert to seconds;
if (resizeRendererToDisplaySize(renderer)) {
const canvas = renderer.domElement;
camera.aspect = canvas.clientWidth / canvas.clientHeight;
camera.updateProjectionMatrix();
}
cameraPole.rotation.y = time * .1;
pickHelper.pick(pickPosition, scene, camera, time);
renderer.render(scene, camera);
requestAnimationFrame(render);
}
requestAnimationFrame(render);
function getCanvasRelativePosition(event) {
const rect = canvas.getBoundingClientRect();
return {
x: event.clientX - rect.left,
y: event.clientY - rect.top,
};
}
function setPickPosition(event) {
const pos = getCanvasRelativePosition(event);
pickPosition.x = (pos.x / canvas.clientWidth ) * 2 - 1;
pickPosition.y = (pos.y / canvas.clientHeight) * -2 + 1; // note we flip Y
}
function clearPickPosition() {
// unlike the mouse which always has a position
// if the user stops touching the screen we want
// to stop picking. For now we just pick a value
// unlikely to pick something
pickPosition.x = -100000;
pickPosition.y = -100000;
}
window.addEventListener('mousemove', setPickPosition);
window.addEventListener('mouseout', clearPickPosition);
window.addEventListener('mouseleave', clearPickPosition);
window.addEventListener('touchstart', (event) => {
// prevent the window from scrolling
event.preventDefault();
setPickPosition(event.touches[0]);
}, {passive: false});
window.addEventListener('touchmove', (event) => {
setPickPosition(event.touches[0]);
});
window.addEventListener('touchend', clearPickPosition);
}
main();
body { margin: 0; }
#c { width: 100vw; height: 100vh; display: block; }
#info { position: absolute; left: 0; top: 0; background: black; color: white; padding: 0.5em; font-family: monospace; }
<script src="https://threejsfundamentals.org/threejs/resources/threejs/r112/build/three.min.js"></script>
<script src="https://threejsfundamentals.org/threejs/resources/threejs/r112/examples/js/loaders/GLTFLoader.js"></script>
<canvas id="c"></canvas>
<div id="info"></div>

Three.js - Adding texture makes it look black

I'm making custom 3D objects using an image. First I take the outline from an image, and after getting the points I create the shape. Then I'm using the three.js extrude geometry to make it look like a 3D object.
The issue is that the texture I'm using is showing completely black. I used this code to scale the texture:
texture.wrapT = texture.wrapS = THREE.RepeatWrapping;
var rx = 1/img.width;
var ry = 1/img.height;
texture.repeat.set(rx,ry);
This gives me the result in the image below:
NOTE: I'm using the GLTF Exporter.
It is scaling the texture correctly, but I can't set the offset. The image is not arranged properly.
I want to dynamically set the offset, as my images will be different every time. I can set the offset manually and achieve the results shown in the image below, but I want this to be dynamic.
NOTE: This is the offset I'm setting manually for this image to achieve the results:
texture.offset.set(0.188,0.934);
I really need help. Any help will be appreciated. Thanks.
It's not clear what you're trying to do but ...
texture.repeat sets how many times a texture repeats, so texture.repeat.set(2, 3) means repeat twice across and three times down. That means your code texture.repeat.set(1 / img.width, 1 / img.height) will expand the texture so that only one pixel of it is visible.
repeat.set(2, 3);
repeats 2 across and 3 down
repeat.set(1/2, 1/3);
repeats 0.5 across and 0.33 down, or in other words shows half the texture across and 1/3 of the texture down
offset moves the texture where
1 = move it 100% to the left (if the texture repeats there will be no change at 1, since you've moved it 100%)
0.5 = move it 50% to the left
0.25 = move it 25% to the left
-0.10 = move it -10% to the right
If you want to move it in pixels, this is where you'd use img.width:
1/img.width = move it one pixel left
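For example, a minimal sketch of offsetting by whole pixels (img is assumed to be the loaded image, as in the question, and texture a loaded THREE.Texture):
// one pixel of offset is 1/img.width horizontally and 1/img.height vertically
texture.wrapS = THREE.RepeatWrapping;
texture.wrapT = THREE.RepeatWrapping;
texture.offset.x = 10 / img.width;  // shift by 10 pixels (per the convention above)
texture.offset.y = 3 / img.height;  // shift by 3 pixels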
See the example at the bottom of this page
body {
margin: 0;
}
#c {
width: 100vw;
height: 100vh;
display: block;
}
<canvas id="c"></canvas>
<script type="module">
import * as THREE from 'https://threejsfundamentals.org/threejs/resources/threejs/r110/build/three.module.js';
function main() {
const canvas = document.querySelector('#c');
const renderer = new THREE.WebGLRenderer({canvas});
const fov = 75;
const aspect = 2; // the canvas default
const near = 0.1;
const far = 5;
const camera = new THREE.PerspectiveCamera(fov, aspect, near, far);
camera.position.z = 2;
const scene = new THREE.Scene();
const geometry = new THREE.PlaneGeometry(1, 1);
const obs = []; // just an array we can use to rotate the cubes
const loader = new THREE.TextureLoader();
for (let i = 0; i < 10; ++i) {
const texture = loader.load('https://i.imgur.com/ZKMnXce.png');
// expand the texture so only 40% of it is stretched across the plane
texture.repeat.set(0.4, 0.4);
// randomly offset the texture
texture.offset.set(rand(1), rand(1));
// make it repeat
texture.wrapS = THREE.RepeatWrapping;
texture.wrapT = THREE.RepeatWrapping;
texture.magFilter = THREE.NearestFilter;
const material = new THREE.MeshBasicMaterial({
map: texture,
side: THREE.DoubleSide,
});
const plane = new THREE.Mesh(geometry, material);
plane.position.set(rand(-1, 1), rand(-1, 1), 0);
scene.add(plane);
obs.push(plane); // add to our list of obs to rotate
}
function rand(min, max) {
if (max === undefined) {
max = min;
min = 0;
}
return Math.random() * (max - min) + min;
}
function resizeRendererToDisplaySize(renderer) {
const canvas = renderer.domElement;
const width = canvas.clientWidth;
const height = canvas.clientHeight;
const needResize = canvas.width !== width || canvas.height !== height;
if (needResize) {
renderer.setSize(width, height, false);
}
return needResize;
}
function render(time) {
time *= 0.001;
if (resizeRendererToDisplaySize(renderer)) {
const canvas = renderer.domElement;
camera.aspect = canvas.clientWidth / canvas.clientHeight;
camera.updateProjectionMatrix();
}
obs.forEach((obj, ndx) => {
const speed = .2 + ndx * .1;
const rot = time * speed;
obj.rotation.z = rot;
});
renderer.render(scene, camera);
requestAnimationFrame(render);
}
requestAnimationFrame(render);
}
main();
</script>

Make a camera rotate along z axis while moving and changing lookAt (rollercoaster view) in Three.js

Hi, I am having a problem; maybe you can help me.
I have a camera that is going down a tube following a path, and that rotates around the tube, always pointing toward the next point in the tube. However, the camera can sometimes be below or beside the tube, like a roller coaster. Like this:
I have the position of point a and the position of the camera, which is point b. I am always looking at point a+1:
var bpoints = this.cameraPathpoints;
var apoints = this.pathPoints;
this.camera.position.copy(bpoints[i]);
this.camera.lookAt(apoints[i+1]);
The camera is always looking at the point correctly; however, I want the camera to rotate on its z axis so that it is always normal to the tube. I tried making some calculations to rotate the camera on its z axis so that it always faces normal to the tube, but my calculations only work in certain positions. Maybe there is a simpler way to do this. Thank you very much for any help.
var angleRadians = Math.atan2(cpv[this.cameraPos].pos.y - centePoints[this.cameraPos].pos.y, cpv[this.cameraPos].pos.x - centePoints[this.cameraPos].pos.x);
if(angleRadians > 0 && angleRadians > Math.PI/2){
console.log("+90",(Math.PI/2) - angleRadians);
angleRadians = (Math.PI/2) - angleRadians;
this.camera.rotateZ(angleRadians);
console.log("rotated ", angleRadians * 180/Math.PI);
}
else if(angleRadians > 0 && angleRadians < Math.PI/2 && anglesum >
Math.PI/2){
console.log("-90",(Math.PI/2) - angleRadians);
angleRadians = (Math.PI/2) - angleRadians;
this.camera.rotateZ(-angleRadians);
console.log("rotated ", -angleRadians * 180/Math.PI);
}
else if(angleRadians > 0 && angleRadians < Math.PI/2){
console.log("-90",(Math.PI/2) + angleRadians);
angleRadians = -(Math.PI/2) - (angleRadians/Math.PI/2);
this.camera.rotateZ(angleRadians);
console.log("rotated ", angleRadians * 180/Math.PI);
}
else if(angleRadians < 0 && angleRadians < -Math.PI/2){
console.log("--90");
angleRadians = (Math.PI/2) + angleRadians;
this.camera.rotateZ(-angleRadians);
console.log("rotated ",-angleRadians * 180/Math.PI);
}else if(angleRadians < 0 && angleRadians > -Math.PI/2){
console.log("+-90");
angleRadians = (Math.PI/2) - angleRadians;
this.camera.rotateZ(-angleRadians);
console.log("rotated ", -angleRadians * 180/Math.PI);
}
Rather than doing math, make the camera a child of some other THREE.Object3D and use lookAt with that object. Set the camera's position and rotation relative to that object.
Below, the object is called the mount. It goes down the path (the center of the tube). The camera is a child of the mount. The tube has a 1-unit radius, so setting camera.position.y to 1.5 puts the camera outside the tube. lookAt makes non-camera objects look down positive Z, but the camera looks down negative Z, so we rotate the camera 180 degrees.
Example:
'use strict';
/* global THREE */
function main() {
const canvas = document.querySelector('#c');
const renderer = new THREE.WebGLRenderer({canvas: canvas});
const scene = new THREE.Scene();
scene.background = new THREE.Color(0xAAAAAA);
const fov = 40;
const aspect = 2; // the canvas default
const near = 0.1;
const far = 1000;
const camera = new THREE.PerspectiveCamera(fov, aspect, near, far);
camera.position.y = 1.5; // 1.5 units above the mount
camera.rotation.y = Math.PI; // the mount will lookAt positive Z
const mount = new THREE.Object3D();
mount.add(camera);
scene.add(mount);
{
const color = 0xFFFFFF;
const intensity = 1;
const light = new THREE.DirectionalLight(color, intensity);
light.position.set(-1, 2, 4);
scene.add(light);
}
{
const color = 0xFFFFFF;
const intensity = 1;
const light = new THREE.DirectionalLight(color, intensity);
light.position.set(1, -2, -4);
scene.add(light);
}
const curve = new THREE.Curves.GrannyKnot();
const tubularSegments = 200;
const radius = 1;
const radialSegments = 6;
const closed = true;
const tube = new THREE.TubeBufferGeometry(
curve, tubularSegments, radius, radialSegments, closed);
const texture = new THREE.DataTexture(new Uint8Array([128, 255, 255, 128]),
2, 2, THREE.LuminanceFormat);
texture.needsUpdate = true;
texture.magFilter = THREE.NearestFilter;
texture.wrapS = THREE.RepeatWrapping;
texture.wrapT = THREE.RepeatWrapping;
texture.repeat.set( 100, 4 );
const material = new THREE.MeshPhongMaterial({
map: texture,
color: '#8CF',
flatShading: true,
});
const mesh = new THREE.Mesh(tube, material);
scene.add(mesh);
const target = new THREE.Vector3();
function resizeRendererToDisplaySize(renderer) {
const canvas = renderer.domElement;
const width = canvas.clientWidth;
const height = canvas.clientHeight;
const needResize = canvas.width !== width || canvas.height !== height;
if (needResize) {
renderer.setSize(width, height, false);
}
return needResize;
}
function render(time) {
time *= 0.001;
if (resizeRendererToDisplaySize(renderer)) {
const canvas = renderer.domElement;
camera.aspect = canvas.clientWidth / canvas.clientHeight;
camera.updateProjectionMatrix();
}
const t = time * 0.1 % 1;
curve.getPointAt(t, mount.position);
curve.getPointAt((t + 0.01) % 1, target);
mount.lookAt(target);
renderer.render(scene, camera);
requestAnimationFrame(render);
}
requestAnimationFrame(render);
}
main();
body { margin: 0; }
canvas { width: 100vw; height: 100vh; display: block; }
<canvas id="c"></canvas>
<script src="https://threejsfundamentals.org/threejs/resources/threejs/r102/three.min.js"></script>
<script src="https://threejsfundamentals.org/threejs/resources/threejs/r102/js/CurveExtras.js"></script>
You can easily orient the camera relative to the mount, to say look more toward or away from the path, by setting camera.rotation.x. If you want to rotate around the mount, either change the mount's up property or add another object between the mount and the camera and set its Z rotation.
'use strict';
/* global THREE */
function main() {
const canvas = document.querySelector('#c');
const renderer = new THREE.WebGLRenderer({canvas: canvas});
const scene = new THREE.Scene();
scene.background = new THREE.Color(0xAAAAAA);
const fov = 40;
const aspect = 2; // the canvas default
const near = 0.1;
const far = 1000;
const camera = new THREE.PerspectiveCamera(fov, aspect, near, far);
camera.position.y = 1.5; // 1.5 units above the mount
camera.rotation.y = Math.PI; // the mount will lookAt positive Z
const mount = new THREE.Object3D();
const subMount = new THREE.Object3D();
subMount.rotation.z = Math.PI * .5;
subMount.add(camera);
mount.add(subMount);
scene.add(mount);
{
const color = 0xFFFFFF;
const intensity = 1;
const light = new THREE.DirectionalLight(color, intensity);
light.position.set(-1, 2, 4);
scene.add(light);
}
{
const color = 0xFFFFFF;
const intensity = 1;
const light = new THREE.DirectionalLight(color, intensity);
light.position.set(1, -2, -4);
scene.add(light);
}
const curve = new THREE.Curves.GrannyKnot();
const tubularSegments = 200;
const radius = 1;
const radialSegments = 6;
const closed = true;
const tube = new THREE.TubeBufferGeometry(
curve, tubularSegments, radius, radialSegments, closed);
const texture = new THREE.DataTexture(new Uint8Array([128, 255, 255, 128]),
2, 2, THREE.LuminanceFormat);
texture.needsUpdate = true;
texture.magFilter = THREE.NearestFilter;
texture.wrapS = THREE.RepeatWrapping;
texture.wrapT = THREE.RepeatWrapping;
texture.repeat.set( 100, 4 );
const material = new THREE.MeshPhongMaterial({
map: texture,
color: '#8CF',
flatShading: true,
});
const mesh = new THREE.Mesh(tube, material);
scene.add(mesh);
const target = new THREE.Vector3();
const target2 = new THREE.Vector3();
const mountToTarget = new THREE.Vector3();
const targetToTarget2 = new THREE.Vector3();
function resizeRendererToDisplaySize(renderer) {
const canvas = renderer.domElement;
const width = canvas.clientWidth;
const height = canvas.clientHeight;
const needResize = canvas.width !== width || canvas.height !== height;
if (needResize) {
renderer.setSize(width, height, false);
}
return needResize;
}
function render(time) {
time *= 0.001;
if (resizeRendererToDisplaySize(renderer)) {
const canvas = renderer.domElement;
camera.aspect = canvas.clientWidth / canvas.clientHeight;
camera.updateProjectionMatrix();
}
const t = time * 0.1 % 1;
curve.getPointAt(t, mount.position);
curve.getPointAt((t + 0.01) % 1, target);
// set the mount's up vector to be perpendicular to the
// curve
curve.getPointAt((t + 0.02) % 1, target2);
mountToTarget.subVectors(mount.position, target).normalize();
targetToTarget2.subVectors(target2, target).normalize();
mount.up.crossVectors(mountToTarget, targetToTarget2);
mount.lookAt(target);
renderer.render(scene, camera);
requestAnimationFrame(render);
}
requestAnimationFrame(render);
}
main();
body { margin: 0; }
canvas { width: 100vw; height: 100vh; display: block; }
<canvas id="c"></canvas>
<script src="https://threejsfundamentals.org/threejs/resources/threejs/r102/three.min.js"></script>
<script src="https://threejsfundamentals.org/threejs/resources/threejs/r102/js/CurveExtras.js"></script>

How many times is gl.DrawElements called?

Tell me how many times three.js calls the function gl.drawElements in one frame. Once, or is the function called for each object?
// Render at once //
ONE gl.DrawElements ( box + sphere + plane ) = SCENE
OR
// Render each object independently //
gl.DrawElements ( box ) + gl.DrawElements ( sphere ) + gl.DrawElements ( plane ) = SCENE
I don't write English well, I'm sorry. I hope my question is clear. Thanks for the answer.
You can look up how many times three.js called gl.drawXXX by looking at renderer.info.render.calls.
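As a minimal sketch (it reads the same property the full example below prints), you can log that counter right after a render call:
renderer.render(scene, camera);
// renderer.info.render.calls is the number of draw calls issued for the frame just rendered
console.log('draw calls:', renderer.info.render.calls);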
From the example below we see that each Mesh gets its own draw call. If we added shadows, it would likely be one draw call per mesh per shadow-casting light. Three.js has optional culling, so if something is not visible it might not try to draw it.
'use strict';
/* global THREE */
function main() {
const infoElem = document.querySelector('#info');
const canvas = document.querySelector('#c');
const renderer = new THREE.WebGLRenderer({canvas: canvas});
const fov = 75;
const aspect = 2; // the canvas default
const near = 0.1;
const far = 5;
const camera = new THREE.PerspectiveCamera(fov, aspect, near, far);
camera.position.z = 2;
const scene = new THREE.Scene();
{
const color = 0xFFFFFF;
const intensity = 1;
const light = new THREE.DirectionalLight(color, intensity);
light.position.set(-1, 2, 4);
scene.add(light);
}
const boxWidth = 1;
const boxHeight = 1;
const boxDepth = 1;
const geometry = new THREE.BoxGeometry(boxWidth, boxHeight, boxDepth);
function makeInstance(geometry, color, x) {
const material = new THREE.MeshPhongMaterial({color});
const cube = new THREE.Mesh(geometry, material);
scene.add(cube);
cube.position.x = x;
return cube;
}
const cubes = [
makeInstance(geometry, 0x44aa88, 0),
makeInstance(geometry, 0x8844aa, -2),
makeInstance(geometry, 0xaa8844, 2),
];
function resizeRendererToDisplaySize(renderer) {
const canvas = renderer.domElement;
const width = canvas.clientWidth;
const height = canvas.clientHeight;
const needResize = canvas.width !== width || canvas.height !== height;
if (needResize) {
renderer.setSize(width, height, false);
}
return needResize;
}
function render(time) {
time *= 0.001;
if (resizeRendererToDisplaySize(renderer)) {
const canvas = renderer.domElement;
camera.aspect = canvas.clientWidth / canvas.clientHeight;
camera.updateProjectionMatrix();
}
cubes.forEach((cube, ndx) => {
const speed = 1 + ndx * .1;
const rot = time * speed;
cube.rotation.x = rot;
cube.rotation.y = rot;
});
renderer.render(scene, camera);
infoElem.textContent = JSON.stringify(renderer.info, null, 2);
requestAnimationFrame(render);
}
requestAnimationFrame(render);
}
main();
body {
margin: 0;
}
#c {
width: 100vw;
height: 100vh;
display: block;
}
#info {
position: absolute;
left: 0;
top: 0;
color: white;
font-size: x-small;
}
<canvas id="c"></canvas>
<pre id="info"></pre>
<script src="https://threejsfundamentals.org/threejs/resources/threejs/r94/three.min.js"></script>
Another way is to use a tool such as this:
https://spector.babylonjs.com
Or the built in inspector that comes with Firefox (nightly?). It will give you more information on the draw call.

decay and distance with physically correct lighting in three.js

What does the distance setting mean in three.js in relation to physically based lighting?
For non-physically-based lighting, the distance setting is the distance over which the light's influence fades out linearly. Effectively:
lightAffect = 1 - min(1, distanceFromLight / distance)
I don't know physically based lighting well, but it seems to me real lights don't have a distance setting; they just have a power output (lumens) and decay based on the atmosphere's density. Three.js has both a power setting and a decay setting, although it's not clear at all what decay should be set to, as the docs effectively just say to set it to 2.
What should I be setting distance to for a physically based PointLight, for example, if I want physically based lighting?
'use strict';
/* global dat */
function main() {
const canvas = document.querySelector('#c');
const renderer = new THREE.WebGLRenderer({canvas: canvas});
renderer.physicallyCorrectLights = true;
const fov = 45;
const aspect = 2; // the canvas default
const zNear = 0.1;
const zFar = 100;
const camera = new THREE.PerspectiveCamera(fov, aspect, zNear, zFar);
camera.position.set(0, 10, 20);
camera.lookAt(0, 5, 0);
const scene = new THREE.Scene();
scene.background = new THREE.Color('black');
{
const planeSize = 40;
const planeGeo = new THREE.PlaneBufferGeometry(planeSize, planeSize);
const planeMat = new THREE.MeshPhongMaterial({
color: '#A86',
side: THREE.DoubleSide,
});
const mesh = new THREE.Mesh(planeGeo, planeMat);
mesh.rotation.x = Math.PI * -.5;
scene.add(mesh);
} {
const cubeSize = 4;
const cubeGeo = new THREE.BoxBufferGeometry(cubeSize, cubeSize, cubeSize);
const cubeMat = new THREE.MeshPhongMaterial({color: '#8AC'});
const mesh = new THREE.Mesh(cubeGeo, cubeMat);
mesh.position.set(cubeSize + 1, cubeSize / 2, 0);
scene.add(mesh);
}
{
const sphereRadius = 3;
const sphereWidthDivisions = 32;
const sphereHeightDivisions = 16;
const sphereGeo = new THREE.SphereBufferGeometry(sphereRadius, sphereWidthDivisions, sphereHeightDivisions);
const sphereMat = new THREE.MeshPhongMaterial({color: '#CA8'});
const mesh = new THREE.Mesh(sphereGeo, sphereMat);
mesh.position.set(-sphereRadius - 1, sphereRadius + 2, 0);
scene.add(mesh);
}
{
const color = 0xFFFFFF;
const intensity = 1;
const light = new THREE.PointLight(color, intensity);
light.power = 800;
light.distance = 20;
light.position.set(0, 10, 5);
scene.add(light);
light.decay = 2;
const helper = new THREE.PointLightHelper(light);
scene.add(helper);
const onChange = () => {
helper.update();
render();
};
setTimeout(onChange);
window.onresize = onChange;
const gui = new dat.GUI();
gui.add(light, 'distance', 0, 100).onChange(onChange);
gui.add(light, 'decay', 0, 4).onChange(onChange);
gui.add(light, 'power', 0, 3000).onChange(onChange);
}
function resizeRendererToDisplaySize(renderer) {
const canvas = renderer.domElement;
const width = canvas.clientWidth;
const height = canvas.clientHeight;
const needResize = canvas.width !== width || canvas.height !== height;
if (needResize) {
renderer.setSize(width, height, false);
}
return needResize;
}
function render() {
if (resizeRendererToDisplaySize(renderer)) {
const canvas = renderer.domElement;
camera.aspect = canvas.clientWidth / canvas.clientHeight;
camera.updateProjectionMatrix();
}
renderer.render(scene, camera);
}
}
main();
html, body {
margin: 0;
height: 100%;
}
#c {
width: 100%;
height: 100%;
display: block;
}
<script src="https://cdnjs.cloudflare.com/ajax/libs/three.js/96/three.js"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/dat-gui/0.7.2/dat.gui.min.js"></script>
<canvas id="c"></canvas>
Reading through the three.js source and the paper it links to, at least as of r95 the distance setting should basically be Infinity for physically based lights.
In the paper they point out that physically based lights shine to infinity, but of course in a 3D engine that's no good. Most 3D engines need to compute the minimum number of lights per object drawn, so a lightDistance setting was added: if the light is further away than lightDistance they can ignore the light. The problem is there would be a sharp edge if they just stopped using the light past lightDistance, so they hacked in a falloff.
three.js copied that lightDistance and falloff setting from the paper, but three.js does not cull lights from calculations when lights are far away, so there seems to be no reason not to set distance to infinity AFAICT, at least as of r95.
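As a minimal sketch of that conclusion (not from the original answer; note the three.js docs describe a distance of 0 as "no limit", which has the same effect as no artificial cutoff):
renderer.physicallyCorrectLights = true;
const light = new THREE.PointLight(0xFFFFFF);
light.power = 800;   // luminous power in lumens, meaningful with physicallyCorrectLights
light.decay = 2;     // physically correct inverse-square falloff
light.distance = 0;  // 0 = "no limit" per the docs, i.e. no artificial distance cutoff
light.position.set(0, 10, 5);
scene.add(light);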
