I'm trying to implement raycasting for the Points object.
The problem is that the raycaster selection doesn't match the pointer position.
I took these two examples from three.js as a reference:
webgl_interactive_raycasting_points
webgl_interactive_points
but I still can't figure out what I am doing wrong.
Here is my CodePen:
https://codepen.io/simone-tasca/pen/YzapWMN
let scene = new THREE.Scene()
const near = 0.1
const far = 5000
const fov = 30
const aspect = window.innerWidth / window.innerHeight
const camera = new THREE.PerspectiveCamera(fov, aspect, near, far)
camera.position.set(-1.25, 0.8, -1.9)
let ambientLight = new THREE.AmbientLight('white', 1)
scene.add(ambientLight)
// POINTS CONTAINER ==========================================================
const worldCenter = [-1.07, 0, -6.85]
const rotationCorrection = [0.781, 4.305, 0.28]
const worldMap = new THREE.Object3D()
worldMap.position.set(...worldCenter)
worldMap.rotation.set(...rotationCorrection)
scene.add(worldMap)
// THREE.Points =============================================================
const PARTICLE_SIZE = 0.1
let particles, raycaster, INTERSECTED, pointer
let vertices = []
let names = []
let sizes = []
prepareData(sampleData).forEach(coords => {
vertices.push(...coords)
sizes.push(PARTICLE_SIZE)
})
const geometry = new THREE.BufferGeometry()
geometry.attributes.position = new THREE.Float32BufferAttribute(vertices, 3)
geometry.attributes.size = new THREE.Float32BufferAttribute(sizes, 1)
let material = new THREE.PointsMaterial({
color: 0xffffff,
transparent: true,
depthTest: true,
depthWrite: false
})
material.onBeforeCompile = shader => {
shader.vertexShader =
shader.vertexShader.replace('uniform float size;', 'attribute float size;')
}
particles = new THREE.Points(geometry, material)
worldMap.add(particles)
// RAYCASTER =============================================================
raycaster = new THREE.Raycaster()
pointer = new THREE.Vector2(99999, 99999)
document.addEventListener('pointermove', (event) => {
pointer.x = (event.clientX / window.innerWidth) * 2 - 1;
pointer.y = - (event.clientY / window.innerHeight) * 2 + 1;
})
document.addEventListener('pointerout', () => pointer.set(99999, 99999))
// RENDERING ==================================================================
let canvas = document.querySelector('#c')
const renderer = new THREE.WebGLRenderer({
canvas: canvas, alpha: false
})
function resizeRendererToDisplaySize(renderer) {
const width = canvas.clientWidth
const height = canvas.clientHeight
const needResize = canvas.width !== width || canvas.height !== height
if (needResize) renderer.setSize(width, height, false)
return needResize
}
function render(time) {
if (resizeRendererToDisplaySize(renderer)) {
const canvas = renderer.domElement
camera.aspect = canvas.clientWidth / canvas.clientHeight
camera.updateProjectionMatrix()
}
const geometry = particles.geometry
const attributes = geometry.attributes
raycaster.setFromCamera(pointer, camera)
let intersects = raycaster.intersectObject(particles)
if (intersects.length > 0) {
if (INTERSECTED != intersects[0].index) {
console.log(intersects[0].index)
attributes.size.array[INTERSECTED] = PARTICLE_SIZE
INTERSECTED = intersects[0].index
attributes.size.array[INTERSECTED] = PARTICLE_SIZE * 3
attributes.size.needsUpdate = true
}
} else if (INTERSECTED !== null) {
attributes.size.array[INTERSECTED] = PARTICLE_SIZE
attributes.size.needsUpdate = true
INTERSECTED = null
}
renderer.render(scene, camera)
requestAnimationFrame(render)
}
render()
You're very close. The only thing missing is to declare how "wide" you want your raycaster to be. Add this line after initializing the raycaster:
raycaster = new THREE.Raycaster()
raycaster.params.Points.threshold = 0.05;
The threshold is 1 unit wide by default. Think of this as painting with a very broad brush: the first particle you hit may not be the one closest to your mouse pointer. So when you read intersects[0].index, you get the first particle hit by that broad ray (the one closest to the camera), not the one closest to your mouse. If you declare a narrower threshold, the ray will be more precise and you'll get more accurate results.
https://threejs.org/docs/#api/en/core/Raycaster.params
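For example, with the particle size used above (PARTICLE_SIZE = 0.1), a rough sketch would be to tie the threshold to the particle size so only points that visually overlap the pointer are reported (the exact factor here is a judgment call, not something from the docs):
raycaster = new THREE.Raycaster()
// roughly half the rendered particle size; tweak to taste
raycaster.params.Points.threshold = PARTICLE_SIZE / 2 // = 0.05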
I want these cylinders to rest just on top of the terrain. I've tried using raycasters with different orientations, but the raycaster distance and point measurements don't seem correct: when I apply them, the cylinders fall through the bottom of the grid. It's as if they are looking at the non-transformed mesh, without the displacement map applied. How can I get the intersects to line up with the actual displaced mesh?
https://github.com/ledlogic/terrain
/* from https://www.youtube.com/watch?v=2AQLMZwQpDo */
import './style.css'
import * as THREE from 'three'
import { OrbitControls } from 'three/examples/jsm/controls/OrbitControls.js'
import * as dat from 'dat.gui'
const loader = new THREE.TextureLoader()
const heightImg = loader.load('/height.png')
const textureImg = loader.load('/texture.jpg')
const alphaImg = loader.load('/alpha-02.png')
// Debug
const gui = new dat.GUI()
// Canvas
const canvas = document.querySelector('canvas.webgl')
// Scene
const scene = new THREE.Scene()
// Objects
const width = 3;
const height = 3;
const widthSegments = 1500;
const heightSegments = 1500;
const geometry = new THREE.PlaneBufferGeometry(width, height, widthSegments, heightSegments)
// Materials
const material = new THREE.MeshStandardMaterial({
color: 'gray',
map: textureImg,
displacementMap: heightImg,
displacementScale: 0.5,
depthTest: true
})
// Mesh
const planeMesh = new THREE.Mesh(geometry, material);
scene.add(planeMesh);
planeMesh.rotation.x = -Math.PI/2
// Lights
const pointLight = new THREE.PointLight('#dcdcff', 2)
pointLight.position.x = 3
pointLight.position.y = 3
pointLight.position.z = 3
scene.add(pointLight)
// Sizes
const sizes = {
width: window.innerWidth,
height: window.innerHeight
}
window.addEventListener('resize', () => {
// Update sizes
sizes.width = window.innerWidth
sizes.height = window.innerHeight
// Update camera
camera.aspect = sizes.width / sizes.height
camera.updateProjectionMatrix()
// Update renderer
renderer.setSize(sizes.width, sizes.height)
renderer.setPixelRatio(Math.min(window.devicePixelRatio, 2))
})
// Base camera
const camera = new THREE.PerspectiveCamera(75, sizes.width / sizes.height, 0.1, 100)
camera.position.y = 1
scene.add(camera)
// Renderer
const renderer = new THREE.WebGLRenderer({
canvas: canvas
})
renderer.setSize(sizes.width, sizes.height)
renderer.setPixelRatio(Math.min(window.devicePixelRatio, 2))
// Cylinder
var gridMin = -1.4
var gridMax = 1.4
var gridDelta = 0.2
var vector = new THREE.Vector3(0, 0, -1)
for (var x=gridMin;x<gridMax;x+=gridDelta) {
for (var z=gridMin;z<gridMax;z+=gridDelta) {
const cylinderGeometry = new THREE.CylinderGeometry( 0.01, 0.01, 0.05, 20 );
const cylinderMaterial = new THREE.MeshStandardMaterial( {color: 'gray'} );
const cylinderMesh = new THREE.Mesh( cylinderGeometry, cylinderMaterial );
cylinderMesh.position.x = x
cylinderMesh.position.y = 0.5
cylinderMesh.position.z = z
// fix y positions.
/*
var raycaster = new THREE.Raycaster();
raycaster.set(cylinderMesh.position, vector);
var velocity = new THREE.Vector3();
var intersects = raycaster.intersectObject(planeMesh);
if (intersects.length) {
var d = intersects[0].distance;
//cylinderMesh.translateY(-d);
}
*/
scene.add(cylinderMesh);
}
}
var mwdelta = 0;
document.addEventListener( 'mousewheel', (event) => {
mwdelta +=event.deltaY;
});
// Clock
let cameraRadius = 3
const clock = new THREE.Clock()
const tick = () => {
const elapsedTime = clock.getElapsedTime()
var t = 0.125 * elapsedTime
var zoomRadius = cameraRadius + mwdelta / 500
camera.position.x = zoomRadius * Math.sin(t)
camera.position.z = zoomRadius * Math.cos(t)
camera.rotation.y = t
// Render
renderer.render(scene, camera)
// Call tick again on the next frame
window.requestAnimationFrame(tick)
}
tick()
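For what it's worth, Raycaster tests the CPU-side PlaneBufferGeometry, while displacementMap is applied only in the vertex shader, so the intersections really are against the flat, un-displaced plane, exactly as suspected above. One workaround is to skip the raycaster and sample height.png on the CPU. A rough sketch, not from the original post, assuming the 3x3 plane, rotation.x = -Math.PI/2 and displacementScale = 0.5 above, and that each cylinderMesh is pushed into a cylinders array inside the loop (the heightAt helper is made up):
const cylinders = [] // push each cylinderMesh into this array inside the loop above
const heightImage = new Image()
heightImage.src = '/height.png'
heightImage.onload = () => {
  const c = document.createElement('canvas')
  c.width = heightImage.width
  c.height = heightImage.height
  const ctx = c.getContext('2d')
  ctx.drawImage(heightImage, 0, 0)
  const data = ctx.getImageData(0, 0, c.width, c.height).data
  const heightAt = (x, z) => {
    const u = Math.min(Math.max(x / 3 + 0.5, 0), 1) // plane is 3 units wide, centered at origin
    const v = Math.min(Math.max(z / 3 + 0.5, 0), 1) // use 1 - v here if the terrain looks mirrored
    const px = Math.round(u * (c.width - 1))
    const py = Math.round(v * (c.height - 1))
    return (data[(py * c.width + px) * 4] / 255) * 0.5 // red channel scaled by displacementScale
  }
  cylinders.forEach(m => {
    m.position.y = heightAt(m.position.x, m.position.z) + 0.025 // + half the cylinder height
  })
}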
I'm using Box3 to detect intersections so the player can collect coins. I'd like the coin to be removed once it's detected intersecting the player, but for some reason I can't remove its (the coin's) Box3.
I've read the documentation and deduced that Box3 is connected to the item's geometry, but removing the geometry and removing the item from the scene doesn't seem to remove the Box3; it just stays in place, still interacting with the player.
My code fiddle: https://jsfiddle.net/ImLost/g3mu1fqe/2/
Code:
function main() {
const canvas = document.querySelector('#canva');
const renderer = new THREE.WebGLRenderer({ canvas });
renderer.setSize(window.innerWidth, window.innerHeight);
/* Camera */
const fov = 40;
const aspect = window.innerWidth / window.innerHeight;
const near = 0.1;
const far = 1000;
const camera = new THREE.PerspectiveCamera(fov, aspect, near, far);
camera.position.set(0, 65, -45);
camera.up.set(0, 0, 1);
camera.lookAt(0, 0, 0);
const scene = new THREE.Scene();
/* Lights */
const mainLight = new THREE.DirectionalLight(0xffffff, .85);
mainLight.position.set(0, 20, 0);
scene.add(mainLight);
mainLight.castShadow = true;
mainLight.shadow.mapSize.width = 2048;
mainLight.shadow.mapSize.height = 2048;
/* Board */
const boardGeometry = new THREE.PlaneGeometry(50, 50);
const boardMaterial = new THREE.MeshToonMaterial({ color: 0xEEEEEE, side: THREE.DoubleSide });
const board = new THREE.Mesh(boardGeometry, boardMaterial);
board.rotation.x = Math.PI / 2; //The board must be placed flat on the x axis
scene.add(board);
/* Player */
const playerBox = new THREE.Box3() // Used to determine collisions
const playerGeometry = new THREE.BoxGeometry(1, 1, 1.5);
const playerMaterial = new THREE.MeshToonMaterial({ color: 0xAAAAAA });
const player = new THREE.Mesh(playerGeometry, playerMaterial);
player.geometry.computeBoundingBox(playerBox);
scene.add(player);
/* Box helper */
const playerHelper = new THREE.Box3Helper(playerBox, 0xffff00);
scene.add(playerHelper);
/* Coin */
const smallCollectibleRadius = .4
const bigCollectibleRadius = .6
const coinBox = new THREE.Box3();
const coinGeometry = new THREE.SphereGeometry(smallCollectibleRadius, 100, 100);
const coinMaterial = new THREE.MeshToonMaterial({ color: 0xffff00, emissive: 0xffff00 });
const coin = new THREE.Mesh(coinGeometry, coinMaterial);
coin.position.set(0, 0, 3)
scene.add(coin);
coin.geometry.computeBoundingBox(coinBox);
const coinHelper = new THREE.Box3Helper(coinBox, 0xffff00);
scene.add(coinHelper);
function checkCollision(box) {
var collision = playerBox.intersectsBox(box);
if (collision == true) {
return true
}
}
document.addEventListener("keypress", function (event) {
if (checkCollision(coinBox)) {
console.log("Yummy coin!")
coinGeometry.dispose()
coin.geometry.dispose()
scene.remove(coin)
}
});
function render(time) {
time *= 0.001;
const speed = 0.0005
const rotSpeed = 0.00005
const dir = new THREE.Vector3();
playerBox.copy(player.geometry.boundingBox).applyMatrix4(player.matrixWorld);
coinBox.copy(coin.geometry.boundingBox).applyMatrix4(coin.matrixWorld);
document.addEventListener("keypress", function (event) {
if (event.keyCode == 119) {
player.getWorldDirection(dir);
player.position.addScaledVector(dir, speed);
}
if (event.keyCode == 115) {
player.getWorldDirection(dir);
player.position.addScaledVector(dir, -speed);
}
if (event.keyCode == 97) {
player.rotation.y += rotSpeed
}
if (event.keyCode == 100) {
player.rotation.y -= rotSpeed
}
});
renderer.render(scene, camera);
requestAnimationFrame(render);
}
requestAnimationFrame(render);
}
main();
coinGeometry.dispose()
coin.geometry.dispose()
scene.remove(coin)
The code above does not invalidate your coin object, nor its .geometry property; it simply releases the buffer and attribute data from memory. Other properties, like boundingBox, still exist. Otherwise, you would be getting errors when you copy the bounding box into coinBox after the coin has been "consumed."
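For instance, you can verify this in the fiddle (variable names as in your code): even after disposing and removing the coin, the cached bounds are still there.
coinGeometry.dispose()
coin.geometry.dispose()
scene.remove(coin)
console.log(coin.geometry.boundingBox) // still a valid THREE.Box3
console.log(coinBox)                   // still holds the last copied bounds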
Now, you can invalidate the whole coin reference by setting it to null (note that coin is declared with const in your code, so you would need to change it to let for the reassignment to work):
scene.remove(coin)
coin = null
However, JavaScript is garbage-collected, and you may still be able to access the object before it is actually removed from the heap. I would recommend a simple logical workaround:
scene.remove(coin)
coin.userData.consumed = true
coin = null
Then, in your render loop and key event listener, add checks for the new property:
// In the keypress listener:
document.addEventListener("keypress", function (event) {
  if (coin !== null && !('consumed' in coin.userData) && checkCollision(coinBox)) {
    coin.userData.consumed = true
    scene.remove(coin)
  }
});
// In the render loop, only update coinBox while the coin is still live:
playerBox.copy(player.geometry.boundingBox).applyMatrix4(player.matrixWorld);
if (coin !== null && !('consumed' in coin.userData)) {
  coinBox.copy(coin.geometry.boundingBox).applyMatrix4(coin.matrixWorld);
}
I have a THREE.ArrayCamera with 4 cameras and I'm trying to make my object visible from only 1 camera. I saw I can use Layers, which works well when I have a single camera but doesn't work at all with an ArrayCamera: the object isn't visible in any of the cameras, even though it has layer 1 set and camera.cameras[2] has layer 1 enabled. JSFIDDLE: https://jsfiddle.net/h7u02jLw/
mesh = new THREE.Mesh( geometryCylinder, materialCylinder );
mesh.castShadow = true;
mesh.receiveShadow = true;
scene.add( mesh );
mesh.layers.set(1);
camera.cameras[2].layers.enable(1);
light.layers.enable(1);
console.log(camera.cameras[2].layers.test(mesh.layers))
var material2 = new THREE.MeshPhongMaterial({color: 0x00FF00});
mesh2 = new THREE.Mesh( geometryCylinder, material2 );
mesh2.castShadow = true;
mesh2.receiveShadow = true;
scene.add( mesh2 );
The combination of ArrayCamera and Layers has only limited support.
To make the red cylinder render in only one view, it's not sufficient to just enable the layer on the sub camera. All layers you are going to use have to be enabled on the array camera, too. The following code demonstrates this.
However, you immediately see a rendering issue, since shadows are rendered in all views. That happens because the shadow map is updated only once per frame, using the layer configuration of the array camera. Since all layers are enabled there, all views show the shadow of the red cylinder. Shadow maps per sub camera are not supported.
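Pulled out of the listing below for emphasis, the key layer calls are:
mesh.layers.set(1);                 // the red cylinder lives only on layer 1
camera.layers.enable(1);            // the ArrayCamera itself must have the layer enabled
camera.cameras[2].layers.enable(1); // plus the one sub camera that should show it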
let camera, scene, renderer;
let mesh, mesh2;
const AMOUNT = 2;
init();
animate();
function init() {
const ASPECT_RATIO = window.innerWidth / window.innerHeight;
const WIDTH = (window.innerWidth / AMOUNT) * window.devicePixelRatio;
const HEIGHT = (window.innerHeight / AMOUNT) * window.devicePixelRatio;
const cameras = [];
for (let y = 0; y < AMOUNT; y++) {
for (let x = 0; x < AMOUNT; x++) {
const subcamera = new THREE.PerspectiveCamera(40, ASPECT_RATIO, 0.1, 10);
subcamera.viewport = new THREE.Vector4(Math.floor(x * WIDTH), Math.floor(y * HEIGHT), Math.ceil(WIDTH), Math.ceil(HEIGHT));
subcamera.position.x = (x / AMOUNT) - 0.5;
subcamera.position.y = 0.5 - (y / AMOUNT);
subcamera.position.z = 1.5;
subcamera.position.multiplyScalar(2);
subcamera.lookAt(0, 0, 0);
subcamera.updateMatrixWorld();
cameras.push(subcamera);
}
}
camera = new THREE.ArrayCamera(cameras);
camera.layers.enable(1); // layers used by any view must also be enabled on the ArrayCamera itself
camera.position.z = 3;
scene = new THREE.Scene();
scene.add(new THREE.AmbientLight(0x222244));
const light = new THREE.DirectionalLight();
light.position.set(0.5, 0.5, 1);
light.castShadow = true;
light.shadow.camera.zoom = 4; // tighter shadow map
scene.add(light);
const geometryBackground = new THREE.PlaneGeometry(100, 100);
const materialBackground = new THREE.MeshPhongMaterial({
color: 0x000066
});
const background = new THREE.Mesh(geometryBackground, materialBackground);
background.receiveShadow = true;
background.position.set(0, 0, -1);
scene.add(background);
const geometryCylinder = new THREE.CylinderGeometry(0.5, 0.5, 1, 32);
const materialCylinder = new THREE.MeshPhongMaterial({
color: 0xff0000
});
mesh = new THREE.Mesh(geometryCylinder, materialCylinder);
mesh.castShadow = true;
mesh.receiveShadow = true;
scene.add(mesh);
mesh.layers.set(1); // the red cylinder lives only on layer 1
camera.cameras[2].layers.enable(1); // only this sub camera (third view) renders layer 1
//light.layers.set(1);
var material2 = new THREE.MeshPhongMaterial({
color: 0x00FF00
});
mesh2 = new THREE.Mesh(geometryCylinder, material2);
mesh2.castShadow = true;
mesh2.receiveShadow = true;
scene.add(mesh2);
renderer = new THREE.WebGLRenderer();
renderer.setPixelRatio(window.devicePixelRatio);
renderer.setSize(window.innerWidth, window.innerHeight);
renderer.shadowMap.enabled = true;
document.body.appendChild(renderer.domElement);
//
window.addEventListener('resize', onWindowResize);
}
function onWindowResize() {
const ASPECT_RATIO = window.innerWidth / window.innerHeight;
const WIDTH = (window.innerWidth / AMOUNT) * window.devicePixelRatio;
const HEIGHT = (window.innerHeight / AMOUNT) * window.devicePixelRatio;
camera.aspect = ASPECT_RATIO;
camera.updateProjectionMatrix();
for (let y = 0; y < AMOUNT; y++) {
for (let x = 0; x < AMOUNT; x++) {
const subcamera = camera.cameras[AMOUNT * y + x];
subcamera.viewport.set(
Math.floor(x * WIDTH),
Math.floor(y * HEIGHT),
Math.ceil(WIDTH),
Math.ceil(HEIGHT));
subcamera.aspect = ASPECT_RATIO;
subcamera.updateProjectionMatrix();
}
}
renderer.setSize(window.innerWidth, window.innerHeight);
}
function animate() {
mesh.rotation.x += 0.005;
mesh.rotation.z += 0.01;
renderer.render(scene, camera);
requestAnimationFrame(animate);
}
body {
margin: 0;
}
<script src="https://cdn.jsdelivr.net/npm/three#0.126.1/build/three.js"></script>
Hi, I am having a problem, maybe you can help me.
I have a camera going down a tube, following a path, and rotating around that tube while always pointing toward the next point in the tube. However, the camera can sometimes be below or beside the tube, like a roller coaster.
I have the position of point a and the position of the camera, which is point b. I am always looking at point a+1:
var bpoints = this.cameraPathpoints;
var apoints = this.pathPoints;
this.camera.position.copy(bpoints[i]);
this.camera.lookAt(apoints[i+1]);
The camera always looks at the point correctly; however, I want the camera to rotate on its z axis so that it always stays normal to the tube. I tried making some calculations to rotate the camera on its z axis so that it always faces normal to the tube, but they only work in certain positions. Maybe there is a simpler way to do this. Thank you very much for any help.
var angleRadians = Math.atan2(cpv[this.cameraPos].pos.y - centePoints[this.cameraPos].pos.y, cpv[this.cameraPos].pos.x - centePoints[this.cameraPos].pos.x);
if(angleRadians > 0 && angleRadians > Math.PI/2){
console.log("+90",(Math.PI/2) - angleRadians);
angleRadians = (Math.PI/2) - angleRadians;
this.camera.rotateZ(angleRadians);
console.log("rotated ", angleRadians * 180/Math.PI);
}
else if(angleRadians > 0 && angleRadians < Math.PI/2 && anglesum > Math.PI/2){
console.log("-90",(Math.PI/2) - angleRadians);
angleRadians = (Math.PI/2) - angleRadians;
this.camera.rotateZ(-angleRadians);
console.log("rotated ", -angleRadians * 180/Math.PI);
}
else if(angleRadians > 0 && angleRadians < Math.PI/2){
console.log("-90",(Math.PI/2) + angleRadians);
angleRadians = -(Math.PI/2) - (angleRadians/Math.PI/2);
this.camera.rotateZ(angleRadians);
console.log("rotated ", angleRadians * 180/Math.PI);
}
else if(angleRadians < 0 && angleRadians < -Math.PI/2){
console.log("--90");
angleRadians = (Math.PI/2) + angleRadians;
this.camera.rotateZ(-angleRadians);
console.log("rotated ",-angleRadians * 180/Math.PI);
}else if(angleRadians < 0 && angleRadians > -Math.PI/2){
console.log("+-90");
angleRadians = (Math.PI/2) - angleRadians;
this.camera.rotateZ(-angleRadians);
console.log("rotated ", -angleRadians * 180/Math.PI);
}
Rather than doing math, make the camera a child of some other THREE.Object3D and use lookAt with that object. Set the camera's position and rotation relative to that object.
Below, the object is called the mount. It goes down the path (the center of the tube), and the camera is a child of the mount. The tube has a 1-unit radius, so setting camera.position.y to 1.5 puts the camera outside the tube. lookAt makes non-camera objects look down positive Z, but the camera looks down negative Z, so we rotate the camera 180 degrees.
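In outline, the relevant pieces of the full example below are:
const mount = new THREE.Object3D();   // travels along the center of the tube
mount.add(camera);                    // the camera rides on the mount
scene.add(mount);
camera.position.y = 1.5;              // lift it outside the 1-unit-radius tube
camera.rotation.y = Math.PI;          // lookAt aims +Z, cameras look down -Z

// each frame: put the mount on the path and aim it at a point slightly ahead
curve.getPointAt(t, mount.position);
curve.getPointAt((t + 0.01) % 1, target);
mount.lookAt(target);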
Example:
'use strict';
/* global THREE */
function main() {
const canvas = document.querySelector('#c');
const renderer = new THREE.WebGLRenderer({canvas: canvas});
const scene = new THREE.Scene();
scene.background = new THREE.Color(0xAAAAAA);
const fov = 40;
const aspect = 2; // the canvas default
const near = 0.1;
const far = 1000;
const camera = new THREE.PerspectiveCamera(fov, aspect, near, far);
camera.position.y = 1.5; // 1.5 units above the mount, outside the 1-unit-radius tube
camera.rotation.y = Math.PI; // the mount will lookAt positive Z; the camera looks down negative Z
const mount = new THREE.Object3D();
mount.add(camera);
scene.add(mount);
{
const color = 0xFFFFFF;
const intensity = 1;
const light = new THREE.DirectionalLight(color, intensity);
light.position.set(-1, 2, 4);
scene.add(light);
}
{
const color = 0xFFFFFF;
const intensity = 1;
const light = new THREE.DirectionalLight(color, intensity);
light.position.set(1, -2, -4);
scene.add(light);
}
const curve = new THREE.Curves.GrannyKnot();
const tubularSegments = 200;
const radius = 1;
const radialSegments = 6;
const closed = true;
const tube = new THREE.TubeBufferGeometry(
curve, tubularSegments, radius, radialSegments, closed);
const texture = new THREE.DataTexture(new Uint8Array([128, 255, 255, 128]),
2, 2, THREE.LuminanceFormat);
texture.needsUpdate = true;
texture.magFilter = THREE.NearestFilter;
texture.wrapS = THREE.RepeatWrapping;
texture.wrapT = THREE.RepeatWrapping;
texture.repeat.set( 100, 4 );
const material = new THREE.MeshPhongMaterial({
map: texture,
color: '#8CF',
flatShading: true,
});
const mesh = new THREE.Mesh(tube, material);
scene.add(mesh);
const target = new THREE.Vector3();
function resizeRendererToDisplaySize(renderer) {
const canvas = renderer.domElement;
const width = canvas.clientWidth;
const height = canvas.clientHeight;
const needResize = canvas.width !== width || canvas.height !== height;
if (needResize) {
renderer.setSize(width, height, false);
}
return needResize;
}
function render(time) {
time *= 0.001;
if (resizeRendererToDisplaySize(renderer)) {
const canvas = renderer.domElement;
camera.aspect = canvas.clientWidth / canvas.clientHeight;
camera.updateProjectionMatrix();
}
const t = time * 0.1 % 1;
curve.getPointAt(t, mount.position);
curve.getPointAt((t + 0.01) % 1, target);
mount.lookAt(target);
renderer.render(scene, camera);
requestAnimationFrame(render);
}
requestAnimationFrame(render);
}
main();
body { margin: 0; }
canvas { width: 100vw; height: 100vh; display: block; }
<canvas id="c"></canvas>
<script src="https://threejsfundamentals.org/threejs/resources/threejs/r102/three.min.js"></script>
<script src="https://threejsfundamentals.org/threejs/resources/threejs/r102/js/CurveExtras.js"></script>
You can easily orient the camera relative to the mount to, say, look more toward or away from the path by setting camera.rotation.x. If you want to rotate around the mount, either change the mount's up property or add another object between the mount and the camera and set its Z rotation.
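Again in outline, the second listing below adds:
const subMount = new THREE.Object3D();
subMount.rotation.z = Math.PI * .5;   // roll the camera a quarter turn around the path
subMount.add(camera);
mount.add(subMount);

// each frame: make the mount's up vector perpendicular to the curve
mountToTarget.subVectors(mount.position, target).normalize();
targetToTarget2.subVectors(target2, target).normalize();
mount.up.crossVectors(mountToTarget, targetToTarget2);
mount.lookAt(target);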
'use strict';
/* global THREE */
function main() {
const canvas = document.querySelector('#c');
const renderer = new THREE.WebGLRenderer({canvas: canvas});
const scene = new THREE.Scene();
scene.background = new THREE.Color(0xAAAAAA);
const fov = 40;
const aspect = 2; // the canvas default
const near = 0.1;
const far = 1000;
const camera = new THREE.PerspectiveCamera(fov, aspect, near, far);
camera.position.y = 1.5; // 1.5 units above the mount, outside the 1-unit-radius tube
camera.rotation.y = Math.PI; // the mount will lookAt positive Z; the camera looks down negative Z
const mount = new THREE.Object3D();
const subMount = new THREE.Object3D();
subMount.rotation.z = Math.PI * .5;
subMount.add(camera);
mount.add(subMount);
scene.add(mount);
{
const color = 0xFFFFFF;
const intensity = 1;
const light = new THREE.DirectionalLight(color, intensity);
light.position.set(-1, 2, 4);
scene.add(light);
}
{
const color = 0xFFFFFF;
const intensity = 1;
const light = new THREE.DirectionalLight(color, intensity);
light.position.set(1, -2, -4);
scene.add(light);
}
const curve = new THREE.Curves.GrannyKnot();
const tubularSegments = 200;
const radius = 1;
const radialSegments = 6;
const closed = true;
const tube = new THREE.TubeBufferGeometry(
curve, tubularSegments, radius, radialSegments, closed);
const texture = new THREE.DataTexture(new Uint8Array([128, 255, 255, 128]),
2, 2, THREE.LuminanceFormat);
texture.needsUpdate = true;
texture.magFilter = THREE.NearestFilter;
texture.wrapS = THREE.RepeatWrapping;
texture.wrapT = THREE.RepeatWrapping;
texture.repeat.set( 100, 4 );
const material = new THREE.MeshPhongMaterial({
map: texture,
color: '#8CF',
flatShading: true,
});
const mesh = new THREE.Mesh(tube, material);
scene.add(mesh);
const target = new THREE.Vector3();
const target2 = new THREE.Vector3();
const mountToTarget = new THREE.Vector3();
const targetToTarget2 = new THREE.Vector3();
function resizeRendererToDisplaySize(renderer) {
const canvas = renderer.domElement;
const width = canvas.clientWidth;
const height = canvas.clientHeight;
const needResize = canvas.width !== width || canvas.height !== height;
if (needResize) {
renderer.setSize(width, height, false);
}
return needResize;
}
function render(time) {
time *= 0.001;
if (resizeRendererToDisplaySize(renderer)) {
const canvas = renderer.domElement;
camera.aspect = canvas.clientWidth / canvas.clientHeight;
camera.updateProjectionMatrix();
}
const t = time * 0.1 % 1;
curve.getPointAt(t, mount.position);
curve.getPointAt((t + 0.01) % 1, target);
// set the mount's up vector to be perpendicular to the curve
curve.getPointAt((t + 0.02) % 1, target2);
mountToTarget.subVectors(mount.position, target).normalize();
targetToTarget2.subVectors(target2, target).normalize();
mount.up.crossVectors(mountToTarget, targetToTarget2);
mount.lookAt(target);
renderer.render(scene, camera);
requestAnimationFrame(render);
}
requestAnimationFrame(render);
}
main();
body { margin: 0; }
canvas { width: 100vw; height: 100vh; display: block; }
<canvas id="c"></canvas>
<script src="https://threejsfundamentals.org/threejs/resources/threejs/r102/three.min.js"></script>
<script src="https://threejsfundamentals.org/threejs/resources/threejs/r102/js/CurveExtras.js"></script>
What does the distance setting mean in three.js in relation to physically based lighting?
For non-physically-based lighting, the distance setting is the range over which the light's influence fades out linearly, reaching zero at distance. Effectively:
lightAffect = 1 - min(1, distanceFromLight / distance)
I don't know physically based lighting well, but it seems to me real lights don't have a distance setting; they just have a power output (lumens) and decay based on the density of the atmosphere. Three.js has both a power setting and a decay setting, although it's not at all clear what decay should be set to, as the docs effectively just say to set it to 2.
What should I be setting distance to for, say, a physically based PointLight if I want physically based lighting?
'use strict';
/* global dat */
function main() {
const canvas = document.querySelector('#c');
const renderer = new THREE.WebGLRenderer({canvas: canvas});
renderer.physicallyCorrectLights = true;
const fov = 45;
const aspect = 2; // the canvas default
const zNear = 0.1;
const zFar = 100;
const camera = new THREE.PerspectiveCamera(fov, aspect, zNear, zFar);
camera.position.set(0, 10, 20);
camera.lookAt(0, 5, 0);
const scene = new THREE.Scene();
scene.background = new THREE.Color('black');
{
const planeSize = 40;
const planeGeo = new THREE.PlaneBufferGeometry(planeSize, planeSize);
const planeMat = new THREE.MeshPhongMaterial({
color: '#A86',
side: THREE.DoubleSide,
});
const mesh = new THREE.Mesh(planeGeo, planeMat);
mesh.rotation.x = Math.PI * -.5;
scene.add(mesh);
} {
const cubeSize = 4;
const cubeGeo = new THREE.BoxBufferGeometry(cubeSize, cubeSize, cubeSize);
const cubeMat = new THREE.MeshPhongMaterial({color: '#8AC'});
const mesh = new THREE.Mesh(cubeGeo, cubeMat);
mesh.position.set(cubeSize + 1, cubeSize / 2, 0);
scene.add(mesh);
}
{
const sphereRadius = 3;
const sphereWidthDivisions = 32;
const sphereHeightDivisions = 16;
const sphereGeo = new THREE.SphereBufferGeometry(sphereRadius, sphereWidthDivisions, sphereHeightDivisions);
const sphereMat = new THREE.MeshPhongMaterial({color: '#CA8'});
const mesh = new THREE.Mesh(sphereGeo, sphereMat);
mesh.position.set(-sphereRadius - 1, sphereRadius + 2, 0);
scene.add(mesh);
}
{
const color = 0xFFFFFF;
const intensity = 1;
const light = new THREE.PointLight(color, intensity);
light.power = 800;
light.distance = 20;
light.position.set(0, 10, 5);
scene.add(light);
light.decay = 2;
const helper = new THREE.PointLightHelper(light);
scene.add(helper);
const onChange = () => {
helper.update();
render();
};
setTimeout(onChange);
window.onresize = onChange;
const gui = new dat.GUI();
gui.add(light, 'distance', 0, 100).onChange(onChange);
gui.add(light, 'decay', 0, 4).onChange(onChange);
gui.add(light, 'power', 0, 3000).onChange(onChange);
}
function resizeRendererToDisplaySize(renderer) {
const canvas = renderer.domElement;
const width = canvas.clientWidth;
const height = canvas.clientHeight;
const needResize = canvas.width !== width || canvas.height !== height;
if (needResize) {
renderer.setSize(width, height, false);
}
return needResize;
}
function render() {
if (resizeRendererToDisplaySize(renderer)) {
const canvas = renderer.domElement;
camera.aspect = canvas.clientWidth / canvas.clientHeight;
camera.updateProjectionMatrix();
}
renderer.render(scene, camera);
}
}
main();
html, body {
margin: 0;
height: 100%;
}
#c {
width: 100%;
height: 100%;
display: block;
}
<script src="https://cdnjs.cloudflare.com/ajax/libs/three.js/96/three.js"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/dat-gui/0.7.2/dat.gui.min.js"></script>
<canvas id="c"></canvas>
Reading through the three.js source and the paper it links to, at least as of r95 the distance setting should basically be Infinity for physically based lights.
In the paper they point out that physically based lights shine to infinity, but of course in a 3D engine that's no good. Most 3D engines need to compute the minimum number of lights per object drawn, so a lightDistance setting was added: if the light is farther away than lightDistance, they can ignore it. The problem is that there would be a sharp edge if they just stopped using the light past lightDistance, so they hacked in a falloff.
three.js copied that lightDistance and falloff setting from the paper, but three.js does not cull far-away lights from its calculations, so there seems to be no reason not to set distance to infinity, AFAICT, at least as of r95.
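Putting that together, a minimal sketch of the light setup (reusing the renderer and scene from the question's snippet; the roomLight name is just for illustration, and note that in three.js a distance of 0 is documented as "no limit", which amounts to the same thing as the Infinity suggested above):
renderer.physicallyCorrectLights = true;

const roomLight = new THREE.PointLight(0xffffff);
roomLight.power = 800;       // luminous power in lumens (roughly a 60 W incandescent bulb)
roomLight.decay = 2;         // physically correct inverse-square falloff
roomLight.distance = 0;      // no distance cutoff, i.e. effectively "infinite" range
roomLight.position.set(0, 10, 5);
scene.add(roomLight);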