THREE.js—Fit texture to scaled plane

I am trying to create a Plane, which is resizable and displays the webcam's video stream.
I have this:
const
    video = document.createElement('video'),
    mesh = new THREE.Mesh(
        new THREE.PlaneGeometry(1, 1),
        new THREE.MeshBasicMaterial()
    );

let stream = null;
mesh.material.map = new THREE.VideoTexture(video);

async startStream () {
    const
        { x: width, y: height } = this.scale,
        stream = await navigator.mediaDevices.getUserMedia({
            video: {
                width: { ideal: width },
                height: { ideal: height }
            }
        });
    video.srcObject = stream;
    video.play();
    return stream;
}

async stopStream () { ... }

async function onResize(width, height) {
    stopStream();
    mesh.scale.set(width, height, 1);
    stream = await startStream();
}
The webcam can be seen on the Plane, but the video is not scaled correctly. I guess that is because the mesh is scaled. So how can I fit the video/texture on the scaled plane?
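One common approach (a sketch, not from the original post; fitTextureToPlane is a hypothetical helper) is to leave the geometry as-is and crop the texture instead: THREE.Texture exposes repeat and offset, so you can emulate CSS object-fit: cover by comparing the aspect ratios of the video and the plane:

// Sketch: crop a video texture so it covers a plane of arbitrary
// aspect ratio. `texture`, `video` and `mesh` follow the question's
// code; `fitTextureToPlane` is a hypothetical helper.
function fitTextureToPlane(texture, video, mesh) {
    const planeAspect = mesh.scale.x / mesh.scale.y;
    const videoAspect = video.videoWidth / video.videoHeight;
    if (videoAspect > planeAspect) {
        // video is wider than the plane: crop left and right
        texture.repeat.set(planeAspect / videoAspect, 1);
        texture.offset.set((1 - planeAspect / videoAspect) / 2, 0);
    } else {
        // video is taller than the plane: crop top and bottom
        texture.repeat.set(1, videoAspect / planeAspect);
        texture.offset.set(0, (1 - videoAspect / planeAspect) / 2);
    }
}

This would need to run after every resize and again once the video's metadata has loaded, since videoWidth and videoHeight are 0 before then.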

GLB animation in three.js is too fast

I have uploaded a glb file with an animation, and the animation is moving extremely fast, and I do not know why.
This is my character's animation code:
class MainChar extends THREE.Object3D {
    constructor() {
        super();
        this.object = new THREE.Object3D();
        this.object.position.set(0, 1, 50);
        this.object.scale.x = 20;
        this.object.scale.y = 20;
        this.object.scale.z = 20;
        // load house model from blender file
        /*
        loader.setPath('../../models/characters/');
        const gltf = loader.load('Douglas.glb', (gltf) => {
            gltf.scene.traverse(c => {
                c.castShadow = true;
            });
            this.object.add( gltf.scene );
        });
        */
        const loader = new THREE.GLTFLoader();
        loader.setPath('../../models/characters/');
        const gltf = loader.load('walk.glb', (gltf) => {
            gltf.scene.traverse(c => {
                c.castShadow = true;
            });
            this.mixer = new THREE.AnimationMixer( gltf.scene );
            this.mixer.timeScale = 1 / 5;
            var action = this.mixer.clipAction( gltf.animations[ 0 ] );
            action.play();
            this.object.add( gltf.scene );
        });
        // save keyboard bindings
        this.keyboard = new THREEx.KeyboardState();
        /*
        // creating a box (need to change it to a character with animations)
        const geometry = new THREE.BoxGeometry( 1, 1, 1 );
        const material = new THREE.MeshBasicMaterial( { color: 0x00ff00 } );
        this.object = new THREE.Mesh( geometry, material );
        this.object.scale.x = 5;
        this.object.scale.y = 10;
        this.object.scale.z = 5;
        // starting position for character
        this.object.position.set(0, 10, 50);
        */
        this.update = function (time) {
            if ( this.mixer ) {
                this.mixer.update( time );
                console.log(time);
            }
            // MOVEMENT OF BOX
            // speed
            var moveDistance = 0.5;
            // var rotateAngle = Math.PI / 2 * 0.05;
            // move forwards/backwards/left/right
            if ( this.keyboard.pressed("W") ) {
                this.object.translateZ( -moveDistance );
            }
            if ( this.keyboard.pressed("S") ) {
                this.object.translateZ( moveDistance );
            }
            if ( this.keyboard.pressed("A") ) {
                this.object.translateX( -moveDistance );
            }
            if ( this.keyboard.pressed("D") ) {
                this.object.translateX( moveDistance );
            }
            // move forwards/backwards/left/right
            if ( this.keyboard.pressed("up") ) {
                this.object.translateZ( -moveDistance );
            }
            if ( this.keyboard.pressed("down") ) {
                this.object.translateZ( moveDistance );
            }
            if ( this.keyboard.pressed("left") ) {
                this.object.translateX( -moveDistance );
            }
            if ( this.keyboard.pressed("right") ) {
                this.object.translateX( moveDistance );
            }
            // FOR CAMERA ROTATIONS
            // this.object.rotateOnAxis( new THREE.Vector3(0, 1, 0), -rotateAngle );
            // this.object.rotateOnAxis( new THREE.Vector3(0, 1, 0), rotateAngle );
            // var rotation_matrix = new THREE.Matrix4().identity();
            if ( this.keyboard.pressed("Z") ) {
                this.object.position.set(0, 1, 50);
                this.object.rotation.set(0, 0, 0);
            }
            /*
            // global coordinates
            if ( this.keyboard.pressed("left") )
                this.object.position.x -= moveDistance;
            if ( this.keyboard.pressed("right") )
                this.object.position.x += moveDistance;
            if ( this.keyboard.pressed("up") )
                this.object.position.z -= moveDistance;
            if ( this.keyboard.pressed("down") )
                this.object.position.z += moveDistance;
            */
        };
    }
}
This is the Time class that allows the game to be paused and tracks the accumulated run time:
class Time {
    constructor() {
        this.is_pause = false;
        this.accumalated_run_time = 0;
        this.clock = new THREE.Clock();
        this.pause_clock = new THREE.Clock();
    }
    getRunTime() {
        this.accumalated_run_time += this.clock.getDelta();
        return this.accumalated_run_time;
    }
    pause() {
        this.is_pause = true;
    }
    unpause() {
        this.is_pause = false;
        this.clock.getDelta();
    }
}
This is the sceneManager that calls up my character for updating animations:
class SceneManager {
    constructor(canvas) {
        // this entire function renders a scene where you can add as many items as you want to it
        // (e.g. we can create the house and add as many items as we want to the house).
        // It renders objects from other javascript files.
        //--------------------------------------------------------------------------------------
        // These are supposed to act like constants. DO NOT CHANGE
        this.GAME_PAUSE = "pause";
        this.GAME_RUN = "run";
        //--------------------------------------------------------------------------------------
        // we use (this) to make variables accessible in other classes
        this.time = new Time();
        this.game_state = this.GAME_RUN;
        this.screenDimensions = {
            width: canvas.width,
            height: canvas.height
        };
        // the essentials for rendering a scene
        this.scene = this.buildScene();
        this.renderer = this.buildRender(this.screenDimensions);
        this.camera = this.buildCamera(this.screenDimensions);
        this.managers = this.createManagers();
        this.loadToScene(this.managers[0].entities);
        // allow camera to orbit target (player)
        this.controls = new THREE.OrbitControls(this.camera, this.renderer.domElement);
        this.controls.target.set(0, 20, 0);
        this.controls.update();
    }
    loadToScene(entities) {
        for (let i = 0; i < entities.length; i++) {
            console.log("before" + i.toString());
            this.scene.add(entities[i].object);
            console.log("after");
        }
    }
    // this function creates our scene
    buildScene() {
        // create a new scene
        const scene = new THREE.Scene();
        // set the scene's background -> in this case it is our skybox
        const loader = new THREE.CubeTextureLoader();
        // it uses different textures per face of the cube
        const texture = loader.load([
            '../skybox/House/posx.jpg',
            '../skybox/House/negx.jpg',
            '../skybox/House/posy.jpg',
            '../skybox/House/negy.jpg',
            '../skybox/House/posz.jpg',
            '../skybox/House/negz.jpg'
        ]);
        scene.background = texture;
        // if we wanted it to be a colour, it would have been this commented code:
        // scene.background = new THREE.Color("#000");
        return scene;
    }
    // this creates a renderer for us
    buildRender({ width, height }) {
        const renderer = new THREE.WebGLRenderer({
            canvas: canvas,
            antialias: true,
            alpha: true
        });
        renderer.shadowMap.enabled = true;
        renderer.shadowMap.type = THREE.PCFSoftShadowMap;
        renderer.setPixelRatio(window.devicePixelRatio);
        renderer.setSize(window.innerWidth, window.innerHeight);
        return renderer;
    }
    // create a camera for the screen
    buildCamera({ width, height }) {
        // SETTING FIELD OF VIEW, ASPECT RATIO (which should generally be width / height),
        // NEAR AND FAR (anything outside near/far is clipped)
        const aspectRatio = width / height;
        const fieldOfView = 60;
        const nearPlane = 1;
        const farPlane = 1000;
        // there are 2 types of cameras: orthographic and perspective.
        // We will use perspective (more realistic).
        const camera = new THREE.PerspectiveCamera(fieldOfView, aspectRatio, nearPlane, farPlane);
        // set where the camera is
        camera.position.set(-50, 50, 70);
        return camera;
    }
    // add subjects to the scene
    createManagers() {
        const managers = [new EntityManager()];
        // can be altered so we can add multiple entities, and depending on which position
        // it is, certain ones won't be paused, and some will be
        managers[0].register(new GeneralLights());
        managers[0].register(new House());
        managers[0].register(new MainChar());
        managers[0].register(new SceneSubject());
        return managers;
    }
    // this updates the subject/model every frame
    update() {
        // won't call this loop if it's paused -> only for objects that need to be paused
        // (managers that need to be paused)
        if (this.game_state == this.GAME_RUN) {
            const runTime = this.time.getRunTime();
            this.managers[0].update(runTime);
        }
        // update orbit controls
        this.controls.update();
        this.renderer.render(this.scene, this.camera);
    }
    // this resizes our game when the screen size changes
    onWindowResize() {
        this.camera.aspect = window.innerWidth / window.innerHeight;
        this.camera.updateProjectionMatrix();
        this.renderer.setSize(window.innerWidth, window.innerHeight);
    }
    // when pause mode is entered, the pause menu needs to be rendered
    pause() {
        this.game_state = this.GAME_PAUSE;
        this.time.pause();
    }
    unpause() {
        this.game_state = this.GAME_RUN;
        this.time.unpause();
    }
}
I think the issue is with your AnimationMixer.update() call. If you look at the docs, update() expects a time delta in seconds, i.e. the time passed since the last frame, but you're passing the total running time. You can fix this by passing clock.getDelta() as the argument:
this.update = function (time) {
    if ( this.mixer ) {
        // this assumes a THREE.Clock instance was created, e.g.
        // this.clock = new THREE.Clock(); in the constructor
        const delta = this.clock.getDelta();
        this.mixer.update(delta);
    }
    // ...
};
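Alternatively, since SceneManager already routes a pausable, accumulated run time into update(), you could derive the delta from that value instead (a sketch; lastRunTime is a hypothetical field added for this purpose). This keeps the mixer frozen while the game is paused:

this.update = function (runTime) {
    if ( this.mixer ) {
        // compute the per-frame delta from the accumulated run time
        const delta = runTime - (this.lastRunTime || 0);
        this.lastRunTime = runTime;
        this.mixer.update(delta);
    }
    // ...
};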

Pixel Data Length mismatch to Buffer Geometry Vertices Length - Three.js

I am using Mapbox RGB tiles to get elevation data and update Plane Buffer Geometries in Three.js. Then I am planning on using their satellite texture to complete the 3D terrain tile mesh generation. After that I will create a dynamic loading grid system.
Now my problem is that I am getting more image pixel data than vertices, which means I can't map the pixel data to the vertices' Z value for height. I did a quick plug-and-check to see if a fixed width/height segment count would work, but it does not.
For context, the image tiles are at 512x512 resolution. The pixel data length is 520849 and the Float32Array length of the buffer geometry is 519168.
Below is the rough code I have; it was intended to create a dataset served over a REST API as ThreeJSON.
import * as THREE from "three";
import fs from "fs";
import Canvas from "canvas";
import path from "path";
const __dirname = path.resolve(path.dirname(""));
async function getPixels(imagePath) {
    try {
        const data = await fs.promises.readFile(__dirname + imagePath);
        const img = new Canvas.Image(); // Create a new Image
        img.src = data;
        // Initialize a new Canvas with the same dimensions
        // as the image, and get a 2D drawing context for it.
        const canvas = Canvas.createCanvas(img.width, img.height);
        const ctx = canvas.getContext("2d");
        ctx.drawImage(img, 0, 0, img.width / 4, img.height / 4);
        const imgData = ctx.getImageData(0, 0, img.width, img.height);
        return imgData.data;
    } catch (err) {
        console.error(err);
    }
}
function rgbToHeight(r, g, b) {
return -10000 + (r * 256 * 256 + g * 256 + b) * 0.1;
}
export const tileToMesh = async (rgbTilePath, textureTilePath) => {
    try {
        const pixels = await getPixels(rgbTilePath);
        const planeSize = parseInt(Math.sqrt(pixels.length / 4));
        const geometry = new THREE.PlaneGeometry(
            planeSize,
            planeSize,
            415, 415
            // planeSize - 1,
            // planeSize - 1
        );
        for (let i = 0; i < pixels.length; i += 4) {
            const r = pixels[i + 0];
            const g = pixels[i + 1];
            const b = pixels[i + 2];
            const height = rgbToHeight(r, g, b);
            if (!geometry.vertices[i / 4]) {
                console.error(`No vertices at index ${i / 4} found.`);
                break;
            }
            geometry.vertices[i / 4].z = height;
        }
        geometry.verticesNeedUpdate = true;
        const texture = new THREE.TextureLoader().load(textureTilePath);
        const material = new THREE.MeshBasicMaterial({
            map: texture,
            side: THREE.DoubleSide,
            wireframe: true,
        });
        const mesh = new THREE.Mesh(geometry, material);
        return mesh;
    } catch (err) {
        console.error(err);
    }
};
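For reference (a sketch, not an answer from the thread): the Float32Array of length 519168 is the plane's position attribute, and 519168 / 3 = 173056 = 416 * 416 vertices, which is exactly what 415 * 415 segments produce. A 512x512 tile, however, has 512 * 512 = 262144 pixels, and a PlaneGeometry with N segments per side has (N + 1) * (N + 1) vertices, so a one-to-one mapping needs 511 segments per side. In current three.js releases PlaneGeometry is a BufferGeometry, so the heights go into the position attribute rather than geometry.vertices:

// Sketch: one pixel per vertex, assuming `pixels` is the RGBA
// Uint8ClampedArray of a full-resolution 512x512 tile
// (`rgbToHeight` is the helper from the question).
const size = 512;
const geometry = new THREE.PlaneGeometry(size, size, size - 1, size - 1);
const position = geometry.attributes.position;
for (let i = 0; i < position.count; i++) {
    position.setZ(i, rgbToHeight(
        pixels[i * 4 + 0],
        pixels[i * 4 + 1],
        pixels[i * 4 + 2]
    ));
}
position.needsUpdate = true;
geometry.computeVertexNormals();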

Load 3D files (fbx and obj) using threeJS

I am using ThreeJS to "open" and view 3D files.
At the moment I can only view a simple geometric figure (a cube).
How can I replace this geometric figure with a 3D file (obj or fbx)?
In the project I placed two 3D files (one fbx and the other obj) named image.
Do I need to use a specific loader to read these types of files?
Can someone help me?
Thanks!
DEMO
html
<canvas #canvas></canvas>
.ts
@ViewChild('canvas') canvasRef: ElementRef;
renderer = new THREE.WebGLRenderer();
scene = null;
camera = null;
controls = null;
mesh = null;
light = null;

private calculateAspectRatio(): number {
    const height = this.canvas.clientHeight;
    if (height === 0) {
        return 0;
    }
    return this.canvas.clientWidth / this.canvas.clientHeight;
}

private get canvas(): HTMLCanvasElement {
    return this.canvasRef.nativeElement;
}

constructor() {
    this.scene = new THREE.Scene();
    this.camera = new THREE.PerspectiveCamera(35, 800 / 640, 0.1, 1000);
}

ngAfterViewInit() {
    this.configScene();
    this.configCamera();
    this.configRenderer();
    this.configControls();
    this.createLight();
    this.createMesh();
    this.animate();
}

configScene() {
    this.scene.background = new THREE.Color( 0xdddddd );
}

configCamera() {
    this.camera.aspect = this.calculateAspectRatio();
    this.camera.updateProjectionMatrix();
    this.camera.position.set( -15, 10, 15 );
    this.camera.lookAt( this.scene.position );
}

configRenderer() {
    this.renderer = new THREE.WebGLRenderer({
        canvas: this.canvas,
        antialias: true,
        alpha: true
    });
    this.renderer.setPixelRatio(devicePixelRatio);
    this.renderer.setClearColor( 0x000000, 0 );
    this.renderer.setSize(this.canvas.clientWidth, this.canvas.clientHeight);
}

configControls() {
    this.controls = new OrbitControls(this.camera, this.canvas);
    this.controls.autoRotate = false;
    this.controls.enableZoom = true;
    this.controls.enablePan = true;
    this.controls.update();
}

createLight() {
    this.light = new THREE.PointLight( 0xffffff );
    this.light.position.set( -10, 10, 10 );
    this.scene.add( this.light );
}

createMesh() {
    const geometry = new THREE.BoxGeometry(5, 5, 5);
    const material = new THREE.MeshLambertMaterial({ color: 0xff0000 });
    this.mesh = new THREE.Mesh(geometry, material);
    this.scene.add(this.mesh);
}

animate() {
    window.requestAnimationFrame(() => this.animate());
    // this.mesh.rotation.x += 0.01;
    // this.mesh.rotation.y += 0.01;
    this.controls.update();
    this.renderer.render(this.scene, this.camera);
}
Do I need to use any specific loader to read this type of files?
Yes. You have to use THREE.OBJLoader and THREE.FBXLoader if you want to load OBJ or FBX files. For loading OBJ files, the workflow looks like so:
Import the loader:
import { OBJLoader } from 'three/examples/jsm/loaders/OBJLoader.js';
Load the asset and add it to your scene:
const loader = new OBJLoader();
loader.load('https://threejs.org/examples/models/obj/tree.obj', object => {
    this.scene.add(object);
});
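Loading an FBX file follows the same pattern (a sketch; the 'assets/image.fbx' path is a placeholder for wherever the file actually lives in your project):

import { FBXLoader } from 'three/examples/jsm/loaders/FBXLoader.js';

const loader = new FBXLoader();
// the path below is a placeholder; point it at your own .fbx file
loader.load('assets/image.fbx', object => {
    this.scene.add(object);
});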
BTW: There is no need to use the npm package #avatsaev/three-orbitcontrols-ts. You can import OrbitControls from the three package.
import { OrbitControls } from 'three/examples/jsm/controls/OrbitControls.js';

How to crop a PNG image and remove its unused space using Canvas in Flutter?

The attached image shows what is rendered to the canvas and saved locally. In the image I have drawn a square box, which is the part I want to render to the canvas and save locally, without the extra space on the left and right. How can I save just the square box and drop the unnecessary space in the PNG image?
Widget source code:
return CustomPaint(
  painter: PngImageCropper(image: image),
);
PngImageCropper code:
class PngImageCropper extends CustomPainter {
  PngImageCropper({
    this.image,
  });

  ui.Image image;

  @override
  void paint(Canvas canvas, Size size) {
    _drawCanvas(size, canvas);
    _saveCanvas(size);
  }

  Canvas _drawCanvas(Size size, Canvas canvas) {
    final center = Offset(image.width / 2, image.height / 2);
    double drawImageWidth = 0;
    double drawImageHeight = 0;
    Rect rect =
        Rect.fromCircle(center: center, radius: _getCircularRadius(image));
    Path path = Path()..addOval(rect);
    canvas.clipPath(path);
    Paint paint = new Paint();
    canvas.drawImage(
      image,
      Offset(drawImageWidth, drawImageHeight),
      paint,
    );
    return canvas;
  }

  _getCircularRadius(ui.Image image) {
    return image.height > image.width
        ? image.width.toDouble() / 2
        : image.height.toDouble() / 2;
  }

  _saveCanvas(Size size) async {
    var pictureRecorder = ui.PictureRecorder();
    var canvas = Canvas(pictureRecorder);
    var paint = Paint();
    paint.isAntiAlias = true;
    _drawCanvas(size, canvas);
    var pic = pictureRecorder.endRecording();
    ui.Image img = await pic.toImage(image.width, image.height);
    var byteData = await img.toByteData(format: ui.ImageByteFormat.png);
    var buffer = byteData.buffer.asUint8List();
    // var response = await get(imgUrl);
    var documentDirectory = await getApplicationDocumentsDirectory();
    File file = File(join(documentDirectory.path,
        '${DateTime.now().toUtc().toIso8601String()}.png'));
    file.writeAsBytesSync(buffer);
    print(file.path);
  }

  @override
  bool shouldRepaint(CustomPainter oldDelegate) {
    return false;
  }
}
Working source code, from the answer by @pskink:
Future<List<int>> cropRonded(ui.Image image) async {
  var recorder = ui.PictureRecorder();
  var canvas = Canvas(recorder);
  var imageSize = Size(image.width.toDouble(), image.height.toDouble());
  var boundsToCrop = Rect.fromCenter(
      center: imageSize.center(Offset.zero),
      width: imageSize.shortestSide,
      height: imageSize.shortestSide);
  var matrix = Matrix4.translationValues(
          -boundsToCrop.topLeft.dx, -boundsToCrop.topLeft.dy, 0)
      .storage;
  var paint = Paint()
    ..shader = ImageShader(image, TileMode.clamp, TileMode.clamp, matrix);
  var radius = imageSize.shortestSide / 2;
  canvas.drawCircle(Offset(radius, radius), radius, paint);
  ui.Image cropped = await recorder
      .endRecording()
      .toImage(imageSize.shortestSide.toInt(), imageSize.shortestSide.toInt());
  var byteData = await cropped.toByteData(format: ui.ImageByteFormat.png);
  return byteData.buffer.asUint8List();
}

How does raycasting in three.js work with an offscreen canvas?

I can't seem to get raycasting to work in an offscreen canvas.
A click event sends data to the worker like this:
var r = document.getElementById('webGL').getBoundingClientRect();
offscreen.postMessage({
    action: 'set_scene',
    mesh: mesh.toJSON(),
    camera: camera.toJSON(),
    canvasSize: {
        width: document.getElementById('webGL').clientWidth,
        height: document.getElementById('webGL').clientHeight
    },
    coordinates: { x: e.originalEvent.clientX - r.x, y: e.originalEvent.clientY - r.y },
    time: (new Date())
});

while the worker looks like this:
self.importScripts( './three.min.js' );
var loader = new THREE.ObjectLoader();
var scene = new THREE.Scene();
self.onmessage = function(e) {
    // var workerResult = 'Result: ' + (e.data[0] * e.data[1]);
    var canvas = new OffscreenCanvas(e.data.canvasSize.width, e.data.canvasSize.height);
    var renderer = new THREE.WebGLRenderer( { antialias: true, canvas: canvas, preserveDrawingBuffer: true } );
    Promise.all([
        new Promise(function(resolve, reject) {
            loader.parse(
                e.data.mesh,
                function ( obj ) {
                    resolve( obj );
                });
        }),
        new Promise(function(resolve, reject) {
            loader.parse(
                e.data.camera,
                function ( obj ) {
                    resolve( obj );
                });
        })
    ]).then(obj => {
        var [mesh, camera] = obj;
        scene.add( mesh );
        renderer.render( scene, camera );
        var raycaster = new THREE.Raycaster();
        var p = { x: e.data.coordinates.x, y: e.data.coordinates.y };
        var m = {};
        m.x = p.x / e.data.canvasSize.width * 2 - 1;
        m.y = 1 - p.y / e.data.canvasSize.height * 2;
        raycaster.setFromCamera( m, camera );
        var intersects = raycaster.intersectObjects( [ mesh ], true );
        return intersects;
    }).then(r => {
        self.postMessage(r);
    }).catch(e => {
        console.log(e);
    });
};
Same code onscreen works ok, and the values resulting from the transforms check out ok.
Is it possible to do such a thing at all, or what am I getting wrong?
I don't see the issue in your code, but there is absolutely nothing special about picking offscreen. Here's a working example to prove it. It doesn't have any three.js, camera, or mesh in the main page, only in the worker.
All the main page does is start the worker, transfer control of the canvas to the worker, then send resize and mouse events to the worker. That's it. Otherwise the code in the worker is 99% the same as the code would be in the main page. The only major difference is that resizeRendererToDisplaySize gets the display size from state.width and state.height. The non-offscreen version of the code comes from here. You need to post more code.
function main() {
    const canvas = document.querySelector("#c");
    if (!canvas.transferControlToOffscreen) {
        alert('no offscreen canvas support');
        return;
    }
    const offscreen = canvas.transferControlToOffscreen();
    const workerScript = document.querySelector('#foo').text;
    const blob = new Blob([workerScript], {type: 'application/javascript'});
    const url = URL.createObjectURL(blob);
    const worker = new Worker(url);
    worker.postMessage({type: 'init', canvas: offscreen}, [offscreen]);

    function sendSize() {
        worker.postMessage({
            type: 'size',
            width: canvas.clientWidth,
            height: canvas.clientHeight,
        });
    }
    sendSize();
    window.addEventListener('resize', sendSize);

    function getNormaizedMousePosition(element, e) {
        const rect = element.getBoundingClientRect();
        const x = e.clientX - rect.left;
        const y = e.clientY - rect.top;
        return {
            x: x / canvas.clientWidth * 2 - 1,
            y: y / canvas.clientHeight * -2 + 1,
        };
    }
    canvas.addEventListener('mousemove', (e) => {
        const pos = getNormaizedMousePosition(canvas, e);
        worker.postMessage({
            type: 'mousemove',
            x: pos.x,
            y: pos.y,
        });
    });
}
main();
main();
body {
  margin: 0;
}
#c {
  width: 100vw;
  height: 100vh;
  display: block;
}
<canvas id="c"></canvas>
<script type="foo" id="foo">
'use strict';
importScripts('https://threejsfundamentals.org/threejs/resources/threejs/r103/three.min.js');

/* global THREE */

const state = {
    width: 300,
    height: 150,
    mouse: {
        x: -2,
        y: -2,
    },
};

function init(data) {
    const {canvas} = data;
    const renderer = new THREE.WebGLRenderer({
        canvas
    });
    state.width = canvas.width;
    state.height = canvas.height;

    const fov = 75;
    const aspect = 2; // the canvas default
    const near = 0.1;
    const far = 100;
    const camera = new THREE.PerspectiveCamera(fov, aspect, near, far);
    camera.position.z = 4;

    const scene = new THREE.Scene();

    {
        const color = 0xFFFFFF;
        const intensity = 1;
        const light = new THREE.DirectionalLight(color, intensity);
        light.position.set(-1, 2, 4);
        scene.add(light);
    }

    const boxWidth = 1;
    const boxHeight = 1;
    const boxDepth = 1;
    const geometry = new THREE.BoxGeometry(boxWidth, boxHeight, boxDepth);

    function makeInstance(geometry, color, x) {
        const material = new THREE.MeshPhongMaterial({
            color
        });
        const cube = new THREE.Mesh(geometry, material);
        scene.add(cube);
        cube.position.x = x;
        return cube;
    }

    const cubes = [
        makeInstance(geometry, 0x44aa88, 0),
        makeInstance(geometry, 0x8844aa, -2),
        makeInstance(geometry, 0xaa8844, 2),
    ];

    class PickHelper {
        constructor() {
            this.raycaster = new THREE.Raycaster();
            this.pickedObject = null;
            this.pickedObjectSavedColor = 0;
        }
        pick(normalizedPosition, scene, camera, time) {
            // restore the color if there is a picked object
            if (this.pickedObject) {
                this.pickedObject.material.emissive.setHex(this.pickedObjectSavedColor);
                this.pickedObject = undefined;
            }
            // cast a ray through the frustum
            this.raycaster.setFromCamera(normalizedPosition, camera);
            // get the list of objects the ray intersected
            const intersectedObjects = this.raycaster.intersectObjects(scene.children);
            if (intersectedObjects.length) {
                // pick the first object. It's the closest one
                this.pickedObject = intersectedObjects[0].object;
                // save its color
                this.pickedObjectSavedColor = this.pickedObject.material.emissive.getHex();
                // set its emissive color to flashing red/yellow
                this.pickedObject.material.emissive.setHex((time * 8) % 2 > 1 ? 0xFFFF00 : 0xFF0000);
            }
        }
    }
    const pickHelper = new PickHelper();

    function resizeRendererToDisplaySize(renderer) {
        const canvas = renderer.domElement;
        const width = state.width;
        const height = state.height;
        const needResize = canvas.width !== width || canvas.height !== height;
        if (needResize) {
            renderer.setSize(width, height, false);
        }
        return needResize;
    }

    function render(time) {
        time *= 0.001;
        if (resizeRendererToDisplaySize(renderer)) {
            const canvas = renderer.domElement;
            camera.aspect = canvas.width / canvas.height;
            camera.updateProjectionMatrix();
        }
        cubes.forEach((cube, ndx) => {
            const speed = 1 + ndx * .1;
            const rot = time * speed;
            cube.rotation.x = rot;
            cube.rotation.y = rot;
        });
        pickHelper.pick(state.mouse, scene, camera, time);
        renderer.render(scene, camera);
        requestAnimationFrame(render);
    }
    requestAnimationFrame(render);
}

function size(data) {
    state.width = data.width;
    state.height = data.height;
}

function mousemove(data) {
    state.mouse.x = data.x;
    state.mouse.y = data.y;
}

const handlers = {
    init,
    size,
    mousemove,
};

self.onmessage = function(e) {
    const fn = handlers[e.data.type];
    if (!fn) {
        throw new Error('no handler for type: ' + e.data.type);
    }
    fn(e.data);
};
</script>
For your particular case, though, you shouldn't need a camera; send the ray itself. You don't need the canvas size either, since picking in three.js is CPU-based:
function main() {
    const canvas = document.querySelector("#c");
    const renderer = new THREE.WebGLRenderer({canvas});

    const fov = 60;
    const aspect = 2; // the canvas default
    const near = 0.1;
    const far = 200;
    const camera = new THREE.PerspectiveCamera(fov, aspect, near, far);
    camera.position.z = 4;

    const scene = new THREE.Scene();
    scene.background = new THREE.Color('white');

    {
        const color = 0xFFFFFF;
        const intensity = 1;
        const light = new THREE.DirectionalLight(color, intensity);
        light.position.set(-1, 2, 4);
        scene.add(light);
    }

    const boxWidth = 1;
    const boxHeight = 1;
    const boxDepth = 1;
    const geometry = new THREE.BoxGeometry(boxWidth, boxHeight, boxDepth);

    function makeInstance(geometry, color, x, name) {
        const material = new THREE.MeshPhongMaterial({
            color
        });
        const cube = new THREE.Mesh(geometry, material);
        scene.add(cube);
        cube.name = name;
        cube.position.x = x;
        cube.rotation.x = x;
        cube.rotation.z = x + .7;
        return cube;
    }

    makeInstance(geometry, 0x44aa88, 0, 'cyan cube');
    makeInstance(geometry, 0x8844aa, -2, 'purple cube');
    makeInstance(geometry, 0xaa8844, 2, 'brown cube');

    function resizeRendererToDisplaySize(renderer) {
        const canvas = renderer.domElement;
        const width = canvas.clientWidth;
        const height = canvas.clientHeight;
        const needResize = canvas.width !== width || canvas.height !== height;
        if (needResize) {
            renderer.setSize(width, height, false);
        }
        return needResize;
    }

    function render() {
        if (resizeRendererToDisplaySize(renderer)) {
            const canvas = renderer.domElement;
            camera.aspect = canvas.width / canvas.height;
            camera.updateProjectionMatrix();
        }
        renderer.render(scene, camera);
    }
    render();
    window.addEventListener('resize', render);

    const workerScript = document.querySelector('#foo').text;
    const blob = new Blob([workerScript], {type: 'application/javascript'});
    const url = URL.createObjectURL(blob);
    const worker = new Worker(url);

    const msgElem = document.querySelector('#msg');
    worker.onmessage = (e) => {
        msgElem.textContent = e.data;
    };
    worker.postMessage({type: 'init', scene: scene.toJSON()});

    function getNormaizedMousePosition(element, e) {
        const rect = element.getBoundingClientRect();
        const x = e.clientX - rect.left;
        const y = e.clientY - rect.top;
        return {
            x: x / canvas.clientWidth * 2 - 1,
            y: y / canvas.clientHeight * -2 + 1,
        };
    }

    const raycaster = new THREE.Raycaster();
    canvas.addEventListener('mousemove', (e) => {
        const pos = getNormaizedMousePosition(canvas, e);
        raycaster.setFromCamera(pos, camera);
        worker.postMessage({
            type: 'intersect',
            origin: raycaster.ray.origin.toArray(),
            direction: raycaster.ray.direction.toArray(),
        });
    });
}
main();
body {
  margin: 0;
}
#c {
  width: 100vw;
  height: 100vh;
  display: block;
}
#msg {
  position: absolute;
  left: 1em;
  top: 1em;
}
<canvas id="c"></canvas>
<div id="msg"></div>
<script src="https://threejsfundamentals.org/threejs/resources/threejs/r103/three.min.js"></script>
<script type="foo" id="foo">
'use strict';
importScripts('https://threejsfundamentals.org/threejs/resources/threejs/r103/three.min.js');

/* global THREE */

function loadObject(json) {
    return new Promise((resolve) => {
        const loader = new THREE.ObjectLoader();
        loader.parse(json, resolve);
    });
}

const renderer = new THREE.WebGLRenderer({
    canvas: new OffscreenCanvas(1, 1),
});
// settings not important
const camera = new THREE.PerspectiveCamera(1, 1, 0.1, 100);
const raycaster = new THREE.Raycaster();
let scene;
let lastIntersectedObject;

async function init(data) {
    scene = await loadObject(data.scene);
    // we only need to render once to init the scene
    renderer.render(scene, camera);
}

function intersect(data) {
    raycaster.set(
        new THREE.Vector3(...data.origin),
        new THREE.Vector3(...data.direction));
    const intersections = raycaster.intersectObjects(scene.children);
    const intersectedObject = intersections.length
        ? intersections[0].object
        : null;
    if (intersectedObject !== lastIntersectedObject) {
        lastIntersectedObject = intersectedObject;
        log('intersection:', lastIntersectedObject ? lastIntersectedObject.name : 'none');
    }
}

const handlers = {
    init,
    intersect,
};

self.onmessage = function(e) {
    const fn = handlers[e.data.type];
    if (!fn) {
        throw new Error('no handler for type: ' + e.data.type);
    }
    fn(e.data);
};

function log(...args) {
    postMessage([...args].join(' '));
}
</script>
