Three.js - uvsNeedUpdate = true not triggering update

I'm working on a Three.js scene that loads 5 meshes, each with a single material containing a single image texture. Once those images load, I'm attempting to accomplish the following tasks:
load 20 higher-resolution images
update the material property of each mesh to hold an array of 5 materials (each with 1 image texture)
update the faceVertexUvs of each mesh's geometry to point to the appropriate offsets within the new textures.
The documentation on Geometry has little to say about Geometry.faceVertexUvs, but this SO post suggests one can use the following approach to work with multiple materials in a geometry when using faceVertexUvs:
geometry.faceVertexUvs[ materialIndex ][ faceIndex ][ vertexIndex ]
The trouble is that after running through the steps above, I call meshes[meshIdx].geometry.uvsNeedUpdate = true; but my new materials are not appearing and my meshes stay unchanged. Does anyone have any idea why that might be?
I would be tremendously grateful for any pointers or insight others can offer on this question!
Here is my full code sample:
/**
* Globals
**/
// Create a store for all images contained in the visualization
var imageList = null;
// Create a store for the image atlas files. Each key will represent
// the index position of the atlas file, and the value will point
// to the material at that index position
var materials = {32: {}, 64: {}};
// Create global configs for image and atlas sizing
var image, atlas;
// Create a store of meshes
var meshes = [];
/**
* Create Scene
**/
// Create the scene and a camera to view it
var scene = new THREE.Scene();
/**
* Camera
**/
// Specify the portion of the scene visible at any time (in degrees)
var fieldOfView = 75;
// Specify the camera's aspect ratio
var aspectRatio = window.innerWidth / window.innerHeight;
/*
Specify the near and far clipping planes. Only objects
between those planes will be rendered in the scene
(these values help control the number of items rendered
at any given time)
*/
var nearPlane = 100;
var farPlane = 50000;
// Use the values specified above to create a camera
var camera = new THREE.PerspectiveCamera(
fieldOfView, aspectRatio, nearPlane, farPlane
);
// Finally, set the camera's position
camera.position.z = 12000;
camera.position.y = -2000;
/**
* Renderer
**/
// Create the canvas with a renderer
var renderer = new THREE.WebGLRenderer({ antialias: true });
// Add support for retina displays
renderer.setPixelRatio( window.devicePixelRatio );
// Specify the size of the canvas
renderer.setSize( window.innerWidth, window.innerHeight );
// Add the canvas to the DOM
document.body.appendChild( renderer.domElement );
/**
* Load External Data
**/
// Identify data endpoint
var dataUrl = 'https://s3.amazonaws.com/duhaime/blog/tsne-webgl/data/';
// Create a store for image position information
var imagePositions = null;
// Load the image position JSON file
var fileLoader = new THREE.FileLoader();
fileLoader.load(dataUrl + 'image_tsne_projections.json', function(data) {
imagePositions = JSON.parse(data);
conditionallyBuildGeometries(32)
})
/**
* Load Atlas Textures
**/
// List of all textures to be loaded, the size of subimages
// in each, and the total number of atlas files for each size
var textureSets = {
32: { size: 32, count: 5 },
64: { size: 64, count: 20 }
}
// Create a texture loader so we can load our image files
var textureLoader = new THREE.TextureLoader();
function loadTextures(size) {
for (var i=0; i<textureSets[size].count; i++) {
var url = dataUrl + 'atlas_files/' + size + 'px/atlas-' + i + '.jpg';
textureLoader.load(url, handleTexture.bind(null, size, i));
}
}
// Callback function that adds the texture to the list of textures
// and calls the geometry builder if all textures have loaded
function handleTexture(size, idx, texture) {
var material = new THREE.MeshBasicMaterial({ map: texture });
materials[size][idx] = material;
conditionallyBuildGeometries(size, idx)
}
// If the textures and the mapping from image idx to positional information
// are all loaded, create the geometries
function conditionallyBuildGeometries(size, idx) {
if (size === 32) {
var nLoaded = Object.keys(materials[size]).length;
var nRequired = textureSets[size].count;
if (nLoaded === nRequired && imagePositions) {
document.querySelector('#loading').style.display = 'none';
buildGeometry(size);
loadTextures(64)
}
} else {
updateGeometry(size, idx)
}
}
loadTextures(32)
/**
* Build Image Geometry
**/
// Iterate over the textures in the current texture set
// and for each, add a new mesh to the scene
function buildGeometry(size) {
setImageAndAtlasSize(size);
for (var i=0; i<textureSets[size].count; i++) {
// Create one new geometry per atlas
var geometry = new THREE.Geometry();
for (var j=0; j<atlas.cols*atlas.rows; j++) {
geometry = updateVertices(geometry, i, j);
geometry = updateFaces(geometry);
geometry = updateFaceVertexUvs(geometry, j, 0);
}
buildMesh(geometry, materials[size][i]);
}
}
function setImageAndAtlasSize(size) {
// Identify the subimage size in px (width/height) and the
// size of the image as it will be displayed in the map
image = { width: size, height: size, shownWidth: 64, shownHeight: 64 };
// Identify the total number of cols & rows in the image atlas
atlas = { width: 2048, height: 2048, cols: 2048/size, rows: 2048/size };
}
// Get the x, y, z coords for the subimage at index position j
// of atlas in index position i
function getCoords(i, j) {
var idx = (i * atlas.rows * atlas.cols) + j;
var coords = imagePositions[idx];
coords.x *= 2200;
coords.y *= 1200;
coords.z = (-200 + j/100);
return coords;
}
// Add one vertex for each corner of the image, using the
// following order: lower left, lower right, upper right, upper left
function updateVertices(geometry, i, j) {
// Retrieve the x, y, z coords for this subimage
var coords = getCoords(i, j);
geometry.vertices.push(
new THREE.Vector3(
coords.x,
coords.y,
coords.z
),
new THREE.Vector3(
coords.x + image.shownWidth,
coords.y,
coords.z
),
new THREE.Vector3(
coords.x + image.shownWidth,
coords.y + image.shownHeight,
coords.z
),
new THREE.Vector3(
coords.x,
coords.y + image.shownHeight,
coords.z
)
);
return geometry;
}
// Create two new faces for a given subimage, then add those
// faces to the geometry
function updateFaces(geometry) {
// Add the first face (the lower-right triangle)
var faceOne = new THREE.Face3(
geometry.vertices.length-4,
geometry.vertices.length-3,
geometry.vertices.length-2
)
// Add the second face (the upper-left triangle)
var faceTwo = new THREE.Face3(
geometry.vertices.length-4,
geometry.vertices.length-2,
geometry.vertices.length-1
)
// Add those faces to the geometry
geometry.faces.push(faceOne, faceTwo);
return geometry;
}
function updateFaceVertexUvs(geometry, j, materialIdx) {
// Identify the relative width and height of the subimages
// within the image atlas
var relativeW = image.width / atlas.width;
var relativeH = image.height / atlas.height;
// Identify this subimage's offset in the x dimension
// An xOffset of 0 means the subimage starts flush with
// the left-hand edge of the atlas
var xOffset = (j % atlas.cols) * relativeW;
// Identify this subimage's offset in the y dimension
// A yOffset of 0 means the subimage starts flush with
// the bottom edge of the atlas
var yOffset = 1 - (Math.floor(j/atlas.cols) * relativeH) - relativeH;
// Create an empty list of faceVertexUvs for the given material Idx
// if it doesn't exist yet
if (!geometry.faceVertexUvs[materialIdx]) {
geometry.faceVertexUvs[materialIdx] = [];
}
// Use the xOffset and yOffset (and the knowledge that
// each row and column contains only 32 images) to specify
// the regions of the current image
geometry.faceVertexUvs[materialIdx][j*2] = [
new THREE.Vector2(xOffset, yOffset),
new THREE.Vector2(xOffset + relativeW, yOffset),
new THREE.Vector2(xOffset + relativeW, yOffset + relativeH)
];
// Map the region of the image described by the lower-left,
// upper-right, and upper-left vertices to `faceTwo`
geometry.faceVertexUvs[materialIdx][(j*2) + 1] = [
new THREE.Vector2(xOffset, yOffset),
new THREE.Vector2(xOffset + relativeW, yOffset + relativeH),
new THREE.Vector2(xOffset, yOffset + relativeH)
];
return geometry;
}
function buildMesh(geometry, material) {
// Convert the geometry to a BufferGeometry for additional performance
//var geometry = new THREE.BufferGeometry().fromGeometry(geometry);
// Combine the image geometry and material into a mesh
var mesh = new THREE.Mesh(geometry, [material]);
// Set the position of the image mesh in the x,y,z dimensions
mesh.position.set(0,0,0)
// Add the image to the scene
scene.add(mesh);
// Save this mesh
meshes.push(mesh);
}
/**
* Update Geometries with new VertexUvs and materials
**/
function updateGeometry(size, idx) {
// Update the image and atlas sizes
setImageAndAtlasSize(size)
// Determine how many of the higher resolution atlas files
// it takes to account for all subimages in a lower resolution
// atlas file
var lowResPerAtlas = (2048/32)**2;
var highResPerAtlas = (2048/64)**2;
var atlasRatio = lowResPerAtlas / highResPerAtlas;
// Determine which of the original meshes the newly-loaded high-res
// atlas corresponds to
var meshIdx = Math.floor(idx/atlasRatio);
// Determine the material index position to use in this mesh.
// The mesh's materials array will look like this:
// mesh.material = [32px, 64px_0, 64px_1, 64px_2, 64px_3, 64px_4];
var materialIdx = (idx % atlasRatio) + 1;
// Add the newly loaded material into the appropriate mesh
meshes[meshIdx].material[materialIdx] = materials[size][idx];
//console.log(meshIdx, materialIdx, idx, meshes[materialIdx].material)
// Pluck out the geometry of this mesh update:
var geometry = meshes[meshIdx].geometry;
for (var j=0; j<highResPerAtlas; j++) {
geometry = updateFaceVertexUvs(geometry, j, materialIdx);
}
geometry.faceVertexUvs[0] = [];
meshes[meshIdx].geometry = geometry;
meshes[meshIdx].geometry.colorsNeedUpdate = true;
meshes[meshIdx].geometry.groupsNeedUpdate = true;
meshes[meshIdx].geometry.lineDistancesNeedUpdate = true;
meshes[meshIdx].geometry.normalsNeedUpdate = true;
meshes[meshIdx].geometry.uvsNeedUpdate = true;
meshes[meshIdx].geometry.verticesNeedUpdate = true;
// Indicate the material needs update
meshes[meshIdx].material.needsUpdate = true;
}
/**
* Lights
**/
// Add a point light with #fff color, intensity 1, and distance 0
var light = new THREE.PointLight( 0xffffff, 1, 0 );
// Specify the light's position
light.position.set(1, 1, 100);
// Add the light to the scene
scene.add(light)
/**
* Add Controls
**/
var controls = new THREE.TrackballControls(camera, renderer.domElement);
/**
* Handle window resizes
**/
window.addEventListener('resize', function() {
camera.aspect = window.innerWidth / window.innerHeight;
camera.updateProjectionMatrix();
renderer.setSize( window.innerWidth, window.innerHeight );
controls.handleResize();
});
/**
* Render!
**/
// The main animation function that re-renders the scene each animation frame
function animate() {
requestAnimationFrame( animate );
renderer.render( scene, camera );
controls.update();
}
animate();
* {
margin: 0;
padding: 0;
background: #000;
color: #fff;
}
<script src="https://cdnjs.cloudflare.com/ajax/libs/three.js/88/three.js"></script>
<script src="https://s3-us-west-2.amazonaws.com/yale-dh-staging/tsne/assets/vendor/js/trackball-controls.js"></script>
<div id='loading'>Loading</div>

This is a known bug: https://github.com/mrdoob/three.js/issues/7179
Instead of changing a faceVertexUv by index accessors, one must use a .set() method on the appropriate vector within the faceVertexUv, e.g.:
if (geometry.faceVertexUvs[0][faceIdx]) {
geometry.faceVertexUvs[0][faceIdx][0].set(xOffset, yOffset)
geometry.faceVertexUvs[0][faceIdx][1].set(xOffset + width, yOffset)
geometry.faceVertexUvs[0][faceIdx][2].set(xOffset + width, yOffset + height)
} else {
geometry.faceVertexUvs[0][faceIdx] = [
new THREE.Vector2(xOffset, yOffset),
new THREE.Vector2(xOffset + width, yOffset),
new THREE.Vector2(xOffset + width, yOffset + height)
]
}
Full example:
/**
* Globals
**/
// Create a store for all images contained in the visualization
var imageList = null;
// Create a store for the image atlas files. Each key will represent
// the index position of the atlas file, and the value will point
// to the material at that index position
var materials = {32: {}, 64: {}};
// Create global configs for image and atlas sizing
var image, atlas;
// Create a store of meshes
var meshes = [];
/**
* Create Scene
**/
// Create the scene and a camera to view it
var scene = new THREE.Scene();
/**
* Camera
**/
// Specify the portion of the scene visible at any time (in degrees)
var fieldOfView = 75;
// Specify the camera's aspect ratio
var aspectRatio = window.innerWidth / window.innerHeight;
/*
Specify the near and far clipping planes. Only objects
between those planes will be rendered in the scene
(these values help control the number of items rendered
at any given time)
*/
var nearPlane = 100;
var farPlane = 50000;
// Use the values specified above to create a camera
var camera = new THREE.PerspectiveCamera(
fieldOfView, aspectRatio, nearPlane, farPlane
);
// Finally, set the camera's position
camera.position.z = 12000;
camera.position.y = -2000;
/**
* Lights
**/
// Add a point light with #fff color, intensity 1, and distance 0
var light = new THREE.PointLight( 0xffffff, 1, 0 );
// Specify the light's position
light.position.set(1, 1, 100);
// Add the light to the scene
scene.add(light)
/**
* Renderer
**/
// Create the canvas with a renderer
var renderer = new THREE.WebGLRenderer({ antialias: true });
// Add support for retina displays
renderer.setPixelRatio( window.devicePixelRatio );
// Specify the size of the canvas
renderer.setSize( window.innerWidth, window.innerHeight );
// Add the canvas to the DOM
document.body.appendChild( renderer.domElement );
/**
* Load External Data
**/
// Identify data endpoint
var dataUrl = 'https://s3.amazonaws.com/duhaime/blog/tsne-webgl/data/';
// Create a store for image position information
var imagePositions = null;
// Load the image position JSON file
var fileLoader = new THREE.FileLoader();
fileLoader.load(dataUrl + 'image_tsne_projections.json', function(data) {
imagePositions = JSON.parse(data);
conditionallyBuildGeometries(32)
})
/**
* Load Atlas Textures
**/
// List of all textures to be loaded, the size of subimages
// in each, and the total count of atlas files for each size
var textureSets = {
32: { size: 32, count: 5 },
64: { size: 64, count: 20 }
}
// Create a texture loader so we can load our image files
var textureLoader = new THREE.TextureLoader();
function loadTextures(size) {
for (var i=0; i<textureSets[size].count; i++) {
var url = dataUrl + 'atlas_files/' + size + 'px/atlas-' + i + '.jpg';
textureLoader.load(url, handleTexture.bind(null, size, i));
}
}
// Create a material from the new texture and call
// the geometry builder if all textures have loaded
function handleTexture(size, idx, texture) {
var material = new THREE.MeshBasicMaterial({ map: texture });
materials[size][idx] = material;
conditionallyBuildGeometries(size, idx)
}
// If the textures and the mapping from image idx to positional information
// are all loaded, create the geometries
function conditionallyBuildGeometries(size, idx) {
if (size === 32) {
var nLoaded = Object.keys(materials[size]).length;
var nRequired = textureSets[size].count;
if (nLoaded === nRequired && imagePositions) {
document.querySelector('#loading').style.display = 'none';
buildGeometry(size);
loadTextures(64)
}
} else {
updateGeometry(size, idx)
}
}
loadTextures(32)
/**
* Build Image Geometry
**/
// Iterate over the textures in the current texture set
// and for each, add a new mesh to the scene
function buildGeometry(size) {
setImageAndAtlasSize(size);
for (var i=0; i<textureSets[size].count; i++) {
// Create one new geometry per set of 1024 images
var geometry = new THREE.Geometry();
geometry.faceVertexUvs[0] = [];
for (var j=0; j<atlas.cols*atlas.rows; j++) {
geometry = updateVertices(geometry, i, j);
geometry = updateFaces(geometry);
geometry = updateFaceVertexUvs(geometry, j);
if ((j+1)%1024 === 0) {
buildMesh(geometry, materials[size][i]);
var geometry = new THREE.Geometry();
}
}
}
}
function setImageAndAtlasSize(size) {
// Identify the subimage size in px (width/height) and the
// size of the image as it will be displayed in the map
image = { width: size, height: size, shownWidth: 64, shownHeight: 64 };
// Identify the total number of cols & rows in the image atlas
atlas = { width: 2048, height: 2048, cols: 2048/size, rows: 2048/size };
}
// Get the x, y, z coords for the subimage at index position j
// of atlas in index position i
function getCoords(i, j) {
var idx = (i * atlas.rows * atlas.cols) + j;
var coords = imagePositions[idx];
coords.x *= 2200;
coords.y *= 1200;
coords.z = (-200 + j/100);
return coords;
}
// Add one vertex for each corner of the image, using the
// following order: lower left, lower right, upper right, upper left
function updateVertices(geometry, i, j) {
// Retrieve the x, y, z coords for this subimage
var coords = getCoords(i, j);
geometry.vertices.push(
new THREE.Vector3(
coords.x,
coords.y,
coords.z
),
new THREE.Vector3(
coords.x + image.shownWidth,
coords.y,
coords.z
),
new THREE.Vector3(
coords.x + image.shownWidth,
coords.y + image.shownHeight,
coords.z
),
new THREE.Vector3(
coords.x,
coords.y + image.shownHeight,
coords.z
)
);
return geometry;
}
// Create two new faces for a given subimage, then add those
// faces to the geometry
function updateFaces(geometry) {
// Add the first face (the lower-right triangle)
var faceOne = new THREE.Face3(
geometry.vertices.length-4,
geometry.vertices.length-3,
geometry.vertices.length-2
)
// Add the second face (the upper-left triangle)
var faceTwo = new THREE.Face3(
geometry.vertices.length-4,
geometry.vertices.length-2,
geometry.vertices.length-1
)
// Add those faces to the geometry
geometry.faces.push(faceOne, faceTwo);
return geometry;
}
function updateFaceVertexUvs(geometry, j) {
// Identify the relative width and height of the subimages
// within the image atlas
var relativeW = image.width / atlas.width;
var relativeH = image.height / atlas.height;
// Identify this subimage's offset in the x dimension
// An xOffset of 0 means the subimage starts flush with
// the left-hand edge of the atlas
var xOffset = (j % atlas.cols) * relativeW;
// Identify this subimage's offset in the y dimension
// A yOffset of 0 means the subimage starts flush with
// the bottom edge of the atlas
var yOffset = 1 - (Math.floor(j/atlas.cols) * relativeH) - relativeH;
// Determine the faceVertexUvs index position
var faceIdx = 2 * (j%1024);
// Use the xOffset and yOffset (and the knowledge that
// each row and column contains only 32 images) to specify
// the regions of the current image. Use .set() if the given
// faceVertex is already defined, due to a bug in updateVertexUvs:
// https://github.com/mrdoob/three.js/issues/7179
if (geometry.faceVertexUvs[0][faceIdx]) {
geometry.faceVertexUvs[0][faceIdx][0].set(xOffset, yOffset)
geometry.faceVertexUvs[0][faceIdx][1].set(xOffset + relativeW, yOffset)
geometry.faceVertexUvs[0][faceIdx][2].set(xOffset + relativeW, yOffset + relativeH)
} else {
geometry.faceVertexUvs[0][faceIdx] = [
new THREE.Vector2(xOffset, yOffset),
new THREE.Vector2(xOffset + relativeW, yOffset),
new THREE.Vector2(xOffset + relativeW, yOffset + relativeH)
]
}
// Map the region of the image described by the lower-left,
// upper-right, and upper-left vertices to `faceTwo`
if (geometry.faceVertexUvs[0][faceIdx+1]) {
geometry.faceVertexUvs[0][faceIdx+1][0].set(xOffset, yOffset)
geometry.faceVertexUvs[0][faceIdx+1][1].set(xOffset + relativeW, yOffset + relativeH)
geometry.faceVertexUvs[0][faceIdx+1][2].set(xOffset, yOffset + relativeH)
} else {
geometry.faceVertexUvs[0][faceIdx+1] = [
new THREE.Vector2(xOffset, yOffset),
new THREE.Vector2(xOffset + relativeW, yOffset + relativeH),
new THREE.Vector2(xOffset, yOffset + relativeH)
]
}
return geometry;
}
function buildMesh(geometry, material) {
// Convert the geometry to a BufferGeometry for additional performance
//var geometry = new THREE.BufferGeometry().fromGeometry(geometry);
// Combine the image geometry and material into a mesh
var mesh = new THREE.Mesh(geometry, material);
// Set the position of the image mesh in the x,y,z dimensions
mesh.position.set(0,0,0)
// Add the image to the scene
scene.add(mesh);
// Save this mesh
meshes.push(mesh);
return mesh;
}
/**
* Update Geometries with new VertexUvs and materials
**/
function updateGeometry(size, idx) {
// Update the image and atlas sizes
setImageAndAtlasSize(size)
// Update the appropriate material
meshes[idx].material = materials[size][idx];
meshes[idx].material.needsUpdate = true;
// Update the facevertexuvs
for (var j=0; j<atlas.cols*atlas.rows; j++) {
meshes[idx].geometry = updateFaceVertexUvs(meshes[idx].geometry, j);
}
meshes[idx].geometry.uvsNeedUpdate = true;
meshes[idx].geometry.verticesNeedUpdate = true;
}
/**
* Add Controls
**/
var controls = new THREE.TrackballControls(camera, renderer.domElement);
/**
* Handle window resizes
**/
window.addEventListener('resize', function() {
camera.aspect = window.innerWidth / window.innerHeight;
camera.updateProjectionMatrix();
renderer.setSize( window.innerWidth, window.innerHeight );
controls.handleResize();
});
/**
* Render!
**/
// The main animation function that re-renders the scene each animation frame
function animate() {
requestAnimationFrame( animate );
renderer.render( scene, camera );
controls.update();
}
animate();
* {
margin: 0;
padding: 0;
background: #000;
color: #fff;
}
<script src="https://cdnjs.cloudflare.com/ajax/libs/three.js/88/three.js"></script>
<script src="https://s3-us-west-2.amazonaws.com/yale-dh-staging/tsne/assets/vendor/js/trackball-controls.js"></script>
<div id='loading'>Loading</div>

Related

GLTF model and interaction in Three.js

My JS skills could be improved, to say the least! But I'm struggling with this:
I can get my model to load OK into the scene but cannot seem to get the interaction working.
It's like I need to tie the GLTF file into the raycaster; the code below is part of it. The full CodePen link is below this code.
class PickHelper {
constructor() {
this.raycaster = new THREE.Raycaster();
this.pickedObject = null;
this.pickedObjectSavedColor = 0;
}
pick(normalizedPosition, scene, camera, time) {
if (this.pickedObject) {
this.pickedObject.material.emissive.setHex(this.pickedObjectSavedColor);
this.pickedObject = undefined;
}
this.raycaster.setFromCamera(normalizedPosition, camera);
const intersectedObjects = this.raycaster.intersectObjects(scene.children);
if (intersectedObjects.length) {
this.pickedObject = intersectedObjects[0].object;
this.pickedObjectSavedColor = this.pickedObject.material.emissive.getHex();
this.pickedObject.material.emissive.setHex((time * 8) % 2 > 1 ? 0xFFFF00 : 0xFF0000);
this.pickedObject.rotation.y += 0.1 ;
}
}
https://codepen.io/johneemac/pen/abzqdye << FULL Code
Sorry: there's a cross-origin issue with the glTF file on CodePen, though! It won't load, but you get the idea, hopefully.
Super appreciate any help, thanks!
You have to perform the intersection test like so:
const intersectedObjects = this.raycaster.intersectObjects(scene.children, true);
Notice the second argument of intersectObjects(). It indicates that the raycaster should process the entire hierarchy of objects, which is necessary in the context of a loaded glTF asset.
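For instance, here is a minimal sketch of recursive picking against a loaded glTF scene (the loader call and model URL are illustrative, not taken from the question's CodePen):
const raycaster = new THREE.Raycaster();
// Load the model; everything it contains ends up as descendants of gltf.scene
new THREE.GLTFLoader().load('model.gltf', (gltf) => {
  scene.add(gltf.scene);
});
function pick(normalizedPosition, camera) {
  raycaster.setFromCamera(normalizedPosition, camera);
  // The second argument (true) makes the raycaster recurse into children,
  // so meshes nested inside the glTF hierarchy are tested too
  const hits = raycaster.intersectObjects(scene.children, true);
  return hits.length ? hits[0].object : null;
}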
three.js R112
It's not clear what you're trying to do. GLTF files are collections of materials, animations, geometries, meshes, etc., so you can't "pick" a GLTF file. You can "pick" individual elements inside it. You could write some code that, if something is picked, checks whether the thing that was picked is one of the meshes loaded from the GLTF scene and then picks every other thing that was loaded from the GLTF scene.
In any case,
You need to give the Raycaster a list of objects to select from. In the original example that was scene.children, which is just the list of Boxes added to the root of the scene. But when loading a GLTF, unless you already know the structure of the GLTF because you created the scene yourself, you'll need to go find the things you want to be able to select and add them to some list that you can pass to Raycaster.intersectObjects.
This code gets all the Mesh objects from the loaded GLTF file
let pickableMeshes = [];
// this is run after loading the glTF
// get a list of all the meshes in the scene
root.traverse((node) => {
if (node instanceof THREE.Mesh) {
pickableMeshes.push(node);
}
});
Note that you could also pass true as the second argument to Raycaster.intersectObjects, as in rayCaster.intersectObjects(scene.children, true). That's probably rarely what you want, though, since you likely have things in the scene you don't want the user to be able to select. For example, if you only wanted the user to be able to select the cars, then something like
// get a list of all the meshes in the scene whose names start with "car"
root.traverse((node) => {
if (node instanceof THREE.Mesh && (/^car/i).test(node.name)) {
pickableMeshes.push(node);
}
});
The PickHelper class you used was changing the color of the material on each Box, but that only works because each Box has its own material. If the Boxes shared materials, then changing the material color would change all the boxes.
When loading a different GLTF, most of the objects share the same material, so highlighting one requires changing the material used by that object, or choosing some other method to highlight the selected thing.
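One other way to handle shared materials (a rough sketch, not part of the example below) is to clone the picked mesh's material once, so the highlight only affects that mesh:
// Assumption: pickedObject.material is shared with other meshes from the glTF
if (!pickedObject.userData.hasOwnMaterial) {
  pickedObject.material = pickedObject.material.clone(); // give this mesh a private copy
  pickedObject.userData.hasOwnMaterial = true;
}
pickedObject.material.emissive.setHex(0xFF0000); // highlight only this mesh
The full example below takes the other route and swaps in a dedicated select material instead.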
function main() {
const canvas = document.querySelector('#c');
const renderer = new THREE.WebGLRenderer({canvas});
const fov = 60;
const aspect = 2; // the canvas default
const near = 0.1;
const far = 200;
const camera = new THREE.PerspectiveCamera(fov, aspect, near, far);
camera.position.z = 30;
const scene = new THREE.Scene();
scene.background = new THREE.Color('white');
// put the camera on a pole (parent it to an object)
// so we can spin the pole to move the camera around the scene
const cameraPole = new THREE.Object3D();
scene.add(cameraPole);
cameraPole.add(camera);
{
const color = 0xFFFFFF;
const intensity = 1;
const light = new THREE.DirectionalLight(color, intensity);
light.position.set(-1, 2, 4);
camera.add(light);
}
function frameArea(sizeToFitOnScreen, boxSize, boxCenter, camera) {
const halfSizeToFitOnScreen = sizeToFitOnScreen * 0.5;
const halfFovY = THREE.Math.degToRad(camera.fov * .5);
const distance = halfSizeToFitOnScreen / Math.tan(halfFovY);
// compute a unit vector that points in the direction the camera is now
// in the xz plane from the center of the box
const direction = (new THREE.Vector3())
.subVectors(camera.position, boxCenter)
.multiply(new THREE.Vector3(1, 0, 1))
.normalize();
// move the camera to a position distance units way from the center
// in whatever direction the camera was from the center already
camera.position.copy(direction.multiplyScalar(distance).add(boxCenter));
// pick some near and far values for the frustum that
// will contain the box.
camera.near = boxSize / 100;
camera.far = boxSize * 100;
camera.updateProjectionMatrix();
// point the camera to look at the center of the box
camera.lookAt(boxCenter.x, boxCenter.y, boxCenter.z);
}
let pickableMeshes = [];
{
const gltfLoader = new THREE.GLTFLoader();
gltfLoader.load('https://threejsfundamentals.org/threejs/resources/models/cartoon_lowpoly_small_city_free_pack/scene.gltf', (gltf) => {
const root = gltf.scene;
scene.add(root);
// compute the box that contains all the stuff
// from root and below
const box = new THREE.Box3().setFromObject(root);
const boxSize = box.getSize(new THREE.Vector3()).length();
const boxCenter = box.getCenter(new THREE.Vector3());
// set the camera to frame the box
frameArea(boxSize * 0.7, boxSize, boxCenter, camera);
// get a list of all the meshes in the scene
root.traverse((node) => {
if (node instanceof THREE.Mesh) {
pickableMeshes.push(node);
}
});
});
}
function resizeRendererToDisplaySize(renderer) {
const canvas = renderer.domElement;
const width = canvas.clientWidth;
const height = canvas.clientHeight;
const needResize = canvas.width !== width || canvas.height !== height;
if (needResize) {
renderer.setSize(width, height, false);
}
return needResize;
}
class PickHelper {
constructor() {
this.raycaster = new THREE.Raycaster();
this.pickedObject = null;
this.pickedObjectSavedMaterial = null;
this.selectMaterial = new THREE.MeshBasicMaterial();
this.infoElem = document.querySelector('#info');
}
pick(normalizedPosition, scene, camera, time) {
// restore the color if there is a picked object
if (this.pickedObject) {
this.pickedObject.material = this.pickedObjectSavedMaterial;
this.pickedObject = undefined;
this.infoElem.textContent = '';
}
// cast a ray through the frustum
this.raycaster.setFromCamera(normalizedPosition, camera);
// get the list of objects the ray intersected
const intersectedObjects = this.raycaster.intersectObjects(pickableMeshes);
if (intersectedObjects.length) {
// pick the first object. It's the closest one
this.pickedObject = intersectedObjects[0].object;
// save its color
this.pickedObjectSavedMaterial = this.pickedObject.material;
this.pickedObject.material = this.selectMaterial;
// flash select material color to flashing red/yellow
this.selectMaterial.color.setHex((time * 8) % 2 > 1 ? 0xFFFF00 : 0xFF0000);
this.infoElem.textContent = this.pickedObject.name;
}
}
}
const pickPosition = {x: 0, y: 0};
const pickHelper = new PickHelper();
clearPickPosition();
function render(time) {
time *= 0.001; // convert to seconds;
if (resizeRendererToDisplaySize(renderer)) {
const canvas = renderer.domElement;
camera.aspect = canvas.clientWidth / canvas.clientHeight;
camera.updateProjectionMatrix();
}
cameraPole.rotation.y = time * .1;
pickHelper.pick(pickPosition, scene, camera, time);
renderer.render(scene, camera);
requestAnimationFrame(render);
}
requestAnimationFrame(render);
function getCanvasRelativePosition(event) {
const rect = canvas.getBoundingClientRect();
return {
x: event.clientX - rect.left,
y: event.clientY - rect.top,
};
}
function setPickPosition(event) {
const pos = getCanvasRelativePosition(event);
pickPosition.x = (pos.x / canvas.clientWidth ) * 2 - 1;
pickPosition.y = (pos.y / canvas.clientHeight) * -2 + 1; // note we flip Y
}
function clearPickPosition() {
// unlike the mouse which always has a position
// if the user stops touching the screen we want
// to stop picking. For now we just pick a value
// unlikely to pick something
pickPosition.x = -100000;
pickPosition.y = -100000;
}
window.addEventListener('mousemove', setPickPosition);
window.addEventListener('mouseout', clearPickPosition);
window.addEventListener('mouseleave', clearPickPosition);
window.addEventListener('touchstart', (event) => {
// prevent the window from scrolling
event.preventDefault();
setPickPosition(event.touches[0]);
}, {passive: false});
window.addEventListener('touchmove', (event) => {
setPickPosition(event.touches[0]);
});
window.addEventListener('touchend', clearPickPosition);
}
main();
body { margin: 0; }
#c { width: 100vw; height: 100vh; display: block; }
#info { position: absolute; left: 0; top: 0; background: black; color: white; padding: 0.5em; font-family: monospace; }
<script src="https://threejsfundamentals.org/threejs/resources/threejs/r112/build/three.min.js"></script>
<script src="https://threejsfundamentals.org/threejs/resources/threejs/r112/examples/js/loaders/GLTFLoader.js"></script>
<canvas id="c"></canvas>
<div id="info"></div>

Three.js - Adding texture makes it look black

I'm making custom 3D objects using an image. First I take the outline from the image, and after getting the points I create the shape. Then I'm using the Three.js extrude geometry to make it look like a 3D object.
The issue is that the texture I'm using is showing up completely black. I used this code to scale the texture:
texture.wrapT = texture.wrapS = THREE.RepeatWrapping;
var rx = 1/img.width;
var ry = 1/img.height;
texture.repeat.set(rx,ry);
This gives me the result in the image below:
NOTE: I'm using the GLTF Exporter.
It is scaling the texture correctly, but I can't set the offset. The image is not arranged properly.
I want to dynamically set the offset, as my images will be different every time. I can set the offset manually and achieve the results shown in the image below. But I want this to be dynamic.
NOTE: This is the offset I'm setting manually for this image to achieve the results:
texture.offset.set(0.188,0.934);
I really need help. Any help will be appreciated. Thanks.
It's not clear what you're trying to do, but...
texture.repeat sets how many times a texture repeats, so texture.repeat.set(2, 3) means repeat twice across and three times down. That means your code texture.repeat.set(1 / img.width, 1 / img.height) will expand the texture so that only 1 pixel is visible.
repeat.set(2, 3);
repeats 2 across 3 down
repeat.set(1/2, 1/3);
repeats 0.5 across and 0.33 down, or in other words shows half the texture across and 1/3 of the texture down
offset moves the texture, where:
1 = move it 100% to the left (if the texture repeats there will be no change at 1 since you've moved it 100%)
0.5 = move it 50% to the left
0.25 = move it 25% to the left
-0.10 = move it -10% to the right
If you want to move it in pixels this is where you'd use img.width
1/img.width = move it one pixel left
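So, as a rough sketch (assuming img is the loaded image and the desired shift is known in pixels), the pixel values can be converted to offset units like this:
// Hypothetical pixel shift of (385, 135) within the source image
const xOffsetPx = 385;
const yOffsetPx = 135;
texture.offset.set(xOffsetPx / img.width, yOffsetPx / img.height);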
See the example at the bottom of this page
body {
margin: 0;
}
#c {
width: 100vw;
height: 100vh;
display: block;
}
<canvas id="c"></canvas>
<script type="module">
import * as THREE from 'https://threejsfundamentals.org/threejs/resources/threejs/r110/build/three.module.js';
function main() {
const canvas = document.querySelector('#c');
const renderer = new THREE.WebGLRenderer({canvas});
const fov = 75;
const aspect = 2; // the canvas default
const near = 0.1;
const far = 5;
const camera = new THREE.PerspectiveCamera(fov, aspect, near, far);
camera.position.z = 2;
const scene = new THREE.Scene();
const geometry = new THREE.PlaneGeometry(1, 1);
const obs = []; // just an array we can use to rotate the planes
const loader = new THREE.TextureLoader();
for (let i = 0; i < 10; ++i) {
const texture = loader.load('https://i.imgur.com/ZKMnXce.png');
// expand the texture so only 40% of it is stretched across the plane
texture.repeat.set(0.4, 0.4);
// randomly offset the texture
texture.offset.set(rand(1), rand(1));
// make it repeat
texture.wrapS = THREE.RepeatWrapping;
texture.wrapT = THREE.RepeatWrapping;
texture.magFilter = THREE.NearestFilter;
const material = new THREE.MeshBasicMaterial({
map: texture,
side: THREE.DoubleSide,
});
const plane = new THREE.Mesh(geometry, material);
plane.position.set(rand(-1, 1), rand(-1, 1), 0);
scene.add(plane);
obs.push(plane); // add to our list of obs to rotate
}
function rand(min, max) {
if (max === undefined) {
max = min;
min = 0;
}
return Math.random() * (max - min) + min;
}
function resizeRendererToDisplaySize(renderer) {
const canvas = renderer.domElement;
const width = canvas.clientWidth;
const height = canvas.clientHeight;
const needResize = canvas.width !== width || canvas.height !== height;
if (needResize) {
renderer.setSize(width, height, false);
}
return needResize;
}
function render(time) {
time *= 0.001;
if (resizeRendererToDisplaySize(renderer)) {
const canvas = renderer.domElement;
camera.aspect = canvas.clientWidth / canvas.clientHeight;
camera.updateProjectionMatrix();
}
obs.forEach((obj, ndx) => {
const speed = .2 + ndx * .1;
const rot = time * speed;
obj.rotation.z = rot;
});
renderer.render(scene, camera);
requestAnimationFrame(render);
}
requestAnimationFrame(render);
}
main();
</script>

In three.js how to position image texture similar to 'contain' in css?

My image texture is positioned relative to the center of 3D space instead of the mesh, and I don't quite understand what determines its size.
Here is example showing how the same image is positioned on different meshes:
https://imgur.com/glHE97L
I'd like the image to be in the center of the mesh and its size set similarly to 'contain' in CSS.
The mesh is a flat plane created using ShapeBufferGeometry:
const shape = new THREE.Shape( edgePoints );
const geometry = new THREE.ShapeBufferGeometry( shape );
To see any image I have to set:
texture.repeat.set(0.001, 0.001);
Not sure if that matters, but after creating the mesh I then set its position and rotation:
mesh.position.copy( position[0] );
mesh.rotation.set( rotation[0], rotation[1], rotation[2] );
I've tried setting those:
mesh.updateMatrixWorld( true );
mesh.geometry.computeBoundingSphere();
mesh.geometry.verticesNeedUpdate = true;
mesh.geometry.elementsNeedUpdate = true;
mesh.geometry.morphTargetsNeedUpdate = true;
mesh.geometry.uvsNeedUpdate = true;
mesh.geometry.normalsNeedUpdate = true;
mesh.geometry.colorsNeedUpdate = true;
mesh.geometry.tangentsNeedUpdate = true;
texture.needsUpdate = true;
I've played with wrapS / wrapT and offset.
I've checked the UVs - I don't yet fully understand this concept but it seems fine. Example of the UVs for one mesh (I understand those are XY coordinates and they seem to reflect the actual corners of my mesh):
uv: Float32BufferAttribute
array: Float32Array(8)
0: -208
1: 188
2: 338
3: 188
4: 338
5: 12
6: -208
7: 12
I've tried setting:
texture.repeat.set(imgHeight/geometryHeight/1000, imgWidth/geometryWidth/1000);
This is how THREE.ShapeGeometry() computes UV coordinates:
https://github.com/mrdoob/three.js/blob/e622cc7890e86663011d12ec405847baa4068515/src/geometries/ShapeGeometry.js#L157
But you can re-compute them to put them in the range [0..1].
Here is an example; click the button to re-compute the UVs of the shape geometry:
var scene = new THREE.Scene();
var camera = new THREE.PerspectiveCamera(60, window.innerWidth / window.innerHeight, 1, 1000);
camera.position.set(0, 0, 10);
var renderer = new THREE.WebGLRenderer();
renderer.setSize(window.innerWidth, window.innerHeight);
document.body.appendChild(renderer.domElement);
var grid = new THREE.GridHelper(10, 10);
grid.rotation.x = Math.PI * 0.5;
scene.add(grid);
var points = [
new THREE.Vector2(0, 5),
new THREE.Vector2(-5, 4),
new THREE.Vector2(-3, -3),
new THREE.Vector2(2, -5),
new THREE.Vector2(5, 0)
];
var shape = new THREE.Shape(points);
var shapeGeom = new THREE.ShapeBufferGeometry(shape);
var shapeMat = new THREE.MeshBasicMaterial({
map: new THREE.TextureLoader().load("https://threejs.org/examples/textures/uv_grid_opengl.jpg")
});
var mesh = new THREE.Mesh(shapeGeom, shapeMat);
scene.add(mesh);
btnRecalc.addEventListener("click", onClick);
var box3 = new THREE.Box3();
var size = new THREE.Vector3();
var v3 = new THREE.Vector3(); // for re-use
function onClick(event) {
box3.setFromObject(mesh); // get AABB of the shape mesh
box3.getSize(size); // get size of that box
var pos = shapeGeom.attributes.position;
var uv = shapeGeom.attributes.uv;
for (let i = 0; i < pos.count; i++) {
v3.fromBufferAttribute(pos, i);
v3.subVectors(v3, box3.min).divide(size); // cast world uvs to range 0..1
uv.setXY(i, v3.x, v3.y);
}
uv.needsUpdate = true; // set it to true to make changes visible
}
renderer.setAnimationLoop(() => {
renderer.render(scene, camera);
});
body {
overflow: hidden;
margin: 0;
}
<script src="https://threejs.org/build/three.min.js"></script>
<button id="btnRecalc" style="position: absolute;">Re-calculate UVs</button>

THREE.js (r60) PointLight not reflected by a special plane object (heightmapped from image)

UPDATE Cause of problem has been found - see Update section end of question.
I have a complex app using THREE.js (r60) which adds a special plane object to the main scene. The plane geometry is determined by heightmapping from an internally-supplied base64 uri image (size 16x16, 32x32 or 64x64 pixels). The scene has two static lights (ambient and directional) and one moveable point light which switches on and off.
In the complex app the point light is not reflected by the plane object. (Point light is toggled by pressing "R" key or button).
I have made a first JSFiddle example using THREE.js latest version (r70) where the lights work fine.
[Update] I have now made a second JSFiddle example using the older THREE.js library (r60) it also works OK.
I suspect the problem in the complex app (r60) may have something to do with system capacity and/or timing/sequencing. Capacity is definitely an issue because other simpler scene objects (boxes and cylinders) show individual responses or non-responses to the point light which vary from one run of the app to the next, seemingly depending on the overall level of system activity (CPU, memory usage). These simpler objects may reflect in one run but not in the next. But the heightmapped plane object is consistently non-reflective to the point light. These behaviors are observed on (i) a Win7 laptop and (ii) an Android KitKat tablet.
The heightmapping process may be part of the cause. I say this because when I comment out the heightmapped plane and activate a simple similar plane object (with randomly assigned z-levels) the latter plane behaves as expected (i.e. it reflects point light).
I guess that the usual approach now would be to upgrade my complex app to r70 (not a trivial step) and then start disabling chunks of the app to narrow down the cause. However it may be that the way in which heightmapping is implemented (e.g. with a callback) is a factor in explaining the failure of the heightmapped plane to reflect point light.
[RE-WRITTEN] So I would be grateful if anyone could take a look at the code in the correctly-working, previously-cited, (r70) JSFiddle example and point out any glaring design faults which (if applied in more complex, heavilly-loaded apps) might lead to failure of the height-mapped plane to reflect point light.
Full code (javascript, not html or css) of the (r70) JSFiddle:-
//... Heightmap from Image file
//... see http://danni-three.blogspot.co.uk/2013/09/threejs-heightmaps.html
var camera, scene, renderer;
var lpos_x = -60,lpos_y = 20,lpos_z = 100;
var mz = 1;
var time = 0, dt = 0;
var MyPlane, HPlane;
base64_imgData = "data:image/jpeg;base64,/9j/4AAQSkZJRgABAQEAeAB4AAD/4QBoRXhpZgAATU0AKgAAAAgABAEaAAUAAAABAAAAPgEbAAUAAAABAAAARgEoAAMAAAABAAIAAAExAAIAAAASAAAATgAAAAAAAAB4AAAAAQAAAHgAAAABUGFpbnQuTkVUIHYzLjUuMTAA/9sAQwANCQoLCggNCwsLDw4NEBQhFRQSEhQoHR4YITAqMjEvKi4tNDtLQDQ4RzktLkJZQkdOUFRVVDM/XWNcUmJLU1RR/9sAQwEODw8UERQnFRUnUTYuNlFRUVFRUVFRUVFRUVFRUVFRUVFRUVFRUVFRUVFRUVFRUVFRUVFRUVFRUVFRUVFRUVFR/8AAEQgAIAAgAwEiAAIRAQMRAf/EAB8AAAEFAQEBAQEBAAAAAAAAAAABAgMEBQYHCAkKC//EALUQAAIBAwMCBAMFBQQEAAABfQECAwAEEQUSITFBBhNRYQcicRQygZGhCCNCscEVUtHwJDNicoIJChYXGBkaJSYnKCkqNDU2Nzg5OkNERUZHSElKU1RVVldYWVpjZGVmZ2hpanN0dXZ3eHl6g4SFhoeIiYqSk5SVlpeYmZqio6Slpqeoqaqys7S1tre4ubrCw8TFxsfIycrS09TV1tfY2drh4uPk5ebn6Onq8fLz9PX29/j5+v/EAB8BAAMBAQEBAQEBAQEAAAAAAAABAgMEBQYHCAkKC//EALURAAIBAgQEAwQHBQQEAAECdwABAgMRBAUhMQYSQVEHYXETIjKBCBRCkaGxwQkjM1LwFWJy0QoWJDThJfEXGBkaJicoKSo1Njc4OTpDREVGR0hJSlNUVVZXWFlaY2RlZmdoaWpzdHV2d3h5eoKDhIWGh4iJipKTlJWWl5iZmqKjpKWmp6ipqrKztLW2t7i5usLDxMXGx8jJytLT1NXW19jZ2uLj5OXm5+jp6vLz9PX29/j5+v/aAAwDAQACEQMRAD8A19Z8SXdu5KOMKxBAFOi1uTUdNJguxFcAchv6Vz2so/mzKc8sc8VX8MyQjUVWYNweCO9AEsOuX8s+xrqWQh8DJ4rsJCphSN3Czsm7ArG1bT7fSFe7EZJzuX3J6VQsdRnvryJ2+/wooA6O501JY7yRh0U8Vyg1WzsghhsAkqnBO4nd713t8NsEqIhJfqRXEahotxPJlISDnOaANzWvL1rR4JiTG6ryorG0C2aDUI02lhu6kVZ02wvLVSpYtu65yRXQaZYvDL5rhRx2oA//2Q==";
init();
animate();
//==================================================================
function init() {
scene = new THREE.Scene();
camera = new THREE.PerspectiveCamera(45, window.innerWidth / window.innerHeight, 10);
camera.position.x = 1300;
camera.position.y = 400;
camera.position.z = 0;
camera.lookAt(0, 0, 0);
scene.add(camera);
scene.add(new THREE.AmbientLight(0x001900));
SunLight = new THREE.DirectionalLight(0xff0000,.3,20000);//...color, intensity, range.
SunLight.position.set(0, 3000, -8000);
scene.add(SunLight);
//POINT LIGHT
PL_color = 0x0000ff;
PL_intensity = 10;
PL_range_to_zero_intensity = 1200;
PL = new THREE.PointLight(PL_color, PL_intensity, PL_range_to_zero_intensity);
scene.add(PL);
PL_pos_x = -100;
PL_pos_y = -100;
PL_pos_z = 120;
PL.position.set(PL_pos_x, PL_pos_y, PL_pos_z);
//INDICATOR SPHERE
var s_Geometry = new THREE.SphereGeometry(5, 20, 20);
var s_Material = new THREE.MeshBasicMaterial({
color: 0xaaaaff
});
i_Sphere = new THREE.Mesh(s_Geometry, s_Material);
i_Sphere.position.set(PL_pos_x, PL_pos_y, PL_pos_z);
scene.add(i_Sphere);
//Plane02
var Plane02Geo = new THREE.PlaneGeometry(50, 50); //...
var Plane02Material = new THREE.MeshPhongMaterial({
side: THREE.DoubleSide
}, {
color: 0xaaaaaa
});
Plane02 = new THREE.Mesh(Plane02Geo, Plane02Material);
Plane02.position.set(0, 0, -120);
scene.add(Plane02);
//PEAS
xxx = SOW_F_Make_peas();
//RENDERER
renderer = new THREE.WebGLRenderer({
antialias: true
});
renderer.setSize(window.innerWidth, window.innerHeight);
renderer.shadowMapEnabled = true;
renderer.shadowMapSoft = false;
document.body.appendChild(renderer.domElement);
// controls
controls = new THREE.OrbitControls(camera, renderer.domElement);
xxx = SOW_F_Make_Heightmap_Object_from_Image_File(scene, camera);
} //...EOFunction Init
//==================================================================
function animate() {
dt = 0.1;
time += dt;
if (time < 10000) {
requestAnimationFrame(animate);
//move point light & indicator sphere
speed = 16;
if (Math.abs(PL_pos_z) > 400) mz = (-1)* mz;
PL_pos_x += 0.01 * speed * mz;
PL_pos_y += 0.05 * speed * mz;
PL_pos_z -= 0.2 * speed * mz;
PL.position.set(PL_pos_x, PL_pos_y, PL_pos_z);
i_Sphere.position.set(PL_pos_x, PL_pos_y, PL_pos_z);
renderer.render(scene, camera);
} else alert("Time=" + time + "Finished");
}
//==================================================================
function SOW_F_Make_Heightmap_Object_from_Image_File(givenScene, givenCamera) {
//... Read a Heightmap from a coloured image file
//... into a (pre-defined global) plane object called HPlane
MyImage = new Image();
MyImage.onload = function () {
var MyPlane_width = 1000;//6000; //...MyPlane width or height are in scene units and do not have to match image width or height
var MyPlane_height = 1000;//6000;
var MyPlane_w_segs = MyImage.naturalWidth - 1; //... important that this mapping is correct for texture 1 pixel :: 1 segment.
var MyPlane_h_segs = MyImage.naturalHeight - 1; //... important that this mapping is correct for texture 1 pixel :: 1 segment.
var Hgeometry = new THREE.PlaneGeometry(MyPlane_width, MyPlane_height, MyPlane_w_segs, MyPlane_h_segs);
//var texture = THREE.ImageUtils.loadTexture( '/images/Tri_VP_Texturemap.jpg' );
var texture = THREE.ImageUtils.loadTexture( base64_imgData );
//... Choose texture or color
//var Hmaterial = new THREE.MeshLambertMaterial( { map: texture, side: THREE.DoubleSide} );//....fails
var Hmaterial = new THREE.MeshPhongMaterial( {
color: 0x111111 , side: THREE.DoubleSide } ); //... works OK
HPlane = new THREE.Mesh(Hgeometry, Hmaterial);
//...get Height Data from Image
var scale = 0.6;//1//6; //0.25;
var Height_data = DA_getHeightData(MyImage, scale);
//... set height of vertices
X_offset = 0;
Y_offset = 0;
Z_offset = -100; //...this will (after rotation) add to the vertical height dimension (+ => up).
for (var iii = 0; iii < HPlane.geometry.vertices.length; iii++) {
//HPlane.geometry.vertices[iii].x = X_offset;
//HPlane.geometry.vertices[iii].y = Y_offset;
HPlane.geometry.vertices[iii].z = Z_offset + Height_data[iii];
}
//----------------------------------------------------------------------
//... Must do it in this order...Faces before Vertices
//... see WestLangley's response in http://stackoverflow.com/questions/13943907/my-object-isnt-reflects-the-light-in-three-js
HPlane.rotation.x = (-(Math.PI) / 2); //... rotate MyPlane -90 degrees on X
//alert("Rotated");
HPlane.geometry.computeFaceNormals(); //... for Lambert & Phong materials
HPlane.geometry.computeVertexNormals(); //... for Lambert & Phong materials
/*
HPlane.updateMatrixWorld();
HPlane.matrixAutoUpdate = false;
HPlane.geometry.verticesNeedUpdate = true;
*/
givenScene.add(HPlane);
HPlane.position.set(0, -150, 0);//... cosmetic
//return HPlane; //... not necessary, given that HPlane is global.
} ; //... End of MyImage.onload = function ()
//===============================================================
//... *** IMPORTANT ***
//... Only NOW do we command the script to actually load the image source
//... This .src statement will load the image from file into MyImage object
//... and invoke the pre-associated MyImage.OnLoad function
//... cause cross-origin problem: MyImage.src = '/images/Tri_VP_Heightmap_64x64.jpg'; //...if image file is local to this html file.
MyImage.src = base64_imgData;//... uses image data provided in the script to avoid Cross-origin file source restrictions.
} //... End of function SOW_F_Make_Heightmap_Object_from_Image_File
//===========================================================================
function DA_getHeightData(d_img, scale) {
//... This is used by function SOW_F_Make_Heightmap_Object_from_Image_File.
//if (scale == undefined) scale=1;
var canvas = document.createElement('canvas');
canvas.width = d_img.width; //OK
canvas.height = d_img.height;
var context = canvas.getContext('2d');
var size = d_img.width * d_img.height;
var data = new Float32Array(size);
context.drawImage(d_img, 0, 0);
for (var ii = 0; ii < size; ii++) {
data[ii] = 0;
}
var imgData = context.getImageData(0, 0, d_img.width, d_img.height);
var pix = imgData.data; //... Uint(8) UnClamped Array[1024] for a 16x16 = 256 pixel image = 4 slots per pixel.
var jjj = 0;
//... presumably each pix cell can have value 0 to 255
for (var iii = 0; iii < pix.length; iii += 4) {
var all = pix[iii] + pix[iii + 1] + pix[iii + 2];
//... I guess RGBA and we don't use the fourth cell (A, = Alpha channel)
jjj++;
data[jjj] = all * scale / 3; //...original code used 12 not 3 ??? and divided by scale.
//console.log (iii, all/(3*scale), data[jjj]);
}
return data;
} //... end of function DA_getHeightData(d_img,scale)
//==================================================================================================
function SOW_F_Get_A_Plane(givenScene, givenCamera) {
//...MyPlane width or height are in scene units and do not have to match image width or height
var MyPlane_width = 1000;
var MyPlane_height = 1000;
var MyPlane_w_segs = 64; //...
var MyPlane_h_segs = 64; //...
geometry = new THREE.PlaneGeometry(MyPlane_width, MyPlane_height, MyPlane_w_segs, MyPlane_h_segs);
//var material = new THREE.MeshLambertMaterial( { color: 0xeeee00, side: THREE.DoubleSide} );
var material = new THREE.MeshPhongMaterial({
color: 0xeeee00,side: THREE.DoubleSide
}); //... OK
MyPlane = new THREE.Mesh(geometry, material);
givenScene.add(MyPlane);
MyPlane.rotation.x = (-(Math.PI) / 2); // rotate it -90 degrees on X
MyPlane.position.set(0, 100, 0);
MyPlane.geometry.computeFaceNormals(); //...for Lambert & Phong materials
MyPlane.geometry.computeVertexNormals(); //...for Lambert & Phong materials
/*
MyPlane.geometry.verticesNeedUpdate = true;
MyPlane.updateMatrixWorld();
MyPlane.matrixAutoUpdate = false;
*/
} //... EOF SOW_F_Get_A_Plane
//====================================================================
function SOW_F_Make_peas()
{
//----------------- Make an array of spheres -----------------------
Pea_geometry = new THREE.SphereGeometry(5,16,16);
//Pea_material = new THREE.MeshNormalMaterial({ shading: THREE.SmoothShading});
Pea_material = new THREE.MeshPhongMaterial({ color: 0xaa5522});
// global...
num_peas = 1200;
for (var iii = 0; iii < num_peas; iii++)
{
//...now global
ob_Pea = new THREE.Mesh(Pea_geometry, Pea_material);
ob_Pea.position.set(
400 * Math.random() - 150,
300 * Math.random() - 150,
1200 * Math.random() - 150);
scene.add(ob_Pea);//TEST
}
}
UPDATE
It appears the problem is a result of phasing. See this new JSFiddle(r70). Pointlight is created in function init() but not added to scene, or is immediately removed from scene after being added. Then various graphical mesh objects are created. When pointlight is added back to the scene (in the animate loop) it is too late - the mesh objects will not be illuminated by the pointlight.
A procedural solution is simply to not remove point lights from the scene if they are to be used later. If they need to be "extinguished" temporarily, then just turn down the intensity and turn it up later, e.g.
myPointLight.intensity = 0.00
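A minimal sketch of that approach (the helper name is made up):
// Toggle the point light by zeroing its intensity instead of scene.remove(),
// so the light stays in the scene and materials keep reacting to it later
var savedIntensity = 10;
function togglePointLight(light) {
  if (light.intensity > 0) {
    savedIntensity = light.intensity;
    light.intensity = 0;
  } else {
    light.intensity = savedIntensity;
  }
}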

ThreeJS - how to pick just one type of objects?

I'm new to ThreeJS and I have an issue with picking objects by raycasting. I have created some spheres and some lines but only want to change the spheres on mouseover. I think I need to add some condition in the raycast code but I have no idea what...
Here's my code, hope anyone can help:
This creates the objects:
var numSpheres = 10;
var angRand = [numSpheres];
var spread = 10;
var radius = windowY/5;
var radiusControl = 20;
//sphere
var sphereGeometry = new THREE.SphereGeometry(0.35, 100, 100);
//line
var lineGeometry = new THREE.Geometry();
var lineMaterial = new THREE.LineBasicMaterial({
color: 0xCCCCCC
});
//create dynamically
for (var i = 0; i < numSpheres; i++) {
var sphereMaterial = new THREE.MeshBasicMaterial({color: 0x334455});
var sphere = new THREE.Mesh(sphereGeometry, sphereMaterial);
var line = new THREE.Line(lineGeometry, lineMaterial);
angRand[i] = Math.floor((Math.random() * 360) + 1);//random angle for each sphere/line
var radiusIncr = spread * (angRand[i]+200)/180;
var xPos = Math.cos((360/numSpheres * (i) + angRand[i]/2 )) * (radius - radiusIncr);
var yPos = Math.sin((360/numSpheres * (i) + angRand[i]/2 )) * (radius - radiusIncr);
var offsetY = Math.floor((Math.random()*5)+1);
sphere.position.x = xPos/radiusControl;
sphere.position.y = yPos/radiusControl + offsetY;
lineGeometry.vertices.push(
new THREE.Vector3(0, 0, 0),
new THREE.Vector3(sphere.position.x, sphere.position.y, 0)
);
scene.add(sphere);
scene.add(line);
}
And this is my raycast:
var mouse = {
x: 0,
y: 0
},
INTERSECTED;
window.addEventListener('mousemove', onMouseMove, false);
window.requestAnimationFrame(render);
function onMouseMove(event) {
// calculate mouse position in normalized device coordinates
// (-1 to +1) for both components
//event.preventDefault();
mouse.x = (event.clientX / window.innerWidth) * 2 - 1;
mouse.y = -(event.clientY / window.innerHeight) * 2 + 1;
//console.log(mouse.x + " | " + mouse.y);
}
function mousePos() {
// find intersections
// create a Ray with origin at the mouse position
// and direction into the scene (camera direction)
var vector = new THREE.Vector3(mouse.x, mouse.y, 0.5);
vector.unproject(camera);
var ray = new THREE.Raycaster(camera.position, vector.sub(camera.position).normalize());
ray.linePrecision = 1;
// create an array containing all objects in the scene with which the ray intersects
var intersects = ray.intersectObjects(scene.children, true);
//console.log(intersects.length);
// INTERSECTED = the object in the scene currently closest to the camera
// and intersected by the Ray projected from the mouse position
// if there is one (or more) intersections
if (intersects.length > 0) {
// if the closest object intersected is not the currently stored intersection object
if (intersects[0].object != INTERSECTED) {
// restore previous intersection object (if it exists) to its original color
if (INTERSECTED)
INTERSECTED.material.color.setHex(INTERSECTED.currentHex);
// store reference to closest object as current intersection object
INTERSECTED = intersects[0].object;
// store color of closest object (for later restoration)
INTERSECTED.currentHex = INTERSECTED.material.color.getHex();
// set a new color for closest object
INTERSECTED.material.color.setHex(0xEE7F00);
//INTERSECTED.radius.set( 1, 2, 2 );
}
} else // there are no intersections
{
// restore previous intersection object (if it exists) to its original color
if (INTERSECTED)
INTERSECTED.material.color.setHex(INTERSECTED.currentHex);
//INTERSECTED.scale.set( 1, 1, 1 );
// remove previous intersection object reference
// by setting current intersection object to "nothing"
INTERSECTED = null;
}
}
The raycaster returns an array of intersections, each of which contains information about what the ray hit.
Since you only have spheres and lines, you can branch on the geometry type, intersects[0].object.geometry.type, which would be either 'LineGeometry' or 'SphereGeometry'.
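For example, a minimal sketch of that branch using the intersects array from the question's mousePos() (checking only for the sphere geometry type, which is the safe side of the test regardless of what the line geometry reports):
if (intersects.length > 0) {
  var hit = intersects[0].object;
  // Only react when the closest hit is one of the spheres, not a line
  if (hit.geometry && hit.geometry.type === 'SphereGeometry') {
    hit.material.color.setHex(0xEE7F00);
  }
}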
Edit: Obligatory jsfiddle, see console for hit output.
http://jsfiddle.net/z43hjqm9/1/
To simplify working with the mouse, you can use the EventsControls class. Try working through this example.
<script src="js/controls/EventsControls.js"></script>
EventsControls = new EventsControls( camera, renderer.domElement );
EventsControls.attachEvent('mouseOver', function() {
this.container.style.cursor = 'pointer';
this.mouseOvered.material = selMaterial;
...
});
EventsControls.attachEvent('mouseOut', function() {
this.container.style.cursor = 'auto';
this.mouseOvered.material = autoMaterial;
...
});
//
function render() {
EventsControls.update();
controls.update();
renderer.render(scene, camera);
}
In your code,
var intersects = ray.intersectObjects(scene.children, true);
the first parameter to the call is the set of objects that will be evaluated to see if they, or any of their descendants (when recursive is true), intersect the ray.
So, simply create a target object and add the spheres to it (but not the lines), then raycast against that target, as sketched below.
This will also make your call more efficient.
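A minimal sketch of that idea (the pickableSpheres name is made up): group the spheres under one parent object and raycast only against it:
// Parent object that holds only the spheres we want to be pickable
var pickableSpheres = new THREE.Object3D();
scene.add(pickableSpheres);
// In the creation loop: spheres go into the group, lines straight into the scene
pickableSpheres.add(sphere);
scene.add(line);
// In mousePos(): test only the group's children (recursively)
var intersects = ray.intersectObjects(pickableSpheres.children, true);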
1. Use different arrays to hold the different object types:
a. For all objects of type 1, after scene.add(objectType1), do array1.push(objectType1).
b. For all objects of type 2, after scene.add(objectType2), do array2.push(objectType2).
Now, whichever type of objects you want to interact with, pass that array to intersectObjects:
var intersects = raycaster.intersectObjects( arrayType1, true );
Now only the arrayType1 objects will be interactive.
