Updating three.js texture through the dat.gui dropdown

I have loaded different textures using textureLoader and I am trying to update them using dat.gui controls.
Why is the code below not working?
gui.add(mesh.position, "y", -1, 1, 0.1);
gui.add(mesh.material, "map", { alpha: alphaTexture, color: colorTexture, normal: normalTexture })
  .onChange(() => {
    mesh.material.needsUpdate = true;
    console.log("updated");
  });
It gives this error:
"Uncaught TypeError: m is undefined" [error][1]

After some tweaking, I found that the values of the object (or array) passed as the third argument only support string types, so passing an object as a value will not work.
This is the closest workaround I could think of:
/* GUI options */
const guiOptions = {
  mesh_material_map: "color",
};

/* Textures */
const textureLoader = new THREE.TextureLoader(loadingManager);
const colorTexture = textureLoader.load("/textures/door/color.jpg");
const alphaTexture = textureLoader.load("/textures/door/alpha.jpg");
const normalTexture = textureLoader.load("/textures/door/normal.jpg");

const guiTextureHash = {
  color: colorTexture,
  alpha: alphaTexture,
  normal: normalTexture,
};

/* Add to gui */
gui.add(guiOptions, "mesh_material_map", Object.keys(guiTextureHash)).onChange((value) => {
  mesh.material.map = guiTextureHash[value];
  mesh.material.needsUpdate = true;
  console.log("updated", value);
});

I found your topic while looking for a texture picker. It's probably a little away from your starting point, but it could help someone else. I finally made a simple texture picker with a dropdown selection key in dat.gui. The goal is to be able to change my matcap texture on the fly, going through an array of loaded textures.
const gui = new dat.GUI()

const textureLoader = new THREE.TextureLoader()
const myMatCap = [
  textureLoader.load('./textures/matcaps/1.png'),
  textureLoader.load('./textures/matcaps/2.png'),
  textureLoader.load('./textures/matcaps/3.png')
]

const parameters = {
  color: 0xff0000,
  matCapTexture: 0
}

const updateAllMaterials = () => {
  scene.traverse((child) => {
    if (child instanceof THREE.Mesh && child.material instanceof THREE.MeshMatcapMaterial) {
      child.material.matcap = myMatCap[parameters.matCapTexture]
      child.material.needsUpdate = true
    }
  })
}

gui.add(parameters, 'matCapTexture', {
  terracotta: 0,
  grey: 1,
  chrome: 2,
}).onFinishChange(() => {
  updateAllMaterials()
})

let mesh = new THREE.Mesh(
  geometry,
  new THREE.MeshMatcapMaterial({
    side: THREE.DoubleSide,
    matcap: myMatCap[parameters.matCapTexture]
  })
);
scene.add(mesh)

Related

How do you correctly include three.js and gltf.js

I'm rewriting my question because stackoverflow thought my post was spam (because I included 6000+ lines of code). I'm trying to make a web app that tracks the user's face and puts a 3D object over the face like a "filter". The thing is, I don't want this app to have any external dependencies except one (at least for scripts/packages/modules/whatever). Therefore, I copied three.js minified (from https://cdn.jsdelivr.net/npm/three@0.120.1/build/three.min.js) inline into the HTML file, as well as the GLTFLoader.js script from the GitHub repository in examples/jsm/loaders/.
I started with Jeeliz's face filter repository, and I'm trying to implement GLTFLoader.js, but when I use
const loader = new GLTFLoader();
it gives me a "GLTFLoader is not defined" error, as well as console messages saying the ES6 module was not imported.
When I use
const loader = new THREE.GLTFLoader();
it says it's not a constructor, so I lean towards the former being the correct way to construct the loader.
I appreciate any help in advance! I mostly code in Python or C++ and I'm still a beginner, but I've tinkered with JavaScript a few times, so I thought I could handle this. I posted this question once but stackoverflow crashed after saying the post was spam, so I'm including a link to the html file:
https://www.dropbox.com/s/p86xchlldr7j2vf/index-mine.html?dl=0
Code:
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
<meta http-equiv="content-language" content="en-EN" />
<title>Filter Web App</title>
<!-- INCLUDE FACEFILTER SCRIPT -->
<script src='https://raw.githubusercontent.com/jeeliz/jeelizFaceFilter/master/dist/jeelizFaceFilter.js'>
</script>
<!-- INCLUDE THREE.JS -->
<script src='https://cdn.jsdelivr.net/npm/three@0.120.1/build/three.min.js'>
</script>
<!-- INCLUDE RESIZER -->
<script src='https://raw.githubusercontent.com/jeeliz/jeelizFaceFilter/master/helpers/JeelizResizer.js'>
</script>
<!-- INCLUDE GLTFLOADER.JS -->
<script src='https://raw.githubusercontent.com/mrdoob/three.js/dev/examples/jsm/loaders/GLTFLoader.js'>
</script>
<!-- INCLUDE JEELIZ THREE.JS HELPER -->
<script>
/*
Helper for Three.js
*/
const punkThreeHelper = (function(){
// internal settings:
const _settings = {
rotationOffsetX: 0.0, // negative -> look upper. in radians
pivotOffsetYZ: [0.2, 0.6],// YZ of the distance between the center of the cube and the pivot
detectionThreshold: 0.8, // sensibility, between 0 and 1. Less -> more sensitive
detectionHysteresis: 0.02,
//tweakMoveYRotateX: 0,//0.5, // tweak value: move detection window along Y axis when rotate the face around X (look up <-> down)
cameraMinVideoDimFov: 35 // Field of View for the smallest dimension of the video in degrees
};
// private vars:
let _threeRenderer = null,
_threeScene = null,
_threeVideoMesh = null,
_threeVideoTexture = null,
_threeTranslation = null;
let _maxFaces = -1,
_isMultiFaces = false,
_detectCallback = null,
_isVideoTextureReady = false,
_isSeparateThreeCanvas = false,
_faceFilterCv = null,
_videoElement = null,
_isDetected = false,
_scaleW = 1,
_canvasAspectRatio = -1;
const _threeCompositeObjects = [];
let _gl = null,
_glVideoTexture = null,
_glShpCopyCut = null,
_glShpCopyCutVideoMatUniformPointer = null;
let _videoTransformMat2 = null;
// private funcs:
function destroy(){
_isVideoTextureReady = false;
_threeCompositeObjects.splice(0);
if (_threeVideoTexture){
_threeVideoTexture.dispose();
_threeVideoTexture = null;
}
}
function create_threeCompositeObjects(){
for (let i=0; i<_maxFaces; ++i){
// COMPOSITE OBJECT WHICH WILL TRACK A DETECTED FACE
const threeCompositeObject = new THREE.Object3D();
threeCompositeObject.frustumCulled = false;
threeCompositeObject.visible = false;
_threeCompositeObjects.push(threeCompositeObject);
_threeScene.add(threeCompositeObject);
}
}
function create_videoScreen(){
const videoScreenVertexShaderSource = "attribute vec2 position;\n\
uniform mat2 videoTransformMat2;\n\
varying vec2 vUV;\n\
void main(void){\n\
gl_Position = vec4(position, 0., 1.);\n\
vUV = 0.5 + videoTransformMat2 * position;\n\
}";
const videoScreenFragmentShaderSource = "precision lowp float;\n\
uniform sampler2D samplerVideo;\n\
varying vec2 vUV;\n\
void main(void){\n\
gl_FragColor = texture2D(samplerVideo, vUV);\n\
}";
if (_isSeparateThreeCanvas){
const compile_shader = function(source, type, typeString) {
const glShader = _gl.createShader(type);
_gl.shaderSource(glShader, source);
_gl.compileShader(glShader);
if (!_gl.getShaderParameter(glShader, _gl.COMPILE_STATUS)) {
alert("ERROR IN " + typeString + " SHADER: " + _gl.getShaderInfoLog(glShader));
return null;
}
return glShader;
};
const glShaderVertex = compile_shader(videoScreenVertexShaderSource, _gl.VERTEX_SHADER, 'VERTEX');
const glShaderFragment = compile_shader(videoScreenFragmentShaderSource, _gl.FRAGMENT_SHADER, 'FRAGMENT');
_glShpCopyCut = _gl.createProgram();
_gl.attachShader(_glShpCopyCut, glShaderVertex);
_gl.attachShader(_glShpCopyCut, glShaderFragment);
_gl.linkProgram(_glShpCopyCut);
const samplerVideo = _gl.getUniformLocation(_glShpCopyCut, 'samplerVideo');
_glShpCopyCutVideoMatUniformPointer = _gl.getUniformLocation(_glShpCopyCut, 'videoTransformMat2');
return;
}
// init video texture with red:
_threeVideoTexture = new THREE.DataTexture( new Uint8Array([255,0,0]), 1, 1, THREE.RGBFormat);
_threeVideoTexture.needsUpdate = true;
// CREATE THE VIDEO BACKGROUND:
const videoMaterial = new THREE.RawShaderMaterial({
depthWrite: false,
depthTest: false,
vertexShader: videoScreenVertexShaderSource,
fragmentShader: videoScreenFragmentShaderSource,
uniforms:{
samplerVideo: {value: _threeVideoTexture},
videoTransformMat2: {
value: _videoTransformMat2
}
}
});
const videoGeometry = new THREE.BufferGeometry()
const videoScreenCorners = new Float32Array([-1,-1, 1,-1, 1,1, -1,1]);
// handle both new and old THREE.js versions:
const setVideoGeomAttribute = (videoGeometry.setAttribute || videoGeometry.addAttribute).bind(videoGeometry);
setVideoGeomAttribute( 'position', new THREE.BufferAttribute( videoScreenCorners, 2 ) );
videoGeometry.setIndex(new THREE.BufferAttribute(new Uint16Array([0,1,2, 0,2,3]), 1));
_threeVideoMesh = new THREE.Mesh(videoGeometry, videoMaterial);
that.apply_videoTexture(_threeVideoMesh);
_threeVideoMesh.renderOrder = -1000; // render first
_threeVideoMesh.frustumCulled = false;
_threeScene.add(_threeVideoMesh);
} //end create_videoScreen()
function detect(detectState){
_threeCompositeObjects.forEach(function(threeCompositeObject, i){
_isDetected = threeCompositeObject.visible;
const ds = detectState[i];
if (_isDetected && ds.detected < _settings.detectionThreshold-_settings.detectionHysteresis){
// DETECTION LOST
if (_detectCallback) _detectCallback(i, false);
threeCompositeObject.visible = false;
} else if (!_isDetected && ds.detected > _settings.detectionThreshold+_settings.detectionHysteresis){
// FACE DETECTED
if (_detectCallback) _detectCallback(i, true);
threeCompositeObject.visible = true;
}
}); //end loop on all detection slots
}
function update_poses(ds, threeCamera){
// tan( <horizontal FoV> / 2 ):
const halfTanFOVX = Math.tan(threeCamera.aspect * threeCamera.fov * Math.PI/360); //tan(<horizontal FoV>/2), in radians (threeCamera.fov is vertical FoV)
_threeCompositeObjects.forEach(function(threeCompositeObject, i){
if (!threeCompositeObject.visible) return;
const detectState = ds[i];
// tweak Y position depending on rx:
//const tweak = _settings.tweakMoveYRotateX * Math.tan(detectState.rx);
const cz = Math.cos(detectState.rz), sz = Math.sin(detectState.rz);
// relative width of the detection window (1-> whole width of the detection window):
const W = detectState.s * _scaleW;
// distance between the front face of the cube and the camera:
const DFront = 1 / ( 2 * W * halfTanFOVX );
// D is the distance between the center of the unit cube and the camera:
const D = DFront + 0.5;
// coords in 2D of the center of the detection window in the viewport:
const xv = detectState.x * _scaleW;
const yv = detectState.y * _scaleW;
// coords in 3D of the center of the cube (in the view coordinates system):
const z = -D; // minus because view coordinate system Z goes backward
const x = xv * D * halfTanFOVX;
const y = yv * D * halfTanFOVX / _canvasAspectRatio;
// set position before pivot:
threeCompositeObject.position.set(-sz*_settings.pivotOffsetYZ[0], -cz*_settings.pivotOffsetYZ[0], -_settings.pivotOffsetYZ[1]);
// set rotation and apply it to position:
threeCompositeObject.rotation.set(detectState.rx+_settings.rotationOffsetX, detectState.ry, detectState.rz, "ZYX");
threeCompositeObject.position.applyEuler(threeCompositeObject.rotation);
// add translation part:
_threeTranslation.set(x, y+_settings.pivotOffsetYZ[0], z+_settings.pivotOffsetYZ[1]);
threeCompositeObject.position.add(_threeTranslation);
}); //end loop on composite objects
}
//public methods:
const that = {
// launched with the same spec object than callbackReady. set spec.threeCanvasId to the ID of the threeCanvas to be in 2 canvas mode:
init: function(spec, detectCallback){
destroy();
_maxFaces = spec.maxFacesDetected;
_glVideoTexture = spec.videoTexture;
_videoTransformMat2 = spec.videoTransformMat2;
_gl = spec.GL;
_faceFilterCv = spec.canvasElement;
_isMultiFaces = (_maxFaces>1);
_videoElement = spec.videoElement;
// enable 2 canvas mode if necessary:
let threeCanvas = null;
if (spec.threeCanvasId){
_isSeparateThreeCanvas = true;
// adjust the size of the three.js canvas to the size of the faceFilter canvas:
threeCanvas = document.getElementById(spec.threeCanvasId);
threeCanvas.setAttribute('width', _faceFilterCv.width);
threeCanvas.setAttribute('height', _faceFilterCv.height);
} else {
threeCanvas = _faceFilterCv;
}
if (typeof(detectCallback) !== 'undefined'){
_detectCallback = detectCallback;
}
// init THREE.JS context:
_threeRenderer = new THREE.WebGLRenderer({
context: (_isSeparateThreeCanvas) ? null : _gl,
canvas: threeCanvas,
alpha: (_isSeparateThreeCanvas || spec.alpha) ? true : false,
preserveDrawingBuffer: true // to make image capture possible
});
_threeScene = new THREE.Scene();
_threeTranslation = new THREE.Vector3();
create_threeCompositeObjects();
create_videoScreen();
// handle device orientation change:
window.addEventListener('orientationchange', function(){
setTimeout(punkfacefilter.resize, 1000);
}, false);
const returnedDict = {
videoMesh: _threeVideoMesh,
renderer: _threeRenderer,
scene: _threeScene
};
if (_isMultiFaces){
returnedDict.faceObjects = _threeCompositeObjects
} else {
returnedDict.faceObject = _threeCompositeObjects[0];
}
return returnedDict;
}, //end that.init()
detect: function(detectState){
const ds = (_isMultiFaces) ? detectState : [detectState];
// update detection states:
detect(ds);
},
get_isDetected: function() {
return _isDetected;
},
render: function(detectState, threeCamera){
const ds = (_isMultiFaces) ? detectState : [detectState];
// update detection states then poses:
detect(ds);
update_poses(ds, threeCamera);
if (_isSeparateThreeCanvas){
// render the video texture on the faceFilter canvas:
_gl.viewport(0, 0, _faceFilterCv.width, _faceFilterCv.height);
_gl.useProgram(_glShpCopyCut);
_gl.uniformMatrix2fv(_glShpCopyCutVideoMatUniformPointer, false, _videoTransformMat2);
_gl.activeTexture(_gl.TEXTURE0);
_gl.bindTexture(_gl.TEXTURE_2D, _glVideoTexture);
_gl.drawElements(_gl.TRIANGLES, 3, _gl.UNSIGNED_SHORT, 0);
} else {
// reinitialize the state of THREE.JS because JEEFACEFILTER have changed stuffs:
// -> can be VERY costly !
_threeRenderer.state.reset();
}
// trigger the render of the THREE.JS SCENE:
_threeRenderer.render(_threeScene, threeCamera);
},
sortFaces: function(bufferGeometry, axis, isInv){ // sort faces long an axis
// Useful when a bufferGeometry has alpha: we should render the last faces first
const axisOffset = {X:0, Y:1, Z:2}[axis.toUpperCase()];
const sortWay = (isInv) ? -1 : 1;
// fill the faces array:
const nFaces = bufferGeometry.index.count/3;
const faces = new Array(nFaces);
for (let i=0; i<nFaces; ++i){
faces[i] = [bufferGeometry.index.array[3*i], bufferGeometry.index.array[3*i+1], bufferGeometry.index.array[3*i+2]];
}
// compute centroids:
const aPos = bufferGeometry.attributes.position.array;
const centroids = faces.map(function(face, faceIndex){
return [
(aPos[3*face[0]]+aPos[3*face[1]]+aPos[3*face[2]])/3, // X
(aPos[3*face[0]+1]+aPos[3*face[1]+1]+aPos[3*face[2]+1])/3, // Y
(aPos[3*face[0]+2]+aPos[3*face[1]+2]+aPos[3*face[2]+2])/3, // Z
face
];
});
// sort centroids:
centroids.sort(function(ca, cb){
return (ca[axisOffset]-cb[axisOffset]) * sortWay;
});
// reorder bufferGeometry faces:
centroids.forEach(function(centroid, centroidIndex){
const face = centroid[3];
bufferGeometry.index.array[3*centroidIndex] = face[0];
bufferGeometry.index.array[3*centroidIndex+1] = face[1];
bufferGeometry.index.array[3*centroidIndex+2] = face[2];
});
}, //end sortFaces
get_threeVideoTexture: function(){
return _threeVideoTexture;
},
apply_videoTexture: function(threeMesh){
if (_isVideoTextureReady){
return;
}
threeMesh.onAfterRender = function(){
// Replace _threeVideoTexture.__webglTexture by the real video texture:
try {
_threeRenderer.properties.update(_threeVideoTexture, '__webglTexture', _glVideoTexture);
_threeVideoTexture.magFilter = THREE.LinearFilter;
_threeVideoTexture.minFilter = THREE.LinearFilter;
_isVideoTextureReady = true;
} catch(e){
console.log('WARNING in punkThreeHelper: the glVideoTexture is not fully initialized');
}
delete(threeMesh.onAfterRender);
};
},
// create an occluder, IE a transparent object which writes on the depth buffer:
create_threejsOccluder: function(occluderURL, callback){
const occluderMesh = new THREE.Mesh();
new THREE.BufferGeometryLoader().load(occluderURL, function(occluderGeometry){
const mat = new THREE.ShaderMaterial({
vertexShader: THREE.ShaderLib.basic.vertexShader,
fragmentShader: "precision lowp float;\n void main(void){\n gl_FragColor=vec4(1.,0.,0.,1.);\n }",
uniforms: THREE.ShaderLib.basic.uniforms,
colorWrite: false
});
occluderMesh.renderOrder = -1; //render first
occluderMesh.material = mat;
occluderMesh.geometry = occluderGeometry;
if (typeof(callback)!=='undefined' && callback) callback(occluderMesh);
});
return occluderMesh;
},
set_pivotOffsetYZ: function(pivotOffset) {
_settings.pivotOffsetYZ = pivotOffset;
},
create_camera: function(zNear, zFar){
const threeCamera = new THREE.PerspectiveCamera(1, 1, (zNear) ? zNear : 0.1, (zFar) ? zFar : 100);
that.update_camera(threeCamera);
return threeCamera;
},
update_camera: function(threeCamera){
// compute aspectRatio:
const canvasElement = _threeRenderer.domElement;
const cvw = canvasElement.width;
const cvh = canvasElement.height;
_canvasAspectRatio = cvw / cvh;
// compute vertical field of view:
const vw = _videoElement.videoWidth;
const vh = _videoElement.videoHeight;
const videoAspectRatio = vw / vh;
const fovFactor = (vh > vw) ? (1.0 / videoAspectRatio) : 1.0;
const fov = _settings.cameraMinVideoDimFov * fovFactor;
console.log('INFO in punkThreeHelper - update_camera(): Estimated vertical video FoV is', fov);
// compute X and Y offsets in pixels:
let scale = 1.0;
if (_canvasAspectRatio > videoAspectRatio) {
// the canvas is more in landscape format than the video, so we crop top and bottom margins:
scale = cvw / vw;
} else {
// the canvas is more in portrait format than the video, so we crop right and left margins:
scale = cvh / vh;
}
const cvws = vw * scale, cvhs = vh * scale;
const offsetX = (cvws - cvw) / 2.0;
const offsetY = (cvhs - cvh) / 2.0;
_scaleW = cvw / cvws;
// apply parameters:
threeCamera.aspect = _canvasAspectRatio;
threeCamera.fov = fov;
console.log('INFO in punkThreeHelper.update_camera(): camera vertical estimated FoV is', fov, 'deg');
threeCamera.setViewOffset(cvws, cvhs, offsetX, offsetY, cvw, cvh);
threeCamera.updateProjectionMatrix();
// update drawing area:
_threeRenderer.setSize(cvw, cvh, false);
_threeRenderer.setViewport(0, 0, cvw, cvh);
}, //end update_camera()
resize: function(w, h, threeCamera){
_threeRenderer.domElement.width = w;
_threeRenderer.domElement.height = h;
punkfacefilter.resize();
if (threeCamera){
that.update_camera(threeCamera);
}
}
}
return that;
})();
// Export ES6 module:
try {
module.exports = punkThreeHelper;
} catch(e){
console.log('punkThreeHelper ES6 Module not exported');
window.punkThreeHelper = punkThreeHelper;
}
</script>
<!-- INCLUDE DEMO SCRIPT -->
<script>
let THREECAMERA = null;
// callback: launched if a face is detected or lost.
function detect_callback(faceIndex, isDetected) {
if (isDetected) {
console.log('INFO in detect_callback(): DETECTED');
} else {
console.log('INFO in detect_callback(): LOST');
}
}
// build the 3D. called once when punk Face Filter is OK
function init_threeScene(spec) {
const threeStuffs = punkThreeHelper.init(spec, detect_callback);
// CREATE A CUBE
const loader = new GLTFLoader();
loader.load( '/your-glb-file2.glb', function ( gltf ) {
threeStuffs.faceObject.add( gltf.scene );
} );
//CREATE THE CAMERA
THREECAMERA = punkThreeHelper.create_camera();
}
// entry point:
function main(){
punkResizer.size_canvas({
canvasId: 'jeeFaceFilterCanvas',
callback: function(isError, bestVideoSettings){
init_faceFilter(bestVideoSettings);
}
})
}
function init_faceFilter(videoSettings){
punkfacefilter.init({
followZRot: true,
canvasId: 'jeeFaceFilterCanvas',
NNCPath: '/', // root of NN_DEFAULT.json file
maxFacesDetected: 1,
callbackReady: function(errCode, spec){
if (errCode){
console.log('AN ERROR HAPPENS. ERR =', errCode);
return;
}
console.log('INFO: punkfacefilter IS READY');
init_threeScene(spec);
},
// called at each render iteration (drawing loop):
callbackTrack: function(detectState){
punkThreeHelper.render(detectState, THREECAMERA);
}
}); //end punkfacefilter.init call
}
window.addEventListener('load', main);
</script>
<style>
a {color: #eee; text-decoration: none}
a:hover {color: blue;}
body {overflow: auto; overflow-y: auto;
background-color: white;
background-attachment: fixed;
background-position: center;
background-size: contain;
margin: 0px;}
#jeeFaceFilterCanvas {
z-index: 10;
position: absolute;
max-height: 100%;
max-width: 100%;
left: 50%;
top: 50%;
width: 100vmin;
transform: translate(-50%, -50%) rotateY(180deg);
}
#threeCanvas{
z-index: 11;
position: absolute;
max-height: 100%;
max-width: 100%;
left: 50%;
top: 50%;
width: 100vmin;
transform: translate(-50%, -50%) rotateY(180deg);
}
@media (max-width: 787px) {
#jeeFaceFilterCanvas {
right: 0px;
top: 60px;
transform: rotateY(180deg);
}
}
</style>
</head>
<body>
<canvas width="600" height="600" id='jeeFaceFilterCanvas'></canvas>
</body>
</html>
/your-glb-file2.glb is a correct 3D file in the same directory as this. I made this code from the html file I linked, but src’d the script from URLs, unlike the real html file I linked.
The
// CREATE A CUBE
const loader = new GLTFLoader();
loader.load( '/your-glb-file2.glb', function ( gltf ) {
threeStuffs.faceObject.add( gltf.scene );
} );
near the end is the problem. The app correctly puts a cube over my face like I want it to when it’s
// CREATE A CUBE
const cubeGeometry = new THREE.BoxGeometry(1,1,1);
const cubeMaterial = new THREE.MeshNormalMaterial();
const threeCube = new THREE.Mesh(cubeGeometry, cubeMaterial);
threeCube.frustumCulled = false;
threeStuffs.faceObject.add(threeCube);
I tried constructing the loader both as a "sub"-function of three.js (THREE.GLTFLoader) and as a standalone one, but it isn't working either way, even though the app worked when the cube over my face was built with three.js's native functions.
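For anyone hitting the same errors: GLTFLoader.js in examples/jsm is an ES6 module, so a classic <script src=...> tag defines neither GLTFLoader nor THREE.GLTFLoader; it has to be imported from a <script type="module">. (Also, raw.githubusercontent.com serves files as text/plain, which browsers refuse to execute as scripts at all.) A minimal sketch, assuming the jsDelivr copy of the same r120 release; at r120 the jsm loaders still reference three.module.js through relative paths, so no import map is needed:

<script type="module">
import { GLTFLoader } from 'https://cdn.jsdelivr.net/npm/three@0.120.1/examples/jsm/loaders/GLTFLoader.js';

// Expose the loader to the classic (non-module) scripts on the page:
window.GLTFLoader = GLTFLoader;

// Later, inside init_threeScene(spec):
// const loader = new GLTFLoader();
// loader.load('/your-glb-file2.glb', (gltf) => threeStuffs.faceObject.add(gltf.scene));
</script>

Note that the module import pulls in three.module.js alongside the inlined three.min.js, so two copies of the library end up on the page; moving the whole app to module imports avoids that.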

How to use three.js to create a boundary for a Mesh loaded by FBXLoader

The created boundary's scale and rotation are totally different from the imported FBX model.
Hi, I have loaded an FBX model into the scene with FBXLoader.
const addFbxModel = (modelName: string, position: Vector3) => {
  const fbxLoader = new FBXLoader();
  fbxLoader.load(
    `../../src/assets/models/${modelName}.fbx`,
    (fbxObject: Object3D) => {
      fbxObject.position.set(position.x, position.y, position.z);
      console.log(fbxObject.scale);
      fbxObject.scale.set(0.01, 0.01, 0.01);
      const material = new MeshBasicMaterial({ color: 0x008080 });
      fbxObject.traverse((object) => {
        if (object instanceof Mesh) {
          object.material = material;
        }
      });
      scene.add(fbxObject);
      updateRenderer();
      updateCamera();
      render();
    },
    (xhr) => {
      console.log((xhr.loaded / xhr.total) * 100 + "% loaded");
    },
    (error) => {
      console.log(error);
    }
  );
};
And now I want to add a click handler on this model which will highlight it by showing its bounding box.
const onclick = (event: MouseEvent) => {
  event.preventDefault();
  if (renderBox.value) {
    const mouse = new Vector2();
    mouse.x = (event.offsetX / renderBox.value.clientWidth) * 2 - 1;
    mouse.y = -(event.offsetY / renderBox.value.clientHeight) * 2 + 1;
    raycaster.setFromCamera(mouse, camera);
    const intersects = raycaster.intersectObjects(scene.children, true);
    if (intersects.length > 0) {
      const intersect = intersects[0];
      const object = intersect.object;
      createBoundary(object);
    }
  }
};

const createBoundary = (object: Object3D) => {
  if (object instanceof Mesh) {
    console.log(object.geometry, object.scale, object.rotation);
    const edges = new EdgesGeometry(object.geometry);
    const material = new LineBasicMaterial({ color: 0xffffff });
    const wireframe = new LineSegments(edges, material);
    wireframe.scale.copy(object.scale);
    wireframe.rotation.copy(object.rotation);
    console.log(wireframe.scale);
    scene.add(wireframe);
  }
};
But now the boundary's scale and rotation are totally different from the FBX model.
Also, the boundary is too complex; is it possible to create one that only includes the outline and the top points?
Thank you. :)
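Incidentally, the mismatch happens because the wireframe copies only the mesh's local scale and rotation, ignoring the transforms inherited from the FBX root (which is scaled by 0.01 here). A minimal sketch using the built-in BoxHelper, which works in world space, so nothing has to be copied by hand, and which draws just a simple outline box:

import { BoxHelper } from "three";

const createBoundary = (object) => {
  // BoxHelper computes a world-axis-aligned bounding box for the whole
  // subtree, so the inherited FBX scale/rotation is already baked in:
  const helper = new BoxHelper(object, 0xffffff);
  scene.add(helper);
  // Call helper.update() if the object is transformed afterwards.
};

If you prefer the EdgesGeometry look, applying the mesh's full world transform (wireframe.applyMatrix4(object.matrixWorld)) instead of copying only the local scale and rotation also fixes the mismatch.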

Make camera fly to object and fit to view

I am struggling a bit to solve this fit-to-view problem. I want the camera to fly to an object and center it on the screen. Currently I have just got the camera to move to the object's position.
private fitToView = (objSelected: Object3D | undefined) => {
  if (objSelected) {
    const coord = {
      x: this.camera.position.x,
      y: this.camera.position.y,
      z: this.camera.position.z,
    };
    this.orbitControl.enabled = false;
    const worldPosition = new Vector3();
    objSelected.getWorldPosition(worldPosition);
    const newCoord = {
      x: worldPosition.x,
      y: worldPosition.y + 2,
      z: worldPosition.z,
    };
    const tweenCamera = new Tween(coord)
      .to(newCoord, 1500)
      .easing(Easing.Quadratic.InOut)
      .onUpdate(() => {
        this.camera.position.set(coord.x, coord.y, coord.z);
      })
      .onComplete(() => {
        this.orbitControl.enabled = true;
        this.orbitControl.update();
      });
    tweenCamera.start();
  }
};
This picture shows the fit view I expect after clicking the object.
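A common way to get the actual "fit to view" distance is to derive it from the object's bounding sphere and the camera's field of view, then fly towards a point on the current view axis. A minimal sketch, assuming this.camera is a PerspectiveCamera and the Tween setup above (for very wide or narrow viewports you would also take camera.aspect into account):

import { Box3, MathUtils, Sphere, Vector3 } from "three";

const computeFitCoord = (objSelected, camera) => {
  // Distance at which the object's bounding sphere fills the vertical FoV:
  const sphere = new Box3().setFromObject(objSelected).getBoundingSphere(new Sphere());
  const fov = MathUtils.degToRad(camera.fov);
  const distance = sphere.radius / Math.sin(fov / 2);
  // Keep the current view direction, but aim at the object's center:
  const direction = new Vector3().subVectors(camera.position, sphere.center).normalize();
  return sphere.center.clone().addScaledVector(direction, distance);
};

Feeding the returned vector into the existing tween as newCoord, and tweening this.orbitControl.target towards sphere.center at the same time, should end with the object centered and fully in view.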

How to add a lightmap in three.js?

I baked a lightmap in Blender 2.8 (Bake type: Combined; Influence: "Direct", "Indirect", "Diffuse").
After that, I tried to add the lightmap to part of my model.
function initColor(parent, type, mtl) {
  parent.traverse(o => {
    if (o.isMesh) {
      if (o.name.includes(type)) {
        o.material = mtl;
      }
    }
  });
}

var lightmapTexture = THREE.ImageUtils.loadTexture("images/FrontLightmap.png");
var front_material = new THREE.MeshBasicMaterial({ lightMap: lightmapTexture });

const INITIAL_MAP = [
  { childID: "Front", mtl: front_material }
];
for (let object of INITIAL_MAP) {
  initColor(theModel, object.childID, object.mtl);
}
But the result is a black material (front part).
Is it possible to use a lightmap and different material colors separately at the same time?
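In three.js the lightMap is sampled from a second UV channel (the uv2 attribute on BufferGeometry in versions of that era), so a lightmapped material on geometry without uv2 typically renders black; and since the lightmap multiplies with the material's color/map, using a lightmap together with different base colors does work. A minimal sketch of the usual fix, reusing the first UV set (the base color is just an example; older releases use addAttribute instead of setAttribute):

const lightmapTexture = new THREE.TextureLoader().load("images/FrontLightmap.png");
const front_material = new THREE.MeshBasicMaterial({
  color: 0xcccccc, // example base color; the lightmap multiplies on top of it
  lightMap: lightmapTexture
});

theModel.traverse(o => {
  if (o.isMesh && o.name.includes("Front") && !o.geometry.attributes.uv2) {
    // lightMap needs a second UV set; reuse the first one if none was exported:
    o.geometry.setAttribute("uv2", o.geometry.attributes.uv);
  }
});

Then apply front_material via initColor as before.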

Editing part sizes of a 3d model

I want to develop a web app for entering the measurements of a man and displaying a 3D model with those measurements. I have chosen three.js to start with, and I downloaded a 3D model named standard-male-figure from clara.io. Here is my code to display the human model.
import React, { Component } from "react";
import PropTypes from "prop-types";
import withStyles from "@material-ui/core/styles/withStyles";
import * as THREE from "three-full";

const styles = (/*theme*/) => ({
});

class ThreeDView extends Component {
  constructor(props) {
    super(props);
    this.start = this.start.bind(this);
    this.stop = this.stop.bind(this);
    this.renderScene = this.renderScene.bind(this);
    this.animate = this.animate.bind(this);
  }
  componentDidMount() {
    const width = this.mount.clientWidth;
    const height = this.mount.clientHeight;
    // ADD SCENE
    this.scene = new THREE.Scene();
    // ADD CAMERA
    this.camera = new THREE.PerspectiveCamera(100, 1);
    this.camera.position.z = 12;
    this.camera.position.y = 0;
    this.camera.position.x = 0;
    // ADD RENDERER
    this.renderer = new THREE.WebGLRenderer({ antialias: true });
    this.renderer.setClearColor("#f0f0f0");
    this.renderer.setSize(width, height);
    this.mount.appendChild(this.renderer.domElement);
    // MOUSE ROTATION
    this.orbit = new THREE.OrbitControls(this.camera, this.renderer.domElement);
    this.orbit.update();
    // ADD LIGHTS
    this.light = new THREE.PointLight(0xffffff, 1.3);
    this.light.position.z = 10;
    this.light.position.y = 20;
    this.scene.add(this.light);
    // ADD MAN FIGURE
    const loader = new THREE.ColladaLoader();
    loader.load("/models/standard-male-figure.dae", (manFigure) => {
      this.man = manFigure;
      this.man.name = "man-figure";
      this.man.scene.position.y = -10;
      this.scene.add(this.man.scene);
    }, undefined, () => alert("Loading failed"));
    this.start();
  }
  componentWillUnmount() {
    this.stop();
    this.mount.removeChild(this.renderer.domElement);
  }
  start() {
    if (!this.frameId) {
      this.frameId = requestAnimationFrame(this.animate);
    }
  }
  stop() {
    cancelAnimationFrame(this.frameId);
  }
  animate() {
    this.renderScene();
    this.frameId = window.requestAnimationFrame(this.animate);
  }
  renderScene() {
    this.orbit.update();
    this.light.position.z = this.camera.position.z;
    this.light.position.y = this.camera.position.y + 20;
    this.light.position.x = this.camera.position.x;
    this.renderer.render(this.scene, this.camera);
  }
  render() {
    return (
      <div style={{ height: "640px" }} ref={(mount) => { this.mount = mount; }}>
      </div>
    );
  }
}

ThreeDView.propTypes = {
  values: PropTypes.object
};

/*
all values in inches
values = {
  heightOfHand: 10,
  , etc..
}
*/

export default withStyles(styles)(ThreeDView);
values is the measurements that the user is entering. I have no idea how to start updating the 3D model with these measurements. Please give me a starting point or any advice to complete this. Thank you!
First, you can get the current size and scale of your man:
const box = new THREE.Box3().setFromObject(this.man)
const currentObjectSize = box.getSize(new THREE.Vector3()) // recent three.js releases require a target vector
const currentObjectScale = this.man.scale
and when a user updates the size value (newSize), you can calculate a new scale for your man:
const newScale = new THREE.Vector3(
  currentObjectScale.x * newSize.x / currentObjectSize.x,
  currentObjectScale.y * newSize.y / currentObjectSize.y,
  currentObjectScale.z * newSize.z / currentObjectSize.z
)
and update your man with this scale
this.man.scale.set(newScale.x, newScale.y, newScale.z)
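If single measurements later have to drive single body parts rather than the whole figure, the same box-based approach can be applied per named node, provided the Collada scene graph exposes the parts as separate objects. A hedged sketch; the node name and targetLength are hypothetical:

// "LeftForearm" is a made-up node name: check the loaded scene graph
// (e.g. console.log(this.man.scene)) for the real names in your file.
const part = this.man.scene.getObjectByName("LeftForearm");
if (part) {
  const partSize = new THREE.Box3().setFromObject(part).getSize(new THREE.Vector3());
  // targetLength must already be converted from inches to scene units:
  part.scale.y *= targetLength / partSize.y;
}

Keep in mind that non-uniform scaling of rigid parts only goes so far; for realistic body-measurement avatars, a rigged model with morph targets (shape keys) is the usual approach.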
