How do you correctly include three.js and GLTFLoader.js?

I'm rewriting my question because Stack Overflow flagged my original post as spam (because I included 6000+ lines of code). I'm trying to make a web app that tracks the user's face and puts a 3D object over the face like a "filter". The thing is, I don't want this app to have any external dependencies except one (at least for scripts/packages/modules). Therefore, I copied minified three.js (from https://cdn.jsdelivr.net/npm/three@0.120.1/build/three.min.js) inline into the HTML file, as well as the GLTFLoader.js script from examples/jsm/loaders/ in the GitHub repository.
I started with Jeeliz's face filter repository, and I'm trying to implement GLTFLoader.js, but when I use
const loader = new GLTFLoader();
it gives me a "GLTFLoader is not defined" error, along with console messages saying the ES6 module was not imported.
When I use
const loader = new THREE.GLTFLoader();
it says it's not a constructor, so I lean towards the former being the correct way to construct the loader.
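For context, here is a minimal sketch of the two inclusion styles involved, assuming three r0.120 (the CDN URLs are illustrative): the examples/js build attaches the loader to the THREE namespace, while the examples/jsm build is an ES module and can only be imported from a script with type="module".

<!-- Option A: non-module build; defines THREE.GLTFLoader as a global -->
<script src="https://cdn.jsdelivr.net/npm/three@0.120.1/examples/js/loaders/GLTFLoader.js"></script>
<script>
  const loader = new THREE.GLTFLoader();
</script>
<!-- Option B: ES module build; must be imported, not loaded via a plain script tag -->
<script type="module">
  import { GLTFLoader } from 'https://cdn.jsdelivr.net/npm/three@0.120.1/examples/jsm/loaders/GLTFLoader.js';
  const loader = new GLTFLoader();
</script>

A plain <script src> pointing at the jsm file fails to execute (import/export statements are only valid inside modules), which would explain both the "GLTFLoader is not defined" error and the ES6 module console messages.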
I appreciate any help in advance! I mostly code in Python or C++ and I'm still a beginner, but I've tinkered with JavaScript a few times, so I thought I could handle this. I posted this question once before, but Stack Overflow rejected it as spam, so I'm including a link to the HTML file:
https://www.dropbox.com/s/p86xchlldr7j2vf/index-mine.html?dl=0
Code:
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
<meta http-equiv="content-language" content="en-EN" />
<title>Filter Web App</title>
<!-- INCLUDE FACEFILTER SCRIPT -->
<script src='https://raw.githubusercontent.com/jeeliz/jeelizFaceFilter/master/dist/jeelizFaceFilter.js'>
</script>
<!-- INCLUDE THREE.JS -->
<script src='https://cdn.jsdelivr.net/npm/three@0.120.1/build/three.min.js'>
</script>
<!-- INCLUDE RESIZER -->
<script src='https://raw.githubusercontent.com/jeeliz/jeelizFaceFilter/master/helpers/JeelizResizer.js'>
</script>
<!-- INCLUDE GLTFLOADER.JS -->
<script src='https://raw.githubusercontent.com/mrdoob/three.js/dev/examples/jsm/loaders/GLTFLoader.js'>
</script>
<!-- INCLUDE JEELIZ THREE.JS HELPER -->
<script>
/*
Helper for Three.js
*/
const punkThreeHelper = (function(){
// internal settings:
const _settings = {
rotationOffsetX: 0.0, // negative -> look upper. in radians
pivotOffsetYZ: [0.2, 0.6],// YZ of the distance between the center of the cube and the pivot
detectionThreshold: 0.8, // sensitivity, between 0 and 1. Lower -> more sensitive
detectionHysteresis: 0.02,
//tweakMoveYRotateX: 0,//0.5, // tweak value: move detection window along Y axis when rotate the face around X (look up <-> down)
cameraMinVideoDimFov: 35 // Field of View for the smallest dimension of the video in degrees
};
// private vars:
let _threeRenderer = null,
_threeScene = null,
_threeVideoMesh = null,
_threeVideoTexture = null,
_threeTranslation = null;
let _maxFaces = -1,
_isMultiFaces = false,
_detectCallback = null,
_isVideoTextureReady = false,
_isSeparateThreeCanvas = false,
_faceFilterCv = null,
_videoElement = null,
_isDetected = false,
_scaleW = 1,
_canvasAspectRatio = -1;
const _threeCompositeObjects = [];
let _gl = null,
_glVideoTexture = null,
_glShpCopyCut = null,
_glShpCopyCutVideoMatUniformPointer = null;
let _videoTransformMat2 = null;
// private funcs:
function destroy(){
_isVideoTextureReady = false;
_threeCompositeObjects.splice(0);
if (_threeVideoTexture){
_threeVideoTexture.dispose();
_threeVideoTexture = null;
}
}
function create_threeCompositeObjects(){
for (let i=0; i<_maxFaces; ++i){
// COMPOSITE OBJECT WHICH WILL TRACK A DETECTED FACE
const threeCompositeObject = new THREE.Object3D();
threeCompositeObject.frustumCulled = false;
threeCompositeObject.visible = false;
_threeCompositeObjects.push(threeCompositeObject);
_threeScene.add(threeCompositeObject);
}
}
function create_videoScreen(){
const videoScreenVertexShaderSource = "attribute vec2 position;\n\
uniform mat2 videoTransformMat2;\n\
varying vec2 vUV;\n\
void main(void){\n\
gl_Position = vec4(position, 0., 1.);\n\
vUV = 0.5 + videoTransformMat2 * position;\n\
}";
const videoScreenFragmentShaderSource = "precision lowp float;\n\
uniform sampler2D samplerVideo;\n\
varying vec2 vUV;\n\
void main(void){\n\
gl_FragColor = texture2D(samplerVideo, vUV);\n\
}";
if (_isSeparateThreeCanvas){
const compile_shader = function(source, type, typeString) {
const glShader = _gl.createShader(type);
_gl.shaderSource(glShader, source);
_gl.compileShader(glShader);
if (!_gl.getShaderParameter(glShader, _gl.COMPILE_STATUS)) {
alert("ERROR IN " + typeString + " SHADER: " + _gl.getShaderInfoLog(glShader));
return null;
}
return glShader;
};
const glShaderVertex = compile_shader(videoScreenVertexShaderSource, _gl.VERTEX_SHADER, 'VERTEX');
const glShaderFragment = compile_shader(videoScreenFragmentShaderSource, _gl.FRAGMENT_SHADER, 'FRAGMENT');
_glShpCopyCut = _gl.createProgram();
_gl.attachShader(_glShpCopyCut, glShaderVertex);
_gl.attachShader(_glShpCopyCut, glShaderFragment);
_gl.linkProgram(_glShpCopyCut);
const samplerVideo = _gl.getUniformLocation(_glShpCopyCut, 'samplerVideo');
_glShpCopyCutVideoMatUniformPointer = _gl.getUniformLocation(_glShpCopyCut, 'videoTransformMat2');
return;
}
// init video texture with red:
_threeVideoTexture = new THREE.DataTexture( new Uint8Array([255,0,0]), 1, 1, THREE.RGBFormat);
_threeVideoTexture.needsUpdate = true;
// CREATE THE VIDEO BACKGROUND:
const videoMaterial = new THREE.RawShaderMaterial({
depthWrite: false,
depthTest: false,
vertexShader: videoScreenVertexShaderSource,
fragmentShader: videoScreenFragmentShaderSource,
uniforms:{
samplerVideo: {value: _threeVideoTexture},
videoTransformMat2: {
value: _videoTransformMat2
}
}
});
const videoGeometry = new THREE.BufferGeometry()
const videoScreenCorners = new Float32Array([-1,-1, 1,-1, 1,1, -1,1]);
// handle both new and old THREE.js versions:
const setVideoGeomAttribute = (videoGeometry.setAttribute || videoGeometry.addAttribute).bind(videoGeometry);
setVideoGeomAttribute( 'position', new THREE.BufferAttribute( videoScreenCorners, 2 ) );
videoGeometry.setIndex(new THREE.BufferAttribute(new Uint16Array([0,1,2, 0,2,3]), 1));
_threeVideoMesh = new THREE.Mesh(videoGeometry, videoMaterial);
that.apply_videoTexture(_threeVideoMesh);
_threeVideoMesh.renderOrder = -1000; // render first
_threeVideoMesh.frustumCulled = false;
_threeScene.add(_threeVideoMesh);
} //end create_videoScreen()
function detect(detectState){
_threeCompositeObjects.forEach(function(threeCompositeObject, i){
_isDetected = threeCompositeObject.visible;
const ds = detectState[i];
if (_isDetected && ds.detected < _settings.detectionThreshold-_settings.detectionHysteresis){
// DETECTION LOST
if (_detectCallback) _detectCallback(i, false);
threeCompositeObject.visible = false;
} else if (!_isDetected && ds.detected > _settings.detectionThreshold+_settings.detectionHysteresis){
// FACE DETECTED
if (_detectCallback) _detectCallback(i, true);
threeCompositeObject.visible = true;
}
}); //end loop on all detection slots
}
function update_poses(ds, threeCamera){
// tan( <horizontal FoV> / 2 ):
const halfTanFOVX = Math.tan(threeCamera.aspect * threeCamera.fov * Math.PI/360); //tan(<horizontal FoV>/2), in radians (threeCamera.fov is vertical FoV)
_threeCompositeObjects.forEach(function(threeCompositeObject, i){
if (!threeCompositeObject.visible) return;
const detectState = ds[i];
// tweak Y position depending on rx:
//const tweak = _settings.tweakMoveYRotateX * Math.tan(detectState.rx);
const cz = Math.cos(detectState.rz), sz = Math.sin(detectState.rz);
// relative width of the detection window (1-> whole width of the detection window):
const W = detectState.s * _scaleW;
// distance between the front face of the cube and the camera:
const DFront = 1 / ( 2 * W * halfTanFOVX );
// D is the distance between the center of the unit cube and the camera:
const D = DFront + 0.5;
// coords in 2D of the center of the detection window in the viewport:
const xv = detectState.x * _scaleW;
const yv = detectState.y * _scaleW;
// coords in 3D of the center of the cube (in the view coordinates system):
const z = -D; // minus because view coordinate system Z goes backward
const x = xv * D * halfTanFOVX;
const y = yv * D * halfTanFOVX / _canvasAspectRatio;
// set position before pivot:
threeCompositeObject.position.set(-sz*_settings.pivotOffsetYZ[0], -cz*_settings.pivotOffsetYZ[0], -_settings.pivotOffsetYZ[1]);
// set rotation and apply it to position:
threeCompositeObject.rotation.set(detectState.rx+_settings.rotationOffsetX, detectState.ry, detectState.rz, "ZYX");
threeCompositeObject.position.applyEuler(threeCompositeObject.rotation);
// add translation part:
_threeTranslation.set(x, y+_settings.pivotOffsetYZ[0], z+_settings.pivotOffsetYZ[1]);
threeCompositeObject.position.add(_threeTranslation);
}); //end loop on composite objects
}
//public methods:
const that = {
// launched with the same spec object as callbackReady. Set spec.threeCanvasId to the ID of the three.js canvas to enable 2-canvas mode:
init: function(spec, detectCallback){
destroy();
_maxFaces = spec.maxFacesDetected;
_glVideoTexture = spec.videoTexture;
_videoTransformMat2 = spec.videoTransformMat2;
_gl = spec.GL;
_faceFilterCv = spec.canvasElement;
_isMultiFaces = (_maxFaces>1);
_videoElement = spec.videoElement;
// enable 2 canvas mode if necessary:
let threeCanvas = null;
if (spec.threeCanvasId){
_isSeparateThreeCanvas = true;
// adjust the three.js canvas size to the face filter canvas size:
threeCanvas = document.getElementById(spec.threeCanvasId);
threeCanvas.setAttribute('width', _faceFilterCv.width);
threeCanvas.setAttribute('height', _faceFilterCv.height);
} else {
threeCanvas = _faceFilterCv;
}
if (typeof(detectCallback) !== 'undefined'){
_detectCallback = detectCallback;
}
// init THREE.JS context:
_threeRenderer = new THREE.WebGLRenderer({
context: (_isSeparateThreeCanvas) ? null : _gl,
canvas: threeCanvas,
alpha: (_isSeparateThreeCanvas || spec.alpha) ? true : false,
preserveDrawingBuffer: true // to make image capture possible
});
_threeScene = new THREE.Scene();
_threeTranslation = new THREE.Vector3();
create_threeCompositeObjects();
create_videoScreen();
// handle device orientation change:
window.addEventListener('orientationchange', function(){
setTimeout(punkfacefilter.resize, 1000);
}, false);
const returnedDict = {
videoMesh: _threeVideoMesh,
renderer: _threeRenderer,
scene: _threeScene
};
if (_isMultiFaces){
returnedDict.faceObjects = _threeCompositeObjects
} else {
returnedDict.faceObject = _threeCompositeObjects[0];
}
return returnedDict;
}, //end that.init()
detect: function(detectState){
const ds = (_isMultiFaces) ? detectState : [detectState];
// update detection states:
detect(ds);
},
get_isDetected: function() {
return _isDetected;
},
render: function(detectState, threeCamera){
const ds = (_isMultiFaces) ? detectState : [detectState];
// update detection states then poses:
detect(ds);
update_poses(ds, threeCamera);
if (_isSeparateThreeCanvas){
// render the video texture on the faceFilter canvas:
_gl.viewport(0, 0, _faceFilterCv.width, _faceFilterCv.height);
_gl.useProgram(_glShpCopyCut);
_gl.uniformMatrix2fv(_glShpCopyCutVideoMatUniformPointer, false, _videoTransformMat2);
_gl.activeTexture(_gl.TEXTURE0);
_gl.bindTexture(_gl.TEXTURE_2D, _glVideoTexture);
_gl.drawElements(_gl.TRIANGLES, 3, _gl.UNSIGNED_SHORT, 0);
} else {
// reinitialize the state of THREE.JS because JEEFACEFILTER has changed things:
// -> can be VERY costly !
_threeRenderer.state.reset();
}
// trigger the render of the THREE.JS SCENE:
_threeRenderer.render(_threeScene, threeCamera);
},
sortFaces: function(bufferGeometry, axis, isInv){ // sort faces along an axis
// Useful when a bufferGeometry has alpha: we should render the last faces first
const axisOffset = {X:0, Y:1, Z:2}[axis.toUpperCase()];
const sortWay = (isInv) ? -1 : 1;
// fill the faces array:
const nFaces = bufferGeometry.index.count/3;
const faces = new Array(nFaces);
for (let i=0; i<nFaces; ++i){
faces[i] = [bufferGeometry.index.array[3*i], bufferGeometry.index.array[3*i+1], bufferGeometry.index.array[3*i+2]];
}
// compute centroids:
const aPos = bufferGeometry.attributes.position.array;
const centroids = faces.map(function(face, faceIndex){
return [
(aPos[3*face[0]]+aPos[3*face[1]]+aPos[3*face[2]])/3, // X
(aPos[3*face[0]+1]+aPos[3*face[1]+1]+aPos[3*face[2]+1])/3, // Y
(aPos[3*face[0]+2]+aPos[3*face[1]+2]+aPos[3*face[2]+2])/3, // Z
face
];
});
// sort centroids:
centroids.sort(function(ca, cb){
return (ca[axisOffset]-cb[axisOffset]) * sortWay;
});
// reorder bufferGeometry faces:
centroids.forEach(function(centroid, centroidIndex){
const face = centroid[3];
bufferGeometry.index.array[3*centroidIndex] = face[0];
bufferGeometry.index.array[3*centroidIndex+1] = face[1];
bufferGeometry.index.array[3*centroidIndex+2] = face[2];
});
}, //end sortFaces
get_threeVideoTexture: function(){
return _threeVideoTexture;
},
apply_videoTexture: function(threeMesh){
if (_isVideoTextureReady){
return;
}
threeMesh.onAfterRender = function(){
// Replace _threeVideoTexture.__webglTexture by the real video texture:
try {
_threeRenderer.properties.update(_threeVideoTexture, '__webglTexture', _glVideoTexture);
_threeVideoTexture.magFilter = THREE.LinearFilter;
_threeVideoTexture.minFilter = THREE.LinearFilter;
_isVideoTextureReady = true;
} catch(e){
console.log('WARNING in punkThreeHelper: the glVideoTexture is not fully initialized');
}
delete(threeMesh.onAfterRender);
};
},
// create an occluder, IE a transparent object which writes on the depth buffer:
create_threejsOccluder: function(occluderURL, callback){
const occluderMesh = new THREE.Mesh();
new THREE.BufferGeometryLoader().load(occluderURL, function(occluderGeometry){
const mat = new THREE.ShaderMaterial({
vertexShader: THREE.ShaderLib.basic.vertexShader,
fragmentShader: "precision lowp float;\n void main(void){\n gl_FragColor=vec4(1.,0.,0.,1.);\n }",
uniforms: THREE.ShaderLib.basic.uniforms,
colorWrite: false
});
occluderMesh.renderOrder = -1; //render first
occluderMesh.material = mat;
occluderMesh.geometry = occluderGeometry;
if (typeof(callback)!=='undefined' && callback) callback(occluderMesh);
});
return occluderMesh;
},
set_pivotOffsetYZ: function(pivotOffset) {
_settings.pivotOffsetYZ = pivotOffset;
},
create_camera: function(zNear, zFar){
const threeCamera = new THREE.PerspectiveCamera(1, 1, (zNear) ? zNear : 0.1, (zFar) ? zFar : 100);
that.update_camera(threeCamera);
return threeCamera;
},
update_camera: function(threeCamera){
// compute aspectRatio:
const canvasElement = _threeRenderer.domElement;
const cvw = canvasElement.width;
const cvh = canvasElement.height;
_canvasAspectRatio = cvw / cvh;
// compute vertical field of view:
const vw = _videoElement.videoWidth;
const vh = _videoElement.videoHeight;
const videoAspectRatio = vw / vh;
const fovFactor = (vh > vw) ? (1.0 / videoAspectRatio) : 1.0;
const fov = _settings.cameraMinVideoDimFov * fovFactor;
console.log('INFO in punkThreeHelper - update_camera(): Estimated vertical video FoV is', fov);
// compute X and Y offsets in pixels:
let scale = 1.0;
if (_canvasAspectRatio > videoAspectRatio) {
// the canvas is more in landscape format than the video, so we crop top and bottom margins:
scale = cvw / vw;
} else {
// the canvas is more in portrait format than the video, so we crop right and left margins:
scale = cvh / vh;
}
const cvws = vw * scale, cvhs = vh * scale;
const offsetX = (cvws - cvw) / 2.0;
const offsetY = (cvhs - cvh) / 2.0;
_scaleW = cvw / cvws;
// apply parameters:
threeCamera.aspect = _canvasAspectRatio;
threeCamera.fov = fov;
console.log('INFO in punkThreeHelper.update_camera(): camera vertical estimated FoV is', fov, 'deg');
threeCamera.setViewOffset(cvws, cvhs, offsetX, offsetY, cvw, cvh);
threeCamera.updateProjectionMatrix();
// update drawing area:
_threeRenderer.setSize(cvw, cvh, false);
_threeRenderer.setViewport(0, 0, cvw, cvh);
}, //end update_camera()
resize: function(w, h, threeCamera){
_threeRenderer.domElement.width = w;
_threeRenderer.domElement.height = h;
punkfacefilter.resize();
if (threeCamera){
that.update_camera(threeCamera);
}
}
}
return that;
})();
// Export ES6 module:
try {
module.exports = punkThreeHelper;
} catch(e){
console.log('punkThreeHelper ES6 Module not exported');
window.punkThreeHelper = punkThreeHelper;
}
</script>
<!-- INCLUDE DEMO SCRIPT -->
<script>
let THREECAMERA = null;
// callback: launched if a face is detected or lost.
function detect_callback(faceIndex, isDetected) {
if (isDetected) {
console.log('INFO in detect_callback(): DETECTED');
} else {
console.log('INFO in detect_callback(): LOST');
}
}
// build the 3D. called once when punk Face Filter is OK
function init_threeScene(spec) {
const threeStuffs = punkThreeHelper.init(spec, detect_callback);
// CREATE A CUBE
const loader = new GLTFLoader();
loader.load( '/your-glb-file2.glb', function ( gltf ) {
threeStuffs.faceObject.add( gltf.scene );
} );
//CREATE THE CAMERA
THREECAMERA = punkThreeHelper.create_camera();
}
// entry point:
function main(){
punkResizer.size_canvas({
canvasId: 'jeeFaceFilterCanvas',
callback: function(isError, bestVideoSettings){
init_faceFilter(bestVideoSettings);
}
})
}
function init_faceFilter(videoSettings){
punkfacefilter.init({
followZRot: true,
canvasId: 'jeeFaceFilterCanvas',
NNCPath: '/', // root of NN_DEFAULT.json file
maxFacesDetected: 1,
callbackReady: function(errCode, spec){
if (errCode){
console.log('AN ERROR HAPPENS. ERR =', errCode);
return;
}
console.log('INFO: punkfacefilter IS READY');
init_threeScene(spec);
},
// called at each render iteration (drawing loop):
callbackTrack: function(detectState){
punkThreeHelper.render(detectState, THREECAMERA);
}
}); //end punkfacefilter.init call
}
window.addEventListener('load', main);
</script>
<style>
a {color: #eee; text-decoration: none}
a:hover {color: blue;}
body {overflow: auto; overflow-y: auto;
background-color: white;
background-attachment: fixed;
background-position: center;
background-size: contain;
margin: 0px;}
#jeeFaceFilterCanvas {
z-index: 10;
position: absolute;
max-height: 100%;
max-width: 100%;
left: 50%;
top: 50%;
width: 100vmin;
transform: translate(-50%, -50%) rotateY(180deg);
}
#threeCanvas{
z-index: 11;
position: absolute;
max-height: 100%;
max-width: 100%;
left: 50%;
top: 50%;
width: 100vmin;
transform: translate(-50%, -50%) rotateY(180deg);
}
@media (max-width: 787px) {
#jeeFaceFilterCanvas {
right: 0px;
top: 60px;
transform: rotateY(180deg);
}
}
</style>
</head>
<body>
<canvas width="600" height="600" id='jeeFaceFilterCanvas'></canvas>
</body>
</html>
/your-glb-file2.glb is a valid 3D file in the same directory as this page. I made this code from the HTML file I linked, but loaded the scripts from URLs via src, unlike the real HTML file I linked.
The
// CREATE A CUBE
const loader = new GLTFLoader();
loader.load( '/your-glb-file2.glb', function ( gltf ) {
threeStuffs.faceObject.add( gltf.scene );
} );
near the end is the problem. The app correctly puts a cube over my face, like I want it to, when it is instead:
// CREATE A CUBE
const cubeGeometry = new THREE.BoxGeometry(1,1,1);
const cubeMaterial = new THREE.MeshNormalMaterial();
const threeCube = new THREE.Mesh(cubeGeometry, cubeMaterial);
threeCube.frustumCulled = false;
threeStuffs.faceObject.add(threeCube);
I tried constructing the loader both as a "sub"function of three.js (THREE.GLTFLoader) and as a standalone constructor, but it doesn't work either way, even though the cube created with three.js's native functions tracks my face correctly.

Related

How to use three.js to create a boundary for a Mesh loaded by FBXLoader

The created boundary's scale and rotation are totally different from the imported FBX model.
Hi, I have loaded an FBX model into the scene with FBXLoader:
const addFbxModel = (modelName: string, position: Vector3) => {
const fbxLoader = new FBXLoader();
fbxLoader.load(
`../../src/assets/models/${modelName}.fbx`,
(fbxObject: Object3D) => {
fbxObject.position.set(position.x, position.y, position.z);
console.log(fbxObject.scale);
fbxObject.scale.set(0.01, 0.01, 0.01);
const material = new MeshBasicMaterial({ color: 0x008080 });
fbxObject.traverse((object) => {
if (object instanceof Mesh) {
object.material = material;
}
});
scene.add(fbxObject);
updateRenderer();
updateCamera();
render();
},
(xhr) => {
console.log((xhr.loaded / xhr.total) * 100 + "% loaded");
},
(error) => {
console.log(error);
}
);
};
And now I want to add a click handler on this model which will highlight it by showing its bounding box.
const onclick = (event: MouseEvent) => {
event.preventDefault();
if (renderBox.value) {
const mouse = new Vector2();
mouse.x = (event.offsetX / renderBox.value.clientWidth) * 2 - 1;
mouse.y = -(event.offsetY / renderBox.value.clientHeight) * 2 + 1;
raycaster.setFromCamera(mouse, camera);
const intersects = raycaster.intersectObjects(scene.children, true);
if (intersects.length > 0) {
const intersect = intersects[0];
const object = intersect.object;
createBoundary(object);
}
}
};
const createBoundary = (object: Object3D) => {
if (object instanceof Mesh) {
console.log(object.geometry, object.scale, object.rotation);
const edges = new EdgesGeometry(object.geometry);
const material = new LineBasicMaterial({ color: 0xffffff });
const wireframe = new LineSegments(edges, material);
wireframe.scale.copy(object.scale);
wireframe.rotation.copy(object.rotation);
console.log(wireframe.scale);
scene.add(wireframe);
}
};
But now the boundary's scale and rotation are totally different from the FBX model.
Also, the boundary is too complex; is it possible to create one that only includes the outline and the top points?
Thank you. :)
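For the bounding-box part, a minimal sketch (assuming BoxHelper is imported from three like the other names above): BoxHelper computes the object's world-space bounding box and draws only its 12 edges, so the FBX model's scale and rotation are already accounted for.

const createSimpleBoundary = (object) => {
  // BoxHelper wraps the whole object hierarchy in one outline box:
  const boxHelper = new BoxHelper(object, 0xffffff);
  scene.add(boxHelper);
  // If the object later moves or animates, call boxHelper.update() to refit it.
};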

Three JS device motion glitch

I'm trying to get into WebXR programming. I'm making a simple wrapper that supports a VR headset and also a Google Cardboard style viewer for smartphones. I have the VR headset working well so far, but I have two issues when using a smartphone in landscape mode. Portrait mode works fine.
The camera breaks when going above the horizon. (fixed)
Tilting (rolling) the phone doesn't tilt the camera; instead it pans halfway left or right.
Codesandbox.io code: https://codesandbox.io/s/webxr-7vw5q6
Codesandbox.io app: https://7vw5q6.csb.app/
Update
I managed to fix the jumping and flipped image in landscape mode by adding some code.
if(rotType == "YZX")
{
if(orientation_g >= 0) {
screenOrientation = -90;
} else {
screenOrientation = 90;
orientation_a = orientation_a + 180;
}
}
However, I still have the issue when rolling the device left or right.
// Variables
// ---------
let camera, renderer, scene, loop;
let container;
let controls;
let controller1, controller2;
let teleportmarker, raycaster, INTERSECTION;
let baseReferenceSpace;
let tempMatrix = new THREE.Matrix4();
let effect;
let action;
// fakeVR
let fakeVR = false;
let orientation_a, orientation_b, orientation_g;
/* landscape fix test */
let alphaOffset = 0;
let screenOrientation = 0;
/* ====================================================================================================
* Controller
* ==================================================================================================== */
class Controller {
constructor(i) {
this.controllers = this.createController(i);
this.controllerGrip = this.createControllerGrip(i);
if(i == 0) { this.controllers.name = "Right"; }
if(i == 1) { this.controllers.name = "Left"; }
this.axes = new THREE.Vector2();
this.viewDirection = camera.getWorldDirection( new THREE.Vector3() );
const scope = this;
raycaster = new THREE.Raycaster();
this.initInputListenerXR();
this.group = new THREE.Group();
this.teleportmarker = new THREE.Mesh(
new THREE.RingGeometry(0.2, 0.25, 32).rotateX(-Math.PI / 2),
new THREE.MeshBasicMaterial({color: 0xFF00FF})
);
scene.add(this.teleportmarker);
}
createController(i) {
const controllers = renderer.xr.getController(i);
if (0) {
this.group.add(controllers);
controllers.visible = true;
}
return controllers;
}
createControllerGrip(i) {
const controllerModelFactory = new THREE.XRControllerModelFactory();
const controllerGrip = renderer.xr.getControllerGrip(i);
controllerGrip.add(controllerModelFactory.createControllerModel(controllerGrip));
return controllerGrip;
}
initInputListenerXR() {
const listenerFor = name => event => {
const cb = this._eventListeners[name];
if (cb) {
const uuid = event.target.uuid;
const cont = this.controllers;
if (cont && cont.uuid === uuid) cb(idx);
}
};
this._addSelectListener('selectstart', this.onSelectStart);
this._addSelectListener('selectend', this.onSelectEnd);
this._addSelectListener('connected', function(event) {
this.controllers.add(this.buildController(event.data));
this.controllers.children[0].visible = false;
});
this._addSelectListener( 'disconnected', function () {
this.controllers.remove(this.controllers.children[0]);
});
}
_addSelectListener(eventName, listener) {
this.controllers.addEventListener(eventName, listener.bind(this));
}
onSelectStart() {
console.log(this.controllers.name + ' was pressed.');
this.controllers.userData.isSelecting = true;
}
onSelectEnd() {
console.log(this.controllers.name + ' was released.');
this.controllers.userData.isSelecting = false;
if ( INTERSECTION ) {
const offsetPosition = { x: - INTERSECTION.x, y: - INTERSECTION.y, z: - INTERSECTION.z, w: 1 };
const offsetRotation = new THREE.Quaternion();
const transform = new XRRigidTransform( offsetPosition, offsetRotation );
const teleportSpaceOffset = baseReferenceSpace.getOffsetReferenceSpace( transform );
renderer.xr.setReferenceSpace( teleportSpaceOffset );
}
}
update() {
INTERSECTION = undefined;
if ( controller1.controllers.userData.isSelecting === true ) {
tempMatrix.identity().extractRotation( controller1.controllers.matrixWorld );
raycaster.ray.origin.setFromMatrixPosition( controller1.controllers.matrixWorld );
raycaster.ray.direction.set( 0, 0, - 1 ).applyMatrix4( tempMatrix );
const intersects = raycaster.intersectObjects([floor]);
if (intersects.length > 0) {
INTERSECTION = intersects[0].point;
}
}
else if ( controller2.controllers.userData.isSelecting === true ) {
tempMatrix.identity().extractRotation( controller2.controllers.matrixWorld );
raycaster.ray.origin.setFromMatrixPosition( controller2.controllers.matrixWorld );
raycaster.ray.direction.set( 0, 0, - 1 ).applyMatrix4( tempMatrix );
const intersects = raycaster.intersectObjects([floor]);
if (intersects.length > 0) {
INTERSECTION = intersects[0].point;
}
}
if (INTERSECTION) this.teleportmarker.position.copy(INTERSECTION);
this.teleportmarker.visible = INTERSECTION !== undefined;
}
// updateArc() {
// }
buildController(data) {
switch(data.targetRayMode) {
// case 'screen':
// return;
case 'tracked-pointer':
const geometry = new THREE.BufferGeometry();
geometry.setAttribute('position', new THREE.Float32BufferAttribute([0,0,0,0,0,-1],3));
geometry.setAttribute('color', new THREE.Float32BufferAttribute([0.5,0.5,0.5,0,0,0],3));
const material = new THREE.LineBasicMaterial({vertexColors:true,blending:THREE.AdditiveBlending});
return new THREE.Line(geometry,material);
case 'gaze':
const gaze_geometry = new THREE.RingGeometry(0.02,0.04,32).translate(0,0,-1);
const gaze_material = new THREE.MeshBasicMaterial({opacity:0.5,transparent:true});
return new THREE.Mesh(gaze_geometry,gaze_material);
}
}
setAction(button, functionName) {
}
action(button, functionName) {
}
}
/* ====================================================================================================
* Resizer
* ==================================================================================================== */
class Resizer {
constructor(container, camera, renderer) {
this.setSize(container, camera, renderer);
window.addEventListener('resize', () => {
this.setSize(container, camera, renderer);
this.onResize();
});
}
onResize() {
}
setSize(container, camera, renderer) {
camera.aspect = container.clientWidth / container.clientHeight;
camera.updateProjectionMatrix();
renderer.setSize(container.clientWidth, container.clientHeight);
renderer.setPixelRatio(window.devicePixelRatio);
}
}
/* ====================================================================================================
* Camera
* ==================================================================================================== */
class Camera extends THREE.PerspectiveCamera {
constructor() {
super();
this.onCreate();
}
onCreate() {
new THREE.PerspectiveCamera(35, 1, 0.1, 100);
}
}
/* ====================================================================================================
* Model
* ==================================================================================================== */
class Model extends THREE.Group {
constructor(data) {
super();
this.modelUrl = data;
this.onCreate();
}
onCreate() {
const dracoLoader = new THREE.DRACOLoader();
dracoLoader.setDecoderPath( 'vendor/three/examples/js/libs/draco/' );
dracoLoader.setDecoderConfig({ type: 'js' });
new THREE.GLTFLoader().
setDRACOLoader( dracoLoader ).
load(this.modelUrl,
gltf => {
this.updateTransform();
this.add(gltf.scene);
console.log(this);
});
}
updateMaterials(model) {
model.traverse(child => {
child.material = new THREE.MeshNormalMaterial();
});
}
updateTransform() {
}
dispose() {
}
rotate(x, y, z) {
this.rotation.x = THREE.MathUtils.degToRad(x);
this.rotation.y = THREE.MathUtils.degToRad(y);
this.rotation.z = THREE.MathUtils.degToRad(z);
}
scale(x, y, z) {
this.scale.set(x, y, z);
}
}
/* ====================================================================================================
* Loop
* ==================================================================================================== */
const clock = new THREE.Clock();
class Loop {
constructor(camera, scene, renderer) {
this.updatables = [];
}
start() {
renderer.setAnimationLoop(() => {
this.tick();
if(fakeVR) {
effect.render( scene, camera );
} else {
renderer.render(scene, camera);
}
});
}
stop() {
renderer.setAnimationLoop(null);
}
tick() {
const delta = clock.getDelta();
controller1.update();
controller2.update();
}
}
/* ====================================================================================================
* Scene
* ==================================================================================================== */
class Scene extends THREE.Scene {
constructor() {
super();
this.onCreate();
}
onCreate() {
new THREE.Scene();
this.background = new THREE.Color('skyblue');
}
createLights() {
const ambientLight = new THREE.HemisphereLight(
'white',
'darkslategrey',
5
);
const mainLight = new THREE.DirectionalLight('white', 4);
mainLight.position.set(100, 100, 100);
return { ambientLight, mainLight };
}
}
/* ====================================================================================================
* Application
* ==================================================================================================== */
class App {
constructor(i) {
// Setup <body> CSS style
document.getElementsByTagName("body")[0].style.cssText = 'width: 100vw; height: 100vh; margin: 0; padding: 0; overflow:
hidden;';
// Create VR scene <div>
const VRdiv = document.createElement('div');
VRdiv.id = "VRScene";
VRdiv.style.cssText = 'position: absolute; width: 100vw; height: 100vh; display: block;';
document.body.insertAdjacentElement('afterbegin', VRdiv);
// Controls
controls = new Controls();
// Setup Camera
camera = new Camera();
camera.position.set(0, 0, 0);
camera.up.set(0, 1, 0);
renderer = this.createRenderer();
scene = new Scene();
loop = new Loop(camera, scene, renderer);
container = document.querySelector('#VRScene');
container.append(renderer.domElement);
const { ambientLight, mainLight } = scene.createLights();
loop.updatables.push(controls);
scene.add(ambientLight, mainLight);
const resizer = new Resizer(container, camera, renderer);
this.init(i);
this.start();
}
init(i) {
this.setupXR(i);
}
setupXR(i) {
renderer.xr.addEventListener("sessionstart", () => (baseReferenceSpace = renderer.xr.getReferenceSpace()));
document.write(`
<button id='VRIcon' class='toggleVR' style=" position: fixed; bottom: 10px; left: 10px; outline: none; border:
none; background: none; width: 60px; z-index: 10000;" onclick='` + i
+ `.toggleVR()' title='Toggle VR Mode for Mobile Devices Only'>
<svg style="width: 100%; fill: white; stroke: rgba(0,0,0,0.25);" xmlns="http://www.w3.org/2000/svg"
xmlns:xlink="http://www.w3.org/1999/xlink" version="1.1" x="0px"
y="0px" viewBox="0 0 62.7 52.375" enable-background="new 0 0 62.7
41.9" xml:space="preserve"><path d="M53.4,5.5h-44c-2.1,0-3.7,1.7-3.7,3.7v22.6c0,2.1,1.7,3.7,3.7,3.7h13.4c1.1,0,2.1-0.6,2.5-1.6l3-7.5c1.2-2.6,4.9-2.5,6,0.1
l2.6,7.3c0.4,1,1.4,1.7,2.5,1.7h13.9c2.1,0,3.7-1.7,3.7-3.7V9.3C57.2,7.2,55.5,5.5,53.4,5.5z
M20.4,27c-3.2,0-5.7-2.6-5.7-5.7
s2.6-5.7,5.7-5.7s5.7,2.6,5.7,5.7S23.6,27,20.4,27z
M42.4,27c-3.2,0-5.7-2.6-5.7-5.7s2.6-5.7,5.7-5.7s5.7,2.6,5.7,5.7
S45.6,27,42.4,27z"/></svg>
</button>
<svg id="VROverlay" xmlns="http://www.w3.org/2000/svg"
xmlns:xlink="http://www.w3.org/1999/xlink" preserveAspectRatio="none
meet" width="100vw" height="100vh" viewBox="0, 0, 2000, 1000"
style="position: absolute; top: 0; left: 0; bottom: 0; right: 0;
z-index: 9999; display: none;"><g id="svgg"><path id="path0" d="M 0 0
L 0 1000 L 1000000 1000 L 1000000 0 L 0 0 z M 500.04492 15 C
636.69612 15.006191 768.82704 43.380704 892.76562 99.34375 C 896.20268 100.89576 898.95249 103.64562 900.50391 107.08398 C 1013.1637 356.78574 1013.1657 643.21219 900.50781 892.91602 C 898.9564 896.35438 896.20466 899.10424 892.76758 900.65625 C 768.82901 956.61724 636.69909 984.9898 499.95508 985 C 363.30182 984.99379 231.171 956.61724 107.23242 900.65625 C 103.79536 899.10424 101.04557 896.35438 99.494141 892.91602 C -13.163603 643.21219 -13.163603 356.78574 99.494141 107.08398 C 101.04557 103.64562 103.79536 100.89576 107.23242 99.34375 C 231.171 43.380704 363.3009 15.0062 500.04492 15 z M 1500.0449 15 C 1636.6961 15.006191 1768.827 43.380704 1892.7656 99.34375 C 1896.2026 100.89576 1898.9525 103.64562 1900.5039 107.08398 L 1900.5078 107.08398 C 2013.1656 356.78574 2013.1656 643.21219 1900.5078 892.91602 C 1898.9564 896.35438 1896.2047 899.10424 1892.7676 900.65625 C 1768.8291 956.61724 1636.6991 984.9898 1499.9551 985 C 1363.3019 984.99379 1231.1709 956.61724 1107.2324 900.65625 C 1103.7953 899.10424 1101.0455 896.35438 1099.4941 892.91602 C 986.8364 643.21219 986.8364 356.78574 1099.4941 107.08398 C 1101.0455 103.64562 1103.7953 100.89576 1107.2324 99.34375 C 1231.1709 43.380704 1363.3009 15.0062 1500.0449 15 z " stroke="none" fill="#000000" fill-rule="evenodd"></path></g></svg>
`);
if ('xr' in navigator) {
navigator.xr.isSessionSupported('immersive-vr').then(function(supported)
{
if(supported) {
renderer.xr.enabled = true;
new THREE.VRButton(renderer);
document.body.appendChild(THREE.VRButton.createButton(renderer));
document.getElementById('VRButton').style.display = 'block';
document.getElementById('VRIcon').style.display = 'block';
}
});
}
controller1 = new Controller(0);
controller2 = new Controller(1);
scene.add(controller1.controllers);
scene.add(controller1.controllerGrip);
scene.add(controller2.controllers);
scene.add(controller2.controllerGrip);
controls = new Controls();
action = new Action();
// loop.tick();
}
start() {
loop.start();
}
stop() {
loop.stop();
}
createRenderer() {
const renderer = new THREE.WebGLRenderer({ antialias: true });
renderer.setPixelRatio( window.devicePixelRatio );
renderer.physicallyCorrectLights = true;
effect = new THREE.StereoEffect( renderer );
effect.setSize( window.innerWidth, window.innerHeight );
return renderer;
}
toggleVR() {
if(DeviceMotionEvent && typeof DeviceMotionEvent.requestPermission === "function") {
DeviceMotionEvent.requestPermission();
}
if (fakeVR) {
fakeVR = false;
document.getElementById('VROverlay').style.display = 'none';
window.removeEventListener("deviceorientation", this.handleOrientation);
} else {
fakeVR = true;
window.addEventListener("deviceorientation", this.handleOrientation);
document.getElementById('VROverlay').style.display = 'block';
}
loop.stop();
loop.start();
}
handleMotion(event) {
}
handleOrientation(event) {
if (window.screen.orientation) {
screenOrientation = window.screen.orientation.angle;
} else if (typeof window.orientation === "number") {
screenOrientation = window.orientation;
} else if (window.screen.mozOrientation) {
screenOrientation = {
"portrait-primary": 0,
"portrait-secondary": 180,
"landscape-primary": 90,
"landscape-secondary": 270,
}[window.screen.mozOrientation];
}
var eyem = new THREE.Quaternion().setFromEuler(new THREE.Euler(-Math.PI / 2, 0, 0));
var d2r = Math.PI / 180;
orientation_a = event.alpha;
orientation_b = event.beta;
orientation_g = event.gamma;
var rotType = (screenOrientation === 0 || screenOrientation === 180) ? "YXZ" : "YZX";
if(rotType == "YZX")
{
if(orientation_g >= 0) {
screenOrientation = -90;
} else {
screenOrientation = 90;
orientation_a = orientation_a + 180;
}
}
var rotm = new THREE.Quaternion().setFromEuler(
new THREE.Euler(orientation_b * d2r, orientation_a * d2r, -orientation_g * d2r, rotType)
);
var devm = new THREE.Quaternion().setFromEuler(
new THREE.Euler(0, -screenOrientation * d2r, 0)
);
rotm.multiply(devm).multiply(eyem); //rot = (rot x dev) x eye
camera.quaternion.copy(rotm);
document.getElementById("Orientation_a1").innerHTML = orientation_a.toFixed(3);
document.getElementById("Orientation_b1").innerHTML = orientation_b.toFixed(3);
document.getElementById("Orientation_g1").innerHTML = orientation_g.toFixed(3);
document.getElementById("Orientation_o1").innerHTML = screenOrientation;
document.getElementById("Orientation_a2").innerHTML = orientation_a.toFixed(3);
document.getElementById("Orientation_b2").innerHTML = orientation_b.toFixed(3);
document.getElementById("Orientation_g2").innerHTML = orientation_g.toFixed(3);
document.getElementById("Orientation_o2").innerHTML = screenOrientation;
}
}
ul {
padding-inline-start: 15px;
}
li {
list-style-type: none;
overflow: hidden;
}
<script src="https://7vw5q6.csb.app/webxr.three.js"></script>
<div style="display: block; position: fixed; top: 10%; left: 10%; right: 60%; background: rgba(255,255,255,.65);">
<ul>
<li>X-axis (β): <span id="Orientation_b1">0</span><span>°</span></li>
<li>Y-axis (γ): <span id="Orientation_g1">0</span><span>°</span></li>
<li>Z-axis (α): <span id="Orientation_a1">0</span><span>°</span></li>
<li>Orientation: <span id="Orientation_o1">0</span><span>°</span></li>
</ul>
</div>
<div style="display: block; position: fixed; top: 10%; left: 60%; right: 10%; background: rgba(255,255,255,.65);">
<ul>
<li>X-axis (β): <span id="Orientation_b2">0</span><span>°</span></li>
<li>Y-axis (γ): <span id="Orientation_g2">0</span><span>°</span></li>
<li>Z-axis (α): <span id="Orientation_a2">0</span><span>°</span></li>
<li>Orientation: <span id="Orientation_o2">0</span><span>°</span></li>
</ul>
</div>
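One aside on the permission handling in toggleVR: on iOS 13+, the requestPermission() call returns a Promise, so a sketch of a safer pattern (not the original code) would wait for it to resolve before attaching the orientation listener:

if (typeof DeviceOrientationEvent !== 'undefined' &&
    typeof DeviceOrientationEvent.requestPermission === 'function') {
  DeviceOrientationEvent.requestPermission().then((state) => {
    // only listen once the user has actually granted access:
    if (state === 'granted') {
      window.addEventListener('deviceorientation', handleOrientation);
    }
  });
}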

Editing part sizes of a 3d model

I want to develop a web app for entering the measurements of a man and displaying a 3D model with those measurements. I have chosen three.js to start. I downloaded a 3D model named standard-male-figure from clara.io. Here is my code to display the human model.
import React, { Component } from "react";
import PropTypes from "prop-types";
import withStyles from "#material-ui/core/styles/withStyles";
import * as THREE from "three-full";
const styles = (/*theme*/) => ({
});
class ThreeDView extends Component {
constructor(props){
super(props);
this.start = this.start.bind(this);
this.stop = this.stop.bind(this);
this.renderScene = this.renderScene.bind(this);
this.animate = this.animate.bind(this);
}
componentDidMount() {
const width = this.mount.clientWidth;
const height = this.mount.clientHeight;
//ADD SCENE
this.scene = new THREE.Scene();
//ADD CAMERA
this.camera = new THREE.PerspectiveCamera(100,1);
this.camera.position.z = 12;
this.camera.position.y = 0;
this.camera.position.x = 0;
//ADD RENDERER
this.renderer = new THREE.WebGLRenderer({ antialias: true });
this.renderer.setClearColor("#f0f0f0");
this.renderer.setSize(width, height);
this.mount.appendChild(this.renderer.domElement);
// MOUSE ROTATION
this.orbit = new THREE.OrbitControls(this.camera,this.renderer.domElement);
this.orbit.update();
//ADD LIGHTS
this.light = new THREE.PointLight(0xffffff,1.3);
this.light.position.z = 10;
this.light.position.y=20;
this.scene.add(this.light);
// ADD MAN FIGURE
const loader = new THREE.ColladaLoader();
loader.load("/models/standard-male-figure.dae",(manFigure)=>{
this.man = manFigure;
this.man.name = "man-figure";
this.man.scene.position.y = -10;
this.scene.add(this.man.scene);
},undefined,()=>alert("Loading failed"));
this.start();
}
componentWillUnmount() {
this.stop();
this.mount.removeChild(this.renderer.domElement);
}
start() {
if (!this.frameId) {
this.frameId = requestAnimationFrame(this.animate);
}
}
stop () {
cancelAnimationFrame(this.frameId);
}
animate () {
this.renderScene();
this.frameId = window.requestAnimationFrame(this.animate);
}
renderScene () {
this.orbit.update();
this.light.position.z = this.camera.position.z;
this.light.position.y=this.camera.position.y+20;
this.light.position.x=this.camera.position.x;
this.renderer.render(this.scene, this.camera);
}
render() {
return (
<div style={{ height: "640px" }} ref={(mount) => { this.mount = mount; }} >
</div>
);
}
}
ThreeDView.propTypes = {
values: PropTypes.object
};
/*
all values in inches
values = {
heightOfHand:10,
, etc..
}
*/
export default withStyles(styles)(ThreeDView);
values is the measurements object that the user enters. I have no idea how to start updating the 3D model with these measurements. Please give me a starting point or any advice to complete this. Thank you!
First, you can get the current size and scale of your man:
const box = new THREE.Box3().setFromObject(this.man)
const currentObjectSize = box.getSize(new THREE.Vector3()) // getSize needs a target vector in newer three.js
const currentObjectScale = this.man.scale
and when the user updates the size value (newSize), you can calculate a new scale for your man:
const newScale = new THREE.Vector3(
currentObjectScale.x * newSize.x/currentObjectSize.x,
currentObjectScale.y * newSize.y/currentObjectSize.y,
currentObjectScale.z * newSize.z/currentObjectSize.z
)
and update your man with this scale
this.man.scale.set(newScale.x, newScale.y, newScale.z)
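If it helps, a hypothetical wiring of this into the React component above (the { width, height, depth } shape of values is an assumption, not something from the question):

componentDidUpdate(prevProps) {
  // Recompute the scale whenever the entered measurements change.
  if (!this.man || prevProps.values === this.props.values) return;
  const box = new THREE.Box3().setFromObject(this.man.scene);
  const size = box.getSize(new THREE.Vector3());
  const scale = this.man.scene.scale;
  const v = this.props.values; // assumed { width, height, depth } in scene units
  this.man.scene.scale.set(
    scale.x * (v.width / size.x),
    scale.y * (v.height / size.y),
    scale.z * (v.depth / size.z)
  );
}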

WebGL: Count the number of rendered vertices

Using the WebGL API, is there a way to count the number of vertices rendered within a given canvas? I've seen some tools that attempt to accomplish this task but some are giving strange results (e.g. Three.js' renderer.info.render is reporting my scene has 10,134.3 triangles).
Any help with using the raw WebGL API to count the number of rendered vertices (and, ideally, points and lines) would be greatly appreciated.
WebGL can't do this for you but you can add your own augmentation.
The most obvious way is just to track your own usage. Instead of calling gl.drawXXX call functionThatTracksDrawingCountsXXX and track the values yourself.
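A minimal sketch of that tracking approach (the function names are illustrative):

// Route every draw through a wrapper and accumulate the count per frame:
let vertCount = 0;

function drawArraysCounted(gl, mode, first, count) {
  vertCount += count;
  gl.drawArrays(mode, first, count);
}

function drawElementsCounted(gl, mode, count, type, offset) {
  vertCount += count;
  gl.drawElements(mode, count, type, offset);
}

// At the end of each frame, read the total and reset it:
// console.log('vertices this frame:', vertCount); vertCount = 0;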
You can also augment the WebGL context itself. Example
// copy this part into a file like `augmented-webgl.js`
// and include it in your page
(function() {
// NOTE: since WebGL constants are um, constant
// we could statically init this.
let primMap;
function addCount(ctx, type, count) {
const ctxInfo = ctx.info;
const primInfo = primMap[type];
ctxInfo.vertCount += count;
ctxInfo.primCount[primInfo.ndx] += primInfo.fn(count);
}
WebGLRenderingContext.prototype.drawArrays = (function(oldFn) {
return function(type, offset, count) {
addCount(this, type, count);
oldFn.call(this, type, offset, count);
};
}(WebGLRenderingContext.prototype.drawArrays));
WebGLRenderingContext.prototype.drawElements = (function(oldFn) {
return function(type, count, indexType, offset) {
addCount(this, type, count);
oldFn.call(this, type, count, indexType, offset);
};
}(WebGLRenderingContext.prototype.drawElements));
HTMLCanvasElement.prototype.getContext = (function(oldFn) {
return function(type, ...args) {
const ctx = oldFn.call(this, type, ...args);
if (ctx && type === "webgl") {
if (!primMap) {
primMap = {};
primMap[ctx.POINTS] = { ndx: 0, fn: count => count, };
primMap[ctx.LINE_LOOP] = { ndx: 1, fn: count => count, };
primMap[ctx.LINE_STRIP]= { ndx: 1, fn: count => count - 1, };
primMap[ctx.LINES] = { ndx: 1, fn: count => count / 2 | 0, };
primMap[ctx.TRIANGLE_STRIP] = { ndx: 2, fn: count => count - 2, };
primMap[ctx.TRIANGLE_FAN] = { ndx: 2, fn: count => count - 2, };
primMap[ctx.TRIANGLES] = { ndx: 2, fn: count => count / 3 | 0, };
};
ctx.info = {
vertCount: 0,
primCount: [0, 0, 0],
};
}
return ctx;
}
}(HTMLCanvasElement.prototype.getContext));
}());
// ---- cut above ----
const $ = document.querySelector.bind(document);
const scene = new THREE.Scene();
const camera = new THREE.PerspectiveCamera(75, 1, 0.1, 1000);
const renderer = new THREE.WebGLRenderer({canvas: $('canvas')});
const geometry = new THREE.BoxGeometry(1, 1, 1);
const items = [];
for (let i = 0; i < 50; ++i) {
let item;
switch(rand(0, 3) | 0) {
case 0:
case 1:
const material = new THREE.MeshBasicMaterial({
color: rand(0xFFFFFF) | 0,
wireframe: rand(0, 3) > 2,
});
item = new THREE.Mesh(geometry, material);
break;
case 2:
const pmat = new THREE.PointsMaterial({
color: rand(0xFFFFFF) | 0,
});
item = new THREE.Points(geometry, pmat);
break;
default:
throw "oops";
}
item.position.x = rand(-10, 10);
item.position.y = rand(-10, 10);
item.position.z = rand( 0, -50);
scene.add(item);
items.push(item);
}
camera.position.z = 5;
const countElem = $('#count');
function render(time) {
time *= 0.001;
resize();
// animate the items
items.forEach((items, ndx) => {
items.rotation.x = time * 1.2 + ndx * 0.01;
items.rotation.y = time * 1.1;
});
// turn on/off a random items
items[rand(items.length) | 0].visible = Math.random() > .5;
renderer.render(scene, camera);
// get the current counts
const info = renderer.context.info;
countElem.textContent = ` VERTS: ${info.vertCount}
POINTS: ${info.primCount[0]}
LINES: ${info.primCount[1]}
TRIANGLES: ${info.primCount[2]}`;
// zero out the count
renderer.context.info.vertCount = 0;
renderer.context.info.primCount = [0, 0, 0];
requestAnimationFrame(render);
}
requestAnimationFrame(render);
function rand(min, max) {
if (max === undefined) {
max = min;
min = 0;
}
return Math.random() * (max - min) + min;
}
function resize() {
const canvas = renderer.domElement;
const width = canvas.clientWidth;
const height = canvas.clientHeight;
if (canvas.width !== width || canvas.height !== height) {
renderer.setSize(width, height, false);
camera.aspect = width / height;
camera.updateProjectionMatrix();
}
}
body { border: 0; }
canvas { width: 100vw; height: 100vh; display: block; }
#ui { position: absolute; left: 1em; top: 1em; background: rgba(0,0,0,.5); color: white; padding: .5em; width: 10em; }
<canvas></canvas>
<div id="ui">
<pre id="count"></pre>
</div>
<script src="https://cdnjs.cloudflare.com/ajax/libs/three.js/92/three.min.js"></script>
Of course you might want to add support for drawArraysInstanced etc... and support for WebGL2.
We removed the amount of processed vertices from renderer.info.render since the important measurement is the amount of rendered primitives (triangles, points, lines). Please read https://github.com/mrdoob/three.js/pull/13404 and the related issues/PRs for more information. If you still want to know how many vertices were processed, you need to count them manually; WebGL can't do this for you.
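For reference, a short sketch of reading the primitive counts from a current three.js renderer after that change (as I understand the API):

// renderer.info.render reports rendered primitives, not processed vertices:
const { calls, triangles, points, lines } = renderer.info.render;
console.log('calls', calls, 'triangles', triangles, 'points', points, 'lines', lines);
// Set renderer.info.autoReset = false and call renderer.info.reset() yourself
// if you want to accumulate counts across multiple render() calls per frame.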

Rendering in discrete layers, then composited

In my project, I now have 3 discrete layers.
Background
Midground
Foreground
Each of these layers could potentially have vastly different unit scales and potentially overlapping geometry. But I don't want them to render as if they occupy the same "space". I want them to render in their logical layer order, not their order in 3D space.
For example, let's say the Background is a narrow tube, 10 units in radius, that I put over the camera to achieve a tunnel-like effect. I then want to put a large cube as the Foreground, 100 units on each side, in the scene far from the camera.
In this scenario, the cube and the tunnel intersect and obscure each other. I'm looking for a way to render the entire tunnel, then render the entire cube, and then put that rendered cube on top of the rendered tunnel. And I want any alpha transparency in the cube's textures/shaders to be cleanly composited, showing the rendered tunnel behind transparent pixels.
So:
Am I describing a technique or feature that exists? If so, what's that called?
Can WebGL do this?
Can three.js do this?
Will this cause any massive performance drops over rendering a whole frame in one go?
How would I structure my rendering in three.js to set this up?
I'm still new to *GL graphics programming, so sorry if my vocabulary isn't accurate. Pedantic vocabulary corrections are appreciated, as they will help me google!
I think I managed this one, after some reverse engineering of this example:
http://mrdoob.github.com/three.js/examples/webgl_rtt.html
Basically
renderer.autoClear = false;
Then instead of rendering one scene like this:
render: function() {
renderer.render(scene, camera);
}
I manually clear before rendering multiple scenes:
render: function() {
renderer.clear()
renderer.render(background, camera);
renderer.render(midground, camera);
renderer.render(foreground, camera);
}
Still not totally sure of performance implications, however.
First off, you could render to 3 different canvases and just set their z-index and position so they overlap, and the browser will composite them.
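A minimal sketch of that multi-canvas setup (the element ids are assumptions):

// One renderer per stacked canvas; the browser composites them by z-index.
const layerRenderers = ['background', 'midground', 'foreground'].map((id, i) => {
  const canvas = document.getElementById(id);
  canvas.style.cssText = `position: absolute; left: 0; top: 0; z-index: ${i}`;
  // alpha: true keeps the clear color transparent so lower layers show through.
  return new THREE.WebGLRenderer({ canvas, alpha: true });
});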
If you want to do it all in 1 canvas then basically you just clear the depth buffer after drawing some stuff.
drawStuffInBack();
// clear the depth (and stencil buffers)
gl.clear(gl.DEPTH_BUFFER_BIT | gl.STENCIL_BUFFER_BIT);
drawStuffInMiddle();
// clear the depth (and stencil buffers)
gl.clear(gl.DEPTH_BUFFER_BIT | gl.STENCIL_BUFFER_BIT);
drawStuffInFront();
The following is a working example that has a VisualLayers class for managing any number of layers, and it uses the renderer.autoClear = false and depth-clearing techniques that Alex Wayne's and gman's answers hinted at.
This approach is nice because renderOrder of objects is not being modified (that's another approach) and thus will not introduce other issues.
Try playing with the options in the UI to see what it does:
// @ts-check
////////////////////////
// LAYER SYSTEM
////////////////////////
/** @typedef {{name: string, backingScene: THREE.Scene, order: number}} Layer */
class VisualLayers {
/**
* @type {Array<Layer>}
* @private
*/
__layers = [];
constructor(
/** @private @type {THREE.WebGLRenderer} */ __renderer,
/** @private @type {typeof THREE.Scene} */ __Scene = THREE.Scene
) {
this.__renderer = __renderer;
this.__Scene = __Scene;
}
defineLayer(/** @type {string} */ name, /** @type {number=} */ order = 0) {
const layer = this.__getLayer(name);
// The default layer always has order 0.
const previousOrder = layer.order;
layer.order = name === "default" ? 0 : order;
// Sort only if order changed.
if (previousOrder !== layer.order)
this.__layers.sort((a, b) => a.order - b.order);
return layer;
}
/**
* Get a layer by name (if it doesn't exist, creates it with default order 0).
* @private
*/
__getLayer(/** @type {string} */ name) {
let layer = this.__layers.find((l) => l.name === name);
if (!layer) {
layer = { name, backingScene: new this.__Scene(), order: 0 };
layer.backingScene.autoUpdate = false;
this.__layers.push(layer);
}
return layer;
}
removeLayer(/** @type {string} */ name) {
const index = this.__layers.findIndex((l) => {
if (l.name === name) {
l.backingScene.children.length = 0;
return true;
}
return false;
});
if (index >= 0) this.__layers.splice(index, 1);
}
hasLayer(/** @type {string} */ name) {
return this.__layers.some((l) => l.name === name);
}
/** @readonly */
get layerCount() {
return this.__layers.length;
}
addObjectToLayer(
/** @type {THREE.Object3D} */ obj,
/** @type {string | string[]} */ layers
) {
if (Array.isArray(layers)) {
for (const name of layers) this.__addObjectToLayer(obj, name);
return;
}
this.__addObjectToLayer(obj, layers);
}
addObjectsToLayer(
/** @type {THREE.Object3D[]} */ objects,
/** @type {string | string[]} */ layers
) {
for (const obj of objects) {
this.addObjectToLayer(obj, layers);
}
}
/** @private @readonly */
__emptyArray = Object.freeze([]);
/** @private */
__addObjectToLayer(
/** @type {THREE.Object3D} */ obj,
/** @type {string} */ name
) {
const layer = this.__getLayer(name);
const proxy = Object.create(obj, {
children: { get: () => this.__emptyArray }
});
layer.backingScene.children.push(proxy);
}
removeObjectFromLayer(
/** @type {THREE.Object3D} */ obj,
/** @type {string | string[]} */ nameOrNames
) {
if (Array.isArray(nameOrNames)) {
for (const name of nameOrNames) {
const layer = this.__layers.find((l) => l.name === name);
if (!layer) continue;
this.__removeObjectFromLayer(obj, layer);
}
return;
}
const layer = this.__layers.find((l) => l.name === nameOrNames);
if (!layer) return;
this.__removeObjectFromLayer(obj, layer);
}
/** @private */
__removeObjectFromLayer(
/** @type {THREE.Object3D} */ obj,
/** @type {Layer} */ layer
) {
const children = layer.backingScene.children;
const index = children.findIndex(
(proxy) => /** @type {any} */ (proxy).__proto__ === obj
);
if (index >= 0) {
children[index] = children[children.length - 1];
children.pop();
}
}
removeObjectsFromAllLayers(/** @type {THREE.Object3D[]} */ objects) {
for (const layer of this.__layers) {
for (const obj of objects) {
this.__removeObjectFromLayer(obj, layer);
}
}
}
render(
/** @type {THREE.Camera} */ camera,
/** @type {(layerName: string) => void} */ beforeEach,
/** @type {(layerName: string) => void} */ afterEach
) {
for (const layer of this.__layers) {
beforeEach(layer.name);
this.__renderer.render(layer.backingScene, camera);
afterEach(layer.name);
}
}
}
//////////////////////
// VARS
//////////////////////
let camera, stats, geometry, material, object, object2, root;
let time = 0;
/** @type {THREE.Scene} */
let scene;
/** @type {THREE.WebGLRenderer} */
let renderer;
/** @type {VisualLayers} */
let visualLayers;
const clock = new THREE.Clock();
const greenColor = "#27ae60";
const options = {
useLayers: true,
showMiddleBox: true,
rotate: true,
layer2Order: 2
};
//////////////////////
// INIT
//////////////////////
~(function init() {
setup3D();
renderLoop();
})();
////////////////////////////////
// SETUP 3D
////////////////////////////////
function setup3D() {
const container = document.createElement("div");
container.id = "container";
document.body.appendChild(container);
// CAMERA
camera = new THREE.PerspectiveCamera(
70,
window.innerWidth / window.innerHeight,
1,
10000
);
camera.position.x = 0;
camera.position.z = 500;
camera.position.y = 0;
scene = new THREE.Scene();
// RENDERERS
renderer = new THREE.WebGLRenderer({ antialias: true, alpha: true });
renderer.setClearColor(0x111111);
container.appendChild(renderer.domElement);
// LAYERS
visualLayers = new VisualLayers(renderer);
// Layers don't have to be defined. Adding an object to a layer will
// automatically create the layer with order 0. But let's define layers with
// order values.
visualLayers.defineLayer("layer1", 1);
visualLayers.defineLayer("layer2", 2);
visualLayers.defineLayer("layer3", 3);
// LIGHTS
const directionalLight = new THREE.DirectionalLight(0xffffff, 0.5);
directionalLight.position.set(300, 0, 300);
scene.add(directionalLight);
visualLayers.addObjectToLayer(directionalLight, [
"layer1",
"layer2",
"layer3"
]);
const ambientLight = new THREE.AmbientLight(0xffffff, 0.4);
scene.add(ambientLight);
visualLayers.addObjectToLayer(ambientLight, ["layer1", "layer2", "layer3"]);
// GEOMETRY
root = new THREE.Object3D();
scene.add(root);
geometry = new THREE.BoxGeometry(100, 100, 100);
material = new THREE.MeshPhongMaterial({
color: greenColor,
transparent: false,
opacity: 1
});
object = new THREE.Mesh(geometry, material);
root.add(object);
visualLayers.addObjectToLayer(object, "layer1");
object.position.y = 80;
object.position.z = -20;
// object.rotation.y = -Math.PI / 5
object2 = new THREE.Mesh(geometry, material);
object.add(object2);
visualLayers.addObjectToLayer(object2, "layer2");
object2.position.y -= 80;
object2.position.z = -20;
object2.rotation.y = -Math.PI / 5;
const object3 = new THREE.Mesh(geometry, material);
object2.add(object3);
visualLayers.addObjectToLayer(object3, "layer3");
object3.position.y -= 80;
object3.position.z = -20;
object3.rotation.y = -Math.PI / 5;
// GUI
const pane = new Tweakpane({
title: "VisualLayers"
});
pane.addInput(options, "useLayers", { label: "use layers" });
pane.addInput(options, "showMiddleBox", { label: "show middle box" });
pane.addInput(options, "rotate");
pane
.addInput(options, "layer2Order", {
label: "layer2 order",
options: {
0: 0,
2: 2,
4: 4
}
})
.on("change", () => visualLayers.defineLayer("layer2", options.layer2Order));
// STATS
// SEE: https://github.com/mrdoob/stats.js
stats = new Stats();
stats.domElement.style.position = "absolute";
stats.domElement.style.left = "0px";
stats.domElement.style.top = "0px";
stats.setMode(0);
document.body.appendChild(stats.domElement);
}
//////////////////////
// RESIZE
//////////////////////
(window.onresize = function (event) {
camera.aspect = window.innerWidth / window.innerHeight;
camera.updateProjectionMatrix();
renderer.setSize(window.innerWidth, window.innerHeight);
})();
//////////////////////
// RAF RENDER LOOP
//////////////////////
function render() {
stats.begin();
if (options.rotate) {
time += clock.getDelta();
object.rotation.y += 0.02;
root.rotation.y = Math.PI / 2 + (Math.PI / 6) * Math.sin(time * 0.001);
}
object2.visible = options.showMiddleBox;
if (options.useLayers) {
scene.updateWorldMatrix(true, true);
renderer.autoClear = false;
renderer.clear();
visualLayers.render(camera, beforeEachLayerRender, afterEachLayerRender);
} else {
renderer.autoClear = true;
renderer.render(scene, camera);
}
stats.end();
}
function renderLoop() {
render();
requestAnimationFrame(renderLoop);
}
function beforeEachLayerRender(layer) {}
function afterEachLayerRender(layer) {
renderer.clearDepth();
}
html,
body,
#container {
margin: 0px;
padding: 0px;
width: 100%;
height: 100%;
}
canvas {
background: transparent;
display: block;
width: 100%;
height: 100%;
position: absolute;
left: 0;
top: 0;
}
<script src="https://cdn.jsdelivr.net/npm/tweakpane#1.5.5/dist/tweakpane.min.js"></script>
<script src="//cdnjs.cloudflare.com/ajax/libs/stats.js/r11/Stats.min.js"></script>
<script src="//unpkg.com/three#0.121.1/build/three.min.js"></script>
<script src="//unpkg.com/postprocessing#6.17.4/build/postprocessing.js"></script>
(example on codepen)
