I created a ShapeGeometry with the text. How can I keep the text facing the camera when the camera moves?
...
this.textGeometry = new THREE.ShapeGeometry(THREE.FontUtils.generateShapes(value, parameters));
this.textValue = new THREE.Mesh(this.textGeometry, new THREE.MeshBasicMaterial({ color: color, side: THREE.DoubleSide }));
this.textValue.matrixAutoUpdate = true;
this.add(this.textValue)
...
I think my problem is that I modified the quaternion of the parent Object3D:
this.quaternion.setFromAxisAngle (axis, radians);
then the single operation:
textValue.quaternion.copy( camera.quaternion );
is not sufficient.
How can I fix the rotation, taking the state of the parent quaternion into account?
If you don't care about calling the base updateMatrix function, this can be a solution:
yourShapeGeometry.prototype.updateMatrix = function(){
// THREE.Object3D.prototype.updateMatrix.call(this);
fixOrientation(this.textValue);
}
function fixOrientation(mesh){
mesh.setRotationFromQuaternion(camera.quaternion);
mesh.updateMatrix();
}
or simply override the updateMatrixWorld of your text mesh, like:
textMesh.updateMatrixWorld = updateSpriteWorld;
function updateSpriteWorld( force ){
    if ( this.matrixWorldNeedsUpdate === true || force === true ) {
        this.setRotationFromQuaternion( camera.quaternion );
        this.updateMatrix();
        this.matrixWorld.copy( this.matrix );
        this.matrixWorldNeedsUpdate = false;
        force = true;
    }
    // update children with the standard method
    for ( var i = 0, l = this.children.length; i < l; i ++ ) {
        this.children[ i ].updateMatrixWorld( force );
    }
}
I think this should do the trick:
this.textValue.lookAt( camera.position );
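If the parent already has its own rotation applied (as in the question), copying or looking at the camera in local space isn't enough. Here is a minimal per-frame sketch that cancels the parent's world rotation before copying the camera's orientation; textValue and camera are taken from the snippets above, and .invert() was called .inverse() in older three.js releases:
// call once per frame, after the parent's transforms are up to date
var parentWorldQuat = new THREE.Quaternion();
function faceCamera( textMesh, camera ) {
    textMesh.parent.getWorldQuaternion( parentWorldQuat );
    // local = parentWorld^-1 * camera, so the text's world orientation matches the camera
    textMesh.quaternion.copy( parentWorldQuat ).invert().multiply( camera.quaternion );
}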
I've imported a GLTF file with two different meshes. My goal is to give each mesh a material with a unique custom fragment shader using onBeforeCompile. Each mesh has the same type of material (MeshNormalMaterial).
When I try to apply one fragment shader to one material and the other fragment shader to the other material, both materials wind up with the same fragment shader. The fragment shader each material has depends on which material I setup first.
Here are a few pictures showing what I'm talking about:
Below is all the relevant code.
Main code: This is the general structure of my code. I've enclosed the important part between "PRIMARY AREA OF INTEREST" comments. For simplicity, I've replaced my shader code with "..." or a comment describing what it does. They do work as shown in the pictures above.
// Three.JS Canvas
const threeDisplay = document.getElementById("threeDisplay");
// Globals
var displayDimensions = getElemDimensions(threeDisplay); // Uniform
var currentTime = 0; // Uniform
var helix = null; // Mesh
var innerHelix = null; // Mesh
var horseshoe = null; // Mesh
// Set the scene and camera up
const scene = new THREE.Scene();
const camera = initCamera();
// Setup a directional light
const light = new THREE.DirectionalLight( 0xffffff, 1.0 );
light.position.set(-0.2, 1, -0.6);
scene.add(light);
// Setup WebGL renderer
const renderer = initRenderer();
threeDisplay.appendChild( renderer.domElement );
// Load the gltf model
new GLTFLoader().load( "./spiral_pillar_hq_horseshoe.glb", function (object) {
const helixFragmentShaderReplacements = [
{
from: ' ... ',
to: ' // rainbow '
}
];
const horseshoeFragmentShaderReplacements = [
{
from: ' ... ',
to: ' // white '
}
];
//////////////////////////////////////
// PRIMARY AREA OF INTEREST - START //
//////////////////////////////////////
// Turn the horseshoe into a shader.
horseshoe = object.scene.children[1];
var horseshoeGeometry = horseshoe.geometry;
var horseshoeMaterial = shaderMeshMaterial(new THREE.MeshNormalMaterial(), horseshoeGeometry, horseshoeFragmentShaderReplacements);
var horseshoeMesh = new THREE.Mesh(horseshoeGeometry, horseshoeMaterial);
horseshoe = horseshoeMesh;
horseshoe.rotation.z = deg2rad(180); // Re-orient the horseshoe to the correct position and rotation.
horseshoe.position.y = 13;
scene.add(horseshoe);
// Turn the inner helix into a colorful, wiggly shader.
helix = object.scene.children[0];
var helixGeometry = helix.geometry;
var helixMaterial = shaderMeshMaterial(new THREE.MeshNormalMaterial(), helixGeometry, helixFragmentShaderReplacements);
var helixMesh = new THREE.Mesh(helixGeometry, helixMaterial);
helix = helixMesh;
scene.add(innerHelix);
animate();
////////////////////////////////////
// PRIMARY AREA OF INTEREST - END //
////////////////////////////////////
}, undefined, function (error) {
console.error(error);
});
Below are functions which are relevant.
shaderMeshMaterial: Constructs a new material based on the supplied materialType that supports editing the default shader. If the problem isn't the postprocessing, then it may stem from this function.
// Globals used: displayDimensions
function shaderMeshMaterial(materialType, geometry, fragmentShaderReplacements) {
var material = materialType;
material.onBeforeCompile = function ( shader ) {
// Uniforms
shader.uniforms.time = { value: 0 };
shader.uniforms.resolution = { value: new THREE.Vector2(displayDimensions.width, displayDimensions.height) };
shader.uniforms.bboxMin = { value: geometry.boundingBox.min };
shader.uniforms.bboxMax = { value: geometry.boundingBox.max };
fragmentShaderReplacements.forEach((rep) => {
shader.fragmentShader = shader.fragmentShader.replace(rep.from, rep.to);
});
console.log(shader);
material.userData.shader = shader;
}
return material;
}
initRenderer: Sets up the renderer. Just showing you guys the renderer setup I have in case that's important.
// Globals used: displayDimensions
function initRenderer() {
var renderer = new THREE.WebGLRenderer({
alpha: true,
antialias: true,
precision: "mediump"
});
renderer.setClearColor( 0x000000, 0);
renderer.setPixelRatio( window.devicePixelRatio );
renderer.setSize( displayDimensions.width, displayDimensions.height );
renderer.shadowMap.enabled = true;
renderer.outputEncoding = THREE.sRGBEncoding;
renderer.toneMapping = THREE.ACESFilmicToneMapping;
renderer.toneMappingExposure = 1.25;
return renderer;
}
animate: Handles the animation frames.
// Globals used: renderer, currentTime
function animate (timestamp = 0) {
requestAnimationFrame(animate);
resizeRendererToDisplaySize(renderer);
currentTime = timestamp/1000; // Current time in seconds.
scene.traverse( function ( child ) {
if ( child.isMesh ) {
const shader = child.material.userData.shader;
if ( shader ) {
shader.uniforms.time.value = currentTime;
}
}
} );
renderer.render( scene, camera );
};
One last thing to note: when I inspect the console log of shader from the shaderMeshMaterial function, I can see that the fragment shaders are indeed different, as they should be for each material. I'm also not sure why there are four console logs when there should only be two.
Sorry for all the code, but I did condense it to where all irrelevant code was stripped out. I'm fairly new to Three.JS, so any possible explanations as to why this is happening are much appreciated!
EDIT: Removed vertex shader parameter from shaderMeshMaterial function to keep this question focused on just the fragment shaders. Though this problem does apply to both the vertex and fragment shaders, I figure if you fix one then you'll fix the other.
EDIT 2: Added language identifiers to code snippets. I also removed the postprocessing function and the problem still persists, so I know the problem isn't caused by that. I've updated the code above to reflect this change. As a happy side effect of removing the postprocessing function, the console.log of the shader variable from shaderMeshMaterial now appears twice in the log (as it should).
EDIT 3: (Implementing WestLangley's suggestion) I tweaked the shaderMeshMaterial function by adding the customProgramCacheKey function. I had to condense the four parameters of shaderMeshMaterial into one for the sake of the customProgramCacheKey function. I believe I implemented the function correctly, but I'm still getting the same result as before where both materials display the same fragment shader.
New "PRIMARY AREA OF INTEREST" code:
horseshoe = object.scene.children[1];
var horseshoeGeometry = horseshoe.geometry;
var meshData = {
materialType: new THREE.MeshNormalMaterial(),
geometry: horseshoeGeometry,
fragmentShaderReplacements: horseshoeFragmentShaderReplacements
}
var horseshoeMaterial = shaderMeshMaterial(meshData);
var horseshoeMesh = new THREE.Mesh(horseshoeGeometry, horseshoeMaterial);
horseshoe = horseshoeMesh;
horseshoe.rotation.z = deg2rad(180); // Re-orient the horseshoe to the correct position and rotation.
horseshoe.position.y = 13;
scene.add(horseshoe);
// Turn the inner helix into a colorful, wiggly shader.
helix = object.scene.children[0];
var helixGeometry = helix.geometry;
var meshData2 = {
materialType: new THREE.MeshNormalMaterial(),
geometry: helixGeometry,
fragmentShaderReplacements: helixFragmentShaderReplacements
}
var helixMaterial = shaderMeshMaterial(meshData2);
var helixMesh = new THREE.Mesh(helixGeometry, helixMaterial);
helix = helixMesh;
scene.add(innerHelix);
animate();
New shaderMeshMaterial code:
// Globals used: displayDimensions
function shaderMeshMaterial(meshData) {
var material = meshData.materialType;
material.onBeforeCompile = function ( shader ) {
// Uniforms
shader.uniforms.time = { value: 0 };
shader.uniforms.resolution = { value: new THREE.Vector2(displayDimensions.width, displayDimensions.height) };
shader.uniforms.bboxMin = { value: meshData.geometry.boundingBox.min };
shader.uniforms.bboxMax = { value: meshData.geometry.boundingBox.max };
meshData.fragmentShaderReplacements.forEach((rep) => {
shader.fragmentShader = shader.fragmentShader.replace(rep.from, rep.to);
});
material.customProgramCacheKey = function () {
return meshData;
};
console.log(shader);
material.userData.shader = shader;
}
return material;
}
WestLangley's suggestion worked for me!
material.onBeforeCompile = ...
// Make sure WebGLRenderer doesn't reuse a single program
material.customProgramCacheKey = function () {
return UNIQUE_PER_MATERIAL_ID;
};
I believe your mistake is returning meshData from customProgramCacheKey. customProgramCacheKey needs a concrete identifier, like a number or a string.
It would be nice to understand what exactly is happening and why we need to specify customProgramCacheKey.
EDIT: I discovered that the default value for customProgramCacheKey is computed as follows in the three.js source:
customProgramCacheKey() {
return this.onBeforeCompile.toString();
}
Perhaps this explains the default caching behavior, because calling toString on a function returns that function's body literally as a string.
For example, consider the function const myFunc = () => { return 1 }. Calling myFunc.toString() returns "() => { return 1 }".
So if you assign onBeforeCompile in a loop, the function body as a string never changes, and the renderer reuses the same cached program.
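As a concrete sketch of the fix, building on the shaderMeshMaterial from the question (the extra cacheKey parameter here is my own addition for illustration, and the non-essential uniforms are omitted):
function shaderMeshMaterial(meshData, cacheKey) {
    var material = meshData.materialType;
    material.onBeforeCompile = function ( shader ) {
        shader.uniforms.time = { value: 0 };
        meshData.fragmentShaderReplacements.forEach((rep) => {
            shader.fragmentShader = shader.fragmentShader.replace(rep.from, rep.to);
        });
        material.userData.shader = shader;
    };
    // A primitive value (string or number) works as a program cache key; an object does not.
    material.customProgramCacheKey = function () {
        return cacheKey;
    };
    return material;
}

// A unique key per material forces the renderer to compile a separate program for each:
var horseshoeMaterial = shaderMeshMaterial(meshData, 'horseshoe');
var helixMaterial = shaderMeshMaterial(meshData2, 'helix');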
I'm facing a very weird issue with the WebXR API. WebXR changes the VR camera properties when I re-enter VR. The camera somehow cuts off my objects (shown below) when I re-enter VR mode the second, third, or nth time.
It always works properly (shown below) when I enter VR the first time.
I would like to know why the objects are getting cut off on the second/third/nth VR attempt, and also how to debug the WebXR immersive-vr camera properties.
I'm using very basic WebXR API code, as follows:
window.onload=function()
{
init();
animate();
}
function init()
{
canvas = document.getElementById( 'vw_canvas' );
canvas.width = canvas.clientWidth;
canvas.height = canvas.clientHeight;
canvasCssWidth = canvas.style.width;
canvasCssHeight = canvas.style.height;
group = new THREE.Object3D();//we create this to make it a parent of camera object.
camera = new THREE.PerspectiveCamera( 75, canvas.width / canvas.height, 1, 10000 );
group.rotation.order = 'XZY';//default is XYZ..we change this to XZY. because in case of XYZ when we rotate the object around its Y axis even the X and Z axis changes. so to avoid that we give higher priority to Z axis.
scene = new THREE.Scene();
group.add(camera);
scene.add(group);
//add more 3D objects to scene
renderer = new THREE.WebGLRenderer({antialias:true, powerPreference: "high-performance"});
renderer.setPixelRatio( canvas.devicePixelRatio );
renderer.setSize( canvas.width, canvas.height );
renderer.xr.enabled = true;
renderer.xr.setReferenceSpaceType( 'local' );
canvas.appendChild(renderer.domElement);
var WEBVR =
{
createButton: function ( renderer )
{
function showEnterXR( /*device*/ ) {
var currentSession = null;
function onSessionStarted( session )
{
session.addEventListener( 'end', onSessionEnded );
renderer.xr.setSession( session );
vrButton.style.backgroundImage="url('icons/noVR.svg')";
document.getElementById("vr-button-tooltip").setAttribute("tooltip","Exit VR");
currentSession = session;
isVRpresenting=true;
openVRMenu();
}
function onSessionEnded( event )
{
currentSession.removeEventListener( 'end', onSessionEnded );
renderer.xr.setSession( null );
vrButton.style.backgroundImage="url('icons/yesVR.svg')";
document.getElementById("vr-button-tooltip").setAttribute("tooltip","Enter VR");
currentSession = null;
isVRpresenting=false;
closeVRMenu();
}
vrButton.style.backgroundImage="url('icons/yesVR.svg')";
document.getElementById("vr-button-tooltip").setAttribute("tooltip","Enter VR");
isVRpresenting=false;
vrButton.onclick = function ()
{
if (runOnlyOnce)
{
makeVRMenuItems();
runOnlyOnce=false;
}
if ( currentSession === null )
{
var sessionInit = { optionalFeatures: [ 'local-floor', 'bounded-floor' ] };
navigator.xr.requestSession( 'immersive-vr', sessionInit ).then( onSessionStarted );
}
else
{
currentSession.end();
}
};
}
function showVRNotFound()
{
vrButton.onclick = function ()
{
//open VR popup that shows devices to use in order to exp VR
};
isVRavailable=false;
vrButton.style.backgroundImage="url('icons/noVR.svg')";
document.getElementById("vr-button-tooltip").setAttribute("tooltip","VR is not supported on your device");
vrButton.onclick = null;
// renderer.xr.setDevice( null );
isVRpresenting=false;
}
if ( 'xr' in navigator )
{
isVRavailable=true;
navigator.xr.isSessionSupported( 'immersive-vr' ).then( function ( supported ) {
supported ? showEnterXR() : showVRNotFound();
} );
}
else
{
vrButton.onclick = function ()
{
//open VR popup that shows devices to use in order to exp VR
};
isVRavailable=false;
vrButton.style.backgroundImage="url('icons/noVR.svg')";
document.getElementById("vr-button-tooltip").setAttribute("tooltip","VR is not supported on you device");
isVRpresenting=false;
}
},
};
WEBVR.createButton( renderer );
}
function animate()
{
renderer.setAnimationLoop( animate );
update();
}
function update()
{
renderer.render( scene, camera );
}
It seems the WebXR session takes the scene camera's parameters only the first time it enters VR. On the second and subsequent visits, the WebXR session resets the camera to its default settings. So, to make the XR camera match the scene camera, we need to call session.updateRenderState({ depthFar: 10000 });. In my case the scene camera had depthFar = 10000, but the WebXR camera reset depthFar to 1000 on the second and subsequent visits, which caused the frustum culling shown in the question.
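As a sketch of where that call can go, assuming the onSessionStarted handler from the question and that camera.near/camera.far hold the intended clipping planes:
function onSessionStarted( session )
{
    session.addEventListener( 'end', onSessionEnded );
    renderer.xr.setSession( session );
    // Re-apply the scene camera's clipping planes to the XR render state;
    // otherwise re-entering VR falls back to the WebXR defaults (depthFar = 1000).
    session.updateRenderState( { depthNear: camera.near, depthFar: camera.far } );
    currentSession = session;
    isVRpresenting = true;
}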
I can't seem to clone an FBX model (downloaded from Mixamo) while retaining its animation keyframes.
I have attempted a number of approaches, including the cloneFbx gist (used in the example below), all to no avail. Even placing the entire FBXLoader() call inside a loop does not work as expected, since only one of the models animates at a time.
This issue has been partially addressed here, but I cannot seem to ‘copy’ the animation sequence as answer suggests.
Can anyone point out where I’m going wrong?
Here's a rough example of one of my tests:
Load fbx model and store animation:
var loader = new THREE.FBXLoader();
loader.load( 'models/Walking.fbx', function ( fbx ) {
clip = fbx.animations[ 0 ];
// createVehicle(fbx); // Works! Creates one animated model via FBX
// cloneFbx via: https://gist.github.com/kevincharm/bf12a2c673b43a3988f0f171a05794c1
for (var i = 0; i < 2; i++) {
const model = cloneFbx(fbx);
createVehicle(model);
}
});
Add mixers and actions based on stored clip, add model to scene:
function createVehicle(model){
model.mixer = new THREE.AnimationMixer( model );
mixers.push( model.mixer );
var action = model.mixer.clipAction( clip );
action.play();
model.traverse( function ( child ) {
if ( child.isMesh ) {
child.castShadow = true;
child.receiveShadow = true;
}
});
const x = Math.random() * groundSize - groundSize/2;
const z = Math.random() * groundSize - groundSize/2;
model.position.set(x, 0, z);
const vehicle = new Vehicle(model, x, z);
vehicles.push(vehicle);
scene.add( model );
}
Animation cycle:
if ( mixers.length > 0 ) {
    var delta = clock.getDelta(); // getDelta() should be called once per frame
    for ( var i = 0; i < mixers.length; i ++ ) {
        mixers[ i ].update( delta );
    }
}
I couldn't figure out an elegant solution to this. The best I could come up with was creating a loop with the loading sequence inside it; this is very slow, since the FBX has to be parsed each time.
The key here was having an animation mixer controlling the animated objects as a group as opposed to creating a mixer per animated object.
If anyone can figure out a better solution, I would be super keen to hear it (perhaps using the cloneFbx script properly).
Create mixer, load FBX:
// Create mixer to run animations
mixer = new THREE.AnimationMixer( scene );
// Load fbx
var loader = new THREE.FBXLoader();
for (var i = 0; i < 5; i++) {
loader.load( 'models/Walking.fbx', function ( fbx ) {
mixer.clipAction( fbx.animations[ 0 ], fbx )
.startAt( - Math.random() )
.play();
createVehicle(fbx);
});
}
Create class instances, add to scene:
function createVehicle(model){
const x = Math.random() * groundSize - groundSize/2;
const z = Math.random() * groundSize - groundSize/2;
model.position.set(x, 0, z);
const vehicle = new Vehicle(model, x, z);
vehicles.push(vehicle);
scene.add( model );
}
Draw cycle:
mixer.update( clock.getDelta() );
I found out that SkeletonUtils.clone() works well for me.
https://threejs.org/docs/index.html#examples/en/utils/SkeletonUtils.clone
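For reference, a sketch of how that could look for the Mixamo FBX above, reusing mixers and createVehicle from the earlier snippets (depending on the three.js version, SkeletonUtils is the global THREE.SkeletonUtils from examples/js or a module under examples/jsm/utils):
var loader = new THREE.FBXLoader();
loader.load( 'models/Walking.fbx', function ( fbx ) {
    var clip = fbx.animations[ 0 ];
    for ( var i = 0; i < 5; i++ ) {
        // SkeletonUtils.clone() rebinds the cloned skinned meshes to the cloned skeleton,
        // which a plain Object3D.clone() does not do.
        var model = THREE.SkeletonUtils.clone( fbx );
        model.mixer = new THREE.AnimationMixer( model );
        mixers.push( model.mixer );
        model.mixer.clipAction( clip ).startAt( - Math.random() ).play();
        createVehicle( model );
    }
});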
I am working with an object which i loaded like this (I save it as a global variable such that I can change it later):
model1 = null;
var mtlLoader = new THREE.MTLLoader();
mtlLoader.load("http://blabla.mtl", function(materials) {
materials.preload();
var objLoader = new THREE.OBJLoader();
objLoader.setMaterials(materials);
objLoader.load("http://blabla.obj", function(object) {
object.scale.x = 0.0004;
object.scale.y = 0.0004;
object.scale.z = 0.0004;
object.rotateX(Math.PI / 2);
object.rotateZ(Math.PI / 2);
object.add(new THREE.AxisHelper(2))
model1 = object; //save in global variables
scene.add(model1);
});
});
When I am rendering I want to change the position of the model, based on a box's change in position:
function render(ms: number) {
if (lastTime) {
update((ms-lastTime)/1000)
}
lastTime = ms
requestAnimationFrame(render)
renderer.render( scene, camera )
}
var pos_current;
var pos_new;
var trans;
function update(dt: number) {
if (pause.on) return
//save position of box before it is transformed
pos_current = box.position.clone();
box.updatePosition()
//save new position of box
pos_new = box.position.clone();
//transform the 3D-model according to the box's transformation
trans = pos_current.sub(pos_new);
model1.position.add(trans);
console.log(model1.position)
}
I can in the console see that the position of model1 changes, but the object just doesn't move at all, and I can't really figure out why.
Any help is very appreciated :)
Got it to work - the problem was that I was trying to update the position of model1, before it was actually done loading in the obj-loader :D
I have a query. I also made a global variable and assigned the loaded object to it, but the console log says the variable is null. Is there something I am missing?
Update:
I understand now that I am reading humanModel before the object has finished loading, but how should I handle that?
let humanModel = null;
OBJLoader.load(
// resource URL
"/Models/Model_1(Malequins)/1/Cadnav.com_B0426014.obj",
// called when resource is loaded
function(object) {
scene.add(object);
humanModel = object; //adding the humanModel to the global variable
camera.lookAt(object.position);
console.log(object.position);
console.log(camera.position);
},
// called when loading is in progresses
function(xhr) {
console.log((xhr.loaded / xhr.total) * 100 + "% loaded");
},
// called when loading has errors
function(error) {
console.log("An error happened " + error);
}
);
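// This line runs immediately, before the load callback above fires, so humanModel is still null here.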
console.log(humanModel.position);
Thanks
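As a sketch of one way to handle the timing: only use the model from (or after) the load callback, for example by moving the dependent work into a named callback. The onHumanModelLoaded name below is illustrative:
let humanModel = null;

function onHumanModelLoaded( object ) {
    humanModel = object;
    scene.add( humanModel );
    camera.lookAt( humanModel.position );
    // Anything that depends on the model goes here (or is called from here),
    // because this only runs once loading has finished.
    console.log( humanModel.position );
}

OBJLoader.load(
    "/Models/Model_1(Malequins)/1/Cadnav.com_B0426014.obj",
    onHumanModelLoaded,
    undefined,
    function ( error ) { console.log( "An error happened " + error ); }
);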
You can place the object (the OBJ file) along the x, y, or z axis; to do that, set its position, for example:
var loader = new THREE.OBJLoader();
loader.load(
'/models/female.obj',
function(object){
object.position.y=-20;
scene.add(object);
}
)
I'm having a problem understanding the class system in Three.js.
I have this code in player.js:
function Player() {
var mesh = new THREE.Object3D();
this.player = null;
this.loader = new THREE.JSONLoader();
this.name = 'player';
this.loader.load(
'obj/models/minecraft_sole.json',
function ( geometry, materials ) {
var material = new THREE.MultiMaterial( materials );
this.player = new THREE.Mesh( geometry, material );
this.player.position.set(0, 0, 0);
this.player.scale.set(.5,.5,.5);
this.player.castShadow = true;
this.player.receiveShadow = false;
mesh.add( this.player );
}
);
Player.prototype.constructor = Player;
Player.prototype = Object.create(THREE.Object3D.prototype);
}
Player.prototype.updatePosition = function(){
this.mesh.position.y += 0.05;
}
And basically what I'm trying to achieve:
In main.js after all standard setup for init()/render()/animate() and all...
I create variable called johny:
var johny = new Player();
Now everything loads great, but in player.js I want to be able to define some prototype method, and in that method I want to listen for a click event. After that event fires, I want my player mesh to animate to a certain position or start rotating/scaling.
My pseudo code for better understanding is:
var Player = function(){
// define mesh and all
}
player.add.eventListener( 'click' ){
//code to animate player
}
Remember, I want all of this to be part of player.js, so that after calling:
var johny = new Player();
I don't have to add event-listening functions to main.js and all that.
Second, I want my code to be modular, as you may have already noticed :)
So I did manage to understand it.
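For reference, here is a minimal sketch of one way this can be structured (not necessarily the exact solution): the Player is itself an Object3D, keeps a reference to its loaded mesh, and wires up a DOM click listener plus a THREE.Raycaster. The onClick name and the raycasting approach are illustrative assumptions.
function Player( camera, domElement ) {
    THREE.Object3D.call( this );  // a Player *is* an Object3D, so it can be added to the scene
    this.name = 'player';
    this.playerMesh = null;

    var self = this;
    new THREE.JSONLoader().load( 'obj/models/minecraft_sole.json', function ( geometry, materials ) {
        self.playerMesh = new THREE.Mesh( geometry, new THREE.MultiMaterial( materials ) );
        self.playerMesh.scale.set( 0.5, 0.5, 0.5 );
        self.playerMesh.castShadow = true;
        self.add( self.playerMesh );
    } );

    // Listen for clicks on the canvas and check whether this player's mesh was hit.
    var raycaster = new THREE.Raycaster();
    var mouse = new THREE.Vector2();
    domElement.addEventListener( 'click', function ( event ) {
        var rect = domElement.getBoundingClientRect();
        mouse.x = ( ( event.clientX - rect.left ) / rect.width ) * 2 - 1;
        mouse.y = - ( ( event.clientY - rect.top ) / rect.height ) * 2 + 1;
        raycaster.setFromCamera( mouse, camera );
        if ( self.playerMesh && raycaster.intersectObject( self.playerMesh ).length > 0 ) {
            self.onClick();
        }
    } );
}

Player.prototype = Object.create( THREE.Object3D.prototype );
Player.prototype.constructor = Player;

Player.prototype.onClick = function () {
    // animate the player here: move, rotate, scale, etc.
    this.rotation.y += Math.PI / 8;
};

// In main.js:
// var johny = new Player( camera, renderer.domElement );
// scene.add( johny );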